From e65da73bf6fc6f0265777a5da1133612613c5164 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 15 Oct 2025 02:13:57 +0200 Subject: [PATCH 01/27] Suppress solver output in SegmentedCalculation in favor of a progress bar, add a suppress_output() method and add tqdm to dependencies --- flixopt/calculation.py | 33 ++++++++++++++++++++++++--------- flixopt/utils.py | 26 ++++++++++++++++++++++++++ pyproject.toml | 1 + 3 files changed, 51 insertions(+), 9 deletions(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index 9d2164e1e..1bf103aa4 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -13,6 +13,7 @@ import logging import math import pathlib +import sys import timeit import warnings from collections import Counter @@ -20,6 +21,7 @@ import numpy as np import yaml +from tqdm import tqdm from . import io as fx_io from . import utils as utils @@ -575,10 +577,19 @@ def do_modeling_and_solve( logger.info(f'{" Segmented Solving ":#^80}') self._create_sub_calculations() - for i, calculation in enumerate(self.sub_calculations): - logger.info( - f'{self.segment_names[i]} [{i + 1:>2}/{len(self.segment_names):<2}] ' - f'({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]}):' + # Create tqdm progress bar with custom format that prints to stdout + progress_bar = tqdm( + enumerate(self.sub_calculations), + total=len(self.sub_calculations), + desc='Solving segments', + unit='segment', + file=sys.stdout, # Force tqdm to write to stdout instead of stderr + ) + + for i, calculation in progress_bar: + # Update progress bar description with current segment info + progress_bar.set_description( + f'Solving ({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]})' ) if i > 0 and self.nr_of_previous_values > 0: @@ -600,11 +611,15 @@ def do_modeling_and_solve( f'Following InvestmentModels were found: {invest_elements}' ) - calculation.solve( - solver, - log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', - log_main_results=log_main_results, - ) + # Redirect solver stdout to null to avoid cluttering the output + with utils.suppress_output(): + calculation.solve( + solver, + log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', + log_main_results=log_main_results, + ) + + progress_bar.close() for calc in self.sub_calculations: for key, value in calc.durations.items(): diff --git a/flixopt/utils.py b/flixopt/utils.py index dd1f93d64..efc1836e4 100644 --- a/flixopt/utils.py +++ b/flixopt/utils.py @@ -5,6 +5,9 @@ from __future__ import annotations import logging +import os +import sys +from contextlib import contextmanager from typing import Any, Literal import numpy as np @@ -84,3 +87,26 @@ def convert_dataarray( return f':::{data.name}' else: raise ValueError(f'Unknown mode {mode}') + + +@contextmanager +def suppress_output(): + """Redirect both Python and C-level stdout/stderr to os.devnull.""" + with open(os.devnull, 'w') as devnull: + # Save original file descriptors + old_stdout_fd = os.dup(1) + old_stderr_fd = os.dup(2) + try: + # Flush any pending text + sys.stdout.flush() + sys.stderr.flush() + # Redirect low-level fds to devnull + os.dup2(devnull.fileno(), 1) + os.dup2(devnull.fileno(), 2) + yield + finally: + # Restore fds + os.dup2(old_stdout_fd, 1) + os.dup2(old_stderr_fd, 2) + os.close(old_stdout_fd) + os.close(old_stderr_fd) diff --git a/pyproject.toml b/pyproject.toml index 
391f23c3a..29e0c448c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,6 +41,7 @@ dependencies = [ # Utilities "pyyaml >= 6.0.0, < 7", "rich >= 13.0.0, < 15", + "tqdm >= 4.66.0, < 5", "tomli >= 2.0.1, < 3; python_version < '3.11'", # Only needed with python 3.10 or earlier # Default solver "highspy >= 1.5.3, < 2", From 05180a8f07d4135300125dd3947ee6f7c25d8b01 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 15 Oct 2025 02:21:35 +0200 Subject: [PATCH 02/27] Add logging.info() about solving --- flixopt/calculation.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index 1bf103aa4..355514c00 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -238,6 +238,8 @@ def solve( **solver.options, ) self.durations['solving'] = round(timeit.default_timer() - t_start, 2) + logger.info(f'Model solved with {solver.name} in {self.durations["solving"]} seconds.') + logger.info(f'Model status after solve: {self.model.status}') if self.model.status == 'warning': # Save the model and the flow_system to file in case of infeasibility @@ -625,6 +627,8 @@ def do_modeling_and_solve( for key, value in calc.durations.items(): self.durations[key] += value + logger.info(f'Model solved with {solver.name} in {self.durations["solving"]} seconds.') + self.results = SegmentedCalculationResults.from_calculation(self) return self From 5d9c672cd9550c3c07d4922e97b5c0febaf21e9b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 13:52:06 +0100 Subject: [PATCH 03/27] Merge main into feature/402-feature-silent-framework --- .github/CONTRIBUTING.md | 2 +- .github/ISSUE_TEMPLATE/bug_report.yml | 118 ++- .github/ISSUE_TEMPLATE/config.yml | 8 +- .github/ISSUE_TEMPLATE/feature_request.yml | 117 +-- .github/ISSUE_TEMPLATE/task.yml | 35 + .github/workflows/python-app.yaml | 18 +- CONTRIBUTE.md | 168 +++++ docs/contribute.md | 46 +- docs/getting-started.md | 2 +- docs/index.md | 178 +++-- docs/stylesheets/extra.css | 396 +++++++++++ .../user-guide/{index.md => core-concepts.md} | 56 +- .../user-guide/mathematical-notation/index.md | 4 +- docs/user-guide/migration-guide-v3.md | 614 ++++------------ examples/00_Minmal/minimal_example.py | 87 +-- examples/01_Simple/simple_example.py | 5 +- .../02_Complex/complex_example_results.py | 5 +- .../example_calculation_types.py | 25 +- .../two_stage_optimization.py | 6 +- flixopt/__init__.py | 2 +- flixopt/aggregation.py | 17 +- flixopt/calculation.py | 9 +- flixopt/color_processing.py | 261 +++++++ flixopt/effects.py | 74 +- flixopt/flow_system.py | 243 ++++--- flixopt/interface.py | 33 +- flixopt/io.py | 671 +++++++++++++++++- flixopt/structure.py | 427 +++++++++-- flixopt/utils.py | 39 - mkdocs.yml | 272 +++++-- pyproject.toml | 17 +- tests/conftest.py | 2 + tests/ressources/Sim1--flow_system.nc4 | Bin 0 -> 218834 bytes tests/ressources/Sim1--solution.nc4 | Bin 0 -> 210822 bytes tests/ressources/Sim1--summary.yaml | 92 +++ tests/run_all_tests.py | 10 - tests/test_examples.py | 2 +- tests/test_functional.py | 36 +- tests/test_heatmap_reshape.py | 91 +++ tests/test_io.py | 14 +- tests/test_plots.py | 151 ---- tests/test_plotting_api.py | 138 ++++ tests/test_results_plots.py | 33 +- 43 files changed, 3118 insertions(+), 1406 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/task.yml create mode 100644 CONTRIBUTE.md create mode 100644 docs/stylesheets/extra.css rename docs/user-guide/{index.md => core-concepts.md} (77%) 
create mode 100644 flixopt/color_processing.py delete mode 100644 flixopt/utils.py create mode 100644 tests/ressources/Sim1--flow_system.nc4 create mode 100644 tests/ressources/Sim1--solution.nc4 create mode 100644 tests/ressources/Sim1--summary.yaml delete mode 100644 tests/run_all_tests.py create mode 100644 tests/test_heatmap_reshape.py delete mode 100644 tests/test_plots.py create mode 100644 tests/test_plotting_api.py diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 2a51618d9..e9876c089 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -12,7 +12,7 @@ Thanks for your interest in contributing to FlixOpt! πŸš€ 2. **Install for Development** ```bash - pip install -e ".[full]" + pip install -e ".[full, dev, docs]" ``` 3. **Make Changes & Submit PR** diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 94b4491a5..3b1a32fb2 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -6,106 +6,70 @@ body: - type: markdown attributes: value: | - Thanks for taking the time to fill out this bug report! - - **Before submitting**: Please search [existing issues](https://github.com/flixOpt/flixopt/issues) to avoid duplicates. - -- type: checkboxes - id: checks - attributes: - label: Version Confirmation - description: Please confirm you can reproduce this on a supported version - options: - - label: I have confirmed this bug exists on the latest [release](https://github.com/flixOpt/flixopt/releases) of FlixOpt - required: true + **Quick guide**: Describe what's broken, provide code to reproduce if possible. + For simple bugs, just fill the first field. - type: textarea id: problem attributes: - label: Bug Description - description: Clearly describe what went wrong - placeholder: | - What happened? What did you expect to happen instead? - - Include any error messages or unexpected outputs. + label: What's broken? + description: Describe the bug - what happened vs. what you expected validations: required: true - type: textarea id: example attributes: - label: Minimal Reproducible Example + label: Code to reproduce description: | Provide the smallest possible code example that reproduces the bug. See [how to create minimal bug reports](https://matthewrocklin.com/minimal-bug-reports). - placeholder: | - import flixopt as fx + value: | import pandas as pd + import numpy as np + import flixopt as fx - # Minimal example that reproduces the bug - timesteps = pd.date_range('2024-01-01', periods=24, freq='h') - flow_system = fx.FlowSystem(timesteps) + fx.CONFIG.Logging.console = True + fx.CONFIG.Logging.level = 'DEBUG' + fx.CONFIG.apply() + flow_system = fx.FlowSystem(pd.date_range('2020-01-01', periods=3, freq='h')) - # Add components that trigger the bug... 
+ flow_system.add_elements( + fx.Bus('Heat'), + fx.Bus('Gas'), + fx.Effect('Costs', '€', 'Cost', is_standard=True, is_objective=True), + fx.linear_converters.Boiler( + 'Boiler', + eta=0.5, + Q_th=fx.Flow(label='Heat', bus='Heat', size=50), + Q_fu=fx.Flow(label='Gas', bus='Gas'), + ), + fx.Sink( + 'Sink', + inputs=[ + fx.Flow(label='Demand', bus='Heat', size=1, fixed_relative_profile=np.array([30, 0, 20])) + ], + ), + fx.Source( + 'Source', + outputs=[fx.Flow(label='Gas', bus='Gas', size=1000, effects_per_flow_hour=0.04)], + ), + ) + + calculation = fx.FullCalculation('Simulation1', flow_system).do_modeling().solve(fx.solvers.HighsSolver(0.01, 60)) - # Show the problematic operation - result = flow_system.solve() # This should fail/behave unexpectedly render: python - validations: - required: true - type: textarea id: error-output attributes: - label: Error Output - description: If there's an error message, paste the full traceback here + label: Error message + description: Paste the full traceback if there is one render: shell -- type: dropdown - id: solver - attributes: - label: Solver Used - description: Which solver were you using? - options: - - HiGHS (default) - - Gurobi - - CPLEX - - GLPK - - CBC - - Other (specify below) - validations: - required: true - -- type: input - id: os - attributes: - label: Operating System - placeholder: "e.g., Windows 11, macOS 14.2, Ubuntu 22.04" - validations: - required: true - -- type: input - id: python-version - attributes: - label: Python Version - placeholder: "e.g., 3.11.5" - validations: - required: true - - type: textarea - id: environment + id: context attributes: - label: Environment Info - description: | - Run one of these commands and paste the output: - - `pip freeze` - - `conda env export` - render: shell - value: > -
- - ``` - Replace this with your environment info - ``` - -
+ label: Additional context + description: Solver, Python/OS version, environment details, or anything else relevant + placeholder: "HiGHS solver, Python 3.11, macOS 14" diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 94d96c479..0c30b34f2 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,12 +3,6 @@ contact_links: - name: πŸ€” Modeling Questions url: https://github.com/flixOpt/flixopt/discussions/categories/q-a about: "How to model specific energy systems, components, and constraints" - - name: ⚑ Performance & Optimization - url: https://github.com/flixOpt/flixopt/discussions/categories/performance - about: "Solver performance, memory usage, and optimization speed issues" - - name: πŸ’‘ Ideas & Suggestions - url: https://github.com/flixOpt/flixopt/discussions/categories/ideas - about: "Share ideas and discuss potential improvements with the community" - name: πŸ“– Documentation url: https://flixopt.github.io/flixopt/latest/ - about: "Browse guides, API reference, and examples" + about: "Guides, API reference, and examples" diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index dd5c8def2..1c48cf10c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -6,122 +6,33 @@ body: - type: markdown attributes: value: | - Thanks for suggesting a new feature! - - **Before submitting**: Please search [existing issues](https://github.com/flixOpt/flixopt/issues) and check our [roadmap](https://github.com/flixOpt/flixopt/discussions) to avoid duplicates. - -- type: checkboxes - id: checks - attributes: - label: Prerequisites - options: - - label: I have searched existing issues and discussions - required: true - - label: I have checked the [documentation](https://flixopt.github.io/flixopt/latest/) - required: true - -- type: dropdown - id: feature-type - attributes: - label: Feature Category - description: What type of feature is this? - options: - - New Component (storage, generation, conversion, etc.) - - Enhancement to Existing Component - - New Optimization Feature - - Data Input/Output Improvement - - Results/Visualization Enhancement - - Performance/Solver Improvement - - API/Usability Improvement - - Documentation/Examples - - Other - validations: - required: true + **Quick guide**: Describe what you want and why it's useful. + Skip optional fields for simple ideas. - type: textarea - id: problem + id: description attributes: - label: Problem Statement - description: What problem would this feature solve? - placeholder: | - Current limitation: "FlixOpt doesn't support [specific energy system component/feature]..." - - Impact: "This prevents users from modeling [specific scenarios]..." - -- type: textarea - id: solution - attributes: - label: Proposed Solution - description: Describe your proposed solution in detail - placeholder: | - I propose adding a new component/feature that would... - - Key capabilities: - - Feature 1 - - Feature 2 - - Feature 3 + label: What feature or improvement? + description: Describe what should be added or changed validations: required: true - type: textarea id: use-case attributes: - label: Use Case & Examples - description: Provide concrete examples of how this would be used - placeholder: | - Real-world scenario: "I'm modeling a microgrid with battery storage and need to..." 
- - Specific requirements: - - Must handle [specific constraint] - - Should support [specific behavior] - - Would benefit [specific user group] - validations: - required: true + label: Use case + description: What problem does this solve? What would you use it for? + placeholder: "When modeling X, I need to do Y..." - type: textarea - id: code-example + id: api-idea attributes: - label: Desired API (Optional) - description: Show how you'd like to use this feature + label: API idea (optional) + description: How might it work? Sketch a rough API if you have ideas placeholder: | - # Example of proposed API - component = fx.NewComponent( + # Example: + component = fx.NewThing( label='example', - parameter1=value1, - parameter2=value2 + param=value ) - - flow_system.add_component(component) render: python - -- type: textarea - id: alternatives - attributes: - label: Alternatives Considered - description: What workarounds or alternatives have you tried? - placeholder: | - Current workaround: "I'm currently using [existing component] but it doesn't support..." - - Other approaches considered: "I looked into [alternative] but..." - -- type: dropdown - id: priority - attributes: - label: Priority/Impact - description: How important is this feature for your work? - options: - - Critical - Blocking important work - - High - Would significantly improve workflow - - Medium - Nice to have enhancement - - Low - Minor improvement - -- type: textarea - id: additional-context - attributes: - label: Additional Context - description: References, papers, examples from other tools, etc. - placeholder: | - References: - - Research paper: [Title and link] - - Similar feature in [other tool]: [description] - - Industry standard: [description] diff --git a/.github/ISSUE_TEMPLATE/task.yml b/.github/ISSUE_TEMPLATE/task.yml new file mode 100644 index 000000000..bb4741e02 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/task.yml @@ -0,0 +1,35 @@ +name: πŸ“‹ Task +description: Track work items, refactoring, cleanup, or general todos +title: "[TASK] " +labels: ["type: task"] +body: +- type: markdown + attributes: + value: | + **Quick guide**: Describe what needs to be done. + Use this for refactoring, cleanup, documentation, or general work items. + +- type: textarea + id: description + attributes: + label: What needs to be done? + description: Describe the task + validations: + required: true + +- type: textarea + id: details + attributes: + label: Details + description: Context, steps, requirements, or anything else relevant + placeholder: | + - Step 1 + - Step 2 + - Related to: #123 + +- type: textarea + id: acceptance + attributes: + label: Done when... + description: What defines this task as complete? + placeholder: "Tests pass, code is clean, docs updated..." 
diff --git a/.github/workflows/python-app.yaml b/.github/workflows/python-app.yaml index d8caba0d4..f4dbc28c5 100644 --- a/.github/workflows/python-app.yaml +++ b/.github/workflows/python-app.yaml @@ -35,7 +35,7 @@ jobs: - name: Set up uv uses: astral-sh/setup-uv@v6 with: - version: "0.8.23" + version: "0.9.5" enable-cache: true - name: Set up Python @@ -75,7 +75,7 @@ jobs: - name: Set up uv uses: astral-sh/setup-uv@v6 with: - version: "0.8.23" + version: "0.9.5" enable-cache: true - name: Set up Python ${{ matrix.python-version }} @@ -95,7 +95,7 @@ jobs: timeout-minutes: 45 needs: lint # Only run examples on releases (tags) - if: startsWith(github.ref, 'refs/tags/v') + if: startsWith(github.ref, 'refs/tags/v') || (github.event_name == 'push' && github.ref == 'refs/heads/main') steps: - name: Check out code @@ -104,7 +104,7 @@ jobs: - name: Set up uv uses: astral-sh/setup-uv@v6 with: - version: "0.8.23" + version: "0.9.5" enable-cache: true - name: Set up Python ${{ env.PYTHON_VERSION }} @@ -130,7 +130,7 @@ jobs: - name: Set up uv uses: astral-sh/setup-uv@v6 with: - version: "0.8.23" + version: "0.9.5" enable-cache: true - name: Set up Python @@ -170,7 +170,7 @@ jobs: - name: Set up uv uses: astral-sh/setup-uv@v6 with: - version: "0.8.23" + version: "0.9.5" enable-cache: true - name: Set up Python @@ -212,7 +212,7 @@ jobs: - name: Set up uv uses: astral-sh/setup-uv@v6 with: - version: "0.8.23" + version: "0.9.5" enable-cache: true - name: Set up Python @@ -298,7 +298,7 @@ jobs: - name: Set up uv uses: astral-sh/setup-uv@v6 with: - version: "0.8.23" + version: "0.9.5" enable-cache: true - name: Set up Python @@ -379,7 +379,7 @@ jobs: - name: Set up uv uses: astral-sh/setup-uv@v6 with: - version: "0.8.23" + version: "0.9.5" enable-cache: true - name: Set up Python diff --git a/CONTRIBUTE.md b/CONTRIBUTE.md new file mode 100644 index 000000000..5c73ba04b --- /dev/null +++ b/CONTRIBUTE.md @@ -0,0 +1,168 @@ +# Contributing to FlixOpt + +We warmly welcome contributions from the community! Whether you're fixing bugs, adding features, improving documentation, or sharing examples, your contributions are valuable. + +## Ways to Contribute + +### πŸ› Report Issues +Found a bug or have a feature request? Please [open an issue](https://github.com/flixOpt/flixopt/issues) on GitHub. + +When reporting issues, please include: +- A clear description of the problem +- Steps to reproduce the issue +- Expected vs. actual behavior +- Your environment (OS, Python version, FlixOpt version) +- Minimal code example if applicable + +### πŸ’‘ Share Examples +Help others learn FlixOpt by contributing examples: +- Real-world use cases +- Tutorial notebooks +- Integration examples with other tools +- Add them to the `examples/` directory + +### πŸ“– Improve Documentation +Documentation improvements are always welcome: +- Fix typos or clarify existing docs +- Add missing documentation +- Translate documentation +- Improve code comments + +### πŸ”§ Submit Code Contributions +Ready to contribute code? Great! See the sections below for setup and guidelines. + +--- + +## Development Setup + +### Getting Started +1. Fork and clone the repository: + ```bash + git clone https://github.com/flixOpt/flixopt.git + cd flixopt + ``` + +2. Install development dependencies: + ```bash + pip install -e ".[full, dev]" + ``` + +3. Set up pre-commit hooks (one-time setup): + ```bash + pre-commit install + ``` + +4. 
Verify your setup: + ```bash + pytest + ``` + +### Working with Documentation +FlixOpt uses [mkdocs](https://www.mkdocs.org/) to generate documentation. + +To work on documentation: +```bash +pip install -e ".[docs]" +mkdocs serve +``` +Then navigate to http://127.0.0.1:8000/ + +--- + +## Code Quality Standards + +### Automated Checks +We use [Ruff](https://github.com/astral-sh/ruff) for linting and formatting. After the one-time setup above, **code quality checks run automatically on every commit**. + +### Manual Checks +To run checks manually: +- `ruff check --fix .` - Check and fix linting issues +- `ruff format .` - Format code +- `pre-commit run --all-files` - Run all pre-commit checks + +### Testing +All tests are located in the `tests/` directory with a flat structure: +- `test_component.py` - Component tests +- `test_flow.py` - Flow tests +- `test_storage.py` - Storage tests +- etc. + +#### Running Tests +- `pytest` - Run the full test suite (excluding examples by default) +- `pytest tests/test_component.py` - Run a specific test file +- `pytest tests/test_component.py::TestClassName` - Run a specific test class +- `pytest tests/test_component.py::TestClassName::test_method` - Run a specific test +- `pytest -m slow` - Run only slow tests +- `pytest -m examples` - Run example tests (normally skipped) +- `pytest -k "keyword"` - Run tests matching a keyword + +#### Common Test Patterns +The `tests/conftest.py` file provides shared fixtures: +- `solver_fixture` - Parameterized solver fixture (HiGHS, Gurobi) +- `highs_solver` - HiGHS solver instance +- Coordinate configuration fixtures for timesteps, periods, scenarios + +Use these fixtures by adding them as function parameters: +```python +def test_my_feature(solver_fixture): + # solver_fixture is automatically provided by pytest + model = fx.FlowSystem(...) + model.solve(solver_fixture) +``` + +#### Testing Guidelines +- Write tests for all new functionality +- Ensure all tests pass before submitting a PR +- Aim for 100% test coverage for new code +- Use descriptive test names that explain what's being tested +- Add the `@pytest.mark.slow` decorator for tests that take >5 seconds + +### Coding Guidelines +- Follow [PEP 8](https://pep8.org/) style guidelines +- Write clear, self-documenting code with helpful comments +- Include type hints for function signatures +- Create or update tests for new functionality +- Aim for 100% test coverage for new code + +--- + +## Workflow + +### Branches & Pull Requests +1. Create a feature branch from `main`: + ```bash + git checkout -b feature/your-feature-name + ``` + +2. Make your changes and commit them with clear messages + +3. Push your branch and open a Pull Request + +4. Ensure all CI checks pass + +### Branch Naming +- Features: `feature/feature-name` +- Bug fixes: `fix/bug-description` +- Documentation: `docs/what-changed` + +### Commit Messages +- Use clear, descriptive commit messages +- Start with a verb (Add, Fix, Update, Remove, etc.) +- Keep the first line under 72 characters + +--- + +## Releases + +We follow **Semantic Versioning** (MAJOR.MINOR.PATCH). Releases are created manually from the `main` branch by maintainers. + +--- + +## Questions? + +If you have questions or need help, feel free to: +- Open a discussion on GitHub +- Ask in an issue +- Reach out to the maintainers + +Thank you for contributing to FlixOpt! 
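To make the testing guidelines above concrete, here is a minimal sketch of a slow-marked test. It reuses the `highs_solver` fixture named in the contributing guide; `build_year_long_system()` is a hypothetical helper standing in for real model setup, and the `'warning'` status check mirrors the infeasibility handling in `flixopt/calculation.py`.

```python
import pytest

import flixopt as fx


@pytest.mark.slow  # runtime >5 seconds, so it is excluded from the default quick run
def test_full_year_system_solves(highs_solver):
    """A year-long hourly system should solve without ending in the 'warning' status."""
    flow_system = build_year_long_system()  # hypothetical helper, not part of flixopt
    calculation = fx.FullCalculation('YearRun', flow_system).do_modeling().solve(highs_solver)
    # 'warning' is the status that calculation.py treats as a failed/infeasible solve
    assert calculation.model.status != 'warning'
```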
diff --git a/docs/contribute.md b/docs/contribute.md index 44af34069..e1b93aecb 100644 --- a/docs/contribute.md +++ b/docs/contribute.md @@ -1,45 +1 @@ -# Contributing to the Project - -We warmly welcome contributions from the community! This guide will help you get started with contributing to our project. - -## Development Setup -1. Clone the repository `git clone https://github.com/flixOpt/flixopt.git` -2. Install the development dependencies `pip install -e ".[dev]"` -3. Install pre-commit hooks `pre-commit install` (one-time setup) -4. Run `pytest` to ensure your code passes all tests - -## Code Quality -We use [Ruff](https://github.com/astral-sh/ruff) for linting and formatting. After the one-time setup above, **code quality checks run automatically on every commit**. - -To run manually: -- `ruff check --fix .` to check and fix linting issues -- `ruff format .` to format code or -- `pre-commit run` or `pre-commit run --all-files` to trigger all checks - -## Documentation (Optional) -FlixOpt uses [mkdocs](https://www.mkdocs.org/) to generate documentation. -To work on documentation: -```bash -pip install -e ".[docs]" -mkdocs serve -``` -Then navigate to http://127.0.0.1:8000/ - -## Testing -- `pytest` to run the test suite -- You can also run the provided python script `run_all_test.py` - ---- -# Best practices - -## Coding Guidelines - -- Follow PEP 8 style guidelines -- Write clear, commented code -- Include type hints -- Create or update tests for new functionality -- Ensure 100% test coverage for new code - -## Branches & Releases -New features should be branched from `main` into `feature/*` -As stated, we follow **Semantic Versioning**. Releases are created manually from the `main` branch. +{! ../CONTRIBUTE.md !} diff --git a/docs/getting-started.md b/docs/getting-started.md index 9af389755..044ffb872 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -37,6 +37,6 @@ Working with FlixOpt follows a general pattern: Now that you've installed FlixOpt and understand the basic workflow, you can: -- Learn about the [core concepts of FlixOpt](user-guide/index.md) +- Learn about the [core concepts of flixopt](user-guide/core-concepts.md) - Explore some [examples](examples/index.md) - Check the [API reference](api-reference/index.md) for detailed documentation diff --git a/docs/index.md b/docs/index.md index 2c6420f7f..c9b01f284 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,108 +1,144 @@ -# FlixOpt +--- +title: Home +hide: + - navigation + - toc +--- -## 🎯 Vision +
-**FlixOpt aims to be the most accessible and flexible Python framework for energy and material flow optimization.** +

flixOpt

-We believe that optimization modeling should be **approachable for beginners** yet **powerful for experts**. Too often, frameworks force you to choose between ease of use and flexibility. FlixOpt refuses this compromise. +

Energy and Material Flow Optimization Framework

-### Where We're Going +

Model, optimize, and analyze complex energy systems with a powerful Python framework designed for flexibility and performance.

-**Short-term goals:** +

+ πŸš€ Get Started + πŸ’‘ View Examples + ⭐ GitHub +

-- **Multi-dimensional modeling**: Multi-period investments and scenario-based stochastic optimization are available (periods and scenarios are in active development for enhanced features) -- **Enhanced component library**: More pre-built, domain-specific components (sector coupling, hydrogen systems, thermal networks, demand-side management) +
-**Medium-term vision:** +## :material-map-marker-path: Quick Navigation -- **Modeling to generate alternatives (MGA)**: Built-in support for exploring near-optimal solution spaces to produce more robust, diverse solutions under uncertainty -- **Interactive tutorials**: Browser-based, reactive tutorials for learning FlixOpt without local installation ([marimo](https://marimo.io)) -- **Standardized cost calculations**: Align with industry standards (VDI 2067) for CAPEX/OPEX calculations -- **Advanced result analysis**: Time-series aggregation, automated reporting, and rich visualization options -- **Recipe collection**: Community-driven library of common modeling patterns, data manipulation techniques, and optimization strategies (see [Recipes](user-guide/recipes/index.md) - help wanted!) + -- **Researchers** who need to prototype quickly but may require deep customization later -- **Engineers** who want reliable, tested components without black-box abstractions -- **Students** learning optimization who benefit from clear, Pythonic interfaces -- **Practitioners** who need to move from model to production-ready results -- **Domain experts** from any field where things flow, transform, and need optimizing +## πŸ—οΈ Framework Architecture -Built on modern foundations ([linopy](https://github.com/PyPSA/linopy/) and [xarray](https://github.com/pydata/xarray)), FlixOpt delivers both **performance** and **transparency**. You can inspect everything, extend anything, and trust that your model does exactly what you designed. +
-Originally developed at [TU Dresden](https://github.com/gewv-tu-dresden) for the SMARTBIOGRID project (funded by the German Federal Ministry for Economic Affairs and Energy, FKZ: 03KB159B), FlixOpt has evolved from the Matlab-based flixOptMat framework while incorporating the best ideas from [oemof/solph](https://github.com/oemof/oemof-solph). +
+ ![FlixOpt Conceptual Usage](./images/architecture_flixOpt.png) +
Conceptual Usage and IO operations of FlixOpt
+
---- +**FlixOpt** provides a complete workflow for energy system optimization: -## What Makes FlixOpt Different +- **:material-file-code: Define** your system using Python components +- **:material-cog: Optimize** with powerful solvers (HiGHS, Gurobi, CPLEX) +- **:material-chart-box: Analyze** results with built-in visualization tools +- **:material-export: Export** to various formats for further analysis -### Start Simple, Scale Complex -Define a working model in minutes with high-level components, then drill down to fine-grained control when needed. No rewriting, no framework switching. +
-```python -import flixopt as fx +## :material-account-group: Community & Support -# Simple start -boiler = fx.Boiler("Boiler", eta=0.9, ...) +
-# Advanced control when needed - extend with native linopy -boiler.model.add_constraints(custom_constraint, name="my_constraint") -``` +
-### Multi-Criteria Optimization Done Right -Model costs, emissions, resource use, and any custom metric simultaneously as **Effects**. Optimize any single Effect, use weighted combinations, or apply Ξ΅-constraints: +:fontawesome-brands-github:{ .feature-icon } -```python -costs = fx.Effect('costs', '€', 'Total costs', - share_from_temporal={'CO2': 180}) # 180 €/tCO2 -co2 = fx.Effect('CO2', 'kg', 'Emissions', maximum_periodic=50000) -``` +### GitHub -### Performance at Any Scale -Choose the right calculation mode for your problem: +Report issues, request features, and contribute to the codebase -- **Full** - Maximum accuracy for smaller problems -- **Segmented** - Rolling horizon for large time series -- **Aggregated** - Typical periods using [TSAM](https://github.com/FZJ-IEK3-VSA/tsam) for massive models +[Visit Repository β†’](https://github.com/flixOpt/flixopt){target="_blank" rel="noopener noreferrer"} -### Built for Reproducibility -Every result file is self-contained with complete model information. Load it months later and know exactly what you optimized. Export to NetCDF, share with colleagues, archive for compliance. +
-
- ![FlixOpt Conceptual Usage](./images/architecture_flixOpt.png) -
Conceptual Usage and IO operations of FlixOpt
-
+
+ +:material-forum:{ .feature-icon } + +### Discussions + +Ask questions and share your projects with the community -## Installation +[Join Discussion β†’](https://github.com/flixOpt/flixopt/discussions){target="_blank" rel="noopener noreferrer"} -```bash -pip install flixopt -``` +
-For more detailed installation options, see the [Getting Started](getting-started.md) guide. +
-## License +:material-book-open-page-variant:{ .feature-icon } -FlixOpt is released under the MIT License. See [LICENSE](https://github.com/flixopt/flixopt/blob/main/LICENSE) for details. +### Contributing -## Citation +Help improve FlixOpt by contributing code, docs, or examples -If you use FlixOpt in your research or project, please cite: +[Learn How β†’](contribute/){target="_blank" rel="noopener noreferrer"} -- **Main Citation:** [DOI:10.18086/eurosun.2022.04.07](https://doi.org/10.18086/eurosun.2022.04.07) -- **Short Overview:** [DOI:10.13140/RG.2.2.14948.24969](https://doi.org/10.13140/RG.2.2.14948.24969) +
+ +
+ + +## :material-file-document-edit: Recent Updates + +!!! tip "What's New in v3.0.0" + Major improvements and breaking changes. Check the [Migration Guide](user-guide/migration-guide-v3.md) for upgrading from v2.x. + +πŸ“‹ See the full [Release Notes](changelog/) for detailed version history. + +--- + +
+ +

Ready to optimize your energy system?

+ +

+ ▢️ Start Building +

+ +
+ +--- -*A more sophisticated paper is in progress* +{% + include-markdown "../README.md" + start="## πŸ› οΈ Installation" + end="## πŸ“„ License" +%} diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css new file mode 100644 index 000000000..79dfc9a15 --- /dev/null +++ b/docs/stylesheets/extra.css @@ -0,0 +1,396 @@ +/* ============================================================================ + flixOpt Custom Styling + ========================================================================= */ + +/* Root variables for easy customization */ +:root { + /* Spacing */ + --content-padding: 2rem; + + /* Typography */ + --heading-font-weight: 600; + + /* Colors - enhance teal theme */ + --flixopt-teal: #009688; + --flixopt-teal-light: #4DB6AC; + --flixopt-teal-dark: #00796B; +} + +/* Dark mode adjustments */ +[data-md-color-scheme="slate"] { + --md-code-bg-color: #1e1e1e; +} + +/* ============================================================================ + Typography Improvements + ========================================================================= */ + +/* Better line height for readability */ +.md-typeset { + line-height: 1.7; +} + +/* Enhanced headings */ +.md-typeset h1 { + font-weight: var(--heading-font-weight); + letter-spacing: -0.02em; + margin-top: 0; +} + +.md-typeset h2 { + font-weight: var(--heading-font-weight); + border-bottom: 1px solid var(--md-default-fg-color--lightest); + padding-bottom: 0.3em; + margin-top: 2em; +} + +/* Better code inline */ +.md-typeset code { + padding: 0.15em 0.4em; + border-radius: 0.25em; + font-size: 0.875em; +} + +/* ============================================================================ + Navigation Enhancements + ========================================================================= */ + +/* Smooth hover effects on navigation */ +.md-nav__link:hover { + opacity: 0.7; + transition: opacity 0.2s ease; +} + +/* Active navigation item enhancement */ +.md-nav__link--active { + font-weight: 600; + border-left: 3px solid var(--md-primary-fg-color); + padding-left: calc(1.2rem - 3px) !important; +} + +/* ============================================================================ + Code Block Improvements + ========================================================================= */ + +/* Better code block styling */ +.md-typeset .highlight { + border-radius: 0.5rem; + margin: 1.5em 0; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); +} + +[data-md-color-scheme="slate"] .md-typeset .highlight { + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3); +} + +/* Line numbers styling */ +.md-typeset .highlight .linenos { + user-select: none; + opacity: 0.5; +} + +/* Copy button enhancement */ +.md-clipboard { + opacity: 0; + transition: opacity 0.2s ease; +} + +.highlight:hover .md-clipboard { + opacity: 1; +} + +/* ============================================================================ + Admonitions & Callouts + ========================================================================= */ + +/* Enhanced admonitions */ +.md-typeset .admonition { + border-radius: 0.5rem; + border-left-width: 0.25rem; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05); +} + +/* ============================================================================ + Tables + ========================================================================= */ + +/* Better table styling */ +.md-typeset table:not([class]) { + border-radius: 0.5rem; + overflow: hidden; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05); +} + +.md-typeset table:not([class]) th { + background-color: 
var(--md-primary-fg-color); + color: white; + font-weight: 600; + text-align: left; +} + +.md-typeset table:not([class]) tr:hover { + background-color: var(--md-default-fg-color--lightest); + transition: background-color 0.2s ease; +} + +/* ============================================================================ + API Documentation Styling + ========================================================================= */ + +/* Better spacing for API docs */ +.doc-heading { + margin-top: 2rem !important; +} + +/* Parameter tables */ +.doc-md-description table { + width: 100%; + font-size: 0.9em; +} + +/* Signature styling */ +.doc-signature { + font-family: var(--md-code-font); + background-color: var(--md-code-bg-color); + border-radius: 0.5rem; + padding: 1rem; + overflow-x: auto; +} + +/* ============================================================================ + Home Page Hero (optional enhancement) + ========================================================================= */ + +.hero { + text-align: center; + padding: 4rem 2rem; + background: linear-gradient(135deg, var(--flixopt-teal-light) 0%, var(--flixopt-teal-dark) 100%); + color: white; + border-radius: 1rem; + margin-bottom: 2rem; +} + +.hero h1 { + font-size: 3rem; + margin-bottom: 1rem; + color: white; + border: none; +} + +.hero p { + font-size: 1.25rem; + opacity: 0.9; +} + +/* ============================================================================ + Responsive Design + ========================================================================= */ + +@media screen and (max-width: 76.1875em) { + .md-typeset h1 { + font-size: 2rem; + } +} + +@media screen and (max-width: 44.9375em) { + :root { + --content-padding: 1rem; + } + + .hero h1 { + font-size: 2rem; + } + + .hero p { + font-size: 1rem; + } +} + +/* ============================================================================ + Print Styles + ========================================================================= */ + +@media print { + .md-typeset { + font-size: 0.9rem; + } + + .md-header, + .md-sidebar, + .md-footer { + display: none; + } +} + +/* ============================================================================ + Home Page Inline Styles (moved from docs/index.md) + ========================================================================= */ + +.hero-section { + text-align: center; + padding: 4rem 2rem 3rem 2rem; + background: linear-gradient(135deg, rgba(0, 150, 136, 0.1) 0%, rgba(0, 121, 107, 0.1) 100%); + border-radius: 1rem; + margin-bottom: 3rem; +} + +.hero-section h1 { + font-size: 3.5rem; + font-weight: 700; + margin-bottom: 1rem; + background: linear-gradient(135deg, #009688 0%, #00796B 100%); + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + background-clip: text; +} + +.hero-section .tagline { + font-size: 1.5rem; + color: var(--md-default-fg-color--light); + margin-bottom: 2rem; + font-weight: 300; +} + +.hero-buttons { + display: flex; + gap: 1rem; + justify-content: center; + flex-wrap: wrap; + margin-top: 2rem; +} + +.feature-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(280px, 1fr)); + gap: 2rem; + margin: 3rem 0; +} + +.feature-card { + padding: 2rem; + border-radius: 0.75rem; + background: var(--md-code-bg-color); + border: 1px solid var(--md-default-fg-color--lightest); + transition: all 0.3s ease; + text-align: center; +} + +.feature-card:hover { + transform: translateY(-4px); + box-shadow: 0 8px 16px rgba(0, 0, 0, 0.1); + border-color: var(--md-primary-fg-color); +} + 
+.feature-icon { + font-size: 3rem; + margin-bottom: 1rem; + display: block; +} + +.feature-card h3 { + margin-top: 0; + margin-bottom: 0.5rem; + font-size: 1.25rem; +} + +.feature-card p { + color: var(--md-default-fg-color--light); + margin: 0; + font-size: 0.95rem; + line-height: 1.6; +} + +.stats-banner { + display: flex; + justify-content: space-around; + padding: 2rem; + background: var(--md-code-bg-color); + border-radius: 0.75rem; + margin: 3rem 0; + text-align: center; + flex-wrap: wrap; + gap: 2rem; +} + +.stat-item { + flex: 1; + min-width: 150px; +} + +.stat-number { + font-size: 2.5rem; + font-weight: 700; + color: var(--md-primary-fg-color); + display: block; +} + +.stat-label { + color: var(--md-default-fg-color--light); + font-size: 0.9rem; + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.architecture-section { + margin: 4rem 0; + padding: 2rem; + background: var(--md-code-bg-color); + border-radius: 0.75rem; +} + +.quick-links { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); + gap: 1.5rem; + margin: 3rem 0; +} + +.quick-link-card { + padding: 1.5rem; + border-left: 4px solid var(--md-primary-fg-color); + background: var(--md-code-bg-color); + border-radius: 0.5rem; + transition: all 0.2s ease; + text-decoration: none; + display: block; +} + +.quick-link-card:hover { + background: var(--md-default-fg-color--lightest); + transform: translateX(4px); +} + +.quick-link-card h3 { + margin: 0 0 0.5rem 0; + font-size: 1.1rem; + color: var(--md-primary-fg-color); +} + +.quick-link-card p { + margin: 0; + color: var(--md-default-fg-color--light); + font-size: 0.9rem; +} + +@media screen and (max-width: 768px) { + .hero-section h1 { + font-size: 2.5rem; + } + + .hero-section .tagline { + font-size: 1.2rem; + } + + .hero-buttons { + flex-direction: column; + align-items: stretch; + } + + .feature-grid { + grid-template-columns: 1fr; + } + + .stats-banner { + flex-direction: column; + } +} diff --git a/docs/user-guide/index.md b/docs/user-guide/core-concepts.md similarity index 77% rename from docs/user-guide/index.md rename to docs/user-guide/core-concepts.md index df97bf768..bf52a26ba 100644 --- a/docs/user-guide/index.md +++ b/docs/user-guide/core-concepts.md @@ -1,10 +1,10 @@ -# FlixOpt Concepts +# Core concepts of flixopt FlixOpt is built around a set of core concepts that work together to represent and optimize **any system involving flows and conversions** - whether that's energy systems, material flows, supply chains, water networks, or production processes. This page provides a high-level overview of these concepts and how they interact. -## Core Concepts +## Main building blocks ### FlowSystem @@ -15,15 +15,22 @@ Every FlixOpt model starts with creating a FlowSystem. It: - Contains and connects [components](#components), [buses](#buses), and [flows](#flows) - Manages the [effects](#effects) (objectives and constraints) +FlowSystem provides two ways to access elements: + +- **Dict-like interface**: Access any element by label: `flow_system['Boiler']`, `'Boiler' in flow_system`, `flow_system.keys()` +- **Direct containers**: Access type-specific containers: `flow_system.components`, `flow_system.buses`, `flow_system.effects`, `flow_system.flows` + +Element labels must be unique across all types. See the [`FlowSystem` API reference][flixopt.flow_system.FlowSystem] for detailed examples and usage patterns. 
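As a quick illustration of the two access styles described above, here is a sketch. It uses the same minimal element setup as the bug-report template earlier in this patch; only the accessors listed above are exercised, and printing the containers directly assumes they have a readable representation.

```python
import pandas as pd

import flixopt as fx

flow_system = fx.FlowSystem(pd.date_range('2020-01-01', periods=3, freq='h'))
flow_system.add_elements(
    fx.Effect('Costs', '€', 'Cost', is_standard=True, is_objective=True),
    fx.Bus('Heat'),
    fx.Bus('Gas'),
    fx.linear_converters.Boiler(
        'Boiler',
        eta=0.5,
        Q_th=fx.Flow(label='Heat', bus='Heat', size=50),
        Q_fu=fx.Flow(label='Gas', bus='Gas'),
    ),
)

# Dict-like interface: one flat namespace of unique labels
boiler = flow_system['Boiler']   # works for components, buses, and effects alike
assert 'Boiler' in flow_system   # membership test by label
print(list(flow_system.keys()))  # all element labels

# Direct containers: access one element type at a time
print(flow_system.components)
print(flow_system.buses)
print(flow_system.effects)
print(flow_system.flows)
```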
+ ### Flows [`Flow`][flixopt.elements.Flow] objects represent the movement of energy or material between a [Bus](#buses) and a [Component](#components) in a predefined direction. -- Have a `size` which, generally speaking, defines how fast energy or material can be moved. Usually measured in MW, kW, mΒ³/h, etc. -- Have a `flow_rate`, which is defines how fast energy or material is transported. Usually measured in MW, kW, mΒ³/h, etc. +- Have a `size` which, generally speaking, defines how much energy or material can be moved. Usually measured in MW, kW, mΒ³/h, etc. +- Have a `flow_rate`, which defines how fast energy or material is transported. Usually measured in MW, kW, mΒ³/h, etc. - Have constraints to limit the flow-rate (min/max, total flow hours, on/off etc.) - Can have fixed profiles (for demands or renewable generation) -- Can have [Effects](#effects) associated by their use (operation, investment, on/off, ...) +- Can have [Effects](#effects) associated by their use (costs, emissions, labour, ...) #### Flow Hours While the **Flow Rate** defines the rate in which energy or material is transported, the **Flow Hours** define the amount of energy or material that is transported. @@ -50,21 +57,21 @@ Examples: [`Component`][flixopt.elements.Component] objects usually represent physical entities in your system that interact with [`Flows`][flixopt.elements.Flow]. The generic component types work across all domains: - [`LinearConverters`][flixopt.components.LinearConverter] - Converts input flows to output flows with (piecewise) linear relationships - - *Energy: boilers, heat pumps, turbines* - - *Manufacturing: assembly lines, processing equipment* - - *Chemistry: reactors, separators* + - *Energy: boilers, heat pumps, turbines* + - *Manufacturing: assembly lines, processing equipment* + - *Chemistry: reactors, separators* - [`Storages`][flixopt.components.Storage] - Stores energy or material over time - - *Energy: batteries, thermal storage, gas storage* - - *Logistics: warehouses, buffer inventory* - - *Water: reservoirs, tanks* + - *Energy: batteries, thermal storage, gas storage* + - *Logistics: warehouses, buffer inventory* + - *Water: reservoirs, tanks* - [`Sources`][flixopt.components.Source] / [`Sinks`][flixopt.components.Sink] / [`SourceAndSinks`][flixopt.components.SourceAndSink] - Produce or consume flows - - *Energy: demands, renewable generation* - - *Manufacturing: raw material supply, product demand* - - *Supply chain: suppliers, customers* + - *Energy: demands, renewable generation* + - *Manufacturing: raw material supply, product demand* + - *Supply chain: suppliers, customers* - [`Transmissions`][flixopt.components.Transmission] - Moves flows between locations with possible losses - - *Energy: pipelines, power lines* - - *Logistics: transport routes* - - *Water: distribution networks* + - *Energy: pipelines, power lines* + - *Logistics: transport routes* + - *Water: distribution networks* **Pre-built specialized components** for energy systems include [`Boilers`][flixopt.linear_converters.Boiler], [`HeatPumps`][flixopt.linear_converters.HeatPump], [`CHPs`][flixopt.linear_converters.CHP], etc. These can serve as blueprints for custom domain-specific components. @@ -105,7 +112,7 @@ FlixOpt offers different calculation modes: The results of a calculation are stored in a [`CalculationResults`][flixopt.results.CalculationResults] object. 
This object contains the solutions of the optimization as well as all information about the [`Calculation`][flixopt.calculation.Calculation] and the [`FlowSystem`][flixopt.flow_system.FlowSystem] it was created from. -The solutions is stored as an `xarray.Dataset`, but can be accessed through their assotiated Component, Bus or Effect. +The solution is stored as an `xarray.Dataset`, but its entries can be accessed through their associated Component, Bus or Effect. This [`CalculationResults`][flixopt.results.CalculationResults] object can be saved to file and reloaded from file, allowing you to analyze the results anytime after the solve. @@ -114,19 +121,20 @@ This [`CalculationResults`][flixopt.results.CalculationResults] object can be sa The process of working with FlixOpt can be divided into 3 steps: 1. Create a [`FlowSystem`][flixopt.flow_system.FlowSystem], containing all the elements and data of your system - - Define the time series of your system - - Add [`Components`][flixopt.components] like [`Boilers`][flixopt.linear_converters.Boiler], [`HeatPumps`][flixopt.linear_converters.HeatPump], [`CHPs`][flixopt.linear_converters.CHP], etc. - - Add [`Buses`][flixopt.elements.Bus] as connection points in your system + - Define the time horizon of your system (and optionally your periods and scenarios, see [Dimensions](mathematical-notation/dimensions.md)) - Add [`Effects`][flixopt.effects.Effect] to represent costs, emissions, etc. - - *This [`FlowSystem`][flixopt.flow_system.FlowSystem] can also be loaded from a netCDF file* + - Add [`Buses`][flixopt.elements.Bus] as connection points in your system and [`Sinks`][flixopt.components.Sink] & [`Sources`][flixopt.components.Source] as connections to the outer world (markets, power grid, ...) + - Add [`Components`][flixopt.components] like [`Boilers`][flixopt.linear_converters.Boiler], [`HeatPumps`][flixopt.linear_converters.HeatPump], [`CHPs`][flixopt.linear_converters.CHP], etc. + - [`FlowSystems`][flixopt.flow_system.FlowSystem] can also be loaded from a netCDF file 2. Translate the model to a mathematical optimization problem - Create a [`Calculation`][flixopt.calculation.Calculation] from your FlowSystem and choose a Solver - - ...The Calculation is translated internaly to a mathematical optimization problem... + - ...The Calculation is translated internally to a mathematical optimization problem... - ...and solved by the chosen solver. 3. Analyze the results - The results are stored in a [`CalculationResults`][flixopt.results.CalculationResults] object - This object can be saved to file and reloaded from file, retaining all information about the calculation - - As it contains the used [`FlowSystem`][flixopt.flow_system.FlowSystem], it can be used to start a new calculation + - As it contains the used [`FlowSystem`][flixopt.flow_system.FlowSystem], it fully documents all assumptions taken to create the results.
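The three steps above map one-to-one onto code. The sketch below strings them together using the same minimal system as the bug-report template earlier in this patch; the `calculation.results` accessor in step 3 is an assumption, mirroring the `results` attribute that `SegmentedCalculation` sets in `calculation.py`.

```python
import numpy as np
import pandas as pd

import flixopt as fx

# Step 1: build the FlowSystem (time horizon, effects, buses, components)
flow_system = fx.FlowSystem(pd.date_range('2020-01-01', periods=3, freq='h'))
flow_system.add_elements(
    fx.Effect('Costs', '€', 'Cost', is_standard=True, is_objective=True),
    fx.Bus('Heat'),
    fx.Bus('Gas'),
    fx.Source('Source', outputs=[fx.Flow(label='Gas', bus='Gas', size=1000, effects_per_flow_hour=0.04)]),
    fx.Sink(
        'Sink',
        inputs=[fx.Flow(label='Demand', bus='Heat', size=1, fixed_relative_profile=np.array([30, 0, 20]))],
    ),
    fx.linear_converters.Boiler(
        'Boiler',
        eta=0.5,
        Q_th=fx.Flow(label='Heat', bus='Heat', size=50),
        Q_fu=fx.Flow(label='Gas', bus='Gas'),
    ),
)

# Step 2: translate to a mathematical optimization problem and solve it
calculation = fx.FullCalculation('Simulation1', flow_system).do_modeling().solve(fx.solvers.HighsSolver(0.01, 60))

# Step 3: analyze the results (a saved/reloaded results object retains the full FlowSystem)
results = calculation.results  # assumed accessor; SegmentedCalculation sets `self.results` the same way
```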
![FlixOpt Conceptual Usage](../images/architecture_flixOpt.png) diff --git a/docs/user-guide/mathematical-notation/index.md b/docs/user-guide/mathematical-notation/index.md index 05d1fed60..27e7b7e9a 100644 --- a/docs/user-guide/mathematical-notation/index.md +++ b/docs/user-guide/mathematical-notation/index.md @@ -1,9 +1,9 @@ # Mathematical Notation -This section provides the **mathematical formulations** underlying FlixOpt's optimization models. It is intended as **reference documentation** for users who want to understand the mathematical details behind the high-level FlixOpt API described in the [FlixOpt Concepts](../index.md) guide. +This section provides the **mathematical formulations** underlying FlixOpt's optimization models. It is intended as **reference documentation** for users who want to understand the mathematical details behind the high-level FlixOpt API described in the [FlixOpt Concepts](../core-concepts.md) guide. -**For typical usage**, refer to the [FlixOpt Concepts](../index.md) guide, [Examples](../../examples/index.md), and [API Reference](../../api-reference/index.md) - you don't need to understand these mathematical formulations to use FlixOpt effectively. +**For typical usage**, refer to the [FlixOpt Concepts](../core-concepts.md) guide, [Examples](../../examples/index.md), and [API Reference](../../api-reference/index.md) - you don't need to understand these mathematical formulations to use FlixOpt effectively. --- diff --git a/docs/user-guide/migration-guide-v3.md b/docs/user-guide/migration-guide-v3.md index 190503dc3..2a9cab97a 100644 --- a/docs/user-guide/migration-guide-v3.md +++ b/docs/user-guide/migration-guide-v3.md @@ -1,550 +1,232 @@ -# Migration Guide: Upgrading to v3.0.0 - -This guide helps you migrate your flixopt code from v2.x to v3.0.0. Version 3.0.0 introduces powerful new features like multi-period investments and scenario-based stochastic optimization, along with a redesigned effect sharing system. +# Migration Guide: v2.x β†’ v3.0.0 !!! tip "Quick Start" - 1. **Update your installation:** - ```bash - pip install --upgrade flixopt - ``` - 2. **Review breaking changes** in the sections below - 3. **Update deprecated parameters** to their new names - 4. **Test your code** with the new version + ```bash + pip install --upgrade flixopt + ``` + Review [breaking changes](#breaking-changes), update [deprecated parameters](#deprecated-parameters), test thoroughly. --- -## Breaking Changes +## πŸ’₯ Breaking Changes -### 1. Effect Sharing System Redesign +### Effect System Redesign -!!! warning "Breaking Change - No Deprecation" - The effect sharing syntax has been inverted and simplified. This change was made WITHOUT deprecation warnings due to the fundamental restructuring. +Terminology changed and sharing system inverted: effects now "pull" shares. -**What changed:** Effects now "pull" shares from other effects instead of "pushing" them. 
- -=== "v2.x (Old)" +| Concept | Old (v2.x) | New (v3.0.0) | +|---------|------------|--------------| +| Time-varying effects | `operation` | `temporal` | +| Investment effects | `invest` / `investment` | `periodic` | +| Share to other effects (operation) | `specific_share_to_other_effects_operation` | `share_from_temporal` | +| Share to other effects (invest) | `specific_share_to_other_effects_invest` | `share_from_periodic` | +=== "v2.x" ```python - # Effects "pushed" shares to other effects - CO2 = fx.Effect('CO2', 'kg', 'CO2 emissions', + CO2 = fx.Effect('CO2', 'kg', 'CO2', specific_share_to_other_effects_operation={'costs': 0.2}) - - land = fx.Effect('land', 'mΒ²', 'Land usage', - specific_share_to_other_effects_invest={'costs': 100}) - - costs = fx.Effect('costs', '€', 'Total costs') + costs = fx.Effect('costs', '€', 'Total') ``` -=== "v3.0.0 (New)" - +=== "v3.0.0" ```python - # Effects "pull" shares from other effects (clearer direction) - CO2 = fx.Effect('CO2', 'kg', 'CO2 emissions') - - land = fx.Effect('land', 'mΒ²', 'Land usage') - - costs = fx.Effect('costs', '€', 'Total costs', - share_from_temporal={'CO2': 0.2}, # From temporal (operation) effects - share_from_periodic={'land': 100}) # From periodic (investment) effects + CO2 = fx.Effect('CO2', 'kg', 'CO2') + costs = fx.Effect('costs', '€', 'Total', + share_from_temporal={'CO2': 0.2}) # Pull from CO2 ``` -!!! success "Migration Steps" - 1. Find all uses of `specific_share_to_other_effects_operation` and `specific_share_to_other_effects_invest` - 2. Move the share definition to the **receiving** effect - 3. Rename parameters: - - `specific_share_to_other_effects_operation` β†’ `share_from_temporal` - - `specific_share_to_other_effects_invest` β†’ `share_from_periodic` +!!! warning "No deprecation warning" + Move shares to receiving effect and update parameter names throughout your code. --- -### 2. Class and Variable Renaming - -=== "v2.x (Old)" +### Variable Names - ```python - # In optimization results - results.solution['component|is_invested'] - ``` - -=== "v3.0.0 (New)" - - ```python - # In optimization results - results.solution['component|invested'] - ``` +| Category | Old (v2.x) | New (v3.0.0) | +|---------------------------------|------------|--------------| +| Investment | `is_invested` | `invested` | +| Switching | `switch_on` | `switch|on` | +| Switching | `switch_off` | `switch|off` | +| Switching | `switch_on_nr` | `switch|count` | +| Effects | `Effect(invest)|total` | `Effect(periodic)` | +| Effects | `Effect(operation)|total` | `Effect(temporal)` | +| Effects | `Effect(operation)|total_per_timestep` | `Effect(temporal)|per_timestep` | +| Effects | `Effect|total` | `Effect` | --- -### 3. Calculation API Change - -!!! info "Method Chaining Support" - `Calculation.do_modeling()` now returns the Calculation object to enable method chaining. 
+### String Labels -=== "v2.x (Old)" +| What | Old (v2.x) | New (v3.0.0) | +|------|------------|--------------| +| Bus assignment | `bus=my_bus` (object) | `bus='electricity'` (string) | +| Effect shares | `{CO2: 0.2}` (object key) | `{'CO2': 0.2}` (string key) | +=== "v2.x" ```python - calculation = fx.FullCalculation('my_calc', flow_system) - linopy_model = calculation.do_modeling() # Returned linopy.Model - - # Access model directly from return value - print(linopy_model) + flow = fx.Flow('P_el', bus=my_bus) # ❌ Object + costs = fx.Effect('costs', '€', share_from_temporal={CO2: 0.2}) # ❌ ``` -=== "v3.0.0 (New)" - +=== "v3.0.0" ```python - calculation = fx.FullCalculation('my_calc', flow_system) - calculation.do_modeling() # Returns Calculation object - linopy_model = calculation.model # Access model via property - - # This enables chaining operations - fx.FullCalculation('my_calc', flow_system).do_modeling().solve() + flow = fx.Flow('P_el', bus='electricity') # βœ… String + costs = fx.Effect('costs', '€', share_from_temporal={'CO2': 0.2}) # βœ… ``` -!!! tip "Migration" - If you used the return value of `do_modeling()`, update to access `.model` property instead. - --- -### 4. Storage Charge State Bounds - -!!! warning "Array Dimensions Changed" - `relative_minimum_charge_state` and `relative_maximum_charge_state` no longer have an extra timestep. - -**Impact:** If you provided arrays with `len(timesteps) + 1` elements, reduce to `len(timesteps)`. - -=== "v2.x (Old)" - - ```python - # Array with extra timestep - storage = fx.Storage( - 'storage', - relative_minimum_charge_state=np.array([0.2, 0.2, 0.2, 0.2, 0.2]) # 5 values for 4 timesteps - ) - ``` - -=== "v3.0.0 (New)" +### FlowSystem & Calculation - ```python - # Array matches timesteps - storage = fx.Storage( - 'storage', - relative_minimum_charge_state=np.array([0.2, 0.2, 0.2, 0.2]), # 4 values for 4 timesteps - relative_minimum_final_charge_state=0.3 # Specify the final value directly - ) - ``` - -!!! note "Final State Control" - Use the new `relative_minimum_final_charge_state` and `relative_maximum_final_charge_state` parameters to explicitly control the final charge state. +| Change | Description | +|--------|-------------| +| **FlowSystem copying** | Each `Calculation` gets its own copy (independent) | +| **do_modeling() return** | Returns `Calculation` object (access model via `.model` property) | +| **Storage arrays** | Arrays match timestep count (no extra element) | +| **Final charge state** | Use `relative_minimum_final_charge_state` / `relative_maximum_final_charge_state` | --- -### 5. Plotting Parameter Rename - -=== "v2.x (Old)" - - ```python - results.plot_heatmap('component|variable', mode='line') - ``` +### Other Changes -=== "v3.0.0 (New)" - - ```python - results.plot_heatmap('component|variable', style='line') - ``` +| Category | Old (v2.x) | New (v3.0.0) | +|----------|------------|--------------| +| System model class | `SystemModel` | `FlowSystemModel` | +| Element submodel | `Model` | `Submodel` | +| Logging default | Enabled | Disabled | +| Enable logging | (default) | `fx.CONFIG.Logging.console = True; fx.CONFIG.apply()` | --- -## Deprecated Parameters (Still Supported) - -!!! info "Gradual Migration" - These parameters still work but will be removed in a future version. Update them at your convenience - deprecation warnings will guide you. +## πŸ—‘οΈ Deprecated Parameters -### InvestParameters +??? 
abstract "InvestParameters" -**Parameter Changes:** + | Old (v2.x) | New (v3.0.0) | + |------------|--------------| + | `fix_effects` | `effects_of_investment` | + | `specific_effects` | `effects_of_investment_per_size` | + | `divest_effects` | `effects_of_retirement` | + | `piecewise_effects` | `piecewise_effects_of_investment` | -| Old Parameter (v2.x) | New Parameter (v3.0.0) | -|---------------------|----------------------| -| `fix_effects` | `effects_of_investment` | -| `specific_effects` | `effects_of_investment_per_size` | -| `divest_effects` | `effects_of_retirement` | -| `piecewise_effects` | `piecewise_effects_of_investment` | +??? abstract "Effect" -=== "v2.x (Deprecated)" + | Old (v2.x) | New (v3.0.0) | + |------------|--------------| + | `minimum_investment` | `minimum_periodic` | + | `maximum_investment` | `maximum_periodic` | + | `minimum_operation` | `minimum_temporal` | + | `maximum_operation` | `maximum_temporal` | + | `minimum_operation_per_hour` | `minimum_per_hour` | + | `maximum_operation_per_hour` | `maximum_per_hour` | - ```python - fx.InvestParameters( - fix_effects=1000, - specific_effects={'costs': 10}, - divest_effects=100, - piecewise_effects=my_piecewise, - ) - ``` - -=== "v3.0.0 (Recommended)" - - ```python - fx.InvestParameters( - effects_of_investment=1000, - effects_of_investment_per_size={'costs': 10}, - effects_of_retirement=100, - piecewise_effects_of_investment=my_piecewise, - ) - ``` - -### Effect - -**Parameter Changes:** - -| Old Parameter (v2.x) | New Parameter (v3.0.0) | -|---------------------|----------------------| -| `minimum_investment` | `minimum_periodic` | -| `maximum_investment` | `maximum_periodic` | -| `minimum_operation` | `minimum_temporal` | -| `maximum_operation` | `maximum_temporal` | -| `minimum_operation_per_hour` | `minimum_per_hour` | -| `maximum_operation_per_hour` | `maximum_per_hour` | - -=== "v2.x (Deprecated)" - - ```python - fx.Effect( - 'my_effect', 'unit', 'description', - minimum_investment=10, - maximum_investment=100, - minimum_operation=5, - maximum_operation=50, - minimum_operation_per_hour=1, - maximum_operation_per_hour=10, - ) - ``` +??? abstract "Components" -=== "v3.0.0 (Recommended)" + | Old (v2.x) | New (v3.0.0) | + |------------|--------------| + | `source` (parameter) | `outputs` | + | `sink` (parameter) | `inputs` | + | `prevent_simultaneous_sink_and_source` | `prevent_simultaneous_flow_rates` | - ```python - fx.Effect( - 'my_effect', 'unit', 'description', - minimum_periodic=10, - maximum_periodic=100, - minimum_temporal=5, - maximum_temporal=50, - minimum_per_hour=1, - maximum_per_hour=10, - ) - ``` +??? abstract "TimeSeriesData" -### Component Parameters + | Old (v2.x) | New (v3.0.0) | + |------------|--------------| + | `agg_group` | `aggregation_group` | + | `agg_weight` | `aggregation_weight` | -=== "v2.x (Deprecated)" +??? abstract "Calculation" - ```python - fx.Source('my_source', source=flow) + | Old (v2.x) | New (v3.0.0) | + |------------|--------------| + | `active_timesteps=[0, 1, 2]` | Use `flow_system.sel()` or `flow_system.isel()` | - fx.Sink('my_sink', sink=flow) +--- - fx.SourceAndSink( - 'my_source_sink', - source=flow1, - sink=flow2, - prevent_simultaneous_sink_and_source=True - ) - ``` +## ✨ New Features -=== "v3.0.0 (Recommended)" +??? 
success "Multi-Period Investments"

     ```python
-    fx.Source('my_source', outputs=flow)
-
-    fx.Sink('my_sink', inputs=flow)
-
-    fx.SourceAndSink(
-        'my_source_sink',
-        outputs=flow1,
-        inputs=flow2,
-        prevent_simultaneous_flow_rates=True
-    )
+    periods = pd.Index(['2020', '2030'])
+    flow_system = fx.FlowSystem(timesteps, periods=periods)
     ```

-### TimeSeriesData
+??? success "Scenario-Based Optimization"

-=== "v2.x (Deprecated)"
+    | Parameter | Description | Example |
+    |-----------|-------------|---------|
+    | `scenarios` | Scenario index | `pd.Index(['low', 'base', 'high'], name='scenario')` |
+    | `weights` | Probabilities | `[0.2, 0.6, 0.2]` |
+    | `scenario_independent_sizes` | Equalize investment sizes across scenarios | `True` (default) / `False` |

     ```python
-    fx.TimeSeriesData(
-        agg_group='group1',
-        agg_weight=2.0
+    flow_system = fx.FlowSystem(
+        timesteps,
+        scenarios=scenarios,
+        weights=[0.2, 0.6, 0.2],
+        scenario_independent_sizes=True
     )
     ```

-=== "v3.0.0 (Recommended)"
+??? success "Enhanced I/O"

-    ```python
-    fx.TimeSeriesData(
-        aggregation_group='group1',
-        aggregation_weight=2.0
-    )
-    ```
-
-### Calculation
-
-=== "v2.x (Deprecated)"
-
-    ```python
-    calculation = fx.FullCalculation(
-        'calc',
-        flow_system,
-        active_timesteps=[0, 1, 2]
-    )
-    ```
+    | Method | Description |
+    |--------|-------------|
+    | `flow_system.to_netcdf('file.nc')` | Save FlowSystem |
+    | `fx.FlowSystem.from_netcdf('file.nc')` | Load FlowSystem |
+    | `flow_system.sel(time=slice(...))` | Select by label |
+    | `flow_system.isel(time=slice(...))` | Select by index |
+    | `flow_system.resample(time='D')` | Resample timeseries |
+    | `flow_system.copy()` | Deep copy |
+    | `results.flow_system` | Access from results |

-=== "v3.0.0 (Recommended)"
+??? success "Effects Per Component"

     ```python
-    # Use FlowSystem selection methods
-    flow_system_subset = flow_system.sel(time=slice('2020-01-01', '2020-01-03'))
-    calculation = fx.FullCalculation('calc', flow_system_subset)
+    effects_ds = results.effects_per_component

-    # Or with isel for index-based selection
-    flow_system_subset = flow_system.isel(time=slice(0, 3))
-    calculation = fx.FullCalculation('calc', flow_system_subset)
+    # Access effect contributions by component
+    print(effects_ds['total'].sel(effect='costs'))  # Total effects
+    print(effects_ds['temporal'].sel(effect='CO2'))  # Temporal effects
+    print(effects_ds['periodic'].sel(effect='costs'))  # Periodic effects
     ```

----
-
-## New Features in v3.0.0
-
-### 1. Multi-Period Investments
-
-Model transformation pathways with distinct investment decisions in each period:
-
-```python
-import pandas as pd
-
-# Define multiple investment periods
-periods = pd.Index(['2020', '2030'])
-flow_system = fx.FlowSystem(time=timesteps, periods=periods)
-
-# Components can now invest differently in each period
-solar = fx.Source(
-    'solar',
-    outputs=[fx.Flow(
-        'P_el',
-        bus='electricity',
-        size=fx.InvestParameters(
-            minimum_size=0,
-            maximum_size=1000,
-            effects_of_investment_per_size={'costs': 100}
-        )
-    )]
-)
-```
-
-### 2. 
Scenario-Based Stochastic Optimization - -Model uncertainty with weighted scenarios: - -```python -# Define scenarios with probabilities -scenarios = pd.Index(['low_demand', 'base', 'high_demand'], name='scenario') -scenario_weights = [0.2, 0.6, 0.2] # Probabilities - -flow_system = fx.FlowSystem( - time=timesteps, - scenarios=scenarios, - scenario_weights=scenario_weights -) - -# Define scenario-dependent data -demand = xr.DataArray( - data=[[70, 80, 90], # low_demand scenario - [90, 100, 110], # base scenario - [110, 120, 130]], # high_demand scenario - dims=['scenario', 'time'], - coords={'scenario': scenarios, 'time': timesteps} -) - -``` - -**Control variable independence:** -```python -# By default: investment sizes are shared across scenarios, flow rates vary -# To make sizes scenario-independent: -flow_system = fx.FlowSystem( - time=timesteps, - scenarios=scenarios, - scenario_independent_sizes=True # Each scenario gets its own capacity -) -``` - -### 3. Enhanced I/O and Data Handling - -```python -# Save and load FlowSystem -flow_system.to_netcdf('my_system.nc') -flow_system_loaded = fx.FlowSystem.from_netcdf('my_system.nc') - -# Manipulate FlowSystem -fs_subset = flow_system.sel(time=slice('2020-01', '2020-06')) -fs_resampled = flow_system.resample(time='D') # Resample to daily -fs_copy = flow_system.copy() - -# Access FlowSystem from results (lazily loaded) -results = calculation.results -original_fs = results.flow_system # No manual restoration needed -``` - -### 4. Effects Per Component - -Analyze the impact of each component, including indirect effects through effect shares: - -```python -# Get dataset showing contribution of each component to all effects -effects_ds = calculation.results.effects_per_component() - -print(effects_ds['costs']) # Total costs by component -print(effects_ds['CO2']) # CO2 emissions by component (including indirect) -``` - -### 5. Balanced Storage - -Force charging and discharging capacities to be equal: - -```python -storage = fx.Storage( - 'storage', - charging=fx.Flow('charge', bus='electricity', size=fx.InvestParameters(effects_per_size=100, minimum_size=5)), - discharging=fx.Flow('discharge', bus='electricity', size=fx.InvestParameters(), - balanced=True, # Ensures charge_size == discharge_size - capacity_in_flow_hours=100 -) -``` - -### 6. Final Charge State Control - -Set bounds on the storage state at the end of the optimization: - -```python -storage = fx.Storage( - 'storage', - charging=fx.Flow('charge', bus='electricity', size=100), - discharging=fx.Flow('discharge', bus='electricity', size=100), - capacity_in_flow_hours=10, - relative_minimum_final_charge_state=0.5, # End at least 50% charged - relative_maximum_final_charge_state=0.8 # End at most 80% charged -) -``` - ---- - -## Configuration Changes - -### Logging (v2.2.0+) +??? success "Storage Features" -**Breaking change:** Console and file logging are now disabled by default. 
- -```python -import flixopt as fx - -# Enable console logging -fx.CONFIG.Logging.console = True -fx.CONFIG.Logging.level = 'INFO' -fx.CONFIG.apply() - -# Enable file logging -fx.CONFIG.Logging.file = 'flixopt.log' -fx.CONFIG.apply() - -# Deprecated: change_logging_level() - will be removed in future -# fx.change_logging_level('INFO') # ❌ Old way -``` + | Feature | Parameter | Description | + |---------|-----------|-------------| + | **Balanced storage** | `balanced=True` | Ensures charge_size == discharge_size | + | **Final state min** | `relative_minimum_final_charge_state=0.5` | End at least 50% charged | + | **Final state max** | `relative_maximum_final_charge_state=0.8` | End at most 80% charged | --- -## Testing Your Migration - -### 1. Check for Deprecation Warnings - -Run your code and watch for deprecation warnings: - -```python -import warnings -warnings.filterwarnings('default', category=DeprecationWarning) +## πŸ”§ Common Issues -# Run your flixopt code -# Review any DeprecationWarning messages -``` - -### 2. Validate Results - -Compare results from v2.x and v3.0.0 to ensure consistency: - -```python -# Save v2.x results before upgrading -calculation.results.to_file('results_v2.nc') - -# After upgrading, compare -results_v3 = calculation.results -results_v2 = fx.CalculationResults.from_file('results_v2.nc') - -# Check key variables match (within numerical tolerance) -import numpy as np -v2_costs = results_v2['effect_values'].sel(effect='costs') -v3_costs = results_v3['effect_values'].sel(effect='costs') -np.testing.assert_allclose(v2_costs, v3_costs, rtol=1e-5) -``` +| Issue | Solution | +|-------|----------| +| Effect shares not working | See [Effect System Redesign](#effect-system-redesign) | +| Storage dimensions wrong | See [FlowSystem & Calculation](#flowsystem-calculation) | +| Bus assignment error | See [String Labels](#string-labels) | +| KeyError in results | See [Variable Names](#variable-names) | +| `AttributeError: model` | Rename `.model` β†’ `.submodel` | +| No logging | See [Other Changes](#other-changes) | --- -## Common Migration Issues - -### Issue: "Effect share parameters not working" - -**Solution:** Effect sharing was completely redesigned. Move share definitions to the **receiving** effect using `share_from_temporal` and `share_from_periodic`. +## βœ… Checklist -### Issue: "Storage charge state has wrong dimensions" - -**Solution:** Remove the extra timestep from charge state bound arrays. - -### Issue: "Import error with Bus assignment" - -**Solution:** Pass bus labels (strings) instead of Bus objects to `Flow.bus`. - -```python -# Old -my_bus = fx.Bus('electricity') -flow = fx.Flow('P_el', bus=my_bus) # ❌ - -# New -my_bus = fx.Bus('electricity') -flow = fx.Flow('P_el', bus='electricity') # βœ… -``` - -### Issue: "AttributeError: module 'flixopt' has no attribute 'SystemModel'" - -**Solution:** Rename `SystemModel` β†’ `FlowSystemModel` - - ---- - -## Getting Help - -- **Documentation:** [https://flixopt.github.io/flixopt/](https://flixopt.github.io/flixopt/) -- **GitHub Issues:** [https://github.com/flixOpt/flixopt/issues](https://github.com/flixOpt/flixopt/issues) -- **Changelog:** [Full v3.0.0 release notes](https://flixopt.github.io/flixopt/latest/changelog/99984-v3.0.0/) +| Category | Tasks | +|----------|-------| +| **Install** | β€’ `pip install --upgrade flixopt` | +| **Breaking changes** | β€’ Update [effect sharing](#effect-system-redesign)
β€’ Update [variable names](#variable-names)
β€’ Update [string labels](#string-labels)
β€’ Fix [storage arrays](#flowsystem-calculation)
β€’ Update [Calculation API](#flowsystem-calculation)
β€’ Update [class names](#other-changes) | +| **Configuration** | β€’ Enable [logging](#other-changes) if needed | +| **Deprecated** | β€’ Update [deprecated parameters](#deprecated-parameters) (recommended) | +| **Testing** | β€’ Test thoroughly
β€’ Validate results match v2.x |

---

-## Summary Checklist
-
-- [ ] Update flixopt: `pip install --upgrade flixopt`
-- [ ] Update effect sharing syntax (no deprecation warning!)
-- [ ] Update `Calculation.do_modeling()` usage
-- [ ] Fix storage charge state array dimensions
-- [ ] Rename `mode` β†’ `style` in plotting calls
-- [ ] Update deprecated parameter names (optional, but recommended)
-- [ ] Enable logging explicitly if needed
-- [ ] Test your code thoroughly
-- [ ] Explore new features (periods, scenarios, enhanced I/O)
+:material-book: [Docs](https://flixopt.github.io/flixopt/) β€’ :material-github: [Issues](https://github.com/flixOpt/flixopt/issues) β€’ :material-text-box: [Changelog](https://flixopt.github.io/flixopt/latest/changelog/99984-v3.0.0/)

-**Welcome to flixopt v3.0.0!** πŸŽ‰
+!!! success "Welcome to flixopt v3.0.0! πŸŽ‰"
diff --git a/examples/00_Minmal/minimal_example.py b/examples/00_Minmal/minimal_example.py
index 81b7c2dba..6a0ed3831 100644
--- a/examples/00_Minmal/minimal_example.py
+++ b/examples/00_Minmal/minimal_example.py
@@ -1,76 +1,37 @@
 """
-This script shows how to use the flixopt framework to model a super minimalistic energy system.
+This script shows how to use the flixopt framework to model a super minimalistic energy system in the most concise way possible.
+This can also be used to create proposals for new features, bug reports, etc.
 """
 
 import numpy as np
 import pandas as pd
-from rich.pretty import pprint
 
 import flixopt as fx
 
 if __name__ == '__main__':
-    # Enable console logging
     fx.CONFIG.Logging.console = True
     fx.CONFIG.apply()
 
-    # --- Define the Flow System, that will hold all elements, and the time steps you want to model ---
-    timesteps = pd.date_range('2020-01-01', periods=3, freq='h')
-    flow_system = fx.FlowSystem(timesteps)
-
-    # --- Define Thermal Load Profile ---
-    # Load profile (e.g., kW) for heating demand over time
-    thermal_load_profile = np.array([30, 0, 20])
-
-    # --- Define Energy Buses ---
-    # These are balancing nodes (inputs=outputs) and balance the different energy carriers your system
-    flow_system.add_elements(fx.Bus('District Heating'), fx.Bus('Natural Gas'))
-
-    # --- Define Objective Effect (Cost) ---
-    # Cost effect representing the optimization objective (minimizing costs)
-    cost_effect = fx.Effect('costs', '€', 'Cost', is_standard=True, is_objective=True)
-
-    # --- Define Flow System Components ---
-    # Boiler component with thermal output (heat) and fuel input (gas)
-    boiler = fx.linear_converters.Boiler(
-        'Boiler',
-        eta=0.5,
-        Q_th=fx.Flow(label='Thermal Output', bus='District Heating', size=50),
-        Q_fu=fx.Flow(label='Fuel Input', bus='Natural Gas'),
+    flow_system = fx.FlowSystem(pd.date_range('2020-01-01', periods=3, freq='h'))
+
+    flow_system.add_elements(
+        fx.Bus('Heat'),
+        fx.Bus('Gas'),
+        fx.Effect('Costs', '€', 'Cost', is_standard=True, is_objective=True),
+        fx.linear_converters.Boiler(
+            'Boiler',
+            eta=0.5,
+            Q_th=fx.Flow(label='Heat', bus='Heat', size=50),
+            Q_fu=fx.Flow(label='Gas', bus='Gas'),
+        ),
+        fx.Sink(
+            'Sink',
+            inputs=[fx.Flow(label='Demand', bus='Heat', size=1, fixed_relative_profile=np.array([30, 0, 20]))],
+        ),
+        fx.Source(
+            'Source',
+            outputs=[fx.Flow(label='Gas', bus='Gas', size=1000, effects_per_flow_hour=0.04)],
+        ),
     )
 
-    # Heat load component with a fixed thermal demand profile
-    heat_load = fx.Sink(
-        'Heat Demand',
-        inputs=[
-            fx.Flow(label='Thermal Load', bus='District Heating', size=1, fixed_relative_profile=thermal_load_profile)
-        ],
-    )
-
-    # Gas source component with cost-effect 
per flow hour
-    gas_source = fx.Source(
-        'Natural Gas Tariff',
-        outputs=[fx.Flow(label='Gas Flow', bus='Natural Gas', size=1000, effects_per_flow_hour=0.04)],  # 0.04 €/kWh
-    )
-
-    # --- Build the Flow System ---
-    # Add all components and effects to the system
-    flow_system.add_elements(cost_effect, boiler, heat_load, gas_source)
-
-    # --- Define, model and solve a Calculation ---
-    calculation = fx.FullCalculation('Simulation1', flow_system)
-    calculation.do_modeling()
-    calculation.solve(fx.solvers.HighsSolver(0.01, 60))
-
-    # --- Analyze Results ---
-    # Access the results of an element
-    df1 = calculation.results['costs'].filter_solution('time').to_dataframe()
-
-    # Plot the results of a specific element
-    calculation.results['District Heating'].plot_node_balance_pie()
-    calculation.results['District Heating'].plot_node_balance()
-
-    # Save results to a file
-    df2 = calculation.results['District Heating'].node_balance().to_dataframe()
-    # df2.to_csv('results/District Heating.csv')  # Save results to csv
-
-    # Print infos to the console.
-    pprint(calculation.summary)
+    calculation = fx.FullCalculation('Simulation1', flow_system).do_modeling().solve(fx.solvers.HighsSolver(0.01, 60))
+    calculation.results['Heat'].plot_node_balance()
diff --git a/examples/01_Simple/simple_example.py b/examples/01_Simple/simple_example.py
index ee90af47a..6b62d6712 100644
--- a/examples/01_Simple/simple_example.py
+++ b/examples/01_Simple/simple_example.py
@@ -112,9 +112,12 @@
     calculation.solve(fx.solvers.HighsSolver(mip_gap=0, time_limit_seconds=30))
 
     # --- Analyze Results ---
+    # Colors are automatically assigned using the default colormap.
+    # Optional: configure custom colors with:
+    calculation.results.setup_colors()
     calculation.results['FernwΓ€rme'].plot_node_balance_pie()
     calculation.results['FernwΓ€rme'].plot_node_balance()
-    calculation.results['Storage'].plot_node_balance()
+    calculation.results['Storage'].plot_charge_state()
     calculation.results.plot_heatmap('CHP(Q_th)|flow_rate')
 
     # Convert the results for the storage component to a dataframe and display
diff --git a/examples/02_Complex/complex_example_results.py b/examples/02_Complex/complex_example_results.py
index 5020f71fe..96d06dd04 100644
--- a/examples/02_Complex/complex_example_results.py
+++ b/examples/02_Complex/complex_example_results.py
@@ -25,8 +25,9 @@
     # --- Detailed Plots ---
     # In depth plot for individual flow rates ('__' is used as the delimiter between Component and Flow
     results.plot_heatmap('WΓ€rmelast(Q_th_Last)|flow_rate')
-    for flow_rate in results['BHKW2'].inputs + results['BHKW2'].outputs:
-        results.plot_heatmap(flow_rate)
+    for bus in results.buses.values():
+        bus.plot_node_balance_pie(show=False, save=f'results/{bus.label}--pie.html')
+        bus.plot_node_balance(show=False, save=f'results/{bus.label}--balance.html')
 
     # --- Plotting internal variables manually ---
     results.plot_heatmap('BHKW2(Q_th)|on')
diff --git a/examples/03_Calculation_types/example_calculation_types.py b/examples/03_Calculation_types/example_calculation_types.py
index 05b25e782..c5df50034 100644
--- a/examples/03_Calculation_types/example_calculation_types.py
+++ b/examples/03_Calculation_types/example_calculation_types.py
@@ -36,7 +36,7 @@
     data_import = pd.read_csv(
         pathlib.Path(__file__).parent.parent / 'resources' / 'Zeitreihen2020.csv', index_col=0
     ).sort_index()
-    filtered_data = data_import['2020-01-01':'2020-01-02 23:45:00']
+    filtered_data = data_import['2020-01-01':'2020-01-07 23:45:00']
    # filtered_data = data_import[0:500]  # Alternatively filter by index 
filtered_data.index = pd.to_datetime(filtered_data.index) @@ -202,36 +202,39 @@ def get_solutions(calcs: list, variable: str) -> xr.Dataset: # --- Plotting for comparison --- fx.plotting.with_plotly( - get_solutions(calculations, 'Speicher|charge_state').to_dataframe(), - style='line', + get_solutions(calculations, 'Speicher|charge_state'), + mode='line', title='Charge State Comparison', ylabel='Charge state', + xlabel='Time in h', ).write_html('results/Charge State.html') fx.plotting.with_plotly( - get_solutions(calculations, 'BHKW2(Q_th)|flow_rate').to_dataframe(), - style='line', + get_solutions(calculations, 'BHKW2(Q_th)|flow_rate'), + mode='line', title='BHKW2(Q_th) Flow Rate Comparison', ylabel='Flow rate', + xlabel='Time in h', ).write_html('results/BHKW2 Thermal Power.html') fx.plotting.with_plotly( - get_solutions(calculations, 'costs(temporal)|per_timestep').to_dataframe(), - style='line', + get_solutions(calculations, 'costs(temporal)|per_timestep'), + mode='line', title='Operation Cost Comparison', ylabel='Costs [€]', + xlabel='Time in h', ).write_html('results/Operation Costs.html') fx.plotting.with_plotly( - pd.DataFrame(get_solutions(calculations, 'costs(temporal)|per_timestep').to_dataframe().sum()).T, - style='stacked_bar', + get_solutions(calculations, 'costs(temporal)|per_timestep').sum('time'), + mode='stacked_bar', title='Total Cost Comparison', ylabel='Costs [€]', ).update_layout(barmode='group').write_html('results/Total Costs.html') fx.plotting.with_plotly( - pd.DataFrame([calc.durations for calc in calculations], index=[calc.name for calc in calculations]), - 'stacked_bar', + pd.DataFrame([calc.durations for calc in calculations], index=[calc.name for calc in calculations]).to_xarray(), + mode='stacked_bar', ).update_layout(title='Duration Comparison', xaxis_title='Calculation type', yaxis_title='Time (s)').write_html( 'results/Speed Comparison.html' ) diff --git a/examples/05_Two-stage-optimization/two_stage_optimization.py b/examples/05_Two-stage-optimization/two_stage_optimization.py index b6072a3c2..dde3ae069 100644 --- a/examples/05_Two-stage-optimization/two_stage_optimization.py +++ b/examples/05_Two-stage-optimization/two_stage_optimization.py @@ -123,16 +123,16 @@ # Separate optimization of flow sizes and dispatch start = timeit.default_timer() - calculation_sizing = fx.FullCalculation('Sizing', flow_system.resample('4h')) + calculation_sizing = fx.FullCalculation('Sizing', flow_system.resample('2h')) calculation_sizing.do_modeling() - calculation_sizing.solve(fx.solvers.HighsSolver(0.1 / 100, 600)) + calculation_sizing.solve(fx.solvers.HighsSolver(0.1 / 100, 60)) timer_sizing = timeit.default_timer() - start start = timeit.default_timer() calculation_dispatch = fx.FullCalculation('Dispatch', flow_system) calculation_dispatch.do_modeling() calculation_dispatch.fix_sizes(calculation_sizing.results.solution) - calculation_dispatch.solve(fx.solvers.HighsSolver(0.1 / 100, 600)) + calculation_dispatch.solve(fx.solvers.HighsSolver(0.1 / 100, 60)) timer_dispatch = timeit.default_timer() - start if (calculation_dispatch.results.sizes().round(5) == calculation_sizing.results.sizes().round(5)).all().item(): diff --git a/flixopt/__init__.py b/flixopt/__init__.py index 8fc4e4851..3633d86a1 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -7,7 +7,7 @@ try: __version__ = version('flixopt') -except PackageNotFoundError: +except (PackageNotFoundError, TypeError): # Package is not installed (development mode without editable install) __version__ = '0.0.0.dev0' 
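The two-stage example above first sizes the system on a coarsened time grid, then fixes those sizes for a full-resolution dispatch run. A condensed sketch of that pattern, using only calls that appear in the diff (`resample`, `fix_sizes`, `HighsSolver(mip_gap, time_limit)`); it assumes a `flow_system` with `InvestParameters` already defined:

```python
import flixopt as fx

# Stage 1: sizing on a 2h-resampled copy of the flow system (smaller, faster model)
sizing = fx.FullCalculation('Sizing', flow_system.resample('2h'))
sizing.do_modeling()
sizing.solve(fx.solvers.HighsSolver(0.001, 60))

# Stage 2: dispatch at full resolution with sizes pinned to the stage-1 solution
dispatch = fx.FullCalculation('Dispatch', flow_system)
dispatch.do_modeling()
dispatch.fix_sizes(sizing.results.solution)
dispatch.solve(fx.solvers.HighsSolver(0.001, 60))
```
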
diff --git a/flixopt/aggregation.py b/flixopt/aggregation.py index 91ef618a9..cd0fdde3c 100644 --- a/flixopt/aggregation.py +++ b/flixopt/aggregation.py @@ -20,7 +20,9 @@ except ImportError: TSAM_AVAILABLE = False +from .color_processing import process_colors from .components import Storage +from .config import CONFIG from .structure import ( FlowSystemModel, Submodel, @@ -141,7 +143,7 @@ def describe_clusters(self) -> str: def use_extreme_periods(self): return self.time_series_for_high_peaks or self.time_series_for_low_peaks - def plot(self, colormap: str = 'viridis', show: bool = True, save: pathlib.Path | None = None) -> go.Figure: + def plot(self, colormap: str | None = None, show: bool = True, save: pathlib.Path | None = None) -> go.Figure: from . import plotting df_org = self.original_data.copy().rename( @@ -150,13 +152,20 @@ def plot(self, colormap: str = 'viridis', show: bool = True, save: pathlib.Path df_agg = self.aggregated_data.copy().rename( columns={col: f'Aggregated - {col}' for col in self.aggregated_data.columns} ) - fig = plotting.with_plotly(df_org, 'line', colors=colormap) + colors = list( + process_colors(colormap or CONFIG.Plotting.default_qualitative_colorscale, list(df_org.columns)).values() + ) + fig = plotting.with_plotly(df_org.to_xarray(), 'line', colors=colors, xlabel='Time in h') for trace in fig.data: trace.update(dict(line=dict(dash='dash'))) - fig = plotting.with_plotly(df_agg, 'line', colors=colormap, fig=fig) + fig2 = plotting.with_plotly(df_agg.to_xarray(), 'line', colors=colors, xlabel='Time in h') + for trace in fig2.data: + fig.add_trace(trace) fig.update_layout( - title='Original vs Aggregated Data (original = ---)', xaxis_title='Index', yaxis_title='Value' + title='Original vs Aggregated Data (original = ---)', + xaxis_title='Time in h', + yaxis_title='Value', ) plotting.export_figure( diff --git a/flixopt/calculation.py b/flixopt/calculation.py index 355514c00..1dab78e57 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -24,7 +24,6 @@ from tqdm import tqdm from . import io as fx_io -from . import utils as utils from .aggregation import Aggregation, AggregationModel, AggregationParameters from .components import Storage from .config import CONFIG @@ -115,7 +114,7 @@ def main_results(self) -> dict[str, Scalar | dict]: 'periodic': effect.submodel.periodic.total.solution.values, 'total': effect.submodel.total.solution.values, } - for effect in self.flow_system.effects + for effect in sorted(self.flow_system.effects.values(), key=lambda e: e.label_full.upper()) }, 'Invest-Decisions': { 'Invested': { @@ -146,7 +145,7 @@ def main_results(self) -> dict[str, Scalar | dict]: ], } - return utils.round_nested_floats(main_results) + return fx_io.round_nested_floats(main_results) @property def summary(self): @@ -257,7 +256,7 @@ def solve( logger.info( f'{" Main Results ":#^80}\n' + yaml.dump( - utils.round_nested_floats(self.main_results), + self.main_results, default_flow_style=False, sort_keys=False, allow_unicode=True, @@ -614,7 +613,7 @@ def do_modeling_and_solve( ) # Redirect solver stdout to null to avoid cluttering the output - with utils.suppress_output(): + with fx_io.suppress_output(): calculation.solve( solver, log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', diff --git a/flixopt/color_processing.py b/flixopt/color_processing.py new file mode 100644 index 000000000..2959acc82 --- /dev/null +++ b/flixopt/color_processing.py @@ -0,0 +1,261 @@ +"""Simplified color handling for visualization. 
+ +This module provides clean color processing that transforms various input formats +into a label-to-color mapping dictionary, without needing to know about the plotting engine. +""" + +from __future__ import annotations + +import logging + +import matplotlib.colors as mcolors +import matplotlib.pyplot as plt +import plotly.express as px +from plotly.exceptions import PlotlyError + +logger = logging.getLogger('flixopt') + + +def _rgb_string_to_hex(color: str) -> str: + """Convert Plotly RGB/RGBA string format to hex. + + Args: + color: Color in format 'rgb(R, G, B)', 'rgba(R, G, B, A)' or already in hex + + Returns: + Color in hex format '#RRGGBB' + """ + color = color.strip() + + # If already hex, return as-is + if color.startswith('#'): + return color + + # Try to parse rgb() or rgba() + try: + if color.startswith('rgb('): + # Extract RGB values from 'rgb(R, G, B)' format + rgb_str = color[4:-1] # Remove 'rgb(' and ')' + elif color.startswith('rgba('): + # Extract RGBA values from 'rgba(R, G, B, A)' format + rgb_str = color[5:-1] # Remove 'rgba(' and ')' + else: + return color + + # Split on commas and parse first three components + components = rgb_str.split(',') + if len(components) < 3: + return color + + # Parse and clamp the first three components + r = max(0, min(255, int(round(float(components[0].strip()))))) + g = max(0, min(255, int(round(float(components[1].strip()))))) + b = max(0, min(255, int(round(float(components[2].strip()))))) + + return f'#{r:02x}{g:02x}{b:02x}' + except (ValueError, IndexError): + # If parsing fails, return original + return color + + +def process_colors( + colors: None | str | list[str] | dict[str, str], + labels: list[str], + default_colorscale: str = 'turbo', +) -> dict[str, str]: + """Process color input and return a label-to-color mapping. + + This function takes flexible color input and always returns a dictionary + mapping each label to a specific color string. The plotting engine can then + use this mapping as needed. + + Args: + colors: Color specification in one of four formats: + - None: Use the default colorscale + - str: Name of a colorscale (e.g., 'turbo', 'plasma', 'Set1', 'portland') + - list[str]: List of color strings (hex, named colors, etc.) 
+ - dict[str, str]: Direct label-to-color mapping + labels: List of labels that need colors assigned + default_colorscale: Fallback colorscale name if requested scale not found + + Returns: + Dictionary mapping each label to a color string + + Examples: + >>> # Using None - applies default colorscale + >>> process_colors(None, ['A', 'B', 'C']) + {'A': '#0d0887', 'B': '#7e03a8', 'C': '#cc4778'} + + >>> # Using a colorscale name + >>> process_colors('plasma', ['A', 'B', 'C']) + {'A': '#0d0887', 'B': '#7e03a8', 'C': '#cc4778'} + + >>> # Using a list of colors + >>> process_colors(['red', 'blue', 'green'], ['A', 'B', 'C']) + {'A': 'red', 'B': 'blue', 'C': 'green'} + + >>> # Using a pre-made mapping + >>> process_colors({'A': 'red', 'B': 'blue'}, ['A', 'B', 'C']) + {'A': 'red', 'B': 'blue', 'C': '#0d0887'} # C gets color from default scale + """ + if not labels: + return {} + + # Case 1: Already a mapping dictionary + if isinstance(colors, dict): + return _fill_missing_colors(colors, labels, default_colorscale) + + # Case 2: None or colorscale name (string) + if colors is None or isinstance(colors, str): + colorscale_name = colors if colors is not None else default_colorscale + color_list = _get_colors_from_scale(colorscale_name, len(labels), default_colorscale) + return dict(zip(labels, color_list, strict=False)) + + # Case 3: List of colors + if isinstance(colors, list): + if len(colors) == 0: + logger.warning(f'Empty color list provided. Using {default_colorscale} instead.') + color_list = _get_colors_from_scale(default_colorscale, len(labels), default_colorscale) + return dict(zip(labels, color_list, strict=False)) + + if len(colors) < len(labels): + logger.debug( + f'Not enough colors provided ({len(colors)}) for all labels ({len(labels)}). Colors will cycle.' + ) + + # Cycle through colors if we don't have enough + return {label: colors[i % len(colors)] for i, label in enumerate(labels)} + + raise TypeError(f'colors must be None, str, list, or dict, got {type(colors)}') + + +def _fill_missing_colors( + color_mapping: dict[str, str], + labels: list[str], + default_colorscale: str, +) -> dict[str, str]: + """Fill in missing labels in a color mapping using a colorscale. + + Args: + color_mapping: Partial label-to-color mapping + labels: All labels that need colors + default_colorscale: Colorscale to use for missing labels + + Returns: + Complete label-to-color mapping + """ + missing_labels = [label for label in labels if label not in color_mapping] + + if not missing_labels: + return color_mapping.copy() + + # Log warning about missing labels + logger.debug(f'Labels missing colors: {missing_labels}. Using {default_colorscale} for these.') + + # Get colors for missing labels + missing_colors = _get_colors_from_scale(default_colorscale, len(missing_labels), default_colorscale) + + # Combine existing and new colors + result = color_mapping.copy() + result.update(dict(zip(missing_labels, missing_colors, strict=False))) + return result + + +def _get_colors_from_scale( + colorscale_name: str, + num_colors: int, + fallback_scale: str, +) -> list[str]: + """Extract a list of colors from a named colorscale. + + Tries to get colors from the named scale (Plotly first, then Matplotlib), + falls back to the fallback scale if not found. 
+ + Args: + colorscale_name: Name of the colorscale to try + num_colors: Number of colors needed + fallback_scale: Fallback colorscale name if first fails + + Returns: + List of color strings (hex format) + """ + # Try to get the requested colorscale + colors = _try_get_colorscale(colorscale_name, num_colors) + + if colors is not None: + return colors + + # Fallback to default + logger.warning(f"Colorscale '{colorscale_name}' not found. Using '{fallback_scale}' instead.") + + colors = _try_get_colorscale(fallback_scale, num_colors) + + if colors is not None: + return colors + + # Ultimate fallback: just use basic colors + logger.warning(f"Fallback colorscale '{fallback_scale}' also not found. Using basic colors.") + basic_colors = [ + '#1f77b4', + '#ff7f0e', + '#2ca02c', + '#d62728', + '#9467bd', + '#8c564b', + '#e377c2', + '#7f7f7f', + '#bcbd22', + '#17becf', + ] + return [basic_colors[i % len(basic_colors)] for i in range(num_colors)] + + +def _try_get_colorscale(colorscale_name: str, num_colors: int) -> list[str] | None: + """Try to get colors from Plotly or Matplotlib colorscales. + + Tries Plotly colorscales first (both qualitative and sequential), + then falls back to Matplotlib colorscales. + + Args: + colorscale_name: Name of the colorscale + num_colors: Number of colors needed + + Returns: + List of color strings (hex format) if successful, None if colorscale not found + """ + # First try Plotly qualitative (discrete) color sequences + colorscale_title = colorscale_name.title() + if hasattr(px.colors.qualitative, colorscale_title): + color_list = getattr(px.colors.qualitative, colorscale_title) + # Convert to hex format for matplotlib compatibility + return [_rgb_string_to_hex(color_list[i % len(color_list)]) for i in range(num_colors)] + + # Then try Plotly sequential/continuous colorscales + try: + colorscale = px.colors.get_colorscale(colorscale_name) + # Sample evenly from the colorscale + if num_colors == 1: + sample_points = [0.5] + else: + sample_points = [i / (num_colors - 1) for i in range(num_colors)] + colors = px.colors.sample_colorscale(colorscale, sample_points) + # Convert to hex format for matplotlib compatibility + return [_rgb_string_to_hex(c) for c in colors] + except (PlotlyError, ValueError): + pass + + # Finally try Matplotlib colorscales + try: + cmap = plt.get_cmap(colorscale_name) + + # Sample evenly from the colorscale + if num_colors == 1: + colors = [cmap(0.5)] + else: + colors = [cmap(i / (num_colors - 1)) for i in range(num_colors)] + + # Convert RGBA tuples to hex strings + return [mcolors.rgb2hex(color[:3]) for color in colors] + + except (ValueError, KeyError): + return None diff --git a/flixopt/effects.py b/flixopt/effects.py index 2c7607b02..757549223 100644 --- a/flixopt/effects.py +++ b/flixopt/effects.py @@ -16,9 +16,10 @@ import numpy as np import xarray as xr +from . 
import io as fx_io from .core import PeriodicDataUser, Scalar, TemporalData, TemporalDataUser from .features import ShareAllocationModel -from .structure import Element, ElementModel, FlowSystemModel, Submodel, register_class_for_io +from .structure import Element, ElementContainer, ElementModel, FlowSystemModel, Submodel, register_class_for_io if TYPE_CHECKING: from collections.abc import Iterator @@ -448,13 +449,13 @@ def _do_modeling(self): EffectExpr = dict[str, linopy.LinearExpression] # Used to create Shares -class EffectCollection: +class EffectCollection(ElementContainer[Effect]): """ Handling all Effects """ def __init__(self, *effects: Effect): - self._effects = {} + super().__init__(element_type_name='effects') self._standard_effect: Effect | None = None self._objective_effect: Effect | None = None @@ -474,25 +475,23 @@ def add_effects(self, *effects: Effect) -> None: self.standard_effect = effect if effect.is_objective: self.objective_effect = effect - self._effects[effect.label] = effect + self.add(effect) # Use the inherited add() method from ElementContainer logger.info(f'Registered new Effect: {effect.label}') def create_effect_values_dict( self, effect_values_user: PeriodicEffectsUser | TemporalEffectsUser ) -> dict[str, Scalar | TemporalDataUser] | None: - """ - Converts effect values into a dictionary. If a scalar is provided, it is associated with a default effect type. - - Examples - -------- - effect_values_user = 20 -> {'': 20} - effect_values_user = {None: 20} -> {'': 20} - effect_values_user = None -> None - effect_values_user = {'effect1': 20, 'effect2': 0.3} -> {'effect1': 20, 'effect2': 0.3} - - Returns - ------- - dict or None + """Converts effect values into a dictionary. If a scalar is provided, it is associated with a default effect type. + + Examples: + ```python + effect_values_user = 20 -> {'': 20} + effect_values_user = {None: 20} -> {'': 20} + effect_values_user = None -> None + effect_values_user = {'effect1': 20, 'effect2': 0.3} -> {'effect1': 20, 'effect2': 0.3} + ``` + + Returns: A dictionary keyed by effect label, or None if input is None. Note: a standard effect must be defined when passing scalars or None labels. """ @@ -522,10 +521,13 @@ def _plausibility_checks(self) -> None: # Check circular loops in effects: temporal, periodic = self.calculate_effect_share_factors() - # Validate all referenced sources exist - unknown = {src for src, _ in list(temporal.keys()) + list(periodic.keys()) if src not in self.effects} + # Validate all referenced effects (both sources and targets) exist + edges = list(temporal.keys()) + list(periodic.keys()) + unknown_sources = {src for src, _ in edges if src not in self} + unknown_targets = {tgt for _, tgt in edges if tgt not in self} + unknown = unknown_sources | unknown_targets if unknown: - raise KeyError(f'Unknown effects used in in effect share mappings: {sorted(unknown)}') + raise KeyError(f'Unknown effects used in effect share mappings: {sorted(unknown)}') temporal_cycles = detect_cycles(tuples_to_adjacency_list([key for key in temporal])) periodic_cycles = detect_cycles(tuples_to_adjacency_list([key for key in periodic])) @@ -554,31 +556,23 @@ def __getitem__(self, effect: str | Effect | None) -> Effect: else: raise KeyError(f'Effect {effect} not found!') try: - return self.effects[effect] + return super().__getitem__(effect) # Leverage ContainerMixin suggestions except KeyError as e: - raise KeyError(f'Effect "{effect}" not found! 
Add it to the FlowSystem first!') from e + # Extract the original message and append context for cleaner output + original_msg = str(e).strip('\'"') + raise KeyError(f'{original_msg} Add the effect to the FlowSystem first.') from None - def __iter__(self) -> Iterator[Effect]: - return iter(self._effects.values()) - - def __len__(self) -> int: - return len(self._effects) + def __iter__(self) -> Iterator[str]: + return iter(self.keys()) # Iterate over keys like a normal dict def __contains__(self, item: str | Effect) -> bool: """Check if the effect exists. Checks for label or object""" if isinstance(item, str): - return item in self.effects # Check if the label exists + return super().__contains__(item) # Check if the label exists elif isinstance(item, Effect): - if item.label_full in self.effects: - return True - if item in self.effects.values(): # Check if the object exists - return True + return item.label_full in self and self[item.label_full] is item return False - @property - def effects(self) -> dict[str, Effect]: - return self._effects - @property def standard_effect(self) -> Effect: if self._standard_effect is None: @@ -613,7 +607,7 @@ def calculate_effect_share_factors( dict[tuple[str, str], xr.DataArray], ]: shares_periodic = {} - for name, effect in self.effects.items(): + for name, effect in self.items(): if effect.share_from_periodic: for source, data in effect.share_from_periodic.items(): if source not in shares_periodic: @@ -622,7 +616,7 @@ def calculate_effect_share_factors( shares_periodic = calculate_all_conversion_paths(shares_periodic) shares_temporal = {} - for name, effect in self.effects.items(): + for name, effect in self.items(): if effect.share_from_temporal: for source, data in effect.share_from_temporal.items(): if source not in shares_temporal: @@ -672,7 +666,7 @@ def add_share_to_penalty(self, name: str, expression: linopy.LinearExpression) - def _do_modeling(self): super()._do_modeling() - for effect in self.effects: + for effect in self.effects.values(): effect.create_model(self._model) self.penalty = self.add_submodels( ShareAllocationModel(self._model, dims=(), label_of_element='Penalty'), @@ -686,7 +680,7 @@ def _do_modeling(self): ) def _add_share_between_effects(self): - for target_effect in self.effects: + for target_effect in self.effects.values(): # 1. temporal: <- receiving temporal shares from other effects for source_effect, time_series in target_effect.share_from_temporal.items(): target_effect.submodel.temporal.add_share( diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index ad43c183b..cf958d9d1 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -4,15 +4,17 @@ from __future__ import annotations -import json import logging import warnings +from itertools import chain from typing import TYPE_CHECKING, Any, Literal, Optional import numpy as np import pandas as pd import xarray as xr +from . import io as fx_io +from .config import CONFIG from .core import ( ConversionError, DataConverter, @@ -32,7 +34,7 @@ TemporalEffectsUser, ) from .elements import Bus, Component, Flow -from .structure import Element, FlowSystemModel, Interface +from .structure import CompositeContainerMixin, Element, ElementContainer, FlowSystemModel, Interface if TYPE_CHECKING: import pathlib @@ -43,11 +45,13 @@ logger = logging.getLogger('flixopt') -class FlowSystem(Interface): +class FlowSystem(Interface, CompositeContainerMixin[Element]): """ - A FlowSystem organizes the high level Elements (Components, Buses & Effects). 
+ A FlowSystem organizes the high level Elements (Components, Buses, Effects & Flows). - This is the main container class that users work with to build and manage their System. + This is the main container class that users work with to build and manage their energy or material flow system. + FlowSystem provides both direct container access (via .components, .buses, .effects, .flows) and a unified + dict-like interface for accessing any element by label across all container types. Args: timesteps: The timesteps of the model. @@ -69,10 +73,74 @@ class FlowSystem(Interface): - False: All flow rates are optimized separately per scenario - list[str]: Only specified flows (by label_full) are equalized across scenarios + Examples: + Creating a FlowSystem and accessing elements: + + >>> import flixopt as fx + >>> import pandas as pd + >>> timesteps = pd.date_range('2023-01-01', periods=24, freq='h') + >>> flow_system = fx.FlowSystem(timesteps) + >>> + >>> # Add elements to the system + >>> boiler = fx.Component('Boiler', inputs=[heat_flow], on_off_parameters=...) + >>> heat_bus = fx.Bus('Heat', excess_penalty_per_flow_hour=1e4) + >>> costs = fx.Effect('costs', is_objective=True, is_standard=True) + >>> flow_system.add_elements(boiler, heat_bus, costs) + + Unified dict-like access (recommended for most cases): + + >>> # Access any element by label, regardless of type + >>> boiler = flow_system['Boiler'] # Returns Component + >>> heat_bus = flow_system['Heat'] # Returns Bus + >>> costs = flow_system['costs'] # Returns Effect + >>> + >>> # Check if element exists + >>> if 'Boiler' in flow_system: + ... print('Boiler found in system') + >>> + >>> # Iterate over all elements + >>> for label in flow_system.keys(): + ... element = flow_system[label] + ... print(f'{label}: {type(element).__name__}') + >>> + >>> # Get all element labels and objects + >>> all_labels = list(flow_system.keys()) + >>> all_elements = list(flow_system.values()) + >>> for label, element in flow_system.items(): + ... print(f'{label}: {element}') + + Direct container access for type-specific operations: + + >>> # Access specific container when you need type filtering + >>> for component in flow_system.components.values(): + ... print(f'{component.label}: {len(component.inputs)} inputs') + >>> + >>> # Access buses directly + >>> for bus in flow_system.buses.values(): + ... print(f'{bus.label}') + >>> + >>> # Flows are automatically collected from all components + >>> for flow in flow_system.flows.values(): + ... print(f'{flow.label_full}: {flow.size}') + >>> + >>> # Access effects + >>> for effect in flow_system.effects.values(): + ... print(f'{effect.label}') + Notes: + - The dict-like interface (`flow_system['element']`) searches across all containers + (components, buses, effects, flows) to find the element with the matching label. + - Element labels must be unique across all container types. Attempting to add + elements with duplicate labels will raise an error, ensuring each label maps to exactly one element. + - The `.all_elements` property is deprecated. Use the dict-like interface instead: + `flow_system['element']`, `'element' in flow_system`, `flow_system.keys()`, + `flow_system.values()`, or `flow_system.items()`. + - Direct container access (`.components`, `.buses`, `.effects`, `.flows`) is useful + when you need type-specific filtering or operations. + - The `.flows` container is automatically populated from all component inputs and outputs. 
- Creates an empty registry for components and buses, an empty EffectCollection, and a placeholder for a SystemModel. - The instance starts disconnected (self._connected_and_transformed == False) and will be - connected_and_transformed automatically when trying to solve a calculation. + connected_and_transformed automatically when trying to solve a calculation. """ def __init__( @@ -80,7 +148,7 @@ def __init__( timesteps: pd.DatetimeIndex, periods: pd.Index | None = None, scenarios: pd.Index | None = None, - hours_of_last_timestep: float | None = None, + hours_of_last_timestep: int | float | None = None, hours_of_previous_timesteps: int | float | np.ndarray | None = None, weights: PeriodicDataUser | None = None, scenario_independent_sizes: bool | list[str] = True, @@ -104,8 +172,8 @@ def __init__( self.hours_per_timestep = self.fit_to_model_coords('hours_per_timestep', hours_per_timestep) # Element collections - self.components: dict[str, Component] = {} - self.buses: dict[str, Bus] = {} + self.components: ElementContainer[Component] = ElementContainer(element_type_name='components') + self.buses: ElementContainer[Bus] = ElementContainer(element_type_name='buses') self.effects: EffectCollection = EffectCollection() self.model: FlowSystemModel | None = None @@ -113,6 +181,7 @@ def __init__( self._used_in_calculation = False self._network_app = None + self._flows_cache: ElementContainer[Flow] | None = None # Use properties to validate and store scenario dimension settings self.scenario_independent_sizes = scenario_independent_sizes @@ -232,7 +301,7 @@ def _create_reference_structure(self) -> tuple[dict, dict[str, xr.DataArray]]: # Extract from effects effects_structure = {} - for effect in self.effects: + for effect in self.effects.values(): effect_structure, effect_arrays = effect._create_reference_structure() all_extracted_arrays.update(effect_arrays) effects_structure[effect.label] = effect_structure @@ -433,7 +502,7 @@ def connect_and_transform(self): self.weights = self.fit_to_model_coords('weights', self.weights, dims=['period', 'scenario']) self._connect_network() - for element in list(self.components.values()) + list(self.effects.effects.values()) + list(self.buses.values()): + for element in chain(self.components.values(), self.effects.values(), self.buses.values()): element.transform_data(self) self._connected_and_transformed = True @@ -484,7 +553,7 @@ def plot_network( | list[ Literal['nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer'] ] = True, - show: bool = False, + show: bool | None = None, ) -> pyvis.network.Network | None: """ Visualizes the network structure of a FlowSystem using PyVis, saving it as an interactive HTML file. @@ -514,7 +583,9 @@ def plot_network( from . import plotting node_infos, edge_infos = self.network_infos() - return plotting.plot_network(node_infos, edge_infos, path, controls, show) + return plotting.plot_network( + node_infos, edge_infos, path, controls, show if show is not None else CONFIG.Plotting.default_show + ) def start_network_app(self): """Visualizes the network structure of a FlowSystem using Dash, Cytoscape, and networkx. 
@@ -579,7 +650,7 @@ def network_infos(self) -> tuple[dict[str, dict[str, str]], dict[str, dict[str, 'class': 'Bus' if isinstance(node, Bus) else 'Component', 'infos': node.__str__(), } - for node in list(self.components.values()) + list(self.buses.values()) + for node in chain(self.components.values(), self.buses.values()) } edges = { @@ -601,10 +672,8 @@ def _check_if_element_is_unique(self, element: Element) -> None: Args: element: new element to check """ - if element in self.all_elements.values(): - raise ValueError(f'Element {element.label_full} already added to FlowSystem!') # check if name is already used: - if element.label_full in self.all_elements: + if element.label_full in self: raise ValueError(f'Label of Element {element.label_full} already used in another element!') def _add_effects(self, *args: Effect) -> None: @@ -614,13 +683,15 @@ def _add_components(self, *components: Component) -> None: for new_component in list(components): logger.info(f'Registered new Component: {new_component.label_full}') self._check_if_element_is_unique(new_component) # check if already exists: - self.components[new_component.label_full] = new_component # Add to existing components + self.components.add(new_component) # Add to existing components + self._flows_cache = None # Invalidate flows cache def _add_buses(self, *buses: Bus): for new_bus in list(buses): logger.info(f'Registered new Bus: {new_bus.label_full}') self._check_if_element_is_unique(new_bus) # check if already exists: - self.buses[new_bus.label_full] = new_bus # Add to existing components + self.buses.add(new_bus) # Add to existing buses + self._flows_cache = None # Invalidate flows cache def _connect_network(self): """Connects the network of components and buses. Can be rerun without changes if no elements were added""" @@ -630,7 +701,7 @@ def _connect_network(self): flow.is_input_in_component = True if flow in component.inputs else False # Add Bus if not already added (deprecated) - if flow._bus_object is not None and flow._bus_object not in self.buses.values(): + if flow._bus_object is not None and flow._bus_object.label_full not in self.buses: warnings.warn( f'The Bus {flow._bus_object.label_full} was added to the FlowSystem from {flow.label_full}.' f'This is deprecated and will be removed in the future. ' @@ -657,62 +728,40 @@ def _connect_network(self): ) def __repr__(self) -> str: - """Compact representation for debugging.""" - status = 'βœ“' if self.connected_and_transformed else '⚠' - - # Build dimension info - dims = f'{len(self.timesteps)} timesteps [{self.timesteps[0].strftime("%Y-%m-%d")} to {self.timesteps[-1].strftime("%Y-%m-%d")}]' - if self.periods is not None: - dims += f', {len(self.periods)} periods' - if self.scenarios is not None: - dims += f', {len(self.scenarios)} scenarios' - - return f'FlowSystem({dims}, {len(self.components)} Components, {len(self.buses)} Buses, {len(self.effects)} Effects, {status})' - - def __str__(self) -> str: - """Structured summary for users.""" - - def format_elements(element_names: list, label: str, alignment: int = 12): - name_list = ', '.join(element_names[:3]) - if len(element_names) > 3: - name_list += f' ... 
(+{len(element_names) - 3} more)' + """Return a detailed string representation showing all containers.""" + r = fx_io.format_title_with_underline('FlowSystem', '=') - suffix = f' ({name_list})' if element_names else '' - padding = alignment - len(label) - 1 # -1 for the colon - return f'{label}:{"":<{padding}} {len(element_names)}{suffix}' - - time_period = f'Time period: {self.timesteps[0].date()} to {self.timesteps[-1].date()}' + # Timestep info + time_period = f'{self.timesteps[0].date()} to {self.timesteps[-1].date()}' freq_str = str(self.timesteps.freq).replace('<', '').replace('>', '') if self.timesteps.freq else 'irregular' - - lines = [ - f'Timesteps: {len(self.timesteps)} ({freq_str}) [{time_period}]', - ] + r += f'Timesteps: {len(self.timesteps)} ({freq_str}) [{time_period}]\n' # Add periods if present if self.periods is not None: period_names = ', '.join(str(p) for p in self.periods[:3]) if len(self.periods) > 3: period_names += f' ... (+{len(self.periods) - 3} more)' - lines.append(f'Periods: {len(self.periods)} ({period_names})') + r += f'Periods: {len(self.periods)} ({period_names})\n' + else: + r += 'Periods: None\n' # Add scenarios if present if self.scenarios is not None: scenario_names = ', '.join(str(s) for s in self.scenarios[:3]) if len(self.scenarios) > 3: scenario_names += f' ... (+{len(self.scenarios) - 3} more)' - lines.append(f'Scenarios: {len(self.scenarios)} ({scenario_names})') - - lines.extend( - [ - format_elements(list(self.components.keys()), 'Components'), - format_elements(list(self.buses.keys()), 'Buses'), - format_elements(list(self.effects.effects.keys()), 'Effects'), - f'Status: {"Connected & Transformed" if self.connected_and_transformed else "Not connected"}', - ] - ) - lines = ['FlowSystem:', f'{"─" * max(len(line) for line in lines)}'] + lines + r += f'Scenarios: {len(self.scenarios)} ({scenario_names})\n' + else: + r += 'Scenarios: None\n' - return '\n'.join(lines) + # Add status + status = 'βœ“' if self.connected_and_transformed else '⚠' + r += f'Status: {status}\n' + + # Add grouped container view + r += '\n' + self._format_grouped_containers() + + return r def __eq__(self, other: FlowSystem): """Check if two FlowSystems are equal by comparing their dataset representations.""" @@ -732,38 +781,46 @@ def __eq__(self, other: FlowSystem): return True - def __getitem__(self, item) -> Element: - """Get element by exact label with helpful error messages.""" - if item in self.all_elements: - return self.all_elements[item] - - # Provide helpful error with suggestions - from difflib import get_close_matches - - suggestions = get_close_matches(item, self.all_elements.keys(), n=3, cutoff=0.6) - - if suggestions: - suggestion_str = ', '.join(f"'{s}'" for s in suggestions) - raise KeyError(f"Element '{item}' not found. 
Did you mean: {suggestion_str}?") - else: - raise KeyError(f"Element '{item}' not found in FlowSystem") - - def __contains__(self, item: str) -> bool: - """Check if element exists in the FlowSystem.""" - return item in self.all_elements - - def __iter__(self): - """Iterate over element labels.""" - return iter(self.all_elements.keys()) + def _get_container_groups(self) -> dict[str, ElementContainer]: + """Return ordered container groups for CompositeContainerMixin.""" + return { + 'Components': self.components, + 'Buses': self.buses, + 'Effects': self.effects, + 'Flows': self.flows, + } @property - def flows(self) -> dict[str, Flow]: - set_of_flows = {flow for comp in self.components.values() for flow in comp.inputs + comp.outputs} - return {flow.label_full: flow for flow in set_of_flows} + def flows(self) -> ElementContainer[Flow]: + if self._flows_cache is None: + flows = [f for c in self.components.values() for f in c.inputs + c.outputs] + # Deduplicate by id and sort for reproducibility + flows = sorted({id(f): f for f in flows}.values(), key=lambda f: f.label_full.lower()) + self._flows_cache = ElementContainer(flows, element_type_name='flows') + return self._flows_cache @property def all_elements(self) -> dict[str, Element]: - return {**self.components, **self.effects.effects, **self.flows, **self.buses} + """ + Get all elements as a dictionary. + + .. deprecated:: 3.2.0 + Use dict-like interface instead: `flow_system['element']`, `'element' in flow_system`, + `flow_system.keys()`, `flow_system.values()`, or `flow_system.items()`. + This property will be removed in v4.0.0. + + Returns: + Dictionary mapping element labels to element objects. + """ + warnings.warn( + "The 'all_elements' property is deprecated. Use dict-like interface instead: " + "flow_system['element'], 'element' in flow_system, flow_system.keys(), " + 'flow_system.values(), or flow_system.items(). ' + 'This property will be removed in v4.0.0.', + DeprecationWarning, + stacklevel=2, + ) + return {**self.components, **self.effects, **self.flows, **self.buses} @property def coords(self) -> dict[FlowSystemDimensions, pd.Index]: @@ -927,6 +984,8 @@ def resample( self, time: str, method: Literal['mean', 'sum', 'max', 'min', 'first', 'last', 'std', 'var', 'median', 'count'] = 'mean', + hours_of_last_timestep: int | float | None = None, + hours_of_previous_timesteps: int | float | np.ndarray | None = None, **kwargs: Any, ) -> FlowSystem: """ @@ -936,10 +995,12 @@ def resample( Args: time: Resampling frequency (e.g., '3h', '2D', '1M') method: Resampling method. Recommended: 'mean', 'first', 'last', 'max', 'min' + hours_of_last_timestep: New duration of the last time step. Defaults to the last time interval of the new timesteps + hours_of_previous_timesteps: New duration of the previous timestep. 
Defaults to the first time increment of the new timesteps **kwargs: Additional arguments passed to xarray.resample() Returns: - FlowSystem: New FlowSystem with resampled data + FlowSystem: New resampled FlowSystem """ if not self.connected_and_transformed: self.connect_and_transform() @@ -973,6 +1034,10 @@ def resample( else: resampled_dataset = resampled_time_data + # Let FlowSystem recalculate or use explicitly set value + resampled_dataset.attrs['hours_of_last_timestep'] = hours_of_last_timestep + resampled_dataset.attrs['hours_of_previous_timesteps'] = hours_of_previous_timesteps + return self.__class__.from_dataset(resampled_dataset) @property diff --git a/flixopt/interface.py b/flixopt/interface.py index ab47c2522..21cbc82b9 100644 --- a/flixopt/interface.py +++ b/flixopt/interface.py @@ -7,7 +7,7 @@ import logging import warnings -from typing import TYPE_CHECKING, Literal, Optional +from typing import TYPE_CHECKING, Any import numpy as np import pandas as pd @@ -712,6 +712,8 @@ class InvestParameters(Interface): Combinable with effects_of_investment and effects_of_investment_per_size. effects_of_retirement: Costs incurred if NOT investing (demolition, penalties). Dict: {'effect_name': value}. + linked_periods: Describes which periods are linked. 1 means linked, 0 means size=0. None means no linked periods. + For convenience, pass a tuple containing the first and last period (2025, 2039), linking them and those in between Deprecated Args: fix_effects: **Deprecated**. Use `effects_of_investment` instead. @@ -724,7 +726,6 @@ class InvestParameters(Interface): Will be removed in version 4.0. optional: DEPRECATED. Use `mandatory` instead. Opposite of `mandatory`. Will be removed in version 4.0. - linked_periods: Describes which periods are linked. 1 means linked, 0 means size=0. None means no linked periods. Cost Annualization Requirements: All cost values must be properly weighted to match the optimization model's time horizon. @@ -963,6 +964,11 @@ def transform_data(self, flow_system: FlowSystem, name_prefix: str = '') -> None raise TypeError( f'If you provide a tuple to "linked_periods", it needs to be len=2. Got {len(self.linked_periods)=}' ) + if flow_system.periods is None: + raise ValueError( + f'Cannot use linked_periods={self.linked_periods} when FlowSystem has no periods defined. ' + f'Please define periods in FlowSystem or use linked_periods=None.' + ) logger.debug(f'Computing linked_periods from {self.linked_periods}') start, end = self.linked_periods if start not in flow_system.periods.values: @@ -1045,6 +1051,27 @@ def minimum_or_fixed_size(self) -> PeriodicData: def maximum_or_fixed_size(self) -> PeriodicData: return self.fixed_size if self.fixed_size is not None else self.maximum_size + def format_for_repr(self) -> str: + """Format InvestParameters for display in repr methods. 
+ + Returns: + Formatted string showing size information + """ + from .io import numeric_to_str_for_repr + + if self.fixed_size is not None: + val = numeric_to_str_for_repr(self.fixed_size) + status = 'mandatory' if self.mandatory else 'optional' + return f'{val} ({status})' + + # Show range if available + parts = [] + if self.minimum_size is not None: + parts.append(f'min: {numeric_to_str_for_repr(self.minimum_size)}') + if self.maximum_size is not None: + parts.append(f'max: {numeric_to_str_for_repr(self.maximum_size)}') + return ', '.join(parts) if parts else 'invest' + @staticmethod def compute_linked_periods(first_period: int, last_period: int, periods: pd.Index | list[int]) -> xr.DataArray: return xr.DataArray( @@ -1318,7 +1345,7 @@ def use_switch_on(self) -> bool: return True return any( - param is not None and param != {} + self._has_value(param) for param in [ self.effects_per_switch_on, self.switch_on_total_max, diff --git a/flixopt/io.py b/flixopt/io.py index 53d3d8e8a..fa4ef4ebf 100644 --- a/flixopt/io.py +++ b/flixopt/io.py @@ -1,13 +1,18 @@ from __future__ import annotations -import importlib.util +import inspect import json import logging +import os import pathlib import re +import sys +from contextlib import contextmanager from dataclasses import dataclass -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING, Any +import numpy as np +import pandas as pd import xarray as xr import yaml @@ -34,7 +39,272 @@ def remove_none_and_empty(obj): return obj -def _save_to_yaml(data, output_file='formatted_output.yaml'): +def round_nested_floats(obj: dict | list | float | int | Any, decimals: int = 2) -> dict | list | float | int | Any: + """Recursively round floating point numbers in nested data structures and convert it to python native types. + + This function traverses nested data structures (dictionaries, lists) and rounds + any floating point numbers to the specified number of decimal places. It handles + various data types including NumPy arrays and xarray DataArrays by converting + them to lists with rounded values. + + Args: + obj: The object to process. Can be a dict, list, float, int, numpy.ndarray, + xarray.DataArray, or any other type. + decimals (int, optional): Number of decimal places to round to. Defaults to 2. + + Returns: + The processed object with the same structure as the input, but with all floating point numbers rounded to the specified precision. NumPy arrays and xarray DataArrays are converted to lists. 
+ + Examples: + >>> data = {'a': 3.14159, 'b': [1.234, 2.678]} + >>> round_nested_floats(data, decimals=2) + {'a': 3.14, 'b': [1.23, 2.68]} + + >>> import numpy as np + >>> arr = np.array([1.234, 5.678]) + >>> round_nested_floats(arr, decimals=1) + [1.2, 5.7] + """ + if isinstance(obj, dict): + return {k: round_nested_floats(v, decimals) for k, v in obj.items()} + elif isinstance(obj, list): + return [round_nested_floats(v, decimals) for v in obj] + elif isinstance(obj, np.floating): + return round(float(obj), decimals) + elif isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.bool_): + return bool(obj) + elif isinstance(obj, float): + return round(obj, decimals) + elif isinstance(obj, int): + return obj + elif isinstance(obj, np.ndarray): + return np.round(obj, decimals).tolist() + elif isinstance(obj, xr.DataArray): + return obj.round(decimals).values.tolist() + return obj + + +# ============================================================================ +# Centralized JSON and YAML I/O Functions +# ============================================================================ + + +def load_json(path: str | pathlib.Path) -> dict | list: + """ + Load data from a JSON file. + + Args: + path: Path to the JSON file. + + Returns: + Loaded data (typically dict or list). + + Raises: + FileNotFoundError: If the file does not exist. + json.JSONDecodeError: If the file is not valid JSON. + """ + path = pathlib.Path(path) + with open(path, encoding='utf-8') as f: + return json.load(f) + + +def save_json( + data: dict | list, + path: str | pathlib.Path, + indent: int = 4, + ensure_ascii: bool = False, + **kwargs, +) -> None: + """ + Save data to a JSON file with consistent formatting. + + Args: + data: Data to save (dict or list). + path: Path to save the JSON file. + indent: Number of spaces for indentation (default: 4). + ensure_ascii: If False, allow Unicode characters (default: False). + **kwargs: Additional arguments to pass to json.dump(). + """ + path = pathlib.Path(path) + with open(path, 'w', encoding='utf-8') as f: + json.dump(data, f, indent=indent, ensure_ascii=ensure_ascii, **kwargs) + + +def load_yaml(path: str | pathlib.Path) -> dict | list: + """ + Load data from a YAML file. + + Args: + path: Path to the YAML file. + + Returns: + Loaded data (typically dict or list), or empty dict if file is empty. + + Raises: + FileNotFoundError: If the file does not exist. + yaml.YAMLError: If the file is not valid YAML. + Note: Returns {} for empty YAML files instead of None. + """ + path = pathlib.Path(path) + with open(path, encoding='utf-8') as f: + return yaml.safe_load(f) or {} + + +def _load_yaml_unsafe(path: str | pathlib.Path) -> dict | list: + """ + INTERNAL: Load YAML allowing arbitrary tags. Do not use on untrusted input. + + This function exists only for loading internally-generated files that may + contain custom YAML tags. Never use this on user-provided files. + + Args: + path: Path to the YAML file. + + Returns: + Loaded data (typically dict or list), or empty dict if file is empty. + """ + path = pathlib.Path(path) + with open(path, encoding='utf-8') as f: + return yaml.unsafe_load(f) or {} + + +def save_yaml( + data: dict | list, + path: str | pathlib.Path, + indent: int = 4, + width: int = 1000, + allow_unicode: bool = True, + sort_keys: bool = False, + compact_numeric_lists: bool = False, + **kwargs, +) -> None: + """ + Save data to a YAML file with consistent formatting. + + Args: + data: Data to save (dict or list). + path: Path to save the YAML file. 
+ indent: Number of spaces for indentation (default: 4). + width: Maximum line width (default: 1000). + allow_unicode: If True, allow Unicode characters (default: True). + sort_keys: If True, sort dictionary keys (default: False). + compact_numeric_lists: If True, format numeric lists inline for better readability (default: False). + **kwargs: Additional arguments to pass to yaml.dump(). + """ + path = pathlib.Path(path) + + if compact_numeric_lists: + # Define custom representer for compact numeric lists + def represent_list(dumper, data): + """ + Custom representer for lists to format them inline (flow style) + but only if they contain only numbers or nested numeric lists. + """ + if data and all( + isinstance(item, (int, float, np.integer, np.floating)) + or (isinstance(item, list) and all(isinstance(x, (int, float, np.integer, np.floating)) for x in item)) + for item in data + ): + return dumper.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=True) + return dumper.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=False) + + # Create custom dumper with the representer + class CompactDumper(yaml.SafeDumper): + pass + + CompactDumper.add_representer(list, represent_list) + + with open(path, 'w', encoding='utf-8') as f: + yaml.dump( + data, + f, + Dumper=CompactDumper, + indent=indent, + width=width, + allow_unicode=allow_unicode, + sort_keys=sort_keys, + default_flow_style=False, + **kwargs, + ) + else: + with open(path, 'w', encoding='utf-8') as f: + yaml.safe_dump( + data, + f, + indent=indent, + width=width, + allow_unicode=allow_unicode, + sort_keys=sort_keys, + default_flow_style=False, + **kwargs, + ) + + +def load_config_file(path: str | pathlib.Path) -> dict: + """ + Load a configuration file, automatically detecting JSON or YAML format. + + This function intelligently tries to load the file based on its extension, + with fallback support if the primary format fails. + + Supported extensions: + - .json: Tries JSON first, falls back to YAML + - .yaml, .yml: Tries YAML first, falls back to JSON + - Others: Tries YAML, then JSON + + Args: + path: Path to the configuration file. + + Returns: + Loaded configuration as a dictionary. + + Raises: + FileNotFoundError: If the file does not exist. + ValueError: If neither JSON nor YAML parsing succeeds. 
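+
+    Example (illustrative; the file names are hypothetical)::
+
+        cfg = load_config_file('solver_settings.yaml')  # parsed as YAML, JSON fallback
+        cfg = load_config_file('solver_settings.json')  # parsed as JSON, YAML fallback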
+ """ + path = pathlib.Path(path) + + if not path.exists(): + raise FileNotFoundError(f'Configuration file not found: {path}') + + # Try based on file extension + # Normalize extension to lowercase for case-insensitive matching + suffix = path.suffix.lower() + + if suffix == '.json': + try: + return load_json(path) + except json.JSONDecodeError: + logger.warning(f'Failed to parse {path} as JSON, trying YAML') + try: + return load_yaml(path) + except yaml.YAMLError as e: + raise ValueError(f'Failed to parse {path} as JSON or YAML') from e + + elif suffix in ['.yaml', '.yml']: + try: + return load_yaml(path) + except yaml.YAMLError: + logger.warning(f'Failed to parse {path} as YAML, trying JSON') + try: + return load_json(path) + except json.JSONDecodeError as e: + raise ValueError(f'Failed to parse {path} as YAML or JSON') from e + + else: + # Unknown extension, try YAML first (more common for config) + try: + return load_yaml(path) + except yaml.YAMLError: + try: + return load_json(path) + except json.JSONDecodeError as e: + raise ValueError(f'Failed to parse {path} as YAML or JSON') from e + + +def _save_yaml_multiline(data, output_file='formatted_output.yaml'): """ Save dictionary data to YAML with proper multi-line string formatting. Handles complex string patterns including backticks, special characters, @@ -62,14 +332,14 @@ def represent_str(dumper, data): # Use plain style for simple strings return dumper.represent_scalar('tag:yaml.org,2002:str', data) - # Add the string representer to SafeDumper - yaml.add_representer(str, represent_str, Dumper=yaml.SafeDumper) - # Configure dumper options for better formatting class CustomDumper(yaml.SafeDumper): def increase_indent(self, flow=False, indentless=False): return super().increase_indent(flow, False) + # Bind representer locally to CustomDumper to avoid global side effects + CustomDumper.add_representer(str, represent_str) + # Write to file with settings that ensure proper formatting with open(output_file, 'w', encoding='utf-8') as file: yaml.dump( @@ -80,7 +350,7 @@ def increase_indent(self, flow=False, indentless=False): default_flow_style=False, # Use block style for mappings width=1000, # Set a reasonable line width allow_unicode=True, # Support Unicode characters - indent=2, # Set consistent indentation + indent=4, # Set consistent indentation ) @@ -190,7 +460,7 @@ def document_linopy_model(model: linopy.Model, path: pathlib.Path | None = None) if path is not None: if path.suffix not in ['.yaml', '.yml']: raise ValueError(f'Invalid file extension for path {path}. Only .yaml and .yml are supported') - _save_to_yaml(documentation, str(path)) + _save_yaml_multiline(documentation, str(path)) return documentation @@ -199,7 +469,6 @@ def save_dataset_to_netcdf( ds: xr.Dataset, path: str | pathlib.Path, compression: int = 0, - engine: Literal['netcdf4', 'scipy', 'h5netcdf'] = 'h5netcdf', ) -> None: """ Save a dataset to a netcdf file. Store all attrs as JSON strings in 'attrs' attributes. @@ -216,16 +485,6 @@ def save_dataset_to_netcdf( if path.suffix not in ['.nc', '.nc4']: raise ValueError(f'Invalid file extension for path {path}. Only .nc and .nc4 are supported') - apply_encoding = False - if compression != 0: - if importlib.util.find_spec(engine) is not None: - apply_encoding = True - else: - logger.warning( - f'Dataset was exported without compression due to missing dependency "{engine}".' - f'Install {engine} via `pip install {engine}`.' 
- ) - ds = ds.copy(deep=True) ds.attrs = {'attrs': json.dumps(ds.attrs)} @@ -242,9 +501,9 @@ def save_dataset_to_netcdf( ds.to_netcdf( path, encoding=None - if not apply_encoding + if compression == 0 else {data_var: {'zlib': True, 'complevel': compression} for data_var in ds.data_vars}, - engine=engine, + engine='netcdf4', ) @@ -258,7 +517,7 @@ def load_dataset_from_netcdf(path: str | pathlib.Path) -> xr.Dataset: Returns: Dataset: Loaded dataset with restored attrs. """ - ds = xr.load_dataset(str(path), engine='h5netcdf') + ds = xr.load_dataset(str(path), engine='netcdf4') # Restore Dataset attrs if 'attrs' in ds.attrs: @@ -330,3 +589,371 @@ def update(self, new_name: str | None = None, new_folder: pathlib.Path | None = raise FileNotFoundError(f'Folder {new_folder} does not exist or is not a directory.') self.folder = new_folder self._update_paths() + + +def numeric_to_str_for_repr( + value: int | float | np.integer | np.floating | np.ndarray | pd.Series | pd.DataFrame | xr.DataArray, + precision: int = 1, + atol: float = 1e-10, +) -> str: + """Format value for display in repr methods. + + For single values or uniform arrays, returns the formatted value. + For arrays with variation, returns a range showing min-max. + + Args: + value: Numeric value or container (DataArray, array, Series, DataFrame) + precision: Number of decimal places (default: 1) + atol: Absolute tolerance for considering values equal (default: 1e-10) + + Returns: + Formatted string representation: + - Single/uniform values: "100.0" + - Nearly uniform values: "~100.0" (values differ slightly but display similarly) + - Varying values: "50.0-150.0" (shows range from min to max) + + Raises: + TypeError: If value cannot be converted to numeric format + """ + # Handle simple scalar types + if isinstance(value, (int, float, np.integer, np.floating)): + return f'{float(value):.{precision}f}' + + # Extract array data for variation checking + arr = None + if isinstance(value, xr.DataArray): + arr = value.values.flatten() + elif isinstance(value, (np.ndarray, pd.Series)): + arr = np.asarray(value).flatten() + elif isinstance(value, pd.DataFrame): + arr = value.values.flatten() + else: + # Fallback for unknown types + try: + return f'{float(value):.{precision}f}' + except (TypeError, ValueError) as e: + raise TypeError(f'Cannot format value of type {type(value).__name__} for repr') from e + + # Normalize dtype and handle empties + arr = arr.astype(float, copy=False) + if arr.size == 0: + return '?' + + # Filter non-finite values + finite = arr[np.isfinite(arr)] + if finite.size == 0: + return 'nan' + + # Check for single value + if finite.size == 1: + return f'{float(finite[0]):.{precision}f}' + + # Check if all values are the same or very close + min_val = float(np.nanmin(finite)) + max_val = float(np.nanmax(finite)) + + # First check: values are essentially identical + if np.allclose(min_val, max_val, atol=atol): + return f'{float(np.mean(finite)):.{precision}f}' + + # Second check: display values are the same but actual values differ slightly + min_str = f'{min_val:.{precision}f}' + max_str = f'{max_val:.{precision}f}' + if min_str == max_str: + return f'~{min_str}' + + # Values vary significantly - show range + return f'{min_str}-{max_str}' + + +def _format_value_for_repr(value) -> str: + """Format a single value for display in repr. 
+ + Args: + value: The value to format + + Returns: + Formatted string representation of the value + """ + # Format numeric types using specialized formatter + if isinstance(value, (int, float, np.integer, np.floating, np.ndarray, pd.Series, pd.DataFrame, xr.DataArray)): + try: + return numeric_to_str_for_repr(value) + except Exception: + value_repr = repr(value) + if len(value_repr) > 50: + value_repr = value_repr[:47] + '...' + return value_repr + + # Format dicts with numeric/array values nicely + elif isinstance(value, dict): + try: + formatted_items = [] + for k, v in value.items(): + if isinstance( + v, (int, float, np.integer, np.floating, np.ndarray, pd.Series, pd.DataFrame, xr.DataArray) + ): + v_str = numeric_to_str_for_repr(v) + else: + v_str = repr(v) + if len(v_str) > 30: + v_str = v_str[:27] + '...' + formatted_items.append(f'{repr(k)}: {v_str}') + value_repr = '{' + ', '.join(formatted_items) + '}' + if len(value_repr) > 50: + value_repr = value_repr[:47] + '...' + return value_repr + except Exception: + value_repr = repr(value) + if len(value_repr) > 50: + value_repr = value_repr[:47] + '...' + return value_repr + + # Default repr with truncation + else: + value_repr = repr(value) + if len(value_repr) > 50: + value_repr = value_repr[:47] + '...' + return value_repr + + +def build_repr_from_init( + obj: object, + excluded_params: set[str] | None = None, + label_as_positional: bool = True, + skip_default_size: bool = False, +) -> str: + """Build a repr string from __init__ signature, showing non-default parameter values. + + This utility function extracts common repr logic used across flixopt classes. + It introspects the __init__ method to build a constructor-style repr showing + only parameters that differ from their defaults. + + Args: + obj: The object to create repr for + excluded_params: Set of parameter names to exclude (e.g., {'self', 'inputs', 'outputs'}) + Default excludes 'self', 'label', and 'kwargs' + label_as_positional: If True and 'label' param exists, show it as first positional arg + skip_default_size: If True, skip 'size' parameter when it equals CONFIG.Modeling.big + + Returns: + Formatted repr string like: ClassName("label", param=value) + """ + if excluded_params is None: + excluded_params = {'self', 'label', 'kwargs'} + else: + # Always exclude 'self' + excluded_params = excluded_params | {'self'} + + try: + # Get the constructor arguments and their current values + init_signature = inspect.signature(obj.__init__) + init_params = init_signature.parameters + + # Check if this has a 'label' parameter - if so, show it first as positional + has_label = 'label' in init_params and label_as_positional + + # Build kwargs for non-default parameters + kwargs_parts = [] + label_value = None + + for param_name, param in init_params.items(): + # Skip *args and **kwargs + if param.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD): + continue + + # Handle label separately if showing as positional (check BEFORE excluded_params) + if param_name == 'label' and has_label: + label_value = getattr(obj, param_name, None) + continue + + # Now check if parameter should be excluded + if param_name in excluded_params: + continue + + # Get current value + value = getattr(obj, param_name, None) + + # Skip if value matches default + if param.default != inspect.Parameter.empty: + # Special handling for empty containers (even if default was None) + if isinstance(value, (dict, list, tuple, set)) and len(value) == 0: + if param.default is None or ( + 
isinstance(param.default, (dict, list, tuple, set)) and len(param.default) == 0 + ): + continue + + # Handle array comparisons (xarray, numpy) + elif isinstance(value, (xr.DataArray, np.ndarray)): + try: + if isinstance(param.default, (xr.DataArray, np.ndarray)): + # Compare arrays element-wise + if isinstance(value, xr.DataArray) and isinstance(param.default, xr.DataArray): + if value.equals(param.default): + continue + elif np.array_equal(value, param.default): + continue + elif isinstance(param.default, (int, float, np.integer, np.floating)): + # Compare array to scalar (e.g., after transform_data converts scalar to DataArray) + if isinstance(value, xr.DataArray): + if np.all(value.values == float(param.default)): + continue + elif isinstance(value, np.ndarray): + if np.all(value == float(param.default)): + continue + except Exception: + pass # If comparison fails, include in repr + + # Handle numeric comparisons (deals with 0 vs 0.0, int vs float) + elif isinstance(value, (int, float, np.integer, np.floating)) and isinstance( + param.default, (int, float, np.integer, np.floating) + ): + try: + if float(value) == float(param.default): + continue + except (ValueError, TypeError): + pass + + elif value == param.default: + continue + + # Skip None values if default is None + if value is None and param.default is None: + continue + + # Special case: hide CONFIG.Modeling.big for size parameter + if skip_default_size and param_name == 'size': + from .config import CONFIG + + try: + if isinstance(value, (int, float, np.integer, np.floating)): + if float(value) == CONFIG.Modeling.big: + continue + except Exception: + pass + + # Format value using helper function + value_repr = _format_value_for_repr(value) + kwargs_parts.append(f'{param_name}={value_repr}') + + # Build args string with label first as positional if present + if has_label and label_value is not None: + # Use label_full if available, otherwise label + if hasattr(obj, 'label_full'): + label_repr = repr(obj.label_full) + else: + label_repr = repr(label_value) + + if len(label_repr) > 50: + label_repr = label_repr[:47] + '...' + args_str = label_repr + if kwargs_parts: + args_str += ', ' + ', '.join(kwargs_parts) + else: + args_str = ', '.join(kwargs_parts) + + # Build final repr + class_name = obj.__class__.__name__ + + return f'{class_name}({args_str})' + + except Exception: + # Fallback if introspection fails + return f'{obj.__class__.__name__}()' + + +def format_flow_details(obj, has_inputs: bool = True, has_outputs: bool = True) -> str: + """Format inputs and outputs as indented bullet list. + + Args: + obj: Object with 'inputs' and/or 'outputs' attributes + has_inputs: Whether to check for inputs + has_outputs: Whether to check for outputs + + Returns: + Formatted string with flow details (including leading newline), or empty string if no flows + """ + flow_lines = [] + + if has_inputs and hasattr(obj, 'inputs') and obj.inputs: + flow_lines.append(' inputs:') + for flow in obj.inputs: + flow_lines.append(f' * {repr(flow)}') + + if has_outputs and hasattr(obj, 'outputs') and obj.outputs: + flow_lines.append(' outputs:') + for flow in obj.outputs: + flow_lines.append(f' * {repr(flow)}') + + return '\n' + '\n'.join(flow_lines) if flow_lines else '' + + +def format_title_with_underline(title: str, underline_char: str = '-') -> str: + """Format a title with underline of matching length. 
+ + Args: + title: The title text + underline_char: Character to use for underline (default: '-') + + Returns: + Formatted string: "Title\\n-----\\n" + """ + return f'{title}\n{underline_char * len(title)}\n' + + +def format_sections_with_headers(sections: dict[str, str], underline_char: str = '-') -> list[str]: + """Format sections with underlined headers. + + Args: + sections: Dict mapping section headers to content + underline_char: Character for underlining headers + + Returns: + List of formatted section strings + """ + formatted_sections = [] + for section_header, section_content in sections.items(): + underline = underline_char * len(section_header) + formatted_sections.append(f'{section_header}\n{underline}\n{section_content}') + return formatted_sections + + +def build_metadata_info(parts: list[str], prefix: str = ' | ') -> str: + """Build metadata info string from parts. + + Args: + parts: List of metadata strings (empty strings are filtered out) + prefix: Prefix to add if parts is non-empty + + Returns: + Formatted info string or empty string + """ + # Filter out empty strings + parts = [p for p in parts if p] + if not parts: + return '' + info = ' | '.join(parts) + return prefix + info if prefix else info + + +@contextmanager +def suppress_output(): + """Redirect both Python and C-level stdout/stderr to os.devnull.""" + with open(os.devnull, 'w') as devnull: + # Save original file descriptors + old_stdout_fd = os.dup(1) + old_stderr_fd = os.dup(2) + try: + # Flush any pending text + sys.stdout.flush() + sys.stderr.flush() + # Redirect low-level fds to devnull + os.dup2(devnull.fileno(), 1) + os.dup2(devnull.fileno(), 2) + yield + finally: + # Restore fds + os.dup2(old_stdout_fd, 1) + os.dup2(old_stderr_fd, 2) + os.close(old_stdout_fd) + os.close(old_stderr_fd) diff --git a/flixopt/structure.py b/flixopt/structure.py index 72efc3df2..e54680592 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -6,14 +6,17 @@ from __future__ import annotations import inspect -import json import logging +import re from dataclasses import dataclass +from difflib import get_close_matches from io import StringIO from typing import ( TYPE_CHECKING, Any, + Generic, Literal, + TypeVar, ) import linopy @@ -169,7 +172,7 @@ def solution(self): }, 'Effects': { effect.label_full: effect.submodel.results_structure() - for effect in sorted(self.flow_system.effects, key=lambda effect: effect.label_full.upper()) + for effect in sorted(self.flow_system.effects.values(), key=lambda effect: effect.label_full.upper()) }, 'Flows': { flow.label_full: flow.submodel.results_structure() @@ -243,9 +246,7 @@ def __repr__(self) -> str: } # Format sections with headers and underlines - formatted_sections = [] - for section_header, section_content in sections.items(): - formatted_sections.append(f'{section_header}\n{"-" * len(section_header)}\n{section_content}') + formatted_sections = fx_io.format_sections_with_headers(sections) title = f'FlowSystemModel ({self.type})' all_sections = '\n'.join(formatted_sections) @@ -507,6 +508,33 @@ def _validate_kwargs(self, kwargs: dict, class_name: str = None) -> None: unexpected_params = ', '.join(f"'{param}'" for param in extra_kwargs.keys()) raise TypeError(f'{class_name}.__init__() got unexpected keyword argument(s): {unexpected_params}') + @staticmethod + def _has_value(param: Any) -> bool: + """Check if a parameter has a meaningful value. + + Args: + param: The parameter to check. 
+
+        Returns:
+            False for:
+            - None
+            - Empty collections (dict, list, tuple, set, frozenset)
+
+            True for all other values, including:
+            - Non-empty collections
+            - xarray DataArrays (even if they contain NaN/empty data)
+            - Scalar values (0, False, empty strings, etc.)
+            - NumPy arrays (even if empty - use .size to check those explicitly)
+        """
+        if param is None:
+            return False
+
+        # Check for empty collections (but not strings, arrays, or DataArrays)
+        if isinstance(param, (dict, list, tuple, set, frozenset)) and len(param) == 0:
+            return False
+
+        return True
+
     @classmethod
     def _resolve_dataarray_reference(
         cls, reference: str, arrays_dict: dict[str, xr.DataArray]
@@ -788,47 +816,13 @@ def to_json(self, path: str | pathlib.Path):
         try:
             # Use the stats mode for JSON export (cleaner output)
             data = self.get_structure(clean=True, stats=True)
-            with open(path, 'w', encoding='utf-8') as f:
-                json.dump(data, f, indent=4, ensure_ascii=False)
+            fx_io.save_json(data, path)
         except Exception as e:
             raise OSError(f'Failed to save {self.__class__.__name__} to JSON file {path}: {e}') from e
 
     def __repr__(self):
         """Return a detailed string representation for debugging."""
-        try:
-            # Get the constructor arguments and their current values
-            init_signature = inspect.signature(self.__init__)
-            init_args = init_signature.parameters
-
-            # Create a dictionary with argument names and their values, with better formatting
-            args_parts = []
-            for name in init_args:
-                if name == 'self':
-                    continue
-                value = getattr(self, name, None)
-                # Truncate long representations
-                value_repr = repr(value)
-                if len(value_repr) > 50:
-                    value_repr = value_repr[:47] + '...'
-                args_parts.append(f'{name}={value_repr}')
-
-            args_str = ', '.join(args_parts)
-            return f'{self.__class__.__name__}({args_str})'
-        except Exception:
-            # Fallback if introspection fails
-            return f'{self.__class__.__name__}()'
-
-    def __str__(self):
-        """Return a user-friendly string representation."""
-        try:
-            data = self.get_structure(clean=True, stats=True)
-            with StringIO() as output_buffer:
-                console = Console(file=output_buffer, width=1000)  # Adjust width as needed
-                console.print(Pretty(data, expand_all=True, indent_guides=True))
-                return output_buffer.getvalue()
-        except Exception:
-            # Fallback if structure generation fails
-            return f'{self.__class__.__name__} instance'
+        return fx_io.build_repr_from_init(self, excluded_params={'self', 'label', 'kwargs'})
 
     def copy(self) -> Interface:
         """
@@ -878,15 +872,16 @@ def create_model(self, model: FlowSystemModel) -> ElementModel:
     def label_full(self) -> str:
         return self.label
 
+    def __repr__(self) -> str:
+        """Return string representation."""
+        return fx_io.build_repr_from_init(self, excluded_params={'self', 'label', 'kwargs'}, skip_default_size=True)
+
     @staticmethod
     def _valid_label(label: str) -> str:
-        """
-        Checks if the label is valid. If not, it is replaced by the default label
+        """Checks that the label is valid, raising an error otherwise.
 
-        Raises
-        ------
-        ValueError
-            If the label is not valid
+        Raises:
+            ValueError: If the label is not valid.
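+
+        For example (illustrative): 'Boiler' passes validation unchanged,
+        while 'Heat->Power' raises a ValueError because '->' is one of the
+        reserved substrings.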
""" not_allowed = ['(', ')', '|', '->', '\\', '-slash-'] # \\ is needed to check for \ if any([sign in label for sign in not_allowed]): @@ -900,6 +895,329 @@ def _valid_label(label: str) -> str: return label +# Precompiled regex pattern for natural sorting +_NATURAL_SPLIT = re.compile(r'(\d+)') + + +def _natural_sort_key(text): + """Sort key for natural ordering (e.g., bus1, bus2, bus10 instead of bus1, bus10, bus2).""" + return [int(c) if c.isdigit() else c.lower() for c in _NATURAL_SPLIT.split(text)] + + +# Type variable for containers +T = TypeVar('T') + + +class ContainerMixin(dict[str, T]): + """ + Mixin providing shared container functionality with nice repr and error messages. + + Subclasses must implement _get_label() to extract the label from elements. + """ + + def __init__( + self, + elements: list[T] | dict[str, T] | None = None, + element_type_name: str = 'elements', + ): + """ + Args: + elements: Initial elements to add (list or dict) + element_type_name: Name for display (e.g., 'components', 'buses') + """ + super().__init__() + self._element_type_name = element_type_name + + if elements is not None: + if isinstance(elements, dict): + for element in elements.values(): + self.add(element) + else: + for element in elements: + self.add(element) + + def _get_label(self, element: T) -> str: + """ + Extract label from element. Must be implemented by subclasses. + + Args: + element: Element to get label from + + Returns: + Label string + """ + raise NotImplementedError('Subclasses must implement _get_label()') + + def add(self, element: T) -> None: + """Add an element to the container.""" + label = self._get_label(element) + if label in self: + raise ValueError( + f'Element with label "{label}" already exists in {self._element_type_name}. ' + f'Each element must have a unique label.' + ) + self[label] = element + + def __setitem__(self, label: str, element: T) -> None: + """Set element with validation.""" + element_label = self._get_label(element) + if label != element_label: + raise ValueError( + f'Key "{label}" does not match element label "{element_label}". ' + f'Use the correct label as key or use .add() method.' + ) + super().__setitem__(label, element) + + def __getitem__(self, label: str) -> T: + """ + Get element by label with helpful error messages. + + Args: + label: Label of the element to retrieve + + Returns: + The element with the given label + + Raises: + KeyError: If element is not found, with suggestions for similar labels + """ + try: + return super().__getitem__(label) + except KeyError: + # Provide helpful error with close matches suggestions + suggestions = get_close_matches(label, self.keys(), n=3, cutoff=0.6) + error_msg = f'Element "{label}" not found in {self._element_type_name}.' + if suggestions: + error_msg += f' Did you mean: {", ".join(suggestions)}?' + else: + available = list(self.keys()) + if len(available) <= 5: + error_msg += f' Available: {", ".join(available)}' + else: + error_msg += f' Available: {", ".join(available[:5])} ... 
(+{len(available) - 5} more)' + raise KeyError(error_msg) from None + + def __repr__(self) -> str: + """Return a string representation similar to linopy.model.Variables.""" + count = len(self) + title = f'{self._element_type_name.capitalize()} ({count} item{"s" if count != 1 else ""})' + + if not self: + r = fx_io.format_title_with_underline(title) + r += '\n' + else: + r = fx_io.format_title_with_underline(title) + for name in sorted(self.keys(), key=_natural_sort_key): + r += f' * {name}\n' + + return r + + +class ElementContainer(ContainerMixin[T]): + """ + Container for Element objects (Component, Bus, Flow, Effect). + + Uses element.label_full for keying. + """ + + def _get_label(self, element: T) -> str: + """Extract label_full from Element.""" + return element.label_full + + +class ResultsContainer(ContainerMixin[T]): + """ + Container for Results objects (ComponentResults, BusResults, etc). + + Uses element.label for keying. + """ + + def _get_label(self, element: T) -> str: + """Extract label from Results object.""" + return element.label + + +T_element = TypeVar('T_element') + + +class CompositeContainerMixin(Generic[T_element]): + """ + Mixin providing unified dict-like access across multiple typed containers. + + This mixin enables classes that manage multiple containers (e.g., components, + buses, effects, flows) to provide a unified interface for accessing elements + across all containers, as if they were a single collection. + + Type Parameter: + T_element: The type of elements stored in the containers. Can be a union type + for containers holding multiple types (e.g., 'ComponentResults | BusResults'). + + Key Features: + - Dict-like access: `obj['element_name']` searches all containers + - Iteration: `for label in obj:` iterates over all elements + - Membership: `'element' in obj` checks across all containers + - Standard dict methods: keys(), values(), items() + - Grouped display: Formatted repr showing elements by type + - Type hints: Full IDE and type checker support + + Subclasses must implement: + _get_container_groups() -> dict[str, dict]: + Returns a dictionary mapping group names (e.g., 'Components', 'Buses') + to container dictionaries. Containers are displayed in the order returned. + + Example: + ```python + class MySystem(CompositeContainerMixin[Component | Bus]): + def __init__(self): + self.components = {'Boiler': Component(...), 'CHP': Component(...)} + self.buses = {'Heat': Bus(...), 'Power': Bus(...)} + + def _get_container_groups(self): + return { + 'Components': self.components, + 'Buses': self.buses, + } + + + system = MySystem() + comp = system['Boiler'] # Type: Component | Bus (with proper IDE support) + 'Heat' in system # True + labels = system.keys() # Type: list[str] + elements = system.values() # Type: list[Component | Bus] + ``` + + Integration with ContainerMixin: + This mixin is designed to work alongside ContainerMixin-based containers + (ElementContainer, ResultsContainer) by aggregating them into a unified + interface while preserving their individual functionality. + """ + + def _get_container_groups(self) -> dict[str, ContainerMixin[Any]]: + """ + Return ordered dict of container groups to aggregate. + + Returns: + Dictionary mapping group names to container objects (e.g., ElementContainer, ResultsContainer). + Group names should be capitalized (e.g., 'Components', 'Buses'). + Order determines display order in __repr__. 
+ + Example: + ```python + return { + 'Components': self.components, + 'Buses': self.buses, + 'Effects': self.effects, + } + ``` + """ + raise NotImplementedError('Subclasses must implement _get_container_groups()') + + def __getitem__(self, key: str) -> T_element: + """ + Get element by label, searching all containers. + + Args: + key: Element label to find + + Returns: + The element with the given label + + Raises: + KeyError: If element not found, with helpful suggestions + """ + # Search all containers in order + for container in self._get_container_groups().values(): + if key in container: + return container[key] + + # Element not found - provide helpful error + all_elements = {} + for container in self._get_container_groups().values(): + all_elements.update(container) + + suggestions = get_close_matches(key, all_elements.keys(), n=3, cutoff=0.6) + error_msg = f'Element "{key}" not found.' + + if suggestions: + error_msg += f' Did you mean: {", ".join(suggestions)}?' + else: + available = list(all_elements.keys()) + if len(available) <= 5: + error_msg += f' Available: {", ".join(available)}' + else: + error_msg += f' Available: {", ".join(available[:5])} ... (+{len(available) - 5} more)' + + raise KeyError(error_msg) + + def __iter__(self): + """Iterate over all element labels across all containers.""" + for container in self._get_container_groups().values(): + yield from container.keys() + + def __len__(self) -> int: + """Return total count of elements across all containers.""" + return sum(len(container) for container in self._get_container_groups().values()) + + def __contains__(self, key: str) -> bool: + """Check if element exists in any container.""" + return any(key in container for container in self._get_container_groups().values()) + + def keys(self) -> list[str]: + """Return all element labels across all containers.""" + return list(self) + + def values(self) -> list[T_element]: + """Return all element objects across all containers.""" + vals = [] + for container in self._get_container_groups().values(): + vals.extend(container.values()) + return vals + + def items(self) -> list[tuple[str, T_element]]: + """Return (label, element) pairs for all elements.""" + items = [] + for container in self._get_container_groups().values(): + items.extend(container.items()) + return items + + def _format_grouped_containers(self, title: str | None = None) -> str: + """ + Format containers as grouped string representation using each container's repr. + + Args: + title: Optional title for the representation. If None, no title is shown. + + Returns: + Formatted string with groups and their elements. + Empty groups are automatically hidden. + + Example output: + ``` + Components (1 item) + ------------------- + * Boiler + + Buses (2 items) + --------------- + * Heat + * Power + ``` + """ + parts = [] + + if title: + parts.append(fx_io.format_title_with_underline(title)) + + container_groups = self._get_container_groups() + for container in container_groups.values(): + if container: # Only show non-empty groups + if parts: # Add spacing between sections + parts.append('') + parts.append(repr(container).rstrip('\n')) + + return '\n'.join(parts) + + class Submodel(SubmodelsMixin): """Stores Variables and Constraints. Its a subset of a FlowSystemModel. Variables and constraints are stored in the main FlowSystemModel, and are referenced here. 
@@ -1061,9 +1379,7 @@ def __repr__(self) -> str: } # Format sections with headers and underlines - formatted_sections = [] - for section_header, section_content in sections.items(): - formatted_sections.append(f'{section_header}\n{"-" * len(section_header)}\n{section_content}') + formatted_sections = fx_io.format_sections_with_headers(sections) model_string = f'Submodel "{self.label_of_model}":' all_sections = '\n'.join(formatted_sections) @@ -1107,7 +1423,7 @@ def __contains__(self, name: str) -> bool: def __repr__(self) -> str: """Simple representation of the submodels collection.""" if not self.data: - return 'flixopt.structure.Submodels:\n----------------------------\n \n' + return fx_io.format_title_with_underline('flixopt.structure.Submodels') + ' \n' total_vars = sum(len(submodel.variables) for submodel in self.data.values()) total_cons = sum(len(submodel.constraints) for submodel in self.data.values()) @@ -1115,18 +1431,15 @@ def __repr__(self) -> str: title = ( f'flixopt.structure.Submodels ({total_vars} vars, {total_cons} constraints, {len(self.data)} submodels):' ) - underline = '-' * len(title) - if not self.data: - return f'{title}\n{underline}\n \n' - sub_models_string = '' + result = fx_io.format_title_with_underline(title) for name, submodel in self.data.items(): type_name = submodel.__class__.__name__ var_count = len(submodel.variables) con_count = len(submodel.constraints) - sub_models_string += f'\n * {name} [{type_name}] ({var_count}v/{con_count}c)' + result += f' * {name} [{type_name}] ({var_count}v/{con_count}c)\n' - return f'{title}\n{underline}{sub_models_string}\n' + return result def items(self) -> ItemsView[str, Submodel]: return self.data.items() diff --git a/flixopt/utils.py b/flixopt/utils.py deleted file mode 100644 index 297eea433..000000000 --- a/flixopt/utils.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -This module contains several utility functions used throughout the flixopt framework. 
-""" - -from __future__ import annotations - -import logging -import os -import sys -from contextlib import contextmanager -from typing import Any, Literal - -import numpy as np -import xarray as xr - -logger = logging.getLogger('flixopt') - - -@contextmanager -def suppress_output(): - """Redirect both Python and C-level stdout/stderr to os.devnull.""" - with open(os.devnull, 'w') as devnull: - # Save original file descriptors - old_stdout_fd = os.dup(1) - old_stderr_fd = os.dup(2) - try: - # Flush any pending text - sys.stdout.flush() - sys.stderr.flush() - # Redirect low-level fds to devnull - os.dup2(devnull.fileno(), 1) - os.dup2(devnull.fileno(), 2) - yield - finally: - # Restore fds - os.dup2(old_stdout_fd, 1) - os.dup2(old_stderr_fd, 2) - os.close(old_stdout_fd) - os.close(old_stderr_fd) diff --git a/mkdocs.yml b/mkdocs.yml index 72ecbe549..7d3490360 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,4 +1,4 @@ -# Options: +# flixOpt Documentation Configuration # https://mkdocstrings.github.io/python/usage/configuration/docstrings/ # https://squidfunk.github.io/mkdocs-material/setup/ @@ -11,9 +11,9 @@ repo_name: flixOpt/flixopt nav: - Home: index.md - User Guide: - - user-guide/index.md - - Migration to v3.0.0: user-guide/migration-guide-v3.md - Getting Started: getting-started.md + - Core Concepts: user-guide/core-concepts.md + - Migration to v3.0.0: user-guide/migration-guide-v3.md - Mathematical Notation: - Overview: user-guide/mathematical-notation/index.md - Dimensions: user-guide/mathematical-notation/dimensions.md @@ -39,125 +39,281 @@ nav: - API Reference: api-reference/ - Release Notes: changelog/ - theme: name: material + language: en + palette: - # Light mode + # Palette toggle for automatic mode + - media: "(prefers-color-scheme)" + toggle: + icon: material/brightness-auto + name: Switch to light mode + + # Palette toggle for light mode - media: "(prefers-color-scheme: light)" scheme: default primary: teal - accent: blue + accent: cyan toggle: icon: material/brightness-7 name: Switch to dark mode - # Dark mode + + # Palette toggle for dark mode - media: "(prefers-color-scheme: dark)" scheme: slate - primary: teal # Can be different from light mode - accent: blue + primary: teal + accent: cyan toggle: icon: material/brightness-4 - name: Switch to light mode + name: Switch to system preference + + font: + text: Inter # Modern, readable font + code: Fira Code # Beautiful code font with ligatures + logo: images/flixopt-icon.svg favicon: images/flixopt-icon.svg + icon: repo: fontawesome/brands/github + edit: material/pencil + view: material/eye + annotation: material/plus-circle + features: + # Navigation - navigation.instant - navigation.instant.progress + - navigation.instant.prefetch - navigation.tracking - navigation.tabs + - navigation.tabs.sticky - navigation.sections + - navigation.expand # Expand navigation by default + - navigation.path # Show breadcrumb path + - navigation.prune # Only render visible navigation + - navigation.indexes - navigation.top - navigation.footer + + # Table of contents - toc.follow - - navigation.indexes + - toc.integrate # Integrate TOC into navigation (optional) + + # Search - search.suggest - search.highlight + - search.share + + # Content - content.action.edit - content.action.view - content.code.copy + - content.code.select - content.code.annotate - content.tooltips - - navigation.footer.version + - content.tabs.link # Link content tabs across pages + + # Header + - announce.dismiss # Allow dismissing announcements markdown_extensions: + # 
Content formatting + - abbr - admonition - - markdown_include.include: - base_path: docs + - attr_list + - def_list + - footnotes + - md_in_html + - tables + - toc: + permalink: true + permalink_title: Anchor link to this section + toc_depth: 3 + + # Code blocks - pymdownx.highlight: anchor_linenums: true line_spans: __span pygments_lang_class: true + auto_title: true - pymdownx.inlinehilite - - pymdownx.superfences - - attr_list - - abbr - - md_in_html - - footnotes - - tables + - pymdownx.snippets: + base_path: .. + check_paths: true + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + + # Enhanced content + - pymdownx.details - pymdownx.tabbed: alternate_style: true + combine_header_slug: true + - pymdownx.tasklist: + custom_checkbox: true + + # Typography + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.mark + - pymdownx.tilde + - pymdownx.smartsymbols + - pymdownx.keys + + # Math - pymdownx.arithmatex: generic: true + + # Icons & emojis - pymdownx.emoji: emoji_index: !!python/name:material.extensions.emoji.twemoji emoji_generator: !!python/name:material.extensions.emoji.to_svg - - pymdownx.snippets: - base_path: .. + options: + custom_icons: + - overrides/.icons + + # Legacy support + - markdown_include.include: + base_path: docs plugins: - - search # Enables the search functionality in the documentation - - table-reader # Allows including tables from external files + - search: + separator: '[\s\u200b\-_,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])' + + - table-reader + - include-markdown + - mike: + alias_type: symlink + redirect_template: null + deploy_prefix: '' + canonical_version: null version_selector: true + css_dir: css + javascript_dir: js + - literate-nav: nav_file: SUMMARY.md + implicit_index: true + - gen-files: scripts: - - scripts/gen_ref_pages.py - - mkdocstrings: # Handles automatic API documentation generation - default_handler: python # Sets Python as the default language - handlers: - python: # Configuration for Python code documentation - options: - docstring_style: google # Sets google as the docstring style - modernize_annotations: true # Improves type annotations - merge_init_into_class: true # Promotes constructor parameters to class-level documentation - docstring_section_style: table # Renders parameter sections as a table (also: list, spacy) - - members_order: source # Orders members as they appear in the source code - inherited_members: false # Include members inherited from parent classes - show_if_no_docstring: false # Documents objects even if they don't have docstrings - - group_by_category: true - heading_level: 1 # Sets the base heading level for documented objects - line_length: 80 - filters: ["!^_", "^__init__$"] - show_root_heading: true # whether the documented object's name should be displayed as a heading at the beginning of its documentation - show_source: false # Shows the source code implementation from documentation - show_object_full_path: false # Displays simple class names instead of full import paths - show_docstring_attributes: true # Shows class attributes in the documentation - show_category_heading: true # Displays category headings (Methods, Attributes, etc.) 
for organization - show_signature: true # Shows method signatures with parameters - show_signature_annotations: true # Includes type annotations in the signatures when available - show_root_toc_entry: false # Whether to show a link to the root of the documentation in the sidebar - separate_signature: true # Displays signatures separate from descriptions for cleaner layout - - extra: - infer_type_annotations: true # Uses Python type hints to supplement docstring information + - scripts/gen_ref_pages.py + + - mkdocstrings: + enabled: !ENV [ENABLE_MKDOCSTRINGS, true] + default_handler: python + handlers: + python: + paths: [.] + import: + - https://docs.python.org/3/objects.inv + - https://numpy.org/doc/stable/objects.inv + - https://pandas.pydata.org/docs/objects.inv + options: + # Docstring parsing + docstring_style: google + docstring_section_style: table + + # Member ordering and filtering + members_order: source + inherited_members: false + show_if_no_docstring: false + filters: ["!^_", "^__init__$"] + group_by_category: true + + # Headings and structure + heading_level: 1 + show_root_heading: true + show_root_toc_entry: false + show_category_heading: true + + # Signatures + show_signature: true + show_signature_annotations: true + separate_signature: true + line_length: 80 + + # Source and paths + show_source: false + show_object_full_path: false + + # Attributes and annotations + show_docstring_attributes: true + modernize_annotations: true + merge_init_into_class: true + + # Improved type hints + annotations_path: brief + + # Optional: Add git info + - git-revision-date-localized: + enable_creation_date: true + type: timeago + fallback_to_build_date: true + + # Optional: Add better navigation + - tags: + tags_file: tags.md + + # Optional: Minify HTML in production + - minify: + minify_html: true + minify_js: true + minify_css: true + htmlmin_opts: + remove_comments: true extra: version: provider: mike default: latest + alias: true + + social: + - icon: fontawesome/brands/github + link: https://github.com/flixOpt/flixopt + name: flixOpt on GitHub + - icon: fontawesome/brands/python + link: https://pypi.org/project/flixopt/ + name: flixOpt on PyPI + + analytics: + provider: google + property: !ENV GOOGLE_ANALYTICS_KEY + feedback: + title: Was this page helpful? + ratings: + - icon: material/emoticon-happy-outline + name: This page was helpful + data: 1 + note: >- + Thanks for your feedback! + - icon: material/emoticon-sad-outline + name: This page could be improved + data: 0 + note: >- + Thanks for your feedback! Help us improve by + opening an issue. 
+ + status: + new: Recently added + deprecated: Deprecated + +extra_css: + - stylesheets/extra.css extra_javascript: - - javascripts/mathjax.js # Custom MathJax 3 CDN Configuration - - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js #MathJax 3 CDN - - https://polyfill.io/v3/polyfill.min.js?features=es6 #Support for older browsers + - javascripts/mathjax.js + - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js + - https://polyfill.io/v3/polyfill.min.js?features=es6 watch: - flixopt + - docs diff --git a/pyproject.toml b/pyproject.toml index 29e0c448c..2b77963ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ maintainers = [ ] keywords = ["optimization", "energy systems", "numerical analysis"] classifiers = [ - "Development Status :: 5 - Production/Stable", + "Development Status :: 4 - Beta", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", @@ -34,10 +34,10 @@ dependencies = [ # Core scientific computing "numpy >= 1.21.5, < 3", "pandas >= 2.0.0, < 3", - "xarray >= 2024.2.0, < 2026.0", # CalVer: allow through next calendar year + "xarray >= 2024.2.0, < 2026.0", # CalVer: allow through next calendar year # Optimization and data handling - "linopy >= 0.5.1, < 0.6", # Widened from patch pin to minor range - "h5netcdf>=1.0.0, < 2", + "linopy >= 0.5.1, < 0.6", # Widened from patch pin to minor range + "netcdf4 >= 1.6.1, < 2", # Utilities "pyyaml >= 6.0.0, < 7", "rich >= 13.0.0, < 15", @@ -45,13 +45,11 @@ dependencies = [ "tomli >= 2.0.1, < 3; python_version < '3.11'", # Only needed with python 3.10 or earlier # Default solver "highspy >= 1.5.3, < 2", - # Visualization "matplotlib >= 3.5.2, < 4", "plotly >= 5.15.0, < 7", - # Fix for numexpr compatibility issue with numpy 1.26.4 on Python 3.10 - "numexpr >= 2.8.4, < 2.14; python_version < '3.11'", # Avoid 2.14.0 on older Python + "numexpr >= 2.8.4, < 2.14; python_version < '3.11'", # Avoid 2.14.0 on older Python ] [project.optional-dependencies] @@ -99,7 +97,8 @@ dev = [ # Documentation building docs = [ - "mkdocs-material==9.6.21", + "mkdocs==1.6.1", + "mkdocs-material==9.6.22", "mkdocstrings-python==1.18.2", "mkdocs-table-reader-plugin==3.1.0", "mkdocs-gen-files==0.5.0", @@ -109,6 +108,8 @@ docs = [ "pymdown-extensions==10.16.1", "pygments==2.19.2", "mike==2.1.3", + "mkdocs-git-revision-date-localized-plugin==1.4.7", + "mkdocs-minify-plugin==0.8.0", ] [project.urls] diff --git a/tests/conftest.py b/tests/conftest.py index ac5255562..bd940b843 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -848,4 +848,6 @@ def set_test_environment(): pio.renderers.default = 'json' # Use non-interactive renderer + fx.CONFIG.Plotting.default_show = False + yield diff --git a/tests/ressources/Sim1--flow_system.nc4 b/tests/ressources/Sim1--flow_system.nc4 new file mode 100644 index 0000000000000000000000000000000000000000..b56abf52da478f9fff54cad88c180808f50f5011 GIT binary patch literal 218834 zcmeI531C#k`M~ET8$byVAVDr6Ajly{IOPxugv1~akl=w>mSwXcTa#?u-9W$qQk8nw zqP0~NFSOpZ*0!Ry)zW&`t7_F+Yw@T@QLGpJYxVzqZ{~aZb~bN!v)RZdexu~=_q}iC z&HU!=?EB_==atPb?RIGYLj{H|U4%>InIGxT*D5@BVz#Lwna;1K4-UsAW%HM1jrpWt zU$y<;>sdoh`|M*>G{Y-DY?oINmn!+tEMef8f`O1TA+oasWk5gX&OtjvbQ1ZZi|~ZQ z!4Sy(48trC7RpQr&S8+Bg?Kt21gSE0d1mPs9mq1Rs?5LeXAFHv_SiXi$hBtCyA3vs zz8K4ZoM4bCM~>O~c;V*qnbQT&WGMoevuUETR00}@G$0U{8O~gau(Zgf3;b8SXl_LX z#On?JB20vszhH4mdBuVy#qd!JOC{7TL@Alx+#b?gWr2!q5?)ysMNgxDa(+Z zi&w_fWR1-cgec3*5h6NTOr6&}qUp|vAorN7mgOEVnVS+^d6Vr{#hM}uXa4=);|qiH 
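The test fragment that follows exercises the refactored plotting helpers. As a rough usage sketch — assuming `with_matplotlib` accepts the same mode names as its Plotly counterpart and returns a `(figure, axes)` pair, as the assertions below suggest:

```python
import pandas as pd

from flixopt import plotting

df = pd.DataFrame(
    {'Boiler': [30.0, 40.0, 50.0], 'CHP': [20.0, 20.0, 10.0]},
    index=pd.date_range('2020-01-01', periods=3, freq='h'),
)

# DataFrames (as well as Datasets and Series) are accepted directly
fig, ax = plotting.with_matplotlib(df, mode='stacked_bar')
fig.savefig('node_balance.png', dpi=300)
```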
zbbK@i{%C!Is;in$A0~Ve&Cv(zqGZ7}6CUTZQdlig+#TI1&Cyq8>Ex3oV_g4aBLn(d znuG7p&qMo6S4nxP(^XzkGDS9*vDiZO#zYSKtd9ijyf_@;2vp`diZ@5^Cj=r!=|q4C z5CI}U1c(3;AOb|7;S#WV^WcfT9C>%8=D;3T>QB6$WKf@{ynP!!7#Wv=jx5%1~E4VQoS8$_j426hn3`aaL9mh9N zHbyX{zA-`}WgLP)eH>~)*%(HU`o;i(l+pi3eOwWavawZ!)Hk+(kTR|qM}1r|jxr7) zAYWq}2bmvNl%u|}CxcAK73P?3Y_K5HamBfjE?1nRY%CX%`o@Y7DI3c@q--qWkTNdR zKzqh244IBgEHE8cP@s%!Bv4*fD!w5PT)~d%xPl#JT)~bqu3$$QSFoduE7(!S73?U- zE}hxOI+h3THe4CD|6~ER_U#E@`J7i9h0}7gh|9T(~4i9W%z{IWu@B)nd!lKFx zz(s~lHStw=&Cx_;U@^*;o5on01K!@zgVZqI7coZH9JoV52Vuj|sD$F>@{*K$oTWJU zraAa88}iUZS2qUX9O&FM=dYz0dw&{(a*k#q8`IN`lNf_^@Ozgcw6na{VkrcU;AxKL zQaK!yjd46j3mF?JhZ1gnRzzry7@5+^V%%q(1BU=L2j6;v2k&Yp%VwoGHmEty6?*J6 z_f=Or3p}pr&RtiKe=ngl=tFJ2|MuklH|X}K!V>`^Km>>Y5g-CYfCvx)B0vO)z#ox7 zZASx!#Ne=>poha{7>i>@I2!jyWK4A?0z`la5CI}U1c(3;AOekrz>LWSd2vlLVvar5 z?d}M+DLv|~>cn8}`!-$or1sE+G?eK`1c(3;AOb{y2oM1xKm>@u??<4vvjf>QJMjB8 z9-Wy85CI}U1c(3;AOb{y2%KU9=3TsUBcL3GQxOMPs+H97wY3KUbw3h?&5_q?&5{|xQiFcxQiF+ z<1St(<1SvPkGpuGjJtTDKJMa$GVbDq`nZc1%D9Uc>f9~s*+QVJEFdcXC!gSol z3uWBJ3uWB@3iBIVTgeDx7b__n`&CKV*mO$D#?DewHa3WovauPIl&?H*Xm)K!3ntTO afuu 0 + else: + fig, ax = plotting.with_matplotlib(data, mode=mode) + assert fig is not None and ax is not None + + +@pytest.mark.parametrize( + 'engine,data_type', [(e, dt) for e in ['plotly', 'matplotlib'] for dt in ['dataset', 'dataframe', 'series']] +) +def test_pie_plots(engine, data_type): + """Test pie charts with all data types, including automatic summing.""" + time = pd.date_range('2020-01-01', periods=5, freq='h') + + # Single-value data + single_data = { + 'dataset': xr.Dataset({'A': xr.DataArray(10), 'B': xr.DataArray(20), 'C': xr.DataArray(30)}), + 'dataframe': pd.DataFrame({'A': [10], 'B': [20], 'C': [30]}), + 'series': pd.Series({'A': 10, 'B': 20, 'C': 30}), + }[data_type] + + # Multi-dimensional data (for summing test) + multi_data = { + 'dataset': xr.Dataset( + {'A': (['time'], [1, 2, 3, 4, 5]), 'B': (['time'], [5, 5, 5, 5, 5])}, coords={'time': time} + ), + 'dataframe': pd.DataFrame({'A': [1, 2, 3, 4, 5], 'B': [5, 5, 5, 5, 5]}, index=time), + 'series': pd.Series([1, 2, 3, 4, 5], index=time, name='A'), + }[data_type] + + for data in [single_data, multi_data]: + if engine == 'plotly': + fig = plotting.dual_pie_with_plotly(data, data) + assert fig is not None and len(fig.data) >= 2 + if data is multi_data and data_type != 'series': + assert sum(fig.data[0].values) == pytest.approx(40) + else: + fig, axes = plotting.dual_pie_with_matplotlib(data, data) + assert fig is not None and len(axes) == 2 diff --git a/tests/test_results_plots.py b/tests/test_results_plots.py index 35a219e31..a656f7c44 100644 --- a/tests/test_results_plots.py +++ b/tests/test_results_plots.py @@ -28,7 +28,7 @@ def plotting_engine(request): @pytest.fixture( params=[ - 'viridis', # Test string colormap + 'turbo', # Test string colormap ['#ff0000', '#00ff00', '#0000ff', '#ffff00', '#ff00ff', '#00ffff'], # Test color list { 'Boiler(Q_th)|flow_rate': '#ff0000', @@ -48,18 +48,29 @@ def test_results_plots(flow_system, plotting_engine, show, save, color_spec): results['Boiler'].plot_node_balance(engine=plotting_engine, save=save, show=show, colors=color_spec) - results.plot_heatmap( - 'Speicher(Q_th_load)|flow_rate', - heatmap_timeframes='D', - heatmap_timesteps_per_frame='h', - color_map='viridis', # Note: heatmap only accepts string colormap - save=show, - show=save, - engine=plotting_engine, - ) + # Matplotlib doesn't support faceting/animation, so disable them for matplotlib engine + heatmap_kwargs = { + 'reshape_time': ('D', 'h'), 
diff --git a/tests/test_results_plots.py b/tests/test_results_plots.py
index 35a219e31..a656f7c44 100644
--- a/tests/test_results_plots.py
+++ b/tests/test_results_plots.py
@@ -28,7 +28,7 @@ def plotting_engine(request):

 @pytest.fixture(
     params=[
-        'viridis',  # Test string colormap
+        'turbo',  # Test string colormap
         ['#ff0000', '#00ff00', '#0000ff', '#ffff00', '#ff00ff', '#00ffff'],  # Test color list
         {
             'Boiler(Q_th)|flow_rate': '#ff0000',
@@ -48,18 +48,29 @@ def test_results_plots(flow_system, plotting_engine, show, save, color_spec):

     results['Boiler'].plot_node_balance(engine=plotting_engine, save=save, show=show, colors=color_spec)

-    results.plot_heatmap(
-        'Speicher(Q_th_load)|flow_rate',
-        heatmap_timeframes='D',
-        heatmap_timesteps_per_frame='h',
-        color_map='viridis',  # Note: heatmap only accepts string colormap
-        save=show,
-        show=save,
-        engine=plotting_engine,
-    )
+    # Matplotlib doesn't support faceting/animation, so disable them for matplotlib engine
+    heatmap_kwargs = {
+        'reshape_time': ('D', 'h'),
+        'colors': 'turbo',  # Note: heatmap only accepts string colormap
+        'save': save,
+        'show': show,
+        'engine': plotting_engine,
+    }
+    if plotting_engine == 'matplotlib':
+        heatmap_kwargs['facet_by'] = None
+        heatmap_kwargs['animate_by'] = None
+
+    results.plot_heatmap('Speicher(Q_th_load)|flow_rate', **heatmap_kwargs)

     results['Speicher'].plot_node_balance_pie(engine=plotting_engine, save=save, show=show, colors=color_spec)
-    results['Speicher'].plot_charge_state(engine=plotting_engine)
+
+    # Matplotlib doesn't support faceting/animation for plot_charge_state, and 'area' mode
+    charge_state_kwargs = {'engine': plotting_engine}
+    if plotting_engine == 'matplotlib':
+        charge_state_kwargs['facet_by'] = None
+        charge_state_kwargs['animate_by'] = None
+        charge_state_kwargs['mode'] = 'stacked_bar'  # 'area' not supported by matplotlib
+    results['Speicher'].plot_charge_state(**charge_state_kwargs)

     plt.close('all')

From 03b12028bf5c47df81ebf45dd6ac424fee4f8c9f Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Thu, 30 Oct 2025 13:52:16 +0100
Subject: [PATCH 04/27] Merge main into feature/402-feature-silent-framework

---
 examples/04_Scenarios/scenario_example.py |  139 +-
 flixopt/components.py                     |   32 +-
 flixopt/config.py                         |   63 +-
 flixopt/elements.py                       |   59 +-
 flixopt/plotting.py                       | 2134 +++++++++++----------
 flixopt/results.py                        | 1383 ++++++++++---
 6 files changed, 2549 insertions(+), 1261 deletions(-)

diff --git a/examples/04_Scenarios/scenario_example.py b/examples/04_Scenarios/scenario_example.py
index f06760603..d258d4142 100644
--- a/examples/04_Scenarios/scenario_example.py
+++ b/examples/04_Scenarios/scenario_example.py
@@ -8,20 +8,80 @@ import flixopt as fx

 if __name__ == '__main__':
-    # Create datetime array starting from '2020-01-01' for the given time period
-    timesteps = pd.date_range('2020-01-01', periods=9, freq='h')
+    # Create datetime array starting from '2020-01-01' for one week
+    timesteps = pd.date_range('2020-01-01', periods=24 * 7, freq='h')
     scenarios = pd.Index(['Base Case', 'High Demand'])
     periods = pd.Index([2020, 2021, 2022])

     # --- Create Time Series Data ---
-    # Heat demand profile (e.g., kW) over time and corresponding power prices
-    heat_demand_per_h = pd.DataFrame(
-        {'Base Case': [30, 0, 90, 110, 110, 20, 20, 20, 20], 'High Demand': [30, 0, 100, 118, 125, 20, 20, 20, 20]},
-        index=timesteps,
+    # Realistic daily patterns: morning/evening peaks, night/midday lows
+    np.random.seed(42)
+    n_hours = len(timesteps)
+
+    # Heat demand: 24-hour patterns (kW) for Base Case and High Demand scenarios
+    base_daily_pattern = np.array(
+        [22, 20, 18, 18, 20, 25, 40, 70, 95, 110, 85, 65, 60, 58, 62, 68, 75, 88, 105, 125, 130, 122, 95, 35]
     )
-    power_prices = np.array([0.08, 0.09, 0.10])
+    high_daily_pattern = np.array(
+        [28, 25, 22, 22, 24, 30, 52, 88, 118, 135, 105, 80, 75, 72, 75, 82, 92, 108, 128, 148, 155, 145, 115, 48]
+    )
+
+    # Tile and add variation
+    base_demand = np.tile(base_daily_pattern, n_hours // 24 + 1)[:n_hours] * (
+        1 + np.random.uniform(-0.05, 0.05, n_hours)
+    )
+    high_demand = np.tile(high_daily_pattern, n_hours // 24 + 1)[:n_hours] * (
+        1 + np.random.uniform(-0.07, 0.07, n_hours)
+    )
+
+    heat_demand_per_h = pd.DataFrame({'Base Case': base_demand, 'High Demand': high_demand}, index=timesteps)
+
+    # Power prices: hourly factors (night low, peak high) and period escalation (2020-2022)
+    hourly_price_factors = np.array(
+        [0.70, 0.65, 0.62, 0.60, 0.62, 0.70, 0.95, 1.15, 1.30, 1.25, 1.10, 1.00,
+         0.95, 0.90, 0.88, 0.92, 1.00, 1.10, 1.25, 1.40, 1.35, 1.20, 0.95, 0.80]
+    )
+    period_base_prices = np.array([0.075, 0.095, 0.135])  # €/kWh for 2020, 2021, 2022
+
+    price_series = np.zeros((n_hours, 3))
+    for period_idx, base_price in enumerate(period_base_prices):
+        price_series[:, period_idx] = (
+            np.tile(hourly_price_factors, n_hours // 24 + 1)[:n_hours]
+            * base_price
+            * (1 + np.random.uniform(-0.03, 0.03, n_hours))
+        )

-    flow_system = fx.FlowSystem(timesteps=timesteps, periods=periods, scenarios=scenarios, weights=np.array([0.5, 0.6]))
+    power_prices = price_series.mean(axis=0)
+
+    # Scenario weights: probability of each scenario occurring
+    # Base Case: 60% probability, High Demand: 40% probability
+    scenario_weights = np.array([0.6, 0.4])
+
+    flow_system = fx.FlowSystem(timesteps=timesteps, periods=periods, scenarios=scenarios, weights=scenario_weights)

     # --- Define Energy Buses ---
     # These represent nodes, where the used medias are balanced (electricity, heat, and gas)
@@ ... @@
         description='Kosten',
         is_standard=True,  # standard effect: no explicit value needed for costs
         is_objective=True,  # Minimizing costs as the optimization objective
-        share_from_temporal={'CO2': 0.2},
+        share_from_temporal={'CO2': 0.2},  # Carbon price: 0.2 €/kg CO2 (e.g., carbon tax)
     )

-    # CO2 emissions effect with an associated cost impact
+    # CO2 emissions effect with constraint
+    # Maximum of 1000 kg CO2/hour represents a regulatory or voluntary emissions limit
     CO2 = fx.Effect(
         label='CO2',
         unit='kg',
         description='CO2_e-Emissionen',
-        maximum_per_hour=1000,  # Max CO2 emissions per hour
+        maximum_per_hour=1000,  # Regulatory emissions limit: 1000 kg CO2/hour
     )

     # --- Define Flow System Components ---
     # Boiler: Converts fuel (gas) into thermal energy (heat)
+    # Modern condensing gas boiler with realistic efficiency
     boiler = fx.linear_converters.Boiler(
         label='Boiler',
-        eta=0.5,
+        eta=0.92,  # Realistic efficiency for modern condensing gas boiler (92%)
         Q_th=fx.Flow(
             label='Q_th',
             bus='Fernwärme',
@@ ... @@

     # Combined Heat and Power (CHP): Generates both electricity and heat from fuel
+    # Modern CHP unit with realistic efficiencies (total efficiency ~88%)
     chp = fx.linear_converters.CHP(
         label='CHP',
-        eta_th=0.5,
-        eta_el=0.4,
+        eta_th=0.48,  # Realistic thermal efficiency (48%)
+        eta_el=0.40,  # Realistic electrical efficiency (40%)
         P_el=fx.Flow('P_el', bus='Strom', size=60, relative_minimum=5 / 60, on_off_parameters=fx.OnOffParameters()),
         Q_th=fx.Flow('Q_th', bus='Fernwärme'),
         Q_fu=fx.Flow('Q_fu', bus='Gas'),
     )

-    # Storage: Energy storage system with charging and discharging capabilities
+    # Storage: Thermal energy storage system with charging and discharging capabilities
+    # Realistic thermal storage parameters (e.g., insulated hot water tank)
     storage = fx.Storage(
         label='Storage',
         charging=fx.Flow('Q_th_load', bus='Fernwärme', size=1000),
         discharging=fx.Flow('Q_th_unload', bus='Fernwärme', size=1000),
         capacity_in_flow_hours=fx.InvestParameters(effects_of_investment=20, fixed_size=30, mandatory=True),
         initial_charge_state=0,  # Initial storage state: empty
-        relative_maximum_charge_state=np.array([80, 70, 80, 80, 80, 80, 80, 80, 80]) * 0.01,
-        relative_maximum_final_charge_state=0.8,
-        eta_charge=0.9,
-        eta_discharge=1,  # Efficiency factors for charging/discharging
-        relative_loss_per_hour=0.08,  # 8% loss per hour. Absolute loss depends on current charge state
+        relative_maximum_final_charge_state=np.array([0.8, 0.5, 0.1]),
+        eta_charge=0.95,  # Realistic charging efficiency (~95%)
+        eta_discharge=0.98,  # Realistic discharging efficiency (~98%)
+        relative_loss_per_hour=np.array([0.008, 0.015]),  # Realistic thermal losses: 0.8-1.5% per hour
         prevent_simultaneous_charge_and_discharge=True,  # Prevent charging and discharging at the same time
     )

     # Heat Load Profile: Sink with a fixed heat demand profile
     heat_load = fx.Sink(
         label='Wärmelast',
         inputs=[fx.Flow(label='Q_th_Last', bus='Fernwärme', size=1, fixed_relative_profile=heat_demand_per_h)],
     )

     # Gas Source: Gas tariff source with associated costs and CO2 emissions
+    # Realistic gas prices varying by period (reflecting 2020-2022 energy crisis)
+    # 2020: 0.04 €/kWh, 2021: 0.06 €/kWh, 2022: 0.11 €/kWh
+    gas_prices_per_period = np.array([0.04, 0.06, 0.11])
+
+    # CO2 emissions factor for natural gas: ~0.202 kg CO2/kWh (realistic value)
+    gas_co2_emissions = 0.202
+
     gas_source = fx.Source(
         label='Gastarif',
         outputs=[
-            fx.Flow(label='Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={costs.label: 0.04, CO2.label: 0.3})
+            fx.Flow(
+                label='Q_Gas',
+                bus='Gas',
+                size=1000,
+                effects_per_flow_hour={costs.label: gas_prices_per_period, CO2.label: gas_co2_emissions},
+            )
         ],
     )
@@ ... @@

     # --- Solve the Calculation and Save Results ---
     calculation.solve(fx.solvers.HighsSolver(mip_gap=0, time_limit_seconds=30))

+    calculation.results.setup_colors(
+        {
+            'CHP': 'red',
+            'Greys': ['Gastarif', 'Einspeisung', 'Heat Demand'],
+            'Storage': 'blue',
+            'Boiler': 'orange',
+        }
+    )
+    calculation.results.plot_heatmap('CHP(Q_th)|flow_rate')

     # --- Analyze Results ---
-    calculation.results['Fernwärme'].plot_node_balance_pie()
-    calculation.results['Fernwärme'].plot_node_balance(style='stacked_bar')
-    calculation.results['Storage'].plot_node_balance()
+    calculation.results['Fernwärme'].plot_node_balance(mode='stacked_bar')
     calculation.results.plot_heatmap('CHP(Q_th)|flow_rate')
+    calculation.results['Storage'].plot_charge_state()
+    calculation.results['Fernwärme'].plot_node_balance_pie(select={'period': 2020, 'scenario': 'Base Case'})

     # Convert the results for the storage component to a dataframe and display
     df = calculation.results['Storage'].node_balance_with_charge_state()
     print(df)

-    # Plot charge state using matplotlib
-    fig, ax = calculation.results['Storage'].plot_charge_state(engine='matplotlib')
-    # Customize the plot further if needed
-    ax.set_title('Storage Charge State Over Time')
-    # Or save the figure
-    # fig.savefig('storage_charge_state.png')
-
     # Save results to file for later usage
     calculation.results.to_file()
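The comments in the example read `weights=np.array([0.6, 0.4])` as scenario probabilities, so per-scenario results are naturally aggregated as an expected value. A back-of-the-envelope illustration with made-up per-scenario costs (not taken from a solved run):

```python
import numpy as np

weights = np.array([0.6, 0.4])  # P(Base Case), P(High Demand)
scenario_costs = np.array([1000.0, 1400.0])  # hypothetical objective values in EUR

expected_cost = float(weights @ scenario_costs)
assert np.isclose(expected_cost, 1160.0)  # 0.6 * 1000 + 0.4 * 1400
```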
diff --git a/flixopt/components.py b/flixopt/components.py
index c40e6af88..8f89378ae 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -11,6 +11,7 @@
 import numpy as np
 import xarray as xr

+from . import io as fx_io
 from .core import PeriodicDataUser, PlausibilityError, TemporalData, TemporalDataUser
 from .elements import Component, ComponentModel, Flow
 from .features import InvestmentModel, PiecewiseModel
@@ -528,6 +529,15 @@ def _plausibility_checks(self) -> None:
                 f'{self.discharging.size.minimum_size=}, {self.discharging.size.maximum_size=}.'
             )

+    def __repr__(self) -> str:
+        """Return string representation."""
+        # Use build_repr_from_init directly to exclude charging and discharging
+        return fx_io.build_repr_from_init(
+            self,
+            excluded_params={'self', 'label', 'charging', 'discharging', 'kwargs'},
+            skip_default_size=True,
+        ) + fx_io.format_flow_details(self)
+

 @register_class_for_io
 class Transmission(Component):
@@ -1304,16 +1314,18 @@ def __init__(
         prevent_simultaneous_flow_rates: bool = False,
         **kwargs,
     ):
-        """
-        Initialize a Sink (consumes flow from the system).
-
-        Supports legacy `sink=` keyword for backward compatibility (deprecated): if `sink` is provided it is used as the single input flow and a DeprecationWarning is issued; specifying both `inputs` and `sink` raises ValueError.
-
-        Parameters:
-            label (str): Unique element label.
-            inputs (list[Flow], optional): Input flows for the sink.
-            meta_data (dict, optional): Arbitrary metadata attached to the element.
-            prevent_simultaneous_flow_rates (bool, optional): If True, prevents simultaneous nonzero flow rates across the element's inputs by wiring that restriction into the base Component setup.
+        """Initialize a Sink (consumes flow from the system).
+
+        Supports legacy `sink=` keyword for backward compatibility (deprecated): if `sink` is provided
+        it is used as the single input flow and a DeprecationWarning is issued; specifying both
+        `inputs` and `sink` raises ValueError.
+
+        Args:
+            label: Unique element label.
+            inputs: Input flows for the sink.
+            meta_data: Arbitrary metadata attached to the element.
+            prevent_simultaneous_flow_rates: If True, prevents simultaneous nonzero flow rates
+                across the element's inputs by wiring that restriction into the base Component setup.

         Note:
             The deprecated `sink` kwarg is accepted for compatibility but will be removed in future releases.
diff --git a/flixopt/config.py b/flixopt/config.py
index a7549a3ec..670f86da2 100644
--- a/flixopt/config.py
+++ b/flixopt/config.py
@@ -8,7 +8,6 @@
 from types import MappingProxyType
 from typing import Literal

-import yaml
 from rich.console import Console
 from rich.logging import RichHandler
 from rich.style import Style
@@ -54,6 +53,16 @@
             'big_binary_bound': 100_000,
         }
     ),
+    'plotting': MappingProxyType(
+        {
+            'default_show': True,
+            'default_engine': 'plotly',
+            'default_dpi': 300,
+            'default_facet_cols': 3,
+            'default_sequential_colorscale': 'turbo',
+            'default_qualitative_colorscale': 'plotly',
+        }
+    ),
 }
 )
@@ -185,6 +194,42 @@ class Modeling:
         epsilon: float = _DEFAULTS['modeling']['epsilon']
         big_binary_bound: int = _DEFAULTS['modeling']['big_binary_bound']

+    class Plotting:
+        """Plotting configuration.
+
+        Configure backends via environment variables:
+        - Matplotlib: Set `MPLBACKEND` environment variable (e.g., 'Agg', 'TkAgg')
+        - Plotly: Set `PLOTLY_RENDERER` or use `plotly.io.renderers.default`
+
+        Attributes:
+            default_show: Default value for the `show` parameter in plot methods.
+            default_engine: Default plotting engine.
+            default_dpi: Default DPI for saved plots.
+            default_facet_cols: Default number of columns for faceted plots.
+            default_sequential_colorscale: Default colorscale for heatmaps and continuous data.
+            default_qualitative_colorscale: Default colorscale for categorical plots (bar/line/area charts).
+ + Examples: + ```python + # Set consistent theming + CONFIG.Plotting.plotly_template = 'plotly_dark' + CONFIG.apply() + + # Configure default export and color settings + CONFIG.Plotting.default_dpi = 600 + CONFIG.Plotting.default_sequential_colorscale = 'plasma' + CONFIG.Plotting.default_qualitative_colorscale = 'Dark24' + CONFIG.apply() + ``` + """ + + default_show: bool = _DEFAULTS['plotting']['default_show'] + default_engine: Literal['plotly', 'matplotlib'] = _DEFAULTS['plotting']['default_engine'] + default_dpi: int = _DEFAULTS['plotting']['default_dpi'] + default_facet_cols: int = _DEFAULTS['plotting']['default_facet_cols'] + default_sequential_colorscale: str = _DEFAULTS['plotting']['default_sequential_colorscale'] + default_qualitative_colorscale: str = _DEFAULTS['plotting']['default_qualitative_colorscale'] + config_name: str = _DEFAULTS['config_name'] @classmethod @@ -253,13 +298,15 @@ def load_from_file(cls, config_file: str | Path): Raises: FileNotFoundError: If the config file does not exist. """ + # Import here to avoid circular import + from . import io as fx_io + config_path = Path(config_file) if not config_path.exists(): raise FileNotFoundError(f'Config file not found: {config_file}') - with config_path.open() as file: - config_dict = yaml.safe_load(file) or {} - cls._apply_config_dict(config_dict) + config_dict = fx_io.load_yaml(config_path) + cls._apply_config_dict(config_dict) cls.apply() @@ -319,6 +366,14 @@ def to_dict(cls) -> dict: 'epsilon': cls.Modeling.epsilon, 'big_binary_bound': cls.Modeling.big_binary_bound, }, + 'plotting': { + 'default_show': cls.Plotting.default_show, + 'default_engine': cls.Plotting.default_engine, + 'default_dpi': cls.Plotting.default_dpi, + 'default_facet_cols': cls.Plotting.default_facet_cols, + 'default_sequential_colorscale': cls.Plotting.default_sequential_colorscale, + 'default_qualitative_colorscale': cls.Plotting.default_qualitative_colorscale, + }, } diff --git a/flixopt/elements.py b/flixopt/elements.py index 25e399811..2a9a2cf4f 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -11,6 +11,7 @@ import numpy as np import xarray as xr +from . import io as fx_io from .config import CONFIG from .core import PlausibilityError, Scalar, TemporalData, TemporalDataUser from .features import InvestmentModel, OnOffModel @@ -86,10 +87,12 @@ def __init__( super().__init__(label, meta_data=meta_data) self.inputs: list[Flow] = inputs or [] self.outputs: list[Flow] = outputs or [] - self._check_unique_flow_labels() self.on_off_parameters = on_off_parameters self.prevent_simultaneous_flows: list[Flow] = prevent_simultaneous_flows or [] + self._check_unique_flow_labels() + self._connect_flows() + self.flows: dict[str, Flow] = {flow.label: flow for flow in self.inputs + self.outputs} def create_model(self, model: FlowSystemModel) -> ComponentModel: @@ -115,6 +118,48 @@ def _check_unique_flow_labels(self): def _plausibility_checks(self) -> None: self._check_unique_flow_labels() + def _connect_flows(self): + # Inputs + for flow in self.inputs: + if flow.component not in ('UnknownComponent', self.label_full): + raise ValueError( + f'Flow "{flow.label}" already assigned to component "{flow.component}". ' + f'Cannot attach to "{self.label_full}".' + ) + flow.component = self.label_full + flow.is_input_in_component = True + # Outputs + for flow in self.outputs: + if flow.component not in ('UnknownComponent', self.label_full): + raise ValueError( + f'Flow "{flow.label}" already assigned to component "{flow.component}". 
' + f'Cannot attach to "{self.label_full}".' + ) + flow.component = self.label_full + flow.is_input_in_component = False + + # Validate prevent_simultaneous_flows: only allow local flows + if self.prevent_simultaneous_flows: + # Deduplicate while preserving order + seen = set() + self.prevent_simultaneous_flows = [ + f for f in self.prevent_simultaneous_flows if id(f) not in seen and not seen.add(id(f)) + ] + local = set(self.inputs + self.outputs) + foreign = [f for f in self.prevent_simultaneous_flows if f not in local] + if foreign: + names = ', '.join(f.label_full for f in foreign) + raise ValueError( + f'prevent_simultaneous_flows for "{self.label_full}" must reference its own flows. ' + f'Foreign flows detected: {names}' + ) + + def __repr__(self) -> str: + """Return string representation with flow information.""" + return fx_io.build_repr_from_init( + self, excluded_params={'self', 'label', 'inputs', 'outputs', 'kwargs'}, skip_default_size=True + ) + fx_io.format_flow_details(self) + @register_class_for_io class Bus(Element): @@ -207,11 +252,19 @@ def _plausibility_checks(self) -> None: logger.warning( f'In Bus {self.label_full}, the excess_penalty_per_flow_hour is 0. Use "None" or a value > 0.' ) + if len(self.inputs) == 0 and len(self.outputs) == 0: + raise ValueError( + f'Bus "{self.label_full}" has no Flows connected to it. Please remove it from the FlowSystem' + ) @property def with_excess(self) -> bool: return False if self.excess_penalty_per_flow_hour is None else True + def __repr__(self) -> str: + """Return string representation.""" + return super().__repr__() + fx_io.format_flow_details(self) + @register_class_for_io class Connection: @@ -489,6 +542,10 @@ def size_is_fixed(self) -> bool: # Wenn kein InvestParameters existiert --> True; Wenn Investparameter, den Wert davon nehmen return False if (isinstance(self.size, InvestParameters) and self.size.fixed_size is None) else True + def _format_invest_params(self, params: InvestParameters) -> str: + """Format InvestParameters for display.""" + return f'size: {params.format_for_repr()}' + class FlowModel(ElementModel): element: Flow # Type hint diff --git a/flixopt/plotting.py b/flixopt/plotting.py index 356f013c0..045cf7e99 100644 --- a/flixopt/plotting.py +++ b/flixopt/plotting.py @@ -39,14 +39,17 @@ import plotly.express as px import plotly.graph_objects as go import plotly.offline -from plotly.exceptions import PlotlyError +import xarray as xr + +from .color_processing import process_colors +from .config import CONFIG if TYPE_CHECKING: import pyvis logger = logging.getLogger('flixopt') -# Define the colors for the 'portland' colormap in matplotlib +# Define the colors for the 'portland' colorscale in matplotlib _portland_colors = [ [12 / 255, 51 / 255, 131 / 255], # Dark blue [10 / 255, 136 / 255, 186 / 255], # Light blue @@ -55,7 +58,7 @@ [217 / 255, 30 / 255, 30 / 255], # Red ] -# Check if the colormap already exists before registering it +# Check if the colorscale already exists before registering it if hasattr(plt, 'colormaps'): # Matplotlib >= 3.7 registry = plt.colormaps if 'portland' not in registry: @@ -70,9 +73,9 @@ Color specifications can take several forms to accommodate different use cases: -**Named Colormaps** (str): - - Standard colormaps: 'viridis', 'plasma', 'cividis', 'tab10', 'Set1' - - Energy-focused: 'portland' (custom flixopt colormap for energy systems) +**Named colorscales** (str): + - Standard colorscales: 'turbo', 'plasma', 'cividis', 'tab10', 'Set1' + - Energy-focused: 'portland' (custom flixopt 
colorscale for energy systems) - Backend-specific maps available in Plotly and Matplotlib **Color Lists** (list[str]): @@ -87,8 +90,8 @@ Examples: ```python - # Named colormap - colors = 'viridis' # Automatic color generation + # Named colorscale + colors = 'turbo' # Automatic color generation # Explicit color list colors = ['red', 'blue', 'green', '#FFD700'] @@ -111,7 +114,7 @@ References: - HTML Color Names: https://htmlcolorcodes.com/color-names/ - - Matplotlib Colormaps: https://matplotlib.org/stable/tutorials/colors/colormaps.html + - Matplotlib colorscales: https://matplotlib.org/stable/tutorials/colors/colorscales.html - Plotly Built-in Colorscales: https://plotly.com/python/builtin-colorscales/ """ @@ -119,432 +122,520 @@ """Identifier for the plotting engine to use.""" -class ColorProcessor: - """Intelligent color management system for consistent multi-backend visualization. +def _ensure_dataset(data: xr.Dataset | pd.DataFrame | pd.Series) -> xr.Dataset: + """Convert DataFrame or Series to Dataset if needed.""" + if isinstance(data, xr.Dataset): + return data + elif isinstance(data, pd.DataFrame): + # Convert DataFrame to Dataset + return data.to_xarray() + elif isinstance(data, pd.Series): + # Convert Series to DataFrame first, then to Dataset + return data.to_frame().to_xarray() + else: + raise TypeError(f'Data must be xr.Dataset, pd.DataFrame, or pd.Series, got {type(data).__name__}') - This class provides unified color processing across Plotly and Matplotlib backends, - ensuring consistent visual appearance regardless of the plotting engine used. - It handles color palette generation, named colormap translation, and intelligent - color cycling for complex datasets with many categories. - Key Features: - **Backend Agnostic**: Automatic color format conversion between engines - **Palette Management**: Support for named colormaps, custom palettes, and color lists - **Intelligent Cycling**: Smart color assignment for datasets with many categories - **Fallback Handling**: Graceful degradation when requested colormaps are unavailable - **Energy System Colors**: Built-in palettes optimized for energy system visualization +def _validate_plotting_data(data: xr.Dataset, allow_empty: bool = False) -> None: + """Validate dataset for plotting (checks for empty data, non-numeric types, etc.).""" + # Check for empty data + if not allow_empty and len(data.data_vars) == 0: + raise ValueError('Empty Dataset provided (no variables). Cannot create plot.') + + # Check if dataset has any data (xarray uses nbytes for total size) + if all(data[var].size == 0 for var in data.data_vars) if len(data.data_vars) > 0 else True: + if not allow_empty and len(data.data_vars) > 0: + raise ValueError('Dataset has zero size. Cannot create plot.') + if len(data.data_vars) == 0: + return # Empty dataset, nothing to validate + return + + # Check for non-numeric data types + for var in data.data_vars: + dtype = data[var].dtype + if not np.issubdtype(dtype, np.number): + raise TypeError( + f"Variable '{var}' has non-numeric dtype '{dtype}'. " + f'Plotting requires numeric data types (int, float, etc.).' + ) - Color Input Types: - - **Named Colormaps**: 'viridis', 'plasma', 'portland', 'tab10', etc. 
- - **Color Lists**: ['red', 'blue', 'green'] or ['#FF0000', '#0000FF', '#00FF00'] - - **Label Dictionaries**: {'Generator': 'red', 'Storage': 'blue', 'Load': 'green'} + # Warn about NaN/Inf values + for var in data.data_vars: + if np.isnan(data[var].values).any(): + logger.debug(f"Variable '{var}' contains NaN values which may affect visualization.") + if np.isinf(data[var].values).any(): + logger.debug(f"Variable '{var}' contains Inf values which may affect visualization.") - Examples: - Basic color processing: - ```python - # Initialize for Plotly backend - processor = ColorProcessor(engine='plotly', default_colormap='viridis') +def with_plotly( + data: xr.Dataset | pd.DataFrame | pd.Series, + mode: Literal['stacked_bar', 'line', 'area', 'grouped_bar'] = 'stacked_bar', + colors: ColorType | None = None, + title: str = '', + ylabel: str = '', + xlabel: str = '', + facet_by: str | list[str] | None = None, + animate_by: str | None = None, + facet_cols: int | None = None, + shared_yaxes: bool = True, + shared_xaxes: bool = True, + **px_kwargs: Any, +) -> go.Figure: + """ + Plot data with Plotly using facets (subplots) and/or animation for multidimensional data. - # Process different color specifications - colors = processor.process_colors('plasma', ['Gen1', 'Gen2', 'Storage']) - colors = processor.process_colors(['red', 'blue', 'green'], ['A', 'B', 'C']) - colors = processor.process_colors({'Wind': 'skyblue', 'Solar': 'gold'}, ['Wind', 'Solar', 'Gas']) + Uses Plotly Express for convenient faceting and animation with automatic styling. - # Switch to Matplotlib - processor = ColorProcessor(engine='matplotlib') - mpl_colors = processor.process_colors('tab10', component_labels) - ``` + Args: + data: An xarray Dataset, pandas DataFrame, or pandas Series to plot. + mode: The plotting mode. Use 'stacked_bar' for stacked bar charts, 'line' for lines, + 'area' for stacked area charts, or 'grouped_bar' for grouped bar charts. + colors: Color specification (colorscale, list, or dict mapping labels to colors). + title: The main title of the plot. + ylabel: The label for the y-axis. + xlabel: The label for the x-axis. + facet_by: Dimension(s) to create facets for. Creates a subplot grid. + Can be a single dimension name or list of dimensions (max 2 for facet_row and facet_col). + If the dimension doesn't exist in the data, it will be silently ignored. + animate_by: Dimension to animate over. Creates animation frames. + If the dimension doesn't exist in the data, it will be silently ignored. + facet_cols: Number of columns in the facet grid (used when facet_by is single dimension). + shared_yaxes: Whether subplots share y-axes. + shared_xaxes: Whether subplots share x-axes. + **px_kwargs: Additional keyword arguments passed to the underlying Plotly Express function + (px.bar, px.line, px.area). These override default arguments if provided. + Examples: range_x=[0, 100], range_y=[0, 50], category_orders={...}, line_shape='linear' - Energy system visualization: + Returns: + A Plotly figure object containing the faceted/animated plot. You can further customize + the returned figure using Plotly's methods (e.g., fig.update_traces(), fig.update_layout()). 
+ + Examples: + Simple plot: ```python - # Specialized energy system palette - energy_colors = { - 'Natural_Gas': '#8B4513', # Brown - 'Electricity': '#FFD700', # Gold - 'Heat': '#FF4500', # Red-orange - 'Cooling': '#87CEEB', # Sky blue - 'Hydrogen': '#E6E6FA', # Lavender - 'Battery': '#32CD32', # Lime green - } + fig = with_plotly(dataset, mode='area', title='Energy Mix') + ``` + + Facet by scenario: - processor = ColorProcessor('plotly') - flow_colors = processor.process_colors(energy_colors, flow_labels) + ```python + fig = with_plotly(dataset, facet_by='scenario', facet_cols=2) ``` - Args: - engine: Plotting backend ('plotly' or 'matplotlib'). Determines output color format. - default_colormap: Fallback colormap when requested palettes are unavailable. - Common options: 'viridis', 'plasma', 'tab10', 'portland'. + Animate by period: - """ + ```python + fig = with_plotly(dataset, animate_by='period') + ``` - def __init__(self, engine: PlottingEngine = 'plotly', default_colormap: str = 'viridis'): - """Initialize the color processor with specified backend and defaults.""" - if engine not in ['plotly', 'matplotlib']: - raise TypeError(f'engine must be "plotly" or "matplotlib", but is {engine}') - self.engine = engine - self.default_colormap = default_colormap - - def _generate_colors_from_colormap(self, colormap_name: str, num_colors: int) -> list[Any]: - """ - Generate colors from a named colormap. - - Args: - colormap_name: Name of the colormap - num_colors: Number of colors to generate - - Returns: - list of colors in the format appropriate for the engine - """ - if self.engine == 'plotly': - try: - colorscale = px.colors.get_colorscale(colormap_name) - except PlotlyError as e: - logger.error(f"Colorscale '{colormap_name}' not found in Plotly. Using {self.default_colormap}: {e}") - colorscale = px.colors.get_colorscale(self.default_colormap) - - # Generate evenly spaced points - color_points = [i / (num_colors - 1) for i in range(num_colors)] if num_colors > 1 else [0] - return px.colors.sample_colorscale(colorscale, color_points) - - else: # matplotlib - try: - cmap = plt.get_cmap(colormap_name, num_colors) - except ValueError as e: - logger.error(f"Colormap '{colormap_name}' not found in Matplotlib. Using {self.default_colormap}: {e}") - cmap = plt.get_cmap(self.default_colormap, num_colors) - - return [cmap(i) for i in range(num_colors)] - - def _handle_color_list(self, colors: list[str], num_labels: int) -> list[str]: - """ - Handle a list of colors, cycling if necessary. - - Args: - colors: list of color strings - num_labels: Number of labels that need colors - - Returns: - list of colors matching the number of labels - """ - if len(colors) == 0: - logger.error(f'Empty color list provided. Using {self.default_colormap} instead.') - return self._generate_colors_from_colormap(self.default_colormap, num_labels) - - if len(colors) < num_labels: - logger.warning( - f'Not enough colors provided ({len(colors)}) for all labels ({num_labels}). Colors will cycle.' - ) - # Cycle through the colors - color_iter = itertools.cycle(colors) - return [next(color_iter) for _ in range(num_labels)] - else: - # Trim if necessary - if len(colors) > num_labels: - logger.warning( - f'More colors provided ({len(colors)}) than labels ({num_labels}). Extra colors will be ignored.' - ) - return colors[:num_labels] - - def _handle_color_dict(self, colors: dict[str, str], labels: list[str]) -> list[str]: - """ - Handle a dictionary mapping labels to colors. 
- - Args: - colors: Dictionary mapping labels to colors - labels: list of labels that need colors - - Returns: - list of colors in the same order as labels - """ - if len(colors) == 0: - logger.warning(f'Empty color dictionary provided. Using {self.default_colormap} instead.') - return self._generate_colors_from_colormap(self.default_colormap, len(labels)) - - # Find missing labels - missing_labels = sorted(set(labels) - set(colors.keys())) - if missing_labels: - logger.warning( - f'Some labels have no color specified: {missing_labels}. Using {self.default_colormap} for these.' - ) + Facet and animate: - # Generate colors for missing labels - missing_colors = self._generate_colors_from_colormap(self.default_colormap, len(missing_labels)) + ```python + fig = with_plotly(dataset, facet_by='scenario', animate_by='period') + ``` - # Create a copy to avoid modifying the original - colors_copy = colors.copy() - for i, label in enumerate(missing_labels): - colors_copy[label] = missing_colors[i] - else: - colors_copy = colors - - # Create color list in the same order as labels - return [colors_copy[label] for label in labels] - - def process_colors( - self, - colors: ColorType, - labels: list[str], - return_mapping: bool = False, - ) -> list[Any] | dict[str, Any]: - """ - Process colors for the specified labels. - - Args: - colors: Color specification (colormap name, list of colors, or label-to-color mapping) - labels: list of data labels that need colors assigned - return_mapping: If True, returns a dictionary mapping labels to colors; - if False, returns a list of colors in the same order as labels - - Returns: - Either a list of colors or a dictionary mapping labels to colors - """ - if len(labels) == 0: - logger.error('No labels provided for color assignment.') - return {} if return_mapping else [] - - # Process based on type of colors input - if isinstance(colors, str): - color_list = self._generate_colors_from_colormap(colors, len(labels)) - elif isinstance(colors, list): - color_list = self._handle_color_list(colors, len(labels)) - elif isinstance(colors, dict): - color_list = self._handle_color_dict(colors, labels) - else: - logger.error( - f'Unsupported color specification type: {type(colors)}. Using {self.default_colormap} instead.' - ) - color_list = self._generate_colors_from_colormap(self.default_colormap, len(labels)) + Customize with Plotly Express kwargs: - # Return either a list or a mapping - if return_mapping: - return {label: color_list[i] for i, label in enumerate(labels)} - else: - return color_list + ```python + fig = with_plotly(dataset, range_y=[0, 100], line_shape='linear') + ``` + Further customize the returned figure: -def with_plotly( - data: pd.DataFrame, - style: Literal['stacked_bar', 'line', 'area', 'grouped_bar'] = 'stacked_bar', - colors: ColorType = 'viridis', - title: str = '', - ylabel: str = '', - xlabel: str = 'Time in h', - fig: go.Figure | None = None, -) -> go.Figure: + ```python + fig = with_plotly(dataset, mode='line') + fig.update_traces(line={'width': 5, 'dash': 'dot'}) + fig.update_layout(template='plotly_dark', width=1200, height=600) + ``` """ - Plot a DataFrame with Plotly, using either stacked bars or stepped lines. + if colors is None: + colors = CONFIG.Plotting.default_qualitative_colorscale - Args: - data: A DataFrame containing the data to plot, where the index represents time (e.g., hours), - and each column represents a separate data series. - style: The plotting style. 
Use 'stacked_bar' for stacked bar charts, 'line' for stepped lines, - or 'area' for stacked area charts. - colors: Color specification, can be: - - A string with a colorscale name (e.g., 'viridis', 'plasma') - - A list of color strings (e.g., ['#ff0000', '#00ff00']) - - A dictionary mapping column names to colors (e.g., {'Column1': '#ff0000'}) - title: The title of the plot. - ylabel: The label for the y-axis. - xlabel: The label for the x-axis. - fig: A Plotly figure object to plot on. If not provided, a new figure will be created. + if mode not in ('stacked_bar', 'line', 'area', 'grouped_bar'): + raise ValueError(f"'mode' must be one of {{'stacked_bar','line','area', 'grouped_bar'}}, got {mode!r}") - Returns: - A Plotly figure object containing the generated plot. - """ - if style not in ('stacked_bar', 'line', 'area', 'grouped_bar'): - raise ValueError(f"'style' must be one of {{'stacked_bar','line','area', 'grouped_bar'}}, got {style!r}") - if data.empty: - return go.Figure() + # Apply CONFIG defaults if not explicitly set + if facet_cols is None: + facet_cols = CONFIG.Plotting.default_facet_cols - processed_colors = ColorProcessor(engine='plotly').process_colors(colors, list(data.columns)) + # Ensure data is a Dataset and validate it + data = _ensure_dataset(data) + _validate_plotting_data(data, allow_empty=True) - fig = fig if fig is not None else go.Figure() + # Handle empty data + if len(data.data_vars) == 0: + logger.error('with_plotly() got an empty Dataset.') + return go.Figure() - if style == 'stacked_bar': - for i, column in enumerate(data.columns): - fig.add_trace( - go.Bar( - x=data.index, - y=data[column], - name=column, - marker=dict( - color=processed_colors[i], line=dict(width=0, color='rgba(0,0,0,0)') - ), # Transparent line with 0 width - ) - ) + # Handle all-scalar datasets (where all variables have no dimensions) + # This occurs when all variables are scalar values with dims=() + if all(len(data[var].dims) == 0 for var in data.data_vars): + # Create a simple DataFrame with variable names as x-axis + variables = list(data.data_vars.keys()) + values = [float(data[var].values) for var in data.data_vars] - fig.update_layout( - barmode='relative', - bargap=0, # No space between bars - bargroupgap=0, # No space between grouped bars - ) - if style == 'grouped_bar': - for i, column in enumerate(data.columns): - fig.add_trace(go.Bar(x=data.index, y=data[column], name=column, marker=dict(color=processed_colors[i]))) - - fig.update_layout( - barmode='group', - bargap=0.2, # No space between bars - bargroupgap=0, # space between grouped bars + # Resolve colors + color_discrete_map = process_colors( + colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale ) - elif style == 'line': - for i, column in enumerate(data.columns): - fig.add_trace( - go.Scatter( - x=data.index, - y=data[column], - mode='lines', - name=column, - line=dict(shape='hv', color=processed_colors[i]), - ) + marker_colors = [color_discrete_map.get(var, '#636EFA') for var in variables] + + # Create simple plot based on mode using go (not px) for better color control + if mode in ('stacked_bar', 'grouped_bar'): + fig = go.Figure(data=[go.Bar(x=variables, y=values, marker_color=marker_colors)]) + elif mode == 'line': + fig = go.Figure( + data=[ + go.Scatter( + x=variables, + y=values, + mode='lines+markers', + marker=dict(color=marker_colors, size=8), + line=dict(color='lightgray'), + ) + ] ) - elif style == 'area': - data = data.copy() - data[(data > -1e-5) & (data < 1e-5)] = 0 # 
Preventing issues with plotting - # Split columns into positive, negative, and mixed categories - positive_columns = list(data.columns[(data >= 0).where(~np.isnan(data), True).all()]) - negative_columns = list(data.columns[(data <= 0).where(~np.isnan(data), True).all()]) - negative_columns = [column for column in negative_columns if column not in positive_columns] - mixed_columns = list(set(data.columns) - set(positive_columns + negative_columns)) - - if mixed_columns: - logger.error( - f'Data for plotting stacked lines contains columns with both positive and negative values:' - f' {mixed_columns}. These can not be stacked, and are printed as simple lines' + elif mode == 'area': + fig = go.Figure( + data=[ + go.Scatter( + x=variables, + y=values, + fill='tozeroy', + marker=dict(color=marker_colors, size=8), + line=dict(color='lightgray'), + ) + ] ) - - # Get color mapping for all columns - colors_stacked = {column: processed_colors[i] for i, column in enumerate(data.columns)} - - for column in positive_columns + negative_columns: - fig.add_trace( - go.Scatter( - x=data.index, - y=data[column], - mode='lines', - name=column, - line=dict(shape='hv', color=colors_stacked[column]), - fill='tonexty', - stackgroup='pos' if column in positive_columns else 'neg', + else: + raise ValueError('"mode" must be one of "stacked_bar", "grouped_bar", "line", "area"') + + fig.update_layout(title=title, xaxis_title=xlabel, yaxis_title=ylabel, showlegend=False) + return fig + + # Convert Dataset to long-form DataFrame for Plotly Express + # Structure: time, variable, value, scenario, period, ... (all dims as columns) + dim_names = list(data.dims) + df_long = data.to_dataframe().reset_index().melt(id_vars=dim_names, var_name='variable', value_name='value') + + # Validate facet_by and animate_by dimensions exist in the data + available_dims = [col for col in df_long.columns if col not in ['variable', 'value']] + + # Check facet_by dimensions + if facet_by is not None: + if isinstance(facet_by, str): + if facet_by not in available_dims: + logger.debug( + f"Dimension '{facet_by}' not found in data. Available dimensions: {available_dims}. " + f'Ignoring facet_by parameter.' ) - ) - - for column in mixed_columns: - fig.add_trace( - go.Scatter( - x=data.index, - y=data[column], - mode='lines', - name=column, - line=dict(shape='hv', color=colors_stacked[column], dash='dash'), + facet_by = None + elif isinstance(facet_by, list): + # Filter out dimensions that don't exist + missing_dims = [dim for dim in facet_by if dim not in available_dims] + facet_by = [dim for dim in facet_by if dim in available_dims] + if missing_dims: + logger.debug( + f'Dimensions {missing_dims} not found in data. Available dimensions: {available_dims}. ' + f'Using only existing dimensions: {facet_by if facet_by else "none"}.' ) - ) + if len(facet_by) == 0: + facet_by = None + + # Check animate_by dimension + if animate_by is not None and animate_by not in available_dims: + logger.debug( + f"Dimension '{animate_by}' not found in data. Available dimensions: {available_dims}. " + f'Ignoring animate_by parameter.' 
+ ) + animate_by = None + + # Setup faceting parameters for Plotly Express + facet_row = None + facet_col = None + if facet_by: + if isinstance(facet_by, str): + # Single facet dimension - use facet_col with facet_col_wrap + facet_col = facet_by + elif len(facet_by) == 1: + facet_col = facet_by[0] + elif len(facet_by) == 2: + # Two facet dimensions - use facet_row and facet_col + facet_row = facet_by[0] + facet_col = facet_by[1] + else: + raise ValueError(f'facet_by can have at most 2 dimensions, got {len(facet_by)}') - # Update layout for better aesthetics - fig.update_layout( - title=title, - yaxis=dict( - title=ylabel, - showgrid=True, # Enable grid lines on the y-axis - gridcolor='lightgrey', # Customize grid line color - gridwidth=0.5, # Customize grid line width - ), - xaxis=dict( - title=xlabel, - showgrid=True, # Enable grid lines on the x-axis - gridcolor='lightgrey', # Customize grid line color - gridwidth=0.5, # Customize grid line width - ), - plot_bgcolor='rgba(0,0,0,0)', # Transparent background - paper_bgcolor='rgba(0,0,0,0)', # Transparent paper background - font=dict(size=14), # Increase font size for better readability + # Process colors + all_vars = df_long['variable'].unique().tolist() + color_discrete_map = process_colors( + colors, all_vars, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale ) + # Determine which dimension to use for x-axis + # Collect dimensions used for faceting and animation + used_dims = set() + if facet_row: + used_dims.add(facet_row) + if facet_col: + used_dims.add(facet_col) + if animate_by: + used_dims.add(animate_by) + + # Find available dimensions for x-axis (not used for faceting/animation) + x_candidates = [d for d in available_dims if d not in used_dims] + + # Use 'time' if available, otherwise use the first available dimension + if 'time' in x_candidates: + x_dim = 'time' + elif len(x_candidates) > 0: + x_dim = x_candidates[0] + else: + # Fallback: use the first dimension (shouldn't happen in normal cases) + x_dim = available_dims[0] if available_dims else 'time' + + # Create plot using Plotly Express based on mode + common_args = { + 'data_frame': df_long, + 'x': x_dim, + 'y': 'value', + 'color': 'variable', + 'facet_row': facet_row, + 'facet_col': facet_col, + 'animation_frame': animate_by, + 'color_discrete_map': color_discrete_map, + 'title': title, + 'labels': {'value': ylabel, x_dim: xlabel, 'variable': ''}, + } + + # Add facet_col_wrap for single facet dimension + if facet_col and not facet_row: + common_args['facet_col_wrap'] = facet_cols + + # Add mode-specific defaults (before px_kwargs so they can be overridden) + if mode in ('line', 'area'): + common_args['line_shape'] = 'hv' # Stepped lines by default + + # Allow callers to pass any px.* keyword args (e.g., category_orders, range_x/y, line_shape) + # These will override the defaults set above + if px_kwargs: + common_args.update(px_kwargs) + + if mode == 'stacked_bar': + fig = px.bar(**common_args) + fig.update_traces(marker_line_width=0) + fig.update_layout(barmode='relative', bargap=0, bargroupgap=0) + elif mode == 'grouped_bar': + fig = px.bar(**common_args) + fig.update_layout(barmode='group', bargap=0.2, bargroupgap=0) + elif mode == 'line': + fig = px.line(**common_args) + elif mode == 'area': + # Use Plotly Express to create the area plot (preserves animation, legends, faceting) + fig = px.area(**common_args) + + # Classify each variable based on its values + variable_classification = {} + for var in all_vars: + var_data = df_long[df_long['variable'] == 
var]['value']
+                var_data_clean = var_data[(var_data < -1e-5) | (var_data > 1e-5)]
+
+                if len(var_data_clean) == 0:
+                    variable_classification[var] = 'zero'
+                else:
+                    has_pos, has_neg = (var_data_clean > 0).any(), (var_data_clean < 0).any()
+                    variable_classification[var] = (
+                        'mixed' if has_pos and has_neg else ('negative' if has_neg else 'positive')
+                    )
+
+        # Log warning for mixed variables
+        mixed_vars = [v for v, c in variable_classification.items() if c == 'mixed']
+        if mixed_vars:
+            logger.warning(f'Variables with both positive and negative values: {mixed_vars}. Plotted as dashed lines.')
+
+        all_traces = list(fig.data)
+        for frame in fig.frames:
+            all_traces.extend(frame.data)
+
+        for trace in all_traces:
+            cls = variable_classification.get(trace.name, None)
+            # Only stack positive and negative, not mixed or zero
+            trace.stackgroup = cls if cls in ('positive', 'negative') else None
+
+            if cls in ('positive', 'negative'):
+                # Stacked area: reuse the line color as an opaque fill and remove the border line
+                if hasattr(trace, 'line') and trace.line.color:
+                    trace.fillcolor = trace.line.color
+                    trace.line.width = 0
+            elif cls == 'mixed':
+                # Mixed variables: show as dashed line, not stacked
+                if hasattr(trace, 'line'):
+                    trace.line.width = 2
+                    trace.line.dash = 'dash'
+                if hasattr(trace, 'fill'):
+                    trace.fill = None
+
+    # Update axes to share if requested (Plotly Express already handles this, but we can customize)
+    if not shared_yaxes:
+        fig.update_yaxes(matches=None)
+    if not shared_xaxes:
+        fig.update_xaxes(matches=None)
+
     return fig


 def with_matplotlib(
-    data: pd.DataFrame,
-    style: Literal['stacked_bar', 'line'] = 'stacked_bar',
-    colors: ColorType = 'viridis',
+    data: xr.Dataset | pd.DataFrame | pd.Series,
+    mode: Literal['stacked_bar', 'line'] = 'stacked_bar',
+    colors: ColorType | None = None,
     title: str = '',
     ylabel: str = '',
     xlabel: str = 'Time in h',
     figsize: tuple[int, int] = (12, 6),
-    fig: plt.Figure | None = None,
-    ax: plt.Axes | None = None,
+    plot_kwargs: dict[str, Any] | None = None,
 ) -> tuple[plt.Figure, plt.Axes]:
     """
-    Plot a DataFrame with Matplotlib using stacked bars or stepped lines.
+    Plot data with Matplotlib using stacked bars or stepped lines.

     Args:
-        data: A DataFrame containing the data to plot. The index should represent time (e.g., hours),
-            and each column represents a separate data series.
-        style: Plotting style. Use 'stacked_bar' for stacked bar charts or 'line' for stepped lines.
-        colors: Color specification, can be:
-            - A string with a colormap name (e.g., 'viridis', 'plasma')
+        data: An xarray Dataset, pandas DataFrame, or pandas Series to plot. After conversion to DataFrame,
+            the index represents time and each column represents a separate data series (variables).
+        mode: Plotting mode. Use 'stacked_bar' for stacked bar charts or 'line' for stepped lines.
+        colors: Color specification. Can be:
+            - A colorscale name (e.g., 'turbo', 'plasma')
             - A list of color strings (e.g., ['#ff0000', '#00ff00'])
-            - A dictionary mapping column names to colors (e.g., {'Column1': '#ff0000'})
+            - A dict mapping column names to colors (e.g., {'Column1': '#ff0000'})
         title: The title of the plot.
         ylabel: The ylabel of the plot.
         xlabel: The xlabel of the plot.
-        figsize: Specify the size of the figure
-        fig: A Matplotlib figure object to plot on. If not provided, a new figure will be created.
-        ax: A Matplotlib axes object to plot on. If not provided, a new axes will be created.
+        figsize: Specify the size of the figure (width, height) in inches.
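# A minimal usage sketch for the Dataset-based with_plotly() shown above; the
# dataset, its 'scenario' dimension and the variable names ('Boiler', 'CHP')
# are illustrative assumptions, not part of the patch.
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2024-01-01', periods=48, freq='h')
ds = xr.Dataset(
    {
        'Boiler': (('time', 'scenario'), np.random.rand(48, 2)),
        'CHP': (('time', 'scenario'), np.random.rand(48, 2)),
    },
    coords={'time': time, 'scenario': ['base', 'high']},
)

# One facet column per scenario; the variables become stepped, stacked areas
fig = with_plotly(ds, mode='area', facet_by='scenario', title='Dispatch per scenario')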
+ plot_kwargs: Optional dict of parameters to pass to ax.bar() or ax.step() plotting calls. + Use this to customize plot properties (e.g., linewidth, alpha, edgecolor). Returns: A tuple containing the Matplotlib figure and axes objects used for the plot. Notes: - - If `style` is 'stacked_bar', bars are stacked for both positive and negative values. + - If `mode` is 'stacked_bar', bars are stacked for both positive and negative values. Negative values are stacked separately without extra labels in the legend. - - If `style` is 'line', stepped lines are drawn for each data series. + - If `mode` is 'line', stepped lines are drawn for each data series. """ - if style not in ('stacked_bar', 'line'): - raise ValueError(f"'style' must be one of {{'stacked_bar','line'}} for matplotlib, got {style!r}") + if colors is None: + colors = CONFIG.Plotting.default_qualitative_colorscale - if fig is None or ax is None: - fig, ax = plt.subplots(figsize=figsize) + if mode not in ('stacked_bar', 'line'): + raise ValueError(f"'mode' must be one of {{'stacked_bar','line'}} for matplotlib, got {mode!r}") + + # Ensure data is a Dataset and validate it + data = _ensure_dataset(data) + _validate_plotting_data(data, allow_empty=True) + + # Create new figure and axes + fig, ax = plt.subplots(figsize=figsize) + + # Initialize plot_kwargs if not provided + if plot_kwargs is None: + plot_kwargs = {} + + # Handle all-scalar datasets (where all variables have no dimensions) + # This occurs when all variables are scalar values with dims=() + if all(len(data[var].dims) == 0 for var in data.data_vars): + # Create simple bar/line plot with variable names as x-axis + variables = list(data.data_vars.keys()) + values = [float(data[var].values) for var in data.data_vars] + + # Resolve colors + color_discrete_map = process_colors( + colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale + ) + colors_list = [color_discrete_map.get(var, '#808080') for var in variables] + + # Create plot based on mode + if mode == 'stacked_bar': + ax.bar(variables, values, color=colors_list, **plot_kwargs) + elif mode == 'line': + ax.plot( + variables, + values, + marker='o', + color=colors_list[0] if len(set(colors_list)) == 1 else None, + **plot_kwargs, + ) + # If different colors, plot each point separately + if len(set(colors_list)) > 1: + ax.clear() + for i, (var, val) in enumerate(zip(variables, values, strict=False)): + ax.plot([i], [val], marker='o', color=colors_list[i], label=var, **plot_kwargs) + ax.set_xticks(range(len(variables))) + ax.set_xticklabels(variables) + + ax.set_xlabel(xlabel, ha='center') + ax.set_ylabel(ylabel, va='center') + ax.set_title(title) + ax.grid(color='lightgrey', linestyle='-', linewidth=0.5, axis='y') + fig.tight_layout() + + return fig, ax + + # Resolve colors first (includes validation) + color_discrete_map = process_colors( + colors, list(data.data_vars), default_colorscale=CONFIG.Plotting.default_qualitative_colorscale + ) - processed_colors = ColorProcessor(engine='matplotlib').process_colors(colors, list(data.columns)) + # Convert Dataset to DataFrame for matplotlib plotting (naturally wide-form) + df = data.to_dataframe() - if style == 'stacked_bar': - cumulative_positive = np.zeros(len(data)) - cumulative_negative = np.zeros(len(data)) - width = data.index.to_series().diff().dropna().min() # Minimum time difference + # Get colors in column order + processed_colors = [color_discrete_map.get(str(col), '#808080') for col in df.columns] - for i, column in enumerate(data.columns): 
- positive_values = np.clip(data[column], 0, None) # Keep only positive values - negative_values = np.clip(data[column], None, 0) # Keep only negative values + if mode == 'stacked_bar': + cumulative_positive = np.zeros(len(df)) + cumulative_negative = np.zeros(len(df)) + + # Robust bar width: handle datetime-like, numeric, and single-point indexes + if len(df.index) > 1: + delta = pd.Index(df.index).to_series().diff().dropna().min() + if hasattr(delta, 'total_seconds'): # datetime-like + width = delta.total_seconds() / 86400.0 # Matplotlib date units = days + else: + width = float(delta) + else: + width = 0.8 # reasonable default for a single bar + + for i, column in enumerate(df.columns): + # Fill NaNs to avoid breaking stacking math + series = df[column].fillna(0) + positive_values = np.clip(series, 0, None) # Keep only positive values + negative_values = np.clip(series, None, 0) # Keep only negative values # Plot positive bars ax.bar( - data.index, + df.index, positive_values, bottom=cumulative_positive, color=processed_colors[i], label=column, width=width, align='center', + **plot_kwargs, ) cumulative_positive += positive_values.values # Plot negative bars ax.bar( - data.index, + df.index, negative_values, bottom=cumulative_negative, color=processed_colors[i], label='', # No label for negative bars width=width, align='center', + **plot_kwargs, ) cumulative_negative += negative_values.values - elif style == 'line': - for i, column in enumerate(data.columns): - ax.step(data.index, data[column], where='post', color=processed_colors[i], label=column) + elif mode == 'line': + for i, column in enumerate(df.columns): + ax.step(df.index, df[column], where='post', color=processed_colors[i], label=column, **plot_kwargs) # Aesthetics ax.set_xlabel(xlabel, ha='center') @@ -562,213 +653,110 @@ def with_matplotlib( return fig, ax -def heat_map_matplotlib( - data: pd.DataFrame, - color_map: str = 'viridis', - title: str = '', - xlabel: str = 'Period', - ylabel: str = 'Step', - figsize: tuple[float, float] = (12, 6), -) -> tuple[plt.Figure, plt.Axes]: +def reshape_data_for_heatmap( + data: xr.DataArray, + reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']] + | Literal['auto'] + | None = 'auto', + facet_by: str | list[str] | None = None, + animate_by: str | None = None, + fill: Literal['ffill', 'bfill'] | None = 'ffill', +) -> xr.DataArray: """ - Plots a DataFrame as a heatmap using Matplotlib. The columns of the DataFrame will be displayed on the x-axis, - the index will be displayed on the y-axis, and the values will represent the 'heat' intensity in the plot. + Reshape data for heatmap visualization, handling time dimension intelligently. - Args: - data: A DataFrame containing the data to be visualized. The index will be used for the y-axis, and columns will be used for the x-axis. - The values in the DataFrame will be represented as colors in the heatmap. - color_map: The colormap to use for the heatmap. Default is 'viridis'. Matplotlib supports various colormaps like 'plasma', 'inferno', 'cividis', etc. - title: The title of the plot. - xlabel: The label for the x-axis. - ylabel: The label for the y-axis. - figsize: The size of the figure to create. Default is (12, 6), which results in a width of 12 inches and a height of 6 inches. - - Returns: - A tuple containing the Matplotlib `Figure` and `Axes` objects. The `Figure` contains the overall plot, while the `Axes` is the area - where the heatmap is drawn. 
These can be used for further customization or saving the plot to a file. - - Notes: - - The y-axis is flipped so that the first row of the DataFrame is displayed at the top of the plot. - - The color scale is normalized based on the minimum and maximum values in the DataFrame. - - The x-axis labels (periods) are placed at the top of the plot. - - The colorbar is added horizontally at the bottom of the plot, with a label. - """ - - # Get the min and max values for color normalization - color_bar_min, color_bar_max = data.min().min(), data.max().max() - - # Create the heatmap plot - fig, ax = plt.subplots(figsize=figsize) - ax.pcolormesh(data.values, cmap=color_map, shading='auto') - ax.invert_yaxis() # Flip the y-axis to start at the top + This function decides whether to reshape the 'time' dimension based on the reshape_time parameter: + - 'auto': Automatically reshapes if only 'time' dimension would remain for heatmap + - Tuple: Explicitly reshapes time with specified parameters + - None: No reshaping (returns data as-is) - # Adjust ticks and labels for x and y axes - ax.set_xticks(np.arange(len(data.columns)) + 0.5) - ax.set_xticklabels(data.columns, ha='center') - ax.set_yticks(np.arange(len(data.index)) + 0.5) - ax.set_yticklabels(data.index, va='center') - - # Add labels to the axes - ax.set_xlabel(xlabel, ha='center') - ax.set_ylabel(ylabel, va='center') - ax.set_title(title) - - # Position x-axis labels at the top - ax.xaxis.set_label_position('top') - ax.xaxis.set_ticks_position('top') - - # Add the colorbar - sm1 = plt.cm.ScalarMappable(cmap=color_map, norm=plt.Normalize(vmin=color_bar_min, vmax=color_bar_max)) - sm1.set_array([]) - fig.colorbar(sm1, ax=ax, pad=0.12, aspect=15, fraction=0.2, orientation='horizontal') - - fig.tight_layout() - - return fig, ax - - -def heat_map_plotly( - data: pd.DataFrame, - color_map: str = 'viridis', - title: str = '', - xlabel: str = 'Period', - ylabel: str = 'Step', - categorical_labels: bool = True, -) -> go.Figure: - """ - Plots a DataFrame as a heatmap using Plotly. The columns of the DataFrame will be mapped to the x-axis, - and the index will be displayed on the y-axis. The values in the DataFrame will represent the 'heat' in the plot. - - Args: - data: A DataFrame with the data to be visualized. The index will be used for the y-axis, and columns will be used for the x-axis. - The values in the DataFrame will be represented as colors in the heatmap. - color_map: The color scale to use for the heatmap. Default is 'viridis'. Plotly supports various color scales like 'Cividis', 'Inferno', etc. - title: The title of the heatmap. Default is an empty string. - xlabel: The label for the x-axis. Default is 'Period'. - ylabel: The label for the y-axis. Default is 'Step'. - categorical_labels: If True, the x and y axes are treated as categorical data (i.e., the index and columns will not be interpreted as continuous data). - Default is True. If False, the axes are treated as continuous, which may be useful for time series or numeric data. - - Returns: - A Plotly figure object containing the heatmap. This can be further customized and saved - or displayed using `fig.show()`. - - Notes: - The color bar is automatically scaled to the minimum and maximum values in the data. - The y-axis is reversed to display the first row at the top. 
- """ - - color_bar_min, color_bar_max = data.min().min(), data.max().max() # Min and max values for color scaling - # Define the figure - fig = go.Figure( - data=go.Heatmap( - z=data.values, - x=data.columns, - y=data.index, - colorscale=color_map, - zmin=color_bar_min, - zmax=color_bar_max, - colorbar=dict( - title=dict(text='Color Bar Label', side='right'), - orientation='h', - xref='container', - yref='container', - len=0.8, # Color bar length relative to plot - x=0.5, - y=0.1, - ), - ) - ) - - # Set axis labels and style - fig.update_layout( - title=title, - xaxis=dict(title=xlabel, side='top', type='category' if categorical_labels else None), - yaxis=dict(title=ylabel, autorange='reversed', type='category' if categorical_labels else None), - ) - - return fig - - -def reshape_to_2d(data_1d: np.ndarray, nr_of_steps_per_column: int) -> np.ndarray: - """ - Reshapes a 1D numpy array into a 2D array suitable for plotting as a colormap. - - The reshaped array will have the number of rows corresponding to the steps per column - (e.g., 24 hours per day) and columns representing time periods (e.g., days or months). + All non-time dimensions are preserved during reshaping. Args: - data_1d: A 1D numpy array with the data to reshape. - nr_of_steps_per_column: The number of steps (rows) per column in the resulting 2D array. For example, - this could be 24 (for hours) or 31 (for days in a month). + data: DataArray to reshape for heatmap visualization. + reshape_time: Reshaping configuration: + - 'auto' (default): Auto-reshape if needed based on facet_by/animate_by + - Tuple (timeframes, timesteps_per_frame): Explicit time reshaping + - None: No reshaping + facet_by: Dimension(s) used for faceting (used in 'auto' decision). + animate_by: Dimension used for animation (used in 'auto' decision). + fill: Method to fill missing values: 'ffill' or 'bfill'. Default is 'ffill'. Returns: - The reshaped 2D array. Each internal array corresponds to one column, with the specified number of steps. - Each column might represents a time period (e.g., day, month, etc.). - """ + Reshaped DataArray. If time reshaping is applied, 'time' dimension is replaced + by 'timestep' and 'timeframe'. All other dimensions are preserved. - # Step 1: Ensure the input is a 1D array. 
- if data_1d.ndim != 1: - raise ValueError('Input must be a 1D array') - - # Step 2: Convert data to float type to allow NaN padding - if data_1d.dtype != np.float64: - data_1d = data_1d.astype(np.float64) - - # Step 3: Calculate the number of columns required - total_steps = len(data_1d) - cols = len(data_1d) // nr_of_steps_per_column # Base number of columns - - # If there's a remainder, add an extra column to hold the remaining values - if total_steps % nr_of_steps_per_column != 0: - cols += 1 + Examples: + Auto-reshaping: - # Step 4: Pad the 1D data to match the required number of rows and columns - padded_data = np.pad( - data_1d, (0, cols * nr_of_steps_per_column - total_steps), mode='constant', constant_values=np.nan - ) + ```python + # Will auto-reshape because only 'time' remains after faceting/animation + data = reshape_data_for_heatmap(data, reshape_time='auto', facet_by='scenario', animate_by='period') + ``` - # Step 5: Reshape the padded data into a 2D array - data_2d = padded_data.reshape(cols, nr_of_steps_per_column) + Explicit reshaping: - return data_2d.T + ```python + # Explicitly reshape to daily pattern + data = reshape_data_for_heatmap(data, reshape_time=('D', 'h')) + ``` + No reshaping: -def heat_map_data_from_df( - df: pd.DataFrame, - periods: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], - steps_per_period: Literal['W', 'D', 'h', '15min', 'min'], - fill: Literal['ffill', 'bfill'] | None = None, -) -> pd.DataFrame: + ```python + # Keep data as-is + data = reshape_data_for_heatmap(data, reshape_time=None) + ``` """ - Reshapes a DataFrame with a DateTime index into a 2D array for heatmap plotting, - based on a specified sample rate. - Only specific combinations of `periods` and `steps_per_period` are supported; invalid combinations raise an assertion. - - Args: - df: A DataFrame with a DateTime index containing the data to reshape. - periods: The time interval of each period (columns of the heatmap), - such as 'YS' (year start), 'W' (weekly), 'D' (daily), 'h' (hourly) etc. - steps_per_period: The time interval within each period (rows in the heatmap), - such as 'YS' (year start), 'W' (weekly), 'D' (daily), 'h' (hourly) etc. - fill: Method to fill missing values: 'ffill' for forward fill or 'bfill' for backward fill. + # If no time dimension, return data as-is + if 'time' not in data.dims: + return data + + # Handle None (disabled) - return data as-is + if reshape_time is None: + return data + + # Determine timeframes and timesteps_per_frame based on reshape_time parameter + if reshape_time == 'auto': + # Check if we need automatic time reshaping + facet_dims_used = [] + if facet_by: + facet_dims_used = [facet_by] if isinstance(facet_by, str) else list(facet_by) + if animate_by: + facet_dims_used.append(animate_by) + + # Get dimensions that would remain for heatmap + potential_heatmap_dims = [dim for dim in data.dims if dim not in facet_dims_used] + + # Auto-reshape if only 'time' dimension remains + if len(potential_heatmap_dims) == 1 and potential_heatmap_dims[0] == 'time': + logger.debug( + "Auto-applying time reshaping: Only 'time' dimension remains after faceting/animation. " + "Using default timeframes='D' and timesteps_per_frame='h'. " + "To customize, use reshape_time=('D', 'h') or disable with reshape_time=None." 
+ ) + timeframes, timesteps_per_frame = 'D', 'h' + else: + # No reshaping needed + return data + elif isinstance(reshape_time, tuple): + # Explicit reshaping + timeframes, timesteps_per_frame = reshape_time + else: + raise ValueError(f"reshape_time must be 'auto', a tuple like ('D', 'h'), or None. Got: {reshape_time}") - Returns: - A DataFrame suitable for heatmap plotting, with rows representing steps within each period - and columns representing each period. - """ - assert pd.api.types.is_datetime64_any_dtype(df.index), ( - 'The index of the DataFrame must be datetime to transform it properly for a heatmap plot' - ) + # Validate that time is datetime + if not np.issubdtype(data.coords['time'].dtype, np.datetime64): + raise ValueError(f'Time dimension must be datetime-based, got {data.coords["time"].dtype}') - # Define formats for different combinations of `periods` and `steps_per_period` + # Define formats for different combinations formats = { ('YS', 'W'): ('%Y', '%W'), ('YS', 'D'): ('%Y', '%j'), # day of year ('YS', 'h'): ('%Y', '%j %H:00'), ('MS', 'D'): ('%Y-%m', '%d'), # day of month ('MS', 'h'): ('%Y-%m', '%d %H:00'), - ('W', 'D'): ('%Y-w%W', '%w_%A'), # week and day of week (with prefix for proper sorting) + ('W', 'D'): ('%Y-w%W', '%w_%A'), # week and day of week ('W', 'h'): ('%Y-w%W', '%w_%A %H:00'), ('D', 'h'): ('%Y-%m-%d', '%H:00'), # Day and hour ('D', '15min'): ('%Y-%m-%d', '%H:%M'), # Day and minute @@ -776,43 +764,64 @@ def heat_map_data_from_df( ('h', 'min'): ('%Y-%m-%d %H:00', '%M'), # minute of hour } - if df.empty: - raise ValueError('DataFrame is empty.') - diffs = df.index.to_series().diff().dropna() - minimum_time_diff_in_min = diffs.min().total_seconds() / 60 - time_intervals = {'min': 1, '15min': 15, 'h': 60, 'D': 24 * 60, 'W': 7 * 24 * 60} - if time_intervals[steps_per_period] > minimum_time_diff_in_min: - logger.error( - f'To compute the heatmap, the data was aggregated from {minimum_time_diff_in_min:.2f} min to ' - f'{time_intervals[steps_per_period]:.2f} min. Mean values are displayed.' - ) - - # Select the format based on the `periods` and `steps_per_period` combination - format_pair = (periods, steps_per_period) + format_pair = (timeframes, timesteps_per_frame) if format_pair not in formats: raise ValueError(f'{format_pair} is not a valid format. Choose from {list(formats.keys())}') period_format, step_format = formats[format_pair] - df = df.sort_index() # Ensure DataFrame is sorted by time index + # Check if resampling is needed + if data.sizes['time'] > 1: + # Use NumPy for more efficient timedelta computation + time_values = data.coords['time'].values # Already numpy datetime64[ns] + # Calculate differences and convert to minutes + time_diffs = np.diff(time_values).astype('timedelta64[s]').astype(float) / 60.0 + if time_diffs.size > 0: + min_time_diff_min = np.nanmin(time_diffs) + time_intervals = {'min': 1, '15min': 15, 'h': 60, 'D': 24 * 60, 'W': 7 * 24 * 60} + if time_intervals[timesteps_per_frame] > min_time_diff_min: + logger.warning( + f'Resampling data from {min_time_diff_min:.2f} min to ' + f'{time_intervals[timesteps_per_frame]:.2f} min. Mean values are displayed.' 
+                )

-    resampled_data = df.resample(steps_per_period).mean()  # Resample and fill any gaps with NaN
+    # Resample along time dimension
+    resampled = data.resample(time=timesteps_per_frame).mean()

-    if fill == 'ffill':  # Apply fill method if specified
-        resampled_data = resampled_data.ffill()
+    # Apply fill if specified
+    if fill == 'ffill':
+        resampled = resampled.ffill(dim='time')
     elif fill == 'bfill':
-        resampled_data = resampled_data.bfill()
+        resampled = resampled.bfill(dim='time')
+
+    # Create period and step labels
+    time_values = pd.to_datetime(resampled.coords['time'].values)
+    period_labels = time_values.strftime(period_format)
+    step_labels = time_values.strftime(step_format)
+
+    # Handle special case for weekly day format: shift '0_Sunday' to '7_Sunday' for proper sorting.
+    # Use a substring replace so labels like '0_Sunday 05:00' from the ('W', 'h') format are shifted too.
+    if '%w_%A' in step_format:
+        step_labels = pd.Series(step_labels).str.replace('0_Sunday', '7_Sunday').values
+
+    # Add period and step as coordinates
+    resampled = resampled.assign_coords(
+        {
+            'timeframe': ('time', period_labels),
+            'timestep': ('time', step_labels),
+        }
+    )

-    resampled_data['period'] = resampled_data.index.strftime(period_format)
-    resampled_data['step'] = resampled_data.index.strftime(step_format)
-    if '%w_%A' in step_format:  # Shift index of strings to ensure proper sorting
-        resampled_data['step'] = resampled_data['step'].apply(
-            lambda x: x.replace('0_Sunday', '7_Sunday') if '0_Sunday' in x else x
-        )
+    # Convert to multi-index and unstack
+    resampled = resampled.set_index(time=['timeframe', 'timestep'])
+    result = resampled.unstack('time')
+
+    # Ensure timestep and timeframe come first in dimension order
+    # Get other dimensions
+    other_dims = [d for d in result.dims if d not in ['timestep', 'timeframe']]

-    # Pivot the table so periods are columns and steps are indices
-    df_pivoted = resampled_data.pivot(columns='period', index='step', values=df.columns[0])
+    # Reorder: timestep, timeframe, then other dimensions
+    result = result.transpose('timestep', 'timeframe', *other_dims)

-    return df_pivoted
+    return result


 def plot_network(
@@ -899,518 +908,653 @@ def plot_network(
     )


-def pie_with_plotly(
-    data: pd.DataFrame,
-    colors: ColorType = 'viridis',
-    title: str = '',
-    legend_title: str = '',
-    hole: float = 0.0,
-    fig: go.Figure | None = None,
-) -> go.Figure:
+def preprocess_data_for_pie(
+    data: xr.Dataset | pd.DataFrame | pd.Series,
+    lower_percentage_threshold: float = 5.0,
+) -> pd.Series:
     """
-    Create a pie chart with Plotly to visualize the proportion of values in a DataFrame.
+    Preprocess data for pie chart display.
+
+    Groups items that are individually below the threshold percentage into an "Other" category.
+    Converts various input types to a pandas Series for uniform handling.

     Args:
-        data: A DataFrame containing the data to plot. If multiple rows exist,
-            they will be summed unless a specific index value is passed.
-        colors: Color specification, can be:
-            - A string with a colorscale name (e.g., 'viridis', 'plasma')
-            - A list of color strings (e.g., ['#ff0000', '#00ff00'])
-            - A dictionary mapping column names to colors (e.g., {'Column1': '#ff0000'})
-        title: The title of the plot.
-        legend_title: The title for the legend.
-        hole: Size of the hole in the center for creating a donut chart (0.0 to 1.0).
-        fig: A Plotly figure object to plot on. If not provided, a new figure will be created.
+        data: Input data (xarray Dataset, DataFrame, or Series)
+        lower_percentage_threshold: Percentage threshold - items below this are grouped into "Other"

     Returns:
-        A Plotly figure object containing the generated pie chart.
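# A small sketch of the reshaping performed by reshape_data_for_heatmap() above:
# a 1D hourly series is folded into (timestep, timeframe) for heatmap plotting.
# The DataArray built here is illustrative.
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2024-01-01', periods=7 * 24, freq='h')
da = xr.DataArray(np.arange(7 * 24, dtype=float), coords={'time': time}, dims='time', name='load')

reshaped = reshape_data_for_heatmap(da, reshape_time=('D', 'h'))
print(dict(reshaped.sizes))  # {'timestep': 24, 'timeframe': 7} -> hours vs. days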
+ Processed pandas Series with small items grouped into "Other" + """ + # Convert to Series + if isinstance(data, xr.Dataset): + # Sum all dimensions for each variable to get total values + values = {} + for var in data.data_vars: + var_data = data[var] + if len(var_data.dims) > 0: + total_value = float(var_data.sum().item()) + else: + total_value = float(var_data.item()) - Notes: - - Negative values are not appropriate for pie charts and will be converted to absolute values with a warning. - - If the data contains very small values (less than 1% of the total), they can be grouped into an "Other" category - for better readability. - - By default, the sum of all columns is used for the pie chart. For time series data, consider preprocessing. + # Handle negative values + if total_value < 0: + logger.warning(f'Negative value for {var}: {total_value}. Using absolute value.') + total_value = abs(total_value) - """ - if data.empty: - logger.error('Empty DataFrame provided for pie chart. Returning empty figure.') - return go.Figure() + values[var] = total_value - # Create a copy to avoid modifying the original DataFrame - data_copy = data.copy() + series = pd.Series(values) - # Check if any negative values and warn - if (data_copy < 0).any().any(): - logger.error('Negative values detected in data. Using absolute values for pie chart.') - data_copy = data_copy.abs() + elif isinstance(data, pd.DataFrame): + # Sum across all columns if DataFrame + series = data.sum(axis=0) + # Handle negative values + negative_mask = series < 0 + if negative_mask.any(): + logger.warning(f'Negative values found: {series[negative_mask].to_dict()}. Using absolute values.') + series = series.abs() - # If data has multiple rows, sum them to get total for each column - if len(data_copy) > 1: - data_sum = data_copy.sum() - else: - data_sum = data_copy.iloc[0] + else: # pd.Series + series = data.copy() + # Handle negative values + negative_mask = series < 0 + if negative_mask.any(): + logger.warning(f'Negative values found: {series[negative_mask].to_dict()}. 
Using absolute values.')
+            series = series.abs()

-    # Get labels (column names) and values
-    labels = data_sum.index.tolist()
-    values = data_sum.values.tolist()
+    # Only keep positive values
+    series = series[series > 0]

-    # Apply color mapping using the unified color processor
-    processed_colors = ColorProcessor(engine='plotly').process_colors(colors, labels)
+    if series.empty or lower_percentage_threshold <= 0:
+        return series

-    # Create figure if not provided
-    fig = fig if fig is not None else go.Figure()
+    # Calculate percentages
+    total = series.sum()
+    percentages = (series / total) * 100

-    # Add pie trace
-    fig.add_trace(
-        go.Pie(
-            labels=labels,
-            values=values,
-            hole=hole,
-            marker=dict(colors=processed_colors),
-            textinfo='percent+label+value',
-            textposition='inside',
-            insidetextorientation='radial',
+    # Find items below and above threshold
+    below_threshold = series[percentages < lower_percentage_threshold]
+    above_threshold = series[percentages >= lower_percentage_threshold]
+
+    # Only group if there are at least 2 items below threshold
+    if len(below_threshold) > 1:
+        # Create new series with items above threshold + "Other"
+        result = above_threshold.copy()
+        result['Other'] = below_threshold.sum()
+        return result
+
+    return series
+
+
+def dual_pie_with_plotly(
+    data_left: xr.Dataset | pd.DataFrame | pd.Series,
+    data_right: xr.Dataset | pd.DataFrame | pd.Series,
+    colors: ColorType | None = None,
+    title: str = '',
+    subtitles: tuple[str, str] = ('Left Chart', 'Right Chart'),
+    legend_title: str = '',
+    hole: float = 0.2,
+    lower_percentage_group: float = 5.0,
+    text_info: str = 'percent+label',
+    text_position: str = 'inside',
+    hover_template: str = '%{label}: %{value} (%{percent})',
+) -> go.Figure:
+    """
+    Create two pie charts side by side with Plotly.
+
+    Args:
+        data_left: Data for the left pie chart. Variables are summed across all dimensions.
+        data_right: Data for the right pie chart. Variables are summed across all dimensions.
+        colors: Color specification (colorscale name, list of colors, or dict mapping)
+        title: The main title of the plot.
+        subtitles: Tuple containing the subtitles for (left, right) charts.
+        legend_title: The title for the legend.
+        hole: Size of the hole in the center for creating donut charts (0.0 to 1.0).
+        lower_percentage_group: Group segments whose individual share is below this percentage (0–100) into "Other".
+        text_info: What to show on pie segments: 'label', 'percent', 'value', 'label+percent',
+            'label+value', 'percent+value', 'label+percent+value', or 'none'.
+        text_position: Position of text: 'inside', 'outside', 'auto', or 'none'.
+        hover_template: Template for hover text. Use %{label}, %{value}, %{percent}.
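# An illustrative sketch of how preprocess_data_for_pie() above groups minor
# segments: shares individually below the threshold are collapsed into 'Other'
# when at least two such segments exist. The numbers here are made up.
import pandas as pd

flows = pd.Series({'CHP': 60.0, 'Boiler': 30.0, 'Storage': 4.0, 'Grid': 3.0, 'PV': 3.0})
print(preprocess_data_for_pie(flows, lower_percentage_threshold=5.0))
# CHP       60.0
# Boiler    30.0
# Other     10.0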
+ + Returns: + Plotly Figure object + """ + if colors is None: + colors = CONFIG.Plotting.default_qualitative_colorscale + + # Preprocess data to Series + left_series = preprocess_data_for_pie(data_left, lower_percentage_group) + right_series = preprocess_data_for_pie(data_right, lower_percentage_group) + + # Extract labels and values + left_labels = left_series.index.tolist() + left_values = left_series.values.tolist() + + right_labels = right_series.index.tolist() + right_values = right_series.values.tolist() + + # Get all unique labels for consistent coloring + all_labels = sorted(set(left_labels) | set(right_labels)) + + # Create color map + color_map = process_colors(colors, all_labels, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + + # Create figure + fig = go.Figure() + + # Add left pie + if left_labels: + fig.add_trace( + go.Pie( + labels=left_labels, + values=left_values, + name=subtitles[0], + marker=dict(colors=[color_map.get(label, '#636EFA') for label in left_labels]), + hole=hole, + textinfo=text_info, + textposition=text_position, + hovertemplate=hover_template, + domain=dict(x=[0, 0.48]), + ) + ) + + # Add right pie + if right_labels: + fig.add_trace( + go.Pie( + labels=right_labels, + values=right_values, + name=subtitles[1], + marker=dict(colors=[color_map.get(label, '#636EFA') for label in right_labels]), + hole=hole, + textinfo=text_info, + textposition=text_position, + hovertemplate=hover_template, + domain=dict(x=[0.52, 1]), + ) ) - ) - # Update layout for better aesthetics + # Update layout fig.update_layout( title=title, legend_title=legend_title, - plot_bgcolor='rgba(0,0,0,0)', # Transparent background - paper_bgcolor='rgba(0,0,0,0)', # Transparent paper background - font=dict(size=14), # Increase font size for better readability + margin=dict(t=80, b=50, l=30, r=30), ) return fig -def pie_with_matplotlib( - data: pd.DataFrame, - colors: ColorType = 'viridis', +def dual_pie_with_matplotlib( + data_left: xr.Dataset | pd.DataFrame | pd.Series, + data_right: xr.Dataset | pd.DataFrame | pd.Series, + colors: ColorType | None = None, title: str = '', - legend_title: str = 'Categories', - hole: float = 0.0, - figsize: tuple[int, int] = (10, 8), - fig: plt.Figure | None = None, - ax: plt.Axes | None = None, -) -> tuple[plt.Figure, plt.Axes]: + subtitles: tuple[str, str] = ('Left Chart', 'Right Chart'), + legend_title: str = '', + hole: float = 0.2, + lower_percentage_group: float = 5.0, + figsize: tuple[int, int] = (14, 7), +) -> tuple[plt.Figure, list[plt.Axes]]: """ - Create a pie chart with Matplotlib to visualize the proportion of values in a DataFrame. + Create two pie charts side by side with Matplotlib. Args: - data: A DataFrame containing the data to plot. If multiple rows exist, - they will be summed unless a specific index value is passed. - colors: Color specification, can be: - - A string with a colormap name (e.g., 'viridis', 'plasma') - - A list of color strings (e.g., ['#ff0000', '#00ff00']) - - A dictionary mapping column names to colors (e.g., {'Column1': '#ff0000'}) - title: The title of the plot. + data_left: Data for the left pie chart. + data_right: Data for the right pie chart. + colors: Color specification (colorscale name, list of colors, or dict mapping) + title: The main title of the plot. + subtitles: Tuple containing the subtitles for (left, right) charts. legend_title: The title for the legend. - hole: Size of the hole in the center for creating a donut chart (0.0 to 1.0). 
+        hole: Size of the hole in the center for creating donut charts (0.0 to 1.0).
+        lower_percentage_group: Group segments whose individual share is below this percentage (0–100) into "Other".
         figsize: The size of the figure (width, height) in inches.
-        fig: A Matplotlib figure object to plot on. If not provided, a new figure will be created.
-        ax: A Matplotlib axes object to plot on. If not provided, a new axes will be created.

     Returns:
-        A tuple containing the Matplotlib figure and axes objects used for the plot.
+        Tuple of (Figure, list of Axes)
+    """
+    if colors is None:
+        colors = CONFIG.Plotting.default_qualitative_colorscale

-    Notes:
-        - Negative values are not appropriate for pie charts and will be converted to absolute values with a warning.
-        - If the data contains very small values (less than 1% of the total), they can be grouped into an "Other" category
-          for better readability.
-        - By default, the sum of all columns is used for the pie chart. For time series data, consider preprocessing.
+    # Preprocess data to Series
+    left_series = preprocess_data_for_pie(data_left, lower_percentage_group)
+    right_series = preprocess_data_for_pie(data_right, lower_percentage_group)

-    """
-    if data.empty:
-        logger.error('Empty DataFrame provided for pie chart. Returning empty figure.')
-        if fig is None or ax is None:
-            fig, ax = plt.subplots(figsize=figsize)
-        return fig, ax
+    # Extract labels and values
+    left_labels = left_series.index.tolist()
+    left_values = left_series.values.tolist()

-    # Create a copy to avoid modifying the original DataFrame
-    data_copy = data.copy()
+    right_labels = right_series.index.tolist()
+    right_values = right_series.values.tolist()

-    # Check if any negative values and warn
-    if (data_copy < 0).any().any():
-        logger.error('Negative values detected in data. 
Using absolute values for pie chart.') - data_copy = data_copy.abs() + # Get all unique labels for consistent coloring + all_labels = sorted(set(left_labels) | set(right_labels)) - # If data has multiple rows, sum them to get total for each column - if len(data_copy) > 1: - data_sum = data_copy.sum() - else: - data_sum = data_copy.iloc[0] + # Create color map (process_colors always returns a dict) + color_map = process_colors(colors, all_labels, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) - # Get labels (column names) and values - labels = data_sum.index.tolist() - values = data_sum.values.tolist() + # Create figure + fig, axes = plt.subplots(1, 2, figsize=figsize) - # Apply color mapping using the unified color processor - processed_colors = ColorProcessor(engine='matplotlib').process_colors(colors, labels) + def draw_pie(ax, labels, values, subtitle): + """Draw a single pie chart.""" + if not labels: + ax.set_title(subtitle) + ax.axis('off') + return - # Create figure and axis if not provided - if fig is None or ax is None: - fig, ax = plt.subplots(figsize=figsize) + chart_colors = [color_map[label] for label in labels] - # Draw the pie chart - wedges, texts, autotexts = ax.pie( - values, - labels=labels, - colors=processed_colors, - autopct='%1.1f%%', - startangle=90, - shadow=False, - wedgeprops=dict(width=0.5) if hole > 0 else None, # Set width for donut - ) + # Draw pie + wedges, texts, autotexts = ax.pie( + values, + labels=labels, + colors=chart_colors, + autopct='%1.1f%%', + startangle=90, + wedgeprops=dict(width=1 - hole) if hole > 0 else None, + ) + + # Style text + for autotext in autotexts: + autotext.set_fontsize(10) + autotext.set_color('white') + autotext.set_weight('bold') + + ax.set_aspect('equal') + ax.set_title(subtitle, fontsize=14, pad=20) - # Adjust the wedgeprops to make donut hole size consistent with plotly - # For matplotlib, the hole size is determined by the wedge width - # Convert hole parameter to wedge width - if hole > 0: - # Adjust hole size to match plotly's hole parameter - # In matplotlib, wedge width is relative to the radius (which is 1) - # For plotly, hole is a fraction of the radius - wedge_width = 1 - hole - for wedge in wedges: - wedge.set_width(wedge_width) - - # Customize the appearance - # Make autopct text more visible - for autotext in autotexts: - autotext.set_fontsize(10) - autotext.set_color('white') - - # Set aspect ratio to be equal to ensure a circular pie - ax.set_aspect('equal') - - # Add title + # Draw both pies + draw_pie(axes[0], left_labels, left_values, subtitles[0]) + draw_pie(axes[1], right_labels, right_values, subtitles[1]) + + # Add main title if title: - ax.set_title(title, fontsize=16) + fig.suptitle(title, fontsize=16, y=0.98) - # Create a legend if there are many segments - if len(labels) > 6: - ax.legend(wedges, labels, title=legend_title, loc='center left', bbox_to_anchor=(1, 0, 0.5, 1)) + # Create unified legend + if left_labels or right_labels: + handles = [ + plt.Line2D([0], [0], marker='o', color='w', markerfacecolor=color_map[label], markersize=10) + for label in all_labels + ] + + fig.legend( + handles=handles, + labels=all_labels, + title=legend_title, + loc='lower center', + bbox_to_anchor=(0.5, -0.02), + ncol=min(len(all_labels), 5), + ) + + fig.subplots_adjust(bottom=0.15) - # Apply tight layout fig.tight_layout() - return fig, ax + return fig, axes -def dual_pie_with_plotly( - data_left: pd.Series, - data_right: pd.Series, - colors: ColorType = 'viridis', +def heatmap_with_plotly( + 
data: xr.DataArray, + colors: ColorType | None = None, title: str = '', - subtitles: tuple[str, str] = ('Left Chart', 'Right Chart'), - legend_title: str = '', - hole: float = 0.2, - lower_percentage_group: float = 5.0, - hover_template: str = '%{label}: %{value} (%{percent})', - text_info: str = 'percent+label', - text_position: str = 'inside', + facet_by: str | list[str] | None = None, + animate_by: str | None = None, + facet_cols: int | None = None, + reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']] + | Literal['auto'] + | None = 'auto', + fill: Literal['ffill', 'bfill'] | None = 'ffill', + **imshow_kwargs: Any, ) -> go.Figure: """ - Create two pie charts side by side with Plotly, with consistent coloring across both charts. + Plot a heatmap visualization using Plotly's imshow with faceting and animation support. + + This function creates heatmap visualizations from xarray DataArrays, supporting + multi-dimensional data through faceting (subplots) and animation. It automatically + handles dimension reduction and data reshaping for optimal heatmap display. + + Automatic Time Reshaping: + If only the 'time' dimension remains after faceting/animation (making the data 1D), + the function automatically reshapes time into a 2D format using default values + (timeframes='D', timesteps_per_frame='h'). This creates a daily pattern heatmap + showing hours vs days. Args: - data_left: Series for the left pie chart. - data_right: Series for the right pie chart. - colors: Color specification, can be: - - A string with a colorscale name (e.g., 'viridis', 'plasma') - - A list of color strings (e.g., ['#ff0000', '#00ff00']) - - A dictionary mapping category names to colors (e.g., {'Category1': '#ff0000'}) - title: The main title of the plot. - subtitles: Tuple containing the subtitles for (left, right) charts. - legend_title: The title for the legend. - hole: Size of the hole in the center for creating donut charts (0.0 to 1.0). - lower_percentage_group: Group segments whose cumulative share is below this percentage (0–100) into "Other". - hover_template: Template for hover text. Use %{label}, %{value}, %{percent}. - text_info: What to show on pie segments: 'label', 'percent', 'value', 'label+percent', - 'label+value', 'percent+value', 'label+percent+value', or 'none'. - text_position: Position of text: 'inside', 'outside', 'auto', or 'none'. + data: An xarray DataArray containing the data to visualize. Should have at least + 2 dimensions, or a 'time' dimension that can be reshaped into 2D. + colors: Color specification (colorscale name, list, or dict). Common options: + 'turbo', 'plasma', 'RdBu', 'portland'. + title: The main title of the heatmap. + facet_by: Dimension to create facets for. Creates a subplot grid. + Can be a single dimension name or list (only first dimension used). + Note: px.imshow only supports single-dimension faceting. + If the dimension doesn't exist in the data, it will be silently ignored. + animate_by: Dimension to animate over. Creates animation frames. + If the dimension doesn't exist in the data, it will be silently ignored. + facet_cols: Number of columns in the facet grid (used with facet_by). 
+ reshape_time: Time reshaping configuration: + - 'auto' (default): Automatically applies ('D', 'h') if only 'time' dimension remains + - Tuple like ('D', 'h'): Explicit time reshaping (days vs hours) + - None: Disable time reshaping (will error if only 1D time data) + fill: Method to fill missing values when reshaping time: 'ffill' or 'bfill'. Default is 'ffill'. + **imshow_kwargs: Additional keyword arguments to pass to plotly.express.imshow. + Common options include: + - aspect: 'auto', 'equal', or a number for aspect ratio + - zmin, zmax: Minimum and maximum values for color scale + - labels: Dict to customize axis labels Returns: - A Plotly figure object containing the generated dual pie chart. - """ - from plotly.subplots import make_subplots + A Plotly figure object containing the heatmap visualization. - # Check for empty data - if data_left.empty and data_right.empty: - logger.error('Both datasets are empty. Returning empty figure.') - return go.Figure() + Examples: + Simple heatmap: - # Create a subplot figure - fig = make_subplots( - rows=1, cols=2, specs=[[{'type': 'pie'}, {'type': 'pie'}]], subplot_titles=subtitles, horizontal_spacing=0.05 - ) + ```python + fig = heatmap_with_plotly(data_array, colors='RdBu', title='Temperature Map') + ``` - # Process series to handle negative values and apply minimum percentage threshold - def preprocess_series(series: pd.Series): - """ - Preprocess a series for pie chart display by handling negative values - and grouping the smallest parts together if they collectively represent - less than the specified percentage threshold. + Facet by scenario: - Args: - series: The series to preprocess + ```python + fig = heatmap_with_plotly(data_array, facet_by='scenario', facet_cols=2) + ``` - Returns: - A preprocessed pandas Series - """ - # Handle negative values - if (series < 0).any(): - logger.error('Negative values detected in data. 
Using absolute values for pie chart.') - series = series.abs() + Animate by period: - # Remove zeros - series = series[series > 0] + ```python + fig = heatmap_with_plotly(data_array, animate_by='period') + ``` - # Apply minimum percentage threshold if needed - if lower_percentage_group and not series.empty: - total = series.sum() - if total > 0: - # Sort series by value (ascending) - sorted_series = series.sort_values() + Automatic time reshaping (when only time dimension remains): - # Calculate cumulative percentage contribution - cumulative_percent = (sorted_series.cumsum() / total) * 100 + ```python + # Data with dims ['time', 'scenario', 'period'] + # After faceting and animation, only 'time' remains -> auto-reshapes to (timestep, timeframe) + fig = heatmap_with_plotly(data_array, facet_by='scenario', animate_by='period') + ``` - # Find entries that collectively make up less than lower_percentage_group - to_group = cumulative_percent <= lower_percentage_group + Explicit time reshaping: - if to_group.sum() > 1: - # Create "Other" category for the smallest values that together are < threshold - other_sum = sorted_series[to_group].sum() + ```python + fig = heatmap_with_plotly(data_array, facet_by='scenario', animate_by='period', reshape_time=('W', 'D')) + ``` + """ + if colors is None: + colors = CONFIG.Plotting.default_sequential_colorscale - # Keep only values that aren't in the "Other" group - result_series = series[~series.index.isin(sorted_series[to_group].index)] + # Apply CONFIG defaults if not explicitly set + if facet_cols is None: + facet_cols = CONFIG.Plotting.default_facet_cols - # Add the "Other" category if it has a value - if other_sum > 0: - result_series['Other'] = other_sum + # Handle empty data + if data.size == 0: + return go.Figure() - return result_series + # Apply time reshaping using the new unified function + data = reshape_data_for_heatmap( + data, reshape_time=reshape_time, facet_by=facet_by, animate_by=animate_by, fill=fill + ) - return series + # Get available dimensions + available_dims = list(data.dims) - data_left_processed = preprocess_series(data_left) - data_right_processed = preprocess_series(data_right) + # Validate and filter facet_by dimensions + if facet_by is not None: + if isinstance(facet_by, str): + if facet_by not in available_dims: + logger.debug( + f"Dimension '{facet_by}' not found in data. Available dimensions: {available_dims}. " + f'Ignoring facet_by parameter.' + ) + facet_by = None + elif isinstance(facet_by, list): + missing_dims = [dim for dim in facet_by if dim not in available_dims] + facet_by = [dim for dim in facet_by if dim in available_dims] + if missing_dims: + logger.debug( + f'Dimensions {missing_dims} not found in data. Available dimensions: {available_dims}. ' + f'Using only existing dimensions: {facet_by if facet_by else "none"}.' + ) + if len(facet_by) == 0: + facet_by = None + + # Validate animate_by dimension + if animate_by is not None and animate_by not in available_dims: + logger.debug( + f"Dimension '{animate_by}' not found in data. Available dimensions: {available_dims}. " + f'Ignoring animate_by parameter.' 
+        )
+        animate_by = None
+
+    # Determine which dimensions are used for faceting/animation
+    facet_dims = []
+    if facet_by:
+        # Copy the list so appending animate_by below does not mutate the caller's facet_by
+        facet_dims = [facet_by] if isinstance(facet_by, str) else list(facet_by)
+    if animate_by:
+        facet_dims.append(animate_by)
+
+    # Get remaining dimensions for the heatmap itself
+    heatmap_dims = [dim for dim in available_dims if dim not in facet_dims]
+
+    if len(heatmap_dims) < 2:
+        # Handle single-dimension case by adding variable name as a dimension
+        if len(heatmap_dims) == 1:
+            # Get the variable name, or use a default
+            var_name = data.name if data.name else 'value'
+
+            # Expand the DataArray by adding a new dimension with the variable name
+            data = data.expand_dims({'variable': [var_name]})
+
+            # Update available dimensions
+            available_dims = list(data.dims)
+            heatmap_dims = [dim for dim in available_dims if dim not in facet_dims]
+
+            logger.debug(f'Only 1 dimension remaining for heatmap. Added variable dimension: {var_name}')
+        else:
+            # No dimensions at all - cannot create a heatmap
+            logger.error(
+                f'Heatmap requires at least 1 dimension. '
+                f'After faceting/animation, {len(heatmap_dims)} dimension(s) remain: {heatmap_dims}'
+            )
+            return go.Figure()
+
+    # Setup faceting parameters for Plotly Express
+    # Note: px.imshow only supports facet_col, not facet_row
+    facet_col_param = None
+    if facet_by:
+        if isinstance(facet_by, str):
+            facet_col_param = facet_by
+        elif len(facet_by) == 1:
+            facet_col_param = facet_by[0]
+        elif len(facet_by) >= 2:
+            # px.imshow doesn't support facet_row, so we can only facet by one dimension
+            # Use the first dimension and warn about the rest
+            facet_col_param = facet_by[0]
+            logger.warning(
+                f'px.imshow only supports faceting by a single dimension. '
+                f'Using {facet_by[0]} for faceting. Dimensions {facet_by[1:]} will be ignored. '
+                f'Consider using animate_by for additional dimensions.' 
+ ) - # Add right pie if data exists - right_trace = create_pie_trace(data_right_processed, subtitles[1]) - if right_trace: - right_trace.domain = dict(x=[0.52, 1]) - fig.add_trace(right_trace, row=1, col=2) + # Create the imshow plot - px.imshow can work directly with xarray DataArrays + common_args = { + 'img': data, + 'color_continuous_scale': colors, + 'title': title, + } - # Update layout - fig.update_layout( - title=title, - legend_title=legend_title, - plot_bgcolor='rgba(0,0,0,0)', # Transparent background - paper_bgcolor='rgba(0,0,0,0)', # Transparent paper background - font=dict(size=14), - margin=dict(t=80, b=50, l=30, r=30), - ) + # Add faceting if specified + if facet_col_param: + common_args['facet_col'] = facet_col_param + if facet_cols: + common_args['facet_col_wrap'] = facet_cols + + # Add animation if specified + if animate_by: + common_args['animation_frame'] = animate_by + + # Merge in additional imshow kwargs + common_args.update(imshow_kwargs) + + try: + fig = px.imshow(**common_args) + except Exception as e: + logger.error(f'Error creating imshow plot: {e}. Falling back to basic heatmap.') + # Fallback: create a simple heatmap without faceting + fallback_args = { + 'img': data.values, + 'color_continuous_scale': colors, + 'title': title, + } + fallback_args.update(imshow_kwargs) + fig = px.imshow(**fallback_args) return fig -def dual_pie_with_matplotlib( - data_left: pd.Series, - data_right: pd.Series, - colors: ColorType = 'viridis', +def heatmap_with_matplotlib( + data: xr.DataArray, + colors: ColorType | None = None, title: str = '', - subtitles: tuple[str, str] = ('Left Chart', 'Right Chart'), - legend_title: str = '', - hole: float = 0.2, - lower_percentage_group: float = 5.0, - figsize: tuple[int, int] = (14, 7), - fig: plt.Figure | None = None, - axes: list[plt.Axes] | None = None, -) -> tuple[plt.Figure, list[plt.Axes]]: + figsize: tuple[float, float] = (12, 6), + reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']] + | Literal['auto'] + | None = 'auto', + fill: Literal['ffill', 'bfill'] | None = 'ffill', + vmin: float | None = None, + vmax: float | None = None, + imshow_kwargs: dict[str, Any] | None = None, + cbar_kwargs: dict[str, Any] | None = None, + **kwargs: Any, +) -> tuple[plt.Figure, plt.Axes]: """ - Create two pie charts side by side with Matplotlib, with consistent coloring across both charts. - Leverages the existing pie_with_matplotlib function. + Plot a heatmap visualization using Matplotlib's imshow. + + This function creates a basic 2D heatmap from an xarray DataArray using matplotlib's + imshow function. For multi-dimensional data, only the first two dimensions are used. Args: - data_left: Series for the left pie chart. - data_right: Series for the right pie chart. - colors: Color specification, can be: - - A string with a colormap name (e.g., 'viridis', 'plasma') - - A list of color strings (e.g., ['#ff0000', '#00ff00']) - - A dictionary mapping category names to colors (e.g., {'Category1': '#ff0000'}) - title: The main title of the plot. - subtitles: Tuple containing the subtitles for (left, right) charts. - legend_title: The title for the legend. - hole: Size of the hole in the center for creating donut charts (0.0 to 1.0). - lower_percentage_group: Whether to group small segments (below percentage) into an "Other" category. + data: An xarray DataArray containing the data to visualize. Should have at least + 2 dimensions. 
If more than 2 dimensions exist, additional dimensions will + be reduced by taking the first slice. + colors: Color specification. Should be a colorscale name (e.g., 'turbo', 'RdBu'). + title: The title of the heatmap. figsize: The size of the figure (width, height) in inches. - fig: A Matplotlib figure object to plot on. If not provided, a new figure will be created. - axes: A list of Matplotlib axes objects to plot on. If not provided, new axes will be created. + reshape_time: Time reshaping configuration: + - 'auto' (default): Automatically applies ('D', 'h') if only 'time' dimension + - Tuple like ('D', 'h'): Explicit time reshaping (days vs hours) + - None: Disable time reshaping + fill: Method to fill missing values when reshaping time: 'ffill' or 'bfill'. Default is 'ffill'. + vmin: Minimum value for color scale. If None, uses data minimum. + vmax: Maximum value for color scale. If None, uses data maximum. + imshow_kwargs: Optional dict of parameters to pass to ax.imshow(). + Use this to customize image properties (e.g., interpolation, aspect). + cbar_kwargs: Optional dict of parameters to pass to plt.colorbar(). + Use this to customize colorbar properties (e.g., orientation, label). + **kwargs: Additional keyword arguments passed to ax.imshow(). + Common options include: + - interpolation: 'nearest', 'bilinear', 'bicubic', etc. + - alpha: Transparency level (0-1) + - extent: [left, right, bottom, top] for axis limits Returns: - A tuple containing the Matplotlib figure and list of axes objects used for the plot. - """ - # Check for empty data - if data_left.empty and data_right.empty: - logger.error('Both datasets are empty. Returning empty figure.') - if fig is None: - fig, axes = plt.subplots(1, 2, figsize=figsize) - return fig, axes - - # Create figure and axes if not provided - if fig is None or axes is None: - fig, axes = plt.subplots(1, 2, figsize=figsize) - - # Process series to handle negative values and apply minimum percentage threshold - def preprocess_series(series: pd.Series): - """ - Preprocess a series for pie chart display by handling negative values - and grouping the smallest parts together if they collectively represent - less than the specified percentage threshold. - """ - # Handle negative values - if (series < 0).any(): - logger.error('Negative values detected in data. Using absolute values for pie chart.') - series = series.abs() - - # Remove zeros - series = series[series > 0] - - # Apply minimum percentage threshold if needed - if lower_percentage_group and not series.empty: - total = series.sum() - if total > 0: - # Sort series by value (ascending) - sorted_series = series.sort_values() - - # Calculate cumulative percentage contribution - cumulative_percent = (sorted_series.cumsum() / total) * 100 - - # Find entries that collectively make up less than lower_percentage_group - to_group = cumulative_percent <= lower_percentage_group + A tuple containing the Matplotlib figure and axes objects used for the plot. - if to_group.sum() > 1: - # Create "Other" category for the smallest values that together are < threshold - other_sum = sorted_series[to_group].sum() + Notes: + - Matplotlib backend doesn't support faceting or animation. Use plotly engine for those features. + - The y-axis is automatically inverted to display data with origin at top-left. + - A colorbar is added to show the value scale. 
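+        - Pass vmin/vmax to pin the color scale, e.g. to make several heatmaps comparable.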
- # Keep only values that aren't in the "Other" group - result_series = series[~series.index.isin(sorted_series[to_group].index)] + Examples: + ```python + fig, ax = heatmap_with_matplotlib(data_array, colors='RdBu', title='Temperature') + plt.savefig('heatmap.png') + ``` - # Add the "Other" category if it has a value - if other_sum > 0: - result_series['Other'] = other_sum + Time reshaping: - return result_series + ```python + fig, ax = heatmap_with_matplotlib(data_array, reshape_time=('D', 'h')) + ``` + """ + if colors is None: + colors = CONFIG.Plotting.default_sequential_colorscale - return series + # Initialize kwargs if not provided + if imshow_kwargs is None: + imshow_kwargs = {} + if cbar_kwargs is None: + cbar_kwargs = {} - # Preprocess data - data_left_processed = preprocess_series(data_left) - data_right_processed = preprocess_series(data_right) + # Merge any additional kwargs into imshow_kwargs + # This allows users to pass imshow options directly + imshow_kwargs.update(kwargs) - # Convert Series to DataFrames for pie_with_matplotlib - df_left = pd.DataFrame(data_left_processed).T if not data_left_processed.empty else pd.DataFrame() - df_right = pd.DataFrame(data_right_processed).T if not data_right_processed.empty else pd.DataFrame() + # Handle empty data + if data.size == 0: + fig, ax = plt.subplots(figsize=figsize) + return fig, ax - # Get unique set of all labels for consistent coloring - all_labels = sorted(set(data_left_processed.index) | set(data_right_processed.index)) + # Apply time reshaping using the new unified function + # Matplotlib doesn't support faceting/animation, so we pass None for those + data = reshape_data_for_heatmap(data, reshape_time=reshape_time, facet_by=None, animate_by=None, fill=fill) - # Get consistent color mapping for both charts using our unified function - color_map = ColorProcessor(engine='matplotlib').process_colors(colors, all_labels, return_mapping=True) + # Handle single-dimension case by adding variable name as a dimension + if isinstance(data, xr.DataArray) and len(data.dims) == 1: + var_name = data.name if data.name else 'value' + data = data.expand_dims({'variable': [var_name]}) + logger.debug(f'Only 1 dimension in data. Added variable dimension: {var_name}') - # Configure colors for each DataFrame based on the consistent mapping - left_colors = [color_map[col] for col in df_left.columns] if not df_left.empty else [] - right_colors = [color_map[col] for col in df_right.columns] if not df_right.empty else [] + # Create figure and axes + fig, ax = plt.subplots(figsize=figsize) - # Create left pie chart - if not df_left.empty: - pie_with_matplotlib(data=df_left, colors=left_colors, title=subtitles[0], hole=hole, fig=fig, ax=axes[0]) - else: - axes[0].set_title(subtitles[0]) - axes[0].axis('off') + # Extract data values + # If data has more than 2 dimensions, we need to reduce it + if isinstance(data, xr.DataArray): + # Get the first 2 dimensions + dims = list(data.dims) + if len(dims) > 2: + logger.warning( + f'Data has {len(dims)} dimensions: {dims}. ' + f'Only the first 2 will be used for the heatmap. ' + f'Use the plotly engine for faceting/animation support.' 
+            )
+            # Select only the first 2 dimensions by taking first slice of others
+            selection = {dim: 0 for dim in dims[2:]}
+            data = data.isel(selection)

-    # Create right pie chart
-    if not df_right.empty:
-        pie_with_matplotlib(data=df_right, colors=right_colors, title=subtitles[1], hole=hole, fig=fig, ax=axes[1])
+        values = data.values
+        x_labels = data.dims[1] if len(data.dims) > 1 else 'x'
+        y_labels = data.dims[0] if len(data.dims) > 0 else 'y'
     else:
-        axes[1].set_title(subtitles[1])
-        axes[1].axis('off')
-
-    # Add main title
-    if title:
-        fig.suptitle(title, fontsize=16, y=0.98)
+        values = data
+        x_labels = 'x'
+        y_labels = 'y'
+
+    # Create the heatmap using imshow with user customizations
+    imshow_defaults = {'cmap': colors, 'aspect': 'auto', 'origin': 'upper', 'vmin': vmin, 'vmax': vmax}
+    imshow_defaults.update(imshow_kwargs)  # User kwargs override defaults
+    im = ax.imshow(values, **imshow_defaults)
+
+    # Add colorbar with user customizations
+    cbar_defaults = {'ax': ax, 'orientation': 'horizontal', 'pad': 0.1, 'aspect': 15, 'fraction': 0.05}
+    cbar_defaults.update(cbar_kwargs)  # User kwargs override defaults
+    cbar = plt.colorbar(im, **cbar_defaults)
+
+    # Set colorbar label if not overridden by user
+    if 'label' not in cbar_kwargs:
+        cbar.set_label('Value')
+
+    # Set labels and title
+    ax.set_xlabel(str(x_labels).capitalize())
+    ax.set_ylabel(str(y_labels).capitalize())
+    ax.set_title(title)

-    # Adjust layout
+    # Apply tight layout
     fig.tight_layout()

-    # Create a unified legend if both charts have data
-    if not df_left.empty and not df_right.empty:
-        # Remove individual legends
-        for ax in axes:
-            if ax.get_legend():
-                ax.get_legend().remove()
-
-        # Create handles for the unified legend
-        handles = []
-        labels_for_legend = []
-
-        for label in all_labels:
-            color = color_map[label]
-            patch = plt.Line2D([0], [0], marker='o', color='w', markerfacecolor=color, markersize=10, label=label)
-            handles.append(patch)
-            labels_for_legend.append(label)
-
-        # Add unified legend
-        fig.legend(
-            handles=handles,
-            labels=labels_for_legend,
-            title=legend_title,
-            loc='lower center',
-            bbox_to_anchor=(0.5, 0),
-            ncol=min(len(all_labels), 5),  # Limit columns to 5 for readability
-        )
-
-        # Add padding at the bottom for the legend
-        fig.subplots_adjust(bottom=0.2)
-
-    return fig, axes
+    return fig, ax


 def export_figure(
@@ -1418,8 +1562,9 @@ def export_figure(
     default_path: pathlib.Path,
     default_filetype: str | None = None,
     user_path: pathlib.Path | None = None,
-    show: bool = True,
+    show: bool | None = None,
     save: bool = False,
+    dpi: int | None = None,
 ) -> go.Figure | tuple[plt.Figure, plt.Axes]:
     """
     Export a figure to a file and/or show it.

     Args:
         default_path: The default file path if no user filename is provided.
         default_filetype: The default filetype if the path doesn't end with a filetype.
         user_path: An optional user-specified file path.
-        show: Whether to display the figure (default: True).
+        show: Whether to display the figure. If None, uses CONFIG.Plotting.default_show (default: None).
         save: Whether to save the figure (default: False).
+        dpi: DPI (dots per inch) for saving Matplotlib figures. If None, uses CONFIG.Plotting.default_dpi.

     Raises:
         ValueError: If no default filetype is provided and the path doesn't specify a filetype.
         TypeError: If the figure type is not supported.
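+
+    Examples:
+        A minimal sketch, assuming `fig` is an already created Plotly figure and
+        the path is purely illustrative:
+
+        ```python
+        import pathlib
+
+        export_figure(
+            fig,
+            default_path=pathlib.Path('results/my_plot'),
+            default_filetype='.html',
+            save=True,
+            show=False,
+        )
+        ```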
""" + # Apply CONFIG defaults if not explicitly set + if show is None: + show = CONFIG.Plotting.default_show + + if dpi is None: + dpi = CONFIG.Plotting.default_dpi + filename = user_path or default_path filename = filename.with_name(filename.name.replace('|', '__')) if filename.suffix == '': @@ -1450,25 +1603,17 @@ def export_figure( filename = filename.with_suffix('.html') try: - is_test_env = 'PYTEST_CURRENT_TEST' in os.environ - - if is_test_env: - # Test environment: never open browser, only save if requested - if save: - fig.write_html(str(filename)) - # Ignore show flag in tests - else: - # Production environment: respect show and save flags - if save and show: - # Save and auto-open in browser - plotly.offline.plot(fig, filename=str(filename)) - elif save and not show: - # Save without opening - fig.write_html(str(filename)) - elif show and not save: - # Show interactively without saving - fig.show() - # If neither save nor show: do nothing + # Respect show and save flags (tests should set CONFIG.Plotting.default_show=False) + if save and show: + # Save and auto-open in browser + plotly.offline.plot(fig, filename=str(filename)) + elif save and not show: + # Save without opening + fig.write_html(str(filename)) + elif show and not save: + # Show interactively without saving + fig.show() + # If neither save nor show: do nothing finally: # Cleanup to prevent socket warnings if hasattr(fig, '_renderer'): @@ -1479,16 +1624,15 @@ def export_figure( elif isinstance(figure_like, tuple): fig, ax = figure_like if show: - # Only show if using interactive backend and not in test environment + # Only show if using interactive backend (tests should set CONFIG.Plotting.default_show=False) backend = matplotlib.get_backend().lower() is_interactive = backend not in {'agg', 'pdf', 'ps', 'svg', 'template'} - is_test_env = 'PYTEST_CURRENT_TEST' in os.environ - if is_interactive and not is_test_env: + if is_interactive: plt.show() if save: - fig.savefig(str(filename), dpi=300) + fig.savefig(str(filename), dpi=dpi) plt.close(fig) # Close figure to free memory return fig, ax diff --git a/flixopt/results.py b/flixopt/results.py index 2e951af70..26eaf9d5d 100644 --- a/flixopt/results.py +++ b/flixopt/results.py @@ -1,7 +1,7 @@ from __future__ import annotations +import copy import datetime -import json import logging import pathlib import warnings @@ -10,16 +10,18 @@ import linopy import numpy as np import pandas as pd -import plotly import xarray as xr -import yaml from . import io as fx_io from . import plotting +from .color_processing import process_colors +from .config import CONFIG from .flow_system import FlowSystem +from .structure import CompositeContainerMixin, ElementContainer, ResultsContainer if TYPE_CHECKING: import matplotlib.pyplot as plt + import plotly import pyvis from .calculation import Calculation, SegmentedCalculation @@ -29,13 +31,30 @@ logger = logging.getLogger('flixopt') +def load_mapping_from_file(path: pathlib.Path) -> dict[str, str | list[str]]: + """Load color mapping from JSON or YAML file. + + Tries loader based on file suffix first, with fallback to the other format. 
+ + Args: + path: Path to config file (.json or .yaml/.yml) + + Returns: + Dictionary mapping components to colors or colorscales to component lists + + Raises: + ValueError: If file cannot be loaded as JSON or YAML + """ + return fx_io.load_config_file(path) + + class _FlowSystemRestorationError(Exception): """Exception raised when a FlowSystem cannot be restored from dataset.""" pass -class CalculationResults: +class CalculationResults(CompositeContainerMixin['ComponentResults | BusResults | EffectResults | FlowResults']): """Comprehensive container for optimization calculation results and analysis tools. This class provides unified access to all optimization results including flow rates, @@ -107,6 +126,20 @@ class CalculationResults: ).mean() ``` + Configure automatic color management for plots: + + ```python + # Dict-based configuration: + results.setup_colors({'Solar*': 'Oranges', 'Wind*': 'Blues', 'Battery': 'green'}) + + # All plots automatically use configured colors (colors=None is the default) + results['ElectricityBus'].plot_node_balance() + results['Battery'].plot_charge_state() + + # Override when needed + results['ElectricityBus'].plot_node_balance(colors='turbo') # Ignores setup + ``` + Design Patterns: **Factory Methods**: Use `from_file()` and `from_calculation()` for creation or access directly from `Calculation.results` **Dictionary Access**: Use `results[element_label]` for element-specific results @@ -137,8 +170,7 @@ def from_file(cls, folder: str | pathlib.Path, name: str) -> CalculationResults: except Exception as e: logger.critical(f'Could not load the linopy model "{name}" from file ("{paths.linopy_model}"): {e}') - with open(paths.summary, encoding='utf-8') as f: - summary = yaml.load(f, Loader=yaml.FullLoader) + summary = fx_io.load_yaml(paths.summary) return cls( solution=fx_io.load_dataset_from_netcdf(paths.solution), @@ -195,8 +227,8 @@ def __init__( if 'flow_system' in kwargs and flow_system_data is None: flow_system_data = kwargs.pop('flow_system') warnings.warn( - "The 'flow_system' parameter is deprecated. Use 'flow_system_data' instead." - "Acess is now by '.flow_system_data', while '.flow_system' returns the restored FlowSystem.", + "The 'flow_system' parameter is deprecated. Use 'flow_system_data' instead. 
" + "Access is now via '.flow_system_data', while '.flow_system' returns the restored FlowSystem.", DeprecationWarning, stacklevel=2, ) @@ -207,13 +239,18 @@ def __init__( self.name = name self.model = model self.folder = pathlib.Path(folder) if folder is not None else pathlib.Path.cwd() / 'results' - self.components = { + + # Create ResultsContainers for better access patterns + components_dict = { label: ComponentResults(self, **infos) for label, infos in self.solution.attrs['Components'].items() } + self.components = ResultsContainer(elements=components_dict, element_type_name='component results') - self.buses = {label: BusResults(self, **infos) for label, infos in self.solution.attrs['Buses'].items()} + buses_dict = {label: BusResults(self, **infos) for label, infos in self.solution.attrs['Buses'].items()} + self.buses = ResultsContainer(elements=buses_dict, element_type_name='bus results') - self.effects = {label: EffectResults(self, **infos) for label, infos in self.solution.attrs['Effects'].items()} + effects_dict = {label: EffectResults(self, **infos) for label, infos in self.solution.attrs['Effects'].items()} + self.effects = ResultsContainer(elements=effects_dict, element_type_name='effect results') if 'Flows' not in self.solution.attrs: warnings.warn( @@ -221,15 +258,19 @@ def __init__( 'is not availlable. We recommend to evaluate your results with a version <2.2.0.', stacklevel=2, ) - self.flows = {} + flows_dict = {} + self._has_flow_data = False else: - self.flows = { + flows_dict = { label: FlowResults(self, **infos) for label, infos in self.solution.attrs.get('Flows', {}).items() } + self._has_flow_data = True + self.flows = ResultsContainer(elements=flows_dict, element_type_name='flow results') self.timesteps_extra = self.solution.indexes['time'] self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.timesteps_extra) self.scenarios = self.solution.indexes['scenario'] if 'scenario' in self.solution.indexes else None + self.periods = self.solution.indexes['period'] if 'period' in self.solution.indexes else None self._effect_share_factors = None self._flow_system = None @@ -239,16 +280,24 @@ def __init__( self._sizes = None self._effects_per_component = None - def __getitem__(self, key: str) -> ComponentResults | BusResults | EffectResults: - if key in self.components: - return self.components[key] - if key in self.buses: - return self.buses[key] - if key in self.effects: - return self.effects[key] - if key in self.flows: - return self.flows[key] - raise KeyError(f'No element with label {key} found.') + self.colors: dict[str, str] = {} + + def _get_container_groups(self) -> dict[str, ResultsContainer]: + """Return ordered container groups for CompositeContainerMixin.""" + return { + 'Components': self.components, + 'Buses': self.buses, + 'Effects': self.effects, + 'Flows': self.flows, + } + + def __repr__(self) -> str: + """Return grouped representation of all results.""" + r = fx_io.format_title_with_underline(self.__class__.__name__, '=') + r += f'Name: "{self.name}"\nFolder: {self.folder}\n' + # Add grouped container view + r += '\n' + self._format_grouped_containers() + return r @property def storages(self) -> list[ComponentResults]: @@ -305,6 +354,131 @@ def flow_system(self) -> FlowSystem: logger.level = old_level return self._flow_system + def setup_colors( + self, + config: dict[str, str | list[str]] | str | pathlib.Path | None = None, + default_colorscale: str | None = None, + ) -> dict[str, str]: + """ + Setup colors for all variables across all 
elements. Overwrites existing ones. + + Args: + config: Configuration for color assignment. Can be: + - dict: Maps components to colors/colorscales: + * 'component1': 'red' # Single component to single color + * 'component1': '#FF0000' # Single component to hex color + - OR maps colorscales to multiple components: + * 'colorscale_name': ['component1', 'component2'] # Colorscale across components + - str: Path to a JSON/YAML config file or a colorscale name to apply to all + - Path: Path to a JSON/YAML config file + - None: Use default_colorscale for all components + default_colorscale: Default colorscale for unconfigured components (default: 'turbo') + + Examples: + setup_colors({ + # Direct component-to-color mappings + 'Boiler1': '#FF0000', + 'CHP': 'darkred', + # Colorscale for multiple components + 'Oranges': ['Solar1', 'Solar2'], + 'Blues': ['Wind1', 'Wind2'], + 'Greens': ['Battery1', 'Battery2', 'Battery3'], + }) + + Returns: + Complete variable-to-color mapping dictionary + """ + + def get_all_variable_names(comp: str) -> list[str]: + """Collect all variables from the component, including flows and flow_hours.""" + comp_object = self.components[comp] + var_names = [comp] + list(comp_object._variable_names) + for flow in comp_object.flows: + var_names.extend([flow, f'{flow}|flow_hours']) + return var_names + + # Set default colorscale if not provided + if default_colorscale is None: + default_colorscale = CONFIG.Plotting.default_qualitative_colorscale + + # Handle different config input types + if config is None: + # Apply default colorscale to all components + config_dict = {} + elif isinstance(config, (str, pathlib.Path)): + # Try to load from file first + config_path = pathlib.Path(config) + if config_path.exists(): + # Load config from file using helper + config_dict = load_mapping_from_file(config_path) + else: + # Treat as colorscale name to apply to all components + all_components = list(self.components.keys()) + config_dict = {config: all_components} + elif isinstance(config, dict): + config_dict = config + else: + raise TypeError(f'config must be dict, str, Path, or None, got {type(config)}') + + # Step 1: Build component-to-color mapping + component_colors: dict[str, str] = {} + + # Track which components are configured + configured_components = set() + + # Process each configuration entry + for key, value in config_dict.items(): + # Check if value is a list (colorscale -> [components]) + # or a string (component -> color OR colorscale -> [components]) + + if isinstance(value, list): + # key is colorscale, value is list of components + # Format: 'Blues': ['Wind1', 'Wind2'] + components = value + colorscale_name = key + + # Validate components exist + for component in components: + if component not in self.components: + raise ValueError(f"Component '{component}' not found") + + configured_components.update(components) + + # Use process_colors to get one color per component from the colorscale + colors_for_components = process_colors(colorscale_name, components) + component_colors.update(colors_for_components) + + elif isinstance(value, str): + # Check if key is an existing component + if key in self.components: + # Format: 'CHP': 'red' (component -> color) + component, color = key, value + + configured_components.add(component) + component_colors[component] = color + else: + raise ValueError(f"Component '{key}' not found") + else: + raise TypeError(f'Config value must be str or list, got {type(value)}') + + # Step 2: Assign colors to remaining unconfigured components + 
remaining_components = list(set(self.components.keys()) - configured_components)
+        if remaining_components:
+            # Use default colorscale to assign one color per remaining component
+            default_colors = process_colors(default_colorscale, remaining_components)
+            component_colors.update(default_colors)
+
+        # Step 3: Build variable-to-color mapping
+        # Clear existing colors to avoid stale keys
+        self.colors = {}
+        # Each component's variables all get the same color as the component
+        for component, color in component_colors.items():
+            variable_names = get_all_variable_names(component)
+            for var_name in variable_names:
+                self.colors[var_name] = color
+
+        return self.colors
+
     def filter_solution(
         self,
         variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,
@@ -388,6 +562,8 @@ def flow_rates(
         To recombine filtered dataarrays, use `xr.concat` with dim 'flow':
         >>> xr.concat([results.flow_rates(start='Fernwärme'), results.flow_rates(end='Fernwärme')], dim='flow')
         """
+        if not self._has_flow_data:
+            raise ValueError('Flow data is not available in this results object (pre-v2.2.0).')
         if self._flow_rates is None:
             self._flow_rates = self._assign_flow_coords(
                 xr.concat(
@@ -449,6 +625,8 @@ def sizes(
         >>> xr.concat([results.sizes(start='Fernwärme'), results.sizes(end='Fernwärme')], dim='flow')

         """
+        if not self._has_flow_data:
+            raise ValueError('Flow data is not available in this results object (pre-v2.2.0).')
         if self._sizes is None:
             self._sizes = self._assign_flow_coords(
                 xr.concat(
@@ -461,11 +639,12 @@ def sizes(

     def _assign_flow_coords(self, da: xr.DataArray):
         # Add start and end coordinates
+        flows_list = list(self.flows.values())
         da = da.assign_coords(
             {
-                'start': ('flow', [flow.start for flow in self.flows.values()]),
-                'end': ('flow', [flow.end for flow in self.flows.values()]),
-                'component': ('flow', [flow.component for flow in self.flows.values()]),
+                'start': ('flow', [flow.start for flow in flows_list]),
+                'end': ('flow', [flow.end for flow in flows_list]),
+                'component': ('flow', [flow.component for flow in flows_list]),
             }
         )

@@ -584,8 +763,6 @@ def _compute_effect_total(
             temporal = temporal.sum('time')
         if periodic.isnull().all():
             return temporal.rename(f'{element}->{effect}')
-        if 'time' in temporal.indexes:
-            temporal = temporal.sum('time')
         return periodic + temporal

         total = xr.DataArray(0)
@@ -619,6 +796,30 @@ def _compute_effect_total(
             total = xr.DataArray(np.nan)
         return total.rename(f'{element}->{effect}({mode})')

+    def _create_template_for_mode(self, mode: Literal['temporal', 'periodic', 'total']) -> xr.DataArray:
+        """Create a template DataArray with the correct dimensions for a given mode.
+
+        Args:
+            mode: The calculation mode ('temporal', 'periodic', or 'total').
+
+        Returns:
+            A DataArray filled with NaN, with dimensions appropriate for the mode.
+        """
+        coords = {}
+        if mode == 'temporal':
+            coords['time'] = self.timesteps_extra
+        if self.periods is not None:
+            coords['period'] = self.periods
+        if self.scenarios is not None:
+            coords['scenario'] = self.scenarios
+
+        # Create template with appropriate shape
+        if coords:
+            shape = tuple(len(coords[dim]) for dim in coords)
+            return xr.DataArray(np.full(shape, np.nan, dtype=float), coords=coords, dims=list(coords.keys()))
+        else:
+            return xr.DataArray(np.nan)
+
     def _create_effects_dataset(self, mode: Literal['temporal', 'periodic', 'total']) -> xr.Dataset:
         """Creates a dataset containing effect totals for all components (including their flows).
The dataset does contain the direct as well as the indirect effects of each component. @@ -629,32 +830,23 @@ def _create_effects_dataset(self, mode: Literal['temporal', 'periodic', 'total'] Returns: An xarray Dataset with components as dimension and effects as variables. """ + # Create template with correct dimensions for this mode + template = self._create_template_for_mode(mode) + ds = xr.Dataset() all_arrays = {} - template = None # Template is needed to determine the dimensions of the arrays. This handles the case of no shares for an effect - components_list = list(self.components) - # First pass: collect arrays and find template + # Collect arrays for all effects and components for effect in self.effects: effect_arrays = [] for component in components_list: da = self._compute_effect_total(element=component, effect=effect, mode=mode, include_flows=True) effect_arrays.append(da) - if template is None and (da.dims or not da.isnull().all()): - template = da - all_arrays[effect] = effect_arrays - # Ensure we have a template - if template is None: - raise ValueError( - f"No template with proper dimensions found for mode '{mode}'. " - f'All computed arrays are scalars, which indicates a data issue.' - ) - - # Second pass: process all effects (guaranteed to include all) + # Process all effects: expand scalar NaN arrays to match template dimensions for effect in self.effects: dataarrays = all_arrays[effect] component_arrays = [] @@ -687,68 +879,145 @@ def _create_effects_dataset(self, mode: Literal['temporal', 'periodic', 'total'] def plot_heatmap( self, - variable_name: str, - heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D', - heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h', - color_map: str = 'portland', + variable_name: str | list[str], save: bool | pathlib.Path = False, - show: bool = True, + show: bool | None = None, + colors: plotting.ColorType | None = None, engine: plotting.PlottingEngine = 'plotly', + select: dict[FlowSystemDimensions, Any] | None = None, + facet_by: str | list[str] | None = 'scenario', + animate_by: str | None = 'period', + facet_cols: int | None = None, + reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']] + | Literal['auto'] + | None = 'auto', + fill: Literal['ffill', 'bfill'] | None = 'ffill', + # Deprecated parameters (kept for backwards compatibility) indexer: dict[FlowSystemDimensions, Any] | None = None, + heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] | None = None, + heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] | None = None, + color_map: str | None = None, + **plot_kwargs: Any, ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]: """ - Plots a heatmap of the solution of a variable. + Plots a heatmap visualization of a variable using imshow or time-based reshaping. + + Supports multiple visualization features that can be combined: + - **Multi-variable**: Plot multiple variables on a single heatmap (creates 'variable' dimension) + - **Time reshaping**: Converts 'time' dimension into 2D (e.g., hours vs days) + - **Faceting**: Creates subplots for different dimension values + - **Animation**: Animates through dimension values (Plotly only) Args: - variable_name: The name of the variable to plot. - heatmap_timeframes: The timeframes to use for the heatmap. - heatmap_timesteps_per_frame: The timesteps per frame to use for the heatmap. - color_map: The color map to use for the heatmap. 
+ variable_name: The name of the variable to plot, or a list of variable names. + When a list is provided, variables are combined into a single DataArray + with a new 'variable' dimension. save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location. show: Whether to show the plot or not. + colors: Color scheme for the heatmap. See `flixopt.plotting.ColorType` for options. engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'. - indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}. - If None, uses first value for each dimension. - If empty dict {}, uses all values. + select: Optional data selection dict. Supports single values, lists, slices, and index arrays. + Applied BEFORE faceting/animation/reshaping. + facet_by: Dimension(s) to create facets (subplots) for. Can be a single dimension name (str) + or list of dimensions. Each unique value combination creates a subplot. Ignored if not found. + animate_by: Dimension to animate over (Plotly only). Creates animation frames that cycle through + dimension values. Only one dimension can be animated. Ignored if not found. + facet_cols: Number of columns in the facet grid layout (default: 3). + reshape_time: Time reshaping configuration (default: 'auto'): + - 'auto': Automatically applies ('D', 'h') when only 'time' dimension remains + - Tuple: Explicit reshaping, e.g. ('D', 'h') for days vs hours, + ('MS', 'D') for months vs days, ('W', 'h') for weeks vs hours + - None: Disable auto-reshaping (will error if only 1D time data) + Supported timeframes: 'YS', 'MS', 'W', 'D', 'h', '15min', 'min' + fill: Method to fill missing values after reshape: 'ffill' (forward fill) or 'bfill' (backward fill). + Default is 'ffill'. + **plot_kwargs: Additional plotting customization options. + Common options: + + - **dpi** (int): Export resolution for saved plots. Default: 300. + + For heatmaps specifically: + + - **vmin** (float): Minimum value for color scale (both engines). + - **vmax** (float): Maximum value for color scale (both engines). + + For Matplotlib heatmaps: + + - **imshow_kwargs** (dict): Additional kwargs for matplotlib's imshow (e.g., interpolation, aspect). + - **cbar_kwargs** (dict): Additional kwargs for colorbar customization. Examples: - Basic usage (uses first scenario, first period, all time): + Direct imshow mode (default): + + >>> results.plot_heatmap('Battery|charge_state', select={'scenario': 'base'}) + + Facet by scenario: - >>> results.plot_heatmap('Battery|charge_state') + >>> results.plot_heatmap('Boiler(Qth)|flow_rate', facet_by='scenario', facet_cols=2) - Select specific scenario and period: + Animate by period: - >>> results.plot_heatmap('Boiler(Qth)|flow_rate', indexer={'scenario': 'base', 'period': 2024}) + >>> results.plot_heatmap('Boiler(Qth)|flow_rate', select={'scenario': 'base'}, animate_by='period') - Time filtering (summer months only): + Time reshape mode - daily patterns: + + >>> results.plot_heatmap('Boiler(Qth)|flow_rate', select={'scenario': 'base'}, reshape_time=('D', 'h')) + + Combined: time reshaping with faceting and animation: >>> results.plot_heatmap( - ... 'Boiler(Qth)|flow_rate', - ... indexer={ - ... 'scenario': 'base', - ... 'time': results.solution.time[results.solution.time.dt.month.isin([6, 7, 8])], - ... }, + ... 'Boiler(Qth)|flow_rate', facet_by='scenario', animate_by='period', reshape_time=('D', 'h') ... 
) - Save to specific location: + Multi-variable heatmap (variables as one axis): >>> results.plot_heatmap( - ... 'Boiler(Qth)|flow_rate', indexer={'scenario': 'base'}, save='path/to/my_heatmap.html' + ... ['Boiler(Q_th)|flow_rate', 'CHP(Q_th)|flow_rate', 'HeatStorage|charge_state'], + ... select={'scenario': 'base', 'period': 1}, + ... reshape_time=None, ... ) - """ - dataarray = self.solution[variable_name] + Multi-variable with time reshaping: + + >>> results.plot_heatmap( + ... ['Boiler(Q_th)|flow_rate', 'CHP(Q_th)|flow_rate'], + ... facet_by='scenario', + ... animate_by='period', + ... reshape_time=('D', 'h'), + ... ) + + High-resolution export with custom color range: + + >>> results.plot_heatmap('Battery|charge_state', save=True, dpi=600, vmin=0, vmax=100) + + Matplotlib heatmap with custom imshow settings: + + >>> results.plot_heatmap( + ... 'Boiler(Q_th)|flow_rate', + ... engine='matplotlib', + ... imshow_kwargs={'interpolation': 'bilinear', 'aspect': 'auto'}, + ... ) + """ + # Delegate to module-level plot_heatmap function return plot_heatmap( - dataarray=dataarray, - name=variable_name, + data=self.solution[variable_name], + name=variable_name if isinstance(variable_name, str) else None, folder=self.folder, - heatmap_timeframes=heatmap_timeframes, - heatmap_timesteps_per_frame=heatmap_timesteps_per_frame, - color_map=color_map, + colors=colors, save=save, show=show, engine=engine, + select=select, + facet_by=facet_by, + animate_by=animate_by, + facet_cols=facet_cols, + reshape_time=reshape_time, + fill=fill, indexer=indexer, + heatmap_timeframes=heatmap_timeframes, + heatmap_timesteps_per_frame=heatmap_timesteps_per_frame, + color_map=color_map, + **plot_kwargs, ) def plot_network( @@ -805,14 +1074,13 @@ def to_file( fx_io.save_dataset_to_netcdf(self.solution, paths.solution, compression=compression) fx_io.save_dataset_to_netcdf(self.flow_system_data, paths.flow_system, compression=compression) - with open(paths.summary, 'w', encoding='utf-8') as f: - yaml.dump(self.summary, f, allow_unicode=True, sort_keys=False, indent=4, width=1000) + fx_io.save_yaml(self.summary, paths.summary, compact_numeric_lists=True) if save_linopy_model: if self.model is None: logger.critical('No model in the CalculationResults. 
Saving the model is not possible.') else: - self.model.to_netcdf(paths.linopy_model, engine='h5netcdf') + self.model.to_netcdf(paths.linopy_model, engine='netcdf4') if document_model: if self.model is None: @@ -856,6 +1124,14 @@ def constraints(self) -> linopy.Constraints: raise ValueError('The linopy model is not available.') return self._calculation_results.model.constraints[self._constraint_names] + def __repr__(self) -> str: + """Return string representation with element info and dataset preview.""" + class_name = self.__class__.__name__ + header = f'{class_name}: "{self.label}"' + sol = self.solution.copy(deep=False) + sol.attrs = {} + return f'{header}\n{"-" * len(header)}\n{repr(sol)}' + def filter_solution( self, variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None, @@ -917,54 +1193,182 @@ def __init__( def plot_node_balance( self, save: bool | pathlib.Path = False, - show: bool = True, - colors: plotting.ColorType = 'viridis', + show: bool | None = None, + colors: plotting.ColorType | None = None, engine: plotting.PlottingEngine = 'plotly', - indexer: dict[FlowSystemDimensions, Any] | None = None, - mode: Literal['flow_rate', 'flow_hours'] = 'flow_rate', - style: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar', + select: dict[FlowSystemDimensions, Any] | None = None, + unit_type: Literal['flow_rate', 'flow_hours'] = 'flow_rate', + mode: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar', drop_suffix: bool = True, + facet_by: str | list[str] | None = 'scenario', + animate_by: str | None = 'period', + facet_cols: int | None = None, + # Deprecated parameter (kept for backwards compatibility) + indexer: dict[FlowSystemDimensions, Any] | None = None, + **plot_kwargs: Any, ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]: """ - Plots the node balance of the Component or Bus. + Plots the node balance of the Component or Bus with optional faceting and animation. + Args: save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location. show: Whether to show the plot or not. colors: The colors to use for the plot. See `flixopt.plotting.ColorType` for options. engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'. - indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}. - If None, uses first value for each dimension (except time). - If empty dict {}, uses all values. - style: The style to use for the dataset. Can be 'flow_rate' or 'flow_hours'. + select: Optional data selection dict. Supports: + - Single values: {'scenario': 'base', 'period': 2024} + - Multiple values: {'scenario': ['base', 'high', 'renewable']} + - Slices: {'time': slice('2024-01', '2024-06')} + - Index arrays: {'time': time_array} + Note: Applied BEFORE faceting/animation. + unit_type: The unit type to use for the dataset. Can be 'flow_rate' or 'flow_hours'. - 'flow_rate': Returns the flow_rates of the Node. - 'flow_hours': Returns the flow_hours of the Node. [flow_hours(t) = flow_rate(t) * dt(t)]. Renames suffixes to |flow_hours. + mode: The plotting mode. Use 'stacked_bar' for stacked bar charts, 'line' for stepped lines, or 'area' for stacked area charts. drop_suffix: Whether to drop the suffix from the variable names. + facet_by: Dimension(s) to create facets (subplots) for. Can be a single dimension name (str) + or list of dimensions. Each unique value combination creates a subplot. Ignored if not found. + Example: 'scenario' creates one subplot per scenario. 
+ Example: ['scenario', 'period'] creates a grid of subplots for each scenario-period combination. + animate_by: Dimension to animate over (Plotly only). Creates animation frames that cycle through + dimension values. Only one dimension can be animated. Ignored if not found. + facet_cols: Number of columns in the facet grid layout (default: 3). + **plot_kwargs: Additional plotting customization options passed to underlying plotting functions. + + Common options: + + - **dpi** (int): Export resolution in dots per inch. Default: 300. + + **For Plotly engine** (`engine='plotly'`): + + - Any Plotly Express parameter for px.bar()/px.line()/px.area() + Example: `range_y=[0, 100]`, `line_shape='linear'` + + **For Matplotlib engine** (`engine='matplotlib'`): + + - **plot_kwargs** (dict): Customize plot via `ax.bar()` or `ax.step()`. + Example: `plot_kwargs={'linewidth': 3, 'alpha': 0.7, 'edgecolor': 'black'}` + + See :func:`flixopt.plotting.with_plotly` and :func:`flixopt.plotting.with_matplotlib` + for complete parameter reference. + + Note: For Plotly, you can further customize the returned figure using `fig.update_traces()` + and `fig.update_layout()` after calling this method. + + Examples: + Basic plot (current behavior): + + >>> results['Boiler'].plot_node_balance() + + Facet by scenario: + + >>> results['Boiler'].plot_node_balance(facet_by='scenario', facet_cols=2) + + Animate by period: + + >>> results['Boiler'].plot_node_balance(animate_by='period') + + Facet by scenario AND animate by period: + + >>> results['Boiler'].plot_node_balance(facet_by='scenario', animate_by='period') + + Select single scenario, then facet by period: + + >>> results['Boiler'].plot_node_balance(select={'scenario': 'base'}, facet_by='period') + + Select multiple scenarios and facet by them: + + >>> results['Boiler'].plot_node_balance( + ... select={'scenario': ['base', 'high', 'renewable']}, facet_by='scenario' + ... ) + + Time range selection (summer months only): + + >>> results['Boiler'].plot_node_balance(select={'time': slice('2024-06', '2024-08')}, facet_by='scenario') + + High-resolution export for publication: + + >>> results['Boiler'].plot_node_balance(engine='matplotlib', save='figure.png', dpi=600) + + Plotly Express customization (e.g., set y-axis range): + + >>> results['Boiler'].plot_node_balance(range_y=[0, 100]) + + Custom matplotlib appearance: + + >>> results['Boiler'].plot_node_balance(engine='matplotlib', plot_kwargs={'linewidth': 3, 'alpha': 0.7}) + + Further customize Plotly figure after creation: + + >>> fig = results['Boiler'].plot_node_balance(mode='line', show=False) + >>> fig.update_traces(line={'width': 5, 'dash': 'dot'}) + >>> fig.update_layout(template='plotly_dark', width=1200, height=600) + >>> fig.show() """ - ds = self.node_balance(with_last_timestep=True, mode=mode, drop_suffix=drop_suffix, indexer=indexer) + # Handle deprecated indexer parameter + if indexer is not None: + # Check for conflict with new parameter + if select is not None: + raise ValueError( + "Cannot use both deprecated parameter 'indexer' and new parameter 'select'. Use only 'select'." + ) + + import warnings + + warnings.warn( + "The 'indexer' parameter is deprecated and will be removed in a future version. Use 'select' instead.", + DeprecationWarning, + stacklevel=2, + ) + select = indexer + + if engine not in {'plotly', 'matplotlib'}: + raise ValueError(f'Engine "{engine}" not supported. 
Use one of ["plotly", "matplotlib"]') - ds, suffix_parts = _apply_indexer_to_data(ds, indexer, drop=True) + # Extract dpi for export_figure + dpi = plot_kwargs.pop('dpi', None) # None uses CONFIG.Plotting.default_dpi + + # Don't pass select/indexer to node_balance - we'll apply it afterwards + ds = self.node_balance(with_last_timestep=False, unit_type=unit_type, drop_suffix=drop_suffix) + + ds, suffix_parts = _apply_selection_to_data(ds, select=select, drop=True) + + # Matplotlib requires only 'time' dimension; check for extras after selection + if engine == 'matplotlib': + extra_dims = [d for d in ds.dims if d != 'time'] + if extra_dims: + raise ValueError( + f'Matplotlib engine only supports a single time axis, but found extra dimensions: {extra_dims}. ' + f'Please use select={{...}} to reduce dimensions or switch to engine="plotly" for faceting/animation.' + ) suffix = '--' + '-'.join(suffix_parts) if suffix_parts else '' - title = f'{self.label} (flow rates){suffix}' if mode == 'flow_rate' else f'{self.label} (flow hours){suffix}' + title = ( + f'{self.label} (flow rates){suffix}' if unit_type == 'flow_rate' else f'{self.label} (flow hours){suffix}' + ) if engine == 'plotly': figure_like = plotting.with_plotly( - ds.to_dataframe(), - colors=colors, - style=style, + ds, + facet_by=facet_by, + animate_by=animate_by, + colors=colors if colors is not None else self._calculation_results.colors, + mode=mode, title=title, + facet_cols=facet_cols, + xlabel='Time in h', + **plot_kwargs, ) default_filetype = '.html' - elif engine == 'matplotlib': + else: figure_like = plotting.with_matplotlib( - ds.to_dataframe(), - colors=colors, - style=style, + ds, + colors=colors if colors is not None else self._calculation_results.colors, + mode=mode, title=title, + **plot_kwargs, ) default_filetype = '.png' - else: - raise ValueError(f'Engine "{engine}" not supported. Use "plotly" or "matplotlib"') return plotting.export_figure( figure_like=figure_like, @@ -973,19 +1377,31 @@ def plot_node_balance( user_path=None if isinstance(save, bool) else pathlib.Path(save), show=show, save=True if save else False, + dpi=dpi, ) def plot_node_balance_pie( self, lower_percentage_group: float = 5, - colors: plotting.ColorType = 'viridis', + colors: plotting.ColorType | None = None, text_info: str = 'percent+label+value', save: bool | pathlib.Path = False, - show: bool = True, + show: bool | None = None, engine: plotting.PlottingEngine = 'plotly', + select: dict[FlowSystemDimensions, Any] | None = None, + # Deprecated parameter (kept for backwards compatibility) indexer: dict[FlowSystemDimensions, Any] | None = None, + **plot_kwargs: Any, ) -> plotly.graph_objs.Figure | tuple[plt.Figure, list[plt.Axes]]: """Plot pie chart of flow hours distribution. + + Note: + Pie charts require scalar data (no extra dimensions beyond time). + If your data has dimensions like 'scenario' or 'period', either: + + - Use `select` to choose specific values: `select={'scenario': 'base', 'period': 2024}` + - Let auto-selection choose the first value (a warning will be logged) + Args: lower_percentage_group: Percentage threshold for "Others" grouping. colors: Color scheme. Also see plotly. @@ -993,10 +1409,57 @@ def plot_node_balance_pie( save: Whether to save plot. show: Whether to display plot. engine: Plotting engine ('plotly' or 'matplotlib'). - indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}. - If None, uses first value for each dimension. - If empty dict {}, uses all values. 
+ select: Optional data selection dict. Supports single values, lists, slices, and index arrays. + Use this to select specific scenario/period before creating the pie chart. + **plot_kwargs: Additional plotting customization options. + + Common options: + + - **dpi** (int): Export resolution in dots per inch. Default: 300. + - **hover_template** (str): Hover text template (Plotly only). + Example: `hover_template='%{label}: %{value} (%{percent})'` + - **text_position** (str): Text position ('inside', 'outside', 'auto'). + - **hole** (float): Size of donut hole (0.0 to 1.0). + + See :func:`flixopt.plotting.dual_pie_with_plotly` for complete reference. + + Examples: + Basic usage (auto-selects first scenario/period if present): + + >>> results['Bus'].plot_node_balance_pie() + + Explicitly select a scenario and period: + + >>> results['Bus'].plot_node_balance_pie(select={'scenario': 'high_demand', 'period': 2030}) + + Create a donut chart with custom hover text: + + >>> results['Bus'].plot_node_balance_pie(hole=0.4, hover_template='%{label}: %{value:.2f} (%{percent})') + + High-resolution export: + + >>> results['Bus'].plot_node_balance_pie(save='figure.png', dpi=600) """ + # Handle deprecated indexer parameter + if indexer is not None: + # Check for conflict with new parameter + if select is not None: + raise ValueError( + "Cannot use both deprecated parameter 'indexer' and new parameter 'select'. Use only 'select'." + ) + + import warnings + + warnings.warn( + "The 'indexer' parameter is deprecated and will be removed in a future version. Use 'select' instead.", + DeprecationWarning, + stacklevel=2, + ) + select = indexer + + # Extract dpi for export_figure + dpi = plot_kwargs.pop('dpi', None) # None uses CONFIG.Plotting.default_dpi + inputs = sanitize_dataset( ds=self.solution[self.inputs] * self._calculation_results.hours_per_timestep, threshold=1e-5, @@ -1012,25 +1475,58 @@ def plot_node_balance_pie( drop_suffix='|', ) - inputs, suffix_parts = _apply_indexer_to_data(inputs, indexer, drop=True) - outputs, suffix_parts = _apply_indexer_to_data(outputs, indexer, drop=True) - suffix = '--' + '-'.join(suffix_parts) if suffix_parts else '' - - title = f'{self.label} (total flow hours){suffix}' + inputs, suffix_parts_in = _apply_selection_to_data(inputs, select=select, drop=True) + outputs, suffix_parts_out = _apply_selection_to_data(outputs, select=select, drop=True) + suffix_parts = suffix_parts_in + suffix_parts_out + # Sum over time dimension inputs = inputs.sum('time') outputs = outputs.sum('time') + # Auto-select first value for any remaining dimensions (scenario, period, etc.) + # Pie charts need scalar data, so we automatically reduce extra dimensions + extra_dims_inputs = [dim for dim in inputs.dims if dim != 'time'] + extra_dims_outputs = [dim for dim in outputs.dims if dim != 'time'] + extra_dims = sorted(set(extra_dims_inputs + extra_dims_outputs)) + + if extra_dims: + auto_select = {} + for dim in extra_dims: + # Get first value of this dimension + if dim in inputs.coords: + first_val = inputs.coords[dim].values[0] + elif dim in outputs.coords: + first_val = outputs.coords[dim].values[0] + else: + continue + auto_select[dim] = first_val + logger.info( + f'Pie chart auto-selected {dim}={first_val} (first value). ' + f'Use select={{"{dim}": value}} to choose a different value.' 
+ ) + + # Apply auto-selection only for coords present in each dataset + inputs = inputs.sel({k: v for k, v in auto_select.items() if k in inputs.coords}) + outputs = outputs.sel({k: v for k, v in auto_select.items() if k in outputs.coords}) + + # Update suffix with auto-selected values + auto_suffix_parts = [f'{dim}={val}' for dim, val in auto_select.items()] + suffix_parts.extend(auto_suffix_parts) + + suffix = '--' + '-'.join(sorted(set(suffix_parts))) if suffix_parts else '' + title = f'{self.label} (total flow hours){suffix}' + if engine == 'plotly': figure_like = plotting.dual_pie_with_plotly( - data_left=inputs.to_pandas(), - data_right=outputs.to_pandas(), - colors=colors, + data_left=inputs, + data_right=outputs, + colors=colors if colors is not None else self._calculation_results.colors, title=title, text_info=text_info, subtitles=('Inputs', 'Outputs'), legend_title='Flows', lower_percentage_group=lower_percentage_group, + **plot_kwargs, ) default_filetype = '.html' elif engine == 'matplotlib': @@ -1038,11 +1534,12 @@ def plot_node_balance_pie( figure_like = plotting.dual_pie_with_matplotlib( data_left=inputs.to_pandas(), data_right=outputs.to_pandas(), - colors=colors, + colors=colors if colors is not None else self._calculation_results.colors, title=title, subtitles=('Inputs', 'Outputs'), legend_title='Flows', lower_percentage_group=lower_percentage_group, + **plot_kwargs, ) default_filetype = '.png' else: @@ -1055,6 +1552,7 @@ def plot_node_balance_pie( user_path=None if isinstance(save, bool) else pathlib.Path(save), show=show, save=True if save else False, + dpi=dpi, ) def node_balance( @@ -1063,8 +1561,10 @@ def node_balance( negate_outputs: bool = False, threshold: float | None = 1e-5, with_last_timestep: bool = False, - mode: Literal['flow_rate', 'flow_hours'] = 'flow_rate', + unit_type: Literal['flow_rate', 'flow_hours'] = 'flow_rate', drop_suffix: bool = False, + select: dict[FlowSystemDimensions, Any] | None = None, + # Deprecated parameter (kept for backwards compatibility) indexer: dict[FlowSystemDimensions, Any] | None = None, ) -> xr.Dataset: """ @@ -1074,14 +1574,29 @@ def node_balance( negate_outputs: Whether to negate the output flow_rates of the Node. threshold: The threshold for small values. Variables with all values below the threshold are dropped. with_last_timestep: Whether to include the last timestep in the dataset. - mode: The mode to use for the dataset. Can be 'flow_rate' or 'flow_hours'. + unit_type: The unit type to use for the dataset. Can be 'flow_rate' or 'flow_hours'. - 'flow_rate': Returns the flow_rates of the Node. - 'flow_hours': Returns the flow_hours of the Node. [flow_hours(t) = flow_rate(t) * dt(t)]. Renames suffixes to |flow_hours. drop_suffix: Whether to drop the suffix from the variable names. - indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}. - If None, uses first value for each dimension. - If empty dict {}, uses all values. + select: Optional data selection dict. Supports single values, lists, slices, and index arrays. """ + # Handle deprecated indexer parameter + if indexer is not None: + # Check for conflict with new parameter + if select is not None: + raise ValueError( + "Cannot use both deprecated parameter 'indexer' and new parameter 'select'. Use only 'select'." + ) + + import warnings + + warnings.warn( + "The 'indexer' parameter is deprecated and will be removed in a future version. 
Use 'select' instead.", + DeprecationWarning, + stacklevel=2, + ) + select = indexer + ds = self.solution[self.inputs + self.outputs] ds = sanitize_dataset( @@ -1100,9 +1615,9 @@ def node_balance( drop_suffix='|' if drop_suffix else None, ) - ds, _ = _apply_indexer_to_data(ds, indexer, drop=True) + ds, _ = _apply_selection_to_data(ds, select=select, drop=True) - if mode == 'flow_hours': + if unit_type == 'flow_hours': ds = ds * self._calculation_results.hours_per_timestep ds = ds.rename_vars({var: var.replace('flow_rate', 'flow_hours') for var in ds.data_vars}) @@ -1134,75 +1649,221 @@ def charge_state(self) -> xr.DataArray: def plot_charge_state( self, save: bool | pathlib.Path = False, - show: bool = True, - colors: plotting.ColorType = 'viridis', + show: bool | None = None, + colors: plotting.ColorType | None = None, engine: plotting.PlottingEngine = 'plotly', - style: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar', + mode: Literal['area', 'stacked_bar', 'line'] = 'area', + select: dict[FlowSystemDimensions, Any] | None = None, + facet_by: str | list[str] | None = 'scenario', + animate_by: str | None = 'period', + facet_cols: int | None = None, + # Deprecated parameter (kept for backwards compatibility) indexer: dict[FlowSystemDimensions, Any] | None = None, + **plot_kwargs: Any, ) -> plotly.graph_objs.Figure: - """Plot storage charge state over time, combined with the node balance. + """Plot storage charge state over time, combined with the node balance with optional faceting and animation. Args: save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location. show: Whether to show the plot or not. colors: Color scheme. Also see plotly. engine: Plotting engine to use. Only 'plotly' is implemented atm. - style: The colors to use for the plot. See `flixopt.plotting.ColorType` for options. - indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}. - If None, uses first value for each dimension. - If empty dict {}, uses all values. + mode: The plotting mode. Use 'stacked_bar' for stacked bar charts, 'line' for stepped lines, or 'area' for stacked area charts. + select: Optional data selection dict. Supports single values, lists, slices, and index arrays. + Applied BEFORE faceting/animation. + facet_by: Dimension(s) to create facets (subplots) for. Can be a single dimension name (str) + or list of dimensions. Each unique value combination creates a subplot. Ignored if not found. + animate_by: Dimension to animate over (Plotly only). Creates animation frames that cycle through + dimension values. Only one dimension can be animated. Ignored if not found. + facet_cols: Number of columns in the facet grid layout (default: 3). + **plot_kwargs: Additional plotting customization options passed to underlying plotting functions. + + Common options: + + - **dpi** (int): Export resolution in dots per inch. Default: 300. + + **For Plotly engine:** + + - Any Plotly Express parameter for px.bar()/px.line()/px.area() + Example: `range_y=[0, 100]`, `line_shape='linear'` + + **For Matplotlib engine:** + + - **plot_kwargs** (dict): Customize plot via `ax.bar()` or `ax.step()`. + + See :func:`flixopt.plotting.with_plotly` and :func:`flixopt.plotting.with_matplotlib` + for complete parameter reference. + + Note: For Plotly, you can further customize the returned figure using `fig.update_traces()` + and `fig.update_layout()` after calling this method. Raises: ValueError: If component is not a storage. 
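+
+        Note:
+            The charge state is drawn as a line overlay on top of the flow balance.
+            Its color can be set via the `charge_state_line_color` keyword
+            (default: 'black'):
+
+            >>> results['Storage'].plot_charge_state(charge_state_line_color='red')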
+ + Examples: + Basic plot: + + >>> results['Storage'].plot_charge_state() + + Facet by scenario: + + >>> results['Storage'].plot_charge_state(facet_by='scenario', facet_cols=2) + + Animate by period: + + >>> results['Storage'].plot_charge_state(animate_by='period') + + Facet by scenario AND animate by period: + + >>> results['Storage'].plot_charge_state(facet_by='scenario', animate_by='period') + + Custom layout after creation: + + >>> fig = results['Storage'].plot_charge_state(show=False) + >>> fig.update_layout(template='plotly_dark', height=800) + >>> fig.show() + + High-resolution export: + + >>> results['Storage'].plot_charge_state(save='storage.png', dpi=600) """ + # Handle deprecated indexer parameter + if indexer is not None: + # Check for conflict with new parameter + if select is not None: + raise ValueError( + "Cannot use both deprecated parameter 'indexer' and new parameter 'select'. Use only 'select'." + ) + + import warnings + + warnings.warn( + "The 'indexer' parameter is deprecated and will be removed in a future version. Use 'select' instead.", + DeprecationWarning, + stacklevel=2, + ) + select = indexer + + # Extract dpi for export_figure + dpi = plot_kwargs.pop('dpi', None) # None uses CONFIG.Plotting.default_dpi + + # Extract charge state line color (for overlay customization) + overlay_color = plot_kwargs.pop('charge_state_line_color', 'black') + if not self.is_storage: raise ValueError(f'Cant plot charge_state. "{self.label}" is not a storage') - ds = self.node_balance(with_last_timestep=True, indexer=indexer) - charge_state = self.charge_state + # Get node balance and charge state + ds = self.node_balance(with_last_timestep=True).fillna(0) + charge_state_da = self.charge_state - ds, suffix_parts = _apply_indexer_to_data(ds, indexer, drop=True) - charge_state, suffix_parts = _apply_indexer_to_data(charge_state, indexer, drop=True) + # Apply select filtering + ds, suffix_parts = _apply_selection_to_data(ds, select=select, drop=True) + charge_state_da, _ = _apply_selection_to_data(charge_state_da, select=select, drop=True) suffix = '--' + '-'.join(suffix_parts) if suffix_parts else '' title = f'Operation Balance of {self.label}{suffix}' if engine == 'plotly': - fig = plotting.with_plotly( - ds.to_dataframe(), - colors=colors, - style=style, + # Plot flows (node balance) with the specified mode + figure_like = plotting.with_plotly( + ds, + facet_by=facet_by, + animate_by=animate_by, + colors=colors if colors is not None else self._calculation_results.colors, + mode=mode, title=title, + facet_cols=facet_cols, + xlabel='Time in h', + **plot_kwargs, ) - # TODO: Use colors for charge state? 
- - charge_state = charge_state.to_dataframe() - fig.add_trace( - plotly.graph_objs.Scatter( - x=charge_state.index, y=charge_state.values.flatten(), mode='lines', name=self._charge_state - ) + # Prepare charge_state as Dataset for plotting + charge_state_ds = xr.Dataset({self._charge_state: charge_state_da}) + + # Plot charge_state with mode='line' to get Scatter traces + charge_state_fig = plotting.with_plotly( + charge_state_ds, + facet_by=facet_by, + animate_by=animate_by, + colors=colors if colors is not None else self._calculation_results.colors, + mode='line', # Always line for charge_state + title='', # No title needed for this temp figure + facet_cols=facet_cols, + xlabel='Time in h', + **plot_kwargs, ) + + # Add charge_state traces to the main figure + # This preserves subplot assignments and animation frames + for trace in charge_state_fig.data: + trace.line.width = 2 # Make charge_state line more prominent + trace.line.shape = 'linear' # Smooth line for charge state (not stepped like flows) + trace.line.color = overlay_color + figure_like.add_trace(trace) + + # Also add traces from animation frames if they exist + # Both figures use the same animate_by parameter, so they should have matching frames + if hasattr(charge_state_fig, 'frames') and charge_state_fig.frames: + # Add charge_state traces to each frame + for i, frame in enumerate(charge_state_fig.frames): + if i < len(figure_like.frames): + for trace in frame.data: + trace.line.width = 2 + trace.line.shape = 'linear' # Smooth line for charge state + trace.line.color = overlay_color + figure_like.frames[i].data = figure_like.frames[i].data + (trace,) + + default_filetype = '.html' elif engine == 'matplotlib': + # Matplotlib requires only 'time' dimension; check for extras after selection + extra_dims = [d for d in ds.dims if d != 'time'] + if extra_dims: + raise ValueError( + f'Matplotlib engine only supports a single time axis, but found extra dimensions: {extra_dims}. ' + f'Please use select={{...}} to reduce dimensions or switch to engine="plotly" for faceting/animation.' 
+ ) + # For matplotlib, plot flows (node balance), then add charge_state as line fig, ax = plotting.with_matplotlib( - ds.to_dataframe(), - colors=colors, - style=style, + ds, + colors=colors if colors is not None else self._calculation_results.colors, + mode=mode, title=title, + **plot_kwargs, ) - charge_state = charge_state.to_dataframe() - ax.plot(charge_state.index, charge_state.values.flatten(), label=self._charge_state) + # Add charge_state as a line overlay + charge_state_df = charge_state_da.to_dataframe() + ax.plot( + charge_state_df.index, + charge_state_df.values.flatten(), + label=self._charge_state, + linewidth=2, + color=overlay_color, + ) + # Recreate legend with the same styling as with_matplotlib + handles, labels = ax.get_legend_handles_labels() + ax.legend( + handles, + labels, + loc='upper center', + bbox_to_anchor=(0.5, -0.15), + ncol=5, + frameon=False, + ) fig.tight_layout() - fig = fig, ax + + figure_like = fig, ax + default_filetype = '.png' return plotting.export_figure( - fig, + figure_like=figure_like, default_path=self._calculation_results.folder / title, - default_filetype='.html', + default_filetype=default_filetype, user_path=None if isinstance(save, bool) else pathlib.Path(save), show=show, save=True if save else False, + dpi=dpi, ) def node_balance_with_charge_state( @@ -1412,8 +2073,7 @@ def from_file(cls, folder: str | pathlib.Path, name: str) -> SegmentedCalculatio folder = pathlib.Path(folder) path = folder / name logger.info(f'loading calculation "{name}" from file ("{path.with_suffix(".nc4")}")') - with open(path.with_suffix('.json'), encoding='utf-8') as f: - meta_data = json.load(f) + meta_data = fx_io.load_json(path.with_suffix('.json')) return cls( [CalculationResults.from_file(folder, sub_name) for sub_name in meta_data['sub_calculations']], all_timesteps=pd.DatetimeIndex( @@ -1441,6 +2101,7 @@ def __init__( self.name = name self.folder = pathlib.Path(folder) if folder is not None else pathlib.Path.cwd() / 'results' self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.all_timesteps) + self._colors = {} @property def meta_data(self) -> dict[str, int | list[str]]: @@ -1455,6 +2116,64 @@ def meta_data(self) -> dict[str, int | list[str]]: def segment_names(self) -> list[str]: return [segment.name for segment in self.segment_results] + @property + def colors(self) -> dict[str, str]: + return self._colors + + @colors.setter + def colors(self, colors: dict[str, str]): + """Applies colors to all segments""" + self._colors = colors + for segment in self.segment_results: + segment.colors = copy.deepcopy(colors) + + def setup_colors( + self, + config: dict[str, str | list[str]] | str | pathlib.Path | None = None, + default_colorscale: str | None = None, + ) -> dict[str, str]: + """ + Setup colors for all variables across all segment results. + + This method applies the same color configuration to all segments, ensuring + consistent visualization across the entire segmented calculation. The color + mapping is propagated to each segment's CalculationResults instance. + + Args: + config: Configuration for color assignment. 
Can be: + - dict: Maps components to colors/colorscales: + * 'component1': 'red' # Single component to single color + * 'component1': '#FF0000' # Single component to hex color + - OR maps colorscales to multiple components: + * 'colorscale_name': ['component1', 'component2'] # Colorscale across components + - str: Path to a JSON/YAML config file or a colorscale name to apply to all + - Path: Path to a JSON/YAML config file + - None: Use default_colorscale for all components + default_colorscale: Default colorscale for unconfigured components (default: 'turbo') + + Examples: + ```python + # Apply colors to all segments + segmented_results.setup_colors( + { + 'CHP': 'red', + 'Blues': ['Storage1', 'Storage2'], + 'Oranges': ['Solar1', 'Solar2'], + } + ) + + # Use a single colorscale for all components in all segments + segmented_results.setup_colors('portland') + ``` + + Returns: + Complete variable-to-color mapping dictionary from the first segment + (all segments will have the same mapping) + """ + self.colors = self.segment_results[0].setup_colors(config=config, default_colorscale=default_colorscale) + + return self.colors + def solution_without_overlap(self, variable_name: str) -> xr.DataArray: """Get variable solution removing segment overlaps. @@ -1473,37 +2192,108 @@ def solution_without_overlap(self, variable_name: str) -> xr.DataArray: def plot_heatmap( self, variable_name: str, - heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D', - heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h', - color_map: str = 'portland', + reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']] + | Literal['auto'] + | None = 'auto', + colors: plotting.ColorType | None = None, save: bool | pathlib.Path = False, - show: bool = True, + show: bool | None = None, engine: plotting.PlottingEngine = 'plotly', + facet_by: str | list[str] | None = None, + animate_by: str | None = None, + facet_cols: int | None = None, + fill: Literal['ffill', 'bfill'] | None = 'ffill', + # Deprecated parameters (kept for backwards compatibility) + heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] | None = None, + heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] | None = None, + color_map: str | None = None, + **plot_kwargs: Any, ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]: """Plot heatmap of variable solution across segments. Args: variable_name: Variable to plot. - heatmap_timeframes: Time aggregation level. - heatmap_timesteps_per_frame: Timesteps per frame. - color_map: Color scheme. Also see plotly. + reshape_time: Time reshaping configuration (default: 'auto'): + - 'auto': Automatically applies ('D', 'h') when only 'time' dimension remains + - Tuple like ('D', 'h'): Explicit reshaping (days vs hours) + - None: Disable time reshaping + colors: Color scheme. See plotting.ColorType for options. save: Whether to save plot. show: Whether to display plot. engine: Plotting engine. + facet_by: Dimension(s) to create facets (subplots) for. + animate_by: Dimension to animate over (Plotly only). + facet_cols: Number of columns in the facet grid layout. + fill: Method to fill missing values: 'ffill' or 'bfill'. + heatmap_timeframes: (Deprecated) Use reshape_time instead. + heatmap_timesteps_per_frame: (Deprecated) Use reshape_time instead. + color_map: (Deprecated) Use colors instead. + **plot_kwargs: Additional plotting customization options. 
+ Common options: + + - **dpi** (int): Export resolution for saved plots. Default: 300. + - **vmin** (float): Minimum value for color scale. + - **vmax** (float): Maximum value for color scale. + + For Matplotlib heatmaps: + + - **imshow_kwargs** (dict): Additional kwargs for matplotlib's imshow. + - **cbar_kwargs** (dict): Additional kwargs for colorbar customization. Returns: Figure object. """ + # Handle deprecated parameters + if heatmap_timeframes is not None or heatmap_timesteps_per_frame is not None: + # Check for conflict with new parameter + if reshape_time != 'auto': # Check if user explicitly set reshape_time + raise ValueError( + "Cannot use both deprecated parameters 'heatmap_timeframes'/'heatmap_timesteps_per_frame' " + "and new parameter 'reshape_time'. Use only 'reshape_time'." + ) + + import warnings + + warnings.warn( + "The 'heatmap_timeframes' and 'heatmap_timesteps_per_frame' parameters are deprecated. " + "Use 'reshape_time=(timeframes, timesteps_per_frame)' instead.", + DeprecationWarning, + stacklevel=2, + ) + # Override reshape_time if old parameters provided + if heatmap_timeframes is not None and heatmap_timesteps_per_frame is not None: + reshape_time = (heatmap_timeframes, heatmap_timesteps_per_frame) + + if color_map is not None: + # Check for conflict with new parameter + if colors is not None: # Check if user explicitly set colors + raise ValueError( + "Cannot use both deprecated parameter 'color_map' and new parameter 'colors'. Use only 'colors'." + ) + + import warnings + + warnings.warn( + "The 'color_map' parameter is deprecated. Use 'colors' instead.", + DeprecationWarning, + stacklevel=2, + ) + colors = color_map + return plot_heatmap( - dataarray=self.solution_without_overlap(variable_name), + data=self.solution_without_overlap(variable_name), name=variable_name, folder=self.folder, - heatmap_timeframes=heatmap_timeframes, - heatmap_timesteps_per_frame=heatmap_timesteps_per_frame, - color_map=color_map, + reshape_time=reshape_time, + colors=colors, save=save, show=show, engine=engine, + facet_by=facet_by, + animate_by=animate_by, + facet_cols=facet_cols, + fill=fill, + **plot_kwargs, ) def to_file(self, folder: str | pathlib.Path | None = None, name: str | None = None, compression: int = 5): @@ -1527,69 +2317,227 @@ def to_file(self, folder: str | pathlib.Path | None = None, name: str | None = N for segment in self.segment_results: segment.to_file(folder=folder, name=segment.name, compression=compression) - with open(path.with_suffix('.json'), 'w', encoding='utf-8') as f: - json.dump(self.meta_data, f, indent=4, ensure_ascii=False) + fx_io.save_json(self.meta_data, path.with_suffix('.json')) logger.info(f'Saved calculation "{name}" to {path}') def plot_heatmap( - dataarray: xr.DataArray, - name: str, - folder: pathlib.Path, - heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D', - heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h', - color_map: str = 'portland', + data: xr.DataArray | xr.Dataset, + name: str | None = None, + folder: pathlib.Path | None = None, + colors: plotting.ColorType | None = None, save: bool | pathlib.Path = False, - show: bool = True, + show: bool | None = None, engine: plotting.PlottingEngine = 'plotly', + select: dict[str, Any] | None = None, + facet_by: str | list[str] | None = None, + animate_by: str | None = None, + facet_cols: int | None = None, + reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']] + | Literal['auto'] 
+ | None = 'auto', + fill: Literal['ffill', 'bfill'] | None = 'ffill', + # Deprecated parameters (kept for backwards compatibility) indexer: dict[str, Any] | None = None, + heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] | None = None, + heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] | None = None, + color_map: str | None = None, + **plot_kwargs: Any, ): - """Plot heatmap of time series data. + """Plot heatmap visualization with support for multi-variable, faceting, and animation. + + This function provides a standalone interface to the heatmap plotting capabilities, + supporting the same modern features as CalculationResults.plot_heatmap(). Args: - dataarray: Data to plot. - name: Variable name for title. - folder: Save folder. - heatmap_timeframes: Time aggregation level. - heatmap_timesteps_per_frame: Timesteps per frame. - color_map: Color scheme. Also see plotly. - save: Whether to save plot. - show: Whether to display plot. - engine: Plotting engine. - indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}. - If None, uses first value for each dimension. - If empty dict {}, uses all values. + data: Data to plot. Can be a single DataArray or an xarray Dataset. + When a Dataset is provided, all data variables are combined along a new 'variable' dimension. + name: Optional name for the title. If not provided, uses the DataArray name or + generates a default title for Datasets. + folder: Save folder for the plot. Defaults to current directory if not provided. + colors: Color scheme for the heatmap. See `flixopt.plotting.ColorType` for options. + save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location. + show: Whether to show the plot or not. + engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'. + select: Optional data selection dict. Supports single values, lists, slices, and index arrays. + facet_by: Dimension(s) to create facets (subplots) for. Can be a single dimension name (str) + or list of dimensions. Each unique value combination creates a subplot. + animate_by: Dimension to animate over (Plotly only). Creates animation frames. + facet_cols: Number of columns in the facet grid layout (default: 3). + reshape_time: Time reshaping configuration (default: 'auto'): + - 'auto': Automatically applies ('D', 'h') when only 'time' dimension remains + - Tuple: Explicit reshaping, e.g. ('D', 'h') for days vs hours + - None: Disable auto-reshaping + fill: Method to fill missing values after reshape: 'ffill' (forward fill) or 'bfill' (backward fill). + Default is 'ffill'. + + Examples: + Single DataArray with time reshaping: + + >>> plot_heatmap(data, name='Temperature', folder=Path('.'), reshape_time=('D', 'h')) + + Dataset with multiple variables (facet by variable): + + >>> dataset = xr.Dataset({'Boiler': data1, 'CHP': data2, 'Storage': data3}) + >>> plot_heatmap( + ... dataset, + ... folder=Path('.'), + ... facet_by='variable', + ... reshape_time=('D', 'h'), + ... 
) + + Dataset with animation by variable: + + >>> plot_heatmap(dataset, animate_by='variable', reshape_time=('D', 'h')) """ - dataarray, suffix_parts = _apply_indexer_to_data(dataarray, indexer, drop=True) + # Handle deprecated heatmap time parameters + if heatmap_timeframes is not None or heatmap_timesteps_per_frame is not None: + # Check for conflict with new parameter + if reshape_time != 'auto': # User explicitly set reshape_time + raise ValueError( + "Cannot use both deprecated parameters 'heatmap_timeframes'/'heatmap_timesteps_per_frame' " + "and new parameter 'reshape_time'. Use only 'reshape_time'." + ) + + import warnings + + warnings.warn( + "The 'heatmap_timeframes' and 'heatmap_timesteps_per_frame' parameters are deprecated. " + "Use 'reshape_time=(timeframes, timesteps_per_frame)' instead.", + DeprecationWarning, + stacklevel=2, + ) + # Override reshape_time if both old parameters provided + if heatmap_timeframes is not None and heatmap_timesteps_per_frame is not None: + reshape_time = (heatmap_timeframes, heatmap_timesteps_per_frame) + + # Handle deprecated color_map parameter + if color_map is not None: + if colors is not None: # User explicitly set colors + raise ValueError( + "Cannot use both deprecated parameter 'color_map' and new parameter 'colors'. Use only 'colors'." + ) + + import warnings + + warnings.warn( + "The 'color_map' parameter is deprecated. Use 'colors' instead.", + DeprecationWarning, + stacklevel=2, + ) + colors = color_map + + # Handle deprecated indexer parameter + if indexer is not None: + # Check for conflict with new parameter + if select is not None: # User explicitly set select + raise ValueError( + "Cannot use both deprecated parameter 'indexer' and new parameter 'select'. Use only 'select'." + ) + + import warnings + + warnings.warn( + "The 'indexer' parameter is deprecated. Use 'select' instead.", + DeprecationWarning, + stacklevel=2, + ) + select = indexer + + # Convert Dataset to DataArray with 'variable' dimension + if isinstance(data, xr.Dataset): + # Extract all data variables from the Dataset + variable_names = list(data.data_vars) + dataarrays = [data[var] for var in variable_names] + + # Combine into single DataArray with 'variable' dimension + data = xr.concat(dataarrays, dim='variable') + data = data.assign_coords(variable=variable_names) + + # Use Dataset variable names for title if name not provided + if name is None: + title_name = f'Heatmap of {len(variable_names)} variables' + else: + title_name = name + else: + # Single DataArray + if name is None: + title_name = data.name if data.name else 'Heatmap' + else: + title_name = name + + # Apply select filtering + data, suffix_parts = _apply_selection_to_data(data, select=select, drop=True) suffix = '--' + '-'.join(suffix_parts) if suffix_parts else '' - name = name if not suffix_parts else name + suffix - heatmap_data = plotting.heat_map_data_from_df( - dataarray.to_dataframe(name), heatmap_timeframes, heatmap_timesteps_per_frame, 'ffill' - ) + # Matplotlib heatmaps require at most 2D data + # Time dimension will be reshaped to 2D (timeframe Γ— timestep), so can't have other dims alongside it + if engine == 'matplotlib': + dims = list(data.dims) + + # If 'time' dimension exists and will be reshaped, we can't have any other dimensions + if 'time' in dims and len(dims) > 1 and reshape_time is not None: + extra_dims = [d for d in dims if d != 'time'] + raise ValueError( + f'Matplotlib heatmaps with time reshaping cannot have additional dimensions. 
'
+ f'Found extra dimensions: {extra_dims}. '
+ f'Use select={{...}} to reduce to time only, use "reshape_time=None", or switch to engine="plotly" for multi-dimensional support.'
+ )
+ # If no 'time' dimension (already reshaped or different data), allow at most 2 dimensions
+ elif 'time' not in dims and len(dims) > 2:
+ raise ValueError(
+ f'Matplotlib heatmaps support at most 2 dimensions, but data has {len(dims)}: {dims}. '
+ f'Use select={{...}} to reduce dimensions or switch to engine="plotly".'
+ )
+
+ # Build title
+ title = f'{title_name}{suffix}'
+ if isinstance(reshape_time, tuple):
+ timeframes, timesteps_per_frame = reshape_time
+ title += f' ({timeframes} vs {timesteps_per_frame})'

- xlabel, ylabel = f'timeframe [{heatmap_timeframes}]', f'timesteps [{heatmap_timesteps_per_frame}]'
+ # Extract dpi before passing to plotting functions
+ dpi = plot_kwargs.pop('dpi', None)  # None uses CONFIG.Plotting.default_dpi

+ # Plot with appropriate engine
 if engine == 'plotly':
- figure_like = plotting.heat_map_plotly(
- heatmap_data, title=name, color_map=color_map, xlabel=xlabel, ylabel=ylabel
+ figure_like = plotting.heatmap_with_plotly(
+ data=data,
+ facet_by=facet_by,
+ animate_by=animate_by,
+ colors=colors,
+ title=title,
+ facet_cols=facet_cols,
+ reshape_time=reshape_time,
+ fill=fill,
+ **plot_kwargs,
 )
 default_filetype = '.html'
 elif engine == 'matplotlib':
- figure_like = plotting.heat_map_matplotlib(
- heatmap_data, title=name, color_map=color_map, xlabel=xlabel, ylabel=ylabel
+ figure_like = plotting.heatmap_with_matplotlib(
+ data=data,
+ colors=colors,
+ title=title,
+ reshape_time=reshape_time,
+ fill=fill,
+ **plot_kwargs,
 )
 default_filetype = '.png'
 else:
 raise ValueError(f'Engine "{engine}" not supported. Use "plotly" or "matplotlib"')

+ # Set default folder if not provided
+ if folder is None:
+ folder = pathlib.Path('.')
+
 return plotting.export_figure(
 figure_like=figure_like,
- default_path=folder / f'{name} ({heatmap_timeframes}-{heatmap_timesteps_per_frame})',
+ default_path=folder / title,
 default_filetype=default_filetype,
 user_path=None if isinstance(save, bool) else pathlib.Path(save),
 show=show,
 save=True if save else False,
+ dpi=dpi,
 )


@@ -1787,8 +2735,13 @@ def apply_filter(array, coord_name: str, coord_values: Any | list[Any]):
 if coord_name not in array.coords:
 raise AttributeError(f"Missing required coordinate '{coord_name}'")

- # Convert single value to list
- val_list = [coord_values] if isinstance(coord_values, str) else coord_values
+ # Normalize to list for sequence-like inputs (excluding strings)
+ if isinstance(coord_values, str):
+ val_list = [coord_values]
+ elif isinstance(coord_values, (list, tuple, np.ndarray, pd.Index)):
+ val_list = list(coord_values)
+ else:
+ val_list = [coord_values]

 # Verify coord_values exist
 available = set(array[coord_name].values)
@@ -1798,7 +2751,7 @@ def apply_filter(array, coord_name: str, coord_values: Any | list[Any]):

 # Apply filter
 return array.where(
- array[coord_name].isin(val_list) if isinstance(coord_values, list) else array[coord_name] == coord_values,
+ array[coord_name].isin(val_list) if len(val_list) > 1 else array[coord_name] == val_list[0],
 drop=True,
 )
@@ -1817,36 +2770,26 @@ def apply_filter(array, coord_name: str, coord_values: Any | list[Any]):

 return da


-def _apply_indexer_to_data(
- data: xr.DataArray | xr.Dataset, indexer: dict[str, Any] | None = None, drop=False
+def _apply_selection_to_data(
+ data: xr.DataArray | xr.Dataset,
+ select: dict[str, Any] | None = None,
+ drop=False,
 ) -> 
tuple[xr.DataArray | xr.Dataset, list[str]]: """ - Apply indexer selection or auto-select first values for non-time dimensions. + Apply selection to data. Args: data: xarray Dataset or DataArray - indexer: Optional selection dict - If None, uses first value for each dimension (except time). - If empty dict {}, uses all values. + select: Optional selection dict + drop: Whether to drop dimensions after selection Returns: Tuple of (selected_data, selection_string) """ selection_string = [] - if indexer is not None: - # User provided indexer - data = data.sel(indexer, drop=drop) - selection_string.extend(f'{v}[{k}]' for k, v in indexer.items()) - else: - # Auto-select first value for each dimension except 'time' - selection = {} - for dim in data.dims: - if dim != 'time' and dim in data.coords: - first_value = data.coords[dim].values[0] - selection[dim] = first_value - selection_string.append(f'{first_value}[{dim}]') - if selection: - data = data.sel(selection, drop=drop) + if select: + data = data.sel(select, drop=drop) + selection_string.extend(f'{dim}={val}' for dim, val in select.items()) return data, selection_string From fc42bc2fd39230b09ebbdbf43ae94eefb15ded52 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 14:03:05 +0100 Subject: [PATCH 05/27] Add extra log_to_console option to solvers.py --- flixopt/solvers.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/flixopt/solvers.py b/flixopt/solvers.py index 410d69434..36f993f95 100644 --- a/flixopt/solvers.py +++ b/flixopt/solvers.py @@ -19,12 +19,14 @@ class _Solver: Args: mip_gap: Acceptable relative optimality gap in [0.0, 1.0]. time_limit_seconds: Time limit in seconds. + log_to_console: If False, no output to console. extra_options: Additional solver options merged into `options`. """ name: ClassVar[str] mip_gap: float time_limit_seconds: int + log_to_console: bool = True extra_options: dict[str, Any] = field(default_factory=dict) @property @@ -45,6 +47,7 @@ class GurobiSolver(_Solver): Args: mip_gap: Acceptable relative optimality gap in [0.0, 1.0]; mapped to Gurobi `MIPGap`. time_limit_seconds: Time limit in seconds; mapped to Gurobi `TimeLimit`. + log_to_console: If False, no output to console. extra_options: Additional solver options merged into `options`. """ @@ -55,6 +58,7 @@ def _options(self) -> dict[str, Any]: return { 'MIPGap': self.mip_gap, 'TimeLimit': self.time_limit_seconds, + 'LogToConsole': 1 if self.log_to_console else 0, } @@ -65,6 +69,7 @@ class HighsSolver(_Solver): Attributes: mip_gap: Acceptable relative optimality gap in [0.0, 1.0]; mapped to HiGHS `mip_rel_gap`. time_limit_seconds: Time limit in seconds; mapped to HiGHS `time_limit`. + log_to_console: If False, no output to console. extra_options: Additional solver options merged into `options`. threads (int | None): Number of threads to use. If None, HiGHS chooses. 
""" @@ -78,4 +83,5 @@ def _options(self) -> dict[str, Any]: 'mip_rel_gap': self.mip_gap, 'time_limit': self.time_limit_seconds, 'threads': self.threads, + 'log_to_console': self.log_to_console, } From d3bcdc271acc7ec1d177fbafce9ab2ec842040d0 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 14:06:51 +0100 Subject: [PATCH 06/27] Add extra log_to_console option to solvers.py --- flixopt/calculation.py | 16 +++++++++------- flixopt/io.py | 26 -------------------------- 2 files changed, 9 insertions(+), 33 deletions(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index 1dab78e57..d07a23793 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -10,6 +10,7 @@ from __future__ import annotations +import copy import logging import math import pathlib @@ -612,13 +613,14 @@ def do_modeling_and_solve( f'Following InvestmentModels were found: {invest_elements}' ) - # Redirect solver stdout to null to avoid cluttering the output - with fx_io.suppress_output(): - calculation.solve( - solver, - log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', - log_main_results=log_main_results, - ) + solver_silent = copy.copy(solver) + solver_silent.log_to_console = False + + calculation.solve( + solver_silent, + log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', + log_main_results=log_main_results, + ) progress_bar.close() diff --git a/flixopt/io.py b/flixopt/io.py index fa4ef4ebf..7f832ed0e 100644 --- a/flixopt/io.py +++ b/flixopt/io.py @@ -3,11 +3,8 @@ import inspect import json import logging -import os import pathlib import re -import sys -from contextlib import contextmanager from dataclasses import dataclass from typing import TYPE_CHECKING, Any @@ -934,26 +931,3 @@ def build_metadata_info(parts: list[str], prefix: str = ' | ') -> str: return '' info = ' | '.join(parts) return prefix + info if prefix else info - - -@contextmanager -def suppress_output(): - """Redirect both Python and C-level stdout/stderr to os.devnull.""" - with open(os.devnull, 'w') as devnull: - # Save original file descriptors - old_stdout_fd = os.dup(1) - old_stderr_fd = os.dup(2) - try: - # Flush any pending text - sys.stdout.flush() - sys.stderr.flush() - # Redirect low-level fds to devnull - os.dup2(devnull.fileno(), 1) - os.dup2(devnull.fileno(), 2) - yield - finally: - # Restore fds - os.dup2(old_stdout_fd, 1) - os.dup2(old_stderr_fd, 2) - os.close(old_stdout_fd) - os.close(old_stderr_fd) From 7931580360a20e0686f4b711ded8e165bff1d031 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 14:11:10 +0100 Subject: [PATCH 07/27] Add extra log_to_console option config.py --- flixopt/config.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/flixopt/config.py b/flixopt/config.py index 670f86da2..83bdbe66f 100644 --- a/flixopt/config.py +++ b/flixopt/config.py @@ -28,6 +28,7 @@ 'file': None, 'rich': False, 'console': False, + 'solver_to_console': True, 'max_file_size': 10_485_760, # 10MB 'backup_count': 5, 'date_format': '%Y-%m-%d %H:%M:%S', @@ -104,6 +105,7 @@ class Logging: file: Log file path for file logging. console: Enable console output. rich: Use Rich library for enhanced output. + solver_to_console: Enable solver output to console. max_file_size: Max file size before rotation. backup_count: Number of backup files to keep. date_format: Date/time format string. 
@@ -135,6 +137,7 @@ class Logging: file: str | None = _DEFAULTS['logging']['file'] rich: bool = _DEFAULTS['logging']['rich'] console: bool | Literal['stdout', 'stderr'] = _DEFAULTS['logging']['console'] + solver_to_console: bool = _DEFAULTS['logging']['solver_to_console'] max_file_size: int = _DEFAULTS['logging']['max_file_size'] backup_count: int = _DEFAULTS['logging']['backup_count'] date_format: str = _DEFAULTS['logging']['date_format'] @@ -346,6 +349,7 @@ def to_dict(cls) -> dict: 'file': cls.Logging.file, 'rich': cls.Logging.rich, 'console': cls.Logging.console, + 'solver_to_console': cls.Logging.solver_to_console, 'max_file_size': cls.Logging.max_file_size, 'backup_count': cls.Logging.backup_count, 'date_format': cls.Logging.date_format, From 168ec617c6860d07f80a5a0db85afc276cd52155 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 14:13:27 +0100 Subject: [PATCH 08/27] Add to tests --- tests/test_config.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/test_config.py b/tests/test_config.py index 60ed80555..ae3304188 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -28,6 +28,7 @@ def test_config_defaults(self): assert CONFIG.Logging.file is None assert CONFIG.Logging.rich is False assert CONFIG.Logging.console is False + assert CONFIG.Logging.solver_to_console is True assert CONFIG.Modeling.big == 10_000_000 assert CONFIG.Modeling.epsilon == 1e-5 assert CONFIG.Modeling.big_binary_bound == 100_000 @@ -104,6 +105,7 @@ def test_config_to_dict(self): assert config_dict['logging']['console'] is True assert config_dict['logging']['file'] is None assert config_dict['logging']['rich'] is False + assert config_dict['logging']['solver_to_console'] is True assert 'modeling' in config_dict assert config_dict['modeling']['big'] == 10_000_000 @@ -423,6 +425,7 @@ def test_config_reset(self): CONFIG.Logging.console = False CONFIG.Logging.rich = True CONFIG.Logging.file = '/tmp/test.log' + CONFIG.Logging.solver_to_console = False CONFIG.Modeling.big = 99999999 CONFIG.Modeling.epsilon = 1e-8 CONFIG.Modeling.big_binary_bound = 500000 @@ -436,6 +439,7 @@ def test_config_reset(self): assert CONFIG.Logging.console is False assert CONFIG.Logging.rich is False assert CONFIG.Logging.file is None + assert CONFIG.Logging.solver_to_console is True assert CONFIG.Modeling.big == 10_000_000 assert CONFIG.Modeling.epsilon == 1e-5 assert CONFIG.Modeling.big_binary_bound == 100_000 @@ -457,6 +461,7 @@ def test_reset_matches_class_defaults(self): CONFIG.Logging.file = '/tmp/test.log' CONFIG.Logging.rich = True CONFIG.Logging.console = True + CONFIG.Logging.solver_to_console = False CONFIG.Modeling.big = 999999 CONFIG.Modeling.epsilon = 1e-10 CONFIG.Modeling.big_binary_bound = 999999 @@ -464,6 +469,7 @@ def test_reset_matches_class_defaults(self): # Verify values are actually different from defaults assert CONFIG.Logging.level != _DEFAULTS['logging']['level'] + assert CONFIG.Logging.solver_to_console != _DEFAULTS['logging']['solver_to_console'] assert CONFIG.Modeling.big != _DEFAULTS['modeling']['big'] # Now reset @@ -474,6 +480,7 @@ def test_reset_matches_class_defaults(self): assert CONFIG.Logging.file == _DEFAULTS['logging']['file'] assert CONFIG.Logging.rich == _DEFAULTS['logging']['rich'] assert CONFIG.Logging.console == _DEFAULTS['logging']['console'] + assert CONFIG.Logging.solver_to_console == _DEFAULTS['logging']['solver_to_console'] assert CONFIG.Modeling.big == _DEFAULTS['modeling']['big'] assert CONFIG.Modeling.epsilon == 
_DEFAULTS['modeling']['epsilon'] assert CONFIG.Modeling.big_binary_bound == _DEFAULTS['modeling']['big_binary_bound'] From 20602f9c0cb9c813adb6b74812f398f02303b488 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 14:27:28 +0100 Subject: [PATCH 09/27] Use default from console to say if logging to console (gurobipy still has some issues...) --- flixopt/solvers.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flixopt/solvers.py b/flixopt/solvers.py index 36f993f95..7d083eef4 100644 --- a/flixopt/solvers.py +++ b/flixopt/solvers.py @@ -8,6 +8,8 @@ from dataclasses import dataclass, field from typing import Any, ClassVar +from flixopt.config import CONFIG + logger = logging.getLogger('flixopt') @@ -26,7 +28,7 @@ class _Solver: name: ClassVar[str] mip_gap: float time_limit_seconds: int - log_to_console: bool = True + log_to_console: bool = field(default_factory=lambda: CONFIG.Logging.solver_to_console) extra_options: dict[str, Any] = field(default_factory=dict) @property From 95b921770be11e0d61705d788f014a851b733a79 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 14:33:35 +0100 Subject: [PATCH 10/27] Add rounding duration of solve --- flixopt/calculation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index d07a23793..28dccb5ab 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -238,7 +238,7 @@ def solve( **solver.options, ) self.durations['solving'] = round(timeit.default_timer() - t_start, 2) - logger.info(f'Model solved with {solver.name} in {self.durations["solving"]} seconds.') + logger.info(f'Model solved with {solver.name} in {self.durations["solving"]:.2f} seconds.') logger.info(f'Model status after solve: {self.model.status}') if self.model.status == 'warning': @@ -628,7 +628,7 @@ def do_modeling_and_solve( for key, value in calc.durations.items(): self.durations[key] += value - logger.info(f'Model solved with {solver.name} in {self.durations["solving"]} seconds.') + logger.info(f'Model solved with {solver.name} in {self.durations["solving"]:.2f} seconds.') self.results = SegmentedCalculationResults.from_calculation(self) From 677f534e65c1ab656653824f79dc0c9f7218fd55 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 14:34:58 +0100 Subject: [PATCH 11/27] Use contextmanager to entirely supress output in SegmentedCalculation --- flixopt/calculation.py | 19 ++++++++++--------- flixopt/io.py | 24 ++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index 28dccb5ab..a55e453d1 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -573,7 +573,10 @@ def _create_sub_calculations(self): ) def do_modeling_and_solve( - self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool = False + self, + solver: _Solver, + log_file: pathlib.Path | None = None, + log_main_results: bool = False, ) -> SegmentedCalculation: logger.info(f'{"":#^80}') logger.info(f'{" Segmented Solving ":#^80}') @@ -613,14 +616,12 @@ def do_modeling_and_solve( f'Following InvestmentModels were found: {invest_elements}' ) - solver_silent = copy.copy(solver) - solver_silent.log_to_console = False - - calculation.solve( - solver_silent, - log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', - 
log_main_results=log_main_results, - ) + with fx_io.suppress_output(): + calculation.solve( + solver, + log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', + log_main_results=log_main_results, + ) progress_bar.close() diff --git a/flixopt/io.py b/flixopt/io.py index 7f832ed0e..6a5544d7b 100644 --- a/flixopt/io.py +++ b/flixopt/io.py @@ -3,8 +3,11 @@ import inspect import json import logging +import os import pathlib import re +import sys +from contextlib import contextmanager from dataclasses import dataclass from typing import TYPE_CHECKING, Any @@ -931,3 +934,24 @@ def build_metadata_info(parts: list[str], prefix: str = ' | ') -> str: return '' info = ' | '.join(parts) return prefix + info if prefix else info + + +@contextmanager +def suppress_output(): + """Suppress all console output including C-level output from Gurobi.""" + old_stdout_fd = os.dup(1) + old_stderr_fd = os.dup(2) + + try: + devnull_fd = os.open(os.devnull, os.O_WRONLY) + sys.stdout.flush() + sys.stderr.flush() + os.dup2(devnull_fd, 1) + os.dup2(devnull_fd, 2) + yield + finally: + os.dup2(old_stdout_fd, 1) + os.dup2(old_stderr_fd, 2) + os.close(devnull_fd) + os.close(old_stdout_fd) + os.close(old_stderr_fd) From 767d8eca40fd7d89695b02f7bb115d4daed2789b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 14:36:40 +0100 Subject: [PATCH 12/27] Improve suppress_output() --- flixopt/io.py | 47 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/flixopt/io.py b/flixopt/io.py index 6a5544d7b..c5f839ed9 100644 --- a/flixopt/io.py +++ b/flixopt/io.py @@ -938,20 +938,55 @@ def build_metadata_info(parts: list[str], prefix: str = ' | ') -> str: @contextmanager def suppress_output(): - """Suppress all console output including C-level output from Gurobi.""" + """ + Suppress all console output including C-level output from solvers. + + WARNING: Not thread-safe. Modifies global file descriptors. + Use only with sequential execution or multiprocessing. 
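A quick sketch of what the file-descriptor redirection gives over merely swapping `sys.stdout`: raw writes to fd 1, as solver C libraries perform them, are silenced as well. This assumes the patched `flixopt.io` from this series is importable:

```python
import os

from flixopt import io as fx_io

print('visible')
with fx_io.suppress_output():
    print('suppressed (Python-level write via sys.stdout)')
    os.write(1, b'suppressed (raw C-level write to fd 1)\n')  # bypasses sys.stdout entirely
print('visible again')
```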
+ """ + # Save original file descriptors old_stdout_fd = os.dup(1) old_stderr_fd = os.dup(2) + devnull_fd = None try: + # Open devnull devnull_fd = os.open(os.devnull, os.O_WRONLY) + + # Flush Python buffers before redirecting sys.stdout.flush() sys.stderr.flush() + + # Redirect file descriptors to devnull os.dup2(devnull_fd, 1) os.dup2(devnull_fd, 2) + yield + finally: - os.dup2(old_stdout_fd, 1) - os.dup2(old_stderr_fd, 2) - os.close(devnull_fd) - os.close(old_stdout_fd) - os.close(old_stderr_fd) + # Restore original file descriptors with nested try blocks + # to ensure all cleanup happens even if one step fails + try: + # Flush any buffered output in the redirected streams + sys.stdout.flush() + sys.stderr.flush() + except (OSError, ValueError): + pass # Stream might be closed or invalid + + try: + os.dup2(old_stdout_fd, 1) + except OSError: + pass # Failed to restore stdout, continue cleanup + + try: + os.dup2(old_stderr_fd, 2) + except OSError: + pass # Failed to restore stderr, continue cleanup + + # Close all file descriptors + for fd in [devnull_fd, old_stdout_fd, old_stderr_fd]: + if fd is not None: + try: + os.close(fd) + except OSError: + pass # FD already closed or invalid From faf4267e83fe6cf3bff8ffe3c24ae49aee902886 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 15:06:22 +0100 Subject: [PATCH 13/27] More options in config.py --- flixopt/calculation.py | 4 +- flixopt/config.py | 59 +++++++++++++++-- flixopt/solvers.py | 12 ++-- tests/test_config.py | 144 +++++++++++++++++++++++++++++++++++++++-- 4 files changed, 200 insertions(+), 19 deletions(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index a55e453d1..1728725b8 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -228,7 +228,7 @@ def fix_sizes(self, ds: xr.Dataset, decimal_rounding: int | None = 5) -> FullCal return self def solve( - self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool = True + self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool | None = None ) -> FullCalculation: t_start = timeit.default_timer() @@ -253,7 +253,7 @@ def solve( ) # Log the formatted output - if log_main_results: + if log_main_results if log_main_results is not None else CONFIG.Solving.log_main_results: logger.info( f'{" Main Results ":#^80}\n' + yaml.dump( diff --git a/flixopt/config.py b/flixopt/config.py index 83bdbe66f..1507621fd 100644 --- a/flixopt/config.py +++ b/flixopt/config.py @@ -28,7 +28,6 @@ 'file': None, 'rich': False, 'console': False, - 'solver_to_console': True, 'max_file_size': 10_485_760, # 10MB 'backup_count': 5, 'date_format': '%Y-%m-%d %H:%M:%S', @@ -64,6 +63,14 @@ 'default_qualitative_colorscale': 'plotly', } ), + 'solving': MappingProxyType( + { + 'mip_gap': 0.01, + 'time_limit_seconds': 300, + 'log_to_console': True, + 'log_main_results': True, + } + ), } ) @@ -76,6 +83,8 @@ class CONFIG: Attributes: Logging: Logging configuration. Modeling: Optimization modeling parameters. + Solving: Solver configuration and default parameters. + Plotting: Plotting configuration. config_name: Configuration name. Examples: @@ -92,6 +101,9 @@ class CONFIG: level: DEBUG console: true file: app.log + solving: + mip_gap: 0.001 + time_limit_seconds: 600 ``` """ @@ -105,7 +117,6 @@ class Logging: file: Log file path for file logging. console: Enable console output. rich: Use Rich library for enhanced output. - solver_to_console: Enable solver output to console. 
max_file_size: Max file size before rotation. backup_count: Number of backup files to keep. date_format: Date/time format string. @@ -137,7 +148,6 @@ class Logging: file: str | None = _DEFAULTS['logging']['file'] rich: bool = _DEFAULTS['logging']['rich'] console: bool | Literal['stdout', 'stderr'] = _DEFAULTS['logging']['console'] - solver_to_console: bool = _DEFAULTS['logging']['solver_to_console'] max_file_size: int = _DEFAULTS['logging']['max_file_size'] backup_count: int = _DEFAULTS['logging']['backup_count'] date_format: str = _DEFAULTS['logging']['date_format'] @@ -197,6 +207,30 @@ class Modeling: epsilon: float = _DEFAULTS['modeling']['epsilon'] big_binary_bound: int = _DEFAULTS['modeling']['big_binary_bound'] + class Solving: + """Solver configuration and default parameters. + + Attributes: + mip_gap: Default MIP gap tolerance for solver convergence. + time_limit_seconds: Default time limit in seconds for solver runs. + log_to_console: Whether solver should output to console. + log_main_results: Whether to log main results after solving. + + Examples: + ```python + # Set tighter convergence and longer timeout + CONFIG.Solving.mip_gap = 0.001 + CONFIG.Solving.time_limit_seconds = 600 + CONFIG.Solving.log_to_console = False + CONFIG.apply() + ``` + """ + + mip_gap: float = _DEFAULTS['solving']['mip_gap'] + time_limit_seconds: int = _DEFAULTS['solving']['time_limit_seconds'] + log_to_console: bool = _DEFAULTS['solving']['log_to_console'] + log_main_results: bool = _DEFAULTS['solving']['log_main_results'] + class Plotting: """Plotting configuration. @@ -249,6 +283,12 @@ def reset(cls): for key, value in _DEFAULTS['modeling'].items(): setattr(cls.Modeling, key, value) + for key, value in _DEFAULTS['solving'].items(): + setattr(cls.Solving, key, value) + + for key, value in _DEFAULTS['plotting'].items(): + setattr(cls.Plotting, key, value) + cls.config_name = _DEFAULTS['config_name'] cls.apply() @@ -332,6 +372,12 @@ def _apply_config_dict(cls, config_dict: dict): elif key == 'modeling' and isinstance(value, dict): for nested_key, nested_value in value.items(): setattr(cls.Modeling, nested_key, nested_value) + elif key == 'solving' and isinstance(value, dict): + for nested_key, nested_value in value.items(): + setattr(cls.Solving, nested_key, nested_value) + elif key == 'plotting' and isinstance(value, dict): + for nested_key, nested_value in value.items(): + setattr(cls.Plotting, nested_key, nested_value) elif hasattr(cls, key): setattr(cls, key, value) @@ -349,7 +395,6 @@ def to_dict(cls) -> dict: 'file': cls.Logging.file, 'rich': cls.Logging.rich, 'console': cls.Logging.console, - 'solver_to_console': cls.Logging.solver_to_console, 'max_file_size': cls.Logging.max_file_size, 'backup_count': cls.Logging.backup_count, 'date_format': cls.Logging.date_format, @@ -370,6 +415,12 @@ def to_dict(cls) -> dict: 'epsilon': cls.Modeling.epsilon, 'big_binary_bound': cls.Modeling.big_binary_bound, }, + 'solving': { + 'mip_gap': cls.Solving.mip_gap, + 'time_limit_seconds': cls.Solving.time_limit_seconds, + 'log_to_console': cls.Solving.log_to_console, + 'log_main_results': cls.Solving.log_main_results, + }, 'plotting': { 'default_show': cls.Plotting.default_show, 'default_engine': cls.Plotting.default_engine, diff --git a/flixopt/solvers.py b/flixopt/solvers.py index 7d083eef4..e5db61192 100644 --- a/flixopt/solvers.py +++ b/flixopt/solvers.py @@ -19,16 +19,16 @@ class _Solver: Abstract base class for solvers. Args: - mip_gap: Acceptable relative optimality gap in [0.0, 1.0]. 
- time_limit_seconds: Time limit in seconds. - log_to_console: If False, no output to console. + mip_gap: Acceptable relative optimality gap in [0.0, 1.0]. Defaults to CONFIG.Solving.mip_gap. + time_limit_seconds: Time limit in seconds. Defaults to CONFIG.Solving.time_limit_seconds. + log_to_console: If False, no output to console. Defaults to CONFIG.Solving.log_to_console. extra_options: Additional solver options merged into `options`. """ name: ClassVar[str] - mip_gap: float - time_limit_seconds: int - log_to_console: bool = field(default_factory=lambda: CONFIG.Logging.solver_to_console) + mip_gap: float = field(default_factory=lambda: CONFIG.Solving.mip_gap) + time_limit_seconds: int = field(default_factory=lambda: CONFIG.Solving.time_limit_seconds) + log_to_console: bool = field(default_factory=lambda: CONFIG.Solving.log_to_console) extra_options: dict[str, Any] = field(default_factory=dict) @property diff --git a/tests/test_config.py b/tests/test_config.py index ae3304188..a78330eb4 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -28,10 +28,13 @@ def test_config_defaults(self): assert CONFIG.Logging.file is None assert CONFIG.Logging.rich is False assert CONFIG.Logging.console is False - assert CONFIG.Logging.solver_to_console is True assert CONFIG.Modeling.big == 10_000_000 assert CONFIG.Modeling.epsilon == 1e-5 assert CONFIG.Modeling.big_binary_bound == 100_000 + assert CONFIG.Solving.mip_gap == 0.01 + assert CONFIG.Solving.time_limit_seconds == 300 + assert CONFIG.Solving.log_to_console is True + assert CONFIG.Solving.log_main_results is True assert CONFIG.config_name == 'flixopt' def test_module_initialization(self): @@ -105,9 +108,13 @@ def test_config_to_dict(self): assert config_dict['logging']['console'] is True assert config_dict['logging']['file'] is None assert config_dict['logging']['rich'] is False - assert config_dict['logging']['solver_to_console'] is True assert 'modeling' in config_dict assert config_dict['modeling']['big'] == 10_000_000 + assert 'solving' in config_dict + assert config_dict['solving']['mip_gap'] == 0.01 + assert config_dict['solving']['time_limit_seconds'] == 300 + assert config_dict['solving']['log_to_console'] is True + assert config_dict['solving']['log_main_results'] is True def test_config_load_from_file(self, tmp_path): """Test loading configuration from YAML file.""" @@ -121,6 +128,10 @@ def test_config_load_from_file(self, tmp_path): modeling: big: 20000000 epsilon: 1e-6 +solving: + mip_gap: 0.001 + time_limit_seconds: 600 + log_main_results: false """ config_file.write_text(config_content) @@ -132,6 +143,9 @@ def test_config_load_from_file(self, tmp_path): assert CONFIG.Modeling.big == 20000000 # YAML may load epsilon as string, so convert for comparison assert float(CONFIG.Modeling.epsilon) == 1e-6 + assert CONFIG.Solving.mip_gap == 0.001 + assert CONFIG.Solving.time_limit_seconds == 600 + assert CONFIG.Solving.log_main_results is False def test_config_load_from_file_not_found(self): """Test that loading from non-existent file raises error.""" @@ -266,6 +280,10 @@ def test_custom_config_yaml_complete(self, tmp_path): big: 50000000 epsilon: 1e-4 big_binary_bound: 200000 +solving: + mip_gap: 0.005 + time_limit_seconds: 900 + log_main_results: false """ config_file.write_text(config_content) @@ -280,6 +298,9 @@ def test_custom_config_yaml_complete(self, tmp_path): assert CONFIG.Modeling.big == 50000000 assert float(CONFIG.Modeling.epsilon) == 1e-4 assert CONFIG.Modeling.big_binary_bound == 200000 + assert CONFIG.Solving.mip_gap 
== 0.005 + assert CONFIG.Solving.time_limit_seconds == 900 + assert CONFIG.Solving.log_main_results is False # Verify logging was applied logger = logging.getLogger('flixopt') @@ -425,10 +446,13 @@ def test_config_reset(self): CONFIG.Logging.console = False CONFIG.Logging.rich = True CONFIG.Logging.file = '/tmp/test.log' - CONFIG.Logging.solver_to_console = False CONFIG.Modeling.big = 99999999 CONFIG.Modeling.epsilon = 1e-8 CONFIG.Modeling.big_binary_bound = 500000 + CONFIG.Solving.mip_gap = 0.0001 + CONFIG.Solving.time_limit_seconds = 1800 + CONFIG.Solving.log_to_console = False + CONFIG.Solving.log_main_results = False CONFIG.config_name = 'test_config' # Reset should restore all defaults @@ -439,10 +463,13 @@ def test_config_reset(self): assert CONFIG.Logging.console is False assert CONFIG.Logging.rich is False assert CONFIG.Logging.file is None - assert CONFIG.Logging.solver_to_console is True assert CONFIG.Modeling.big == 10_000_000 assert CONFIG.Modeling.epsilon == 1e-5 assert CONFIG.Modeling.big_binary_bound == 100_000 + assert CONFIG.Solving.mip_gap == 0.01 + assert CONFIG.Solving.time_limit_seconds == 300 + assert CONFIG.Solving.log_to_console is True + assert CONFIG.Solving.log_main_results is True assert CONFIG.config_name == 'flixopt' # Verify logging was also reset @@ -461,16 +488,20 @@ def test_reset_matches_class_defaults(self): CONFIG.Logging.file = '/tmp/test.log' CONFIG.Logging.rich = True CONFIG.Logging.console = True - CONFIG.Logging.solver_to_console = False CONFIG.Modeling.big = 999999 CONFIG.Modeling.epsilon = 1e-10 CONFIG.Modeling.big_binary_bound = 999999 + CONFIG.Solving.mip_gap = 0.0001 + CONFIG.Solving.time_limit_seconds = 9999 + CONFIG.Solving.log_to_console = False + CONFIG.Solving.log_main_results = False CONFIG.config_name = 'modified' # Verify values are actually different from defaults assert CONFIG.Logging.level != _DEFAULTS['logging']['level'] - assert CONFIG.Logging.solver_to_console != _DEFAULTS['logging']['solver_to_console'] assert CONFIG.Modeling.big != _DEFAULTS['modeling']['big'] + assert CONFIG.Solving.mip_gap != _DEFAULTS['solving']['mip_gap'] + assert CONFIG.Solving.log_to_console != _DEFAULTS['solving']['log_to_console'] # Now reset CONFIG.reset() @@ -480,8 +511,107 @@ def test_reset_matches_class_defaults(self): assert CONFIG.Logging.file == _DEFAULTS['logging']['file'] assert CONFIG.Logging.rich == _DEFAULTS['logging']['rich'] assert CONFIG.Logging.console == _DEFAULTS['logging']['console'] - assert CONFIG.Logging.solver_to_console == _DEFAULTS['logging']['solver_to_console'] assert CONFIG.Modeling.big == _DEFAULTS['modeling']['big'] assert CONFIG.Modeling.epsilon == _DEFAULTS['modeling']['epsilon'] assert CONFIG.Modeling.big_binary_bound == _DEFAULTS['modeling']['big_binary_bound'] + assert CONFIG.Solving.mip_gap == _DEFAULTS['solving']['mip_gap'] + assert CONFIG.Solving.time_limit_seconds == _DEFAULTS['solving']['time_limit_seconds'] + assert CONFIG.Solving.log_to_console == _DEFAULTS['solving']['log_to_console'] + assert CONFIG.Solving.log_main_results == _DEFAULTS['solving']['log_main_results'] assert CONFIG.config_name == _DEFAULTS['config_name'] + + def test_solving_config_defaults(self): + """Test that CONFIG.Solving has correct default values.""" + assert CONFIG.Solving.mip_gap == 0.01 + assert CONFIG.Solving.time_limit_seconds == 300 + assert CONFIG.Solving.log_to_console is True + assert CONFIG.Solving.log_main_results is True + + def test_solving_config_modification(self): + """Test that CONFIG.Solving attributes can be 
modified.""" + # Modify solving config + CONFIG.Solving.mip_gap = 0.005 + CONFIG.Solving.time_limit_seconds = 600 + CONFIG.Solving.log_main_results = False + CONFIG.apply() + + # Verify modifications + assert CONFIG.Solving.mip_gap == 0.005 + assert CONFIG.Solving.time_limit_seconds == 600 + assert CONFIG.Solving.log_main_results is False + + def test_solving_config_integration_with_solvers(self): + """Test that solvers use CONFIG.Solving defaults.""" + from flixopt import solvers + + # Test with default config + CONFIG.reset() + solver1 = solvers.HighsSolver() + assert solver1.mip_gap == CONFIG.Solving.mip_gap + assert solver1.time_limit_seconds == CONFIG.Solving.time_limit_seconds + + # Modify config and create new solver + CONFIG.Solving.mip_gap = 0.002 + CONFIG.Solving.time_limit_seconds = 900 + CONFIG.apply() + + solver2 = solvers.GurobiSolver() + assert solver2.mip_gap == 0.002 + assert solver2.time_limit_seconds == 900 + + # Explicit values should override config + solver3 = solvers.HighsSolver(mip_gap=0.1, time_limit_seconds=60) + assert solver3.mip_gap == 0.1 + assert solver3.time_limit_seconds == 60 + + def test_solving_config_yaml_loading(self, tmp_path): + """Test loading solving config from YAML file.""" + config_file = tmp_path / 'solving_config.yaml' + config_content = """ +solving: + mip_gap: 0.0001 + time_limit_seconds: 1200 + log_main_results: false +""" + config_file.write_text(config_content) + + CONFIG.load_from_file(config_file) + + assert CONFIG.Solving.mip_gap == 0.0001 + assert CONFIG.Solving.time_limit_seconds == 1200 + assert CONFIG.Solving.log_main_results is False + + def test_solving_config_in_to_dict(self): + """Test that CONFIG.Solving is included in to_dict().""" + CONFIG.Solving.mip_gap = 0.003 + CONFIG.Solving.time_limit_seconds = 450 + CONFIG.Solving.log_main_results = False + + config_dict = CONFIG.to_dict() + + assert 'solving' in config_dict + assert config_dict['solving']['mip_gap'] == 0.003 + assert config_dict['solving']['time_limit_seconds'] == 450 + assert config_dict['solving']['log_main_results'] is False + + def test_solving_config_persistence(self): + """Test that Solving config is independent of other configs.""" + # Set custom solving values + CONFIG.Solving.mip_gap = 0.007 + CONFIG.Solving.time_limit_seconds = 750 + + # Change and apply logging config + CONFIG.Logging.console = True + CONFIG.apply() + + # Solving values should be unchanged + assert CONFIG.Solving.mip_gap == 0.007 + assert CONFIG.Solving.time_limit_seconds == 750 + + # Change modeling config + CONFIG.Modeling.big = 99999999 + CONFIG.apply() + + # Solving values should still be unchanged + assert CONFIG.Solving.mip_gap == 0.007 + assert CONFIG.Solving.time_limit_seconds == 750 From 69ffb13d61338775d88b76e17e5d369d30a4a74c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 15:42:15 +0100 Subject: [PATCH 14/27] Update CHANGELOG.md --- CHANGELOG.md | 11 ++++++++- flixopt/config.py | 60 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d81c9a0b..d28ad16d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,12 +51,21 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp ## [Unreleased] - ????-??-?? -**Summary**: +**Summary**: Enhanced solver configuration with new CONFIG.Solving section for centralized solver parameter management. 
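A minimal usage sketch of the flow this summary describes, mirroring the assertions in the new tests:

```python
import flixopt as fx
from flixopt import solvers

# Set process-wide solver defaults once
fx.CONFIG.Solving.mip_gap = 0.001
fx.CONFIG.Solving.time_limit_seconds = 600
fx.CONFIG.apply()

solver = solvers.HighsSolver()  # picks up the CONFIG defaults via default_factory
assert solver.mip_gap == 0.001

# Explicit arguments still win over the configured defaults
quick = solvers.HighsSolver(mip_gap=0.05, time_limit_seconds=60)
assert quick.mip_gap == 0.05
```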
If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOpt/flixOpt/releases/tag/v3.0.0) and [Migration Guide](https://flixopt.github.io/flixopt/latest/user-guide/migration-guide-v3/).

 ### ✨ Added

+**Solver configuration:**
+- **New `CONFIG.Solving` configuration section** for centralized solver parameter management:
+  - `mip_gap`: Default MIP gap tolerance for solver convergence (default: 0.01)
+  - `time_limit_seconds`: Default time limit in seconds for solver runs (default: 300)
+  - `log_to_console`: Whether solver should output to console (default: True)
+  - `log_main_results`: Whether to log main results after solving (default: True)
+- Solvers (`HighsSolver`, `GurobiSolver`) now use `CONFIG.Solving` defaults for parameters, allowing global configuration
+- Solver parameters can still be explicitly overridden when creating solver instances
+
 ### πŸ’₯ Breaking Changes

 ### ♻️ Changed

diff --git a/flixopt/config.py b/flixopt/config.py
index 1507621fd..a74740efb 100644
--- a/flixopt/config.py
+++ b/flixopt/config.py
@@ -431,6 +431,66 @@ def to_dict(cls) -> dict:
 },
 }

+ @classmethod
+ def silent(cls) -> type[CONFIG]:
+ """Configure for silent operation.
+
+ Disables console logging, solver output, and result logging
+ for clean production runs. Does not show plots. Automatically calls apply().
+ """
+ cls.Logging.console = False
+ cls.Plotting.default_show = False
+ cls.Logging.file = None
+ cls.Solving.log_to_console = False
+ cls.Solving.log_main_results = False
+ cls.apply()
+ return cls
+
+ @classmethod
+ def debug(cls) -> type[CONFIG]:
+ """Configure for debug mode with verbose output.
+
+ Enables console logging at DEBUG level and all solver output for
+ troubleshooting. Automatically calls apply().
+ """
+ cls.Logging.console = True
+ cls.Logging.level = 'DEBUG'
+ cls.Solving.log_to_console = True
+ cls.Solving.log_main_results = True
+ cls.apply()
+ return cls
+
+ @classmethod
+ def exploring(cls) -> type[CONFIG]:
+ """Configure for interactively exploring flixopt.
+
+ Enables console logging at INFO level and all solver output.
+ Also enables browser plotting via plotly, showing plots by default.
+ """
+ cls.Logging.console = True
+ cls.Logging.level = 'INFO'
+ cls.Solving.log_to_console = True
+ cls.Solving.log_main_results = True
+ cls.browser_plotting()
+ cls.apply()
+ return cls
+
+ @classmethod
+ def browser_plotting(cls) -> type[CONFIG]:
+ """Configure interactive usage with plotly, opening plots in the browser.
+
+ Sets plotly.io.renderers.default = 'browser' and enables CONFIG.Plotting.default_show,
+ so plots are shown by default. Useful for running examples and viewing interactive plots.
+ """
+ cls.Plotting.default_show = True
+ cls.apply()
+
+ import plotly.io as pio
+
+ pio.renderers.default = 'browser'
+
+ return cls
+

 class MultilineFormatter(logging.Formatter):
 """Formatter that handles multi-line messages with consistent prefixes.
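The presets above are one-liners that set several knobs at once and call `apply()` themselves; the next patch switches the examples over to them. Typical usage:

```python
import flixopt as fx

# Pick one preset per session; each applies itself immediately
fx.CONFIG.exploring()  # INFO logging, solver output, plots open in the browser

# For headless batch runs instead:
# fx.CONFIG.silent()   # no console logging, no solver output, no plots shown
```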
From 2fbbd3a072604c30583c44dc2c2310e861f19edb Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 16:14:59 +0100 Subject: [PATCH 15/27] Use new Config options in examples --- examples/00_Minmal/minimal_example.py | 3 +-- examples/01_Simple/simple_example.py | 7 +++---- examples/02_Complex/complex_example.py | 5 ++--- examples/02_Complex/complex_example_results.py | 5 ++--- examples/03_Calculation_types/example_calculation_types.py | 5 ++--- examples/04_Scenarios/scenario_example.py | 5 +++-- .../05_Two-stage-optimization/two_stage_optimization.py | 7 +++---- 7 files changed, 16 insertions(+), 21 deletions(-) diff --git a/examples/00_Minmal/minimal_example.py b/examples/00_Minmal/minimal_example.py index 6a0ed3831..92e6801b2 100644 --- a/examples/00_Minmal/minimal_example.py +++ b/examples/00_Minmal/minimal_example.py @@ -9,8 +9,7 @@ import flixopt as fx if __name__ == '__main__': - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.silent() flow_system = fx.FlowSystem(pd.date_range('2020-01-01', periods=3, freq='h')) flow_system.add_elements( diff --git a/examples/01_Simple/simple_example.py b/examples/01_Simple/simple_example.py index 6b62d6712..fd5a3d9b7 100644 --- a/examples/01_Simple/simple_example.py +++ b/examples/01_Simple/simple_example.py @@ -8,9 +8,8 @@ import flixopt as fx if __name__ == '__main__': - # Enable console logging - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.exploring() + # --- Create Time Series Data --- # Heat demand profile (e.g., kW) over time and corresponding power prices heat_demand_per_h = np.array([30, 0, 90, 110, 110, 20, 20, 20, 20]) @@ -101,7 +100,7 @@ flow_system.add_elements(costs, CO2, boiler, storage, chp, heat_sink, gas_source, power_sink) # Visualize the flow system for validation purposes - flow_system.plot_network(show=True) + flow_system.plot_network() # --- Define and Run Calculation --- # Create a calculation object to model the Flow System diff --git a/examples/02_Complex/complex_example.py b/examples/02_Complex/complex_example.py index 805cb08f6..b8ef76a03 100644 --- a/examples/02_Complex/complex_example.py +++ b/examples/02_Complex/complex_example.py @@ -9,9 +9,8 @@ import flixopt as fx if __name__ == '__main__': - # Enable console logging - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.exploring() + # --- Experiment Options --- # Configure options for testing various parameters and behaviors check_penalty = False diff --git a/examples/02_Complex/complex_example_results.py b/examples/02_Complex/complex_example_results.py index 96d06dd04..edc2f7a1d 100644 --- a/examples/02_Complex/complex_example_results.py +++ b/examples/02_Complex/complex_example_results.py @@ -5,9 +5,8 @@ import flixopt as fx if __name__ == '__main__': - # Enable console logging - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.exploring() + # --- Load Results --- try: results = fx.results.CalculationResults.from_file('results', 'complex example') diff --git a/examples/03_Calculation_types/example_calculation_types.py b/examples/03_Calculation_types/example_calculation_types.py index c5df50034..210747db9 100644 --- a/examples/03_Calculation_types/example_calculation_types.py +++ b/examples/03_Calculation_types/example_calculation_types.py @@ -11,9 +11,8 @@ import flixopt as fx if __name__ == '__main__': - # Enable console logging - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.exploring() + # Calculation Types full, segmented, 
aggregated = True, True, True
diff --git a/examples/04_Scenarios/scenario_example.py b/examples/04_Scenarios/scenario_example.py
index d258d4142..bf4f24617 100644
--- a/examples/04_Scenarios/scenario_example.py
+++ b/examples/04_Scenarios/scenario_example.py
@@ -8,6 +8,8 @@
 import flixopt as fx
 
 if __name__ == '__main__':
+    fx.CONFIG.exploring()
+
     # Create datetime array starting from '2020-01-01' for one week
     timesteps = pd.date_range('2020-01-01', periods=24 * 7, freq='h')
     scenarios = pd.Index(['Base Case', 'High Demand'])
@@ -186,7 +188,7 @@
     flow_system.add_elements(costs, CO2, boiler, storage, chp, heat_sink, gas_source, power_sink)
 
     # Visualize the flow system for validation purposes
-    flow_system.plot_network(show=True)
+    flow_system.plot_network()
 
     # --- Define and Run Calculation ---
     # Create a calculation object to model the Flow System
@@ -215,7 +217,6 @@
 
     # Convert the results for the storage component to a dataframe and display
     df = calculation.results['Storage'].node_balance_with_charge_state()
-    print(df)
 
     # Save results to file for later usage
     calculation.results.to_file()
diff --git a/examples/05_Two-stage-optimization/two_stage_optimization.py b/examples/05_Two-stage-optimization/two_stage_optimization.py
index dde3ae069..b2be58cbe 100644
--- a/examples/05_Two-stage-optimization/two_stage_optimization.py
+++ b/examples/05_Two-stage-optimization/two_stage_optimization.py
@@ -7,7 +7,6 @@
 While the final optimum might differ from the global optimum, the solving will be much faster.
 """
 
-import logging
 import pathlib
 import timeit
 
@@ -16,9 +15,9 @@
 
 import flixopt as fx
 
-logger = logging.getLogger('flixopt')
-
 if __name__ == '__main__':
+    fx.CONFIG.exploring()
+
     # Data Import
     data_import = pd.read_csv(
         pathlib.Path(__file__).parent.parent / 'resources' / 'Zeitreihen2020.csv', index_col=0
@@ -136,7 +135,7 @@
     timer_dispatch = timeit.default_timer() - start
 
     if (calculation_dispatch.results.sizes().round(5) == calculation_sizing.results.sizes().round(5)).all().item():
-        logger.info('Sizes were correctly equalized')
+        print('Sizes were correctly equalized')
     else:
         raise RuntimeError('Sizes were not correctly equalized')
 
From 209cdfd9155a69b274d0a4c638a706de67a7065e Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Thu, 30 Oct 2025 16:20:37 +0100
Subject: [PATCH 16/27] Set plotting backend in CI directly, overwriting all
 configs

---
 .github/workflows/python-app.yaml |  2 ++
 tests/conftest.py                 | 23 -----------------------
 2 files changed, 2 insertions(+), 23 deletions(-)

diff --git a/.github/workflows/python-app.yaml b/.github/workflows/python-app.yaml
index f4dbc28c5..66ceceab4 100644
--- a/.github/workflows/python-app.yaml
+++ b/.github/workflows/python-app.yaml
@@ -24,6 +24,8 @@ concurrency:
 
 env:
   PYTHON_VERSION: "3.11"
+  MPLBACKEND: Agg  # Non-interactive matplotlib backend for CI/testing
+  PLOTLY_RENDERER: json  # Headless plotly renderer for CI/testing
 
 jobs:
   lint:
diff --git a/tests/conftest.py b/tests/conftest.py
index bd940b843..50c58e1ab 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -828,26 +828,3 @@ def cleanup_figures():
     import matplotlib.pyplot as plt
 
     plt.close('all')
-
-
-@pytest.fixture(scope='session', autouse=True)
-def set_test_environment():
-    """
-    Configure plotting for test environment.
-
-    This fixture runs once per test session to:
-    - Set matplotlib to use non-interactive 'Agg' backend
-    - Set plotly to use non-interactive 'json' renderer
-    - Prevent GUI windows from opening during tests
-    """
-    import matplotlib
-
-    matplotlib.use('Agg')  # Use non-interactive backend
-
-    import plotly.io as pio
-
-    pio.renderers.default = 'json'  # Use non-interactive renderer
-
-    fx.CONFIG.Plotting.default_show = False
-
-    yield
From f529d9b68e704d39994a00b21ab407f8d14c6ec9 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Thu, 30 Oct 2025 16:47:24 +0100
Subject: [PATCH 17/27] Fixed tqdm progress bar to respect CONFIG.silent()

---
 flixopt/calculation.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/flixopt/calculation.py b/flixopt/calculation.py
index 1728725b8..37ea7a3db 100644
--- a/flixopt/calculation.py
+++ b/flixopt/calculation.py
@@ -589,6 +589,7 @@ def do_modeling_and_solve(
             desc='Solving segments',
             unit='segment',
             file=sys.stdout,  # Force tqdm to write to stdout instead of stderr
+            disable=not CONFIG.Solving.log_to_console,  # Respect silent configuration
         )
 
         for i, calculation in progress_bar:
From 3ea3881557d999acb397832633e8e92dc8f31175 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Thu, 30 Oct 2025 16:47:38 +0100
Subject: [PATCH 18/27] Replaced print() with framework logger
 (examples/05_Two-stage-optimization/two_stage_optimization.py)

---
 examples/05_Two-stage-optimization/two_stage_optimization.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/examples/05_Two-stage-optimization/two_stage_optimization.py b/examples/05_Two-stage-optimization/two_stage_optimization.py
index b2be58cbe..9647e803c 100644
--- a/examples/05_Two-stage-optimization/two_stage_optimization.py
+++ b/examples/05_Two-stage-optimization/two_stage_optimization.py
@@ -7,6 +7,7 @@
 While the final optimum might differ from the global optimum, the solving will be much faster.
""" +import logging import pathlib import timeit @@ -15,6 +16,8 @@ import flixopt as fx +logger = logging.getLogger('flixopt') + if __name__ == '__main__': fx.CONFIG.exploring() @@ -135,7 +138,7 @@ timer_dispatch = timeit.default_timer() - start if (calculation_dispatch.results.sizes().round(5) == calculation_sizing.results.sizes().round(5)).all().item(): - print('Sizes were correctly equalized') + logger.info('Sizes were correctly equalized') else: raise RuntimeError('Sizes were not correctly equalized') From 284e3a525b0fb0ee1352736faa760fdbc5b1142f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 30 Oct 2025 16:47:58 +0100 Subject: [PATCH 19/27] Added comprehensive tests for suppress_output() --- tests/test_io.py | 107 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/tests/test_io.py b/tests/test_io.py index dbbc4cc72..83ac4251b 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -80,5 +80,112 @@ def test_flow_system_io(flow_system): flow_system.__str__() +def test_suppress_output_file_descriptors(tmp_path): + """Test that suppress_output() redirects file descriptors to /dev/null.""" + import os + import sys + + from flixopt.io import suppress_output + + # Create temporary files to capture output + test_file = tmp_path / 'test_output.txt' + + # Test that FD 1 (stdout) is redirected during suppression + with open(test_file, 'w') as f: + original_stdout_fd = os.dup(1) # Save original stdout FD + try: + # Redirect FD 1 to our test file + os.dup2(f.fileno(), 1) + os.write(1, b'before suppression\n') + + with suppress_output(): + # Inside suppress_output, writes should go to /dev/null, not our file + os.write(1, b'during suppression\n') + + # After suppress_output, writes should go to our file again + os.write(1, b'after suppression\n') + finally: + # Restore original stdout + os.dup2(original_stdout_fd, 1) + os.close(original_stdout_fd) + + # Read the file and verify content + content = test_file.read_text() + assert 'before suppression' in content + assert 'during suppression' not in content # This should NOT be in the file + assert 'after suppression' in content + + +def test_suppress_output_python_level(): + """Test that Python-level stdout/stderr continue to work after suppress_output().""" + import io + import sys + + from flixopt.io import suppress_output + + # Create a StringIO to capture Python-level output + captured_output = io.StringIO() + + # After suppress_output exits, Python streams should be functional + with suppress_output(): + pass # Just enter and exit the context + + # Redirect sys.stdout to our StringIO + old_stdout = sys.stdout + try: + sys.stdout = captured_output + print('test message') + finally: + sys.stdout = old_stdout + + # Verify Python-level stdout works + assert 'test message' in captured_output.getvalue() + + +def test_suppress_output_exception_handling(): + """Test that suppress_output() properly restores streams even on exception.""" + import sys + + from flixopt.io import suppress_output + + # Save original file descriptors + original_stdout_fd = sys.stdout.fileno() + original_stderr_fd = sys.stderr.fileno() + + try: + with suppress_output(): + raise ValueError('Test exception') + except ValueError: + pass + + # Verify streams are restored after exception + assert sys.stdout.fileno() == original_stdout_fd + assert sys.stderr.fileno() == original_stderr_fd + + # Verify we can still write to stdout/stderr + sys.stdout.write('test after exception\n') + 
sys.stdout.flush() + + +def test_suppress_output_c_level(): + """Test that suppress_output() suppresses C-level output (file descriptor level).""" + import os + import sys + + from flixopt.io import suppress_output + + # This test verifies that even low-level C writes are suppressed + # by writing directly to file descriptor 1 (stdout) + with suppress_output(): + # Try to write directly to FD 1 (stdout) - should be suppressed + os.write(1, b'C-level stdout write\n') + # Try to write directly to FD 2 (stderr) - should be suppressed + os.write(2, b'C-level stderr write\n') + + # After exiting context, ensure streams work + sys.stdout.write('After C-level test\n') + sys.stdout.flush() + + if __name__ == '__main__': pytest.main(['-v', '--disable-warnings']) From 8f613bc58386b290bfdf44e4fa61ca705e2cf2fb Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 31 Oct 2025 16:25:28 +0100 Subject: [PATCH 20/27] Remove unused import --- flixopt/calculation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index 37ea7a3db..ff6780bb2 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -10,7 +10,6 @@ from __future__ import annotations -import copy import logging import math import pathlib From 2bd25bc7cb4d6c43f2df80b6e81b22e51f16f763 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 31 Oct 2025 16:26:22 +0100 Subject: [PATCH 21/27] Ensure progress bar cleanup on exceptions. --- flixopt/calculation.py | 63 +++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index ff6780bb2..b643dbebf 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -591,39 +591,40 @@ def do_modeling_and_solve( disable=not CONFIG.Solving.log_to_console, # Respect silent configuration ) - for i, calculation in progress_bar: - # Update progress bar description with current segment info - progress_bar.set_description( - f'Solving ({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]})' - ) - - if i > 0 and self.nr_of_previous_values > 0: - self._transfer_start_values(i) - - calculation.do_modeling() - - # Warn about Investments, but only in fist run - if i == 0: - invest_elements = [ - model.label_full - for component in calculation.flow_system.components.values() - for model in component.submodel.all_submodels - if isinstance(model, InvestmentModel) - ] - if invest_elements: - logger.critical( - f'Investments are not supported in Segmented Calculation! 
' - f'Following InvestmentModels were found: {invest_elements}' - ) - - with fx_io.suppress_output(): - calculation.solve( - solver, - log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', - log_main_results=log_main_results, + try: + for i, calculation in progress_bar: + # Update progress bar description with current segment info + progress_bar.set_description( + f'Solving ({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]})' ) - progress_bar.close() + if i > 0 and self.nr_of_previous_values > 0: + self._transfer_start_values(i) + + calculation.do_modeling() + + # Warn about Investments, but only in fist run + if i == 0: + invest_elements = [ + model.label_full + for component in calculation.flow_system.components.values() + for model in component.submodel.all_submodels + if isinstance(model, InvestmentModel) + ] + if invest_elements: + logger.critical( + f'Investments are not supported in Segmented Calculation! ' + f'Following InvestmentModels were found: {invest_elements}' + ) + + with fx_io.suppress_output(): + calculation.solve( + solver, + log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', + log_main_results=log_main_results, + ) + finally: + progress_bar.close() for calc in self.sub_calculations: for key, value in calc.durations.items(): From 6d6f15efb884d3056f80cddbcea9860ea43e3dd0 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 31 Oct 2025 16:28:24 +0100 Subject: [PATCH 22/27] Add test --- tests/test_io.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tests/test_io.py b/tests/test_io.py index 83ac4251b..6d225734e 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -187,5 +187,41 @@ def test_suppress_output_c_level(): sys.stdout.flush() +def test_tqdm_cleanup_on_exception(): + """Test that tqdm progress bar is properly cleaned up even when exceptions occur. + + This test verifies the pattern used in SegmentedCalculation where a try/finally + block ensures progress_bar.close() is called even if an exception occurs. 
+    """
+    from tqdm import tqdm
+
+    # Create a progress bar (disabled to avoid output during tests)
+    items = enumerate(range(5))
+    progress_bar = tqdm(items, total=5, desc='Test progress', disable=True)
+
+    # Track whether cleanup was called
+    cleanup_called = False
+    exception_raised = False
+
+    try:
+        try:
+            for idx, _ in progress_bar:
+                if idx == 2:
+                    raise ValueError('Test exception')
+        finally:
+            # This should always execute, even with exception
+            progress_bar.close()
+            cleanup_called = True
+    except ValueError:
+        exception_raised = True
+
+    # Verify both that the exception was raised AND cleanup happened
+    assert exception_raised, 'Test exception should have been raised'
+    assert cleanup_called, 'Cleanup should have been called even with exception'
+
+    # Verify that close() is idempotent - calling it again should not raise
+    progress_bar.close()  # Should not raise even if already closed
+
+
 if __name__ == '__main__':
     pytest.main(['-v', '--disable-warnings'])
From 3ad25a0c80977ce7f19771ce73913f9f2ca68283 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 31 Oct 2025 16:49:56 +0100
Subject: [PATCH 23/27] Split method in SegmentedCalculation to better
 distinguish between showing and hiding solver output

---
 CHANGELOG.md           |   1 +
 flixopt/calculation.py | 126 +++++++++++++++++++++++++++--------------
 2 files changed, 86 insertions(+), 41 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d28ad16d1..2e4912d48 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,6 +67,7 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp
 - Solver parameters can still be explicitly overridden when creating solver instances
 
 ### πŸ’₯ Breaking Changes
+- Individual solver output is now hidden in **SegmentedCalculation**. To return t the prior behaviour, set `show_individual_solves=True` in `do_modeling_and_solve()`.
 
 ### ♻️ Changed
 
diff --git a/flixopt/calculation.py b/flixopt/calculation.py
index b643dbebf..c0b2a54c6 100644
--- a/flixopt/calculation.py
+++ b/flixopt/calculation.py
@@ -571,60 +571,104 @@ def _create_sub_calculations(self):
                 f'({timesteps_of_segment[0]} -> {timesteps_of_segment[-1]}):'
             )
 
+    def _solve_single_segment(
+        self,
+        i: int,
+        calculation: FullCalculation,
+        solver: _Solver,
+        log_file: pathlib.Path | None,
+        log_main_results: bool,
+        suppress_output: bool,
+    ) -> None:
+        """Solve a single segment calculation."""
+        if i > 0 and self.nr_of_previous_values > 0:
+            self._transfer_start_values(i)
+
+        calculation.do_modeling()
+
+        # Warn about Investments, but only in first run
+        if i == 0:
+            invest_elements = [
+                model.label_full
+                for component in calculation.flow_system.components.values()
+                for model in component.submodel.all_submodels
+                if isinstance(model, InvestmentModel)
+            ]
+            if invest_elements:
+                logger.critical(
+                    f'Investments are not supported in Segmented Calculation! '
+                    f'Following InvestmentModels were found: {invest_elements}'
+                )
+
+        log_path = pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log'
+
+        if suppress_output:
+            with fx_io.suppress_output():
+                calculation.solve(solver, log_file=log_path, log_main_results=log_main_results)
+        else:
+            calculation.solve(solver, log_file=log_path, log_main_results=log_main_results)
+
     def do_modeling_and_solve(
         self,
         solver: _Solver,
         log_file: pathlib.Path | None = None,
         log_main_results: bool = False,
+        show_individual_solves: bool = False,
     ) -> SegmentedCalculation:
+        """Model and solve all segments of the segmented calculation.
+ + This method creates sub-calculations for each time segment, then iteratively + models and solves each segment. It supports two output modes: a progress bar + for compact output, or detailed individual solve information. + + Args: + solver: The solver instance to use for optimization (e.g., Gurobi, HiGHS). + log_file: Optional path to the solver log file. If None, defaults to + folder/name.log. + log_main_results: Whether to log main results (objective, effects, etc.) + after each segment solve. Defaults to False. + show_individual_solves: If True, shows detailed output for each segment + solve with logger messages. If False (default), shows a compact progress + bar with suppressed solver output for cleaner display. + + Returns: + Self, for method chaining. + + Note: + The method automatically transfers all start values between segments to ensure + continuity of storage states and flow rates across segment boundaries. + """ logger.info(f'{"":#^80}') logger.info(f'{" Segmented Solving ":#^80}') self._create_sub_calculations() - # Create tqdm progress bar with custom format that prints to stdout - progress_bar = tqdm( - enumerate(self.sub_calculations), - total=len(self.sub_calculations), - desc='Solving segments', - unit='segment', - file=sys.stdout, # Force tqdm to write to stdout instead of stderr - disable=not CONFIG.Solving.log_to_console, # Respect silent configuration - ) - - try: - for i, calculation in progress_bar: - # Update progress bar description with current segment info - progress_bar.set_description( - f'Solving ({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]})' + if show_individual_solves: + # Path 1: Show individual solves with detailed output + for i, calculation in enumerate(self.sub_calculations): + logger.info( + f'Solving segment {i + 1}/{len(self.sub_calculations)}: ' + f'{calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]}' ) + self._solve_single_segment(i, calculation, solver, log_file, log_main_results, suppress_output=False) + else: + # Path 2: Show only progress bar with suppressed output + progress_bar = tqdm( + enumerate(self.sub_calculations), + total=len(self.sub_calculations), + desc='Solving segments', + unit='segment', + file=sys.stdout, + disable=not CONFIG.Solving.log_to_console, + ) - if i > 0 and self.nr_of_previous_values > 0: - self._transfer_start_values(i) - - calculation.do_modeling() - - # Warn about Investments, but only in fist run - if i == 0: - invest_elements = [ - model.label_full - for component in calculation.flow_system.components.values() - for model in component.submodel.all_submodels - if isinstance(model, InvestmentModel) - ] - if invest_elements: - logger.critical( - f'Investments are not supported in Segmented Calculation! 
'
-                            f'Following InvestmentModels were found: {invest_elements}'
-                        )
-
-                with fx_io.suppress_output():
-                    calculation.solve(
-                        solver,
-                        log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log',
-                        log_main_results=log_main_results,
-                    )
-        finally:
-            progress_bar.close()
+            try:
+                for i, calculation in progress_bar:
+                    progress_bar.set_description(
+                        f'Solving ({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]})'
+                    )
+                    self._solve_single_segment(i, calculation, solver, log_file, log_main_results, suppress_output=True)
+            finally:
+                progress_bar.close()
 
         for calc in self.sub_calculations:
             for key, value in calc.durations.items():
From 691d95c3f8105ce08fd33d9c68f4705975465d40 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 31 Oct 2025 17:17:23 +0100
Subject: [PATCH 24/27] Use config show in examples

---
 examples/02_Complex/complex_example_results.py             | 2 +-
 examples/03_Calculation_types/example_calculation_types.py | 2 +-
 flixopt/calculation.py                                     | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/02_Complex/complex_example_results.py b/examples/02_Complex/complex_example_results.py
index edc2f7a1d..96191c4d8 100644
--- a/examples/02_Complex/complex_example_results.py
+++ b/examples/02_Complex/complex_example_results.py
@@ -18,7 +18,7 @@
         ) from e
 
     # --- Basic overview ---
-    results.plot_network(show=True)
+    results.plot_network()
     results['FernwΓ€rme'].plot_node_balance()
 
     # --- Detailed Plots ---
diff --git a/examples/03_Calculation_types/example_calculation_types.py b/examples/03_Calculation_types/example_calculation_types.py
index 210747db9..e339c1c24 100644
--- a/examples/03_Calculation_types/example_calculation_types.py
+++ b/examples/03_Calculation_types/example_calculation_types.py
@@ -164,7 +164,7 @@
         a_kwk,
         a_speicher,
     )
-    flow_system.plot_network(controls=False, show=True)
+    flow_system.plot_network()
 
     # Calculations
     calculations: list[fx.FullCalculation | fx.AggregatedCalculation | fx.SegmentedCalculation] = []
diff --git a/flixopt/calculation.py b/flixopt/calculation.py
index c0b2a54c6..875b3967b 100644
--- a/flixopt/calculation.py
+++ b/flixopt/calculation.py
@@ -370,7 +370,7 @@
             )
         self.aggregation.cluster()
 
-        self.aggregation.plot(show=True, save=self.folder / 'aggregation.html')
+        self.aggregation.plot(show=CONFIG.Plotting.default_show, save=self.folder / 'aggregation.html')
         if self.aggregation_parameters.aggregate_data_and_fix_non_binary_vars:
             ds = self.flow_system.to_dataset()
             for name, series in self.aggregation.aggregated_data.items():
From 8a504ef77ed16dfa121683b4b27f78a9e421870c Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 31 Oct 2025 17:39:32 +0100
Subject: [PATCH 25/27] Use config show in results.plot_network()

---
 flixopt/results.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/flixopt/results.py b/flixopt/results.py
index 26eaf9d5d..c02e5b769 100644
--- a/flixopt/results.py
+++ b/flixopt/results.py
@@ -1029,14 +1029,14 @@
             ]
         ) = True,
         path: pathlib.Path | None = None,
-        show: bool = False,
+        show: bool | None = None,
     ) -> pyvis.network.Network | None:
         """Plot interactive network visualization of the system.
 
         Args:
             controls: Enable/disable interactive controls.
             path: Save path for network HTML.
-            show: Whether to display the plot.
+            show: Whether to display the plot. If None, uses CONFIG.Plotting.default_show.
""" if path is None: path = self.folder / f'{self.name}--network.html' From 59b125a925871b5bc23a463b2d8d0d686be9cff1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 31 Oct 2025 17:39:40 +0100 Subject: [PATCH 26/27] Improve readabailty of code --- flixopt/calculation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index 875b3967b..feb077dcf 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -252,7 +252,8 @@ def solve( ) # Log the formatted output - if log_main_results if log_main_results is not None else CONFIG.Solving.log_main_results: + should_log = log_main_results if log_main_results is not None else CONFIG.Solving.log_main_results + if should_log: logger.info( f'{" Main Results ":#^80}\n' + yaml.dump( From f3f54c94f30bea00e724f459b044791b8d4f4530 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 1 Nov 2025 13:48:27 +0100 Subject: [PATCH 27/27] Typo --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e4912d48..befccf890 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,7 +67,7 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp - Solver parameters can still be explicitly overridden when creating solver instances ### πŸ’₯ Breaking Changes -- Individual solver output is now hidden in **SegmentedCalculation**. To return t the prior behaviour, set `show_individual_solves=True` in `do_modeling_and_solve()`. +- Individual solver output is now hidden in **SegmentedCalculation**. To return to the prior behaviour, set `show_individual_solves=True` in `do_modeling_and_solve()`. ### ♻️ Changed