From b2b4b3699b1635d8b065739bda0a4b146881fb71 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 27 Jan 2026 13:49:31 +0100 Subject: [PATCH 1/5] Summary of Fixes 1. Fixed _interpolate_charge_state_segmented Bug (Critical) File: flixopt/transform_accessor.py:1965-2043 The function was incorrectly decoding timestep_mapping using timesteps_per_cluster. For segmented systems, timestep_mapping encodes cluster * n_segments + segment_idx, so this produced wrong cluster indices. Fix: Compute original period index and position directly from timestep indices instead of decoding from timestep_mapping. 2. Implemented EXPAND_FIRST_TIMESTEP for Segmented Systems Only File: flixopt/transform_accessor.py:2045-2100 Added _expand_first_timestep_only() method that places startup/shutdown events at the first timestep of each segment (not cluster). Key design decisions: - Segmented systems: Events placed at first timestep of each segment (timing within segment is lost) - Non-segmented systems: Normal expansion preserves timing within cluster (no special handling needed) 3. Added Tests - test_startup_shutdown_first_timestep_only: Verifies segmented systems place events at segment boundaries - test_startup_timing_preserved_non_segmented: Verifies non-segmented systems preserve timing within clusters 4. Updated Docstrings Clarified in expand() docstring that: - Binary events in segmented systems go to first timestep of each segment - Non-segmented systems preserve timing via normal expansion --- flixopt/transform_accessor.py | 129 +++++++++++++++++++++++--- tests/test_cluster_reduce_expand.py | 138 ++++++++++++++++++++++++++++ 2 files changed, 252 insertions(+), 15 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 8e84777a0..95c221c45 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -17,7 +17,7 @@ import xarray as xr from .modeling import _scalar_safe_reduce -from .structure import EXPAND_DIVIDE, EXPAND_INTERPOLATE, VariableCategory +from .structure import EXPAND_DIVIDE, EXPAND_FIRST_TIMESTEP, EXPAND_INTERPOLATE, VariableCategory if TYPE_CHECKING: from tsam import ClusterConfig, ExtremeConfig, SegmentConfig @@ -1986,22 +1986,37 @@ def _interpolate_charge_state_segmented( Interpolated charge_state with dims (time, ...) for original timesteps. 
""" # Get multi-dimensional properties from Clustering - timestep_mapping = clustering.timestep_mapping segment_assignments = clustering.results.segment_assignments segment_durations = clustering.results.segment_durations position_within_segment = clustering.results.position_within_segment + cluster_assignments = clustering.cluster_assignments - # Decode timestep_mapping into cluster and time indices - # timestep_mapping encodes original timestep -> (cluster, position_within_cluster) - # where position_within_cluster indexes into segment_assignments/position_within_segment - # which have shape (cluster, timesteps_per_cluster) + # Compute original period index and position within period directly + # This is more reliable than decoding from timestep_mapping, which encodes + # (cluster * n_segments + segment_idx) for segmented systems + n_original_timesteps = len(original_timesteps) timesteps_per_cluster = clustering.timesteps_per_cluster - cluster_indices = timestep_mapping // timesteps_per_cluster - time_indices = timestep_mapping % timesteps_per_cluster + n_original_clusters = clustering.n_original_clusters + + # For each original timestep, compute which original period it belongs to + original_period_indices = np.minimum( + np.arange(n_original_timesteps) // timesteps_per_cluster, + n_original_clusters - 1, + ) + # Position within the period (0 to timesteps_per_cluster-1) + positions_in_period = np.arange(n_original_timesteps) % timesteps_per_cluster + + # Create DataArrays for indexing (with original_time dimension, coords added later) + original_period_da = xr.DataArray(original_period_indices, dims=['original_time']) + position_in_period_da = xr.DataArray(positions_in_period, dims=['original_time']) + + # Map original period to cluster + cluster_indices = cluster_assignments.isel(original_cluster=original_period_da) # Get segment index and position for each original timestep - seg_indices = segment_assignments.isel(cluster=cluster_indices, time=time_indices) - positions = position_within_segment.isel(cluster=cluster_indices, time=time_indices) + # segment_assignments has shape (cluster, time) where time = timesteps_per_cluster + seg_indices = segment_assignments.isel(cluster=cluster_indices, time=position_in_period_da) + positions = position_within_segment.isel(cluster=cluster_indices, time=position_in_period_da) durations = segment_durations.isel(cluster=cluster_indices, segment=seg_indices) # Calculate interpolation factor: position within segment (0 to 1) @@ -2023,6 +2038,66 @@ def _interpolate_charge_state_segmented( return interpolated.transpose('time', ...).assign_attrs(da.attrs) + def _expand_first_timestep_only( + self, + da: xr.DataArray, + clustering: Clustering, + original_timesteps: pd.DatetimeIndex, + ) -> xr.DataArray: + """Expand binary event variables (startup/shutdown) to first timestep of each segment. + + For segmented systems, binary event variables like startup and shutdown indicate + that an event occurred somewhere in the segment. When expanding, we place the + event at the first timestep of each segment and set all other timesteps to 0. + + This method is only used for segmented systems. For non-segmented systems, + the timing within the cluster is preserved by normal expansion. + + Args: + da: Binary event DataArray with dims including (cluster, time). + clustering: Clustering object with segment info (must be segmented). + original_timesteps: Original timesteps to expand to. 
+ + Returns: + Expanded DataArray with event values only at first timestep of each segment. + """ + # First expand normally (repeats values) + expanded = clustering.expand_data(da, original_time=original_timesteps) + + # Build mask: True only at first timestep of each segment + n_original_timesteps = len(original_timesteps) + timesteps_per_cluster = clustering.timesteps_per_cluster + n_original_clusters = clustering.n_original_clusters + + position_within_segment = clustering.results.position_within_segment + cluster_assignments = clustering.cluster_assignments + + # Compute original period index and position within period + original_period_indices = np.minimum( + np.arange(n_original_timesteps) // timesteps_per_cluster, + n_original_clusters - 1, + ) + positions_in_period = np.arange(n_original_timesteps) % timesteps_per_cluster + + # Create DataArrays for indexing (coords added later after rename) + original_period_da = xr.DataArray(original_period_indices, dims=['original_time']) + position_in_period_da = xr.DataArray(positions_in_period, dims=['original_time']) + + # Map to cluster and get position within segment + cluster_indices = cluster_assignments.isel(original_cluster=original_period_da) + pos_in_segment = position_within_segment.isel(cluster=cluster_indices, time=position_in_period_da) + + # Clean up and create mask + pos_in_segment = pos_in_segment.drop_vars(['cluster', 'time'], errors='ignore') + pos_in_segment = pos_in_segment.rename({'original_time': 'time'}).assign_coords(time=original_timesteps) + + # First timestep of segment has position 0 + is_first = pos_in_segment == 0 + + # Apply mask: keep value at first timestep, zero elsewhere + result = xr.where(is_first, expanded, 0) + return result.assign_attrs(da.attrs) + def expand(self) -> FlowSystem: """Expand a clustered FlowSystem back to full original timesteps. @@ -2113,12 +2188,23 @@ def expand(self) -> FlowSystem: 4. **Binary status variables** - Constant within segment: - These variables cannot be meaningfully interpolated. They indicate - the dominant state or whether an event occurred during the segment. + These variables cannot be meaningfully interpolated. The status + indicates the dominant state during the segment. + + - ``{flow}|status``: On/off status (0 or 1), repeated for all timesteps + + 5. **Binary event variables** (segmented systems only) - First timestep of segment: + + For segmented systems, these variables indicate that an event occurred + somewhere during the segment. When expanded, the event is placed at the + first timestep of each segment, with zeros elsewhere. This preserves the + total count of events while providing a reasonable temporal placement. - - ``{flow}|status``: On/off status (0 or 1) - - ``{flow}|startup``: Startup event occurred in segment - - ``{flow}|shutdown``: Shutdown event occurred in segment + For non-segmented systems, the timing within the cluster is preserved + by normal expansion (no special handling needed). 
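+ + For example, a startup that occurs anywhere within a (roughly 4-hour) segment is placed at that segment's first original timestep when expanded, so the total startup count is conserved.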
+ + - ``{flow}|startup``: Startup event + - ``{flow}|shutdown``: Shutdown event """ from .flow_system import FlowSystem @@ -2162,6 +2248,13 @@ def _is_state_variable(var_name: str) -> bool: # Fall back to pattern matching for backwards compatibility return var_name.endswith('|charge_state') + def _is_first_timestep_variable(var_name: str) -> bool: + """Check if a variable is a binary event (should only appear at first timestep).""" + if var_name in variable_categories: + return variable_categories[var_name] in EXPAND_FIRST_TIMESTEP + # Fall back to pattern matching for backwards compatibility + return var_name.endswith('|startup') or var_name.endswith('|shutdown') + def _append_final_state(expanded: xr.DataArray, da: xr.DataArray) -> xr.DataArray: """Append final state value from original data to expanded data.""" cluster_assignments = clustering.cluster_assignments @@ -2181,12 +2274,18 @@ def expand_da(da: xr.DataArray, var_name: str = '', is_solution: bool = False) - return da.copy() is_state = _is_state_variable(var_name) and 'cluster' in da.dims + is_first_timestep = _is_first_timestep_variable(var_name) and 'cluster' in da.dims # State variables in segmented systems: interpolate within segments if is_state and clustering.is_segmented: expanded = self._interpolate_charge_state_segmented(da, clustering, original_timesteps) return _append_final_state(expanded, da) + # Binary events (startup/shutdown) in segmented systems: first timestep of each segment + # For non-segmented systems, timing within cluster is preserved, so normal expansion is correct + if is_first_timestep and is_solution and clustering.is_segmented: + return self._expand_first_timestep_only(da, clustering, original_timesteps) + expanded = clustering.expand_data(da, original_time=original_timesteps) # Segment totals: divide by expansion divisor diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index fea6917dc..e85d98cb5 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -1440,6 +1440,144 @@ def test_segmented_expand_after_load(self, solver_fixture, timesteps_8_days, tmp ) + +class TestStartupShutdownExpansion: + """Tests for correct expansion of startup/shutdown binary events.""" + + def test_startup_shutdown_first_timestep_only(self, solver_fixture, timesteps_8_days): + """Test that startup/shutdown events are placed at first timestep of each segment only.""" + from tsam import SegmentConfig + + # Create system with on/off behavior + fs = fx.FlowSystem(timesteps=timesteps_8_days) + fs.add_elements(fx.Effect('Cost', unit='EUR', is_objective=True)) + fs.add_elements(fx.Bus('Heat')) + + # Source with startup costs (forces startup/shutdown tracking) + fs.add_elements( + fx.Source( + 'Boiler', + outputs=[ + fx.Flow( + 'Q', + bus='Heat', + size=100, + status_parameters=fx.StatusParameters(effects_per_startup={'Cost': 10}), + effects_per_flow_hour={'Cost': 50}, + ) + ], + ) + ) + + # Variable demand that forces startups + demand_pattern = np.array([0.8] * 12 + [0.0] * 12) # On/off pattern per day (0-1 range) + demand_profile = np.tile(demand_pattern, 8) + fs.add_elements( + fx.Sink('Demand', inputs=[fx.Flow('Q', bus='Heat', size=50, fixed_relative_profile=demand_profile)]) + ) + + # Cluster with segments + fs_clustered = fs.transform.cluster( + n_clusters=2, + cluster_duration='1D', + segments=SegmentConfig(n_segments=6), + ) + + fs_clustered.optimize(solver_fixture) + + # Check if startup variable exists + startup_var = 'Boiler(Q)|startup' + if 
startup_var not in fs_clustered.solution: + pytest.skip('Startup variable not in solution (solver may not have triggered any startups)') + + # Expand and check startup placement + fs_expanded = fs_clustered.transform.expand() + + startup_expanded = fs_expanded.solution[startup_var] + + # In expanded form, startup should be sparse: mostly zeros with 1s only at segment boundaries + # The total count should match the clustered solution (after weighting) + startup_clustered = fs_clustered.solution[startup_var] + + # Get cluster weights for proper comparison + cluster_weight = fs_clustered.to_dataset()['cluster_weight'] + + # For expanded: just sum all startups + total_expanded = float(startup_expanded.sum()) + + # For clustered: sum with weights + total_clustered = float((startup_clustered * cluster_weight).sum()) + + # They should match (startup events are preserved, just relocated to first timestep) + assert_allclose(total_expanded, total_clustered, rtol=1e-5) + + # Verify sparsity: most timesteps should be 0 + n_timesteps = startup_expanded.sizes['time'] + n_nonzero = int((startup_expanded > 0.5).sum()) # Binary, so 0.5 threshold + assert n_nonzero < n_timesteps * 0.2, f'Expected sparse startups, but got {n_nonzero}/{n_timesteps} non-zero' + + def test_startup_timing_preserved_non_segmented(self, solver_fixture, timesteps_8_days): + """Test that startup timing within cluster is preserved for non-segmented systems.""" + # Create system with on/off behavior + fs = fx.FlowSystem(timesteps=timesteps_8_days) + fs.add_elements(fx.Effect('Cost', unit='EUR', is_objective=True)) + fs.add_elements(fx.Bus('Heat')) + + fs.add_elements( + fx.Source( + 'Boiler', + outputs=[ + fx.Flow( + 'Q', + bus='Heat', + size=100, + status_parameters=fx.StatusParameters(effects_per_startup={'Cost': 10}), + effects_per_flow_hour={'Cost': 50}, + ) + ], + ) + ) + + demand_pattern = np.array([0.8] * 12 + [0.0] * 12) # On/off pattern per day (0-1 range) + demand_profile = np.tile(demand_pattern, 8) + fs.add_elements( + fx.Sink('Demand', inputs=[fx.Flow('Q', bus='Heat', size=50, fixed_relative_profile=demand_profile)]) + ) + + # Cluster WITHOUT segments + fs_clustered = fs.transform.cluster( + n_clusters=2, + cluster_duration='1D', + ) + + fs_clustered.optimize(solver_fixture) + + startup_var = 'Boiler(Q)|startup' + if startup_var not in fs_clustered.solution: + pytest.skip('Startup variable not in solution') + + fs_expanded = fs_clustered.transform.expand() + + # For non-segmented systems, timing within cluster should be preserved + # The expanded startup should match the clustered values at corresponding positions + startup_clustered = fs_clustered.solution[startup_var] + startup_expanded = fs_expanded.solution[startup_var] + + # Get cluster assignments to verify mapping + cluster_assignments = fs_clustered.clustering.cluster_assignments.values + timesteps_per_cluster = 24 + + # Check that expanded values match clustered values at correct positions + for orig_day in range(8): + cluster_id = cluster_assignments[orig_day] + for hour in range(timesteps_per_cluster): + orig_idx = orig_day * timesteps_per_cluster + hour + clustered_val = float(startup_clustered.isel(cluster=cluster_id, time=hour)) + expanded_val = float(startup_expanded.isel(time=orig_idx)) + assert abs(clustered_val - expanded_val) < 1e-6, ( + f'Mismatch at day {orig_day}, hour {hour}: clustered={clustered_val}, expanded={expanded_val}' + ) + + class TestCombineSlices: """Tests for the combine_slices utility function.""" From 
347515af6b6e41437d0498ff6da53d63048a30fe Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 27 Jan 2026 13:49:31 +0100 Subject: [PATCH 2/5] Add tests for ExtremeConfig extreme-period handling Added Tests - test_extremes_new_cluster_increases_n_clusters: Verifies that extreme periods added via method='new_cluster' increase n_clusters and that optimization and expansion work with the resulting cluster count - test_extremes_new_cluster_with_segments: Verifies that extreme clusters combine correctly with segmentation (n_representatives = n_clusters * n_segments) --- tests/test_cluster_reduce_expand.py | 66 +++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index e85d98cb5..ea6cc81ac 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -838,6 +838,72 @@ def test_clustering_without_extremes_may_miss_peaks(self, solver_fixture, timest # The peak may or may not be captured depending on clustering algorithm assert fs_no_peaks.solution is not None + def test_extremes_new_cluster_increases_n_clusters(self, solver_fixture, timesteps_8_days): + """Test that method='new_cluster' correctly increases n_clusters.""" + from tsam import ExtremeConfig + + fs = create_system_with_peak_demand(timesteps_8_days) + + # Cluster with extremes as new clusters + fs_clustered = fs.transform.cluster( + n_clusters=2, + cluster_duration='1D', + extremes=ExtremeConfig( + method='new_cluster', + max_value=['HeatDemand(Q)|fixed_relative_profile'], + ), + ) + + # n_clusters should be at least 2; extreme periods are added as new clusters + # The exact number depends on how many extreme periods are detected + assert fs_clustered.clustering.n_clusters >= 2 + + # Verify optimization works with the actual cluster count + fs_clustered.optimize(solver_fixture) + assert fs_clustered.solution is not None + + # Verify expansion works + fs_expanded = fs_clustered.transform.expand() + assert len(fs_expanded.timesteps) == 192 + + # The sum of cluster occurrences should equal n_original_clusters (8 days) + assert int(fs_clustered.clustering.cluster_occurrences.sum()) == 8 + + def test_extremes_new_cluster_with_segments(self, solver_fixture, timesteps_8_days): + """Test that method='new_cluster' works correctly with segmentation.""" + from tsam import ExtremeConfig, SegmentConfig + + fs = create_system_with_peak_demand(timesteps_8_days) + + # Cluster with BOTH extremes AND segments + fs_clustered = fs.transform.cluster( + n_clusters=2, + 
cluster_duration='1D', + extremes=ExtremeConfig( + method='new_cluster', + max_value=['HeatDemand(Q)|fixed_relative_profile'], + ), + segments=SegmentConfig(n_segments=6), + ) + + # n_clusters should be >= 2 (extreme periods add clusters) + n_clusters = fs_clustered.clustering.n_clusters + assert n_clusters >= 2 + + # n_representatives = n_clusters * n_segments + assert fs_clustered.clustering.n_representatives == n_clusters * 6 + + # Verify optimization works + fs_clustered.optimize(solver_fixture) + assert fs_clustered.solution is not None + + # Verify expansion works + fs_expanded = fs_clustered.transform.expand() + assert len(fs_expanded.timesteps) == 192 + + # The sum of cluster occurrences should equal n_original_clusters (8 days) + assert int(fs_clustered.clustering.cluster_occurrences.sum()) == 8 + # ==================== Data Vars Parameter Tests ==================== From c26dbd40eb0fdd21a1e102ad50efa7108141c01d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 27 Jan 2026 14:11:48 +0100 Subject: [PATCH 3/5] Require ExtremeConfig method="replace" for multi-period/scenario systems Methods "new_cluster" and "append" can produce different cluster counts per period/scenario, which breaks the uniform dimensions required by the xarray structure. Also updates the tsam clustering call to the renamed temporal_resolution parameter (formerly timestep_duration). --- flixopt/transform_accessor.py | 16 ++++++++++++- tests/test_cluster_reduce_expand.py | 36 +++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 95c221c45..3a468e2ac 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1432,6 +1432,20 @@ def cluster( f'Use the corresponding cluster() parameters instead.' ) + # Validate ExtremeConfig compatibility with multi-period/scenario systems + # Methods 'new_cluster' and 'append' can produce different n_clusters per period, + # which breaks the xarray structure that requires uniform dimensions + is_multi_dimensional = has_periods or has_scenarios + if is_multi_dimensional and extremes is not None: + extreme_method = getattr(extremes, 'method', None) + if extreme_method in ('new_cluster', 'append'): + raise ValueError( + f'ExtremeConfig with method="{extreme_method}" is not supported for multi-period ' + f'or multi-scenario systems because it can produce different cluster counts per ' + f'period/scenario. Use method="replace" instead, which replaces existing clusters ' + f'with extreme periods while maintaining the requested n_clusters.' 
+ ) + # Cluster each (period, scenario) combination using tsam directly tsam_aggregation_results: dict[tuple, Any] = {} # AggregationResult objects tsam_clustering_results: dict[tuple, Any] = {} # ClusteringResult objects for persistence @@ -1482,7 +1496,7 @@ def cluster( df_for_clustering, n_clusters=n_clusters, period_duration=hours_per_cluster, - timestep_duration=dt, + temporal_resolution=dt, cluster=cluster_config, extremes=extremes, segments=segments, diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index ea6cc81ac..a348f490b 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -869,6 +869,42 @@ def test_extremes_new_cluster_increases_n_clusters(self, solver_fixture, timeste # The sum of cluster occurrences should equal n_original_clusters (8 days) assert int(fs_clustered.clustering.cluster_occurrences.sum()) == 8 + def test_extremes_new_cluster_rejected_for_multi_period(self, timesteps_8_days, periods_2): + """Test that method='new_cluster' is rejected for multi-period systems.""" + from tsam import ExtremeConfig + + fs = create_system_with_periods(timesteps_8_days, periods_2) + + with pytest.raises(ValueError, match='method="new_cluster" is not supported'): + fs.transform.cluster( + n_clusters=2, + cluster_duration='1D', + extremes=ExtremeConfig( + method='new_cluster', + max_value=['HeatDemand(Q)|fixed_relative_profile'], + ), + ) + + def test_extremes_replace_works_for_multi_period(self, solver_fixture, timesteps_8_days, periods_2): + """Test that method='replace' works correctly for multi-period systems.""" + from tsam import ExtremeConfig + + fs = create_system_with_periods(timesteps_8_days, periods_2) + + # method='replace' should work - it maintains the requested n_clusters + fs_clustered = fs.transform.cluster( + n_clusters=2, + cluster_duration='1D', + extremes=ExtremeConfig( + method='replace', + max_value=['HeatDemand(Q)|fixed_relative_profile'], + ), + ) + + assert fs_clustered.clustering.n_clusters == 2 + fs_clustered.optimize(solver_fixture) + assert fs_clustered.solution is not None + def test_extremes_new_cluster_with_segments(self, solver_fixture, timesteps_8_days): """Test that method='new_cluster' works correctly with segmentation.""" from tsam import ExtremeConfig, SegmentConfig From f9c6abce7bd1dcb17cc801e3ec876a06845caf01 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 27 Jan 2026 14:20:43 +0100 Subject: [PATCH 4/5] Fix filling of partial typical slices for multi-period/scenario variables The fix now properly handles variables with both time and period (or scenario) dimensions: 1. For each missing (period_label, scenario_label) key, it selects the specific period/scenario slice from the original variable using .sel(..., drop=True) 2. Then it slices and reshapes that specific slice 3. All slices in filled_slices now have consistent dimensions ['cluster', 'time'] + other_dims without 'period' or 'scenario' coordinates This ensures all DataArrays being concatenated have the same structure.
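For reference, a minimal standalone sketch of steps 1-3 above (illustrative only: toy dimensions and labels, not the flixopt dataset):

    import numpy as np
    import xarray as xr

    n_clusters, n_time_points, n_reduced_timesteps = 2, 4, 8
    # Toy variable with a 'period' dimension, standing in for ds[name]
    var = xr.DataArray(
        np.arange(2 * 12).reshape(2, 12),
        dims=['period', 'time'],
        coords={'period': [2020, 2030]},
    )

    key = (2020, None)  # (period_label, scenario_label)
    selector = {}
    if key[0] is not None and 'period' in var.dims:
        selector['period'] = key[0]
    if key[1] is not None and 'scenario' in var.dims:
        selector['scenario'] = key[1]

    # Step 1: select the per-key slice, dropping the period/scenario coords
    var_slice = var.sel(**selector, drop=True) if selector else var

    # Step 2: slice the reduced time range and reshape to (cluster, time)
    time_idx = var_slice.dims.index('time')
    slices = [slice(None)] * var_slice.ndim
    slices[time_idx] = slice(0, n_reduced_timesteps)
    reshaped = var_slice.values[tuple(slices)].reshape(n_clusters, n_time_points)

    # Step 3: every filled slice now has uniform dims ['cluster', 'time']
    filled = xr.DataArray(reshaped, dims=['cluster', 'time'])
    assert filled.dims == ('cluster', 'time')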
--- flixopt/transform_accessor.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 3a468e2ac..aa9e30f4d 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -569,15 +569,12 @@ def _build_reduced_dataset( ) elif set(typical_das[name].keys()) != all_keys: # Partial typical slices: fill missing keys with constant values - time_idx = var.dims.index('time') - slices_list = [slice(None)] * len(var.dims) - slices_list[time_idx] = slice(0, n_reduced_timesteps) - sliced_values = var.values[tuple(slices_list)] + # For multi-period/scenario data, we need to select the right slice for each key - other_dims = [d for d in var.dims if d != 'time'] + # Exclude 'period' and 'scenario' - they're handled by _combine_slices_to_dataarray_2d + other_dims = [d for d in var.dims if d not in ('time', 'period', 'scenario')] other_shape = [var.sizes[d] for d in other_dims] new_shape = [actual_n_clusters, n_time_points] + other_shape - reshaped_constant = sliced_values.reshape(new_shape) new_coords = {'cluster': cluster_coords, 'time': time_coords} for dim in other_dims: @@ -590,6 +587,27 @@ def _build_reduced_dataset( if key in typical_das[name]: filled_slices[key] = typical_das[name][key] else: + # Select the specific period/scenario slice, then reshape + period_label, scenario_label = key + selector = {} + if period_label is not None and 'period' in var.dims: + selector['period'] = period_label + if scenario_label is not None and 'scenario' in var.dims: + selector['scenario'] = scenario_label + + # Select per-key slice if needed, otherwise use full variable + if selector: + var_slice = ds[name].sel(**selector, drop=True) + else: + var_slice = ds[name] + + # Now slice time and reshape + time_idx = var_slice.dims.index('time') + slices_list = [slice(None)] * len(var_slice.dims) + slices_list[time_idx] = slice(0, n_reduced_timesteps) + sliced_values = var_slice.values[tuple(slices_list)] + reshaped_constant = sliced_values.reshape(new_shape) + filled_slices[key] = xr.DataArray( reshaped_constant, dims=['cluster', 'time'] + other_dims, From 0084b6fedb8edaf23f112ee4fa9060d02f23a346 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 27 Jan 2026 15:08:50 +0100 Subject: [PATCH 5/5] Update tsam dep --- .github/workflows/docs.yaml | 4 ++-- .github/workflows/tests.yaml | 4 ++-- pyproject.toml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 22d942219..5657a5ae9 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -58,7 +58,7 @@ jobs: run: uv pip install --system ".[docs,full]" - name: Install tsam v3 (not yet on PyPI) - run: uv pip install --system "tsam @ git+https://github.com/FBumann/tsam.git@a5e2ac516fb8470377a1893742df6696668539aa" + run: uv pip install --system "tsam @ git+https://github.com/FBumann/tsam.git@71177ec3fa1b16fcecdd039ca18ceddcdfa2064a" - name: Get notebook cache key id: notebook-cache-key @@ -125,7 +125,7 @@ jobs: run: uv pip install --system ".[docs,full]" - name: Install tsam v3 (not yet on PyPI) - run: uv pip install --system "tsam @ git+https://github.com/FBumann/tsam.git@a5e2ac516fb8470377a1893742df6696668539aa" + run: uv pip install --system "tsam @ git+https://github.com/FBumann/tsam.git@71177ec3fa1b16fcecdd039ca18ceddcdfa2064a" - name: Get notebook cache key id: notebook-cache-key diff --git 
a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 80ee816b9..8222fe9a0 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -62,7 +62,7 @@ jobs: run: uv pip install --system .[dev] - name: Install tsam v3 (not yet on PyPI) - run: uv pip install --system "tsam @ git+https://github.com/FBumann/tsam.git@a5e2ac516fb8470377a1893742df6696668539aa" + run: uv pip install --system "tsam @ git+https://github.com/FBumann/tsam.git@71177ec3fa1b16fcecdd039ca18ceddcdfa2064a" - name: Run tests run: pytest -v --numprocesses=auto @@ -89,7 +89,7 @@ jobs: run: uv pip install --system .[dev] - name: Install tsam v3 (not yet on PyPI) - run: uv pip install --system "tsam @ git+https://github.com/FBumann/tsam.git@a5e2ac516fb8470377a1893742df6696668539aa" + run: uv pip install --system "tsam @ git+https://github.com/FBumann/tsam.git@71177ec3fa1b16fcecdd039ca18ceddcdfa2064a" - name: Run example tests run: pytest -v -m examples --numprocesses=auto diff --git a/pyproject.toml b/pyproject.toml index ac39fd48a..baf4760de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,7 +63,7 @@ network_viz = [ # Full feature set (everything except dev tools) # NOTE: For clustering features, install tsam v3 manually (not yet on PyPI): -# pip install "tsam @ git+https://github.com/FBumann/tsam.git@a5e2ac516fb8470377a1893742df6696668539aa" +# pip install "tsam @ git+https://github.com/FBumann/tsam.git@71177ec3fa1b16fcecdd039ca18ceddcdfa2064a" # This will be added back when tsam v3.0 is released on PyPI full = [ "pyvis==0.3.2", # Visualizing FlowSystem Network @@ -79,7 +79,7 @@ full = [ # Development tools and testing # NOTE: For clustering features, install tsam v3 manually (not yet on PyPI): -# pip install "tsam @ git+https://github.com/FBumann/tsam.git@a5e2ac516fb8470377a1893742df6696668539aa" +# pip install "tsam @ git+https://github.com/FBumann/tsam.git@71177ec3fa1b16fcecdd039ca18ceddcdfa2064a" dev = [ "pytest==8.4.2", "pytest-xdist==3.8.0",