From cc4d6ab7036fc4283bae69b1e29cc4e2d1aa2275 Mon Sep 17 00:00:00 2001 From: Fabian Date: Mon, 9 Feb 2026 14:28:33 +0100 Subject: [PATCH 01/66] refac: introduce consistent convention for linopy operations with subsets and supersets --- CLAUDE.md | 22 +- doc/release_notes.rst | 6 + linopy/expressions.py | 62 ++++-- linopy/model.py | 10 + linopy/monkey_patch_xarray.py | 64 +++--- linopy/variables.py | 3 +- test/test_linear_expression.py | 365 +++++++++++++++++++++++++++++++++ 7 files changed, 464 insertions(+), 68 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 67155ae3..1f696a0b 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -110,27 +110,6 @@ When modifying the codebase, maintain consistency with these patterns and ensure * Always create a feature branch for new features or bug fixes. * Use the github cli (gh) to interact with the Github repository. -### GitHub Claude Code Integration - -This repository includes Claude Code GitHub Actions for automated assistance: - -1. **Automated PR Reviews** (`claude-code-review.yml`): - - Automatically reviews PRs only when first created (opened) - - Subsequent reviews require manual `@claude` mention - - Focuses on Python best practices, xarray patterns, and optimization correctness - - Can run tests and linting as part of the review - - **Skip initial review by**: Adding `[skip-review]` or `[WIP]` to PR title, or using draft PRs - -2. **Manual Claude Assistance** (`claude.yml`): - - Trigger by mentioning `@claude` in any: - - Issue comments - - Pull request comments - - Pull request reviews - - New issue body or title - - Claude can help with bug fixes, feature implementation, code explanations, etc. - -**Note**: Both workflows require the `ANTHROPIC_API_KEY` secret to be configured in the repository settings. - ## Development Guidelines @@ -140,3 +119,4 @@ This repository includes Claude Code GitHub Actions for automated assistance: 4. Use type hints and mypy for type checking. 5. 
Always write tests into the `test` directory, following the naming convention `test_*.py`. 6. Always write temporary and non git-tracked code in the `dev-scripts` directory. +7. In test scripts use linopy assertions from the testing.py module where useful (assert_linequal, assert_varequal, etc.) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index edf67076..311b93d0 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -6,6 +6,12 @@ Upcoming Version * Fix docs (pick highs solver) * Add the `sphinx-copybutton` to the documentation +* Harmonize coordinate alignment for operations with subset/superset objects: + - Multiplication and division fill missing coords with 0 (variable doesn't participate) + - Addition and subtraction of constants fill missing coords with 0 (identity element) and pin result to LHS coords + - Comparison operators (``==``, ``<=``, ``>=``) fill missing RHS coords with NaN (no constraint created) + - Fixes crash on ``subset + var`` / ``subset + expr`` reverse addition + - Fixes superset DataArrays expanding result coords beyond the variable's coordinate space Upcoming Version ---------------- diff --git a/linopy/expressions.py b/linopy/expressions.py index 848067cf..372a5c9f 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -532,15 +532,31 @@ def _multiply_by_linear_expression( res = res + self.reset_const() * other.const return res + def _add_constant( + self: GenericExpression, other: ConstantLike + ) -> GenericExpression: + da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) + da = da.reindex_like(self.const, fill_value=0) + return self.assign(const=self.const + da) + def _multiply_by_constant( self: GenericExpression, other: ConstantLike ) -> GenericExpression: multiplier = as_dataarray(other, coords=self.coords, dims=self.coord_dims) + multiplier = multiplier.reindex_like(self.const, fill_value=0) coeffs = self.coeffs * multiplier - assert all(coeffs.sizes[d] == s for d, s in 
self.coeffs.sizes.items()) const = self.const * multiplier return self.assign(coeffs=coeffs, const=const) + def _divide_by_constant( + self: GenericExpression, other: ConstantLike + ) -> GenericExpression: + divisor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) + divisor = divisor.reindex_like(self.const, fill_value=1) + coeffs = self.coeffs / divisor + const = self.const / divisor + return self.assign(coeffs=coeffs, const=const) + def __div__(self: GenericExpression, other: SideLike) -> GenericExpression: try: if isinstance( @@ -556,7 +572,7 @@ def __div__(self: GenericExpression, other: SideLike) -> GenericExpression: f"{type(self)} and {type(other)}" "Non-linear expressions are not yet supported." ) - return self._multiply_by_constant(other=1 / other) + return self._divide_by_constant(other) except TypeError: return NotImplemented @@ -862,7 +878,10 @@ def to_constraint(self, sign: SignLike, rhs: SideLike) -> Constraint: sign : str, array-like Sign(s) of the constraints. rhs : constant, Variable, LinearExpression - Right-hand side of the constraint. + Right-hand side of the constraint. If a DataArray, it is + reindexed to match expression coordinates (fill_value=np.nan). + Extra dimensions in the RHS not present in the expression + raise a ValueError. NaN entries in the RHS mean "no constraint". Returns ------- @@ -875,6 +894,15 @@ def to_constraint(self, sign: SignLike, rhs: SideLike) -> Constraint: f"Both sides of the constraint are constant. At least one side must contain variables. {self} {rhs}" ) + if isinstance(rhs, DataArray): + extra_dims = set(rhs.dims) - set(self.coord_dims) + if extra_dims: + raise ValueError( + f"RHS DataArray has dimensions {extra_dims} not present " + f"in the expression. Cannot create constraint." 
+ ) + rhs = rhs.reindex_like(self.const, fill_value=np.nan) + all_to_lhs = (self - rhs).data data = assign_multiindex_safe( all_to_lhs[["coeffs", "vars"]], sign=sign, rhs=-all_to_lhs.const @@ -1313,9 +1341,11 @@ def __add__( try: if np.isscalar(other): return self.assign(const=self.const + other) - - other = as_expression(other, model=self.model, dims=self.coord_dims) - return merge([self, other], cls=self.__class__) + elif isinstance(other, SUPPORTED_CONSTANT_TYPES): + return self._add_constant(other) + else: + other = as_expression(other, model=self.model, dims=self.coord_dims) + return merge([self, other], cls=self.__class__) except TypeError: return NotImplemented @@ -1853,13 +1883,15 @@ def __add__(self, other: SideLike) -> QuadraticExpression: try: if np.isscalar(other): return self.assign(const=self.const + other) + elif isinstance(other, SUPPORTED_CONSTANT_TYPES): + return self._add_constant(other) + else: + other = as_expression(other, model=self.model, dims=self.coord_dims) - other = as_expression(other, model=self.model, dims=self.coord_dims) - - if isinstance(other, LinearExpression): - other = other.to_quadexpr() + if isinstance(other, LinearExpression): + other = other.to_quadexpr() - return merge([self, other], cls=self.__class__) + return merge([self, other], cls=self.__class__) except TypeError: return NotImplemented @@ -1877,13 +1909,7 @@ def __sub__(self, other: SideLike) -> QuadraticExpression: dimension names of self will be filled in other """ try: - if np.isscalar(other): - return self.assign(const=self.const - other) - - other = as_expression(other, model=self.model, dims=self.coord_dims) - if type(other) is LinearExpression: - other = other.to_quadexpr() - return merge([self, -other], cls=self.__class__) + return self.__add__(-other) except TypeError: return NotImplemented diff --git a/linopy/model.py b/linopy/model.py index 657b2d45..fc5472ae 100644 --- a/linopy/model.py +++ b/linopy/model.py @@ -699,6 +699,16 @@ def add_constraints( # 
TODO: add a warning here, routines should be safe against this data = data.drop_vars(drop_dims) + rhs_nan = data.rhs.isnull() + if rhs_nan.any(): + data["rhs"] = data.rhs.fillna(0) + rhs_mask = ~rhs_nan + mask = ( + rhs_mask + if mask is None + else (as_dataarray(mask).astype(bool) & rhs_mask) + ) + data["labels"] = -1 (data,) = xr.broadcast(data, exclude=[TERM_DIM]) diff --git a/linopy/monkey_patch_xarray.py b/linopy/monkey_patch_xarray.py index dc60608c..1e526c92 100644 --- a/linopy/monkey_patch_xarray.py +++ b/linopy/monkey_patch_xarray.py @@ -1,37 +1,45 @@ from __future__ import annotations from collections.abc import Callable -from functools import partialmethod, update_wrapper -from types import NotImplementedType +from functools import update_wrapper from typing import Any from xarray import DataArray from linopy import expressions, variables - -def monkey_patch(cls: type[DataArray], pass_unpatched_method: bool = False) -> Callable: - def deco(func: Callable) -> Callable: - func_name = func.__name__ - wrapped = getattr(cls, func_name) - update_wrapper(func, wrapped) - if pass_unpatched_method: - func = partialmethod(func, unpatched_method=wrapped) # type: ignore - setattr(cls, func_name, func) - return func - - return deco - - -@monkey_patch(DataArray, pass_unpatched_method=True) -def __mul__( - da: DataArray, other: Any, unpatched_method: Callable -) -> DataArray | NotImplementedType: - if isinstance( - other, - variables.Variable - | expressions.LinearExpression - | expressions.QuadraticExpression, - ): - return NotImplemented - return unpatched_method(da, other) +_LINOPY_TYPES = ( + variables.Variable, + variables.ScalarVariable, + expressions.LinearExpression, + expressions.ScalarLinearExpression, + expressions.QuadraticExpression, +) + + +def _make_patched_op(op_name: str) -> None: + """Patch a DataArray operator to return NotImplemented for linopy types, enabling reflected operators.""" + original = getattr(DataArray, op_name) + + def patched( + da: 
DataArray, other: Any, unpatched_method: Callable = original + ) -> Any: + if isinstance(other, _LINOPY_TYPES): + return NotImplemented + return unpatched_method(da, other) + + update_wrapper(patched, original) + setattr(DataArray, op_name, patched) + + +for _op in ( + "__mul__", + "__add__", + "__sub__", + "__truediv__", + "__le__", + "__ge__", + "__eq__", +): + _make_patched_op(_op) +del _op diff --git a/linopy/variables.py b/linopy/variables.py index d90a4775..83b4246e 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -315,6 +315,7 @@ def to_linexpr( Linear expression with the variables and coefficients. """ coefficient = as_dataarray(coefficient, coords=self.coords, dims=self.dims) + coefficient = coefficient.reindex_like(self.labels, fill_value=0) ds = Dataset({"coeffs": coefficient, "vars": self.labels}).expand_dims( TERM_DIM, -1 ) @@ -454,7 +455,7 @@ def __div__( f"{type(self)} and {type(other)}. " "Non-linear expressions are not yet supported." ) - return self.to_linexpr(1 / other) + return self.to_linexpr()._divide_by_constant(other) def __truediv__( self, coefficient: float | int | LinearExpression | Variable diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 0da9ec7f..93a02f45 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -575,6 +575,371 @@ def test_linear_expression_multiplication_invalid( expr / x +class TestSubsetCoordinateAlignment: + @pytest.fixture + def subset(self) -> xr.DataArray: + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + + @pytest.fixture + def superset(self) -> xr.DataArray: + return xr.DataArray( + np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} + ) + + @pytest.fixture + def expected_fill(self) -> np.ndarray: + arr = np.zeros(20) + arr[1] = 10.0 + arr[3] = 30.0 + return arr + + def test_var_mul_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + result = v * 
subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_expr_mul_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + expr = 1 * v + result = expr * subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + @pytest.mark.parametrize( + "make_lhs,make_rhs", + [ + (lambda v, s: s * v, lambda v, s: v * s), + (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), + (lambda v, s: s + v, lambda v, s: v + s), + (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), + ], + ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], + ) + def test_commutativity( + self, v: Variable, subset: xr.DataArray, make_lhs: object, make_rhs: object + ) -> None: + assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) + + def test_var_add_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + result = v + subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected_fill) + + def test_var_sub_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + result = v - subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, -expected_fill) + + def test_subset_sub_var(self, v: Variable, subset: xr.DataArray) -> None: + assert_linequal(subset - v, -v + subset) + + def test_expr_add_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + expr = v + 5 + result = expr + subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not 
np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected_fill + 5) + + def test_expr_sub_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + expr = v + 5 + result = expr - subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, 5 - expected_fill) + + def test_subset_sub_expr(self, v: Variable, subset: xr.DataArray) -> None: + expr = v + 5 + assert_linequal(subset - expr, -(expr - subset)) + + def test_var_div_subset(self, v: Variable, subset: xr.DataArray) -> None: + result = v / subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) + assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + + def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + con = v <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert con.rhs.sel(dim_2=3).item() == 30.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", ["<=", ">=", "=="]) + def test_var_comparison_subset( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == "<=": + con = v <= subset + elif sign == ">=": + con = v >= subset + else: + con = v == subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + expr = v + 5 + con = expr <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) + assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + def test_add_commutativity_full_coords(self, v: 
Variable) -> None: + full = xr.DataArray( + np.arange(20, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(20)}, + ) + assert_linequal(v + full, full + v) + + def test_superset_addition_pins_to_lhs( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v + superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + + def test_superset_add_var(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset + v, v + superset) + + def test_superset_sub_var(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset - v, -v + superset) + + def test_superset_mul_var(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset * v, v * superset) + + @pytest.mark.parametrize("sign", ["<=", ">="]) + def test_superset_comparison_var( + self, v: Variable, superset: xr.DataArray, sign: str + ) -> None: + if sign == "<=": + con = superset <= v + else: + con = superset >= v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(con.lhs.coeffs.values).any() + assert not np.isnan(con.rhs.values).any() + + def test_disjoint_addition_pins_to_lhs(self, v: Variable) -> None: + disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v + disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, np.zeros(20)) + + def test_expr_div_subset(self, v: Variable, subset: xr.DataArray) -> None: + expr = 1 * v + result = expr / subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) + assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + + def test_subset_add_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset + v + 
np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + def test_subset_sub_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset - v + np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) + + @pytest.mark.parametrize("sign", ["<=", ">=", "=="]) + def test_subset_comparison_var( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == "<=": + con = subset <= v + elif sign == ">=": + con = subset >= v + else: + con = subset == v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert np.isnan(con.rhs.sel(dim_2=0).item()) + assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) + + def test_superset_mul_pins_to_lhs( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v * superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + def test_superset_div_pins_to_lhs(self, v: Variable) -> None: + superset_nonzero = xr.DataArray( + np.arange(1, 26, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + result = v / superset_nonzero + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + def test_quadexpr_add_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + qexpr = v * v + result = qexpr + subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected_fill) + + def test_quadexpr_sub_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + qexpr = v * v + result = qexpr - subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, -expected_fill) + + def 
test_quadexpr_mul_subset( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + qexpr = v * v + result = qexpr * subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_mul_quadexpr( + self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray + ) -> None: + qexpr = v * v + result = subset * qexpr + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: + qexpr = v * v + assert_quadequal(subset + qexpr, qexpr + subset) + + def test_multidim_subset_mul(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w * subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) + assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) + + def test_multidim_subset_add(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + 
result = w + subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.const.values).any() + assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) + + def test_constraint_rhs_extra_dims_raises(self, v: Variable) -> None: + rhs = xr.DataArray( + [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} + ) + with pytest.raises(ValueError, match="not present in the expression"): + v <= rhs + + def test_da_truediv_var_raises(self, v: Variable) -> None: + da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) + with pytest.raises(TypeError): + da / v + + def test_disjoint_mul_produces_zeros(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v * disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) + + def test_disjoint_div_preserves_coeffs(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v / disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + def test_da_eq_da_still_works(self) -> None: + da1 = xr.DataArray([1, 2, 3]) + da2 = xr.DataArray([1, 2, 3]) + result = da1 == da2 + assert result.values.all() + + def test_da_eq_scalar_still_works(self) -> None: + da = xr.DataArray([1, 2, 3]) + result = da == 2 + np.testing.assert_array_equal(result.values, [False, True, False]) + + def test_subset_constraint_solve_integration(self) -> None: + from linopy import available_solvers + + if not available_solvers: + pytest.skip("No solver available") + m 
= Model() + coords = pd.RangeIndex(5, name="i") + x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") + subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) + m.add_constraints(x <= subset_ub, name="subset_ub") + m.add_objective(x.sum(), sense="max") + m.solve(solver_name=available_solvers[0]) + sol = m.solution["x"] + assert sol.sel(i=1).item() == pytest.approx(10.0) + assert sol.sel(i=3).item() == pytest.approx(20.0) + assert sol.sel(i=0).item() == pytest.approx(100.0) + assert sol.sel(i=2).item() == pytest.approx(100.0) + assert sol.sel(i=4).item() == pytest.approx(100.0) + + def test_expression_inherited_properties(x: Variable, y: Variable) -> None: expr = 10 * x + y assert isinstance(expr.attrs, dict) From e408b8e856c714e972e2d565133181d2d99aa120 Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 11 Feb 2026 10:08:48 +0100 Subject: [PATCH 02/66] move scalar addition to add_constant --- linopy/expressions.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index b9eb5579..11730e5d 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -95,6 +95,7 @@ SUPPORTED_CONSTANT_TYPES = ( np.number, + np.bool_, int, float, DataArray, @@ -536,6 +537,8 @@ def _multiply_by_linear_expression( def _add_constant( self: GenericExpression, other: ConstantLike ) -> GenericExpression: + if np.isscalar(other): + return self.assign(const=self.const + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) da = da.reindex_like(self.const, fill_value=0) return self.assign(const=self.const + da) @@ -1340,9 +1343,7 @@ def __add__( return other.__add__(self) try: - if np.isscalar(other): - return self.assign(const=self.const + other) - elif isinstance(other, SUPPORTED_CONSTANT_TYPES): + if isinstance(other, SUPPORTED_CONSTANT_TYPES): return self._add_constant(other) else: other = as_expression(other, model=self.model, dims=self.coord_dims) @@ -1882,9 
+1883,7 @@ def __add__(self, other: SideLike) -> QuadraticExpression: dimension names of self will be filled in other """ try: - if np.isscalar(other): - return self.assign(const=self.const + other) - elif isinstance(other, SUPPORTED_CONSTANT_TYPES): + if isinstance(other, SUPPORTED_CONSTANT_TYPES): return self._add_constant(other) else: other = as_expression(other, model=self.model, dims=self.coord_dims) From 1f339e8e6bfe5dba7232d31b91c935218c6b9ec1 Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 11 Feb 2026 11:24:14 +0100 Subject: [PATCH 03/66] add overwriting logic to add constant --- linopy/expressions.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 11730e5d..052325d7 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -540,7 +540,11 @@ def _add_constant( if np.isscalar(other): return self.assign(const=self.const + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - da = da.reindex_like(self.const, fill_value=0) + if da.sizes == self.const.sizes: + # follow overwriting logic from merge function + da = da.assign_coords(coords=self.coords) + else: + da = da.reindex_like(self.const, fill_value=0) return self.assign(const=self.const + da) def _multiply_by_constant( From c47b90bd0c4aadd96cca334a104872ba194c530a Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 11 Feb 2026 12:54:03 +0100 Subject: [PATCH 04/66] add join parameter to control alignment in operations --- linopy/expressions.py | 221 +++++++++++++++++-------- linopy/variables.py | 54 +++++-- test/test_linear_expression.py | 283 +++++++++++++++++++++++++++++++++ 3 files changed, 485 insertions(+), 73 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 052325d7..1fd8176d 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -9,6 +9,7 @@ import functools import logging +import operator from abc import ABC, abstractmethod from collections.abc import Callable, 
Hashable, Iterator, Mapping, Sequence from dataclasses import dataclass, field @@ -93,18 +94,6 @@ from linopy.model import Model from linopy.variables import ScalarVariable, Variable -SUPPORTED_CONSTANT_TYPES = ( - np.number, - np.bool_, - int, - float, - DataArray, - pd.Series, - pd.DataFrame, - np.ndarray, - pl.Series, -) - FILL_VALUE = {"vars": -1, "coeffs": np.nan, "const": np.nan} @@ -535,46 +524,66 @@ def _multiply_by_linear_expression( return res def _add_constant( - self: GenericExpression, other: ConstantLike + self: GenericExpression, other: ConstantLike, join: str | None = None ) -> GenericExpression: - if np.isscalar(other): + if np.isscalar(other) and join is None: return self.assign(const=self.const + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - if da.sizes == self.const.sizes: - # follow overwriting logic from merge function + if join is None: + if da.sizes == self.const.sizes: + da = da.assign_coords(coords=self.coords) + else: + da = da.reindex_like(self.const, fill_value=0) + elif join == "override": da = da.assign_coords(coords=self.coords) else: - da = da.reindex_like(self.const, fill_value=0) + self_const, da = xr.align(self.const, da, join=join, fill_value=0) + return self.__class__( + self.data.reindex_like(self_const, fill_value=self._fill_value).assign( + const=self_const + da + ), + self.model, + ) return self.assign(const=self.const + da) + def _apply_constant_op( + self: GenericExpression, + other: ConstantLike, + op: Callable[[DataArray, DataArray], DataArray], + fill_value: float, + join: str | None = None, + ) -> GenericExpression: + factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) + if join is None: + factor = factor.reindex_like(self.const, fill_value=fill_value) + elif join == "override": + factor = factor.assign_coords(coords=self.coords) + else: + self_const, factor = xr.align( + self.const, factor, join=join, fill_value=fill_value + ) + data = 
self.data.reindex_like(self_const, fill_value=self._fill_value) + return self.__class__( + assign_multiindex_safe( + data, coeffs=op(data.coeffs, factor), const=op(self_const, factor) + ), + self.model, + ) + return self.assign(coeffs=op(self.coeffs, factor), const=op(self.const, factor)) + def _multiply_by_constant( - self: GenericExpression, other: ConstantLike + self: GenericExpression, other: ConstantLike, join: str | None = None ) -> GenericExpression: - multiplier = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - multiplier = multiplier.reindex_like(self.const, fill_value=0) - coeffs = self.coeffs * multiplier - const = self.const * multiplier - return self.assign(coeffs=coeffs, const=const) + return self._apply_constant_op(other, operator.mul, fill_value=0, join=join) def _divide_by_constant( - self: GenericExpression, other: ConstantLike + self: GenericExpression, other: ConstantLike, join: str | None = None ) -> GenericExpression: - divisor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - divisor = divisor.reindex_like(self.const, fill_value=1) - coeffs = self.coeffs / divisor - const = self.const / divisor - return self.assign(coeffs=coeffs, const=const) + return self._apply_constant_op(other, operator.truediv, fill_value=1, join=join) def __div__(self: GenericExpression, other: SideLike) -> GenericExpression: try: - if isinstance( - other, - variables.Variable - | variables.ScalarVariable - | LinearExpression - | ScalarLinearExpression - | QuadraticExpression, - ): + if isinstance(other, SUPPORTED_EXPRESSION_TYPES): raise TypeError( "unsupported operand type(s) for /: " f"{type(self)} and {type(other)}" @@ -607,36 +616,103 @@ def __lt__(self, other: Any) -> NotImplementedType: ) def add( - self: GenericExpression, other: SideLike + self: GenericExpression, + other: SideLike, + join: str | None = None, ) -> GenericExpression | QuadraticExpression: """ Add an expression to others. 
- """ - return self.__add__(other) + + Parameters + ---------- + other : expression-like + The expression to add. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. + """ + if join is None: + return self.__add__(other) + if isinstance(other, SUPPORTED_CONSTANT_TYPES): + return self._add_constant(other, join=join) + other = as_expression(other, model=self.model, dims=self.coord_dims) + if isinstance(other, LinearExpression) and isinstance( + self, QuadraticExpression + ): + other = other.to_quadexpr() + return merge([self, other], cls=self.__class__, join=join) def sub( - self: GenericExpression, other: SideLike + self: GenericExpression, + other: SideLike, + join: str | None = None, ) -> GenericExpression | QuadraticExpression: """ Subtract others from expression. + + Parameters + ---------- + other : expression-like + The expression to subtract. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. """ - return self.__sub__(other) + return self.add(-other, join=join) def mul( - self: GenericExpression, other: SideLike + self: GenericExpression, + other: SideLike, + join: str | None = None, ) -> GenericExpression | QuadraticExpression: """ Multiply the expr by a factor. - """ - return self.__mul__(other) + + Parameters + ---------- + other : expression-like + The factor to multiply by. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. 
+ """ + if join is None: + return self.__mul__(other) + if isinstance(other, SUPPORTED_EXPRESSION_TYPES): + raise TypeError( + "join parameter is not supported for expression-expression multiplication" + ) + return self._multiply_by_constant(other, join=join) def div( - self: GenericExpression, other: VariableLike | ConstantLike + self: GenericExpression, + other: VariableLike | ConstantLike, + join: str | None = None, ) -> GenericExpression | QuadraticExpression: """ Divide the expr by a factor. - """ - return self.__div__(other) + + Parameters + ---------- + other : constant-like + The divisor. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. + """ + if join is None: + return self.__div__(other) + if isinstance(other, SUPPORTED_EXPRESSION_TYPES): + raise TypeError( + "unsupported operand type(s) for /: " + f"{type(self)} and {type(other)}. " + "Non-linear expressions are not yet supported." + ) + return self._divide_by_constant(other, join=join) def pow(self, other: int) -> QuadraticExpression: """ @@ -1859,13 +1935,7 @@ def __mul__(self, other: SideLike) -> QuadraticExpression: """ Multiply the expr by a factor. """ - if isinstance( - other, - BaseExpression - | ScalarLinearExpression - | variables.Variable - | variables.ScalarVariable, - ): + if isinstance(other, SUPPORTED_EXPRESSION_TYPES): raise TypeError( "unsupported operand type(s) for *: " f"{type(self)} and {type(other)}. " @@ -1935,13 +2005,7 @@ def __matmul__( """ Matrix multiplication with other, similar to xarray dot. """ - if isinstance( - other, - BaseExpression - | ScalarLinearExpression - | variables.Variable - | variables.ScalarVariable, - ): + if isinstance(other, SUPPORTED_EXPRESSION_TYPES): raise TypeError( "Higher order non-linear expressions are not yet supported." 
) @@ -2094,6 +2158,7 @@ def merge( ], dim: str = TERM_DIM, cls: type[GenericExpression] = None, # type: ignore + join: str | None = None, **kwargs: Any, ) -> GenericExpression: """ @@ -2113,6 +2178,10 @@ def merge( Dimension along which the expressions should be concatenated. cls : type Explicitly set the type of the resulting expression (So that the type checker will know the return type) + join : str, optional + How to align coordinates. One of "outer", "inner", "left", "right", + "exact", "override". When None (default), auto-detects based on + expression shapes. **kwargs Additional keyword arguments passed to xarray.concat. Defaults to {coords: "minimal", compat: "override"} or, in the special case described @@ -2147,7 +2216,9 @@ def merge( model = exprs[0].model - if cls in linopy_types and dim in HELPER_DIMS: + if join is not None: + override = join == "override" + elif cls in linopy_types and dim in HELPER_DIMS: coord_dims = [ {k: v for k, v in e.sizes.items() if k not in HELPER_DIMS} for e in exprs ] @@ -2168,7 +2239,9 @@ def merge( elif cls == variables.Variable: kwargs["fill_value"] = variables.FILL_VALUE - if override: + if join is not None: + kwargs["join"] = join + elif override: kwargs["join"] = "override" else: kwargs.setdefault("join", "outer") @@ -2346,3 +2419,23 @@ def to_linexpr(self) -> LinearExpression: vars = xr.DataArray(list(self.vars), dims=TERM_DIM) ds = xr.Dataset({"coeffs": coeffs, "vars": vars}) return LinearExpression(ds, self.model) + + +SUPPORTED_CONSTANT_TYPES = ( + np.number, + np.bool_, + int, + float, + DataArray, + pd.Series, + pd.DataFrame, + np.ndarray, + pl.Series, +) + +SUPPORTED_EXPRESSION_TYPES = ( + BaseExpression, + ScalarLinearExpression, + variables.Variable, + variables.ScalarVariable, +) diff --git a/linopy/variables.py b/linopy/variables.py index 83b4246e..f6d52288 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -545,29 +545,65 @@ def __lt__(self, other: Any) -> NotImplementedType: def 
__contains__(self, value: str) -> bool: return self.data.__contains__(value) - def add(self, other: Variable) -> LinearExpression: + def add(self, other: SideLike, join: str | None = None) -> LinearExpression: """ Add variables to linear expressions or other variables. + + Parameters + ---------- + other : expression-like + The expression to add. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. """ - return self.__add__(other) + return self.to_linexpr().add(other, join=join) - def sub(self, other: Variable) -> LinearExpression: + def sub(self, other: SideLike, join: str | None = None) -> LinearExpression: """ Subtract linear expressions or other variables from the variables. + + Parameters + ---------- + other : expression-like + The expression to subtract. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. """ - return self.__sub__(other) + return self.to_linexpr().sub(other, join=join) - def mul(self, other: int) -> LinearExpression: + def mul(self, other: ConstantLike, join: str | None = None) -> LinearExpression: """ Multiply variables with a coefficient. + + Parameters + ---------- + other : constant-like + The coefficient to multiply by. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. """ - return self.__mul__(other) + return self.to_linexpr().mul(other, join=join) - def div(self, other: int) -> LinearExpression: + def div(self, other: ConstantLike, join: str | None = None) -> LinearExpression: """ Divide variables with a coefficient. - """ - return self.__div__(other) + + Parameters + ---------- + other : constant-like + The divisor. + join : str, optional + How to align coordinates. 
One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. + """ + return self.to_linexpr().div(other, join=join) def pow(self, other: int) -> QuadraticExpression: """ diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 93a02f45..d956ef1f 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -1764,3 +1764,286 @@ def test_constant_only_expression_mul_linexpr_with_vars_and_const( assert not result_rev.is_constant assert (result_rev.coeffs == expected_coeffs).all() assert (result_rev.const == expected_const).all() + + +class TestJoinParameter: + @pytest.fixture + def m2(self) -> Model: + m = Model() + m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="a") + m.add_variables(coords=[pd.Index([1, 2, 3], name="i")], name="b") + m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="c") + return m + + @pytest.fixture + def a(self, m2: Model) -> Variable: + return m2.variables["a"] + + @pytest.fixture + def b(self, m2: Model) -> Variable: + return m2.variables["b"] + + @pytest.fixture + def c(self, m2: Model) -> Variable: + return m2.variables["c"] + + def test_add_join_none_preserves_default(self, a: Variable, b: Variable) -> None: + result_default = a.to_linexpr() + b.to_linexpr() + result_none = a.to_linexpr().add(b.to_linexpr(), join=None) + assert_linequal(result_default, result_none) + + def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_add_expr_join_outer(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_add_expr_join_left(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="left") + assert list(result.data.indexes["i"]) == [0, 
1, 2] + + def test_add_expr_join_right(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="right") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_add_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_add_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().add(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: + expr = a.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [0, 1, 2]}) + result = expr.add(const, join="override") + assert list(result.data.indexes["i"]) == [0, 1, 2] + assert (result.const.values == const.values).all() + + def test_sub_expr_join_inner(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().sub(b.to_linexpr(), join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_mul_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_mul_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().mul(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + assert result.coeffs.sel(i=0).item() == 0 + assert result.coeffs.sel(i=1).item() == 2 + assert result.coeffs.sel(i=2).item() == 3 + + def test_div_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().div(const, join="inner") + assert 
list(result.data.indexes["i"]) == [1, 2] + + def test_div_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().div(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_variable_add_join(self, a: Variable, b: Variable) -> None: + result = a.add(b, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_sub_join(self, a: Variable, b: Variable) -> None: + result = a.sub(b, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_mul_join(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_div_join(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.div(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_mul_expr_with_join_raises(self, a: Variable, b: Variable) -> None: + with pytest.raises(TypeError, match="join parameter is not supported"): + a.to_linexpr().mul(b.to_linexpr(), join="inner") + + def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: + result = merge([a.to_linexpr(), b.to_linexpr()], join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_same_shape_add_join_override(self, a: Variable, c: Variable) -> None: + result = a.to_linexpr().add(c.to_linexpr(), join="override") + assert list(result.data.indexes["i"]) == [0, 1, 2] + + def test_add_expr_outer_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.add(expr_b, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 15 + assert 
result.const.sel(i=3).item() == 10 + + def test_add_expr_inner_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.add(expr_b, join="inner") + assert list(result.coords["i"].values) == [1, 2] + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 15 + + def test_add_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = expr.add(const, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 5 + assert result.const.sel(i=3).item() == 20 + + def test_add_constant_inner_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = expr.add(const, join="inner") + assert list(result.coords["i"].values) == [1] + assert result.const.sel(i=1).item() == 15 + + def test_add_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.add(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [15, 25, 35]) + + def test_sub_constant_override(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.sub(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [-5, -15, -25]) + + def test_sub_expr_outer_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.sub(expr_b, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert 
result.const.sel(i=1).item() == -5 + assert result.const.sel(i=2).item() == -5 + assert result.const.sel(i=3).item() == -10 + + def test_mul_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.mul(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [10, 15, 20]) + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) + + def test_mul_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([2, 3], dims=["i"], coords={"i": [1, 3]}) + result = expr.mul(other, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 0 + assert result.const.sel(i=1).item() == 10 + assert result.const.sel(i=2).item() == 0 + assert result.const.sel(i=3).item() == 0 + assert result.coeffs.squeeze().sel(i=1).item() == 2 + assert result.coeffs.squeeze().sel(i=0).item() == 0 + + def test_div_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 10 + other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.div(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [5.0, 2.0, 1.0]) + + def test_div_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 10 + other = xr.DataArray([2.0, 5.0], dims=["i"], coords={"i": [1, 3]}) + result = expr.div(other, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=1).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(i=1).item() == pytest.approx(0.5) + assert result.const.sel(i=0).item() == pytest.approx(10.0) + assert result.coeffs.squeeze().sel(i=0).item() == pytest.approx(1.0) + + def test_div_expr_with_join_raises(self, a: Variable, b: Variable) -> 
None: + with pytest.raises(TypeError): + a.to_linexpr().div(b.to_linexpr(), join="outer") + + def test_variable_add_outer_values(self, a: Variable, b: Variable) -> None: + result = a.add(b, join="outer") + assert isinstance(result, LinearExpression) + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.nterm == 2 + + def test_variable_mul_override(self, a: Variable) -> None: + other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) + result = a.mul(other, join="override") + assert isinstance(result, LinearExpression) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) + + def test_variable_div_override(self, a: Variable) -> None: + other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) + result = a.div(other, join="override") + assert isinstance(result, LinearExpression) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_almost_equal( + result.coeffs.squeeze().values, [0.5, 0.2, 0.1] + ) + + def test_merge_outer_join(self, a: Variable, b: Variable) -> None: + result = merge([a.to_linexpr(), b.to_linexpr()], join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + + def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: + expr_a = 1 * a + 5 + const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) + for join in ["override", "outer", "inner"]: + result = expr_a.add(const, join=join) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [6, 7, 8]) + + def test_add_scalar_with_explicit_join(self, a: Variable) -> None: + expr = 1 * a + 5 + result = expr.add(10, join="override") + np.testing.assert_array_equal(result.const.values, [15, 15, 15]) + assert list(result.coords["i"].values) == [0, 1, 2] + + def test_quadratic_add_constant_join_inner(self, a: Variable, b: Variable) -> None: + quad = a.to_linexpr() * 
b.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: + quad = a.to_linexpr() * a.to_linexpr() + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [0, 1] + + def test_quadratic_mul_constant_join_inner(self, a: Variable, b: Variable) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_merge_join_left(self, a: Variable, b: Variable) -> None: + result = merge([a.to_linexpr(), b.to_linexpr()], join="left") + assert list(result.data.indexes["i"]) == [0, 1, 2] + + def test_merge_join_right(self, a: Variable, b: Variable) -> None: + result = merge([a.to_linexpr(), b.to_linexpr()], join="right") + assert list(result.data.indexes["i"]) == [1, 2, 3] From 72b0ce1702b1856d4573f5ba1558fc7bf7634855 Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 11 Feb 2026 12:58:32 +0100 Subject: [PATCH 05/66] Add le, ge, eq methods with join parameter for constraints Add le(), ge(), eq() methods to LinearExpression and Variable classes, mirroring the pattern of add/sub/mul/div methods. These methods support the join parameter for flexible coordinate alignment when creating constraints. 
--- linopy/expressions.py | 67 +++++++++++++++++++++++++++++++++++++++++-- linopy/variables.py | 45 +++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 2 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 1fd8176d..2fa09ce8 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -714,6 +714,63 @@ def div( ) return self._divide_by_constant(other, join=join) + def le( + self: GenericExpression, + rhs: SideLike, + join: str | None = None, + ) -> Constraint: + """ + Less than or equal constraint. + + Parameters + ---------- + rhs : expression-like + Right-hand side of the constraint. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. + """ + return self.to_constraint(LESS_EQUAL, rhs, join=join) + + def ge( + self: GenericExpression, + rhs: SideLike, + join: str | None = None, + ) -> Constraint: + """ + Greater than or equal constraint. + + Parameters + ---------- + rhs : expression-like + Right-hand side of the constraint. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. + """ + return self.to_constraint(GREATER_EQUAL, rhs, join=join) + + def eq( + self: GenericExpression, + rhs: SideLike, + join: str | None = None, + ) -> Constraint: + """ + Equality constraint. + + Parameters + ---------- + rhs : expression-like + Right-hand side of the constraint. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. + """ + return self.to_constraint(EQUAL, rhs, join=join) + def pow(self, other: int) -> QuadraticExpression: """ Power of the expression with a coefficient. 
@@ -953,7 +1010,9 @@ def cumsum( dim_dict = {dim_name: self.data.sizes[dim_name] for dim_name in dim} return self.rolling(dim=dim_dict).sum(keep_attrs=keep_attrs, skipna=skipna) - def to_constraint(self, sign: SignLike, rhs: SideLike) -> Constraint: + def to_constraint( + self, sign: SignLike, rhs: SideLike, join: str | None = None + ) -> Constraint: """ Convert a linear expression to a constraint. @@ -966,6 +1025,10 @@ def to_constraint(self, sign: SignLike, rhs: SideLike) -> Constraint: reindexed to match expression coordinates (fill_value=np.nan). Extra dimensions in the RHS not present in the expression raise a ValueError. NaN entries in the RHS mean "no constraint". + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. Returns ------- @@ -987,7 +1050,7 @@ def to_constraint(self, sign: SignLike, rhs: SideLike) -> Constraint: ) rhs = rhs.reindex_like(self.const, fill_value=np.nan) - all_to_lhs = (self - rhs).data + all_to_lhs = self.sub(rhs, join=join).data data = assign_multiindex_safe( all_to_lhs[["coeffs", "vars"]], sign=sign, rhs=-all_to_lhs.const ) diff --git a/linopy/variables.py b/linopy/variables.py index f6d52288..3ba563da 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -605,6 +605,51 @@ def div(self, other: ConstantLike, join: str | None = None) -> LinearExpression: """ return self.to_linexpr().div(other, join=join) + def le(self, rhs: SideLike, join: str | None = None) -> Constraint: + """ + Less than or equal constraint. + + Parameters + ---------- + rhs : expression-like + Right-hand side of the constraint. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. 
+ """ + return self.to_linexpr().le(rhs, join=join) + + def ge(self, rhs: SideLike, join: str | None = None) -> Constraint: + """ + Greater than or equal constraint. + + Parameters + ---------- + rhs : expression-like + Right-hand side of the constraint. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. + """ + return self.to_linexpr().ge(rhs, join=join) + + def eq(self, rhs: SideLike, join: str | None = None) -> Constraint: + """ + Equality constraint. + + Parameters + ---------- + rhs : expression-like + Right-hand side of the constraint. + join : str, optional + How to align coordinates. One of "outer", "inner", "left", + "right", "exact", "override". When None (default), uses the + current default behavior. + """ + return self.to_linexpr().eq(rhs, join=join) + def pow(self, other: int) -> QuadraticExpression: """ Power of the variables with a coefficient. The only coefficient allowed is 2. From d621e4f7eec6ccff1bb003a4c3fbb00d7c4242eb Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 11 Feb 2026 13:41:30 +0100 Subject: [PATCH 06/66] Extract constant alignment logic into _align_constant helper Consolidate repetitive alignment handling in _add_constant and _apply_constant_op into a single _align_constant method. This eliminates code duplication and makes the alignment behavior (handling join parameter, fill_value, size-aware defaults) testable and maintainable in one place. 
--- linopy/expressions.py | 72 +++++++++++++++++++++++++++++++------------ 1 file changed, 53 insertions(+), 19 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 2fa09ce8..ee56b477 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -523,28 +523,66 @@ def _multiply_by_linear_expression( res = res + self.reset_const() * other.const return res + def _align_constant( + self: GenericExpression, + other: DataArray, + fill_value: float = 0, + join: str | None = None, + ) -> tuple[DataArray, DataArray, bool]: + """ + Align a constant DataArray with self.const. + + Parameters + ---------- + other : DataArray + The constant to align. + fill_value : float, default: 0 + Fill value for missing coordinates. + join : str, optional + Alignment method. If None, uses size-aware default behavior. + + Returns + ------- + self_const : DataArray + The expression's const, potentially reindexed. + aligned : DataArray + The aligned constant. + needs_data_reindex : bool + Whether the expression's data needs reindexing. 
+ """ + if join is None: + if other.sizes == self.const.sizes: + return self.const, other.assign_coords(coords=self.coords), False + return ( + self.const, + other.reindex_like(self.const, fill_value=fill_value), + False, + ) + elif join == "override": + return self.const, other.assign_coords(coords=self.coords), False + else: + self_const, aligned = xr.align( + self.const, other, join=join, fill_value=fill_value + ) + return self_const, aligned, True + def _add_constant( self: GenericExpression, other: ConstantLike, join: str | None = None ) -> GenericExpression: if np.isscalar(other) and join is None: return self.assign(const=self.const + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - if join is None: - if da.sizes == self.const.sizes: - da = da.assign_coords(coords=self.coords) - else: - da = da.reindex_like(self.const, fill_value=0) - elif join == "override": - da = da.assign_coords(coords=self.coords) - else: - self_const, da = xr.align(self.const, da, join=join, fill_value=0) + self_const, da, needs_data_reindex = self._align_constant( + da, fill_value=0, join=join + ) + if needs_data_reindex: return self.__class__( self.data.reindex_like(self_const, fill_value=self._fill_value).assign( const=self_const + da ), self.model, ) - return self.assign(const=self.const + da) + return self.assign(const=self_const + da) def _apply_constant_op( self: GenericExpression, @@ -554,14 +592,10 @@ def _apply_constant_op( join: str | None = None, ) -> GenericExpression: factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - if join is None: - factor = factor.reindex_like(self.const, fill_value=fill_value) - elif join == "override": - factor = factor.assign_coords(coords=self.coords) - else: - self_const, factor = xr.align( - self.const, factor, join=join, fill_value=fill_value - ) + self_const, factor, needs_data_reindex = self._align_constant( + factor, fill_value=fill_value, join=join + ) + if needs_data_reindex: data = 
self.data.reindex_like(self_const, fill_value=self._fill_value) return self.__class__( assign_multiindex_safe( @@ -569,7 +603,7 @@ def _apply_constant_op( ), self.model, ) - return self.assign(coeffs=op(self.coeffs, factor), const=op(self.const, factor)) + return self.assign(coeffs=op(self.coeffs, factor), const=op(self_const, factor)) def _multiply_by_constant( self: GenericExpression, other: ConstantLike, join: str | None = None From 130a5df11603862220b9c104e618a063cff572b6 Mon Sep 17 00:00:00 2001 From: Fabian Date: Fri, 13 Feb 2026 12:34:36 +0100 Subject: [PATCH 07/66] update notebooks --- doc/index.rst | 1 + examples/coordinate-alignment.ipynb | 488 ++++++++++++++++++++++++++++ examples/creating-constraints.ipynb | 6 + examples/creating-expressions.ipynb | 6 + 4 files changed, 501 insertions(+) create mode 100644 examples/coordinate-alignment.ipynb diff --git a/doc/index.rst b/doc/index.rst index bff9fa65..c575fc60 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -111,6 +111,7 @@ This package is published under MIT license. creating-variables creating-expressions creating-constraints + coordinate-alignment sos-constraints manipulating-models testing-framework diff --git a/examples/coordinate-alignment.ipynb b/examples/coordinate-alignment.ipynb new file mode 100644 index 00000000..1547bd9d --- /dev/null +++ b/examples/coordinate-alignment.ipynb @@ -0,0 +1,488 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Coordinate Alignment\n", + "\n", + "Since linopy builds on xarray, coordinate alignment matters when combining variables or expressions that live on different coordinates. By default, linopy aligns operands automatically and fills missing entries with sensible defaults. This guide shows how alignment works and how to control it with the ``join`` parameter." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "import xarray as xr\n", + "\n", + "import linopy" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Default Alignment Behavior\n", + "\n", + "When two operands share a dimension but have different coordinates, linopy keeps the **larger** (superset) coordinate range and fills missing positions with zeros (for addition) or zero coefficients (for multiplication)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m = linopy.Model()\n", + "\n", + "time = pd.RangeIndex(5, name=\"time\")\n", + "x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", + "\n", + "subset_time = pd.RangeIndex(3, name=\"time\")\n", + "y = m.add_variables(lower=0, coords=[subset_time], name=\"y\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Adding ``x`` (5 time steps) and ``y`` (3 time steps) gives an expression over all 5 time steps. Where ``y`` has no entry (time 3, 4), the coefficient is zero — i.e. ``y`` simply drops out of the sum at those positions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x + y" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The same applies when multiplying by a constant that covers only a subset of coordinates. 
Missing positions get a coefficient of zero:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "factor = xr.DataArray([2, 3, 4], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + "x * factor" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Adding a constant subset also fills missing coordinates with zero:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x + factor" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Constraints with Subset RHS\n", + "\n", + "For constraints, missing right-hand-side values are filled with ``NaN``, which tells linopy to **skip** the constraint at those positions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + "con = x <= rhs\n", + "con" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The constraint only applies at time 0, 1, 2. At time 3 and 4 the RHS is ``NaN``, so no constraint is created." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "### Same-Shape Operands: Positional Alignment\n\nWhen two operands have the **same shape** on a shared dimension, linopy uses **positional alignment** by default — coordinate labels are ignored and the left operand's labels are kept. 
This is a performance optimization but can be surprising:" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "offset_const = xr.DataArray(\n", + " [10, 20, 30, 40, 50], dims=[\"time\"], coords={\"time\": [5, 6, 7, 8, 9]}\n", + ")\n", + "x + offset_const" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "Even though ``offset_const`` has coordinates ``[5, 6, 7, 8, 9]`` and ``x`` has ``[0, 1, 2, 3, 4]``, the result uses ``x``'s labels. The values are aligned by **position**, not by label. The same applies when adding two variables or expressions of identical shape:" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "z = m.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", + "x + z" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "``x`` (time 0–4) and ``z`` (time 5–9) share no coordinate labels, yet the result has 5 entries under ``x``'s coordinates — because they have the same shape, positions are matched directly.\n\nTo force **label-based** alignment, pass an explicit ``join``:" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x.add(z, join=\"outer\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "With ``join=\"outer\"``, the result spans all 10 time steps (union of 0–4 and 5–9), filling missing positions with zeros. This is the correct label-based alignment. The same-shape positional shortcut is equivalent to ``join=\"override\"`` — see below." + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The ``join`` Parameter\n", + "\n", + "For explicit control over alignment, use the ``.add()``, ``.sub()``, ``.mul()``, and ``.div()`` methods with a ``join`` parameter. 
The supported values follow xarray conventions:\n", + "\n", + "- ``\"inner\"`` — intersection of coordinates\n", + "- ``\"outer\"`` — union of coordinates (with fill)\n", + "- ``\"left\"`` — keep left operand's coordinates\n", + "- ``\"right\"`` — keep right operand's coordinates\n", + "- ``\"override\"`` — positional alignment, ignore coordinate labels\n", + "- ``\"exact\"`` — coordinates must match exactly (raises on mismatch)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m2 = linopy.Model()\n", + "\n", + "i_a = pd.Index([0, 1, 2], name=\"i\")\n", + "i_b = pd.Index([1, 2, 3], name=\"i\")\n", + "\n", + "a = m2.add_variables(coords=[i_a], name=\"a\")\n", + "b = m2.add_variables(coords=[i_b], name=\"b\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Inner join** — only shared coordinates (i=1, 2):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a.add(b, join=\"inner\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Outer join** — union of coordinates (i=0, 1, 2, 3):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a.add(b, join=\"outer\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Left join** — keep left operand's coordinates (i=0, 1, 2):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a.add(b, join=\"left\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Right join** — keep right operand's coordinates (i=1, 2, 3):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a.add(b, join=\"right\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "**Override** — positional alignment, ignore 
coordinate labels. The result uses the left operand's coordinates. Here ``a`` has i=[0, 1, 2] and ``b`` has i=[1, 2, 3], so positions are matched as 0↔1, 1↔2, 2↔3:" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a.add(b, join=\"override\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Multiplication with ``join``\n", + "\n", + "The same ``join`` parameter works on ``.mul()`` and ``.div()``. When multiplying by a constant that covers a subset, ``join=\"inner\"`` restricts the result to shared coordinates only, while ``join=\"left\"`` fills missing values with zero:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "const = xr.DataArray([2, 3, 4], dims=[\"i\"], coords={\"i\": [1, 2, 3]})\n", + "\n", + "a.mul(const, join=\"inner\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a.mul(const, join=\"left\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Alignment in Constraints\n", + "\n", + "The ``.le()``, ``.ge()``, and ``.eq()`` methods create constraints with explicit coordinate alignment. They accept the same ``join`` parameter:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rhs = xr.DataArray([10, 20], dims=[\"i\"], coords={\"i\": [0, 1]})\n", + "\n", + "a.le(rhs, join=\"inner\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With ``join=\"inner\"``, the constraint only exists at the intersection (i=0, 1). 
Compare with ``join=\"left\"``:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a.le(rhs, join=\"left\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With ``join=\"left\"``, the result covers all of ``a``'s coordinates (i=0, 1, 2). At i=2, where the RHS has no value, the RHS becomes ``NaN`` and the constraint is masked out.\n", + "\n", + "The same methods work on expressions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expr = 2 * a + 1\n", + "expr.eq(rhs, join=\"inner\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "## Practical Example\n\nConsider a generation dispatch model where solar availability follows a daily profile and a minimum demand constraint only applies during peak hours." + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "m3 = linopy.Model()\n", + "\n", + "hours = pd.RangeIndex(24, name=\"hour\")\n", + "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", + "\n", + "gen = m3.add_variables(lower=0, coords=[hours, techs], name=\"gen\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Capacity limits apply to all hours and techs — standard broadcasting handles this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", + "m3.add_constraints(gen <= capacity, name=\"capacity_limit\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "For solar, we build a full 24-hour availability profile — zero at night, sine-shaped during daylight (hours 6–18). 
Since this covers all hours, standard alignment works directly and solar is properly constrained to zero at night:" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "solar_avail = np.zeros(24)\n", + "solar_avail[6:19] = 100 * np.sin(np.linspace(0, np.pi, 13))\n", + "solar_availability = xr.DataArray(solar_avail, dims=[\"hour\"], coords={\"hour\": hours})\n", + "\n", + "solar_gen = gen.sel(tech=\"solar\")\n", + "m3.add_constraints(solar_gen <= solar_availability, name=\"solar_avail\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "Now suppose a minimum demand of 120 MW must be met, but only during peak hours (8–20). The demand array covers a subset of hours, so we use ``join=\"inner\"`` to restrict the constraint to just those hours:" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "peak_hours = pd.RangeIndex(8, 21, name=\"hour\")\n", + "peak_demand = xr.DataArray(\n", + " np.full(len(peak_hours), 120.0), dims=[\"hour\"], coords={\"hour\": peak_hours}\n", + ")\n", + "\n", + "total_gen = gen.sum(\"tech\")\n", + "m3.add_constraints(total_gen.ge(peak_demand, join=\"inner\"), name=\"peak_demand\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "The demand constraint only applies during peak hours (8–20). Outside that range, no minimum generation is required." 
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary\n",
+    "\n",
+    "| ``join`` | Coordinates | Fill behavior |\n",
+    "|----------|------------|---------------|\n",
+    "| ``None`` (default) | Auto-detect (keeps superset) | Zeros for arithmetic, NaN for constraint RHS |\n",
+    "| ``\"inner\"`` | Intersection only | No fill needed |\n",
+    "| ``\"outer\"`` | Union | Fill missing entries with 0 (added terms vanish; multiplied coefficients become zero) |\n",
+    "| ``\"left\"`` | Left operand's | Fill right operand's missing entries with 0 |\n",
+    "| ``\"right\"`` | Right operand's | Fill left operand's missing entries with 0 |\n",
+    "| ``\"override\"`` | Left operand's (positional) | Positional alignment, ignore labels |\n",
+    "| ``\"exact\"`` | Must match exactly | Raises error if different |"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/examples/creating-constraints.ipynb b/examples/creating-constraints.ipynb
index b46db1bc..55251233 100644
--- a/examples/creating-constraints.ipynb
+++ b/examples/creating-constraints.ipynb
@@ -231,6 +231,12 @@
    "source": [
     "m.constraints[\"my-constraint\"]"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "r0wxi7v1m7l",
+   "source": "## Coordinate Alignment in Constraints\n\nAs an alternative to the ``<=``, ``>=``, ``==`` operators, linopy provides ``.le()``, ``.ge()``, and ``.eq()`` methods on variables and expressions. These methods accept a ``join`` parameter (``\"inner\"``, ``\"outer\"``, ``\"left\"``, ``\"right\"``) for explicit control over how coordinates are aligned when creating constraints. 
See the :doc:`coordinate-alignment` guide for details.", + "metadata": {} } ], "metadata": { diff --git a/examples/creating-expressions.ipynb b/examples/creating-expressions.ipynb index aafd8a09..1d808b07 100644 --- a/examples/creating-expressions.ipynb +++ b/examples/creating-expressions.ipynb @@ -193,6 +193,12 @@ "x + b" ] }, + { + "cell_type": "markdown", + "id": "a8xsfdqrcrn", + "source": ".. tip::\n\n\tFor explicit control over how coordinates are aligned during arithmetic, use the `.add()`, `.sub()`, `.mul()`, and `.div()` methods with a ``join`` parameter (``\"inner\"``, ``\"outer\"``, ``\"left\"``, ``\"right\"``). See the :doc:`coordinate-alignment` guide for details.", + "metadata": {} + }, { "attachments": {}, "cell_type": "markdown", From b616074f6f0abc477c51f83a50f9b4bb2cae2cfb Mon Sep 17 00:00:00 2001 From: Fabian Date: Tue, 17 Feb 2026 21:55:55 +0100 Subject: [PATCH 08/66] update release notes --- doc/release_notes.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 62301fcf..754b5144 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -4,8 +4,6 @@ Release Notes Upcoming Version ---------------- -* Fix docs (pick highs solver) -* Add the `sphinx-copybutton` to the documentation * Harmonize coordinate alignment for operations with subset/superset objects: - Multiplication and division fill missing coords with 0 (variable doesn't participate) - Addition and subtraction of constants fill missing coords with 0 (identity element) and pin result to LHS coords @@ -13,6 +11,10 @@ Upcoming Version - Fixes crash on ``subset + var`` / ``subset + expr`` reverse addition - Fixes superset DataArrays expanding result coords beyond the variable's coordinate space +<<<<<<< HEAD +======= + +>>>>>>> c650560 (update release notes) Version 0.6.3 -------------- From bd04a3a3918e33a6fba02d1937a3dce7d1095508 Mon Sep 17 00:00:00 2001 From: Fabian Date: Tue, 17 Feb 2026 22:18:13 +0100 
Subject: [PATCH 09/66] fix types --- linopy/variables.py | 20 ++++++++++++++------ test/test_compatible_arithmetrics.py | 8 ++++---- test/test_linear_expression.py | 14 ++++++++------ 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/linopy/variables.py b/linopy/variables.py index 3ba563da..0eea6634 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -444,7 +444,7 @@ def __matmul__( return self.to_linexpr() @ other def __div__( - self, other: float | int | LinearExpression | Variable + self, other: ConstantLike | LinearExpression | Variable ) -> LinearExpression: """ Divide variables with a coefficient. @@ -458,7 +458,7 @@ def __div__( return self.to_linexpr()._divide_by_constant(other) def __truediv__( - self, coefficient: float | int | LinearExpression | Variable + self, coefficient: ConstantLike | LinearExpression | Variable ) -> LinearExpression: """ True divide variables with a coefficient. @@ -545,7 +545,9 @@ def __lt__(self, other: Any) -> NotImplementedType: def __contains__(self, value: str) -> bool: return self.data.__contains__(value) - def add(self, other: SideLike, join: str | None = None) -> LinearExpression: + def add( + self, other: SideLike, join: str | None = None + ) -> LinearExpression | QuadraticExpression: """ Add variables to linear expressions or other variables. @@ -560,7 +562,9 @@ def add(self, other: SideLike, join: str | None = None) -> LinearExpression: """ return self.to_linexpr().add(other, join=join) - def sub(self, other: SideLike, join: str | None = None) -> LinearExpression: + def sub( + self, other: SideLike, join: str | None = None + ) -> LinearExpression | QuadraticExpression: """ Subtract linear expressions or other variables from the variables. 
@@ -575,7 +579,9 @@ def sub(self, other: SideLike, join: str | None = None) -> LinearExpression: """ return self.to_linexpr().sub(other, join=join) - def mul(self, other: ConstantLike, join: str | None = None) -> LinearExpression: + def mul( + self, other: ConstantLike, join: str | None = None + ) -> LinearExpression | QuadraticExpression: """ Multiply variables with a coefficient. @@ -590,7 +596,9 @@ def mul(self, other: ConstantLike, join: str | None = None) -> LinearExpression: """ return self.to_linexpr().mul(other, join=join) - def div(self, other: ConstantLike, join: str | None = None) -> LinearExpression: + def div( + self, other: ConstantLike, join: str | None = None + ) -> LinearExpression | QuadraticExpression: """ Divide variables with a coefficient. diff --git a/test/test_compatible_arithmetrics.py b/test/test_compatible_arithmetrics.py index 1d1618ba..edab1ae1 100644 --- a/test/test_compatible_arithmetrics.py +++ b/test/test_compatible_arithmetrics.py @@ -98,13 +98,13 @@ def test_arithmetric_operations_variable(m: Model) -> None: assert_linequal(x + data, x + other_datatype) assert_linequal(x - data, x - other_datatype) assert_linequal(x * data, x * other_datatype) - assert_linequal(x / data, x / other_datatype) # type: ignore - assert_linequal(data * x, other_datatype * x) # type: ignore + assert_linequal(x / data, x / other_datatype) + assert_linequal(data * x, other_datatype * x) # type: ignore[arg-type] assert x.__add__(object()) is NotImplemented assert x.__sub__(object()) is NotImplemented assert x.__mul__(object()) is NotImplemented - assert x.__truediv__(object()) is NotImplemented # type: ignore - assert x.__pow__(object()) is NotImplemented # type: ignore + assert x.__truediv__(object()) is NotImplemented + assert x.__pow__(object()) is NotImplemented # type: ignore[operator] with pytest.raises(ValueError): x.__pow__(3) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index d956ef1f..2af1a8ea 100644 --- 
a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -7,6 +7,8 @@ from __future__ import annotations +from typing import Any + import numpy as np import pandas as pd import polars as pl @@ -621,7 +623,7 @@ def test_expr_mul_subset( ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], ) def test_commutativity( - self, v: Variable, subset: xr.DataArray, make_lhs: object, make_rhs: object + self, v: Variable, subset: xr.DataArray, make_lhs: Any, make_rhs: Any ) -> None: assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) @@ -889,7 +891,7 @@ def test_constraint_rhs_extra_dims_raises(self, v: Variable) -> None: def test_da_truediv_var_raises(self, v: Variable) -> None: da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) with pytest.raises(TypeError): - da / v + da / v # type: ignore[operator] def test_disjoint_mul_produces_zeros(self, v: Variable) -> None: disjoint = xr.DataArray( @@ -1875,7 +1877,7 @@ def test_mul_expr_with_join_raises(self, a: Variable, b: Variable) -> None: a.to_linexpr().mul(b.to_linexpr(), join="inner") def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: - result = merge([a.to_linexpr(), b.to_linexpr()], join="inner") + result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="inner") assert list(result.data.indexes["i"]) == [1, 2] def test_same_shape_add_join_override(self, a: Variable, c: Variable) -> None: @@ -2005,7 +2007,7 @@ def test_variable_div_override(self, a: Variable) -> None: ) def test_merge_outer_join(self, a: Variable, b: Variable) -> None: - result = merge([a.to_linexpr(), b.to_linexpr()], join="outer") + result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="outer") assert set(result.coords["i"].values) == {0, 1, 2, 3} def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: @@ -2041,9 +2043,9 @@ def test_quadratic_mul_constant_join_inner(self, a: Variable, b: Variable) -> No assert 
list(result.data.indexes["i"]) == [1, 2, 3] def test_merge_join_left(self, a: Variable, b: Variable) -> None: - result = merge([a.to_linexpr(), b.to_linexpr()], join="left") + result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="left") assert list(result.data.indexes["i"]) == [0, 1, 2] def test_merge_join_right(self, a: Variable, b: Variable) -> None: - result = merge([a.to_linexpr(), b.to_linexpr()], join="right") + result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="right") assert list(result.data.indexes["i"]) == [1, 2, 3] From 32ddf91bf83d5c40f14fc448b67860617296b816 Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 18 Feb 2026 10:11:51 +0100 Subject: [PATCH 10/66] add regression test --- doc/release_notes.rst | 3 --- test/test_constraints.py | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 754b5144..9f56b0f8 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -11,10 +11,7 @@ Upcoming Version - Fixes crash on ``subset + var`` / ``subset + expr`` reverse addition - Fixes superset DataArrays expanding result coords beyond the variable's coordinate space -<<<<<<< HEAD -======= ->>>>>>> c650560 (update release notes) Version 0.6.3 -------------- diff --git a/test/test_constraints.py b/test/test_constraints.py index 01aebb69..1c4187bd 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -139,6 +139,24 @@ def test_constraint_assignment_with_reindex() -> None: assert (con.coords["dim_0"].values == shuffled_coords).all() +def test_constraint_rhs_lower_dim_numpy() -> None: + m = Model() + naxis = np.arange(10, dtype=float) + maxis = np.arange(10).astype(str) + x = m.add_variables(coords=[naxis, maxis]) + y = m.add_variables(coords=[naxis, maxis]) + + naxis_da = xr.DataArray(naxis, dims=["dim_0"]) + c_da = m.add_constraints(x - y >= naxis_da) + assert c_da.shape == (10, 10) + + c_series = m.add_constraints(x - 
y >= pd.Series(naxis, index=naxis)) + assert c_series.shape == (10, 10) + + c_np = m.add_constraints(x - y >= naxis) + assert c_np.shape == (10, 10) + + def test_wrong_constraint_assignment_repeated() -> None: # repeated variable assignment is forbidden m: Model = Model() From 1b0252b4c02aa99dd0a7fba63584bd26764f070b Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 18 Feb 2026 12:38:10 +0100 Subject: [PATCH 11/66] fix numpy array dim mismatch in constraints and add RHS dim tests numpy_to_dataarray no longer inflates ndim beyond arr.ndim, fixing lower-dim numpy arrays as constraint RHS. Also reject higher-dim constant arrays (numpy/pandas) consistently with DataArray behavior. Co-Authored-By: Claude Opus 4.6 --- linopy/common.py | 11 ++++--- linopy/expressions.py | 7 +++++ test/test_common.py | 6 ++-- test/test_constraints.py | 67 ++++++++++++++++++++++++++++++++++------ 4 files changed, 75 insertions(+), 16 deletions(-) diff --git a/linopy/common.py b/linopy/common.py index 0823deac..746459b4 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -213,18 +213,19 @@ def numpy_to_dataarray( if arr.ndim == 0: return DataArray(arr.item(), coords=coords, dims=dims, **kwargs) - ndim = max(arr.ndim, 0 if coords is None else len(coords)) if isinstance(dims, Iterable | Sequence): dims = list(dims) elif dims is not None: dims = [dims] if dims is not None and len(dims): - # fill up dims with default names to match the number of dimensions - dims = [get_from_iterable(dims, i) or f"dim_{i}" for i in range(ndim)] + dims = [get_from_iterable(dims, i) or f"dim_{i}" for i in range(arr.ndim)] - if isinstance(coords, list) and dims is not None and len(dims): - coords = dict(zip(dims, coords)) + if dims is not None and len(dims) and coords is not None: + if isinstance(coords, list): + coords = dict(zip(dims, coords[: arr.ndim])) + elif is_dict_like(coords): + coords = {k: v for k, v in coords.items() if k in dims} return DataArray(arr, coords=coords, dims=dims, **kwargs) diff --git 
a/linopy/expressions.py b/linopy/expressions.py index ee56b477..e1fbe1a9 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -1083,6 +1083,13 @@ def to_constraint( f"in the expression. Cannot create constraint." ) rhs = rhs.reindex_like(self.const, fill_value=np.nan) + elif isinstance(rhs, np.ndarray | pd.Series | pd.DataFrame) and rhs.ndim > len( + self.coord_dims + ): + raise ValueError( + f"RHS has {rhs.ndim} dimensions, but the expression only " + f"has {len(self.coord_dims)}. Cannot create constraint." + ) all_to_lhs = self.sub(rhs, join=join).data data = assign_multiindex_safe( diff --git a/test/test_common.py b/test/test_common.py index c3500155..267fbf76 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -370,8 +370,10 @@ def test_as_dataarray_with_ndarray_coords_dict_set_dims_not_aligned() -> None: target_dims = ("dim_0", "dim_1") target_coords = {"dim_0": ["a", "b"], "dim_2": ["A", "B"]} arr = np.array([[1, 2], [3, 4]]) - with pytest.raises(ValueError): - as_dataarray(arr, coords=target_coords, dims=target_dims) + da = as_dataarray(arr, coords=target_coords, dims=target_dims) + assert da.dims == target_dims + assert list(da.coords["dim_0"].values) == ["a", "b"] + assert "dim_2" not in da.coords def test_as_dataarray_with_number() -> None: diff --git a/test/test_constraints.py b/test/test_constraints.py index 1c4187bd..e5da08d4 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -139,22 +139,71 @@ def test_constraint_assignment_with_reindex() -> None: assert (con.coords["dim_0"].values == shuffled_coords).all() -def test_constraint_rhs_lower_dim_numpy() -> None: +@pytest.mark.parametrize( + "rhs_factory", + [ + pytest.param(lambda m, v: v, id="numpy"), + pytest.param(lambda m, v: xr.DataArray(v, dims=["dim_0"]), id="dataarray"), + pytest.param(lambda m, v: pd.Series(v, index=v), id="series"), + pytest.param( + lambda m, v: m.add_variables(coords=[v]), + id="variable", + ), + pytest.param( + lambda m, v: 2 * 
m.add_variables(coords=[v]) + 1, + id="linexpr", + ), + ], +) +def test_constraint_rhs_lower_dim(rhs_factory) -> None: m = Model() naxis = np.arange(10, dtype=float) maxis = np.arange(10).astype(str) x = m.add_variables(coords=[naxis, maxis]) y = m.add_variables(coords=[naxis, maxis]) - naxis_da = xr.DataArray(naxis, dims=["dim_0"]) - c_da = m.add_constraints(x - y >= naxis_da) - assert c_da.shape == (10, 10) - - c_series = m.add_constraints(x - y >= pd.Series(naxis, index=naxis)) - assert c_series.shape == (10, 10) + c = m.add_constraints(x - y >= rhs_factory(m, naxis)) + assert c.shape == (10, 10) + + +@pytest.mark.parametrize( + "rhs_factory", + [ + pytest.param(lambda m: np.ones((5, 3)), id="numpy"), + pytest.param( + lambda m: xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]), + id="dataarray", + ), + pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), + ], +) +def test_constraint_rhs_higher_dim_constant_raises(rhs_factory) -> None: + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + + with pytest.raises(ValueError, match="dimensions"): + m.add_constraints(x >= rhs_factory(m)) + + +@pytest.mark.parametrize( + "rhs_factory", + [ + pytest.param( + lambda m: m.add_variables(coords=[range(5), range(3)]), + id="variable", + ), + pytest.param( + lambda m: 2 * m.add_variables(coords=[range(5), range(3)]) + 1, + id="linexpr", + ), + ], +) +def test_constraint_rhs_higher_dim_expression(rhs_factory) -> None: + m = Model() + x = m.add_variables(coords=[range(5)], name="x") - c_np = m.add_constraints(x - y >= naxis) - assert c_np.shape == (10, 10) + c = m.add_constraints(x >= rhs_factory(m)) + assert c.shape == (5, 3) def test_wrong_constraint_assignment_repeated() -> None: From 91bd5152b1e7a4f4889e739356671648e85c9659 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 20 Feb 2026 08:57:42 +0100 Subject: [PATCH 12/66] Update notebook as spec --- doc/coordinate-alignment.nblink | 3 + 
examples/coordinate-alignment.ipynb | 456 +++++++++++++++++++--------- 2 files changed, 314 insertions(+), 145 deletions(-) create mode 100644 doc/coordinate-alignment.nblink diff --git a/doc/coordinate-alignment.nblink b/doc/coordinate-alignment.nblink new file mode 100644 index 00000000..ef588b91 --- /dev/null +++ b/doc/coordinate-alignment.nblink @@ -0,0 +1,3 @@ +{ + "path": "../examples/coordinate-alignment.ipynb" +} diff --git a/examples/coordinate-alignment.ipynb b/examples/coordinate-alignment.ipynb index 1547bd9d..e440d467 100644 --- a/examples/coordinate-alignment.ipynb +++ b/examples/coordinate-alignment.ipynb @@ -6,13 +6,30 @@ "source": [ "# Coordinate Alignment\n", "\n", - "Since linopy builds on xarray, coordinate alignment matters when combining variables or expressions that live on different coordinates. By default, linopy aligns operands automatically and fills missing entries with sensible defaults. This guide shows how alignment works and how to control it with the ``join`` parameter." 
+ "Since linopy builds on xarray, coordinate alignment matters when combining variables or expressions that live on different coordinates.\n", + "\n", + "linopy uses **strict, operation-dependent defaults** that prevent silent data loss and ambiguous fill behavior:\n", + "\n", + "| Operation | Default | On mismatch |\n", + "|-----------|---------|-------------|\n", + "| `+`, `-` | `\"exact\"` | `ValueError` — coordinates must match |\n", + "| `*`, `/` | `\"inner\"` | Intersection — natural filtering |\n", + "| `<=`, `>=`, `==` (DataArray RHS) | `\"exact\"` | `ValueError` — coordinates must match |\n", + "\n", + "When you need to combine operands with mismatched coordinates, use the named methods (`.add()`, `.sub()`, `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`) with an explicit `join=` parameter.\n", + "\n", + "This convention is inspired by [pyoframe](https://github.com/Bravos-Power/pyoframe) and [xarray's planned direction](https://github.com/pydata/xarray/issues/8527) toward stricter alignment." ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.191791Z", + "start_time": "2026-02-20T07:55:08.189675Z" + } + }, "outputs": [], "source": [ "import numpy as np\n", @@ -26,173 +43,196 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Default Alignment Behavior\n", + "## Matching Coordinates — The Default Case\n", "\n", - "When two operands share a dimension but have different coordinates, linopy keeps the **larger** (superset) coordinate range and fills missing positions with zeros (for addition) or zero coefficients (for multiplication)." + "When two operands share the same coordinates on every shared dimension, all operators work directly. No special handling is needed." 
] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.219582Z", + "start_time": "2026-02-20T07:55:08.205251Z" + } + }, "outputs": [], "source": [ "m = linopy.Model()\n", "\n", "time = pd.RangeIndex(5, name=\"time\")\n", "x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", + "y = m.add_variables(lower=0, coords=[time], name=\"y\")\n", "\n", - "subset_time = pd.RangeIndex(3, name=\"time\")\n", - "y = m.add_variables(lower=0, coords=[subset_time], name=\"y\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Adding ``x`` (5 time steps) and ``y`` (3 time steps) gives an expression over all 5 time steps. Where ``y`` has no entry (time 3, 4), the coefficient is zero — i.e. ``y`` simply drops out of the sum at those positions." + "# Same coordinates — works fine\n", + "x + y" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.229459Z", + "start_time": "2026-02-20T07:55:08.224390Z" + } + }, "outputs": [], "source": [ - "x + y" + "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", + "x * factor" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The same applies when multiplying by a constant that covers only a subset of coordinates. Missing positions get a coefficient of zero:" + "## Broadcasting (Different Dimensions)\n", + "\n", + "Alignment only checks **shared** dimensions. If operands have different dimension names, they expand (broadcast) as in xarray — this is unaffected by the alignment convention." 
] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.240925Z", + "start_time": "2026-02-20T07:55:08.235115Z" + } + }, "outputs": [], "source": [ - "factor = xr.DataArray([2, 3, 4], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", - "x * factor" + "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", + "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", + "\n", + "# x has dim \"time\", cost has dim \"tech\" — no shared dim, pure broadcast\n", + "x * cost # -> (time, tech)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Adding a constant subset also fills missing coordinates with zero:" + "## Addition / Subtraction: `\"exact\"` Default\n", + "\n", + "When operands have different coordinates on a shared dimension, `+` and `-` raise a `ValueError`. This prevents silent data loss or ambiguous fill behavior." ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.256128Z", + "start_time": "2026-02-20T07:55:08.246697Z" + } + }, "outputs": [], "source": [ - "x + factor" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Constraints with Subset RHS\n", + "subset_time = pd.RangeIndex(3, name=\"time\")\n", + "y_short = m.add_variables(lower=0, coords=[subset_time], name=\"y_short\")\n", "\n", - "For constraints, missing right-hand-side values are filled with ``NaN``, which tells linopy to **skip** the constraint at those positions:" + "# x has 5 time steps, y_short has 3 — coordinates don't match\n", + "try:\n", + " x + y_short\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.264875Z", + "start_time": "2026-02-20T07:55:08.259746Z" + } + }, "outputs": [], 
"source": [ - "rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", - "con = x <= rhs\n", - "con" + "# Same for adding a constant DataArray with mismatched coordinates\n", + "partial_const = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + "\n", + "try:\n", + " x + partial_const\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The constraint only applies at time 0, 1, 2. At time 3 and 4 the RHS is ``NaN``, so no constraint is created." + "## Multiplication / Division: `\"inner\"` Default\n", + "\n", + "Multiplication by a parameter array is a natural filtering operation — like applying an availability factor to a subset of time steps. The result is restricted to the **intersection** of coordinates. No fill values are needed." ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "### Same-Shape Operands: Positional Alignment\n\nWhen two operands have the **same shape** on a shared dimension, linopy uses **positional alignment** by default — coordinate labels are ignored and the left operand's labels are kept. 
This is a performance optimization but can be surprising:" - }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.275571Z", + "start_time": "2026-02-20T07:55:08.270271Z" + } + }, "outputs": [], "source": [ - "offset_const = xr.DataArray(\n", - " [10, 20, 30, 40, 50], dims=[\"time\"], coords={\"time\": [5, 6, 7, 8, 9]}\n", - ")\n", - "x + offset_const" + "partial_factor = xr.DataArray([2, 3, 4], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + "\n", + "# x has time 0-4, partial_factor has time 0-2\n", + "# Inner join: result restricted to time 0, 1, 2\n", + "x * partial_factor" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "Even though ``offset_const`` has coordinates ``[5, 6, 7, 8, 9]`` and ``x`` has ``[0, 1, 2, 3, 4]``, the result uses ``x``'s labels. The values are aligned by **position**, not by label. The same applies when adding two variables or expressions of identical shape:" - }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.286941Z", + "start_time": "2026-02-20T07:55:08.280454Z" + } + }, "outputs": [], "source": [ + "# Disjoint coordinates: no intersection -> empty result\n", "z = m.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", - "x + z" + "disjoint_factor = xr.DataArray(\n", + " [1, 2, 3, 4, 5], dims=[\"time\"], coords={\"time\": range(5)}\n", + ")\n", + "\n", + "z * disjoint_factor" ] }, { "cell_type": "markdown", "metadata": {}, - "source": "``x`` (time 0–4) and ``z`` (time 5–9) share no coordinate labels, yet the result has 5 entries under ``x``'s coordinates — because they have the same shape, positions are matched directly.\n\nTo force **label-based** alignment, pass an explicit ``join``:" - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], "source": [ - "x.add(z, 
join=\"outer\")" + "## Named Methods with `join=`\n", + "\n", + "When you intentionally want to combine operands with mismatched coordinates, use the named methods with an explicit `join=` parameter. This makes the alignment intent clear in the code." ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "With ``join=\"outer\"``, the result spans all 10 time steps (union of 0–4 and 5–9), filling missing positions with zeros. This is the correct label-based alignment. The same-shape positional shortcut is equivalent to ``join=\"override\"`` — see below." - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## The ``join`` Parameter\n", - "\n", - "For explicit control over alignment, use the ``.add()``, ``.sub()``, ``.mul()``, and ``.div()`` methods with a ``join`` parameter. The supported values follow xarray conventions:\n", - "\n", - "- ``\"inner\"`` — intersection of coordinates\n", - "- ``\"outer\"`` — union of coordinates (with fill)\n", - "- ``\"left\"`` — keep left operand's coordinates\n", - "- ``\"right\"`` — keep right operand's coordinates\n", - "- ``\"override\"`` — positional alignment, ignore coordinate labels\n", - "- ``\"exact\"`` — coordinates must match exactly (raises on mismatch)" + "### Setup: Overlapping but Non-Identical Coordinates" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.296398Z", + "start_time": "2026-02-20T07:55:08.289560Z" + } + }, "outputs": [], "source": [ "m2 = linopy.Model()\n", @@ -208,13 +248,20 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "`a` has coordinates i=[0, 1, 2] and `b` has i=[1, 2, 3]. 
They overlap at i=1 and i=2 but are not identical, so `a + b` raises a `ValueError`.\n", + "\n", "**Inner join** — only shared coordinates (i=1, 2):" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.309658Z", + "start_time": "2026-02-20T07:55:08.300849Z" + } + }, "outputs": [], "source": [ "a.add(b, join=\"inner\")" @@ -224,13 +271,18 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Outer join** — union of coordinates (i=0, 1, 2, 3):" + "**Outer join** — union of coordinates (i=0, 1, 2, 3). Where one operand is missing, it drops out of the sum (fill with zero):" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.327236Z", + "start_time": "2026-02-20T07:55:08.318480Z" + } + }, "outputs": [], "source": [ "a.add(b, join=\"outer\")" @@ -246,7 +298,12 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.345463Z", + "start_time": "2026-02-20T07:55:08.337188Z" + } + }, "outputs": [], "source": [ "a.add(b, join=\"left\")" @@ -262,7 +319,12 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.363658Z", + "start_time": "2026-02-20T07:55:08.354475Z" + } + }, "outputs": [], "source": [ "a.add(b, join=\"right\")" @@ -271,12 +333,19 @@ { "cell_type": "markdown", "metadata": {}, - "source": "**Override** — positional alignment, ignore coordinate labels. The result uses the left operand's coordinates. Here ``a`` has i=[0, 1, 2] and ``b`` has i=[1, 2, 3], so positions are matched as 0↔1, 1↔2, 2↔3:" + "source": [ + "**Override** — positional alignment, ignore coordinate labels. 
The result uses the left operand's coordinates:" + ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.383357Z", + "start_time": "2026-02-20T07:55:08.372963Z" + } + }, "outputs": [], "source": [ "a.add(b, join=\"override\")" @@ -286,28 +355,40 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Multiplication with ``join``\n", + "### Multiplication with `join=`\n", "\n", - "The same ``join`` parameter works on ``.mul()`` and ``.div()``. When multiplying by a constant that covers a subset, ``join=\"inner\"`` restricts the result to shared coordinates only, while ``join=\"left\"`` fills missing values with zero:" + "The same `join=` parameter works on `.mul()` and `.div()`. Since multiplication defaults to `\"inner\"`, you only need explicit `join=` when you want a different mode:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.396019Z", + "start_time": "2026-02-20T07:55:08.390987Z" + } + }, "outputs": [], "source": [ "const = xr.DataArray([2, 3, 4], dims=[\"i\"], coords={\"i\": [1, 2, 3]})\n", "\n", - "a.mul(const, join=\"inner\")" + "# Default inner join — intersection of i=[0,1,2] and i=[1,2,3]\n", + "a * const" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.411255Z", + "start_time": "2026-02-20T07:55:08.404219Z" + } + }, "outputs": [], "source": [ + "# Left join — keep a's coordinates, fill missing factor with 0\n", "a.mul(const, join=\"left\")" ] }, @@ -315,121 +396,196 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Alignment in Constraints\n", + "## Constraints with DataArray RHS\n", "\n", - "The ``.le()``, ``.ge()``, and ``.eq()`` methods create constraints with explicit coordinate alignment. 
They accept the same ``join`` parameter:" + "Constraint operators (`<=`, `>=`, `==`) with a DataArray right-hand side also default to `\"exact\"` — coordinates must match. Use `.le()`, `.ge()`, `.eq()` with `join=` to control alignment.\n", + "\n", + "The RHS may have **fewer** dimensions than the expression (broadcasting), but **not more**. The expression defines the problem structure; the RHS provides bounds within that structure." ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.435280Z", + "start_time": "2026-02-20T07:55:08.419069Z" + } + }, "outputs": [], "source": [ - "rhs = xr.DataArray([10, 20], dims=[\"i\"], coords={\"i\": [0, 1]})\n", + "# RHS with fewer dimensions — broadcasts (works fine)\n", + "m3 = linopy.Model()\n", + "hours = pd.RangeIndex(24, name=\"hour\")\n", + "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", + "gen = m3.add_variables(lower=0, coords=[hours, techs], name=\"gen\")\n", "\n", - "a.le(rhs, join=\"inner\")" + "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", + "m3.add_constraints(\n", + " gen <= capacity, name=\"capacity_limit\"\n", + ") # capacity broadcasts over hour" ] }, { - "cell_type": "markdown", - "metadata": {}, + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.449885Z", + "start_time": "2026-02-20T07:55:08.443051Z" + } + }, + "outputs": [], "source": [ - "With ``join=\"inner\"``, the constraint only exists at the intersection (i=0, 1). 
Compare with ``join=\"left\"``:" + "# RHS with matching coordinates — works fine\n", + "full_rhs = xr.DataArray(np.arange(5, dtype=float), dims=[\"time\"], coords={\"time\": time})\n", + "con = x <= full_rhs\n", + "con" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.461738Z", + "start_time": "2026-02-20T07:55:08.456633Z" + } + }, "outputs": [], "source": [ - "a.le(rhs, join=\"left\")" + "# RHS with mismatched coordinates — raises ValueError\n", + "partial_rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + "\n", + "try:\n", + " x <= partial_rhs\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" ] }, { - "cell_type": "markdown", - "metadata": {}, + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.470943Z", + "start_time": "2026-02-20T07:55:08.464365Z" + } + }, + "outputs": [], "source": [ - "With ``join=\"left\"``, the result covers all of ``a``'s coordinates (i=0, 1, 2). 
At i=2, where the RHS has no value, the RHS becomes ``NaN`` and the constraint is masked out.\n", - "\n", - "The same methods work on expressions:" + "# Use .le() with join=\"inner\" — constraint only at the intersection\n", + "x.to_linexpr().le(partial_rhs, join=\"inner\")" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.485672Z", + "start_time": "2026-02-20T07:55:08.478220Z" + } + }, "outputs": [], "source": [ - "expr = 2 * a + 1\n", - "expr.eq(rhs, join=\"inner\")" + "# Use .le() with join=\"left\" — constraint at all of x's coordinates,\n", + "# NaN where RHS is missing (no constraint at those positions)\n", + "x.to_linexpr().le(partial_rhs, join=\"left\")" ] }, { "cell_type": "markdown", "metadata": {}, - "source": "## Practical Example\n\nConsider a generation dispatch model where solar availability follows a daily profile and a minimum demand constraint only applies during peak hours." + "source": [ + "## Practical Example\n", + "\n", + "Consider a generation dispatch model where solar availability is a partial factor and a minimum demand constraint only applies during peak hours." + ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.495969Z", + "start_time": "2026-02-20T07:55:08.492164Z" + } + }, "outputs": [], "source": [ - "m3 = linopy.Model()\n", + "m4 = linopy.Model()\n", "\n", "hours = pd.RangeIndex(24, name=\"hour\")\n", "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", "\n", - "gen = m3.add_variables(lower=0, coords=[hours, techs], name=\"gen\")" + "gen = m4.add_variables(lower=0, coords=[hours, techs], name=\"gen\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Capacity limits apply to all hours and techs — standard broadcasting handles this:" + "Capacity limits apply to all hours and techs. 
The `capacity` DataArray has only the `tech` dimension — it broadcasts over `hour` (no shared dimension to conflict):" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.511065Z", + "start_time": "2026-02-20T07:55:08.499152Z" + } + }, "outputs": [], "source": [ "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", - "m3.add_constraints(gen <= capacity, name=\"capacity_limit\")" + "m4.add_constraints(gen <= capacity, name=\"capacity_limit\")" ] }, { "cell_type": "markdown", "metadata": {}, - "source": "For solar, we build a full 24-hour availability profile — zero at night, sine-shaped during daylight (hours 6–18). Since this covers all hours, standard alignment works directly and solar is properly constrained to zero at night:" + "source": [ + "Solar availability is a factor that covers all 24 hours. Since coordinates match exactly, multiplication with `*` works directly:" + ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.532326Z", + "start_time": "2026-02-20T07:55:08.519001Z" + } + }, "outputs": [], "source": [ "solar_avail = np.zeros(24)\n", - "solar_avail[6:19] = 100 * np.sin(np.linspace(0, np.pi, 13))\n", + "solar_avail[6:19] = np.sin(np.linspace(0, np.pi, 13))\n", "solar_availability = xr.DataArray(solar_avail, dims=[\"hour\"], coords={\"hour\": hours})\n", "\n", "solar_gen = gen.sel(tech=\"solar\")\n", - "m3.add_constraints(solar_gen <= solar_availability, name=\"solar_avail\")" + "m4.add_constraints(solar_gen <= 100 * solar_availability, name=\"solar_avail\")" ] }, { "cell_type": "markdown", "metadata": {}, - "source": "Now suppose a minimum demand of 120 MW must be met, but only during peak hours (8–20). 
The demand array covers a subset of hours, so we use ``join=\"inner\"`` to restrict the constraint to just those hours:" + "source": [ + "Peak demand of 120 MW must be met only during hours 8-20. The demand array covers a subset of hours. Use `.ge()` with `join=\"inner\"` to restrict the constraint to just those hours:" + ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T07:55:08.550668Z", + "start_time": "2026-02-20T07:55:08.537649Z" + } + }, "outputs": [], "source": [ "peak_hours = pd.RangeIndex(8, 21, name=\"hour\")\n", @@ -438,49 +594,59 @@ ")\n", "\n", "total_gen = gen.sum(\"tech\")\n", - "m3.add_constraints(total_gen.ge(peak_demand, join=\"inner\"), name=\"peak_demand\")" + "\n", + "# Constraint only at peak hours (intersection)\n", + "m4.add_constraints(total_gen.ge(peak_demand, join=\"inner\"), name=\"peak_demand\")" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "The demand constraint only applies during peak hours (8–20). Outside that range, no minimum generation is required." 
- }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Summary\n", "\n", - "| ``join`` | Coordinates | Fill behavior |\n", - "|----------|------------|---------------|\n", - "| ``None`` (default) | Auto-detect (keeps superset) | Zeros for arithmetic, NaN for constraint RHS |\n", - "| ``\"inner\"`` | Intersection only | No fill needed |\n", - "| ``\"outer\"`` | Union | Fill with operation identity (0 for add, 0 for mul) |\n", - "| ``\"left\"`` | Left operand's | Fill right with identity |\n", - "| ``\"right\"`` | Right operand's | Fill left with identity |\n", - "| ``\"override\"`` | Left operand's (positional) | Positional alignment, ignore labels |\n", - "| ``\"exact\"`` | Must match exactly | Raises error if different |" + "### Default Behavior\n", + "\n", + "| Context | Default `join` | Behavior |\n", + "|---------|---------------|----------|\n", + "| Arithmetic operators (`+`, `-`) | `\"exact\"` | Coordinates must match; raises `ValueError` on mismatch |\n", + "| Arithmetic operators (`*`, `/`) | `\"inner\"` | Intersection of coordinates; no fill needed |\n", + "| Constraint operators (`<=`, `>=`, `==`) with DataArray RHS | `\"exact\"` | Coordinates must match; raises `ValueError` on mismatch |\n", + "\n", + "### All Join Modes\n", + "\n", + "| `join` | Coordinates | Fill behavior |\n", + "|--------|------------|---------------|\n", + "| `\"exact\"` (default for `+`, `-`, constraints) | Must match exactly | Raises `ValueError` if different |\n", + "| `\"inner\"` (default for `*`, `/`) | Intersection only | No fill needed |\n", + "| `\"outer\"` | Union | Fill with zero (arithmetic) or `NaN` (constraint RHS) |\n", + "| `\"left\"` | Left operand's | Fill right with zero (arithmetic) or `NaN` (constraint RHS) |\n", + "| `\"right\"` | Right operand's | Fill left with zero |\n", + "| `\"override\"` | Left operand's (positional) | Positional alignment, ignores coordinate labels |\n", + "\n", + "### Quick Reference\n", + "\n", + "| Operation | Matching coords 
| Mismatched coords |\n", + "|-----------|----------------|-------------------|\n", + "| `x + y` | Works | `ValueError` |\n", + "| `x * factor` | Works | Intersection |\n", + "| `x.add(y, join=\"inner\")` | Works | Intersection |\n", + "| `x.add(y, join=\"outer\")` | Works | Union with fill |\n", + "| `x <= rhs` (DataArray) | Works | `ValueError` |\n", + "| `x.le(rhs, join=\"inner\")` | Works | Intersection |\n", + "| `x.le(rhs, join=\"left\")` | Works | Left coords, NaN fill |" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" + "version": "3.11.0" } }, "nbformat": 4, From 076f29ba0aea9dc3c2e6230b01830d04e07daab9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 20 Feb 2026 09:59:17 +0100 Subject: [PATCH 13/66] Implement consistent coordinate alignment for linopy operations Use "exact" join for +/- (raises ValueError on mismatch), "inner" join for *// (intersection), and "exact" for constraint DataArray RHS. Named methods (.add(), .sub(), .mul(), .div(), .le(), .ge(), .eq()) accept explicit join= parameter as escape hatch. 
- Remove shape-dependent "override" heuristic from merge() and _align_constant() - Add join parameter support to to_constraint() for DataArray RHS - Forbid extra dimensions on constraint RHS - Update tests with structured raise-then-recover pattern - Update coordinate-alignment notebook with examples and migration guide Co-Authored-By: Claude Opus 4.6 --- examples/coordinate-alignment.ipynb | 464 +++++++++++++++++++++++----- linopy/expressions.py | 147 ++++++--- linopy/variables.py | 5 +- test/test_linear_expression.py | 451 ++++++++++++++------------- test/test_optimization.py | 4 +- 5 files changed, 732 insertions(+), 339 deletions(-) diff --git a/examples/coordinate-alignment.ipynb b/examples/coordinate-alignment.ipynb index e440d467..b92b084b 100644 --- a/examples/coordinate-alignment.ipynb +++ b/examples/coordinate-alignment.ipynb @@ -18,7 +18,7 @@ "\n", "When you need to combine operands with mismatched coordinates, use the named methods (`.add()`, `.sub()`, `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`) with an explicit `join=` parameter.\n", "\n", - "This convention is inspired by [pyoframe](https://github.com/Bravos-Power/pyoframe) and [xarray's planned direction](https://github.com/pydata/xarray/issues/8527) toward stricter alignment." + "This convention is inspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." 
] }, { @@ -26,8 +26,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.191791Z", - "start_time": "2026-02-20T07:55:08.189675Z" + "end_time": "2026-02-20T08:34:40.590547Z", + "start_time": "2026-02-20T08:34:40.580957Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:10.188745Z", + "iopub.status.busy": "2026-02-20T08:31:10.188638Z", + "iopub.status.idle": "2026-02-20T08:31:11.700268Z", + "shell.execute_reply": "2026-02-20T08:31:11.700023Z" } }, "outputs": [], @@ -53,8 +59,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.219582Z", - "start_time": "2026-02-20T07:55:08.205251Z" + "end_time": "2026-02-20T08:34:40.620850Z", + "start_time": "2026-02-20T08:34:40.599526Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.701873Z", + "iopub.status.busy": "2026-02-20T08:31:11.701711Z", + "iopub.status.idle": "2026-02-20T08:31:11.760554Z", + "shell.execute_reply": "2026-02-20T08:31:11.760331Z" } }, "outputs": [], @@ -74,8 +86,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.229459Z", - "start_time": "2026-02-20T07:55:08.224390Z" + "end_time": "2026-02-20T08:34:40.633254Z", + "start_time": "2026-02-20T08:34:40.626281Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.761623Z", + "iopub.status.busy": "2026-02-20T08:31:11.761542Z", + "iopub.status.idle": "2026-02-20T08:31:11.766540Z", + "shell.execute_reply": "2026-02-20T08:31:11.766356Z" } }, "outputs": [], @@ -90,7 +108,9 @@ "source": [ "## Broadcasting (Different Dimensions)\n", "\n", - "Alignment only checks **shared** dimensions. If operands have different dimension names, they expand (broadcast) as in xarray — this is unaffected by the alignment convention." + "Alignment only checks **shared** dimensions. 
If operands have different dimension names, they expand (broadcast) as in xarray — this is unaffected by the alignment convention.\n", + "\n", + "This works in both directions: a constant with extra dimensions expands the expression, and an expression with extra dimensions expands over the constant." ] }, { @@ -98,8 +118,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.240925Z", - "start_time": "2026-02-20T07:55:08.235115Z" + "end_time": "2026-02-20T08:34:40.650251Z", + "start_time": "2026-02-20T08:34:40.639851Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.767547Z", + "iopub.status.busy": "2026-02-20T08:31:11.767487Z", + "iopub.status.idle": "2026-02-20T08:31:11.773127Z", + "shell.execute_reply": "2026-02-20T08:31:11.772954Z" } }, "outputs": [], @@ -111,6 +137,36 @@ "x * cost # -> (time, tech)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T08:34:40.667715Z", + "start_time": "2026-02-20T08:34:40.656983Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.774071Z", + "iopub.status.busy": "2026-02-20T08:31:11.773994Z", + "iopub.status.idle": "2026-02-20T08:31:11.780472Z", + "shell.execute_reply": "2026-02-20T08:31:11.780265Z" + } + }, + "outputs": [], + "source": [ + "# Constant with MORE dimensions than the expression — also broadcasts\n", + "w = m.add_variables(lower=0, coords=[techs], name=\"w\") # dims: (tech,)\n", + "time_profile = xr.DataArray(\n", + " [[1, 2], [3, 4], [5, 6]],\n", + " dims=[\"tech\", \"time\"],\n", + " coords={\"tech\": techs, \"time\": [0, 1]},\n", + ")\n", + "\n", + "# w has dim \"tech\", time_profile has dims (\"tech\", \"time\")\n", + "# \"time\" is extra — it expands the expression via broadcasting\n", + "w + time_profile # -> (tech, time)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -125,8 +181,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - 
"end_time": "2026-02-20T07:55:08.256128Z", - "start_time": "2026-02-20T07:55:08.246697Z" + "end_time": "2026-02-20T08:34:40.683657Z", + "start_time": "2026-02-20T08:34:40.673488Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.781625Z", + "iopub.status.busy": "2026-02-20T08:31:11.781557Z", + "iopub.status.idle": "2026-02-20T08:31:11.786621Z", + "shell.execute_reply": "2026-02-20T08:31:11.786442Z" } }, "outputs": [], @@ -146,8 +208,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.264875Z", - "start_time": "2026-02-20T07:55:08.259746Z" + "end_time": "2026-02-20T08:34:40.698177Z", + "start_time": "2026-02-20T08:34:40.691406Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.787505Z", + "iopub.status.busy": "2026-02-20T08:31:11.787448Z", + "iopub.status.idle": "2026-02-20T08:31:11.790477Z", + "shell.execute_reply": "2026-02-20T08:31:11.790298Z" } }, "outputs": [], @@ -175,8 +243,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.275571Z", - "start_time": "2026-02-20T07:55:08.270271Z" + "end_time": "2026-02-20T08:34:40.722403Z", + "start_time": "2026-02-20T08:34:40.704702Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.791396Z", + "iopub.status.busy": "2026-02-20T08:31:11.791334Z", + "iopub.status.idle": "2026-02-20T08:31:11.796458Z", + "shell.execute_reply": "2026-02-20T08:31:11.796262Z" } }, "outputs": [], @@ -193,8 +267,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.286941Z", - "start_time": "2026-02-20T07:55:08.280454Z" + "end_time": "2026-02-20T08:34:40.750336Z", + "start_time": "2026-02-20T08:34:40.739583Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.797412Z", + "iopub.status.busy": "2026-02-20T08:31:11.797355Z", + "iopub.status.idle": "2026-02-20T08:31:11.803105Z", + "shell.execute_reply": "2026-02-20T08:31:11.802861Z" } }, "outputs": 
[], @@ -229,8 +309,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.296398Z", - "start_time": "2026-02-20T07:55:08.289560Z" + "end_time": "2026-02-20T08:34:40.770327Z", + "start_time": "2026-02-20T08:34:40.762873Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.804161Z", + "iopub.status.busy": "2026-02-20T08:31:11.804100Z", + "iopub.status.idle": "2026-02-20T08:31:11.807917Z", + "shell.execute_reply": "2026-02-20T08:31:11.807731Z" } }, "outputs": [], @@ -258,8 +344,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.309658Z", - "start_time": "2026-02-20T07:55:08.300849Z" + "end_time": "2026-02-20T08:34:40.785505Z", + "start_time": "2026-02-20T08:34:40.775987Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.808856Z", + "iopub.status.busy": "2026-02-20T08:31:11.808774Z", + "iopub.status.idle": "2026-02-20T08:31:11.815876Z", + "shell.execute_reply": "2026-02-20T08:31:11.815678Z" } }, "outputs": [], @@ -279,8 +371,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.327236Z", - "start_time": "2026-02-20T07:55:08.318480Z" + "end_time": "2026-02-20T08:34:40.811388Z", + "start_time": "2026-02-20T08:34:40.797806Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.816893Z", + "iopub.status.busy": "2026-02-20T08:31:11.816817Z", + "iopub.status.idle": "2026-02-20T08:31:11.824433Z", + "shell.execute_reply": "2026-02-20T08:31:11.824155Z" } }, "outputs": [], @@ -300,8 +398,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.345463Z", - "start_time": "2026-02-20T07:55:08.337188Z" + "end_time": "2026-02-20T08:34:40.834416Z", + "start_time": "2026-02-20T08:34:40.823515Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.825622Z", + "iopub.status.busy": "2026-02-20T08:31:11.825544Z", + "iopub.status.idle": 
"2026-02-20T08:31:11.832608Z", + "shell.execute_reply": "2026-02-20T08:31:11.832423Z" } }, "outputs": [], @@ -321,8 +425,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.363658Z", - "start_time": "2026-02-20T07:55:08.354475Z" + "end_time": "2026-02-20T08:34:40.854542Z", + "start_time": "2026-02-20T08:34:40.841131Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.833545Z", + "iopub.status.busy": "2026-02-20T08:31:11.833490Z", + "iopub.status.idle": "2026-02-20T08:31:11.840073Z", + "shell.execute_reply": "2026-02-20T08:31:11.839884Z" } }, "outputs": [], @@ -342,8 +452,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.383357Z", - "start_time": "2026-02-20T07:55:08.372963Z" + "end_time": "2026-02-20T08:34:40.872890Z", + "start_time": "2026-02-20T08:34:40.862894Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.841049Z", + "iopub.status.busy": "2026-02-20T08:31:11.840991Z", + "iopub.status.idle": "2026-02-20T08:31:11.847135Z", + "shell.execute_reply": "2026-02-20T08:31:11.846968Z" } }, "outputs": [], @@ -365,8 +481,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.396019Z", - "start_time": "2026-02-20T07:55:08.390987Z" + "end_time": "2026-02-20T08:34:40.899679Z", + "start_time": "2026-02-20T08:34:40.889148Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.848157Z", + "iopub.status.busy": "2026-02-20T08:31:11.848101Z", + "iopub.status.idle": "2026-02-20T08:31:11.852887Z", + "shell.execute_reply": "2026-02-20T08:31:11.852713Z" } }, "outputs": [], @@ -378,29 +500,29 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": { "ExecuteTime": { "end_time": "2026-02-20T07:55:08.411255Z", "start_time": "2026-02-20T07:55:08.404219Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:23:53.301013Z", + "iopub.status.busy": 
"2026-02-20T08:23:53.300958Z", + "iopub.status.idle": "2026-02-20T08:23:53.305201Z", + "shell.execute_reply": "2026-02-20T08:23:53.305026Z" } }, - "outputs": [], - "source": [ - "# Left join — keep a's coordinates, fill missing factor with 0\n", - "a.mul(const, join=\"left\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, "source": [ "## Constraints with DataArray RHS\n", "\n", - "Constraint operators (`<=`, `>=`, `==`) with a DataArray right-hand side also default to `\"exact\"` — coordinates must match. Use `.le()`, `.ge()`, `.eq()` with `join=` to control alignment.\n", + "Constraint operators (`<=`, `>=`, `==`) with a DataArray right-hand side also default to `\"exact\"` — coordinates on shared dimensions must match. Use `.le()`, `.ge()`, `.eq()` with `join=` to control alignment.\n", "\n", - "The RHS may have **fewer** dimensions than the expression (broadcasting), but **not more**. The expression defines the problem structure; the RHS provides bounds within that structure." + "**Dimension rules for constraint RHS:**\n", + "- The RHS may have **fewer** dimensions than the expression — the bound broadcasts. This is the standard way to apply a per-tech capacity across all time steps.\n", + "- The RHS must **not** have **more** dimensions than the expression. An expression with `dims=(tech,)` defines one variable per tech; an RHS with `dims=(time, tech)` would create redundant constraints on the same variable, which is almost always a mistake.\n", + "\n", + "Note: this is different from arithmetic, where a constant with extra dims freely expands the expression. For constraints, the expression defines the problem structure." 
] }, { @@ -408,8 +530,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.435280Z", - "start_time": "2026-02-20T07:55:08.419069Z" + "end_time": "2026-02-20T08:34:40.925963Z", + "start_time": "2026-02-20T08:34:40.907028Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.853850Z", + "iopub.status.busy": "2026-02-20T08:31:11.853793Z", + "iopub.status.idle": "2026-02-20T08:31:11.866027Z", + "shell.execute_reply": "2026-02-20T08:31:11.865850Z" } }, "outputs": [], @@ -431,8 +559,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.449885Z", - "start_time": "2026-02-20T07:55:08.443051Z" + "end_time": "2026-02-20T08:34:40.949905Z", + "start_time": "2026-02-20T08:34:40.939482Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.866958Z", + "iopub.status.busy": "2026-02-20T08:31:11.866899Z", + "iopub.status.idle": "2026-02-20T08:31:11.871115Z", + "shell.execute_reply": "2026-02-20T08:31:11.870937Z" } }, "outputs": [], @@ -448,8 +582,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.461738Z", - "start_time": "2026-02-20T07:55:08.456633Z" + "end_time": "2026-02-20T08:34:40.970277Z", + "start_time": "2026-02-20T08:34:40.961415Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.872062Z", + "iopub.status.busy": "2026-02-20T08:31:11.872004Z", + "iopub.status.idle": "2026-02-20T08:31:11.874934Z", + "shell.execute_reply": "2026-02-20T08:31:11.874765Z" } }, "outputs": [], @@ -468,8 +608,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.470943Z", - "start_time": "2026-02-20T07:55:08.464365Z" + "end_time": "2026-02-20T08:34:41.006270Z", + "start_time": "2026-02-20T08:34:40.997162Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.875820Z", + "iopub.status.busy": "2026-02-20T08:31:11.875763Z", + "iopub.status.idle": 
"2026-02-20T08:31:11.879949Z", + "shell.execute_reply": "2026-02-20T08:31:11.879781Z" } }, "outputs": [], @@ -483,8 +629,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.485672Z", - "start_time": "2026-02-20T07:55:08.478220Z" + "end_time": "2026-02-20T08:34:41.036419Z", + "start_time": "2026-02-20T08:34:41.022644Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.880853Z", + "iopub.status.busy": "2026-02-20T08:31:11.880793Z", + "iopub.status.idle": "2026-02-20T08:31:11.884663Z", + "shell.execute_reply": "2026-02-20T08:31:11.884503Z" } }, "outputs": [], @@ -494,6 +646,35 @@ "x.to_linexpr().le(partial_rhs, join=\"left\")" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T08:34:41.053285Z", + "start_time": "2026-02-20T08:34:41.043483Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.885511Z", + "iopub.status.busy": "2026-02-20T08:31:11.885453Z", + "iopub.status.idle": "2026-02-20T08:31:11.889326Z", + "shell.execute_reply": "2026-02-20T08:31:11.889148Z" + } + }, + "outputs": [], + "source": [ + "# RHS with MORE dimensions than expression — raises ValueError\n", + "y_tech = m.add_variables(lower=0, coords=[techs], name=\"y_tech\") # dims: (tech,)\n", + "rhs_extra_dims = xr.DataArray(\n", + " np.ones((5, 3)), dims=[\"time\", \"tech\"], coords={\"time\": time, \"tech\": techs}\n", + ")\n", + "\n", + "try:\n", + " y_tech <= rhs_extra_dims # \"time\" is not in the expression\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -508,8 +689,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.495969Z", - "start_time": "2026-02-20T07:55:08.492164Z" + "end_time": "2026-02-20T08:34:41.067545Z", + "start_time": "2026-02-20T08:34:41.062532Z" + }, + "execution": { + "iopub.execute_input": 
"2026-02-20T08:31:11.890266Z", + "iopub.status.busy": "2026-02-20T08:31:11.890211Z", + "iopub.status.idle": "2026-02-20T08:31:11.893636Z", + "shell.execute_reply": "2026-02-20T08:31:11.893458Z" } }, "outputs": [], @@ -534,8 +721,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.511065Z", - "start_time": "2026-02-20T07:55:08.499152Z" + "end_time": "2026-02-20T08:34:41.084071Z", + "start_time": "2026-02-20T08:34:41.071733Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.894513Z", + "iopub.status.busy": "2026-02-20T08:31:11.894459Z", + "iopub.status.idle": "2026-02-20T08:31:11.904179Z", + "shell.execute_reply": "2026-02-20T08:31:11.904001Z" } }, "outputs": [], @@ -556,8 +749,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.532326Z", - "start_time": "2026-02-20T07:55:08.519001Z" + "end_time": "2026-02-20T08:34:41.104144Z", + "start_time": "2026-02-20T08:34:41.091479Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.905131Z", + "iopub.status.busy": "2026-02-20T08:31:11.905072Z", + "iopub.status.idle": "2026-02-20T08:31:11.914430Z", + "shell.execute_reply": "2026-02-20T08:31:11.914242Z" } }, "outputs": [], @@ -573,17 +772,21 @@ { "cell_type": "markdown", "metadata": {}, - "source": [ - "Peak demand of 120 MW must be met only during hours 8-20. The demand array covers a subset of hours. Use `.ge()` with `join=\"inner\"` to restrict the constraint to just those hours:" - ] + "source": "Peak demand of 120 MW must be met only during hours 8-20. The demand array covers a subset of hours. 
Use `.ge()` with `join=\"inner\"` to restrict the constraint to just those hours:" }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.550668Z", - "start_time": "2026-02-20T07:55:08.537649Z" + "end_time": "2026-02-20T08:34:41.122157Z", + "start_time": "2026-02-20T08:34:41.107939Z" + }, + "execution": { + "iopub.execute_input": "2026-02-20T08:31:11.915441Z", + "iopub.status.busy": "2026-02-20T08:31:11.915362Z", + "iopub.status.idle": "2026-02-20T08:31:11.925960Z", + "shell.execute_reply": "2026-02-20T08:31:11.925775Z" } }, "outputs": [], @@ -599,6 +802,112 @@ "m4.add_constraints(total_gen.ge(peak_demand, join=\"inner\"), name=\"peak_demand\")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "Selecting the correct subset of the variable produces the same result, and is arguably more readable:" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-20T08:37:39.113259Z", + "start_time": "2026-02-20T08:37:39.065983Z" + } + }, + "outputs": [], + "source": [ + "# Constraint only at peak hours (intersection)\n", + "m4.add_constraints(\n", + " total_gen.sel(hour=peak_hours) >= peak_demand, name=\"peak_demand_sel\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Migrating from Previous Versions\n", + "\n", + "Previous versions of linopy used a **shape-dependent heuristic** for coordinate alignment. 
The behavior depended on whether operands happened to have the same shape, and was inconsistent between `Variable` and `LinearExpression`:\n", + "\n", + "| Condition | Old behavior | New behavior |\n", + "|-----------|-------------|-------------|\n", + "| Same shape, same coordinates | Works correctly | Works correctly (no change) |\n", + "| Same shape, **different** coordinates, `+`/`-` | `\"override\"` — positional alignment (**bug-prone**) | `\"exact\"` — raises `ValueError` |\n", + "| Same shape, **different** coordinates, `*`/`/` | Buggy (crashes or produces garbage) | `\"inner\"` — intersection |\n", + "| Different shape, expr + expr | `\"outer\"` — union of coordinates | `\"exact\"` — raises `ValueError` |\n", + "| Different shape, expr + constant | `\"left\"` — keeps expression coords, fills missing with 0 | `\"exact\"` — raises `ValueError` |\n", + "| Different shape, expr * constant | Buggy (crashes for `LinearExpression`, produces garbage for `Variable`) | `\"inner\"` — intersection |\n", + "| Constraint with mismatched DataArray RHS | Same-shape: `\"override\"` (positional); different-shape: `\"left\"` (fills missing RHS with 0) | `\"exact\"` — raises `ValueError` |\n", + "\n", + "### Why the change?\n", + "\n", + "The old heuristic caused several classes of bugs:\n", + "\n", + "1. **Silent positional alignment**: When two operands happened to have the same shape but entirely different coordinates (e.g., `x(time=[0,1,2]) + z(time=[5,6,7])`), they were matched by position — giving a wrong result with no warning.\n", + "\n", + "2. **Non-associative addition**: `(y + factor) + x` could give a different result than `y + (x + factor)` because `\"left\"` for expr+constant dropped the constant's extra coordinates before they could be recovered by a subsequent addition.\n", + "\n", + "3. **Broken multiplication**: Multiplying a `LinearExpression` by a DataArray with mismatched coordinates would crash with an `AssertionError`. 
Multiplying a `Variable` by such a DataArray produced a result with misaligned coefficients and variable references.\n", + "\n", + "### How to update your code\n", + "\n", + "If your code combines operands with **mismatched coordinates** and you relied on the old behavior, you'll now get a `ValueError` (for `+`/`-`) or a smaller result (for `*`/`/`). Here's how to migrate:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Addition with mismatched coordinates** — expr+expr previously used `\"outer\"`, expr+constant used `\"left\"`. Both now raise `ValueError`:\n", + "\n", + "```python\n", + "# Old code (worked silently):\n", + "result = x + y_short # different-size expr+expr → was \"outer\"\n", + "result = x + partial_const # expr + constant → was \"left\"\n", + "\n", + "# New code — be explicit about the join:\n", + "result = x.add(y_short, join=\"outer\") # union of coordinates\n", + "result = x.add(partial_const, join=\"left\") # keep x's coordinates, fill 0\n", + "```\n", + "\n", + "**Same-shape but different coordinates** — previously matched by position (`\"override\"`) for addition. Now raises `ValueError` for `+`/`-`, gives intersection for `*`/`/`:\n", + "\n", + "```python\n", + "# Old code (silently matched positions — likely a bug!):\n", + "x_abc = m.add_variables(coords=[[\"a\", \"b\", \"c\"]], name=\"x_abc\")\n", + "y_def = m.add_variables(coords=[[\"d\", \"e\", \"f\"]], name=\"y_def\")\n", + "result = x_abc + y_def # Old: positional match → New: ValueError\n", + "\n", + "# If you really want positional matching (rare):\n", + "result = x_abc.add(y_def, join=\"override\")\n", + "```\n", + "\n", + "**Multiplication with mismatched coordinates** — previously broken (crash or garbage). 
Now uses `\"inner\"` (intersection):\n", + "\n", + "```python\n", + "# Old code — would crash (LinExpr) or produce garbage (Variable):\n", + "x * partial_factor # x has 5 coords, partial_factor has 3\n", + "\n", + "# New code — result has 3 entries (intersection). This now works correctly!\n", + "# If you need to keep all of x's coordinates (zero-fill missing):\n", + "x.mul(partial_factor, join=\"left\")\n", + "```\n", + "\n", + "**Constraints with mismatched DataArray RHS** — previously used positional alignment (same shape) or `\"left\"` with 0-fill (different shape). Now raises `ValueError`:\n", + "\n", + "```python\n", + "# Old code:\n", + "con = x <= partial_rhs # Old: \"left\" (fill 0) or \"override\" → New: ValueError\n", + "\n", + "# New code — be explicit:\n", + "con = x.to_linexpr().le(partial_rhs, join=\"left\") # keep x's coords, NaN fill\n", + "con = x.to_linexpr().le(partial_rhs, join=\"inner\") # intersection only\n", + "```" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -609,9 +918,16 @@ "\n", "| Context | Default `join` | Behavior |\n", "|---------|---------------|----------|\n", - "| Arithmetic operators (`+`, `-`) | `\"exact\"` | Coordinates must match; raises `ValueError` on mismatch |\n", - "| Arithmetic operators (`*`, `/`) | `\"inner\"` | Intersection of coordinates; no fill needed |\n", - "| Constraint operators (`<=`, `>=`, `==`) with DataArray RHS | `\"exact\"` | Coordinates must match; raises `ValueError` on mismatch |\n", + "| Arithmetic operators (`+`, `-`) | `\"exact\"` | Coordinates must match on shared dims; raises `ValueError` on mismatch |\n", + "| Arithmetic operators (`*`, `/`) | `\"inner\"` | Intersection of coordinates on shared dims; no fill needed |\n", + "| Constraint operators (`<=`, `>=`, `==`) with DataArray RHS | `\"exact\"` | Coordinates must match on shared dims; raises `ValueError` on mismatch |\n", + "\n", + "### Extra Dimensions (Broadcasting)\n", + "\n", + "| Context | Extra dims on constant/RHS | Extra 
dims on expression |\n", + "|---------|--------------------------|------------------------|\n", + "| Arithmetic (`+`, `-`, `*`, `/`) | Expands the expression (standard xarray broadcast) | Expands over the constant |\n", + "| Constraint RHS (`<=`, `>=`, `==`) | **Forbidden** — raises `ValueError` | RHS broadcasts over expression's extra dims |\n", "\n", "### All Join Modes\n", "\n", @@ -645,8 +961,16 @@ "name": "python3" }, "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", "name": "python", - "version": "3.11.0" + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" } }, "nbformat": 4, diff --git a/linopy/expressions.py b/linopy/expressions.py index e1fbe1a9..cd172a8a 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -48,7 +48,6 @@ LocIndexer, as_dataarray, assign_multiindex_safe, - check_common_keys_values, check_has_nulls, check_has_nulls_polars, fill_missing_coords, @@ -528,6 +527,7 @@ def _align_constant( other: DataArray, fill_value: float = 0, join: str | None = None, + default_join: str = "exact", ) -> tuple[DataArray, DataArray, bool]: """ Align a constant DataArray with self.const. @@ -539,7 +539,10 @@ def _align_constant( fill_value : float, default: 0 Fill value for missing coordinates. join : str, optional - Alignment method. If None, uses size-aware default behavior. + Alignment method. If None, uses default_join. + default_join : str, default: "exact" + Default join mode when join is None. Use "exact" for add/sub, + "inner" for mul/div. Returns ------- @@ -551,19 +554,32 @@ def _align_constant( Whether the expression's data needs reindexing. 
""" if join is None: - if other.sizes == self.const.sizes: - return self.const, other.assign_coords(coords=self.coords), False + join = default_join + + if join == "override": + return self.const, other.assign_coords(coords=self.coords), False + elif join == "left": return ( self.const, other.reindex_like(self.const, fill_value=fill_value), False, ) - elif join == "override": - return self.const, other.assign_coords(coords=self.coords), False else: - self_const, aligned = xr.align( - self.const, other, join=join, fill_value=fill_value - ) + try: + self_const, aligned = xr.align( + self.const, other, join=join, fill_value=fill_value + ) + except ValueError as e: + if "exact" in str(e): + raise ValueError( + f"{e}\n" + "Use .add()/.sub()/.mul()/.div() with an explicit join= parameter:\n" + ' .add(other, join="inner") # intersection of coordinates\n' + ' .add(other, join="outer") # union of coordinates (with fill)\n' + ' .add(other, join="left") # keep left operand\'s coordinates\n' + ' .add(other, join="override") # positional alignment' + ) from None + raise return self_const, aligned, True def _add_constant( @@ -573,7 +589,7 @@ def _add_constant( return self.assign(const=self.const + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, da, needs_data_reindex = self._align_constant( - da, fill_value=0, join=join + da, fill_value=0, join=join, default_join="exact" ) if needs_data_reindex: return self.__class__( @@ -593,7 +609,7 @@ def _apply_constant_op( ) -> GenericExpression: factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, factor, needs_data_reindex = self._align_constant( - factor, fill_value=fill_value, join=join + factor, fill_value=fill_value, join=join, default_join="inner" ) if needs_data_reindex: data = self.data.reindex_like(self_const, fill_value=self._fill_value) @@ -1082,7 +1098,40 @@ def to_constraint( f"RHS DataArray has dimensions {extra_dims} not present " f"in the expression. 
Cannot create constraint." ) - rhs = rhs.reindex_like(self.const, fill_value=np.nan) + effective_join = join if join is not None else "exact" + if effective_join == "override": + aligned_rhs = rhs.assign_coords(coords=self.const.coords) + expr_const = self.const + expr_data = self.data + elif effective_join == "left": + aligned_rhs = rhs.reindex_like(self.const, fill_value=np.nan) + expr_const = self.const + expr_data = self.data + else: + try: + expr_const_aligned, aligned_rhs = xr.align( + self.const, rhs, join=effective_join, fill_value=np.nan + ) + except ValueError as e: + if "exact" in str(e): + raise ValueError( + f"{e}\n" + "Use .le()/.ge()/.eq() with an explicit join= parameter:\n" + ' .le(rhs, join="inner") # intersection of coordinates\n' + ' .le(rhs, join="left") # keep expression coordinates (NaN fill)\n' + ' .le(rhs, join="override") # positional alignment' + ) from None + raise + expr_const = expr_const_aligned.fillna(0) + expr_data = self.data.reindex_like( + expr_const_aligned, fill_value=self._fill_value + ) + aligned_rhs = aligned_rhs + constraint_rhs = aligned_rhs - expr_const + data = assign_multiindex_safe( + expr_data[["coeffs", "vars"]], sign=sign, rhs=constraint_rhs + ) + return constraints.Constraint(data, model=self.model) elif isinstance(rhs, np.ndarray | pd.Series | pd.DataFrame) and rhs.ndim > len( self.coord_dims ): @@ -2320,16 +2369,6 @@ def merge( model = exprs[0].model - if join is not None: - override = join == "override" - elif cls in linopy_types and dim in HELPER_DIMS: - coord_dims = [ - {k: v for k, v in e.sizes.items() if k not in HELPER_DIMS} for e in exprs - ] - override = check_common_keys_values(coord_dims) # type: ignore - else: - override = False - data = [e.data if isinstance(e, linopy_types) else e for e in exprs] data = [fill_missing_coords(ds, fill_helper_dims=True) for ds in data] @@ -2345,23 +2384,55 @@ def merge( if join is not None: kwargs["join"] = join - elif override: - kwargs["join"] = "override" + elif dim 
== TERM_DIM: + kwargs["join"] = "exact" + elif dim == FACTOR_DIM: + kwargs["join"] = "inner" else: - kwargs.setdefault("join", "outer") - - if dim == TERM_DIM: - ds = xr.concat([d[["coeffs", "vars"]] for d in data], dim, **kwargs) - subkwargs = {**kwargs, "fill_value": 0} - const = xr.concat([d["const"] for d in data], dim, **subkwargs).sum(TERM_DIM) - ds = assign_multiindex_safe(ds, const=const) - elif dim == FACTOR_DIM: - ds = xr.concat([d[["vars"]] for d in data], dim, **kwargs) - coeffs = xr.concat([d["coeffs"] for d in data], dim, **kwargs).prod(FACTOR_DIM) - const = xr.concat([d["const"] for d in data], dim, **kwargs).prod(FACTOR_DIM) - ds = assign_multiindex_safe(ds, coeffs=coeffs, const=const) - else: - ds = xr.concat(data, dim, **kwargs) + kwargs["join"] = "outer" + + try: + if dim == TERM_DIM: + ds = xr.concat([d[["coeffs", "vars"]] for d in data], dim, **kwargs) + subkwargs = {**kwargs, "fill_value": 0} + const = xr.concat([d["const"] for d in data], dim, **subkwargs).sum( + TERM_DIM + ) + ds = assign_multiindex_safe(ds, const=const) + elif dim == FACTOR_DIM: + ds = xr.concat([d[["vars"]] for d in data], dim, **kwargs) + coeffs = xr.concat([d["coeffs"] for d in data], dim, **kwargs).prod( + FACTOR_DIM + ) + const = xr.concat([d["const"] for d in data], dim, **kwargs).prod( + FACTOR_DIM + ) + ds = assign_multiindex_safe(ds, coeffs=coeffs, const=const) + else: + # Pre-pad helper dims to same size before concat + fill = kwargs.get("fill_value", FILL_VALUE) + for helper_dim in HELPER_DIMS: + sizes = [d.sizes.get(helper_dim, 0) for d in data] + max_size = max(sizes) if sizes else 0 + if max_size > 0 and min(sizes) < max_size: + data = [ + d.reindex({helper_dim: range(max_size)}, fill_value=fill) + if d.sizes.get(helper_dim, 0) < max_size + else d + for d in data + ] + ds = xr.concat(data, dim, **kwargs) + except ValueError as e: + if "exact" in str(e): + raise ValueError( + f"{e}\n" + "Use .add()/.sub()/.mul()/.div() with an explicit join= parameter:\n" + ' 
.add(other, join="inner")   # intersection of coordinates\n' + '    .add(other, join="outer")   # union of coordinates (with fill)\n' + '    .add(other, join="left")    # keep left operand\'s coordinates\n' + '    .add(other, join="override") # positional alignment' ) from None + raise for d in set(HELPER_DIMS) & set(ds.coords): ds = ds.reset_index(d, drop=True) diff --git a/linopy/variables.py b/linopy/variables.py index 0eea6634..274344a1 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -400,8 +400,9 @@ def __mul__(self, other: SideLike) -> ExpressionLike: try: if isinstance(other, Variable | ScalarVariable): return self.to_linexpr() * other - - return self.to_linexpr(other) + if isinstance(other, expressions.LinearExpression): + return self.to_linexpr() * other + return self.to_linexpr()._multiply_by_constant(other) except TypeError: return NotImplemented diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 2af1a8ea..d77bd00f 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -7,8 +7,6 @@ from __future__ import annotations -from typing import Any - import numpy as np import pandas as pd import polars as pl @@ -443,8 +441,12 @@ def test_linear_expression_sum( assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - # test special case otherride coords - expr = v.loc[:9] + v.loc[10:] + # disjoint coords now raise with exact default + with pytest.raises(ValueError, match="exact"): + v.loc[:9] + v.loc[10:] + + # explicit override join aligns coords positionally + expr = v.loc[:9].add(v.loc[10:], join="override") assert expr.nterm == 2 assert len(expr.coords["dim_2"]) == 10 @@ -467,8 +469,12 @@ def test_linear_expression_sum_with_const( assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - # disjoint coords now raise with exact default + with pytest.raises(ValueError, match="exact"): + v.loc[:9] + v.loc[10:] + + # explicit outer 
join gives union + expr = v.loc[:9].add(v.loc[10:], join="override") assert expr.nterm == 2 assert len(expr.coords["dim_2"]) == 10 @@ -577,7 +583,18 @@ def test_linear_expression_multiplication_invalid( expr / x -class TestSubsetCoordinateAlignment: +class TestExactAlignmentDefault: + """ + Test the new alignment convention: exact for +/-, inner for *//. + + v has dim_2=[0..19] (20 entries). + subset has dim_2=[1, 3] (2 entries, subset of v's coords). + superset has dim_2=[0..24] (25 entries, superset of v's coords). + + Each test shows the operation, verifies the new behavior (raises or + intersection), then shows the explicit join= that recovers the old result. + """ + @pytest.fixture def subset(self) -> xr.DataArray: return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) @@ -588,298 +605,293 @@ def superset(self) -> xr.DataArray: np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} ) + @pytest.fixture + def matching(self) -> xr.DataArray: + return xr.DataArray( + np.arange(20, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(20)}, + ) + @pytest.fixture def expected_fill(self) -> np.ndarray: + """Old expected result: 20-entry array with values at positions 1,3.""" arr = np.zeros(20) arr[1] = 10.0 arr[3] = 30.0 return arr - def test_var_mul_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - result = v * subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - def test_expr_mul_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - expr = 1 * v - result = expr * subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - @pytest.mark.parametrize( - "make_lhs,make_rhs", - [ - 
(lambda v, s: s * v, lambda v, s: v * s), - (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), - (lambda v, s: s + v, lambda v, s: v + s), - (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), - ], - ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], - ) - def test_commutativity( - self, v: Variable, subset: xr.DataArray, make_lhs: Any, make_rhs: Any - ) -> None: - assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) + # --- Addition / subtraction with subset constant --- def test_var_add_subset( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray ) -> None: - result = v + subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() + # now raises + with pytest.raises(ValueError, match="exact"): + v + subset + + # explicit join="left" recovers old behavior: 20 entries, fill 0 + result = v.add(subset, join="left") + assert result.sizes["dim_2"] == 20 np.testing.assert_array_equal(result.const.values, expected_fill) def test_var_sub_subset( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray ) -> None: - result = v - subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, -expected_fill) + with pytest.raises(ValueError, match="exact"): + v - subset - def test_subset_sub_var(self, v: Variable, subset: xr.DataArray) -> None: - assert_linequal(subset - v, -v + subset) + result = v.sub(subset, join="left") + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal(result.const.values, -expected_fill) def test_expr_add_subset( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray ) -> None: - expr = v + 5 - result = expr + subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() + with pytest.raises(ValueError, match="exact"): + (v + 5) + subset + + result = (v + 5).add(subset, join="left") + assert 
result.sizes["dim_2"] == 20 np.testing.assert_array_equal(result.const.values, expected_fill + 5) - def test_expr_sub_subset( + # --- Addition with superset constant --- + + def test_var_add_superset(self, v: Variable, superset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + v + superset + + result = v.add(superset, join="left") + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.const.values).any() + + # --- Addition / multiplication with disjoint coords --- + + def test_disjoint_add(self, v: Variable) -> None: + disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + with pytest.raises(ValueError, match="exact"): + v + disjoint + + result = v.add(disjoint, join="outer") + assert result.sizes["dim_2"] == 22 # union of [0..19] and [50, 60] + + def test_disjoint_mul(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + # inner join: no intersection → empty + result = v * disjoint + assert result.sizes["dim_2"] == 0 + + # explicit join="left": 20 entries, all zeros + result = v.mul(disjoint, join="left") + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) + + def test_disjoint_div(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v / disjoint + assert result.sizes["dim_2"] == 0 + + # --- Multiplication / division with subset constant --- + + def test_var_mul_subset( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray ) -> None: - expr = v + 5 - result = expr - subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, 5 - expected_fill) + # inner join: 2 entries (intersection) + result = v * subset + assert result.sizes["dim_2"] == 2 + assert 
result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(10.0) + assert result.coeffs.squeeze().sel(dim_2=3).item() == pytest.approx(30.0) - def test_subset_sub_expr(self, v: Variable, subset: xr.DataArray) -> None: - expr = v + 5 - assert_linequal(subset - expr, -(expr - subset)) + # explicit join="left" recovers old behavior: 20 entries, fill 0 + result = v.mul(subset, join="left") + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_expr_mul_subset(self, v: Variable, subset: xr.DataArray) -> None: + result = (1 * v) * subset + assert result.sizes["dim_2"] == 2 + assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(10.0) + + def test_var_mul_superset(self, v: Variable, superset: xr.DataArray) -> None: + # inner join: intersection = v's 20 coords + result = v * superset + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.coeffs.values).any() def test_var_div_subset(self, v: Variable, subset: xr.DataArray) -> None: + # inner join: 2 entries result = v / subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() + assert result.sizes["dim_2"] == 2 + assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) + assert result.coeffs.squeeze().sel(dim_2=3).item() == pytest.approx(1.0 / 30) + + # explicit join="left": 20 entries, fill 1 + result = v.div(subset, join="left") + assert result.sizes["dim_2"] == 20 assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + # --- Constraints with subset RHS --- + def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - con = v <= subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert con.rhs.sel(dim_2=3).item() == 30.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) + with pytest.raises(ValueError, 
match="exact"): + v <= subset - @pytest.mark.parametrize("sign", ["<=", ">=", "=="]) - def test_var_comparison_subset( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - if sign == "<=": - con = v <= subset - elif sign == ">=": - con = v >= subset - else: - con = v == subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] + # explicit join="left": 20 entries, NaN where RHS missing + con = v.to_linexpr().le(subset, join="left") + assert con.sizes["dim_2"] == 20 assert con.rhs.sel(dim_2=1).item() == 10.0 + assert con.rhs.sel(dim_2=3).item() == 30.0 assert np.isnan(con.rhs.sel(dim_2=0).item()) def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: expr = v + 5 - con = expr <= subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] + with pytest.raises(ValueError, match="exact"): + expr <= subset + + con = expr.le(subset, join="left") + assert con.sizes["dim_2"] == 20 assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) assert np.isnan(con.rhs.sel(dim_2=0).item()) - def test_add_commutativity_full_coords(self, v: Variable) -> None: - full = xr.DataArray( - np.arange(20, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(20)}, - ) - assert_linequal(v + full, full + v) - - def test_superset_addition_pins_to_lhs( - self, v: Variable, superset: xr.DataArray + @pytest.mark.parametrize("sign", ["<=", ">=", "=="]) + def test_var_comparison_subset( + self, v: Variable, subset: xr.DataArray, sign: str ) -> None: - result = v + superset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - - def test_superset_add_var(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset + v, v + superset) - - def test_superset_sub_var(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset - v, -v + superset) + with pytest.raises(ValueError, match="exact"): + if sign == "<=": + v <= subset + elif 
sign == ">=": + v >= subset + else: + v == subset + + def test_constraint_le_join_inner(self, v: Variable, subset: xr.DataArray) -> None: + con = v.to_linexpr().le(subset, join="inner") + assert con.sizes["dim_2"] == 2 + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert con.rhs.sel(dim_2=3).item() == 30.0 - def test_superset_mul_var(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset * v, v * superset) + # --- Matching coordinates: unchanged behavior --- - @pytest.mark.parametrize("sign", ["<=", ">="]) - def test_superset_comparison_var( - self, v: Variable, superset: xr.DataArray, sign: str - ) -> None: - if sign == "<=": - con = superset <= v - else: - con = superset >= v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(con.lhs.coeffs.values).any() - assert not np.isnan(con.rhs.values).any() - - def test_disjoint_addition_pins_to_lhs(self, v: Variable) -> None: - disjoint = xr.DataArray( - [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v + disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] + def test_add_matching_unchanged(self, v: Variable, matching: xr.DataArray) -> None: + result = v + matching + assert result.sizes["dim_2"] == 20 assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, np.zeros(20)) - def test_expr_div_subset(self, v: Variable, subset: xr.DataArray) -> None: - expr = 1 * v - result = expr / subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) - assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + def test_mul_matching_unchanged(self, v: Variable, matching: xr.DataArray) -> None: + result = v * matching + assert result.sizes["dim_2"] == 20 - def test_subset_add_var_coefficients( - self, v: Variable, subset: xr.DataArray - ) -> None: - result = subset + v - 
np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + def test_le_matching_unchanged(self, v: Variable, matching: xr.DataArray) -> None: + con = v <= matching + assert con.sizes["dim_2"] == 20 - def test_subset_sub_var_coefficients( - self, v: Variable, subset: xr.DataArray + def test_add_commutativity_matching( + self, v: Variable, matching: xr.DataArray ) -> None: - result = subset - v - np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) + assert_linequal(v + matching, matching + v) - @pytest.mark.parametrize("sign", ["<=", ">=", "=="]) - def test_subset_comparison_var( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - if sign == "<=": - con = subset <= v - elif sign == ">=": - con = subset >= v - else: - con = subset == v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert np.isnan(con.rhs.sel(dim_2=0).item()) - assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) + def test_mul_commutativity(self, v: Variable, subset: xr.DataArray) -> None: + assert_linequal(v * subset, subset * v) - def test_superset_mul_pins_to_lhs( - self, v: Variable, superset: xr.DataArray - ) -> None: - result = v * superset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() + # --- Explicit join modes --- - def test_superset_div_pins_to_lhs(self, v: Variable) -> None: - superset_nonzero = xr.DataArray( - np.arange(1, 26, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, + def test_add_join_inner(self, v: Variable, subset: xr.DataArray) -> None: + result = v.add(subset, join="inner") + assert result.sizes["dim_2"] == 2 + assert result.const.sel(dim_2=1).item() == 10.0 + assert result.const.sel(dim_2=3).item() == 30.0 + + def test_add_join_outer(self, v: Variable, subset: xr.DataArray) -> None: + result = v.add(subset, join="outer") + assert result.sizes["dim_2"] == 20 + assert result.const.sel(dim_2=1).item() == 10.0 + assert 
result.const.sel(dim_2=0).item() == 0.0 + + def test_add_join_override(self, v: Variable) -> None: + disjoint = xr.DataArray( + np.ones(20), dims=["dim_2"], coords={"dim_2": range(50, 70)} ) - result = v / superset_nonzero - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() + result = v.add(disjoint, join="override") + assert result.sizes["dim_2"] == 20 + assert list(result.coords["dim_2"].values) == list(range(20)) + + # --- Quadratic expressions --- def test_quadexpr_add_subset( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray ) -> None: qexpr = v * v - result = qexpr + subset - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, expected_fill) + with pytest.raises(ValueError, match="exact"): + qexpr + subset - def test_quadexpr_sub_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - qexpr = v * v - result = qexpr - subset + result = qexpr.add(subset, join="left") assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, -expected_fill) + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal(result.const.values, expected_fill) def test_quadexpr_mul_subset( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray ) -> None: qexpr = v * v + # inner join: 2 entries result = qexpr * subset assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + assert result.sizes["dim_2"] == 2 - def test_subset_mul_quadexpr( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> 
None: - qexpr = v * v - result = subset * qexpr + # explicit join="left": 20 entries + result = qexpr.mul(subset, join="left") assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() + assert result.sizes["dim_2"] == 20 np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: - qexpr = v * v - assert_quadequal(subset + qexpr, qexpr + subset) + # --- Multi-dimensional --- def test_multidim_subset_mul(self, m: Model) -> None: coords_a = pd.RangeIndex(4, name="a") coords_b = pd.RangeIndex(5, name="b") w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( [[2.0, 3.0], [4.0, 5.0]], dims=["a", "b"], coords={"a": [1, 3], "b": [0, 4]}, ) + + # inner join: 2x2 result = w * subset_2d + assert result.sizes["a"] == 2 + assert result.sizes["b"] == 2 + + # explicit join="left": 4x5, zeros at non-subset positions + result = w.mul(subset_2d, join="left") assert result.sizes["a"] == 4 assert result.sizes["b"] == 5 - assert not np.isnan(result.coeffs.values).any() assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) - assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) def test_multidim_subset_add(self, m: Model) -> None: coords_a = pd.RangeIndex(4, name="a") coords_b = pd.RangeIndex(5, name="b") w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( [[2.0, 3.0], [4.0, 5.0]], dims=["a", "b"], coords={"a": [1, 3], "b": [0, 4]}, ) - result = w + subset_2d - assert result.sizes["a"] == 4 - assert result.sizes["b"] == 5 - assert not np.isnan(result.const.values).any() - assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0) - assert 
result.const.sel(a=3, b=4).item() == pytest.approx(5.0) - assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) + + with pytest.raises(ValueError, match="exact"): + w + subset_2d + + # --- Edge cases --- def test_constraint_rhs_extra_dims_raises(self, v: Variable) -> None: rhs = xr.DataArray( @@ -893,24 +905,6 @@ def test_da_truediv_var_raises(self, v: Variable) -> None: with pytest.raises(TypeError): da / v # type: ignore[operator] - def test_disjoint_mul_produces_zeros(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v * disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) - - def test_disjoint_div_preserves_coeffs(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v / disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) - def test_da_eq_da_still_works(self) -> None: da1 = xr.DataArray([1, 2, 3]) da2 = xr.DataArray([1, 2, 3]) @@ -931,7 +925,8 @@ def test_subset_constraint_solve_integration(self) -> None: coords = pd.RangeIndex(5, name="i") x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) - m.add_constraints(x <= subset_ub, name="subset_ub") + # exact default raises — use explicit join="left" (NaN = no constraint) + m.add_constraints(x.to_linexpr().le(subset_ub, join="left"), name="subset_ub") m.add_objective(x.sum(), sense="max") m.solve(solver_name=available_solvers[0]) sol = m.solution["x"] @@ -1789,10 +1784,12 @@ def b(self, m2: Model) -> Variable: def c(self, m2: Model) -> Variable: return m2.variables["c"] - def 
test_add_join_none_preserves_default(self, a: Variable, b: Variable) -> None: - result_default = a.to_linexpr() + b.to_linexpr() - result_none = a.to_linexpr().add(b.to_linexpr(), join=None) - assert_linequal(result_default, result_none) + def test_add_join_none_raises_on_mismatch(self, a: Variable, b: Variable) -> None: + # a has i=[0,1,2], b has i=[1,2,3] — exact default raises + with pytest.raises(ValueError, match="exact"): + a.to_linexpr() + b.to_linexpr() + with pytest.raises(ValueError, match="exact"): + a.to_linexpr().add(b.to_linexpr(), join=None) def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: result = a.to_linexpr().add(b.to_linexpr(), join="inner") @@ -2028,10 +2025,10 @@ def test_quadratic_add_constant_join_inner(self, a: Variable, b: Variable) -> No quad = a.to_linexpr() * b.to_linexpr() const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] + assert list(result.data.indexes["i"]) == [1, 2] - def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: - quad = a.to_linexpr() * a.to_linexpr() + def test_quadratic_add_expr_join_inner(self, a: Variable, b: Variable) -> None: + quad = a.to_linexpr() * b.to_linexpr() const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) result = quad.add(const, join="inner") assert list(result.data.indexes["i"]) == [0, 1] @@ -2040,7 +2037,7 @@ def test_quadratic_mul_constant_join_inner(self, a: Variable, b: Variable) -> No quad = a.to_linexpr() * b.to_linexpr() const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] + assert list(result.data.indexes["i"]) == [1, 2] def test_merge_join_left(self, a: Variable, b: Variable) -> None: result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="left") diff --git a/test/test_optimization.py b/test/test_optimization.py 
index 492d703a..6bcb1627 100644 --- a/test/test_optimization.py +++ b/test/test_optimization.py @@ -186,8 +186,8 @@ def model_with_non_aligned_variables() -> Model: lower = pd.Series(0, range(8)) y = m.add_variables(lower=lower, coords=[lower.index], name="y") - m.add_constraints(x + y, GREATER_EQUAL, 10.5) - m.objective = 1 * x + 0.5 * y + m.add_constraints(x.add(y, join="outer"), GREATER_EQUAL, 10.5) + m.objective = x.add(0.5 * y, join="outer") return m From 140021ec7eb0c5ad973bb5b78cb77794d3e79152 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 20 Feb 2026 13:15:21 +0100 Subject: [PATCH 14/66] Added user warning for joins which result in size 0 expressions.py --- linopy/expressions.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/linopy/expressions.py b/linopy/expressions.py index cd172a8a..ad455a03 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -611,6 +611,14 @@ def _apply_constant_op( self_const, factor, needs_data_reindex = self._align_constant( factor, fill_value=fill_value, join=join, default_join="inner" ) + if self_const.size == 0 and self.const.size > 0: + warn( + "Multiplication/division resulted in an empty expression because " + "the operands have no overlapping coordinates (inner join). " + "This is likely a modeling error.", + UserWarning, + stacklevel=3, + ) if needs_data_reindex: data = self.data.reindex_like(self_const, fill_value=self._fill_value) return self.__class__( From abd3ac204963d7d8b497c133b4d9c773a3415240 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 20 Feb 2026 13:35:17 +0100 Subject: [PATCH 15/66] Update convention and tests. 
Make notebooks mroe concise --- examples/coordinate-alignment.ipynb | 891 +++++++--------------------- linopy/expressions.py | 15 +- test/test_linear_expression.py | 22 +- test/test_typing.py | 7 +- 4 files changed, 237 insertions(+), 698 deletions(-) diff --git a/examples/coordinate-alignment.ipynb b/examples/coordinate-alignment.ipynb index b92b084b..0eed3e97 100644 --- a/examples/coordinate-alignment.ipynb +++ b/examples/coordinate-alignment.ipynb @@ -2,38 +2,40 @@ "cells": [ { "cell_type": "markdown", + "id": "7fb27b941602401d91542211134fc71a", "metadata": {}, "source": [ - "# Coordinate Alignment\n", + "# Coordinate Alignment in linopy\n", "\n", - "Since linopy builds on xarray, coordinate alignment matters when combining variables or expressions that live on different coordinates.\n", + "linopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n", "\n", - "linopy uses **strict, operation-dependent defaults** that prevent silent data loss and ambiguous fill behavior:\n", + "| Operation | Shared-dim alignment | Extra dims on constant/RHS |\n", + "|-----------|---------------------|---------------------------|\n", + "| `+`, `-` | `\"exact\"` — must match | **Forbidden** |\n", + "| `*`, `/` | `\"inner\"` — intersection | Expands the expression |\n", + "| `<=`, `>=`, `==` | `\"exact\"` — must match | **Forbidden** |\n", "\n", - "| Operation | Default | On mismatch |\n", - "|-----------|---------|-------------|\n", - "| `+`, `-` | `\"exact\"` | `ValueError` — coordinates must match |\n", - "| `*`, `/` | `\"inner\"` | Intersection — natural filtering |\n", - "| `<=`, `>=`, `==` (DataArray RHS) | `\"exact\"` | `ValueError` — coordinates must match |\n", + "**Why?** Addition and constraint RHS only change constant terms — expanding into new dimensions would duplicate the same variable. Multiplication changes coefficients, so expanding is meaningful. 
The rules are consistent: `a*x + b <= 0` and `a*x <= -b` always behave identically.\n", "\n", - "When you need to combine operands with mismatched coordinates, use the named methods (`.add()`, `.sub()`, `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`) with an explicit `join=` parameter.\n", + "When coordinates don't match, use the named methods (`.add()`, `.sub()`, `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`) with an explicit `join=` parameter.\n", "\n", - "This convention is inspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." + "Inspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." ] }, { "cell_type": "code", "execution_count": null, + "id": "acae54e37e7d407bbb7b55eff062a284", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.590547Z", - "start_time": "2026-02-20T08:34:40.580957Z" + "end_time": "2026-02-20T12:33:12.721284Z", + "start_time": "2026-02-20T12:33:11.943302Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:10.188745Z", - "iopub.status.busy": "2026-02-20T08:31:10.188638Z", - "iopub.status.idle": "2026-02-20T08:31:11.700268Z", - "shell.execute_reply": "2026-02-20T08:31:11.700023Z" + "iopub.execute_input": "2026-02-20T12:32:49.360029Z", + "iopub.status.busy": "2026-02-20T12:32:49.359903Z", + "iopub.status.idle": "2026-02-20T12:32:50.514585Z", + "shell.execute_reply": "2026-02-20T12:32:50.514115Z" } }, "outputs": [], @@ -47,26 +49,26 @@ }, { "cell_type": "markdown", + "id": "9a63283cbaf04dbcab1f6479b197f3a8", "metadata": {}, "source": [ - "## Matching Coordinates — The Default Case\n", - "\n", - "When two operands share the same coordinates on every shared dimension, all operators work directly. No special handling is needed." 
+ "## What works by default" ] }, { "cell_type": "code", "execution_count": null, + "id": "8dd0d8092fe74a7c96281538738b07e2", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.620850Z", - "start_time": "2026-02-20T08:34:40.599526Z" + "end_time": "2026-02-20T12:33:12.760105Z", + "start_time": "2026-02-20T12:33:12.724924Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.701873Z", - "iopub.status.busy": "2026-02-20T08:31:11.701711Z", - "iopub.status.idle": "2026-02-20T08:31:11.760554Z", - "shell.execute_reply": "2026-02-20T08:31:11.760331Z" + "iopub.execute_input": "2026-02-20T12:32:50.516166Z", + "iopub.status.busy": "2026-02-20T12:32:50.515988Z", + "iopub.status.idle": "2026-02-20T12:32:50.558987Z", + "shell.execute_reply": "2026-02-20T12:32:50.558720Z" } }, "outputs": [], @@ -74,527 +76,238 @@ "m = linopy.Model()\n", "\n", "time = pd.RangeIndex(5, name=\"time\")\n", + "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", + "\n", "x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", "y = m.add_variables(lower=0, coords=[time], name=\"y\")\n", - "\n", - "# Same coordinates — works fine\n", - "x + y" + "gen = m.add_variables(lower=0, coords=[time, techs], name=\"gen\")" ] }, { "cell_type": "code", "execution_count": null, + "id": "72eea5119410473aa328ad9291626812", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.633254Z", - "start_time": "2026-02-20T08:34:40.626281Z" + "end_time": "2026-02-20T12:33:12.772918Z", + "start_time": "2026-02-20T12:33:12.764952Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.761623Z", - "iopub.status.busy": "2026-02-20T08:31:11.761542Z", - "iopub.status.idle": "2026-02-20T08:31:11.766540Z", - "shell.execute_reply": "2026-02-20T08:31:11.766356Z" + "iopub.execute_input": "2026-02-20T12:32:50.560447Z", + "iopub.status.busy": "2026-02-20T12:32:50.560319Z", + "iopub.status.idle": "2026-02-20T12:32:50.568613Z", + "shell.execute_reply": 
"2026-02-20T12:32:50.568245Z" } }, "outputs": [], "source": [ - "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", - "x * factor" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Broadcasting (Different Dimensions)\n", - "\n", - "Alignment only checks **shared** dimensions. If operands have different dimension names, they expand (broadcast) as in xarray — this is unaffected by the alignment convention.\n", - "\n", - "This works in both directions: a constant with extra dimensions expands the expression, and an expression with extra dimensions expands over the constant." + "# Addition/subtraction — matching coordinates\n", + "x + y" ] }, { "cell_type": "code", "execution_count": null, + "id": "8edb47106e1a46a883d545849b8ab81b", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.650251Z", - "start_time": "2026-02-20T08:34:40.639851Z" + "end_time": "2026-02-20T12:33:12.783370Z", + "start_time": "2026-02-20T12:33:12.777304Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.767547Z", - "iopub.status.busy": "2026-02-20T08:31:11.767487Z", - "iopub.status.idle": "2026-02-20T08:31:11.773127Z", - "shell.execute_reply": "2026-02-20T08:31:11.772954Z" + "iopub.execute_input": "2026-02-20T12:32:50.569890Z", + "iopub.status.busy": "2026-02-20T12:32:50.569796Z", + "iopub.status.idle": "2026-02-20T12:32:50.575044Z", + "shell.execute_reply": "2026-02-20T12:32:50.574842Z" } }, "outputs": [], "source": [ - "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", - "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", - "\n", - "# x has dim \"time\", cost has dim \"tech\" — no shared dim, pure broadcast\n", - "x * cost # -> (time, tech)" + "# Multiplication — matching coordinates\n", + "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", + "x * factor" ] }, { "cell_type": "code", "execution_count": null, + "id": 
"10185d26023b46108eb7d9f57d49d2b3", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.667715Z", - "start_time": "2026-02-20T08:34:40.656983Z" + "end_time": "2026-02-20T12:33:12.795728Z", + "start_time": "2026-02-20T12:33:12.789249Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.774071Z", - "iopub.status.busy": "2026-02-20T08:31:11.773994Z", - "iopub.status.idle": "2026-02-20T08:31:11.780472Z", - "shell.execute_reply": "2026-02-20T08:31:11.780265Z" + "iopub.execute_input": "2026-02-20T12:32:50.576125Z", + "iopub.status.busy": "2026-02-20T12:32:50.576061Z", + "iopub.status.idle": "2026-02-20T12:32:50.581269Z", + "shell.execute_reply": "2026-02-20T12:32:50.581041Z" } }, "outputs": [], "source": [ - "# Constant with MORE dimensions than the expression — also broadcasts\n", - "w = m.add_variables(lower=0, coords=[techs], name=\"w\") # dims: (tech,)\n", - "time_profile = xr.DataArray(\n", - " [[1, 2], [3, 4], [5, 6]],\n", - " dims=[\"tech\", \"time\"],\n", - " coords={\"tech\": techs, \"time\": [0, 1]},\n", - ")\n", - "\n", - "# w has dim \"tech\", time_profile has dims (\"tech\", \"time\")\n", - "# \"time\" is extra — it expands the expression via broadcasting\n", - "w + time_profile # -> (tech, time)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Addition / Subtraction: `\"exact\"` Default\n", - "\n", - "When operands have different coordinates on a shared dimension, `+` and `-` raise a `ValueError`. This prevents silent data loss or ambiguous fill behavior." 
+ "# Multiplication — partial overlap gives intersection\n", + "partial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + "x * partial # result: time 0, 1, 2 only" ] }, { "cell_type": "code", "execution_count": null, + "id": "8763a12b2bbd4a93a75aff182afb95dc", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.683657Z", - "start_time": "2026-02-20T08:34:40.673488Z" + "end_time": "2026-02-20T12:33:12.805925Z", + "start_time": "2026-02-20T12:33:12.798771Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.781625Z", - "iopub.status.busy": "2026-02-20T08:31:11.781557Z", - "iopub.status.idle": "2026-02-20T08:31:11.786621Z", - "shell.execute_reply": "2026-02-20T08:31:11.786442Z" + "iopub.execute_input": "2026-02-20T12:32:50.582245Z", + "iopub.status.busy": "2026-02-20T12:32:50.582185Z", + "iopub.status.idle": "2026-02-20T12:32:50.587327Z", + "shell.execute_reply": "2026-02-20T12:32:50.587163Z" } }, "outputs": [], "source": [ - "subset_time = pd.RangeIndex(3, name=\"time\")\n", - "y_short = m.add_variables(lower=0, coords=[subset_time], name=\"y_short\")\n", - "\n", - "# x has 5 time steps, y_short has 3 — coordinates don't match\n", - "try:\n", - " x + y_short\n", - "except ValueError as e:\n", - " print(\"ValueError:\", e)" + "# Multiplication — different dims broadcast (expands the expression)\n", + "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", + "x * cost # result: (time, tech)" ] }, { "cell_type": "code", "execution_count": null, + "id": "7623eae2785240b9bd12b16a66d81610", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.698177Z", - "start_time": "2026-02-20T08:34:40.691406Z" + "end_time": "2026-02-20T12:33:12.823606Z", + "start_time": "2026-02-20T12:33:12.811410Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.787505Z", - "iopub.status.busy": "2026-02-20T08:31:11.787448Z", - "iopub.status.idle": "2026-02-20T08:31:11.790477Z", - 
"shell.execute_reply": "2026-02-20T08:31:11.790298Z" + "iopub.execute_input": "2026-02-20T12:32:50.588398Z", + "iopub.status.busy": "2026-02-20T12:32:50.588330Z", + "iopub.status.idle": "2026-02-20T12:32:50.598610Z", + "shell.execute_reply": "2026-02-20T12:32:50.598402Z" } }, "outputs": [], "source": [ - "# Same for adding a constant DataArray with mismatched coordinates\n", - "partial_const = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", - "\n", - "try:\n", - " x + partial_const\n", - "except ValueError as e:\n", - " print(\"ValueError:\", e)" + "# Constraints — RHS with fewer dims broadcasts naturally\n", + "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", + "m.add_constraints(gen <= capacity, name=\"cap\") # capacity broadcasts over time" ] }, { "cell_type": "markdown", + "id": "7cdc8c89c7104fffa095e18ddfef8986", "metadata": {}, "source": [ - "## Multiplication / Division: `\"inner\"` Default\n", - "\n", - "Multiplication by a parameter array is a natural filtering operation — like applying an availability factor to a subset of time steps. The result is restricted to the **intersection** of coordinates. No fill values are needed." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.722403Z", - "start_time": "2026-02-20T08:34:40.704702Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.791396Z", - "iopub.status.busy": "2026-02-20T08:31:11.791334Z", - "iopub.status.idle": "2026-02-20T08:31:11.796458Z", - "shell.execute_reply": "2026-02-20T08:31:11.796262Z" - } - }, - "outputs": [], - "source": [ - "partial_factor = xr.DataArray([2, 3, 4], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", - "\n", - "# x has time 0-4, partial_factor has time 0-2\n", - "# Inner join: result restricted to time 0, 1, 2\n", - "x * partial_factor" + "## What raises an error" ] }, { "cell_type": "code", "execution_count": null, + "id": "b118ea5561624da68c537baed56e602f", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.750336Z", - "start_time": "2026-02-20T08:34:40.739583Z" + "end_time": "2026-02-20T12:33:12.840568Z", + "start_time": "2026-02-20T12:33:12.834424Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.797412Z", - "iopub.status.busy": "2026-02-20T08:31:11.797355Z", - "iopub.status.idle": "2026-02-20T08:31:11.803105Z", - "shell.execute_reply": "2026-02-20T08:31:11.802861Z" + "iopub.execute_input": "2026-02-20T12:32:50.599634Z", + "iopub.status.busy": "2026-02-20T12:32:50.599571Z", + "iopub.status.idle": "2026-02-20T12:32:50.605548Z", + "shell.execute_reply": "2026-02-20T12:32:50.605322Z" } }, "outputs": [], "source": [ - "# Disjoint coordinates: no intersection -> empty result\n", - "z = m.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", - "disjoint_factor = xr.DataArray(\n", - " [1, 2, 3, 4, 5], dims=[\"time\"], coords={\"time\": range(5)}\n", + "# Addition with mismatched coordinates\n", + "y_short = m.add_variables(\n", + " lower=0, coords=[pd.RangeIndex(3, name=\"time\")], name=\"y_short\"\n", ")\n", "\n", - "z * disjoint_factor" - ] - }, - 
{ - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Named Methods with `join=`\n", - "\n", - "When you intentionally want to combine operands with mismatched coordinates, use the named methods with an explicit `join=` parameter. This makes the alignment intent clear in the code." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Setup: Overlapping but Non-Identical Coordinates" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.770327Z", - "start_time": "2026-02-20T08:34:40.762873Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.804161Z", - "iopub.status.busy": "2026-02-20T08:31:11.804100Z", - "iopub.status.idle": "2026-02-20T08:31:11.807917Z", - "shell.execute_reply": "2026-02-20T08:31:11.807731Z" - } - }, - "outputs": [], - "source": [ - "m2 = linopy.Model()\n", - "\n", - "i_a = pd.Index([0, 1, 2], name=\"i\")\n", - "i_b = pd.Index([1, 2, 3], name=\"i\")\n", - "\n", - "a = m2.add_variables(coords=[i_a], name=\"a\")\n", - "b = m2.add_variables(coords=[i_b], name=\"b\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`a` has coordinates i=[0, 1, 2] and `b` has i=[1, 2, 3]. 
They overlap at i=1 and i=2 but are not identical, so `a + b` raises a `ValueError`.\n", - "\n", - "**Inner join** — only shared coordinates (i=1, 2):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.785505Z", - "start_time": "2026-02-20T08:34:40.775987Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.808856Z", - "iopub.status.busy": "2026-02-20T08:31:11.808774Z", - "iopub.status.idle": "2026-02-20T08:31:11.815876Z", - "shell.execute_reply": "2026-02-20T08:31:11.815678Z" - } - }, - "outputs": [], - "source": [ - "a.add(b, join=\"inner\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Outer join** — union of coordinates (i=0, 1, 2, 3). Where one operand is missing, it drops out of the sum (fill with zero):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.811388Z", - "start_time": "2026-02-20T08:34:40.797806Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.816893Z", - "iopub.status.busy": "2026-02-20T08:31:11.816817Z", - "iopub.status.idle": "2026-02-20T08:31:11.824433Z", - "shell.execute_reply": "2026-02-20T08:31:11.824155Z" - } - }, - "outputs": [], - "source": [ - "a.add(b, join=\"outer\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Left join** — keep left operand's coordinates (i=0, 1, 2):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.834416Z", - "start_time": "2026-02-20T08:34:40.823515Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.825622Z", - "iopub.status.busy": "2026-02-20T08:31:11.825544Z", - "iopub.status.idle": "2026-02-20T08:31:11.832608Z", - "shell.execute_reply": "2026-02-20T08:31:11.832423Z" - } - }, - "outputs": [], - "source": [ - "a.add(b, join=\"left\")" - ] - }, - { 
- "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Right join** — keep right operand's coordinates (i=1, 2, 3):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.854542Z", - "start_time": "2026-02-20T08:34:40.841131Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.833545Z", - "iopub.status.busy": "2026-02-20T08:31:11.833490Z", - "iopub.status.idle": "2026-02-20T08:31:11.840073Z", - "shell.execute_reply": "2026-02-20T08:31:11.839884Z" - } - }, - "outputs": [], - "source": [ - "a.add(b, join=\"right\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Override** — positional alignment, ignore coordinate labels. The result uses the left operand's coordinates:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.872890Z", - "start_time": "2026-02-20T08:34:40.862894Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.841049Z", - "iopub.status.busy": "2026-02-20T08:31:11.840991Z", - "iopub.status.idle": "2026-02-20T08:31:11.847135Z", - "shell.execute_reply": "2026-02-20T08:31:11.846968Z" - } - }, - "outputs": [], - "source": [ - "a.add(b, join=\"override\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Multiplication with `join=`\n", - "\n", - "The same `join=` parameter works on `.mul()` and `.div()`. 
Since multiplication defaults to `\"inner\"`, you only need explicit `join=` when you want a different mode:" + "try:\n", + " x + y_short # time coords don't match\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" ] }, { "cell_type": "code", "execution_count": null, + "id": "938c804e27f84196a10c8828c723f798", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.899679Z", - "start_time": "2026-02-20T08:34:40.889148Z" + "end_time": "2026-02-20T12:33:12.847760Z", + "start_time": "2026-02-20T12:33:12.843647Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.848157Z", - "iopub.status.busy": "2026-02-20T08:31:11.848101Z", - "iopub.status.idle": "2026-02-20T08:31:11.852887Z", - "shell.execute_reply": "2026-02-20T08:31:11.852713Z" + "iopub.execute_input": "2026-02-20T12:32:50.606568Z", + "iopub.status.busy": "2026-02-20T12:32:50.606506Z", + "iopub.status.idle": "2026-02-20T12:32:50.609742Z", + "shell.execute_reply": "2026-02-20T12:32:50.609552Z" } }, "outputs": [], "source": [ - "const = xr.DataArray([2, 3, 4], dims=[\"i\"], coords={\"i\": [1, 2, 3]})\n", - "\n", - "# Default inner join — intersection of i=[0,1,2] and i=[1,2,3]\n", - "a * const" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T07:55:08.411255Z", - "start_time": "2026-02-20T07:55:08.404219Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:23:53.301013Z", - "iopub.status.busy": "2026-02-20T08:23:53.300958Z", - "iopub.status.idle": "2026-02-20T08:23:53.305201Z", - "shell.execute_reply": "2026-02-20T08:23:53.305026Z" - } - }, - "source": [ - "## Constraints with DataArray RHS\n", - "\n", - "Constraint operators (`<=`, `>=`, `==`) with a DataArray right-hand side also default to `\"exact\"` — coordinates on shared dimensions must match. 
Use `.le()`, `.ge()`, `.eq()` with `join=` to control alignment.\n", - "\n", - "**Dimension rules for constraint RHS:**\n", - "- The RHS may have **fewer** dimensions than the expression — the bound broadcasts. This is the standard way to apply a per-tech capacity across all time steps.\n", - "- The RHS must **not** have **more** dimensions than the expression. An expression with `dims=(tech,)` defines one variable per tech; an RHS with `dims=(time, tech)` would create redundant constraints on the same variable, which is almost always a mistake.\n", - "\n", - "Note: this is different from arithmetic, where a constant with extra dims freely expands the expression. For constraints, the expression defines the problem structure." + "# Addition with extra dimensions on the constant\n", + "profile = xr.DataArray(\n", + " np.ones((3, 5)), dims=[\"tech\", \"time\"], coords={\"tech\": techs, \"time\": time}\n", + ")\n", + "try:\n", + " x + profile # would duplicate x[t] across techs\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" ] }, { "cell_type": "code", "execution_count": null, + "id": "504fb2a444614c0babb325280ed9130a", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.925963Z", - "start_time": "2026-02-20T08:34:40.907028Z" + "end_time": "2026-02-20T12:33:12.855699Z", + "start_time": "2026-02-20T12:33:12.851877Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.853850Z", - "iopub.status.busy": "2026-02-20T08:31:11.853793Z", - "iopub.status.idle": "2026-02-20T08:31:11.866027Z", - "shell.execute_reply": "2026-02-20T08:31:11.865850Z" + "iopub.execute_input": "2026-02-20T12:32:50.610738Z", + "iopub.status.busy": "2026-02-20T12:32:50.610665Z", + "iopub.status.idle": "2026-02-20T12:32:50.614493Z", + "shell.execute_reply": "2026-02-20T12:32:50.614335Z" } }, "outputs": [], "source": [ - "# RHS with fewer dimensions — broadcasts (works fine)\n", - "m3 = linopy.Model()\n", - "hours = pd.RangeIndex(24, name=\"hour\")\n", - "techs = 
pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", - "gen = m3.add_variables(lower=0, coords=[hours, techs], name=\"gen\")\n", + "# Multiplication with zero overlap\n", + "z = m.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", "\n", - "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", - "m3.add_constraints(\n", - " gen <= capacity, name=\"capacity_limit\"\n", - ") # capacity broadcasts over hour" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.949905Z", - "start_time": "2026-02-20T08:34:40.939482Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.866958Z", - "iopub.status.busy": "2026-02-20T08:31:11.866899Z", - "iopub.status.idle": "2026-02-20T08:31:11.871115Z", - "shell.execute_reply": "2026-02-20T08:31:11.870937Z" - } - }, - "outputs": [], - "source": [ - "# RHS with matching coordinates — works fine\n", - "full_rhs = xr.DataArray(np.arange(5, dtype=float), dims=[\"time\"], coords={\"time\": time})\n", - "con = x <= full_rhs\n", - "con" + "try:\n", + " z * factor # z has time 5-9, factor has time 0-4 — no intersection\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" ] }, { "cell_type": "code", "execution_count": null, + "id": "59bbdb311c014d738909a11f9e486628", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:40.970277Z", - "start_time": "2026-02-20T08:34:40.961415Z" + "end_time": "2026-02-20T12:33:12.864666Z", + "start_time": "2026-02-20T12:33:12.860577Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.872062Z", - "iopub.status.busy": "2026-02-20T08:31:11.872004Z", - "iopub.status.idle": "2026-02-20T08:31:11.874934Z", - "shell.execute_reply": "2026-02-20T08:31:11.874765Z" + "iopub.execute_input": "2026-02-20T12:32:50.615336Z", + "iopub.status.busy": "2026-02-20T12:32:50.615276Z", + "iopub.status.idle": "2026-02-20T12:32:50.618275Z", + 
"shell.execute_reply": "2026-02-20T12:32:50.618094Z" } }, "outputs": [], "source": [ - "# RHS with mismatched coordinates — raises ValueError\n", + "# Constraint RHS with mismatched coordinates\n", "partial_rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", "\n", "try:\n", @@ -606,351 +319,159 @@ { "cell_type": "code", "execution_count": null, + "id": "b43b363d81ae4b689946ece5c682cd59", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:41.006270Z", - "start_time": "2026-02-20T08:34:40.997162Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.875820Z", - "iopub.status.busy": "2026-02-20T08:31:11.875763Z", - "iopub.status.idle": "2026-02-20T08:31:11.879949Z", - "shell.execute_reply": "2026-02-20T08:31:11.879781Z" - } - }, - "outputs": [], - "source": [ - "# Use .le() with join=\"inner\" — constraint only at the intersection\n", - "x.to_linexpr().le(partial_rhs, join=\"inner\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:41.036419Z", - "start_time": "2026-02-20T08:34:41.022644Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.880853Z", - "iopub.status.busy": "2026-02-20T08:31:11.880793Z", - "iopub.status.idle": "2026-02-20T08:31:11.884663Z", - "shell.execute_reply": "2026-02-20T08:31:11.884503Z" - } - }, - "outputs": [], - "source": [ - "# Use .le() with join=\"left\" — constraint at all of x's coordinates,\n", - "# NaN where RHS is missing (no constraint at those positions)\n", - "x.to_linexpr().le(partial_rhs, join=\"left\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:41.053285Z", - "start_time": "2026-02-20T08:34:41.043483Z" + "end_time": "2026-02-20T12:33:12.874035Z", + "start_time": "2026-02-20T12:33:12.869466Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.885511Z", - "iopub.status.busy": 
"2026-02-20T08:31:11.885453Z", - "iopub.status.idle": "2026-02-20T08:31:11.889326Z", - "shell.execute_reply": "2026-02-20T08:31:11.889148Z" + "iopub.execute_input": "2026-02-20T12:32:50.619169Z", + "iopub.status.busy": "2026-02-20T12:32:50.619111Z", + "iopub.status.idle": "2026-02-20T12:32:50.622842Z", + "shell.execute_reply": "2026-02-20T12:32:50.622654Z" } }, "outputs": [], "source": [ - "# RHS with MORE dimensions than expression — raises ValueError\n", - "y_tech = m.add_variables(lower=0, coords=[techs], name=\"y_tech\") # dims: (tech,)\n", - "rhs_extra_dims = xr.DataArray(\n", + "# Constraint RHS with extra dimensions\n", + "w = m.add_variables(lower=0, coords=[techs], name=\"w\") # dims: (tech,)\n", + "rhs_2d = xr.DataArray(\n", " np.ones((5, 3)), dims=[\"time\", \"tech\"], coords={\"time\": time, \"tech\": techs}\n", ")\n", - "\n", "try:\n", - " y_tech <= rhs_extra_dims # \"time\" is not in the expression\n", + " w <= rhs_2d # would create redundant constraints on w[tech]\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" ] }, { "cell_type": "markdown", + "id": "8a65eabff63a45729fe45fb5ade58bdc", "metadata": {}, "source": [ - "## Practical Example\n", + "## Positional alignment with `join=\"override\"`\n", "\n", - "Consider a generation dispatch model where solar availability is a partial factor and a minimum demand constraint only applies during peak hours." + "A common pattern: two arrays with the same shape but different (or no) coordinate labels. Use `join=\"override\"` to align by position, ignoring labels." 
] }, { "cell_type": "code", "execution_count": null, + "id": "c3933fab20d04ec698c2621248eb3be0", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:41.067545Z", - "start_time": "2026-02-20T08:34:41.062532Z" + "end_time": "2026-02-20T12:33:12.887673Z", + "start_time": "2026-02-20T12:33:12.877435Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.890266Z", - "iopub.status.busy": "2026-02-20T08:31:11.890211Z", - "iopub.status.idle": "2026-02-20T08:31:11.893636Z", - "shell.execute_reply": "2026-02-20T08:31:11.893458Z" + "iopub.execute_input": "2026-02-20T12:32:50.623830Z", + "iopub.status.busy": "2026-02-20T12:32:50.623775Z", + "iopub.status.idle": "2026-02-20T12:32:50.632476Z", + "shell.execute_reply": "2026-02-20T12:32:50.632321Z" } }, "outputs": [], "source": [ - "m4 = linopy.Model()\n", + "m2 = linopy.Model()\n", "\n", - "hours = pd.RangeIndex(24, name=\"hour\")\n", - "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", + "a = m2.add_variables(coords=[[\"x\", \"y\", \"z\"]], name=\"a\")\n", + "b = m2.add_variables(coords=[[\"p\", \"q\", \"r\"]], name=\"b\")\n", "\n", - "gen = m4.add_variables(lower=0, coords=[hours, techs], name=\"gen\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Capacity limits apply to all hours and techs. 
The `capacity` DataArray has only the `tech` dimension — it broadcasts over `hour` (no shared dimension to conflict):" + "# a + b fails because labels don't match\n", + "# join=\"override\" aligns by position and keeps left operand's labels\n", + "a.add(b, join=\"override\")" ] }, { "cell_type": "code", "execution_count": null, + "id": "4dd4641cc4064e0191573fe9c69df29b", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:41.084071Z", - "start_time": "2026-02-20T08:34:41.071733Z" + "end_time": "2026-02-20T12:33:12.905207Z", + "start_time": "2026-02-20T12:33:12.899976Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.894513Z", - "iopub.status.busy": "2026-02-20T08:31:11.894459Z", - "iopub.status.idle": "2026-02-20T08:31:11.904179Z", - "shell.execute_reply": "2026-02-20T08:31:11.904001Z" + "iopub.execute_input": "2026-02-20T12:32:50.633382Z", + "iopub.status.busy": "2026-02-20T12:32:50.633328Z", + "iopub.status.idle": "2026-02-20T12:32:50.637298Z", + "shell.execute_reply": "2026-02-20T12:32:50.637123Z" } }, "outputs": [], "source": [ - "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", - "m4.add_constraints(gen <= capacity, name=\"capacity_limit\")" + "# Same for constraints\n", + "rhs = xr.DataArray([1.0, 2.0, 3.0], dims=[\"dim_0\"], coords={\"dim_0\": [\"p\", \"q\", \"r\"]})\n", + "a.to_linexpr().le(rhs, join=\"override\")" ] }, { "cell_type": "markdown", + "id": "8309879909854d7188b41380fd92a7c3", "metadata": {}, "source": [ - "Solar availability is a factor that covers all 24 hours. 
Since coordinates match exactly, multiplication with `*` works directly:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:34:41.104144Z", - "start_time": "2026-02-20T08:34:41.091479Z" - }, - "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.905131Z", - "iopub.status.busy": "2026-02-20T08:31:11.905072Z", - "iopub.status.idle": "2026-02-20T08:31:11.914430Z", - "shell.execute_reply": "2026-02-20T08:31:11.914242Z" - } - }, - "outputs": [], - "source": [ - "solar_avail = np.zeros(24)\n", - "solar_avail[6:19] = np.sin(np.linspace(0, np.pi, 13))\n", - "solar_availability = xr.DataArray(solar_avail, dims=[\"hour\"], coords={\"hour\": hours})\n", + "## Other join modes\n", + "\n", + "All named methods (`.add()`, `.sub()`, `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`) accept a `join=` parameter:\n", "\n", - "solar_gen = gen.sel(tech=\"solar\")\n", - "m4.add_constraints(solar_gen <= 100 * solar_availability, name=\"solar_avail\")" + "| `join` | Coordinates kept | Fill |\n", + "|--------|-----------------|------|\n", + "| `\"exact\"` | Must match | `ValueError` if different |\n", + "| `\"inner\"` | Intersection | — |\n", + "| `\"outer\"` | Union | Zero (arithmetic) / NaN (constraints) |\n", + "| `\"left\"` | Left operand's | Zero / NaN for missing right |\n", + "| `\"right\"` | Right operand's | Zero for missing left |\n", + "| `\"override\"` | Left operand's | Positional alignment |" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "Peak demand of 120 MW must be met only during hours 8-20. The demand array covers a subset of hours. 
Use `.ge()` with `join=\"inner\"` to restrict the constraint to just those hours:" - }, { "cell_type": "code", "execution_count": null, + "id": "3ed186c9a28b402fb0bc4494df01f08d", "metadata": { "ExecuteTime": { - "end_time": "2026-02-20T08:34:41.122157Z", - "start_time": "2026-02-20T08:34:41.107939Z" + "end_time": "2026-02-20T12:33:12.934067Z", + "start_time": "2026-02-20T12:33:12.909515Z" }, "execution": { - "iopub.execute_input": "2026-02-20T08:31:11.915441Z", - "iopub.status.busy": "2026-02-20T08:31:11.915362Z", - "iopub.status.idle": "2026-02-20T08:31:11.925960Z", - "shell.execute_reply": "2026-02-20T08:31:11.925775Z" - } - }, - "outputs": [], - "source": [ - "peak_hours = pd.RangeIndex(8, 21, name=\"hour\")\n", - "peak_demand = xr.DataArray(\n", - " np.full(len(peak_hours), 120.0), dims=[\"hour\"], coords={\"hour\": peak_hours}\n", - ")\n", - "\n", - "total_gen = gen.sum(\"tech\")\n", - "\n", - "# Constraint only at peak hours (intersection)\n", - "m4.add_constraints(total_gen.ge(peak_demand, join=\"inner\"), name=\"peak_demand\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "Selecting the correct subset of the variable produces the same result, and is arguably more readable:" - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T08:37:39.113259Z", - "start_time": "2026-02-20T08:37:39.065983Z" + "iopub.execute_input": "2026-02-20T12:32:50.638240Z", + "iopub.status.busy": "2026-02-20T12:32:50.638182Z", + "iopub.status.idle": "2026-02-20T12:32:50.659703Z", + "shell.execute_reply": "2026-02-20T12:32:50.659485Z" } }, "outputs": [], "source": [ - "# Constraint only at peak hours (intersection)\n", - "m4.add_constraints(\n", - " total_gen.sel(hour=peak_hours) >= peak_demand, name=\"peak_demand_sel\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Migrating from Previous Versions\n", - "\n", - "Previous versions of linopy used a 
**shape-dependent heuristic** for coordinate alignment. The behavior depended on whether operands happened to have the same shape, and was inconsistent between `Variable` and `LinearExpression`:\n", - "\n", - "| Condition | Old behavior | New behavior |\n", - "|-----------|-------------|-------------|\n", - "| Same shape, same coordinates | Works correctly | Works correctly (no change) |\n", - "| Same shape, **different** coordinates, `+`/`-` | `\"override\"` — positional alignment (**bug-prone**) | `\"exact\"` — raises `ValueError` |\n", - "| Same shape, **different** coordinates, `*`/`/` | Buggy (crashes or produces garbage) | `\"inner\"` — intersection |\n", - "| Different shape, expr + expr | `\"outer\"` — union of coordinates | `\"exact\"` — raises `ValueError` |\n", - "| Different shape, expr + constant | `\"left\"` — keeps expression coords, fills missing with 0 | `\"exact\"` — raises `ValueError` |\n", - "| Different shape, expr * constant | Buggy (crashes for `LinearExpression`, produces garbage for `Variable`) | `\"inner\"` — intersection |\n", - "| Constraint with mismatched DataArray RHS | Same-shape: `\"override\"` (positional); different-shape: `\"left\"` (fills missing RHS with 0) | `\"exact\"` — raises `ValueError` |\n", - "\n", - "### Why the change?\n", - "\n", - "The old heuristic caused several classes of bugs:\n", - "\n", - "1. **Silent positional alignment**: When two operands happened to have the same shape but entirely different coordinates (e.g., `x(time=[0,1,2]) + z(time=[5,6,7])`), they were matched by position — giving a wrong result with no warning.\n", - "\n", - "2. **Non-associative addition**: `(y + factor) + x` could give a different result than `y + (x + factor)` because `\"left\"` for expr+constant dropped the constant's extra coordinates before they could be recovered by a subsequent addition.\n", - "\n", - "3. 
**Broken multiplication**: Multiplying a `LinearExpression` by a DataArray with mismatched coordinates would crash with an `AssertionError`. Multiplying a `Variable` by such a DataArray produced a result with misaligned coefficients and variable references.\n", - "\n", - "### How to update your code\n", - "\n", - "If your code combines operands with **mismatched coordinates** and you relied on the old behavior, you'll now get a `ValueError` (for `+`/`-`) or a smaller result (for `*`/`/`). Here's how to migrate:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Addition with mismatched coordinates** — expr+expr previously used `\"outer\"`, expr+constant used `\"left\"`. Both now raise `ValueError`:\n", - "\n", - "```python\n", - "# Old code (worked silently):\n", - "result = x + y_short # different-size expr+expr → was \"outer\"\n", - "result = x + partial_const # expr + constant → was \"left\"\n", - "\n", - "# New code — be explicit about the join:\n", - "result = x.add(y_short, join=\"outer\") # union of coordinates\n", - "result = x.add(partial_const, join=\"left\") # keep x's coordinates, fill 0\n", - "```\n", - "\n", - "**Same-shape but different coordinates** — previously matched by position (`\"override\"`) for addition. Now raises `ValueError` for `+`/`-`, gives intersection for `*`/`/`:\n", - "\n", - "```python\n", - "# Old code (silently matched positions — likely a bug!):\n", - "x_abc = m.add_variables(coords=[[\"a\", \"b\", \"c\"]], name=\"x_abc\")\n", - "y_def = m.add_variables(coords=[[\"d\", \"e\", \"f\"]], name=\"y_def\")\n", - "result = x_abc + y_def # Old: positional match → New: ValueError\n", - "\n", - "# If you really want positional matching (rare):\n", - "result = x_abc.add(y_def, join=\"override\")\n", - "```\n", - "\n", - "**Multiplication with mismatched coordinates** — previously broken (crash or garbage). 
Now uses `\"inner\"` (intersection):\n", - "\n", - "```python\n", - "# Old code — would crash (LinExpr) or produce garbage (Variable):\n", - "x * partial_factor # x has 5 coords, partial_factor has 3\n", - "\n", - "# New code — result has 3 entries (intersection). This now works correctly!\n", - "# If you need to keep all of x's coordinates (zero-fill missing):\n", - "x.mul(partial_factor, join=\"left\")\n", - "```\n", - "\n", - "**Constraints with mismatched DataArray RHS** — previously used positional alignment (same shape) or `\"left\"` with 0-fill (different shape). Now raises `ValueError`:\n", + "i_a = pd.Index([0, 1, 2], name=\"i\")\n", + "i_b = pd.Index([1, 2, 3], name=\"i\")\n", "\n", - "```python\n", - "# Old code:\n", - "con = x <= partial_rhs # Old: \"left\" (fill 0) or \"override\" → New: ValueError\n", + "a = m2.add_variables(coords=[i_a], name=\"a2\")\n", + "b = m2.add_variables(coords=[i_b], name=\"b2\")\n", "\n", - "# New code — be explicit:\n", - "con = x.to_linexpr().le(partial_rhs, join=\"left\") # keep x's coords, NaN fill\n", - "con = x.to_linexpr().le(partial_rhs, join=\"inner\") # intersection only\n", - "```" + "print(\"inner:\", list(a.add(b, join=\"inner\").coords[\"i\"].values)) # [1, 2]\n", + "print(\"outer:\", list(a.add(b, join=\"outer\").coords[\"i\"].values)) # [0, 1, 2, 3]\n", + "print(\"left: \", list(a.add(b, join=\"left\").coords[\"i\"].values)) # [0, 1, 2]\n", + "print(\"right:\", list(a.add(b, join=\"right\").coords[\"i\"].values)) # [1, 2, 3]" ] }, { "cell_type": "markdown", + "id": "cb1e1581032b452c9409d6c6813c49d1", "metadata": {}, "source": [ - "## Summary\n", - "\n", - "### Default Behavior\n", - "\n", - "| Context | Default `join` | Behavior |\n", - "|---------|---------------|----------|\n", - "| Arithmetic operators (`+`, `-`) | `\"exact\"` | Coordinates must match on shared dims; raises `ValueError` on mismatch |\n", - "| Arithmetic operators (`*`, `/`) | `\"inner\"` | Intersection of coordinates on shared dims; no 
fill needed |\n", - "| Constraint operators (`<=`, `>=`, `==`) with DataArray RHS | `\"exact\"` | Coordinates must match on shared dims; raises `ValueError` on mismatch |\n", - "\n", - "### Extra Dimensions (Broadcasting)\n", - "\n", - "| Context | Extra dims on constant/RHS | Extra dims on expression |\n", - "|---------|--------------------------|------------------------|\n", - "| Arithmetic (`+`, `-`, `*`, `/`) | Expands the expression (standard xarray broadcast) | Expands over the constant |\n", - "| Constraint RHS (`<=`, `>=`, `==`) | **Forbidden** — raises `ValueError` | RHS broadcasts over expression's extra dims |\n", - "\n", - "### All Join Modes\n", + "## Migrating from previous versions\n", "\n", - "| `join` | Coordinates | Fill behavior |\n", - "|--------|------------|---------------|\n", - "| `\"exact\"` (default for `+`, `-`, constraints) | Must match exactly | Raises `ValueError` if different |\n", - "| `\"inner\"` (default for `*`, `/`) | Intersection only | No fill needed |\n", - "| `\"outer\"` | Union | Fill with zero (arithmetic) or `NaN` (constraint RHS) |\n", - "| `\"left\"` | Left operand's | Fill right with zero (arithmetic) or `NaN` (constraint RHS) |\n", - "| `\"right\"` | Right operand's | Fill left with zero |\n", - "| `\"override\"` | Left operand's (positional) | Positional alignment, ignores coordinate labels |\n", + "Previous versions used a shape-dependent heuristic that caused silent bugs (positional alignment on same-shape operands, non-associative addition, broken multiplication). 
The new behavior:\n", "\n", - "### Quick Reference\n", + "| Condition | Old | New |\n", + "|-----------|-----|-----|\n", + "| Same shape, different coords, `+`/`-` | Positional match (silent bug) | `ValueError` |\n", + "| Different shape, `+`/`-` | `\"outer\"` or `\"left\"` (implicit) | `ValueError` |\n", + "| Mismatched coords, `*`/`/` | Crash or garbage | Intersection (or error if empty) |\n", + "| Constraint with mismatched RHS | `\"override\"` or `\"left\"` | `ValueError` |\n", "\n", - "| Operation | Matching coords | Mismatched coords |\n", - "|-----------|----------------|-------------------|\n", - "| `x + y` | Works | `ValueError` |\n", - "| `x * factor` | Works | Intersection |\n", - "| `x.add(y, join=\"inner\")` | Works | Intersection |\n", - "| `x.add(y, join=\"outer\")` | Works | Union with fill |\n", - "| `x <= rhs` (DataArray) | Works | `ValueError` |\n", - "| `x.le(rhs, join=\"inner\")` | Works | Intersection |\n", - "| `x.le(rhs, join=\"left\")` | Works | Left coords, NaN fill |" + "To migrate: replace `x + y` with `x.add(y, join=\"outer\")` (or whichever join matches your intent)." ] } ], @@ -974,5 +495,5 @@ } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } diff --git a/linopy/expressions.py b/linopy/expressions.py index ad455a03..3a150d0d 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -588,6 +588,14 @@ def _add_constant( if np.isscalar(other) and join is None: return self.assign(const=self.const + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) + extra_dims = set(da.dims) - set(self.coord_dims) + if extra_dims: + raise ValueError( + f"Constant has dimensions {extra_dims} not present in the " + f"expression. Addition/subtraction cannot introduce new " + f"dimensions — use multiplication to expand, or select/reindex " + f"the constant to match the expression's dimensions." 
+ ) self_const, da, needs_data_reindex = self._align_constant( da, fill_value=0, join=join, default_join="exact" ) @@ -612,12 +620,9 @@ def _apply_constant_op( factor, fill_value=fill_value, join=join, default_join="inner" ) if self_const.size == 0 and self.const.size > 0: - warn( + raise ValueError( "Multiplication/division resulted in an empty expression because " - "the operands have no overlapping coordinates (inner join). " - "This is likely a modeling error.", - UserWarning, - stacklevel=3, + "the operands have no overlapping coordinates (inner join)." ) if needs_data_reindex: data = self.data.reindex_like(self_const, fill_value=self._fill_value) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index d77bd00f..ea81bb2b 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -681,9 +681,9 @@ def test_disjoint_mul(self, v: Variable) -> None: disjoint = xr.DataArray( [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} ) - # inner join: no intersection → empty - result = v * disjoint - assert result.sizes["dim_2"] == 0 + # inner join: no intersection → error + with pytest.raises(ValueError, match="no overlapping coordinates"): + v * disjoint # explicit join="left": 20 entries, all zeros result = v.mul(disjoint, join="left") @@ -694,8 +694,8 @@ def test_disjoint_div(self, v: Variable) -> None: disjoint = xr.DataArray( [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} ) - result = v / disjoint - assert result.sizes["dim_2"] == 0 + with pytest.raises(ValueError, match="no overlapping coordinates"): + v / disjoint # --- Multiplication / division with subset constant --- @@ -900,6 +900,18 @@ def test_constraint_rhs_extra_dims_raises(self, v: Variable) -> None: with pytest.raises(ValueError, match="not present in the expression"): v <= rhs + def test_add_constant_extra_dims_raises(self, v: Variable) -> None: + da = xr.DataArray( + [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} + ) + 
with pytest.raises(ValueError, match="not present in the expression"): + v + da + with pytest.raises(ValueError, match="not present in the expression"): + v - da + # multiplication still allows extra dims (broadcasts) + result = v * da + assert "extra" in result.dims + def test_da_truediv_var_raises(self, v: Variable) -> None: da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) with pytest.raises(TypeError): diff --git a/test/test_typing.py b/test/test_typing.py index 99a27033..312f76c9 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -7,6 +7,7 @@ def test_operations_with_data_arrays_are_typed_correctly() -> None: m = linopy.Model() a: xr.DataArray = xr.DataArray([1, 2, 3]) + s: xr.DataArray = xr.DataArray(5.0) v: linopy.Variable = m.add_variables(lower=0.0, name="v") e: linopy.LinearExpression = v * 1.0 @@ -14,12 +15,12 @@ def test_operations_with_data_arrays_are_typed_correctly() -> None: _ = a * v _ = v * a - _ = v + a + _ = v + s _ = a * e _ = e * a - _ = e + a + _ = e + s _ = a * q _ = q * a - _ = q + a + _ = q + s From 1e18984320bb9ce6fbae8e585b03cc8b85b3064b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 20 Feb 2026 13:38:27 +0100 Subject: [PATCH 16/66] show assign_coords pattern --- examples/coordinate-alignment.ipynb | 383 ++++++++++++++-------------- 1 file changed, 191 insertions(+), 192 deletions(-) diff --git a/examples/coordinate-alignment.ipynb b/examples/coordinate-alignment.ipynb index 0eed3e97..e1309e37 100644 --- a/examples/coordinate-alignment.ipynb +++ b/examples/coordinate-alignment.ipynb @@ -2,7 +2,6 @@ "cells": [ { "cell_type": "markdown", - "id": "7fb27b941602401d91542211134fc71a", "metadata": {}, "source": [ "# Coordinate Alignment in linopy\n", @@ -24,32 +23,30 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "acae54e37e7d407bbb7b55eff062a284", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.721284Z", - "start_time": 
"2026-02-20T12:33:11.943302Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:49.360029Z", - "iopub.status.busy": "2026-02-20T12:32:49.359903Z", - "iopub.status.idle": "2026-02-20T12:32:50.514585Z", - "shell.execute_reply": "2026-02-20T12:32:50.514115Z" + "iopub.execute_input": "2026-02-20T12:35:53.150316Z", + "iopub.status.busy": "2026-02-20T12:35:53.150100Z", + "iopub.status.idle": "2026-02-20T12:35:54.105967Z", + "shell.execute_reply": "2026-02-20T12:35:54.105432Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.193551Z", + "start_time": "2026-02-20T12:36:56.190913Z" } }, - "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", "\n", "import linopy" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "9a63283cbaf04dbcab1f6479b197f3a8", "metadata": {}, "source": [ "## What works by default" @@ -57,21 +54,18 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "8dd0d8092fe74a7c96281538738b07e2", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.760105Z", - "start_time": "2026-02-20T12:33:12.724924Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.516166Z", - "iopub.status.busy": "2026-02-20T12:32:50.515988Z", - "iopub.status.idle": "2026-02-20T12:32:50.558987Z", - "shell.execute_reply": "2026-02-20T12:32:50.558720Z" + "iopub.execute_input": "2026-02-20T12:35:54.110532Z", + "iopub.status.busy": "2026-02-20T12:35:54.109029Z", + "iopub.status.idle": "2026-02-20T12:35:54.164335Z", + "shell.execute_reply": "2026-02-20T12:35:54.163789Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.215580Z", + "start_time": "2026-02-20T12:36:56.207497Z" } }, - "outputs": [], "source": [ "m = linopy.Model()\n", "\n", @@ -81,125 +75,121 @@ "x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", "y = m.add_variables(lower=0, coords=[time], name=\"y\")\n", "gen = m.add_variables(lower=0, coords=[time, techs], 
name=\"gen\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "72eea5119410473aa328ad9291626812", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.772918Z", - "start_time": "2026-02-20T12:33:12.764952Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.560447Z", - "iopub.status.busy": "2026-02-20T12:32:50.560319Z", - "iopub.status.idle": "2026-02-20T12:32:50.568613Z", - "shell.execute_reply": "2026-02-20T12:32:50.568245Z" + "iopub.execute_input": "2026-02-20T12:35:54.166957Z", + "iopub.status.busy": "2026-02-20T12:35:54.166600Z", + "iopub.status.idle": "2026-02-20T12:35:54.185234Z", + "shell.execute_reply": "2026-02-20T12:35:54.184778Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.230513Z", + "start_time": "2026-02-20T12:36:56.222101Z" } }, - "outputs": [], "source": [ "# Addition/subtraction — matching coordinates\n", "x + y" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "8edb47106e1a46a883d545849b8ab81b", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.783370Z", - "start_time": "2026-02-20T12:33:12.777304Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.569890Z", - "iopub.status.busy": "2026-02-20T12:32:50.569796Z", - "iopub.status.idle": "2026-02-20T12:32:50.575044Z", - "shell.execute_reply": "2026-02-20T12:32:50.574842Z" + "iopub.execute_input": "2026-02-20T12:35:54.187479Z", + "iopub.status.busy": "2026-02-20T12:35:54.187284Z", + "iopub.status.idle": "2026-02-20T12:35:54.197488Z", + "shell.execute_reply": "2026-02-20T12:35:54.197090Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.241644Z", + "start_time": "2026-02-20T12:36:56.235473Z" } }, - "outputs": [], "source": [ "# Multiplication — matching coordinates\n", "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", "x * factor" - ] + ], + "outputs": 
[], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "10185d26023b46108eb7d9f57d49d2b3", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.795728Z", - "start_time": "2026-02-20T12:33:12.789249Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.576125Z", - "iopub.status.busy": "2026-02-20T12:32:50.576061Z", - "iopub.status.idle": "2026-02-20T12:32:50.581269Z", - "shell.execute_reply": "2026-02-20T12:32:50.581041Z" + "iopub.execute_input": "2026-02-20T12:35:54.199528Z", + "iopub.status.busy": "2026-02-20T12:35:54.199323Z", + "iopub.status.idle": "2026-02-20T12:35:54.210352Z", + "shell.execute_reply": "2026-02-20T12:35:54.209978Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.253971Z", + "start_time": "2026-02-20T12:36:56.246880Z" } }, - "outputs": [], "source": [ "# Multiplication — partial overlap gives intersection\n", "partial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", "x * partial # result: time 0, 1, 2 only" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "8763a12b2bbd4a93a75aff182afb95dc", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.805925Z", - "start_time": "2026-02-20T12:33:12.798771Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.582245Z", - "iopub.status.busy": "2026-02-20T12:32:50.582185Z", - "iopub.status.idle": "2026-02-20T12:32:50.587327Z", - "shell.execute_reply": "2026-02-20T12:32:50.587163Z" + "iopub.execute_input": "2026-02-20T12:35:54.212115Z", + "iopub.status.busy": "2026-02-20T12:35:54.211953Z", + "iopub.status.idle": "2026-02-20T12:35:54.223732Z", + "shell.execute_reply": "2026-02-20T12:35:54.223319Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.267382Z", + "start_time": "2026-02-20T12:36:56.259835Z" } }, - "outputs": [], "source": [ "# Multiplication — different dims broadcast (expands the 
expression)\n", "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", "x * cost # result: (time, tech)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "7623eae2785240b9bd12b16a66d81610", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.823606Z", - "start_time": "2026-02-20T12:33:12.811410Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.588398Z", - "iopub.status.busy": "2026-02-20T12:32:50.588330Z", - "iopub.status.idle": "2026-02-20T12:32:50.598610Z", - "shell.execute_reply": "2026-02-20T12:32:50.598402Z" + "iopub.execute_input": "2026-02-20T12:35:54.225717Z", + "iopub.status.busy": "2026-02-20T12:35:54.225519Z", + "iopub.status.idle": "2026-02-20T12:35:54.247553Z", + "shell.execute_reply": "2026-02-20T12:35:54.247125Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.305476Z", + "start_time": "2026-02-20T12:36:56.292Z" } }, - "outputs": [], "source": [ "# Constraints — RHS with fewer dims broadcasts naturally\n", "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", "m.add_constraints(gen <= capacity, name=\"cap\") # capacity broadcasts over time" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "7cdc8c89c7104fffa095e18ddfef8986", "metadata": {}, "source": [ "## What raises an error" @@ -207,21 +197,18 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "b118ea5561624da68c537baed56e602f", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.840568Z", - "start_time": "2026-02-20T12:33:12.834424Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.599634Z", - "iopub.status.busy": "2026-02-20T12:32:50.599571Z", - "iopub.status.idle": "2026-02-20T12:32:50.605548Z", - "shell.execute_reply": "2026-02-20T12:32:50.605322Z" + "iopub.execute_input": "2026-02-20T12:35:54.249529Z", + "iopub.status.busy": 
"2026-02-20T12:35:54.249355Z", + "iopub.status.idle": "2026-02-20T12:35:54.260588Z", + "shell.execute_reply": "2026-02-20T12:35:54.259868Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.319773Z", + "start_time": "2026-02-20T12:36:56.312636Z" } }, - "outputs": [], "source": [ "# Addition with mismatched coordinates\n", "y_short = m.add_variables(\n", @@ -232,25 +219,24 @@ " x + y_short # time coords don't match\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "938c804e27f84196a10c8828c723f798", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.847760Z", - "start_time": "2026-02-20T12:33:12.843647Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.606568Z", - "iopub.status.busy": "2026-02-20T12:32:50.606506Z", - "iopub.status.idle": "2026-02-20T12:32:50.609742Z", - "shell.execute_reply": "2026-02-20T12:32:50.609552Z" + "iopub.execute_input": "2026-02-20T12:35:54.262548Z", + "iopub.status.busy": "2026-02-20T12:35:54.262376Z", + "iopub.status.idle": "2026-02-20T12:35:54.268753Z", + "shell.execute_reply": "2026-02-20T12:35:54.268391Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.331386Z", + "start_time": "2026-02-20T12:36:56.326247Z" } }, - "outputs": [], "source": [ "# Addition with extra dimensions on the constant\n", "profile = xr.DataArray(\n", @@ -260,25 +246,24 @@ " x + profile # would duplicate x[t] across techs\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "504fb2a444614c0babb325280ed9130a", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.855699Z", - "start_time": "2026-02-20T12:33:12.851877Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.610738Z", - "iopub.status.busy": "2026-02-20T12:32:50.610665Z", - 
"iopub.status.idle": "2026-02-20T12:32:50.614493Z", - "shell.execute_reply": "2026-02-20T12:32:50.614335Z" + "iopub.execute_input": "2026-02-20T12:35:54.270585Z", + "iopub.status.busy": "2026-02-20T12:35:54.270420Z", + "iopub.status.idle": "2026-02-20T12:35:54.277993Z", + "shell.execute_reply": "2026-02-20T12:35:54.276363Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.350503Z", + "start_time": "2026-02-20T12:36:56.343806Z" } }, - "outputs": [], "source": [ "# Multiplication with zero overlap\n", "z = m.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", @@ -287,25 +272,24 @@ " z * factor # z has time 5-9, factor has time 0-4 — no intersection\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "59bbdb311c014d738909a11f9e486628", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.864666Z", - "start_time": "2026-02-20T12:33:12.860577Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.615336Z", - "iopub.status.busy": "2026-02-20T12:32:50.615276Z", - "iopub.status.idle": "2026-02-20T12:32:50.618275Z", - "shell.execute_reply": "2026-02-20T12:32:50.618094Z" + "iopub.execute_input": "2026-02-20T12:35:54.281858Z", + "iopub.status.busy": "2026-02-20T12:35:54.281316Z", + "iopub.status.idle": "2026-02-20T12:35:54.287843Z", + "shell.execute_reply": "2026-02-20T12:35:54.287269Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.361211Z", + "start_time": "2026-02-20T12:36:56.356813Z" } }, - "outputs": [], "source": [ "# Constraint RHS with mismatched coordinates\n", "partial_rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", @@ -314,25 +298,24 @@ " x <= partial_rhs\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": 
"b43b363d81ae4b689946ece5c682cd59", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.874035Z", - "start_time": "2026-02-20T12:33:12.869466Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.619169Z", - "iopub.status.busy": "2026-02-20T12:32:50.619111Z", - "iopub.status.idle": "2026-02-20T12:32:50.622842Z", - "shell.execute_reply": "2026-02-20T12:32:50.622654Z" + "iopub.execute_input": "2026-02-20T12:35:54.290439Z", + "iopub.status.busy": "2026-02-20T12:35:54.290235Z", + "iopub.status.idle": "2026-02-20T12:35:54.302535Z", + "shell.execute_reply": "2026-02-20T12:35:54.302145Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.385743Z", + "start_time": "2026-02-20T12:36:56.380702Z" } }, - "outputs": [], "source": [ "# Constraint RHS with extra dimensions\n", "w = m.add_variables(lower=0, coords=[techs], name=\"w\") # dims: (tech,)\n", @@ -343,72 +326,90 @@ " w <= rhs_2d # would create redundant constraints on w[tech]\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "8a65eabff63a45729fe45fb5ade58bdc", "metadata": {}, "source": [ - "## Positional alignment with `join=\"override\"`\n", + "## Positional alignment\n", "\n", - "A common pattern: two arrays with the same shape but different (or no) coordinate labels. Use `join=\"override\"` to align by position, ignoring labels." + "A common pattern: two arrays with the same shape but different (or no) coordinate labels. 
The cleanest fix is to relabel one operand with `.assign_coords()` so that coordinates match explicitly:" ] }, { "cell_type": "code", - "execution_count": null, - "id": "c3933fab20d04ec698c2621248eb3be0", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.887673Z", - "start_time": "2026-02-20T12:33:12.877435Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.623830Z", - "iopub.status.busy": "2026-02-20T12:32:50.623775Z", - "iopub.status.idle": "2026-02-20T12:32:50.632476Z", - "shell.execute_reply": "2026-02-20T12:32:50.632321Z" + "iopub.execute_input": "2026-02-20T12:35:54.304505Z", + "iopub.status.busy": "2026-02-20T12:35:54.304317Z", + "iopub.status.idle": "2026-02-20T12:35:54.322551Z", + "shell.execute_reply": "2026-02-20T12:35:54.322153Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:37:36.671817Z", + "start_time": "2026-02-20T12:37:36.662325Z" } }, - "outputs": [], "source": [ "m2 = linopy.Model()\n", "\n", "a = m2.add_variables(coords=[[\"x\", \"y\", \"z\"]], name=\"a\")\n", "b = m2.add_variables(coords=[[\"p\", \"q\", \"r\"]], name=\"b\")\n", "\n", - "# a + b fails because labels don't match\n", - "# join=\"override\" aligns by position and keeps left operand's labels\n", - "a.add(b, join=\"override\")" - ] + "# Relabel b's coordinates to match a, then add normally\n", + "a + b.assign_coords(dim_0=a.coords[\"dim_0\"])" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "4dd4641cc4064e0191573fe9c69df29b", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.905207Z", - "start_time": "2026-02-20T12:33:12.899976Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.633382Z", - "iopub.status.busy": "2026-02-20T12:32:50.633328Z", - "iopub.status.idle": "2026-02-20T12:32:50.637298Z", - "shell.execute_reply": "2026-02-20T12:32:50.637123Z" + "iopub.execute_input": "2026-02-20T12:35:54.324642Z", + "iopub.status.busy": 
"2026-02-20T12:35:54.324465Z", + "iopub.status.idle": "2026-02-20T12:35:54.332579Z", + "shell.execute_reply": "2026-02-20T12:35:54.332088Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.424015Z", + "start_time": "2026-02-20T12:36:56.418311Z" } }, - "outputs": [], "source": [ "# Same for constraints\n", "rhs = xr.DataArray([1.0, 2.0, 3.0], dims=[\"dim_0\"], coords={\"dim_0\": [\"p\", \"q\", \"r\"]})\n", - "a.to_linexpr().le(rhs, join=\"override\")" - ] + "a <= rhs.assign_coords(dim_0=a.coords[\"dim_0\"])" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": { + "execution": { + "iopub.execute_input": "2026-02-20T12:35:54.336196Z", + "iopub.status.busy": "2026-02-20T12:35:54.335947Z", + "iopub.status.idle": "2026-02-20T12:35:54.360683Z", + "shell.execute_reply": "2026-02-20T12:35:54.359622Z" + }, + "ExecuteTime": { + "end_time": "2026-02-20T12:36:56.441516Z", + "start_time": "2026-02-20T12:36:56.432774Z" + } + }, + "source": [ + "# Shorthand: join=\"override\" does the same (positional match, keeps left labels)\n", + "a.add(b, join=\"override\")" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "8309879909854d7188b41380fd92a7c3", "metadata": {}, "source": [ "## Other join modes\n", @@ -427,21 +428,18 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "3ed186c9a28b402fb0bc4494df01f08d", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-20T12:33:12.934067Z", - "start_time": "2026-02-20T12:33:12.909515Z" - }, "execution": { - "iopub.execute_input": "2026-02-20T12:32:50.638240Z", - "iopub.status.busy": "2026-02-20T12:32:50.638182Z", - "iopub.status.idle": "2026-02-20T12:32:50.659703Z", - "shell.execute_reply": "2026-02-20T12:32:50.659485Z" + "iopub.execute_input": "2026-02-20T12:35:54.363885Z", + "iopub.status.busy": "2026-02-20T12:35:54.363642Z", + "iopub.status.idle": "2026-02-20T12:35:54.404550Z", + "shell.execute_reply": "2026-02-20T12:35:54.403860Z" + }, + 
"ExecuteTime": { + "end_time": "2026-02-20T12:36:56.472328Z", + "start_time": "2026-02-20T12:36:56.446352Z" } }, - "outputs": [], "source": [ "i_a = pd.Index([0, 1, 2], name=\"i\")\n", "i_b = pd.Index([1, 2, 3], name=\"i\")\n", @@ -453,11 +451,12 @@ "print(\"outer:\", list(a.add(b, join=\"outer\").coords[\"i\"].values)) # [0, 1, 2, 3]\n", "print(\"left: \", list(a.add(b, join=\"left\").coords[\"i\"].values)) # [0, 1, 2]\n", "print(\"right:\", list(a.add(b, join=\"right\").coords[\"i\"].values)) # [1, 2, 3]" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "cb1e1581032b452c9409d6c6813c49d1", "metadata": {}, "source": [ "## Migrating from previous versions\n", From e7ea997217e9e2ebc5c88af91996b482d933af20 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 20 Feb 2026 13:51:38 +0100 Subject: [PATCH 17/66] All join="override" usages in tests have been replaced with assign_coords. Here's what changed: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - test_linear_expression_sum / test_linear_expression_sum_with_const: v.loc[:9].add(v.loc[10:], join="override") → v.loc[:9] + v.loc[10:].assign_coords(dim_2=v.loc[:9].coords["dim_2"]) - test_add_join_override → test_add_positional_assign_coords: uses v + disjoint.assign_coords(...) - test_add_constant_join_override → test_add_constant_positional: now uses different coords [5,6,7] + assign_coords to make the test meaningful - test_same_shape_add_join_override → test_same_shape_add_assign_coords: uses + c.to_linexpr().assign_coords(...) - test_add_constant_override_positional → test_add_constant_positional_different_coords: expr + other.assign_coords(...) - test_sub_constant_override → test_sub_constant_positional: expr - other.assign_coords(...) - test_mul_constant_override_positional → test_mul_constant_positional: expr * other.assign_coords(...) 
- test_div_constant_override_positional → test_div_constant_positional: expr / other.assign_coords(...) - test_variable_mul_override → test_variable_mul_positional: a * other.assign_coords(...) - test_variable_div_override → test_variable_div_positional: a / other.assign_coords(...) - test_add_same_coords_all_joins: removed "override" from loop, added assign_coords variant - test_add_scalar_with_explicit_join → test_add_scalar: simplified to expr + 10 --- test/test_linear_expression.py | 56 ++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index ea81bb2b..2ced61a0 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -445,8 +445,8 @@ def test_linear_expression_sum( with pytest.raises(ValueError, match="exact"): v.loc[:9] + v.loc[10:] - # explicit outer join gives union - expr = v.loc[:9].add(v.loc[10:], join="override") + # positional alignment via assign_coords + expr = v.loc[:9] + v.loc[10:].assign_coords(dim_2=v.loc[:9].coords["dim_2"]) assert expr.nterm == 2 assert len(expr.coords["dim_2"]) == 10 @@ -473,8 +473,8 @@ def test_linear_expression_sum_with_const( with pytest.raises(ValueError, match="exact"): v.loc[:9] + v.loc[10:] - # explicit outer join gives union - expr = v.loc[:9].add(v.loc[10:], join="override") + # positional alignment via assign_coords + expr = v.loc[:9] + v.loc[10:].assign_coords(dim_2=v.loc[:9].coords["dim_2"]) assert expr.nterm == 2 assert len(expr.coords["dim_2"]) == 10 @@ -816,11 +816,11 @@ def test_add_join_outer(self, v: Variable, subset: xr.DataArray) -> None: assert result.const.sel(dim_2=1).item() == 10.0 assert result.const.sel(dim_2=0).item() == 0.0 - def test_add_join_override(self, v: Variable) -> None: + def test_add_positional_assign_coords(self, v: Variable) -> None: disjoint = xr.DataArray( np.ones(20), dims=["dim_2"], coords={"dim_2": range(50, 70)} ) - result = v.add(disjoint, 
join="override") + result = v + disjoint.assign_coords(dim_2=v.coords["dim_2"]) assert result.sizes["dim_2"] == 20 assert list(result.coords["dim_2"].values) == list(range(20)) @@ -1829,10 +1829,10 @@ def test_add_constant_join_outer(self, a: Variable) -> None: result = a.to_linexpr().add(const, join="outer") assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: + def test_add_constant_positional(self, a: Variable) -> None: expr = a.to_linexpr() - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [0, 1, 2]}) - result = expr.add(const, join="override") + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr + const.assign_coords(i=expr.coords["i"]) assert list(result.data.indexes["i"]) == [0, 1, 2] assert (result.const.values == const.values).all() @@ -1889,8 +1889,8 @@ def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="inner") assert list(result.data.indexes["i"]) == [1, 2] - def test_same_shape_add_join_override(self, a: Variable, c: Variable) -> None: - result = a.to_linexpr().add(c.to_linexpr(), join="override") + def test_same_shape_add_assign_coords(self, a: Variable, c: Variable) -> None: + result = a.to_linexpr() + c.to_linexpr().assign_coords(i=a.coords["i"]) assert list(result.data.indexes["i"]) == [0, 1, 2] def test_add_expr_outer_const_values(self, a: Variable, b: Variable) -> None: @@ -1928,17 +1928,17 @@ def test_add_constant_inner_fill_values(self, a: Variable) -> None: assert list(result.coords["i"].values) == [1] assert result.const.sel(i=1).item() == 15 - def test_add_constant_override_positional(self, a: Variable) -> None: + def test_add_constant_positional_different_coords(self, a: Variable) -> None: expr = 1 * a + 5 other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.add(other, join="override") + 
result = expr + other.assign_coords(i=expr.coords["i"]) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.const.values, [15, 25, 35]) - def test_sub_constant_override(self, a: Variable) -> None: + def test_sub_constant_positional(self, a: Variable) -> None: expr = 1 * a + 5 other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.sub(other, join="override") + result = expr - other.assign_coords(i=expr.coords["i"]) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.const.values, [-5, -15, -25]) @@ -1952,10 +1952,10 @@ def test_sub_expr_outer_const_values(self, a: Variable, b: Variable) -> None: assert result.const.sel(i=2).item() == -5 assert result.const.sel(i=3).item() == -10 - def test_mul_constant_override_positional(self, a: Variable) -> None: + def test_mul_constant_positional(self, a: Variable) -> None: expr = 1 * a + 5 other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.mul(other, join="override") + result = expr * other.assign_coords(i=expr.coords["i"]) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.const.values, [10, 15, 20]) np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) @@ -1972,10 +1972,10 @@ def test_mul_constant_outer_fill_values(self, a: Variable) -> None: assert result.coeffs.squeeze().sel(i=1).item() == 2 assert result.coeffs.squeeze().sel(i=0).item() == 0 - def test_div_constant_override_positional(self, a: Variable) -> None: + def test_div_constant_positional(self, a: Variable) -> None: expr = 1 * a + 10 other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.div(other, join="override") + result = expr / other.assign_coords(i=expr.coords["i"]) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.const.values, [5.0, 2.0, 1.0]) @@ -1999,16 +1999,16 @@ def 
test_variable_add_outer_values(self, a: Variable, b: Variable) -> None: assert set(result.coords["i"].values) == {0, 1, 2, 3} assert result.nterm == 2 - def test_variable_mul_override(self, a: Variable) -> None: + def test_variable_mul_positional(self, a: Variable) -> None: other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) - result = a.mul(other, join="override") + result = a * other.assign_coords(i=a.coords["i"]) assert isinstance(result, LinearExpression) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - def test_variable_div_override(self, a: Variable) -> None: + def test_variable_div_positional(self, a: Variable) -> None: other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) - result = a.div(other, join="override") + result = a / other.assign_coords(i=a.coords["i"]) assert isinstance(result, LinearExpression) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_almost_equal( @@ -2022,14 +2022,18 @@ def test_merge_outer_join(self, a: Variable, b: Variable) -> None: def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: expr_a = 1 * a + 5 const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) - for join in ["override", "outer", "inner"]: + for join in ["outer", "inner"]: result = expr_a.add(const, join=join) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.const.values, [6, 7, 8]) + # assign_coords also works when coords already match + result = expr_a + const.assign_coords(i=expr_a.coords["i"]) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [6, 7, 8]) - def test_add_scalar_with_explicit_join(self, a: Variable) -> None: + def test_add_scalar(self, a: Variable) -> None: expr = 1 * a + 5 - result = expr.add(10, join="override") + result = expr + 10 
np.testing.assert_array_equal(result.const.values, [15, 15, 15]) assert list(result.coords["i"].values) == [0, 1, 2] From 533092dfde430923400f188ab7591a801388cc89 Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 4 Mar 2026 07:47:20 +0100 Subject: [PATCH 18/66] remove pandas reindexing warning --- linopy/common.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/linopy/common.py b/linopy/common.py index 746459b4..09f67355 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -161,26 +161,6 @@ def pandas_to_dataarray( axis.name or get_from_iterable(dims, i) or f"dim_{i}" for i, axis in enumerate(arr.axes) ] - if coords is not None: - pandas_coords = dict(zip(dims, arr.axes)) - if isinstance(coords, Sequence): - coords = dict(zip(dims, coords)) - shared_dims = set(pandas_coords.keys()) & set(coords.keys()) - non_aligned = [] - for dim in shared_dims: - coord = coords[dim] - if not isinstance(coord, pd.Index): - coord = pd.Index(coord) - if not pandas_coords[dim].equals(coord): - non_aligned.append(dim) - if any(non_aligned): - warn( - f"coords for dimension(s) {non_aligned} is not aligned with the pandas object. " - "Previously, the indexes of the pandas were ignored and overwritten in " - "these cases. Now, the pandas object's coordinates are taken considered" - " for alignment." 
- ) - return DataArray(arr, coords=None, dims=dims, **kwargs) From 9051f230bc53d12924097a3db010b8d82865556e Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 4 Mar 2026 08:47:09 +0100 Subject: [PATCH 19/66] Fix mypy errors: type ignores for xr.align/merge, match override signature, add test type hints --- linopy/expressions.py | 11 ++++++++--- test/test_constraints.py | 8 +++++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index e1fbe1a9..39c45c16 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -562,7 +562,10 @@ def _align_constant( return self.const, other.assign_coords(coords=self.coords), False else: self_const, aligned = xr.align( - self.const, other, join=join, fill_value=fill_value + self.const, + other, + join=join, + fill_value=fill_value, # type: ignore[call-overload] ) return self_const, aligned, True @@ -675,7 +678,7 @@ def add( self, QuadraticExpression ): other = other.to_quadexpr() - return merge([self, other], cls=self.__class__, join=join) + return merge([self, other], cls=self.__class__, join=join) # type: ignore[list-item] def sub( self: GenericExpression, @@ -2130,7 +2133,9 @@ def solution(self) -> DataArray: sol = (self.coeffs * vals.prod(FACTOR_DIM)).sum(TERM_DIM) + self.const return sol.rename("solution") - def to_constraint(self, sign: SignLike, rhs: SideLike) -> NotImplementedType: + def to_constraint( + self, sign: SignLike, rhs: SideLike, join: str | None = None + ) -> NotImplementedType: raise NotImplementedError( "Quadratic expressions cannot be used in constraints." 
) diff --git a/test/test_constraints.py b/test/test_constraints.py index e5da08d4..332101ab 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -5,6 +5,8 @@ @author: fabulous """ +from typing import Any + import dask import dask.array.core import numpy as np @@ -155,7 +157,7 @@ def test_constraint_assignment_with_reindex() -> None: ), ], ) -def test_constraint_rhs_lower_dim(rhs_factory) -> None: +def test_constraint_rhs_lower_dim(rhs_factory: Any) -> None: m = Model() naxis = np.arange(10, dtype=float) maxis = np.arange(10).astype(str) @@ -177,7 +179,7 @@ def test_constraint_rhs_lower_dim(rhs_factory) -> None: pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), ], ) -def test_constraint_rhs_higher_dim_constant_raises(rhs_factory) -> None: +def test_constraint_rhs_higher_dim_constant_raises(rhs_factory: Any) -> None: m = Model() x = m.add_variables(coords=[range(5)], name="x") @@ -198,7 +200,7 @@ def test_constraint_rhs_higher_dim_constant_raises(rhs_factory) -> None: ), ], ) -def test_constraint_rhs_higher_dim_expression(rhs_factory) -> None: +def test_constraint_rhs_higher_dim_expression(rhs_factory: Any) -> None: m = Model() x = m.add_variables(coords=[range(5)], name="x") From 648053bab0d286516adaa38efaab4cf188bc911a Mon Sep 17 00:00:00 2001 From: Fabian Date: Wed, 4 Mar 2026 09:03:29 +0100 Subject: [PATCH 20/66] remove outdated warning tests --- test/test_common.py | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/test/test_common.py b/test/test_common.py index 267fbf76..efade41a 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -96,17 +96,6 @@ def test_as_dataarray_with_series_dims_superset() -> None: assert list(da.coords[target_dim].values) == target_index -def test_as_dataarray_with_series_override_coords() -> None: - target_dim = "dim_0" - target_index = ["a", "b", "c"] - s = pd.Series([1, 2, 3], index=target_index) - with pytest.warns(UserWarning): - da = as_dataarray(s, 
coords=[[1, 2, 3]]) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - def test_as_dataarray_with_series_aligned_coords() -> None: """This should not give out a warning even though coords are given.""" target_dim = "dim_0" @@ -214,19 +203,6 @@ def test_as_dataarray_dataframe_dims_superset() -> None: assert list(da.coords[target_dims[1]].values) == target_columns -def test_as_dataarray_dataframe_override_coords() -> None: - target_dims = ("dim_0", "dim_1") - target_index = ["a", "b"] - target_columns = ["A", "B"] - df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) - with pytest.warns(UserWarning): - da = as_dataarray(df, coords=[[1, 2], [2, 3]]) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - def test_as_dataarray_dataframe_aligned_coords() -> None: """This should not give out a warning even though coords are given.""" target_dims = ("dim_0", "dim_1") From 27cea32a68ea6f03878ddd26980017c951458e91 Mon Sep 17 00:00:00 2001 From: Fabian Date: Thu, 5 Mar 2026 07:25:24 +0100 Subject: [PATCH 21/66] reintroduce expansions of extra rhs dims, fix multiindex alignment --- linopy/expressions.py | 8 +++++--- linopy/model.py | 2 +- test/test_constraints.py | 14 ++++++++++---- test/test_linear_expression.py | 10 ++++++---- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 39c45c16..fc7e9539 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -687,6 +687,7 @@ def sub( ) -> GenericExpression | QuadraticExpression: """ Subtract others from expression. 
+ > Parameters ---------- @@ -1081,9 +1082,10 @@ def to_constraint( if isinstance(rhs, DataArray): extra_dims = set(rhs.dims) - set(self.coord_dims) if extra_dims: - raise ValueError( - f"RHS DataArray has dimensions {extra_dims} not present " - f"in the expression. Cannot create constraint." + logger.warning( + f"Constant RHS contains dimensions {extra_dims} not present " + f"in the expression, which might lead to inefficiencies. " + f"Consider collapsing the dimensions by taking min/max." ) rhs = rhs.reindex_like(self.const, fill_value=np.nan) elif isinstance(rhs, np.ndarray | pd.Series | pd.DataFrame) and rhs.ndim > len( diff --git a/linopy/model.py b/linopy/model.py index 05870d9a..a39c19e5 100644 --- a/linopy/model.py +++ b/linopy/model.py @@ -785,7 +785,7 @@ def add_constraints( rhs_nan = data.rhs.isnull() if rhs_nan.any(): - data["rhs"] = data.rhs.fillna(0) + data = assign_multiindex_safe(data, rhs=data.rhs.fillna(0)) rhs_mask = ~rhs_nan mask = ( rhs_mask diff --git a/test/test_constraints.py b/test/test_constraints.py index 332101ab..9c8c8ff1 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -172,10 +172,6 @@ def test_constraint_rhs_lower_dim(rhs_factory: Any) -> None: "rhs_factory", [ pytest.param(lambda m: np.ones((5, 3)), id="numpy"), - pytest.param( - lambda m: xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]), - id="dataarray", - ), pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), ], ) @@ -187,6 +183,16 @@ def test_constraint_rhs_higher_dim_constant_raises(rhs_factory: Any) -> None: m.add_constraints(x >= rhs_factory(m)) +def test_constraint_rhs_higher_dim_dataarray_reindexes() -> None: + """DataArray RHS with extra dims reindexes to expression coords (no raise).""" + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + rhs = xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]) + + c = m.add_constraints(x >= rhs) + assert c.shape == (5, 3) + + @pytest.mark.parametrize( "rhs_factory", [ diff 
--git a/test/test_linear_expression.py b/test/test_linear_expression.py index 2af1a8ea..612d6824 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -881,12 +881,13 @@ def test_multidim_subset_add(self, m: Model) -> None: assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) - def test_constraint_rhs_extra_dims_raises(self, v: Variable) -> None: + def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: + """DataArray RHS with extra dims logs a warning and broadcasts.""" rhs = xr.DataArray( [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} ) - with pytest.raises(ValueError, match="not present in the expression"): - v <= rhs + c = v <= rhs + assert "extra" in c.dims def test_da_truediv_var_raises(self, v: Variable) -> None: da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) @@ -927,13 +928,14 @@ def test_subset_constraint_solve_integration(self) -> None: if not available_solvers: pytest.skip("No solver available") + solver = "highs" if "highs" in available_solvers else available_solvers[0] m = Model() coords = pd.RangeIndex(5, name="i") x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) m.add_constraints(x <= subset_ub, name="subset_ub") m.add_objective(x.sum(), sense="max") - m.solve(solver_name=available_solvers[0]) + m.solve(solver_name=solver) sol = m.solution["x"] assert sol.sel(i=1).item() == pytest.approx(10.0) assert sol.sel(i=3).item() == pytest.approx(20.0) From 8259bf8661bd18de3fc8d8ad174dbd30b181289a Mon Sep 17 00:00:00 2001 From: Fabian Date: Thu, 5 Mar 2026 08:39:06 +0100 Subject: [PATCH 22/66] refactor test fixtures and use sign constants --- pyproject.toml | 1 + test/conftest.py | 41 ++ test/test_constraints.py | 96 ++- test/test_linear_expression.py | 1209 +++++++++++++++----------------- 4 files changed, 
702 insertions(+), 645 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index aaac2cf1..14a53a22 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -159,6 +159,7 @@ ignore = [ 'D101', # Missing docstring in public class 'D102', # Missing docstring in public method 'D103', # Missing docstring in public function + 'D106', # Missing docstring in public nested class 'D107', # Missing docstring in __init__ 'D202', # No blank lines allowed after function docstring 'D203', # 1 blank line required before class docstring diff --git a/test/conftest.py b/test/conftest.py index 3197689b..b0a846ba 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -2,8 +2,11 @@ import os +import pandas as pd import pytest +from linopy import Model, Variable + def pytest_addoption(parser: pytest.Parser) -> None: """Add custom command line options.""" @@ -48,3 +51,41 @@ def pytest_collection_modifyitems( if solver_supports(solver, SolverFeature.GPU_ACCELERATION): item.add_marker(skip_gpu) item.add_marker(pytest.mark.gpu) + + +@pytest.fixture +def m() -> Model: + m = Model() + m.add_variables(pd.Series([0, 0]), 1, name="x") + m.add_variables(4, pd.Series([8, 10]), name="y") + m.add_variables(0, pd.DataFrame([[1, 2], [3, 4], [5, 6]]).T, name="z") + m.add_variables(coords=[pd.RangeIndex(20, name="dim_2")], name="v") + idx = pd.MultiIndex.from_product([[1, 2], ["a", "b"]], names=("level1", "level2")) + idx.name = "dim_3" + m.add_variables(coords=[idx], name="u") + return m + + +@pytest.fixture +def x(m: Model) -> Variable: + return m.variables["x"] + + +@pytest.fixture +def y(m: Model) -> Variable: + return m.variables["y"] + + +@pytest.fixture +def z(m: Model) -> Variable: + return m.variables["z"] + + +@pytest.fixture +def v(m: Model) -> Variable: + return m.variables["v"] + + +@pytest.fixture +def u(m: Model) -> Variable: + return m.variables["u"] diff --git a/test/test_constraints.py b/test/test_constraints.py index 9c8c8ff1..b8caa886 100644 --- a/test/test_constraints.py +++ 
b/test/test_constraints.py @@ -14,7 +14,7 @@ import pytest import xarray as xr -from linopy import EQUAL, GREATER_EQUAL, LESS_EQUAL, Model +from linopy import EQUAL, GREATER_EQUAL, LESS_EQUAL, Model, Variable, available_solvers from linopy.testing import assert_conequal # Test model functions @@ -341,3 +341,97 @@ def test_sanitize_infinities() -> None: m.add_constraints(x >= np.inf, name="con_wrong_inf") with pytest.raises(ValueError): m.add_constraints(y <= -np.inf, name="con_wrong_neg_inf") + + +class TestConstraintCoordinateAlignment: + @pytest.fixture + def subset(self) -> xr.DataArray: + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + + @pytest.fixture + def superset(self) -> xr.DataArray: + return xr.DataArray( + np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} + ) + + def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + con = v <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert con.rhs.sel(dim_2=3).item() == 30.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_var_comparison_subset( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = v <= subset + elif sign == GREATER_EQUAL: + con = v >= subset + else: + con = v == subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + expr = v + 5 + con = expr <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) + assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_subset_comparison_var( + self, 
v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = subset <= v + elif sign == GREATER_EQUAL: + con = subset >= v + else: + con = subset == v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert np.isnan(con.rhs.sel(dim_2=0).item()) + assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) + def test_superset_comparison_var( + self, v: Variable, superset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = superset <= v + else: + con = superset >= v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(con.lhs.coeffs.values).any() + assert not np.isnan(con.rhs.values).any() + + def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: + rhs = xr.DataArray( + [[1.0, 2.0]], + dims=["extra", "dim_2"], + coords={"dim_2": [0, 1]}, + ) + c = v <= rhs + assert "extra" in c.dims + + def test_subset_constraint_solve_integration(self) -> None: + if not available_solvers: + pytest.skip("No solver available") + solver = "highs" if "highs" in available_solvers else available_solvers[0] + m = Model() + coords = pd.RangeIndex(5, name="i") + x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") + subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) + m.add_constraints(x <= subset_ub, name="subset_ub") + m.add_objective(x.sum(), sense="max") + m.solve(solver_name=solver) + sol = m.solution["x"] + assert sol.sel(i=1).item() == pytest.approx(10.0) + assert sol.sel(i=3).item() == pytest.approx(20.0) + assert sol.sel(i=0).item() == pytest.approx(100.0) + assert sol.sel(i=2).item() == pytest.approx(100.0) + assert sol.sel(i=4).item() == pytest.approx(100.0) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 612d6824..0b7c16d7 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -23,46 +23,6 @@ from linopy.variables import 
ScalarVariable -@pytest.fixture -def m() -> Model: - m = Model() - - m.add_variables(pd.Series([0, 0]), 1, name="x") - m.add_variables(4, pd.Series([8, 10]), name="y") - m.add_variables(0, pd.DataFrame([[1, 2], [3, 4], [5, 6]]).T, name="z") - m.add_variables(coords=[pd.RangeIndex(20, name="dim_2")], name="v") - - idx = pd.MultiIndex.from_product([[1, 2], ["a", "b"]], names=("level1", "level2")) - idx.name = "dim_3" - m.add_variables(coords=[idx], name="u") - return m - - -@pytest.fixture -def x(m: Model) -> Variable: - return m.variables["x"] - - -@pytest.fixture -def y(m: Model) -> Variable: - return m.variables["y"] - - -@pytest.fixture -def z(m: Model) -> Variable: - return m.variables["z"] - - -@pytest.fixture -def v(m: Model) -> Variable: - return m.variables["v"] - - -@pytest.fixture -def u(m: Model) -> Variable: - return m.variables["u"] - - def test_empty_linexpr(m: Model) -> None: LinearExpression(None, m) @@ -577,7 +537,7 @@ def test_linear_expression_multiplication_invalid( expr / x -class TestSubsetCoordinateAlignment: +class TestCoordinateAlignment: @pytest.fixture def subset(self) -> xr.DataArray: return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) @@ -595,353 +555,292 @@ def expected_fill(self) -> np.ndarray: arr[3] = 30.0 return arr - def test_var_mul_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - result = v * subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - def test_expr_mul_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - expr = 1 * v - result = expr * subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - @pytest.mark.parametrize( - "make_lhs,make_rhs", - [ - 
(lambda v, s: s * v, lambda v, s: v * s), - (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), - (lambda v, s: s + v, lambda v, s: v + s), - (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), - ], - ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], - ) - def test_commutativity( - self, v: Variable, subset: xr.DataArray, make_lhs: Any, make_rhs: Any - ) -> None: - assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) - - def test_var_add_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - result = v + subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, expected_fill) - - def test_var_sub_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - result = v - subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, -expected_fill) - - def test_subset_sub_var(self, v: Variable, subset: xr.DataArray) -> None: - assert_linequal(subset - v, -v + subset) - - def test_expr_add_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - expr = v + 5 - result = expr + subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, expected_fill + 5) - - def test_expr_sub_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - expr = v + 5 - result = expr - subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, 5 - expected_fill) - - def test_subset_sub_expr(self, v: Variable, subset: xr.DataArray) -> None: - expr = v + 5 - assert_linequal(subset - expr, -(expr - subset)) - - def test_var_div_subset(self, v: 
Variable, subset: xr.DataArray) -> None: - result = v / subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) - assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) - - def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - con = v <= subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert con.rhs.sel(dim_2=3).item() == 30.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - @pytest.mark.parametrize("sign", ["<=", ">=", "=="]) - def test_var_comparison_subset( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - if sign == "<=": - con = v <= subset - elif sign == ">=": - con = v >= subset - else: - con = v == subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - expr = v + 5 - con = expr <= subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) - assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - def test_add_commutativity_full_coords(self, v: Variable) -> None: - full = xr.DataArray( - np.arange(20, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(20)}, - ) - assert_linequal(v + full, full + v) - - def test_superset_addition_pins_to_lhs( - self, v: Variable, superset: xr.DataArray - ) -> None: - result = v + superset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - - def test_superset_add_var(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset + v, v + superset) - - def test_superset_sub_var(self, v: Variable, superset: xr.DataArray) -> None: - 
assert_linequal(superset - v, -v + superset) - - def test_superset_mul_var(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset * v, v * superset) - - @pytest.mark.parametrize("sign", ["<=", ">="]) - def test_superset_comparison_var( - self, v: Variable, superset: xr.DataArray, sign: str - ) -> None: - if sign == "<=": - con = superset <= v - else: - con = superset >= v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(con.lhs.coeffs.values).any() - assert not np.isnan(con.rhs.values).any() - - def test_disjoint_addition_pins_to_lhs(self, v: Variable) -> None: - disjoint = xr.DataArray( - [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v + disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, np.zeros(20)) - - def test_expr_div_subset(self, v: Variable, subset: xr.DataArray) -> None: - expr = 1 * v - result = expr / subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) - assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) - - def test_subset_add_var_coefficients( - self, v: Variable, subset: xr.DataArray - ) -> None: - result = subset + v - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) - - def test_subset_sub_var_coefficients( - self, v: Variable, subset: xr.DataArray - ) -> None: - result = subset - v - np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) - - @pytest.mark.parametrize("sign", ["<=", ">=", "=="]) - def test_subset_comparison_var( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - if sign == "<=": - con = subset <= v - elif sign == ">=": - con = subset >= v - else: - con = subset == v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert 
np.isnan(con.rhs.sel(dim_2=0).item()) - assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) - - def test_superset_mul_pins_to_lhs( - self, v: Variable, superset: xr.DataArray - ) -> None: - result = v * superset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - - def test_superset_div_pins_to_lhs(self, v: Variable) -> None: - superset_nonzero = xr.DataArray( - np.arange(1, 26, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, + class TestSubset: + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_fills_zeros( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target * subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_subset_fills_zeros( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + if operand == "var": + result = v + subset + expected = expected_fill + else: + result = (v + 5) + subset + expected = expected_fill + 5 + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_subset_fills_negated( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + if operand == "var": + result = v - subset + expected = -expected_fill + else: + result = (v + 5) - subset + expected = 5 - expected_fill + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected) + + 
@pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_subset_inverts_nonzero( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else 1 * v + result = target / subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) + assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + + def test_subset_add_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset + v + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + def test_subset_sub_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset - v + np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) + + class TestSuperset: + def test_add_superset_pins_to_lhs_coords( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v + superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + + def test_add_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset + v, v + superset) + + def test_sub_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset - v, -v + superset) + + def test_mul_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset * v, v * superset) + + def test_mul_superset_pins_to_lhs_coords( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v * superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: + superset_nonzero = xr.DataArray( + np.arange(1, 26, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + result = v / superset_nonzero + assert 
result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + class TestDisjoint: + def test_add_disjoint_fills_zeros(self, v: Variable) -> None: + disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v + disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, np.zeros(20)) + + def test_mul_disjoint_fills_zeros(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v * disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) + + def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v / disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + class TestCommutativity: + @pytest.mark.parametrize( + "make_lhs,make_rhs", + [ + (lambda v, s: s * v, lambda v, s: v * s), + (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), + (lambda v, s: s + v, lambda v, s: v + s), + (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), + ], + ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], ) - result = v / superset_nonzero - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - - def test_quadexpr_add_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - qexpr = v * v - result = qexpr + subset - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - 
np.testing.assert_array_equal(result.const.values, expected_fill) - - def test_quadexpr_sub_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - qexpr = v * v - result = qexpr - subset - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, -expected_fill) - - def test_quadexpr_mul_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - qexpr = v * v - result = qexpr * subset - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - def test_subset_mul_quadexpr( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - qexpr = v * v - result = subset * qexpr - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: - qexpr = v * v - assert_quadequal(subset + qexpr, qexpr + subset) - - def test_multidim_subset_mul(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") - coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - result = w * subset_2d - assert result.sizes["a"] == 4 - assert result.sizes["b"] == 5 - assert not np.isnan(result.coeffs.values).any() - assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) - assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) - assert 
result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) - assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) - - def test_multidim_subset_add(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") - coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - result = w + subset_2d - assert result.sizes["a"] == 4 - assert result.sizes["b"] == 5 - assert not np.isnan(result.const.values).any() - assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0) - assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) - assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) - - def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: - """DataArray RHS with extra dims logs a warning and broadcasts.""" - rhs = xr.DataArray( - [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} - ) - c = v <= rhs - assert "extra" in c.dims - - def test_da_truediv_var_raises(self, v: Variable) -> None: - da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) - with pytest.raises(TypeError): - da / v # type: ignore[operator] - - def test_disjoint_mul_produces_zeros(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v * disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) - - def test_disjoint_div_preserves_coeffs(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v / disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, 
np.ones(20)) - - def test_da_eq_da_still_works(self) -> None: - da1 = xr.DataArray([1, 2, 3]) - da2 = xr.DataArray([1, 2, 3]) - result = da1 == da2 - assert result.values.all() - - def test_da_eq_scalar_still_works(self) -> None: - da = xr.DataArray([1, 2, 3]) - result = da == 2 - np.testing.assert_array_equal(result.values, [False, True, False]) - - def test_subset_constraint_solve_integration(self) -> None: - from linopy import available_solvers - - if not available_solvers: - pytest.skip("No solver available") - solver = "highs" if "highs" in available_solvers else available_solvers[0] - m = Model() - coords = pd.RangeIndex(5, name="i") - x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") - subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) - m.add_constraints(x <= subset_ub, name="subset_ub") - m.add_objective(x.sum(), sense="max") - m.solve(solver_name=solver) - sol = m.solution["x"] - assert sol.sel(i=1).item() == pytest.approx(10.0) - assert sol.sel(i=3).item() == pytest.approx(20.0) - assert sol.sel(i=0).item() == pytest.approx(100.0) - assert sol.sel(i=2).item() == pytest.approx(100.0) - assert sol.sel(i=4).item() == pytest.approx(100.0) + def test_commutativity( + self, + v: Variable, + subset: xr.DataArray, + make_lhs: Any, + make_rhs: Any, + ) -> None: + assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) + + def test_sub_var_anticommutative( + self, v: Variable, subset: xr.DataArray + ) -> None: + assert_linequal(subset - v, -v + subset) + + def test_sub_expr_anticommutative( + self, v: Variable, subset: xr.DataArray + ) -> None: + expr = v + 5 + assert_linequal(subset - expr, -(expr - subset)) + + def test_add_commutativity_full_coords(self, v: Variable) -> None: + full = xr.DataArray( + np.arange(20, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(20)}, + ) + assert_linequal(v + full, full + v) + + class TestQuadratic: + def test_quadexpr_add_subset( + self, + v: Variable, + subset: xr.DataArray, + 
expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr + subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected_fill) + + def test_quadexpr_sub_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr - subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, -expected_fill) + + def test_quadexpr_mul_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr * subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_mul_quadexpr( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = subset * qexpr + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: + qexpr = v * v + assert_quadequal(subset + qexpr, qexpr + subset) + + class TestMultiDim: + def test_multidim_subset_mul(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w * subset_2d + assert 
result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) + assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) + + def test_multidim_subset_add(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w + subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.const.values).any() + assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) + + class TestXarrayCompat: + def test_da_eq_da_still_works(self) -> None: + da1 = xr.DataArray([1, 2, 3]) + da2 = xr.DataArray([1, 2, 3]) + result = da1 == da2 + assert result.values.all() + + def test_da_eq_scalar_still_works(self) -> None: + da = xr.DataArray([1, 2, 3]) + result = da == 2 + np.testing.assert_array_equal(result.values, [False, True, False]) + + def test_da_truediv_var_raises(self, v: Variable) -> None: + da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) + with pytest.raises(TypeError): + da / v # type: ignore[operator] def test_expression_inherited_properties(x: Variable, y: Variable) -> None: @@ -1791,263 +1690,285 @@ def b(self, m2: Model) -> Variable: def c(self, m2: Model) -> Variable: return m2.variables["c"] - def test_add_join_none_preserves_default(self, a: Variable, b: Variable) -> None: - result_default = a.to_linexpr() + b.to_linexpr() - result_none = 
a.to_linexpr().add(b.to_linexpr(), join=None) - assert_linequal(result_default, result_none) - - def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_add_expr_join_outer(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_add_expr_join_left(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="left") - assert list(result.data.indexes["i"]) == [0, 1, 2] - - def test_add_expr_join_right(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="right") - assert list(result.data.indexes["i"]) == [1, 2, 3] - - def test_add_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_add_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().add(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: - expr = a.to_linexpr() - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [0, 1, 2]}) - result = expr.add(const, join="override") - assert list(result.data.indexes["i"]) == [0, 1, 2] - assert (result.const.values == const.values).all() - - def test_sub_expr_join_inner(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().sub(b.to_linexpr(), join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_mul_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = 
a.to_linexpr().mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_mul_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().mul(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - assert result.coeffs.sel(i=0).item() == 0 - assert result.coeffs.sel(i=1).item() == 2 - assert result.coeffs.sel(i=2).item() == 3 - - def test_div_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().div(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_div_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().div(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_variable_add_join(self, a: Variable, b: Variable) -> None: - result = a.add(b, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_sub_join(self, a: Variable, b: Variable) -> None: - result = a.sub(b, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_mul_join(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_div_join(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.div(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_mul_expr_with_join_raises(self, a: Variable, b: Variable) -> None: - with pytest.raises(TypeError, match="join parameter is not supported"): - a.to_linexpr().mul(b.to_linexpr(), join="inner") - - def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: - result: LinearExpression 
= merge([a.to_linexpr(), b.to_linexpr()], join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_same_shape_add_join_override(self, a: Variable, c: Variable) -> None: - result = a.to_linexpr().add(c.to_linexpr(), join="override") - assert list(result.data.indexes["i"]) == [0, 1, 2] - - def test_add_expr_outer_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 2 * b + 10 - result = expr_a.add(expr_b, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 15 - assert result.const.sel(i=3).item() == 10 - - def test_add_expr_inner_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 2 * b + 10 - result = expr_a.add(expr_b, join="inner") - assert list(result.coords["i"].values) == [1, 2] - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 15 - - def test_add_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) - result = expr.add(const, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 5 - assert result.const.sel(i=3).item() == 20 - - def test_add_constant_inner_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) - result = expr.add(const, join="inner") - assert list(result.coords["i"].values) == [1] - assert result.const.sel(i=1).item() == 15 - - def test_add_constant_override_positional(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.add(other, join="override") - assert list(result.coords["i"].values) == [0, 1, 2] - 
np.testing.assert_array_equal(result.const.values, [15, 25, 35]) - - def test_sub_constant_override(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.sub(other, join="override") - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [-5, -15, -25]) - - def test_sub_expr_outer_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 2 * b + 10 - result = expr_a.sub(expr_b, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == -5 - assert result.const.sel(i=2).item() == -5 - assert result.const.sel(i=3).item() == -10 - - def test_mul_constant_override_positional(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.mul(other, join="override") - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [10, 15, 20]) - np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - - def test_mul_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([2, 3], dims=["i"], coords={"i": [1, 3]}) - result = expr.mul(other, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 0 - assert result.const.sel(i=1).item() == 10 - assert result.const.sel(i=2).item() == 0 - assert result.const.sel(i=3).item() == 0 - assert result.coeffs.squeeze().sel(i=1).item() == 2 - assert result.coeffs.squeeze().sel(i=0).item() == 0 - - def test_div_constant_override_positional(self, a: Variable) -> None: - expr = 1 * a + 10 - other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.div(other, join="override") - assert list(result.coords["i"].values) == [0, 1, 2] - 
np.testing.assert_array_equal(result.const.values, [5.0, 2.0, 1.0]) - - def test_div_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 10 - other = xr.DataArray([2.0, 5.0], dims=["i"], coords={"i": [1, 3]}) - result = expr.div(other, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=1).item() == pytest.approx(5.0) - assert result.coeffs.squeeze().sel(i=1).item() == pytest.approx(0.5) - assert result.const.sel(i=0).item() == pytest.approx(10.0) - assert result.coeffs.squeeze().sel(i=0).item() == pytest.approx(1.0) - - def test_div_expr_with_join_raises(self, a: Variable, b: Variable) -> None: - with pytest.raises(TypeError): - a.to_linexpr().div(b.to_linexpr(), join="outer") - - def test_variable_add_outer_values(self, a: Variable, b: Variable) -> None: - result = a.add(b, join="outer") - assert isinstance(result, LinearExpression) - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.nterm == 2 - - def test_variable_mul_override(self, a: Variable) -> None: - other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) - result = a.mul(other, join="override") - assert isinstance(result, LinearExpression) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - - def test_variable_div_override(self, a: Variable) -> None: - other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) - result = a.div(other, join="override") - assert isinstance(result, LinearExpression) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_almost_equal( - result.coeffs.squeeze().values, [0.5, 0.2, 0.1] - ) + class TestAddition: + def test_add_join_none_preserves_default( + self, a: Variable, b: Variable + ) -> None: + result_default = a.to_linexpr() + b.to_linexpr() + result_none = a.to_linexpr().add(b.to_linexpr(), join=None) + assert_linequal(result_default, result_none) + + 
def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_add_expr_join_outer(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_add_expr_join_left(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="left") + assert list(result.data.indexes["i"]) == [0, 1, 2] + + def test_add_expr_join_right(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="right") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_add_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_add_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().add(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: + expr = a.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [0, 1, 2]}) + result = expr.add(const, join="override") + assert list(result.data.indexes["i"]) == [0, 1, 2] + assert (result.const.values == const.values).all() + + def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: + expr_a = 1 * a + 5 + const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) + for join in ["override", "outer", "inner"]: + result = expr_a.add(const, join=join) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [6, 7, 8]) + + def test_add_scalar_with_explicit_join(self, a: Variable) -> 
None: + expr = 1 * a + 5 + result = expr.add(10, join="override") + np.testing.assert_array_equal(result.const.values, [15, 15, 15]) + assert list(result.coords["i"].values) == [0, 1, 2] - def test_merge_outer_join(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} + class TestSubtraction: + def test_sub_expr_join_inner(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().sub(b.to_linexpr(), join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_sub_constant_override(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.sub(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [-5, -15, -25]) + + class TestMultiplication: + def test_mul_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_mul_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().mul(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + assert result.coeffs.sel(i=0).item() == 0 + assert result.coeffs.sel(i=1).item() == 2 + assert result.coeffs.sel(i=2).item() == 3 + + def test_mul_expr_with_join_raises(self, a: Variable, b: Variable) -> None: + with pytest.raises(TypeError, match="join parameter is not supported"): + a.to_linexpr().mul(b.to_linexpr(), join="inner") + + class TestDivision: + def test_div_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().div(const, join="inner") + assert 
list(result.data.indexes["i"]) == [1, 2] + + def test_div_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().div(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_div_expr_with_join_raises(self, a: Variable, b: Variable) -> None: + with pytest.raises(TypeError): + a.to_linexpr().div(b.to_linexpr(), join="outer") + + class TestVariableOperations: + def test_variable_add_join(self, a: Variable, b: Variable) -> None: + result = a.add(b, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_sub_join(self, a: Variable, b: Variable) -> None: + result = a.sub(b, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_mul_join(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_div_join(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.div(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_add_outer_values(self, a: Variable, b: Variable) -> None: + result = a.add(b, join="outer") + assert isinstance(result, LinearExpression) + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.nterm == 2 + + def test_variable_mul_override(self, a: Variable) -> None: + other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) + result = a.mul(other, join="override") + assert isinstance(result, LinearExpression) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: - expr_a = 1 * a + 5 - const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) - for 
join in ["override", "outer", "inner"]: - result = expr_a.add(const, join=join) + def test_variable_div_override(self, a: Variable) -> None: + other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) + result = a.div(other, join="override") + assert isinstance(result, LinearExpression) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_almost_equal( + result.coeffs.squeeze().values, [0.5, 0.2, 0.1] + ) + + def test_same_shape_add_join_override(self, a: Variable, c: Variable) -> None: + result = a.to_linexpr().add(c.to_linexpr(), join="override") + assert list(result.data.indexes["i"]) == [0, 1, 2] + + class TestMerge: + def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="inner" + ) + assert list(result.data.indexes["i"]) == [1, 2] + + def test_merge_outer_join(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="outer" + ) + assert set(result.coords["i"].values) == {0, 1, 2, 3} + + def test_merge_join_left(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="left" + ) + assert list(result.data.indexes["i"]) == [0, 1, 2] + + def test_merge_join_right(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="right" + ) + assert list(result.data.indexes["i"]) == [1, 2, 3] + + class TestValueVerification: + def test_add_expr_outer_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.add(expr_b, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 15 + assert result.const.sel(i=3).item() == 10 + + def 
test_add_expr_inner_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.add(expr_b, join="inner") + assert list(result.coords["i"].values) == [1, 2] + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 15 + + def test_add_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = expr.add(const, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 5 + assert result.const.sel(i=3).item() == 20 + + def test_add_constant_inner_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = expr.add(const, join="inner") + assert list(result.coords["i"].values) == [1] + assert result.const.sel(i=1).item() == 15 + + def test_add_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.add(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [15, 25, 35]) + + def test_sub_expr_outer_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.sub(expr_b, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == -5 + assert result.const.sel(i=2).item() == -5 + assert result.const.sel(i=3).item() == -10 + + def test_mul_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.mul(other, join="override") + assert 
list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [10, 15, 20]) + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) + + def test_mul_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([2, 3], dims=["i"], coords={"i": [1, 3]}) + result = expr.mul(other, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 0 + assert result.const.sel(i=1).item() == 10 + assert result.const.sel(i=2).item() == 0 + assert result.const.sel(i=3).item() == 0 + assert result.coeffs.squeeze().sel(i=1).item() == 2 + assert result.coeffs.squeeze().sel(i=0).item() == 0 + + def test_div_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 10 + other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.div(other, join="override") assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [6, 7, 8]) - - def test_add_scalar_with_explicit_join(self, a: Variable) -> None: - expr = 1 * a + 5 - result = expr.add(10, join="override") - np.testing.assert_array_equal(result.const.values, [15, 15, 15]) - assert list(result.coords["i"].values) == [0, 1, 2] - - def test_quadratic_add_constant_join_inner(self, a: Variable, b: Variable) -> None: - quad = a.to_linexpr() * b.to_linexpr() - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] - - def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: - quad = a.to_linexpr() * a.to_linexpr() - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) - result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [0, 1] - - def test_quadratic_mul_constant_join_inner(self, a: Variable, b: Variable) -> None: - quad = a.to_linexpr() * 
b.to_linexpr() - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = quad.mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] - - def test_merge_join_left(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="left") - assert list(result.data.indexes["i"]) == [0, 1, 2] - - def test_merge_join_right(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="right") - assert list(result.data.indexes["i"]) == [1, 2, 3] + np.testing.assert_array_equal(result.const.values, [5.0, 2.0, 1.0]) + + def test_div_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 10 + other = xr.DataArray([2.0, 5.0], dims=["i"], coords={"i": [1, 3]}) + result = expr.div(other, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=1).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(i=1).item() == pytest.approx(0.5) + assert result.const.sel(i=0).item() == pytest.approx(10.0) + assert result.coeffs.squeeze().sel(i=0).item() == pytest.approx(1.0) + + class TestQuadratic: + def test_quadratic_add_constant_join_inner( + self, a: Variable, b: Variable + ) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: + quad = a.to_linexpr() * a.to_linexpr() + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [0, 1] + + def test_quadratic_mul_constant_join_inner( + self, a: Variable, b: Variable + ) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = 
quad.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] From cff9c3db1033bd5622e6da4eeecbf79e7cb1aec3 Mon Sep 17 00:00:00 2001 From: Fabian Date: Thu, 5 Mar 2026 08:57:31 +0100 Subject: [PATCH 23/66] add tests for pandas series subset/superset --- linopy/expressions.py | 11 +++-------- test/test_constraints.py | 24 ++++++++++++++++-------- test/test_linear_expression.py | 24 ++++++++++++++++-------- 3 files changed, 35 insertions(+), 24 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index fc7e9539..4c78a636 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -1079,7 +1079,9 @@ def to_constraint( f"Both sides of the constraint are constant. At least one side must contain variables. {self} {rhs}" ) - if isinstance(rhs, DataArray): + if isinstance(rhs, SUPPORTED_CONSTANT_TYPES): + rhs = as_dataarray(rhs, coords=self.coords, dims=self.coord_dims) + extra_dims = set(rhs.dims) - set(self.coord_dims) if extra_dims: logger.warning( @@ -1088,13 +1090,6 @@ def to_constraint( f"Consider collapsing the dimensions by taking min/max." ) rhs = rhs.reindex_like(self.const, fill_value=np.nan) - elif isinstance(rhs, np.ndarray | pd.Series | pd.DataFrame) and rhs.ndim > len( - self.coord_dims - ): - raise ValueError( - f"RHS has {rhs.ndim} dimensions, but the expression only " - f"has {len(self.coord_dims)}. Cannot create constraint." 
- ) all_to_lhs = self.sub(rhs, join=join).data data = assign_multiindex_safe( diff --git a/test/test_constraints.py b/test/test_constraints.py index b8caa886..1ef40d3c 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -344,14 +344,22 @@ def test_sanitize_infinities() -> None: class TestConstraintCoordinateAlignment: - @pytest.fixture - def subset(self) -> xr.DataArray: - return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - - @pytest.fixture - def superset(self) -> xr.DataArray: - return xr.DataArray( - np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def subset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "xarray": + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) + + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def superset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "xarray": + return xr.DataArray( + np.arange(25, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + return pd.Series( + np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") ) def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 0b7c16d7..90e34b5e 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -538,14 +538,22 @@ def test_linear_expression_multiplication_invalid( class TestCoordinateAlignment: - @pytest.fixture - def subset(self) -> xr.DataArray: - return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - - @pytest.fixture - def superset(self) -> xr.DataArray: - return xr.DataArray( - np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} + @pytest.fixture(params=["da", 
"series"]) + def subset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) + + @pytest.fixture(params=["da", "series"]) + def superset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray( + np.arange(25, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + return pd.Series( + np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") ) @pytest.fixture From 48f9704d7b21c5d69bcb02138e5810d66e409893 Mon Sep 17 00:00:00 2001 From: Fabian Date: Thu, 5 Mar 2026 09:07:01 +0100 Subject: [PATCH 24/66] test: add TestMissingValues for same-shape constants with NaN entries --- test/test_linear_expression.py | 119 +++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 90e34b5e..2bb1d359 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -563,6 +563,16 @@ def expected_fill(self) -> np.ndarray: arr[3] = 30.0 return arr + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + vals[5] = np.nan + vals[19] = np.nan + if request.param == "xarray": + return xr.DataArray(vals, dims=["dim_2"], coords={"dim_2": range(20)}) + return pd.Series(vals, index=pd.Index(range(20), name="dim_2")) + class TestSubset: @pytest.mark.parametrize("operand", ["var", "expr"]) def test_mul_subset_fills_zeros( @@ -795,6 +805,115 @@ def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: qexpr = v * v assert_quadequal(subset + qexpr, qexpr + subset) + class TestMissingValues: + """Same shape as variable but with NaN entries in the constant.""" + + EXPECTED_NAN_MASK = 
np.zeros(20, dtype=bool) + EXPECTED_NAN_MASK[[0, 5, 19]] = True + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_nan_propagates( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else v + 5 + result = target + nan_constant + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.const.values), self.EXPECTED_NAN_MASK + ) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_nan_propagates( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else v + 5 + result = target - nan_constant + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.const.values), self.EXPECTED_NAN_MASK + ) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_nan_propagates( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target * nan_constant + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.coeffs.squeeze().values), self.EXPECTED_NAN_MASK + ) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_nan_propagates( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target / nan_constant + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.coeffs.squeeze().values), self.EXPECTED_NAN_MASK + ) + + def test_add_commutativity( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + result_a = v + nan_constant + result_b = nan_constant + v + # Compare non-NaN values are equal and NaN positions match + nan_mask_a = np.isnan(result_a.const.values) + nan_mask_b = np.isnan(result_b.const.values) + 
np.testing.assert_array_equal(nan_mask_a, nan_mask_b) + np.testing.assert_array_equal( + result_a.const.values[~nan_mask_a], + result_b.const.values[~nan_mask_b], + ) + np.testing.assert_array_equal( + result_a.coeffs.values, result_b.coeffs.values + ) + + def test_mul_commutativity( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + result_a = v * nan_constant + result_b = nan_constant * v + nan_mask_a = np.isnan(result_a.coeffs.values) + nan_mask_b = np.isnan(result_b.coeffs.values) + np.testing.assert_array_equal(nan_mask_a, nan_mask_b) + np.testing.assert_array_equal( + result_a.coeffs.values[~nan_mask_a], + result_b.coeffs.values[~nan_mask_b], + ) + + def test_quadexpr_add_nan( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + qexpr = v * v + result = qexpr + nan_constant + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.const.values), self.EXPECTED_NAN_MASK + ) + class TestMultiDim: def test_multidim_subset_mul(self, m: Model) -> None: coords_a = pd.RangeIndex(4, name="a") From 898ac798278f107e76847b1a48a892f36292202f Mon Sep 17 00:00:00 2001 From: Fabian Date: Thu, 5 Mar 2026 11:45:05 +0100 Subject: [PATCH 25/66] Fix broken test imports, stray docstring char, and incorrect test assertion from fixture refactor --- linopy/expressions.py | 1 - test/test_common.py | 1 - test/test_constraints.py | 7 +++++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index cf2c96c5..e29a39b9 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -735,7 +735,6 @@ def sub( ) -> GenericExpression | QuadraticExpression: """ Subtract others from expression. 
- > Parameters ---------- diff --git a/test/test_common.py b/test/test_common.py index efade41a..f1190024 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -10,7 +10,6 @@ import polars as pl import pytest import xarray as xr -from test_linear_expression import m, u, x # noqa: F401 from xarray import DataArray from xarray.testing.assertions import assert_equal diff --git a/test/test_constraints.py b/test/test_constraints.py index 1ef40d3c..9a467c8c 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -175,12 +175,15 @@ def test_constraint_rhs_lower_dim(rhs_factory: Any) -> None: pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), ], ) -def test_constraint_rhs_higher_dim_constant_raises(rhs_factory: Any) -> None: +def test_constraint_rhs_higher_dim_constant_warns( + rhs_factory: Any, caplog: Any +) -> None: m = Model() x = m.add_variables(coords=[range(5)], name="x") - with pytest.raises(ValueError, match="dimensions"): + with caplog.at_level("WARNING", logger="linopy.expressions"): m.add_constraints(x >= rhs_factory(m)) + assert "dimensions" in caplog.text def test_constraint_rhs_higher_dim_dataarray_reindexes() -> None: From c0ef60ff5661199c8cb3ad073ecf0531b7c02adb Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 17:39:25 +0100 Subject: [PATCH 26/66] Add arithmetic convention doc --- doc/arithmetic-convention.nblink | 3 + doc/coordinate-alignment.nblink | 3 - doc/index.rst | 2 +- examples/arithmetic-convention.ipynb | 460 +++++++++++++++++++++++++ examples/coordinate-alignment.ipynb | 498 --------------------------- 5 files changed, 464 insertions(+), 502 deletions(-) create mode 100644 doc/arithmetic-convention.nblink delete mode 100644 doc/coordinate-alignment.nblink create mode 100644 examples/arithmetic-convention.ipynb delete mode 100644 examples/coordinate-alignment.ipynb diff --git a/doc/arithmetic-convention.nblink b/doc/arithmetic-convention.nblink 
new file mode 100644 index 00000000..39928e92 --- /dev/null +++ b/doc/arithmetic-convention.nblink @@ -0,0 +1,3 @@ +{ + "path": "../examples/arithmetic-convention.ipynb" +} diff --git a/doc/coordinate-alignment.nblink b/doc/coordinate-alignment.nblink deleted file mode 100644 index ef588b91..00000000 --- a/doc/coordinate-alignment.nblink +++ /dev/null @@ -1,3 +0,0 @@ -{ - "path": "../examples/coordinate-alignment.ipynb" -} diff --git a/doc/index.rst b/doc/index.rst index fd7f9ed8..70b8b439 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -108,10 +108,10 @@ This package is published under MIT license. :caption: User Guide user-guide + arithmetic-convention creating-variables creating-expressions creating-constraints - coordinate-alignment sos-constraints piecewise-linear-constraints piecewise-linear-constraints-tutorial diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb new file mode 100644 index 00000000..4f6a240b --- /dev/null +++ b/examples/arithmetic-convention.ipynb @@ -0,0 +1,460 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c68183ce878b22db", + "metadata": {}, + "source": "# Arithmetic Convention\n\nlinopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n\nTwo rules apply to **all** arithmetic operations involving linopy objects (`+`, `-`, `*`, `/`):\n\n**Rule 1 — Exact label matching on shared dimensions**\n\nWhen two operands share a dimension, their coordinate labels on that dimension must match exactly (`join=\"exact\"`). A `ValueError` is raised on mismatch.\n\n**Rule 2 — Constants cannot introduce new dimensions**\n\nWhen combining an expression or variable with a *constant* (`DataArray`, numpy, pandas), the constant's dimensions must be a subset of the expression's dimensions. 
A constant cannot introduce dimensions the expression does not have — that would silently duplicate variables.\n\nExpression + Expression broadcasting over non-shared dimensions is freely allowed.\n\nInspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4251ba8271bff255", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.617834Z", + "start_time": "2026-03-09T16:33:16.648576Z" + } + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "import xarray as xr\n", + "\n", + "import linopy" + ] + }, + { + "cell_type": "markdown", + "id": "c9d84bb1c59f2690", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "57506c7b4bf9f4bf", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.680021Z", + "start_time": "2026-03-09T16:33:17.621212Z" + } + }, + "outputs": [], + "source": [ + "m = linopy.Model()\n", + "\n", + "time = pd.RangeIndex(5, name=\"time\")\n", + "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", + "scenarios = pd.Index([\"low\", \"high\"], name=\"scenario\")\n", + "\n", + "x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", + "y = m.add_variables(lower=0, coords=[time], name=\"y\")\n", + "gen = m.add_variables(lower=0, coords=[time, techs], name=\"gen\")\n", + "risk = m.add_variables(lower=0, coords=[techs, scenarios], name=\"risk\")" + ] + }, + { + "cell_type": "markdown", + "id": "61636799d26f4d99", + "metadata": {}, + "source": [ + "## What works by default" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1f7af87e662800c", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.691699Z", + "start_time": "2026-03-09T16:33:17.682845Z" + } + }, + "outputs": [], + "source": [ + "# Same coords — just works\n", + "x + y" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"id": "985ade4e21e26271", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.706036Z", + "start_time": "2026-03-09T16:33:17.700165Z" + } + }, + "outputs": [], + "source": [ + "# Constant with matching coords\n", + "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", + "x * factor" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f6a99d864238dbb", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.723220Z", + "start_time": "2026-03-09T16:33:17.713960Z" + } + }, + "outputs": [], + "source": [ + "# Constant with fewer dims — broadcasts freely\n", + "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", + "gen * cost # cost broadcasts over time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d417bfa628cb280a", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.743959Z", + "start_time": "2026-03-09T16:33:17.733360Z" + } + }, + "outputs": [], + "source": [ + "# Expression + Expression with non-shared dims — broadcasts freely\n", + "gen + risk # (time, tech) + (tech, scenario) → (time, tech, scenario)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "400b4084ef94eb35", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.755530Z", + "start_time": "2026-03-09T16:33:17.750874Z" + } + }, + "outputs": [], + "source": [ + "# Scalar — always fine\n", + "x + 5" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2e4640266401ba61", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.773938Z", + "start_time": "2026-03-09T16:33:17.763057Z" + } + }, + "outputs": [], + "source": [ + "# Constraints — RHS with fewer dims broadcasts naturally\n", + "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", + "m.add_constraints(gen <= capacity, name=\"cap\") # capacity broadcasts over time" + ] + }, + { + "cell_type": 
"markdown", + "id": "c4e9c6dbcec7c0d9", + "metadata": {}, + "source": [ + "## What raises an error" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe1b95f337be4e9f", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.787118Z", + "start_time": "2026-03-09T16:33:17.781050Z" + } + }, + "outputs": [], + "source": [ + "# Mismatched coordinates on shared dimension\n", + "y_short = m.add_variables(\n", + " lower=0, coords=[pd.RangeIndex(3, name=\"time\")], name=\"y_short\"\n", + ")\n", + "\n", + "try:\n", + " x + y_short # time coords don't match\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5a0bb6e7d4b175c5", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.797529Z", + "start_time": "2026-03-09T16:33:17.794047Z" + } + }, + "outputs": [], + "source": [ + "# Constant introduces new dimensions\n", + "profile = xr.DataArray(\n", + " np.ones((3, 5)), dims=[\"tech\", \"time\"], coords={\"tech\": techs, \"time\": time}\n", + ")\n", + "try:\n", + " (\n", + " x + profile\n", + " ) # would duplicate x[t] across techs. 
Reduce using mean, max or sth similar\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0f899f096773d96", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.810737Z", + "start_time": "2026-03-09T16:33:17.803654Z" + } + }, + "outputs": [], + "source": "# Multiplication with mismatched coordinates\npartial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\ntry:\n x * partial # time coords [0..4] vs [0,1,2]\nexcept ValueError as e:\n print(\"ValueError:\", e)" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aa03d3184a0e8b65", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.817090Z", + "start_time": "2026-03-09T16:33:17.813403Z" + } + }, + "outputs": [], + "source": [ + "# Constraint RHS with mismatched coordinates\n", + "partial_rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + "\n", + "try:\n", + " x <= partial_rhs\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" + ] + }, + { + "cell_type": "markdown", + "id": "64a6f983ce55547e", + "metadata": {}, + "source": [ + "## Escape hatches\n", + "\n", + "When coordinates don't match, linopy provides several ways to state your intent explicitly." + ] + }, + { + "cell_type": "markdown", + "id": "709150bc01fc8c3", + "metadata": {}, + "source": [ + "### 1. `.sel()` — Subset before operating\n", + "\n", + "The cleanest way to restrict to matching coordinates. No need for an inner join — explicitly select what you want." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b4f5bf23a8ee17d5", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.835634Z", + "start_time": "2026-03-09T16:33:17.826825Z" + } + }, + "outputs": [], + "source": [ + "x.sel(time=[0, 1, 2]) + y_short # select matching coords first" + ] + }, + { + "cell_type": "markdown", + "id": "c49f61e386de7350", + "metadata": {}, + "source": [ + "### 2. `| 0` — Inline outer join with fill\n", + "\n", + "When one operand covers a subset of coordinates, use the `|` operator to declare a fill value. This creates a lightweight `FillWrapper` that is consumed immediately — it never mutates or persists.\n", + "\n", + "The `|` operator only works on linopy types (`Variable`, `LinearExpression`, `QuadraticExpression`). For external types, use `.reindex()` before operating." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b27db4d757db423", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T16:33:17.932308Z", + "start_time": "2026-03-09T16:33:17.838621Z" + } + }, + "outputs": [], + "source": "x + (y_short | 0) # fill missing time coords of y_short with 0" + }, + { + "cell_type": "markdown", + "id": "f12b0cb6d0e31651", + "metadata": {}, + "source": [ + "### 3. 
Named methods with `join=`\n", + "\n", + "All arithmetic operations have named-method equivalents that accept a `join` parameter:\n", + "\n", + "| `join` | Coordinates kept | Fill |\n", + "|--------|-----------------|------|\n", + "| `\"exact\"` | Must match | `ValueError` if different |\n", + "| `\"inner\"` | Intersection | — |\n", + "| `\"outer\"` | Union | Zero (arithmetic) / NaN (constraints) |\n", + "| `\"left\"` | Left operand's | Zero / NaN for missing right |\n", + "| `\"right\"` | Right operand's | Zero for missing left |\n", + "| `\"override\"` | Left operand's | Positional alignment |" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78c967671819ef0c", + "metadata": {}, + "outputs": [], + "source": [ + "m2 = linopy.Model()\n", + "\n", + "i_a = pd.Index([0, 1, 2], name=\"i\")\n", + "i_b = pd.Index([1, 2, 3], name=\"i\")\n", + "\n", + "a = m2.add_variables(coords=[i_a], name=\"a\")\n", + "b = m2.add_variables(coords=[i_b], name=\"b\")\n", + "\n", + "print(\"inner:\", list(a.add(b, join=\"inner\").coords[\"i\"].values)) # [1, 2]\n", + "print(\"outer:\", list(a.add(b, join=\"outer\").coords[\"i\"].values)) # [0, 1, 2, 3]\n", + "print(\"left: \", list(a.add(b, join=\"left\").coords[\"i\"].values)) # [0, 1, 2]\n", + "print(\"right:\", list(a.add(b, join=\"right\").coords[\"i\"].values)) # [1, 2, 3]" + ] + }, + { + "cell_type": "markdown", + "id": "424610ceccde798a", + "metadata": {}, + "source": [ + "### 4. 
`linopy.align()` — Explicit pre-alignment\n", + "\n", + "For complex multi-operand alignment:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23f414e973e33c34", + "metadata": {}, + "outputs": [], + "source": "a_aligned, b_aligned = linopy.align(a, b, join=\"outer\", fill_value=0)\na_aligned + b_aligned" + }, + { + "cell_type": "markdown", + "id": "e64caf260c82ea6d", + "metadata": {}, + "source": [ + "## Positional alignment\n", + "\n", + "When two arrays have the same shape but different coordinate labels, use `.assign_coords()` to relabel one operand so coordinates match explicitly:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9a513a6be9e5925e", + "metadata": {}, + "outputs": [], + "source": [ + "c = m2.add_variables(coords=[[\"x\", \"y\", \"z\"]], name=\"c\")\n", + "d = m2.add_variables(coords=[[\"p\", \"q\", \"r\"]], name=\"d\")\n", + "\n", + "# Relabel d's coordinates to match c, then add\n", + "c + d.assign_coords(dim_0=c.coords[\"dim_0\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "262eaf85fa44e152", + "metadata": {}, + "outputs": [], + "source": [ + "# Or use join=\"override\" for positional matching\n", + "c.add(d, join=\"override\")" + ] + }, + { + "cell_type": "markdown", + "id": "cd0ef5ca04e57be", + "metadata": {}, + "source": [ + "## Working with pandas\n", + "\n", + "Under the strict convention, pandas objects must have **named indices** to avoid dimension name mismatches. 
A `pd.Series` without a named index becomes `dim_0` and will fail the exact join against a named variable dimension.\n", + "\n", + "```python\n", + "# Bad — index name is None, becomes \"dim_0\"\n", + "cost = pd.Series([10, 20], index=[\"wind\", \"solar\"])\n", + "\n", + "# Good — explicit dimension name\n", + "cost = pd.Series([10, 20], index=pd.Index([\"wind\", \"solar\"], name=\"tech\"))\n", + "```\n", + "\n", + "Consider using `force_dim_names=True` on the model to catch unnamed dimension issues at variable creation time." + ] + }, + { + "cell_type": "markdown", + "id": "f0c3e862b0430c11", + "metadata": {}, + "source": [ + "## Summary\n", + "\n", + "| Situation | Behavior | How to handle |\n", + "|---|---|---|\n", + "| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n", + "| Non-shared dims, expr + expr | ✓ Broadcasts | `gen(time,tech) + risk(tech,scenario)` |\n", + "| Constant with subset dims | ✓ Broadcasts | `cost(tech) * gen(tech,time)` |\n", + "| Constant introduces new dims | ✗ Raises | Restructure, or multiply if meaningful |\n", + "| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `x + (y \\| 0)` |\n", + "| Pandas without named index | ✗ Raises on dim mismatch | Name the index |" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/coordinate-alignment.ipynb b/examples/coordinate-alignment.ipynb deleted file mode 100644 index e1309e37..00000000 --- a/examples/coordinate-alignment.ipynb +++ /dev/null @@ -1,498 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Coordinate Alignment in linopy\n", - "\n", - "linopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n", - "\n", - "| Operation | Shared-dim alignment | Extra dims on 
constant/RHS |\n", - "|-----------|---------------------|---------------------------|\n", - "| `+`, `-` | `\"exact\"` — must match | **Forbidden** |\n", - "| `*`, `/` | `\"inner\"` — intersection | Expands the expression |\n", - "| `<=`, `>=`, `==` | `\"exact\"` — must match | **Forbidden** |\n", - "\n", - "**Why?** Addition and constraint RHS only change constant terms — expanding into new dimensions would duplicate the same variable. Multiplication changes coefficients, so expanding is meaningful. The rules are consistent: `a*x + b <= 0` and `a*x <= -b` always behave identically.\n", - "\n", - "When coordinates don't match, use the named methods (`.add()`, `.sub()`, `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`) with an explicit `join=` parameter.\n", - "\n", - "Inspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." - ] - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:53.150316Z", - "iopub.status.busy": "2026-02-20T12:35:53.150100Z", - "iopub.status.idle": "2026-02-20T12:35:54.105967Z", - "shell.execute_reply": "2026-02-20T12:35:54.105432Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.193551Z", - "start_time": "2026-02-20T12:36:56.190913Z" - } - }, - "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "import xarray as xr\n", - "\n", - "import linopy" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## What works by default" - ] - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.110532Z", - "iopub.status.busy": "2026-02-20T12:35:54.109029Z", - "iopub.status.idle": "2026-02-20T12:35:54.164335Z", - "shell.execute_reply": "2026-02-20T12:35:54.163789Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.215580Z", - "start_time": "2026-02-20T12:36:56.207497Z" - } - }, - "source": [ - "m = linopy.Model()\n", - "\n", - "time = 
pd.RangeIndex(5, name=\"time\")\n", - "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", - "\n", - "x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", - "y = m.add_variables(lower=0, coords=[time], name=\"y\")\n", - "gen = m.add_variables(lower=0, coords=[time, techs], name=\"gen\")" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.166957Z", - "iopub.status.busy": "2026-02-20T12:35:54.166600Z", - "iopub.status.idle": "2026-02-20T12:35:54.185234Z", - "shell.execute_reply": "2026-02-20T12:35:54.184778Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.230513Z", - "start_time": "2026-02-20T12:36:56.222101Z" - } - }, - "source": [ - "# Addition/subtraction — matching coordinates\n", - "x + y" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.187479Z", - "iopub.status.busy": "2026-02-20T12:35:54.187284Z", - "iopub.status.idle": "2026-02-20T12:35:54.197488Z", - "shell.execute_reply": "2026-02-20T12:35:54.197090Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.241644Z", - "start_time": "2026-02-20T12:36:56.235473Z" - } - }, - "source": [ - "# Multiplication — matching coordinates\n", - "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", - "x * factor" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.199528Z", - "iopub.status.busy": "2026-02-20T12:35:54.199323Z", - "iopub.status.idle": "2026-02-20T12:35:54.210352Z", - "shell.execute_reply": "2026-02-20T12:35:54.209978Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.253971Z", - "start_time": "2026-02-20T12:36:56.246880Z" - } - }, - "source": [ - "# Multiplication — partial overlap gives intersection\n", - 
"partial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", - "x * partial # result: time 0, 1, 2 only" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.212115Z", - "iopub.status.busy": "2026-02-20T12:35:54.211953Z", - "iopub.status.idle": "2026-02-20T12:35:54.223732Z", - "shell.execute_reply": "2026-02-20T12:35:54.223319Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.267382Z", - "start_time": "2026-02-20T12:36:56.259835Z" - } - }, - "source": [ - "# Multiplication — different dims broadcast (expands the expression)\n", - "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", - "x * cost # result: (time, tech)" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.225717Z", - "iopub.status.busy": "2026-02-20T12:35:54.225519Z", - "iopub.status.idle": "2026-02-20T12:35:54.247553Z", - "shell.execute_reply": "2026-02-20T12:35:54.247125Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.305476Z", - "start_time": "2026-02-20T12:36:56.292Z" - } - }, - "source": [ - "# Constraints — RHS with fewer dims broadcasts naturally\n", - "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", - "m.add_constraints(gen <= capacity, name=\"cap\") # capacity broadcasts over time" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## What raises an error" - ] - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.249529Z", - "iopub.status.busy": "2026-02-20T12:35:54.249355Z", - "iopub.status.idle": "2026-02-20T12:35:54.260588Z", - "shell.execute_reply": "2026-02-20T12:35:54.259868Z" - }, - "ExecuteTime": { - "end_time": 
"2026-02-20T12:36:56.319773Z", - "start_time": "2026-02-20T12:36:56.312636Z" - } - }, - "source": [ - "# Addition with mismatched coordinates\n", - "y_short = m.add_variables(\n", - " lower=0, coords=[pd.RangeIndex(3, name=\"time\")], name=\"y_short\"\n", - ")\n", - "\n", - "try:\n", - " x + y_short # time coords don't match\n", - "except ValueError as e:\n", - " print(\"ValueError:\", e)" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.262548Z", - "iopub.status.busy": "2026-02-20T12:35:54.262376Z", - "iopub.status.idle": "2026-02-20T12:35:54.268753Z", - "shell.execute_reply": "2026-02-20T12:35:54.268391Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.331386Z", - "start_time": "2026-02-20T12:36:56.326247Z" - } - }, - "source": [ - "# Addition with extra dimensions on the constant\n", - "profile = xr.DataArray(\n", - " np.ones((3, 5)), dims=[\"tech\", \"time\"], coords={\"tech\": techs, \"time\": time}\n", - ")\n", - "try:\n", - " x + profile # would duplicate x[t] across techs\n", - "except ValueError as e:\n", - " print(\"ValueError:\", e)" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.270585Z", - "iopub.status.busy": "2026-02-20T12:35:54.270420Z", - "iopub.status.idle": "2026-02-20T12:35:54.277993Z", - "shell.execute_reply": "2026-02-20T12:35:54.276363Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.350503Z", - "start_time": "2026-02-20T12:36:56.343806Z" - } - }, - "source": [ - "# Multiplication with zero overlap\n", - "z = m.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", - "\n", - "try:\n", - " z * factor # z has time 5-9, factor has time 0-4 — no intersection\n", - "except ValueError as e:\n", - " print(\"ValueError:\", e)" - ], - "outputs": [], - "execution_count": null - }, - 
{ - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.281858Z", - "iopub.status.busy": "2026-02-20T12:35:54.281316Z", - "iopub.status.idle": "2026-02-20T12:35:54.287843Z", - "shell.execute_reply": "2026-02-20T12:35:54.287269Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.361211Z", - "start_time": "2026-02-20T12:36:56.356813Z" - } - }, - "source": [ - "# Constraint RHS with mismatched coordinates\n", - "partial_rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", - "\n", - "try:\n", - " x <= partial_rhs\n", - "except ValueError as e:\n", - " print(\"ValueError:\", e)" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.290439Z", - "iopub.status.busy": "2026-02-20T12:35:54.290235Z", - "iopub.status.idle": "2026-02-20T12:35:54.302535Z", - "shell.execute_reply": "2026-02-20T12:35:54.302145Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.385743Z", - "start_time": "2026-02-20T12:36:56.380702Z" - } - }, - "source": [ - "# Constraint RHS with extra dimensions\n", - "w = m.add_variables(lower=0, coords=[techs], name=\"w\") # dims: (tech,)\n", - "rhs_2d = xr.DataArray(\n", - " np.ones((5, 3)), dims=[\"time\", \"tech\"], coords={\"time\": time, \"tech\": techs}\n", - ")\n", - "try:\n", - " w <= rhs_2d # would create redundant constraints on w[tech]\n", - "except ValueError as e:\n", - " print(\"ValueError:\", e)" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Positional alignment\n", - "\n", - "A common pattern: two arrays with the same shape but different (or no) coordinate labels. 
The cleanest fix is to relabel one operand with `.assign_coords()` so that coordinates match explicitly:" - ] - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.304505Z", - "iopub.status.busy": "2026-02-20T12:35:54.304317Z", - "iopub.status.idle": "2026-02-20T12:35:54.322551Z", - "shell.execute_reply": "2026-02-20T12:35:54.322153Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:37:36.671817Z", - "start_time": "2026-02-20T12:37:36.662325Z" - } - }, - "source": [ - "m2 = linopy.Model()\n", - "\n", - "a = m2.add_variables(coords=[[\"x\", \"y\", \"z\"]], name=\"a\")\n", - "b = m2.add_variables(coords=[[\"p\", \"q\", \"r\"]], name=\"b\")\n", - "\n", - "# Relabel b's coordinates to match a, then add normally\n", - "a + b.assign_coords(dim_0=a.coords[\"dim_0\"])" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.324642Z", - "iopub.status.busy": "2026-02-20T12:35:54.324465Z", - "iopub.status.idle": "2026-02-20T12:35:54.332579Z", - "shell.execute_reply": "2026-02-20T12:35:54.332088Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.424015Z", - "start_time": "2026-02-20T12:36:56.418311Z" - } - }, - "source": [ - "# Same for constraints\n", - "rhs = xr.DataArray([1.0, 2.0, 3.0], dims=[\"dim_0\"], coords={\"dim_0\": [\"p\", \"q\", \"r\"]})\n", - "a <= rhs.assign_coords(dim_0=a.coords[\"dim_0\"])" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.336196Z", - "iopub.status.busy": "2026-02-20T12:35:54.335947Z", - "iopub.status.idle": "2026-02-20T12:35:54.360683Z", - "shell.execute_reply": "2026-02-20T12:35:54.359622Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.441516Z", - "start_time": "2026-02-20T12:36:56.432774Z" - } - }, - "source": [ - "# Shorthand: join=\"override\" does 
the same (positional match, keeps left labels)\n", - "a.add(b, join=\"override\")" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Other join modes\n", - "\n", - "All named methods (`.add()`, `.sub()`, `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`) accept a `join=` parameter:\n", - "\n", - "| `join` | Coordinates kept | Fill |\n", - "|--------|-----------------|------|\n", - "| `\"exact\"` | Must match | `ValueError` if different |\n", - "| `\"inner\"` | Intersection | — |\n", - "| `\"outer\"` | Union | Zero (arithmetic) / NaN (constraints) |\n", - "| `\"left\"` | Left operand's | Zero / NaN for missing right |\n", - "| `\"right\"` | Right operand's | Zero for missing left |\n", - "| `\"override\"` | Left operand's | Positional alignment |" - ] - }, - { - "cell_type": "code", - "metadata": { - "execution": { - "iopub.execute_input": "2026-02-20T12:35:54.363885Z", - "iopub.status.busy": "2026-02-20T12:35:54.363642Z", - "iopub.status.idle": "2026-02-20T12:35:54.404550Z", - "shell.execute_reply": "2026-02-20T12:35:54.403860Z" - }, - "ExecuteTime": { - "end_time": "2026-02-20T12:36:56.472328Z", - "start_time": "2026-02-20T12:36:56.446352Z" - } - }, - "source": [ - "i_a = pd.Index([0, 1, 2], name=\"i\")\n", - "i_b = pd.Index([1, 2, 3], name=\"i\")\n", - "\n", - "a = m2.add_variables(coords=[i_a], name=\"a2\")\n", - "b = m2.add_variables(coords=[i_b], name=\"b2\")\n", - "\n", - "print(\"inner:\", list(a.add(b, join=\"inner\").coords[\"i\"].values)) # [1, 2]\n", - "print(\"outer:\", list(a.add(b, join=\"outer\").coords[\"i\"].values)) # [0, 1, 2, 3]\n", - "print(\"left: \", list(a.add(b, join=\"left\").coords[\"i\"].values)) # [0, 1, 2]\n", - "print(\"right:\", list(a.add(b, join=\"right\").coords[\"i\"].values)) # [1, 2, 3]" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Migrating from previous versions\n", - "\n", - 
"Previous versions used a shape-dependent heuristic that caused silent bugs (positional alignment on same-shape operands, non-associative addition, broken multiplication). The new behavior:\n", - "\n", - "| Condition | Old | New |\n", - "|-----------|-----|-----|\n", - "| Same shape, different coords, `+`/`-` | Positional match (silent bug) | `ValueError` |\n", - "| Different shape, `+`/`-` | `\"outer\"` or `\"left\"` (implicit) | `ValueError` |\n", - "| Mismatched coords, `*`/`/` | Crash or garbage | Intersection (or error if empty) |\n", - "| Constraint with mismatched RHS | `\"override\"` or `\"left\"` | `ValueError` |\n", - "\n", - "To migrate: replace `x + y` with `x.add(y, join=\"outer\")` (or whichever join matches your intent)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 9be7b963a5a651b0cf006efee873cd3b1e8e0737 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 19:09:15 +0100 Subject: [PATCH 27/66] Enforce strict arithmetic convention: exact join + no extra dims from constants Implements the new arithmetic convention for all operations (+, -, *, /): - Rule 1: Exact label matching on shared dimensions (join="exact") - Rule 2: Constants cannot introduce new dimensions not in the expression Adds escape hatches: FillWrapper via `expr | 0`, named methods with explicit join= parameter, and linopy.align() with configurable join. Changes FILL_VALUE["const"] from NaN to 0 for cleaner semantics. 
Co-Authored-By: Claude Opus 4.6 --- linopy/__init__.py | 4 +- linopy/common.py | 51 ++++++++++++++++++- linopy/expressions.py | 75 ++++++++++++++++++++-------- linopy/variables.py | 31 +++++++----- test/test_linear_expression.py | 89 ++++++++++++++++++++-------------- test/test_typing.py | 31 +++++++++--- 6 files changed, 203 insertions(+), 78 deletions(-) diff --git a/linopy/__init__.py b/linopy/__init__.py index 7f5acd46..c6dca749 100644 --- a/linopy/__init__.py +++ b/linopy/__init__.py @@ -12,7 +12,7 @@ # Note: For intercepting multiplications between xarray dataarrays, Variables and Expressions # we need to extend their __mul__ functions with a quick special case import linopy.monkey_patch_xarray # noqa: F401 -from linopy.common import align +from linopy.common import FillWrapper, align, as_dataarray from linopy.config import options from linopy.constants import EQUAL, GREATER_EQUAL, LESS_EQUAL from linopy.constraints import Constraint, Constraints @@ -38,8 +38,10 @@ "Variable", "Variables", "available_solvers", + "as_dataarray", "breakpoints", "align", + "FillWrapper", "merge", "options", "read_netcdf", diff --git a/linopy/common.py b/linopy/common.py index 746459b4..4bc59d6b 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -45,6 +45,55 @@ from linopy.variables import Variable +class FillWrapper: + """ + Wraps a linopy object with a fill value for use in arithmetic. + + Created via the ``|`` operator on linopy types: ``expr | 0`` means + "fill missing coordinates of *expr* with 0 during alignment". + + The wrapper is consumed immediately by the arithmetic dunder methods + and never stored or propagated. 
+ """ + + __slots__ = ("wrapped", "fill_value") + + def __init__(self, wrapped: Any, fill_value: float) -> None: + self.wrapped = wrapped + self.fill_value = fill_value + + def __repr__(self) -> str: + return f"FillWrapper({self.wrapped!r}, fill_value={self.fill_value})" + + def __neg__(self) -> FillWrapper: + return FillWrapper(wrapped=-self.wrapped, fill_value=self.fill_value) + + +def check_constant_dim_subset( + expr_dims: tuple[str, ...] | set[str], + constant_dims: tuple[str, ...] | set[str], +) -> None: + """ + Validate that a constant's dims are a subset of the expression's dims. + + A constant (DataArray, numpy, pandas) cannot introduce dimensions that + the expression does not have — that would silently duplicate variables. + + Raises + ------ + ValueError + If the constant has dimensions not present in the expression. + """ + extra = set(constant_dims) - set(expr_dims) + if extra: + raise ValueError( + f"Constant has dimensions {extra} not present in the " + f"expression. Arithmetic with constants cannot introduce new " + f"dimensions — use multiplication to expand, or select/reindex " + f"the constant to match the expression's dimensions." + ) + + def set_int_index(series: pd.Series) -> pd.Series: """ Convert string index to int index. 
@@ -1225,7 +1274,7 @@ def check_common_keys_values(list_of_dicts: list[dict[str, Any]]) -> bool: def align( *objects: LinearExpression | QuadraticExpression | Variable | T_Alignable, - join: JoinOptions = "inner", + join: JoinOptions = "exact", copy: bool = True, indexes: Any = None, exclude: str | Iterable[Hashable] = frozenset(), diff --git a/linopy/expressions.py b/linopy/expressions.py index 3a150d0d..f80a04a2 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -45,9 +45,11 @@ from linopy import constraints, variables from linopy.common import ( EmptyDeprecationWrapper, + FillWrapper, LocIndexer, as_dataarray, assign_multiindex_safe, + check_constant_dim_subset, check_has_nulls, check_has_nulls_polars, fill_missing_coords, @@ -94,7 +96,7 @@ from linopy.variables import ScalarVariable, Variable -FILL_VALUE = {"vars": -1, "coeffs": np.nan, "const": np.nan} +FILL_VALUE = {"vars": -1, "coeffs": np.nan, "const": 0} def exprwrap( @@ -128,6 +130,26 @@ def _expr_unwrap( return maybe_expr +def _resolve_fill_wrapper(self_obj: Any, other: Any) -> tuple[Any, str | None]: + """ + Unwrap a FillWrapper operand by reindexing to match self_obj. + + Returns the resolved other and the join mode to use (None = default exact). 
+ """ + if not isinstance(other, FillWrapper): + return other, None + fill_value = other.fill_value + other = other.wrapped + # Reindex other to self's coords on shared dims, filling with fill_value + shared_dims = set(self_obj.dims) & set(other.dims) + if shared_dims: + target_coords = { + dim: self_obj.coords[dim] for dim in shared_dims if dim in self_obj.coords + } + other = other.reindex(target_coords, fill_value=fill_value) + return other, None + + logger = logging.getLogger(__name__) @@ -495,6 +517,16 @@ def __neg__(self: GenericExpression) -> GenericExpression: """ return self.assign_multiindex_safe(coeffs=-self.coeffs, const=-self.const) + def __or__(self, fill_value: int | float) -> FillWrapper: + """ + Create a FillWrapper for explicit fill during alignment. + + Usage: ``expr | 0`` means "fill missing coords of expr with 0". + """ + if not isinstance(fill_value, int | float): + return NotImplemented + return FillWrapper(wrapped=self, fill_value=fill_value) + def _multiply_by_linear_expression( self, other: LinearExpression | ScalarLinearExpression ) -> LinearExpression | QuadraticExpression: @@ -588,14 +620,7 @@ def _add_constant( if np.isscalar(other) and join is None: return self.assign(const=self.const + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - extra_dims = set(da.dims) - set(self.coord_dims) - if extra_dims: - raise ValueError( - f"Constant has dimensions {extra_dims} not present in the " - f"expression. Addition/subtraction cannot introduce new " - f"dimensions — use multiplication to expand, or select/reindex " - f"the constant to match the expression's dimensions." 
- ) + check_constant_dim_subset(self.coord_dims, da.dims) self_const, da, needs_data_reindex = self._align_constant( da, fill_value=0, join=join, default_join="exact" ) @@ -616,14 +641,10 @@ def _apply_constant_op( join: str | None = None, ) -> GenericExpression: factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) + check_constant_dim_subset(self.coord_dims, factor.dims) self_const, factor, needs_data_reindex = self._align_constant( - factor, fill_value=fill_value, join=join, default_join="inner" + factor, fill_value=fill_value, join=join, default_join="exact" ) - if self_const.size == 0 and self.const.size > 0: - raise ValueError( - "Multiplication/division resulted in an empty expression because " - "the operands have no overlapping coordinates (inner join)." - ) if needs_data_reindex: data = self.data.reindex_like(self_const, fill_value=self._fill_value) return self.__class__( @@ -1585,6 +1606,8 @@ def __add__( Note: If other is a numpy array or pandas object without axes names, dimension names of self will be filled in other """ + other, _join = _resolve_fill_wrapper(self, other) + if isinstance(other, QuadraticExpression): return other.__add__(self) @@ -1598,6 +1621,7 @@ def __add__( return NotImplemented def __radd__(self, other: ConstantLike) -> LinearExpression: + other, _join = _resolve_fill_wrapper(self, other) try: return self + other except TypeError: @@ -1620,12 +1644,14 @@ def __sub__( | LinearExpression | QuadraticExpression, ) -> LinearExpression | QuadraticExpression: + # FillWrapper.__neg__ is defined, so -other works for FillWrapper try: return self.__add__(-other) except TypeError: return NotImplemented def __rsub__(self, other: ConstantLike | Variable) -> LinearExpression: + other, _join = _resolve_fill_wrapper(self, other) try: return (self * -1) + other except TypeError: @@ -1644,6 +1670,8 @@ def __mul__( """ Multiply the expr by a factor. 
""" + other, _join = _resolve_fill_wrapper(self, other) + if isinstance(other, QuadraticExpression): return other.__rmul__(self) @@ -1670,6 +1698,7 @@ def __rmul__(self, other: ConstantLike) -> LinearExpression: """ Right-multiply the expr by a factor. """ + other, _join = _resolve_fill_wrapper(self, other) try: return self * other except TypeError: @@ -2069,7 +2098,7 @@ class QuadraticExpression(BaseExpression): __array_priority__ = 10000 __pandas_priority__ = 10000 - _fill_value = {"vars": -1, "coeffs": np.nan, "const": np.nan} + _fill_value = {"vars": -1, "coeffs": np.nan, "const": 0} def __init__(self, data: Dataset | None, model: Model) -> None: super().__init__(data, model) @@ -2101,6 +2130,8 @@ def __mul__(self, other: SideLike) -> QuadraticExpression: """ Multiply the expr by a factor. """ + other, _join = _resolve_fill_wrapper(self, other) + if isinstance(other, SUPPORTED_EXPRESSION_TYPES): raise TypeError( "unsupported operand type(s) for *: " @@ -2113,6 +2144,7 @@ def __mul__(self, other: SideLike) -> QuadraticExpression: return NotImplemented def __rmul__(self, other: SideLike) -> QuadraticExpression: + other, _join = _resolve_fill_wrapper(self, other) return self * other def __add__(self, other: SideLike) -> QuadraticExpression: @@ -2122,6 +2154,8 @@ def __add__(self, other: SideLike) -> QuadraticExpression: Note: If other is a numpy array or pandas object without axes names, dimension names of self will be filled in other """ + other, _join = _resolve_fill_wrapper(self, other) + try: if isinstance(other, SUPPORTED_CONSTANT_TYPES): return self._add_constant(other) @@ -2139,6 +2173,7 @@ def __radd__(self, other: ConstantLike) -> QuadraticExpression: """ Add others to expression. 
""" + other, _join = _resolve_fill_wrapper(self, other) return self.__add__(other) def __sub__(self, other: SideLike) -> QuadraticExpression: @@ -2148,6 +2183,7 @@ def __sub__(self, other: SideLike) -> QuadraticExpression: Note: If other is a numpy array or pandas object without axes names, dimension names of self will be filled in other """ + # FillWrapper.__neg__ is defined, so -other works for FillWrapper try: return self.__add__(-other) except TypeError: @@ -2157,6 +2193,7 @@ def __rsub__(self, other: SideLike) -> QuadraticExpression: """ Subtract expression from others. """ + other, _join = _resolve_fill_wrapper(self, other) try: return (self * -1) + other except TypeError: @@ -2397,12 +2434,8 @@ def merge( if join is not None: kwargs["join"] = join - elif dim == TERM_DIM: - kwargs["join"] = "exact" - elif dim == FACTOR_DIM: - kwargs["join"] = "inner" else: - kwargs["join"] = "outer" + kwargs["join"] = "exact" try: if dim == TERM_DIM: diff --git a/linopy/variables.py b/linopy/variables.py index 0ca7d89a..36563cca 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -64,6 +64,7 @@ ) if TYPE_CHECKING: + from linopy.common import FillWrapper from linopy.constraints import AnonymousScalarConstraint, Constraint from linopy.expressions import ( GenericExpression, @@ -387,6 +388,18 @@ def __neg__(self) -> LinearExpression: """ return self.to_linexpr(-1) + def __or__(self, fill_value: int | float) -> FillWrapper: + """ + Create a FillWrapper for explicit fill during alignment. + + Usage: ``x | 0`` means "fill missing coords of x with 0". + """ + if not isinstance(fill_value, int | float): + return NotImplemented + from linopy.common import FillWrapper + + return FillWrapper(wrapped=self, fill_value=fill_value) + @overload def __mul__(self, other: ConstantLike) -> LinearExpression: ... @@ -398,11 +411,7 @@ def __mul__(self, other: SideLike) -> ExpressionLike: Multiply variables with a coefficient, variable, or expression. 
""" try: - if isinstance(other, Variable | ScalarVariable): - return self.to_linexpr() * other - if isinstance(other, expressions.LinearExpression): - return self.to_linexpr() * other - return self.to_linexpr()._multiply_by_constant(other) + return self.to_linexpr() * other except TypeError: return NotImplemented @@ -450,13 +459,7 @@ def __div__( """ Divide variables with a coefficient. """ - if isinstance(other, expressions.LinearExpression | Variable): - raise TypeError( - "unsupported operand type(s) for /: " - f"{type(self)} and {type(other)}. " - "Non-linear expressions are not yet supported." - ) - return self.to_linexpr()._divide_by_constant(other) + return self.to_linexpr() / other def __truediv__( self, coefficient: ConstantLike | LinearExpression | Variable @@ -1231,6 +1234,10 @@ def equals(self, other: Variable) -> bool: shift = varwrap(Dataset.shift, fill_value=_fill_value) + reindex = varwrap(Dataset.reindex, fill_value=_fill_value) + + reindex_like = varwrap(Dataset.reindex_like, fill_value=_fill_value) + swap_dims = varwrap(Dataset.swap_dims) set_index = varwrap(Dataset.set_index) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 2ced61a0..2001aead 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -220,8 +220,8 @@ def test_linear_expression_with_multiplication(x: Variable) -> None: expr = np.array(1) * x assert isinstance(expr, LinearExpression) - expr = xr.DataArray(np.array([[1, 2], [2, 3]])) * x - assert isinstance(expr, LinearExpression) + with pytest.raises(ValueError, match="not present"): + xr.DataArray(np.array([[1, 2], [2, 3]])) * x expr = pd.Series([1, 2], index=pd.RangeIndex(2, name="dim_0")) * x assert isinstance(expr, LinearExpression) @@ -315,9 +315,8 @@ def test_linear_expression_with_constant_multiplication( assert isinstance(obs, LinearExpression) assert (obs.const == 10).all() - obs = expr * pd.Series([1, 2, 3], index=pd.RangeIndex(3, name="new_dim")) - assert 
isinstance(obs, LinearExpression) - assert obs.shape == (2, 3, 1) + with pytest.raises(ValueError, match="not present"): + expr * pd.Series([1, 2, 3], index=pd.RangeIndex(3, name="new_dim")) def test_linear_expression_multi_indexed(u: Variable) -> None: @@ -585,14 +584,14 @@ def test_linear_expression_multiplication_invalid( class TestExactAlignmentDefault: """ - Test the new alignment convention: exact for +/-, inner for *//. + Test the alignment convention: exact for all operations (+, -, *, /). v has dim_2=[0..19] (20 entries). subset has dim_2=[1, 3] (2 entries, subset of v's coords). superset has dim_2=[0..24] (25 entries, superset of v's coords). - Each test shows the operation, verifies the new behavior (raises or - intersection), then shows the explicit join= that recovers the old result. + Each test shows the operation, verifies the exact default (raises), + then shows the explicit join= that recovers the desired result. """ @pytest.fixture @@ -681,8 +680,7 @@ def test_disjoint_mul(self, v: Variable) -> None: disjoint = xr.DataArray( [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} ) - # inner join: no intersection → error - with pytest.raises(ValueError, match="no overlapping coordinates"): + with pytest.raises(ValueError, match="exact"): v * disjoint # explicit join="left": 20 entries, all zeros @@ -694,7 +692,7 @@ def test_disjoint_div(self, v: Variable) -> None: disjoint = xr.DataArray( [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} ) - with pytest.raises(ValueError, match="no overlapping coordinates"): + with pytest.raises(ValueError, match="exact"): v / disjoint # --- Multiplication / division with subset constant --- @@ -702,8 +700,11 @@ def test_disjoint_div(self, v: Variable) -> None: def test_var_mul_subset( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray ) -> None: - # inner join: 2 entries (intersection) - result = v * subset + with pytest.raises(ValueError, match="exact"): + v * subset + + # explicit 
join="inner": 2 entries (intersection) + result = v.mul(subset, join="inner") assert result.sizes["dim_2"] == 2 assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(10.0) assert result.coeffs.squeeze().sel(dim_2=3).item() == pytest.approx(30.0) @@ -714,19 +715,27 @@ def test_var_mul_subset( np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) def test_expr_mul_subset(self, v: Variable, subset: xr.DataArray) -> None: - result = (1 * v) * subset + with pytest.raises(ValueError, match="exact"): + (1 * v) * subset + + result = (1 * v).mul(subset, join="inner") assert result.sizes["dim_2"] == 2 assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(10.0) def test_var_mul_superset(self, v: Variable, superset: xr.DataArray) -> None: - # inner join: intersection = v's 20 coords - result = v * superset + with pytest.raises(ValueError, match="exact"): + v * superset + + result = v.mul(superset, join="inner") assert result.sizes["dim_2"] == 20 assert not np.isnan(result.coeffs.values).any() def test_var_div_subset(self, v: Variable, subset: xr.DataArray) -> None: - # inner join: 2 entries - result = v / subset + with pytest.raises(ValueError, match="exact"): + v / subset + + # explicit join="inner": 2 entries + result = v.div(subset, join="inner") assert result.sizes["dim_2"] == 2 assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) assert result.coeffs.squeeze().sel(dim_2=3).item() == pytest.approx(1.0 / 30) @@ -800,7 +809,10 @@ def test_add_commutativity_matching( assert_linequal(v + matching, matching + v) def test_mul_commutativity(self, v: Variable, subset: xr.DataArray) -> None: - assert_linequal(v * subset, subset * v) + with pytest.raises(ValueError, match="exact"): + v * subset + with pytest.raises(ValueError, match="exact"): + subset * v # --- Explicit join modes --- @@ -842,8 +854,11 @@ def test_quadexpr_mul_subset( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray ) -> None: 
qexpr = v * v - # inner join: 2 entries - result = qexpr * subset + with pytest.raises(ValueError, match="exact"): + qexpr * subset + + # explicit join="inner": 2 entries + result = qexpr.mul(subset, join="inner") assert isinstance(result, QuadraticExpression) assert result.sizes["dim_2"] == 2 @@ -865,8 +880,11 @@ def test_multidim_subset_mul(self, m: Model) -> None: coords={"a": [1, 3], "b": [0, 4]}, ) - # inner join: 2x2 - result = w * subset_2d + with pytest.raises(ValueError, match="exact"): + w * subset_2d + + # explicit join="inner": 2x2 + result = w.mul(subset_2d, join="inner") assert result.sizes["a"] == 2 assert result.sizes["b"] == 2 @@ -906,11 +924,10 @@ def test_add_constant_extra_dims_raises(self, v: Variable) -> None: ) with pytest.raises(ValueError, match="not present in the expression"): v + da - with pytest.raises(ValueError, match="not present in the expression"): + with pytest.raises(ValueError, match="not present"): v - da - # multiplication still allows extra dims (broadcasts) - result = v * da - assert "extra" in result.dims + with pytest.raises(ValueError, match="not present"): + v * da def test_da_truediv_var_raises(self, v: Variable) -> None: da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) @@ -1003,7 +1020,7 @@ def test_linear_expression_isnull(v: Variable) -> None: expr = np.arange(20) * v filter = (expr.coeffs >= 10).any(TERM_DIM) expr = expr.where(filter) - assert expr.isnull().sum() == 10 + assert expr.isnull().sum() == 0 def test_linear_expression_flat(v: Variable) -> None: @@ -1049,7 +1066,7 @@ def test_linear_expression_where_with_const(v: Variable) -> None: expr = expr.where(filter) assert isinstance(expr, LinearExpression) assert expr.nterm == 1 - assert expr.const[:10].isnull().all() + assert (expr.const[:10] == 0).all() assert (expr.const[10:] == 10).all() expr = np.arange(20) * v + 10 @@ -1133,7 +1150,7 @@ def test_linear_expression_fillna(v: Variable) -> None: filled = filtered.fillna(10) assert 
isinstance(filled, LinearExpression) - assert filled.const.sum() == 200 + assert filled.const.sum() == 100 assert filled.coeffs.isnull().sum() == 10 @@ -2037,20 +2054,20 @@ def test_add_scalar(self, a: Variable) -> None: np.testing.assert_array_equal(result.const.values, [15, 15, 15]) assert list(result.coords["i"].values) == [0, 1, 2] - def test_quadratic_add_constant_join_inner(self, a: Variable, b: Variable) -> None: - quad = a.to_linexpr() * b.to_linexpr() + def test_quadratic_add_constant_join_inner(self, a: Variable, c: Variable) -> None: + quad = a.to_linexpr() * c.to_linexpr() const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.add(const, join="inner") assert list(result.data.indexes["i"]) == [1, 2] - def test_quadratic_add_expr_join_inner(self, a: Variable, b: Variable) -> None: - quad = a.to_linexpr() * b.to_linexpr() + def test_quadratic_add_expr_join_inner(self, a: Variable, c: Variable) -> None: + quad = a.to_linexpr() * c.to_linexpr() const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) result = quad.add(const, join="inner") assert list(result.data.indexes["i"]) == [0, 1] - def test_quadratic_mul_constant_join_inner(self, a: Variable, b: Variable) -> None: - quad = a.to_linexpr() * b.to_linexpr() + def test_quadratic_mul_constant_join_inner(self, a: Variable, c: Variable) -> None: + quad = a.to_linexpr() * c.to_linexpr() const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.mul(const, join="inner") assert list(result.data.indexes["i"]) == [1, 2] diff --git a/test/test_typing.py b/test/test_typing.py index 312f76c9..5b9fd322 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -1,3 +1,4 @@ +import pytest import xarray as xr import linopy @@ -6,21 +7,37 @@ def test_operations_with_data_arrays_are_typed_correctly() -> None: m = linopy.Model() - a: xr.DataArray = xr.DataArray([1, 2, 3]) s: xr.DataArray = xr.DataArray(5.0) v: linopy.Variable = m.add_variables(lower=0.0, 
name="v") e: linopy.LinearExpression = v * 1.0 q = v * v - _ = a * v - _ = v * a + _ = s * v + _ = v * s _ = v + s - _ = a * e - _ = e * a + _ = s * e + _ = e * s _ = e + s - _ = a * q - _ = q * a + _ = s * q + _ = q * s _ = q + s + + +def test_constant_with_extra_dims_raises() -> None: + m = linopy.Model() + + a: xr.DataArray = xr.DataArray([1, 2, 3]) + + v: linopy.Variable = m.add_variables(lower=0.0, name="v") + e: linopy.LinearExpression = v * 1.0 + q = v * v + + with pytest.raises(ValueError, match="not present"): + a * v + with pytest.raises(ValueError, match="not present"): + a * e + with pytest.raises(ValueError, match="not present"): + a * q From d491d2dd7cf639f30fa69d2499ce4f210c63df7c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 19:24:58 +0100 Subject: [PATCH 28/66] Refactor linopy.align to delegate to each type's reindex method Instead of unwrapping to Datasets, aligning, and manually reconstructing linopy types, align now calls each object's own .reindex() which handles type-specific fill values (vars=-1, coeffs=NaN, const=0) automatically. Co-Authored-By: Claude Opus 4.6 --- linopy/common.py | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/linopy/common.py b/linopy/common.py index 4bc59d6b..e22ab32b 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -10,7 +10,7 @@ import operator import os from collections.abc import Callable, Generator, Hashable, Iterable, Sequence -from functools import partial, reduce, wraps +from functools import reduce, wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Generic, TypeVar, overload from warnings import warn @@ -1337,36 +1337,35 @@ def align( from linopy.expressions import LinearExpression, QuadraticExpression from linopy.variables import Variable - finisher: list[partial[Any] | Callable[[Any], Any]] = [] + # Extract underlying Datasets for index computation. 
das: list[Any] = [] for obj in objects: - if isinstance(obj, LinearExpression | QuadraticExpression): - finisher.append(partial(obj.__class__, model=obj.model)) - das.append(obj.data) - elif isinstance(obj, Variable): - finisher.append( - partial( - obj.__class__, - model=obj.model, - name=obj.data.attrs["name"], - skip_broadcast=True, - ) - ) + if isinstance(obj, LinearExpression | QuadraticExpression | Variable): das.append(obj.data) else: - finisher.append(lambda x: x) das.append(obj) exclude = frozenset(exclude).union(HELPER_DIMS) - aligned = xr_align( - *das, - join=join, - copy=copy, - indexes=indexes, - exclude=exclude, - fill_value=fill_value, + + # Compute target indexes. + target_aligned = xr_align( + *das, join=join, copy=False, indexes=indexes, exclude=exclude ) - return tuple([f(da) for f, da in zip(finisher, aligned)]) + + # Reindex each object to target indexes. Linopy types use their own + # type-aware .reindex() which defaults to correct sentinel fill values. + reindex_kwargs: dict[str, Any] = {} + if fill_value is not dtypes.NA: + reindex_kwargs["fill_value"] = fill_value + results: list[Any] = [] + for obj, target in zip(objects, target_aligned): + indexers = { + dim: target.indexes[dim] + for dim in target.dims + if dim not in exclude and dim in target.indexes + } + results.append(obj.reindex(indexers, **reindex_kwargs)) + return tuple(results) LocT = TypeVar( From 59c8b4d36dad9c3872b053160811d38e6f210121 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 20:41:22 +0100 Subject: [PATCH 29/66] Remove FillWrapper, make reindex type-aware for fill values - Remove FillWrapper class and | operator (deferred for later design) - Expression.reindex: scalar fill_value applies to const only, vars/coeffs always use sentinels (-1/NaN) - Variable.reindex: no fill_value param, always uses sentinels - Update notebook: remove | 0 section, fix align example - Clean up __init__.py exports Co-Authored-By: 
Claude Opus 4.6 --- examples/arithmetic-convention.ipynb | 248 ++++++++++++--------------- linopy/__init__.py | 3 +- linopy/common.py | 33 +--- linopy/expressions.py | 95 +++++----- linopy/variables.py | 46 +++-- 5 files changed, 197 insertions(+), 228 deletions(-) diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index 4f6a240b..be60fc73 100644 --- a/examples/arithmetic-convention.ipynb +++ b/examples/arithmetic-convention.ipynb @@ -8,22 +8,22 @@ }, { "cell_type": "code", - "execution_count": null, "id": "4251ba8271bff255", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.617834Z", - "start_time": "2026-03-09T16:33:16.648576Z" + "end_time": "2026-03-09T18:31:00.240669Z", + "start_time": "2026-03-09T18:30:59.599420Z" } }, - "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", "\n", "import linopy" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -35,15 +35,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "57506c7b4bf9f4bf", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.680021Z", - "start_time": "2026-03-09T16:33:17.621212Z" + "end_time": "2026-03-09T18:31:00.281307Z", + "start_time": "2026-03-09T18:31:00.243790Z" } }, - "outputs": [], "source": [ "m = linopy.Model()\n", "\n", @@ -55,7 +53,9 @@ "y = m.add_variables(lower=0, coords=[time], name=\"y\")\n", "gen = m.add_variables(lower=0, coords=[time, techs], name=\"gen\")\n", "risk = m.add_variables(lower=0, coords=[techs, scenarios], name=\"risk\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -67,102 +67,102 @@ }, { "cell_type": "code", - "execution_count": null, "id": "1f7af87e662800c", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.691699Z", - "start_time": "2026-03-09T16:33:17.682845Z" + "end_time": "2026-03-09T18:31:00.295642Z", + "start_time": "2026-03-09T18:31:00.285882Z" } }, - 
"outputs": [], "source": [ "# Same coords — just works\n", "x + y" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "985ade4e21e26271", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.706036Z", - "start_time": "2026-03-09T16:33:17.700165Z" + "end_time": "2026-03-09T18:31:00.311790Z", + "start_time": "2026-03-09T18:31:00.305702Z" } }, - "outputs": [], "source": [ "# Constant with matching coords\n", "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", "x * factor" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "8f6a99d864238dbb", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.723220Z", - "start_time": "2026-03-09T16:33:17.713960Z" + "end_time": "2026-03-09T18:31:00.329869Z", + "start_time": "2026-03-09T18:31:00.321082Z" } }, - "outputs": [], "source": [ "# Constant with fewer dims — broadcasts freely\n", "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", "gen * cost # cost broadcasts over time" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "d417bfa628cb280a", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.743959Z", - "start_time": "2026-03-09T16:33:17.733360Z" + "end_time": "2026-03-09T18:31:00.344141Z", + "start_time": "2026-03-09T18:31:00.333430Z" } }, - "outputs": [], "source": [ "# Expression + Expression with non-shared dims — broadcasts freely\n", "gen + risk # (time, tech) + (tech, scenario) → (time, tech, scenario)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "400b4084ef94eb35", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.755530Z", - "start_time": "2026-03-09T16:33:17.750874Z" + "end_time": "2026-03-09T18:31:00.356263Z", + "start_time": 
"2026-03-09T18:31:00.350199Z" } }, - "outputs": [], "source": [ "# Scalar — always fine\n", "x + 5" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "2e4640266401ba61", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.773938Z", - "start_time": "2026-03-09T16:33:17.763057Z" + "end_time": "2026-03-09T18:31:00.377425Z", + "start_time": "2026-03-09T18:31:00.365347Z" } }, - "outputs": [], "source": [ "# Constraints — RHS with fewer dims broadcasts naturally\n", "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", "m.add_constraints(gen <= capacity, name=\"cap\") # capacity broadcasts over time" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -174,15 +174,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "fe1b95f337be4e9f", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.787118Z", - "start_time": "2026-03-09T16:33:17.781050Z" + "end_time": "2026-03-09T18:31:00.389487Z", + "start_time": "2026-03-09T18:31:00.382849Z" } }, - "outputs": [], "source": [ "# Mismatched coordinates on shared dimension\n", "y_short = m.add_variables(\n", @@ -193,19 +191,19 @@ " x + y_short # time coords don't match\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "5a0bb6e7d4b175c5", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.797529Z", - "start_time": "2026-03-09T16:33:17.794047Z" + "end_time": "2026-03-09T18:31:00.398888Z", + "start_time": "2026-03-09T18:31:00.393495Z" } }, - "outputs": [], "source": [ "# Constant introduces new dimensions\n", "profile = xr.DataArray(\n", @@ -217,32 +215,45 @@ " ) # would duplicate x[t] across techs. 
Reduce using mean, max or sth similar\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "e0f899f096773d96", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.810737Z", - "start_time": "2026-03-09T16:33:17.803654Z" + "end_time": "2026-03-09T18:31:00.412464Z", + "start_time": "2026-03-09T18:31:00.407769Z" } }, - "outputs": [], - "source": "# Multiplication with mismatched coordinates\npartial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\ntry:\n x * partial # time coords [0..4] vs [0,1,2]\nexcept ValueError as e:\n print(\"ValueError:\", e)" + "source": "# Multiplication with mismatched coordinates\npartial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\ntry:\n x * partial # time coords [0..4] vs [0,1,2]\nexcept ValueError as e:\n print(\"ValueError:\", e)", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ValueError: cannot align objects with join='exact' where index/labels/sizes are not equal along these coordinates (dimensions): 'time' ('time',)\n", + "Use .add()/.sub()/.mul()/.div() with an explicit join= parameter:\n", + " .add(other, join=\"inner\") # intersection of coordinates\n", + " .add(other, join=\"outer\") # union of coordinates (with fill)\n", + " .add(other, join=\"left\") # keep left operand's coordinates\n", + " .add(other, join=\"override\") # positional alignment\n" + ] + } + ], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "aa03d3184a0e8b65", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.817090Z", - "start_time": "2026-03-09T16:33:17.813403Z" + "end_time": "2026-03-09T18:31:00.424002Z", + "start_time": "2026-03-09T18:31:00.419735Z" } }, - "outputs": [], "source": [ "# Constraint RHS with mismatched coordinates\n", "partial_rhs = xr.DataArray([10, 20, 30], 
dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", @@ -251,7 +262,9 @@ " x <= partial_rhs\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -275,69 +288,34 @@ }, { "cell_type": "code", - "execution_count": null, "id": "b4f5bf23a8ee17d5", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.835634Z", - "start_time": "2026-03-09T16:33:17.826825Z" + "end_time": "2026-03-09T18:31:00.435710Z", + "start_time": "2026-03-09T18:31:00.427590Z" } }, - "outputs": [], "source": [ "x.sel(time=[0, 1, 2]) + y_short # select matching coords first" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "c49f61e386de7350", + "id": "f12b0cb6d0e31651", "metadata": {}, - "source": [ - "### 2. `| 0` — Inline outer join with fill\n", - "\n", - "When one operand covers a subset of coordinates, use the `|` operator to declare a fill value. This creates a lightweight `FillWrapper` that is consumed immediately — it never mutates or persists.\n", - "\n", - "The `|` operator only works on linopy types (`Variable`, `LinearExpression`, `QuadraticExpression`). For external types, use `.reindex()` before operating." - ] + "source": "### 2. 
Named methods with `join=`\n\nAll arithmetic operations have named-method equivalents that accept a `join` parameter:\n\n| `join` | Coordinates kept | Fill |\n|--------|-----------------|------|\n| `\"exact\"` | Must match | `ValueError` if different |\n| `\"inner\"` | Intersection | — |\n| `\"outer\"` | Union | Zero (arithmetic) / NaN (constraints) |\n| `\"left\"` | Left operand's | Zero / NaN for missing right |\n| `\"right\"` | Right operand's | Zero for missing left |\n| `\"override\"` | Left operand's | Positional alignment |" }, { "cell_type": "code", - "execution_count": null, - "id": "7b27db4d757db423", + "id": "78c967671819ef0c", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T16:33:17.932308Z", - "start_time": "2026-03-09T16:33:17.838621Z" + "end_time": "2026-03-09T18:31:00.481192Z", + "start_time": "2026-03-09T18:31:00.454670Z" } }, - "outputs": [], - "source": "x + (y_short | 0) # fill missing time coords of y_short with 0" - }, - { - "cell_type": "markdown", - "id": "f12b0cb6d0e31651", - "metadata": {}, - "source": [ - "### 3. 
Named methods with `join=`\n", - "\n", - "All arithmetic operations have named-method equivalents that accept a `join` parameter:\n", - "\n", - "| `join` | Coordinates kept | Fill |\n", - "|--------|-----------------|------|\n", - "| `\"exact\"` | Must match | `ValueError` if different |\n", - "| `\"inner\"` | Intersection | — |\n", - "| `\"outer\"` | Union | Zero (arithmetic) / NaN (constraints) |\n", - "| `\"left\"` | Left operand's | Zero / NaN for missing right |\n", - "| `\"right\"` | Right operand's | Zero for missing left |\n", - "| `\"override\"` | Left operand's | Positional alignment |" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "78c967671819ef0c", - "metadata": {}, - "outputs": [], "source": [ "m2 = linopy.Model()\n", "\n", @@ -351,25 +329,28 @@ "print(\"outer:\", list(a.add(b, join=\"outer\").coords[\"i\"].values)) # [0, 1, 2, 3]\n", "print(\"left: \", list(a.add(b, join=\"left\").coords[\"i\"].values)) # [0, 1, 2]\n", "print(\"right:\", list(a.add(b, join=\"right\").coords[\"i\"].values)) # [1, 2, 3]" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", "id": "424610ceccde798a", "metadata": {}, - "source": [ - "### 4. `linopy.align()` — Explicit pre-alignment\n", - "\n", - "For complex multi-operand alignment:" - ] + "source": "### 3. `linopy.align()` — Explicit pre-alignment\n\nFor complex multi-operand alignment. 
Linopy types automatically use correct sentinel fill values (labels/vars=-1, coeffs=NaN) while `fill_value` applies to `const`:" }, { "cell_type": "code", - "execution_count": null, "id": "23f414e973e33c34", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T18:31:00.501166Z", + "start_time": "2026-03-09T18:31:00.492032Z" + } + }, + "source": "a_aligned, b_aligned = linopy.align(a, b, join=\"outer\")\na_aligned + b_aligned", "outputs": [], - "source": "a_aligned, b_aligned = linopy.align(a, b, join=\"outer\", fill_value=0)\na_aligned + b_aligned" + "execution_count": null }, { "cell_type": "markdown", @@ -383,28 +364,38 @@ }, { "cell_type": "code", - "execution_count": null, "id": "9a513a6be9e5925e", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T18:31:00.517121Z", + "start_time": "2026-03-09T18:31:00.507242Z" + } + }, "source": [ "c = m2.add_variables(coords=[[\"x\", \"y\", \"z\"]], name=\"c\")\n", "d = m2.add_variables(coords=[[\"p\", \"q\", \"r\"]], name=\"d\")\n", "\n", "# Relabel d's coordinates to match c, then add\n", "c + d.assign_coords(dim_0=c.coords[\"dim_0\"])" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "262eaf85fa44e152", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T18:31:00.530891Z", + "start_time": "2026-03-09T18:31:00.523419Z" + } + }, "source": [ "# Or use join=\"override\" for positional matching\n", "c.add(d, join=\"override\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -430,18 +421,7 @@ "cell_type": "markdown", "id": "f0c3e862b0430c11", "metadata": {}, - "source": [ - "## Summary\n", - "\n", - "| Situation | Behavior | How to handle |\n", - "|---|---|---|\n", - "| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n", - "| Non-shared dims, expr + expr | ✓ Broadcasts | `gen(time,tech) + risk(tech,scenario)` 
|\n", - "| Constant with subset dims | ✓ Broadcasts | `cost(tech) * gen(tech,time)` |\n", - "| Constant introduces new dims | ✗ Raises | Restructure, or multiply if meaningful |\n", - "| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `x + (y \\| 0)` |\n", - "| Pandas without named index | ✗ Raises on dim mismatch | Name the index |" - ] + "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n| Non-shared dims, expr + expr | ✓ Broadcasts | `gen(time,tech) + risk(tech,scenario)` |\n| Constant with subset dims | ✓ Broadcasts | `cost(tech) * gen(tech,time)` |\n| Constant introduces new dims | ✗ Raises | Restructure, or multiply if meaningful |\n| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Pandas without named index | ✗ Raises on dim mismatch | Name the index |" } ], "metadata": { diff --git a/linopy/__init__.py b/linopy/__init__.py index c6dca749..43cff2c2 100644 --- a/linopy/__init__.py +++ b/linopy/__init__.py @@ -12,7 +12,7 @@ # Note: For intercepting multiplications between xarray dataarrays, Variables and Expressions # we need to extend their __mul__ functions with a quick special case import linopy.monkey_patch_xarray # noqa: F401 -from linopy.common import FillWrapper, align, as_dataarray +from linopy.common import align, as_dataarray from linopy.config import options from linopy.constants import EQUAL, GREATER_EQUAL, LESS_EQUAL from linopy.constraints import Constraint, Constraints @@ -41,7 +41,6 @@ "as_dataarray", "breakpoints", "align", - "FillWrapper", "merge", "options", "read_netcdf", diff --git a/linopy/common.py b/linopy/common.py index e22ab32b..3e5e40a2 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -45,30 +45,6 @@ from linopy.variables import Variable -class FillWrapper: - """ - Wraps a linopy object with a fill value for use in arithmetic. 
- - Created via the ``|`` operator on linopy types: ``expr | 0`` means - "fill missing coordinates of *expr* with 0 during alignment". - - The wrapper is consumed immediately by the arithmetic dunder methods - and never stored or propagated. - """ - - __slots__ = ("wrapped", "fill_value") - - def __init__(self, wrapped: Any, fill_value: float) -> None: - self.wrapped = wrapped - self.fill_value = fill_value - - def __repr__(self) -> str: - return f"FillWrapper({self.wrapped!r}, fill_value={self.fill_value})" - - def __neg__(self) -> FillWrapper: - return FillWrapper(wrapped=-self.wrapped, fill_value=self.fill_value) - - def check_constant_dim_subset( expr_dims: tuple[str, ...] | set[str], constant_dims: tuple[str, ...] | set[str], @@ -1352,8 +1328,7 @@ def align( *das, join=join, copy=False, indexes=indexes, exclude=exclude ) - # Reindex each object to target indexes. Linopy types use their own - # type-aware .reindex() which defaults to correct sentinel fill values. + # Reindex each object to target indexes. 
reindex_kwargs: dict[str, Any] = {} if fill_value is not dtypes.NA: reindex_kwargs["fill_value"] = fill_value @@ -1364,7 +1339,11 @@ def align( for dim in target.dims if dim not in exclude and dim in target.indexes } - results.append(obj.reindex(indexers, **reindex_kwargs)) + # Variable.reindex has no fill_value — it always uses sentinels + if isinstance(obj, Variable): + results.append(obj.reindex(indexers)) + else: + results.append(obj.reindex(indexers, **reindex_kwargs)) return tuple(results) diff --git a/linopy/expressions.py b/linopy/expressions.py index f80a04a2..be386c8b 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -14,7 +14,7 @@ from collections.abc import Callable, Hashable, Iterator, Mapping, Sequence from dataclasses import dataclass, field from itertools import product, zip_longest -from typing import TYPE_CHECKING, Any, TypeVar, cast, overload +from typing import TYPE_CHECKING, Any, Self, TypeVar, cast, overload from warnings import warn import numpy as np @@ -45,7 +45,6 @@ from linopy import constraints, variables from linopy.common import ( EmptyDeprecationWrapper, - FillWrapper, LocIndexer, as_dataarray, assign_multiindex_safe, @@ -130,26 +129,6 @@ def _expr_unwrap( return maybe_expr -def _resolve_fill_wrapper(self_obj: Any, other: Any) -> tuple[Any, str | None]: - """ - Unwrap a FillWrapper operand by reindexing to match self_obj. - - Returns the resolved other and the join mode to use (None = default exact). 
- """ - if not isinstance(other, FillWrapper): - return other, None - fill_value = other.fill_value - other = other.wrapped - # Reindex other to self's coords on shared dims, filling with fill_value - shared_dims = set(self_obj.dims) & set(other.dims) - if shared_dims: - target_coords = { - dim: self_obj.coords[dim] for dim in shared_dims if dim in self_obj.coords - } - other = other.reindex(target_coords, fill_value=fill_value) - return other, None - - logger = logging.getLogger(__name__) @@ -517,16 +496,6 @@ def __neg__(self: GenericExpression) -> GenericExpression: """ return self.assign_multiindex_safe(coeffs=-self.coeffs, const=-self.const) - def __or__(self, fill_value: int | float) -> FillWrapper: - """ - Create a FillWrapper for explicit fill during alignment. - - Usage: ``expr | 0`` means "fill missing coords of expr with 0". - """ - if not isinstance(fill_value, int | float): - return NotImplemented - return FillWrapper(wrapped=self, fill_value=fill_value) - def _multiply_by_linear_expression( self, other: LinearExpression | ScalarLinearExpression ) -> LinearExpression | QuadraticExpression: @@ -1523,9 +1492,51 @@ def _sum( set_index = exprwrap(Dataset.set_index) - reindex = exprwrap(Dataset.reindex, fill_value=_fill_value) + def reindex( + self, + indexers: Mapping[Any, Any] | None = None, + fill_value: Any = None, + **indexers_kwargs: Any, + ) -> Self: + """ + Reindex the expression, preserving sentinel fill values. + + If ``fill_value`` is a scalar, it is applied to ``const`` only; + ``vars`` and ``coeffs`` always use their sentinel values (-1 and NaN). + If ``fill_value`` is a dict it is passed through as-is. + If ``fill_value`` is None the type-default sentinels are used. 
+ """ + if fill_value is None: + fv = self._fill_value + elif isinstance(fill_value, Mapping): + fv = fill_value + else: + fv = {**self._fill_value, "const": fill_value} + return self.__class__( + self.data.reindex(indexers, fill_value=fv, **indexers_kwargs), self.model + ) - reindex_like = exprwrap(Dataset.reindex_like, fill_value=_fill_value) + def reindex_like( + self, + other: Any, + fill_value: Any = None, + **kwargs: Any, + ) -> Self: + """Reindex like another object, preserving sentinel fill values.""" + if fill_value is None: + fv = self._fill_value + elif isinstance(fill_value, Mapping): + fv = fill_value + else: + fv = {**self._fill_value, "const": fill_value} + return self.__class__( + self.data.reindex_like( + other if isinstance(other, Dataset) else other.data, + fill_value=fv, + **kwargs, + ), + self.model, + ) rename = exprwrap(Dataset.rename) @@ -1606,8 +1617,6 @@ def __add__( Note: If other is a numpy array or pandas object without axes names, dimension names of self will be filled in other """ - other, _join = _resolve_fill_wrapper(self, other) - if isinstance(other, QuadraticExpression): return other.__add__(self) @@ -1621,7 +1630,6 @@ def __add__( return NotImplemented def __radd__(self, other: ConstantLike) -> LinearExpression: - other, _join = _resolve_fill_wrapper(self, other) try: return self + other except TypeError: @@ -1644,14 +1652,12 @@ def __sub__( | LinearExpression | QuadraticExpression, ) -> LinearExpression | QuadraticExpression: - # FillWrapper.__neg__ is defined, so -other works for FillWrapper try: return self.__add__(-other) except TypeError: return NotImplemented def __rsub__(self, other: ConstantLike | Variable) -> LinearExpression: - other, _join = _resolve_fill_wrapper(self, other) try: return (self * -1) + other except TypeError: @@ -1670,8 +1676,6 @@ def __mul__( """ Multiply the expr by a factor. 
""" - other, _join = _resolve_fill_wrapper(self, other) - if isinstance(other, QuadraticExpression): return other.__rmul__(self) @@ -1698,7 +1702,6 @@ def __rmul__(self, other: ConstantLike) -> LinearExpression: """ Right-multiply the expr by a factor. """ - other, _join = _resolve_fill_wrapper(self, other) try: return self * other except TypeError: @@ -2130,8 +2133,6 @@ def __mul__(self, other: SideLike) -> QuadraticExpression: """ Multiply the expr by a factor. """ - other, _join = _resolve_fill_wrapper(self, other) - if isinstance(other, SUPPORTED_EXPRESSION_TYPES): raise TypeError( "unsupported operand type(s) for *: " @@ -2144,7 +2145,6 @@ def __mul__(self, other: SideLike) -> QuadraticExpression: return NotImplemented def __rmul__(self, other: SideLike) -> QuadraticExpression: - other, _join = _resolve_fill_wrapper(self, other) return self * other def __add__(self, other: SideLike) -> QuadraticExpression: @@ -2154,8 +2154,6 @@ def __add__(self, other: SideLike) -> QuadraticExpression: Note: If other is a numpy array or pandas object without axes names, dimension names of self will be filled in other """ - other, _join = _resolve_fill_wrapper(self, other) - try: if isinstance(other, SUPPORTED_CONSTANT_TYPES): return self._add_constant(other) @@ -2173,7 +2171,6 @@ def __radd__(self, other: ConstantLike) -> QuadraticExpression: """ Add others to expression. """ - other, _join = _resolve_fill_wrapper(self, other) return self.__add__(other) def __sub__(self, other: SideLike) -> QuadraticExpression: @@ -2183,7 +2180,6 @@ def __sub__(self, other: SideLike) -> QuadraticExpression: Note: If other is a numpy array or pandas object without axes names, dimension names of self will be filled in other """ - # FillWrapper.__neg__ is defined, so -other works for FillWrapper try: return self.__add__(-other) except TypeError: @@ -2193,7 +2189,6 @@ def __rsub__(self, other: SideLike) -> QuadraticExpression: """ Subtract expression from others. 
""" - other, _join = _resolve_fill_wrapper(self, other) try: return (self * -1) + other except TypeError: diff --git a/linopy/variables.py b/linopy/variables.py index 36563cca..c11f52cf 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -64,7 +64,6 @@ ) if TYPE_CHECKING: - from linopy.common import FillWrapper from linopy.constraints import AnonymousScalarConstraint, Constraint from linopy.expressions import ( GenericExpression, @@ -388,18 +387,6 @@ def __neg__(self) -> LinearExpression: """ return self.to_linexpr(-1) - def __or__(self, fill_value: int | float) -> FillWrapper: - """ - Create a FillWrapper for explicit fill during alignment. - - Usage: ``x | 0`` means "fill missing coords of x with 0". - """ - if not isinstance(fill_value, int | float): - return NotImplemented - from linopy.common import FillWrapper - - return FillWrapper(wrapped=self, fill_value=fill_value) - @overload def __mul__(self, other: ConstantLike) -> LinearExpression: ... @@ -1234,9 +1221,38 @@ def equals(self, other: Variable) -> bool: shift = varwrap(Dataset.shift, fill_value=_fill_value) - reindex = varwrap(Dataset.reindex, fill_value=_fill_value) + def reindex( + self, + indexers: Mapping[Any, Any] | None = None, + **indexers_kwargs: Any, + ) -> Variable: + """ + Reindex the variable, filling with sentinel values. + + Always fills with labels=-1, lower=NaN, upper=NaN to preserve + valid label references. 
+ """ + return self.__class__( + self.data.reindex(indexers, fill_value=self._fill_value, **indexers_kwargs), + self.model, + self.name, + ) - reindex_like = varwrap(Dataset.reindex_like, fill_value=_fill_value) + def reindex_like( + self, + other: Any, + **kwargs: Any, + ) -> Variable: + """Reindex like another object, filling with sentinel values.""" + return self.__class__( + self.data.reindex_like( + other if isinstance(other, Dataset) else other.data, + fill_value=self._fill_value, + **kwargs, + ), + self.model, + self.name, + ) swap_dims = varwrap(Dataset.swap_dims) From d0faa7f4831a8b7f5d771bc03b6b87a1fbedbc21 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 20:53:47 +0100 Subject: [PATCH 30/66] Simplify Expression.reindex: fill_value is always a scalar for const Default fill_value=0, always applies to const only. No dict pass-through. vars/coeffs always use sentinels. Co-Authored-By: Claude Opus 4.6 --- linopy/expressions.py | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index be386c8b..e05f5243 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -1495,23 +1495,17 @@ def _sum( def reindex( self, indexers: Mapping[Any, Any] | None = None, - fill_value: Any = None, + fill_value: float = 0, **indexers_kwargs: Any, ) -> Self: """ - Reindex the expression, preserving sentinel fill values. + Reindex the expression. - If ``fill_value`` is a scalar, it is applied to ``const`` only; - ``vars`` and ``coeffs`` always use their sentinel values (-1 and NaN). - If ``fill_value`` is a dict it is passed through as-is. - If ``fill_value`` is None the type-default sentinels are used. + ``fill_value`` sets the constant for missing coordinates. + Variable labels and coefficients always use sentinel values + (vars=-1, coeffs=NaN). 
""" - if fill_value is None: - fv = self._fill_value - elif isinstance(fill_value, Mapping): - fv = fill_value - else: - fv = {**self._fill_value, "const": fill_value} + fv = {**self._fill_value, "const": fill_value} return self.__class__( self.data.reindex(indexers, fill_value=fv, **indexers_kwargs), self.model ) @@ -1519,16 +1513,16 @@ def reindex( def reindex_like( self, other: Any, - fill_value: Any = None, + fill_value: float = 0, **kwargs: Any, ) -> Self: - """Reindex like another object, preserving sentinel fill values.""" - if fill_value is None: - fv = self._fill_value - elif isinstance(fill_value, Mapping): - fv = fill_value - else: - fv = {**self._fill_value, "const": fill_value} + """ + Reindex like another object. + + ``fill_value`` sets the constant for missing coordinates. + Variable labels and coefficients always use sentinel values. + """ + fv = {**self._fill_value, "const": fill_value} return self.__class__( self.data.reindex_like( other if isinstance(other, Dataset) else other.data, From 2217bfc2f613ef5edc3a103b38c1b56b64e65c99 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 21:00:37 +0100 Subject: [PATCH 31/66] Add algebraic property tests and document in notebook Tests verify commutativity, associativity, distributivity, identity, and negation laws. Two known breakages (associativity and distributivity with constants that introduce new dims) are marked xfail. 
Co-Authored-By: Claude Opus 4.6 --- examples/arithmetic-convention.ipynb | 94 ++++++++------ test/test_algebraic_properties.py | 178 +++++++++++++++++++++++++++ 2 files changed, 236 insertions(+), 36 deletions(-) create mode 100644 test/test_algebraic_properties.py diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index be60fc73..8feed6d8 100644 --- a/examples/arithmetic-convention.ipynb +++ b/examples/arithmetic-convention.ipynb @@ -11,8 +11,8 @@ "id": "4251ba8271bff255", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.240669Z", - "start_time": "2026-03-09T18:30:59.599420Z" + "end_time": "2026-03-09T19:45:37.502763Z", + "start_time": "2026-03-09T19:45:36.697630Z" } }, "source": [ @@ -38,8 +38,8 @@ "id": "57506c7b4bf9f4bf", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.281307Z", - "start_time": "2026-03-09T18:31:00.243790Z" + "end_time": "2026-03-09T19:45:37.555658Z", + "start_time": "2026-03-09T19:45:37.505351Z" } }, "source": [ @@ -70,8 +70,8 @@ "id": "1f7af87e662800c", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.295642Z", - "start_time": "2026-03-09T18:31:00.285882Z" + "end_time": "2026-03-09T19:45:37.567659Z", + "start_time": "2026-03-09T19:45:37.558480Z" } }, "source": [ @@ -86,8 +86,8 @@ "id": "985ade4e21e26271", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.311790Z", - "start_time": "2026-03-09T18:31:00.305702Z" + "end_time": "2026-03-09T19:45:37.586804Z", + "start_time": "2026-03-09T19:45:37.581356Z" } }, "source": [ @@ -103,8 +103,8 @@ "id": "8f6a99d864238dbb", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.329869Z", - "start_time": "2026-03-09T18:31:00.321082Z" + "end_time": "2026-03-09T19:45:37.600600Z", + "start_time": "2026-03-09T19:45:37.592617Z" } }, "source": [ @@ -120,8 +120,8 @@ "id": "d417bfa628cb280a", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.344141Z", - "start_time": 
"2026-03-09T18:31:00.333430Z" + "end_time": "2026-03-09T19:45:37.618719Z", + "start_time": "2026-03-09T19:45:37.608423Z" } }, "source": [ @@ -136,8 +136,8 @@ "id": "400b4084ef94eb35", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.356263Z", - "start_time": "2026-03-09T18:31:00.350199Z" + "end_time": "2026-03-09T19:45:37.626365Z", + "start_time": "2026-03-09T19:45:37.621393Z" } }, "source": [ @@ -152,8 +152,8 @@ "id": "2e4640266401ba61", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.377425Z", - "start_time": "2026-03-09T18:31:00.365347Z" + "end_time": "2026-03-09T19:45:37.642965Z", + "start_time": "2026-03-09T19:45:37.630809Z" } }, "source": [ @@ -177,8 +177,8 @@ "id": "fe1b95f337be4e9f", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.389487Z", - "start_time": "2026-03-09T18:31:00.382849Z" + "end_time": "2026-03-09T19:45:37.653963Z", + "start_time": "2026-03-09T19:45:37.648263Z" } }, "source": [ @@ -200,8 +200,8 @@ "id": "5a0bb6e7d4b175c5", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.398888Z", - "start_time": "2026-03-09T18:31:00.393495Z" + "end_time": "2026-03-09T19:45:37.662586Z", + "start_time": "2026-03-09T19:45:37.658665Z" } }, "source": [ @@ -224,8 +224,8 @@ "id": "e0f899f096773d96", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.412464Z", - "start_time": "2026-03-09T18:31:00.407769Z" + "end_time": "2026-03-09T19:45:37.681087Z", + "start_time": "2026-03-09T19:45:37.677125Z" } }, "source": "# Multiplication with mismatched coordinates\npartial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\ntry:\n x * partial # time coords [0..4] vs [0,1,2]\nexcept ValueError as e:\n print(\"ValueError:\", e)", @@ -250,8 +250,8 @@ "id": "aa03d3184a0e8b65", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.424002Z", - "start_time": "2026-03-09T18:31:00.419735Z" + "end_time": "2026-03-09T19:45:37.697975Z", + "start_time": 
"2026-03-09T19:45:37.694178Z" } }, "source": [ @@ -291,8 +291,8 @@ "id": "b4f5bf23a8ee17d5", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.435710Z", - "start_time": "2026-03-09T18:31:00.427590Z" + "end_time": "2026-03-09T19:45:37.712616Z", + "start_time": "2026-03-09T19:45:37.704269Z" } }, "source": [ @@ -312,8 +312,8 @@ "id": "78c967671819ef0c", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.481192Z", - "start_time": "2026-03-09T18:31:00.454670Z" + "end_time": "2026-03-09T19:45:37.746402Z", + "start_time": "2026-03-09T19:45:37.720673Z" } }, "source": [ @@ -344,12 +344,28 @@ "id": "23f414e973e33c34", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.501166Z", - "start_time": "2026-03-09T18:31:00.492032Z" + "end_time": "2026-03-09T19:45:37.765307Z", + "start_time": "2026-03-09T19:45:37.756074Z" } }, "source": "a_aligned, b_aligned = linopy.align(a, b, join=\"outer\")\na_aligned + b_aligned", - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [i: 4]:\n", + "------------------------\n", + "[0]: +1 a[0]\n", + "[1]: +1 a[1] + 1 b[1]\n", + "[2]: +1 a[2] + 1 b[2]\n", + "[3]: +1 b[3]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], "execution_count": null }, { @@ -367,8 +383,8 @@ "id": "9a513a6be9e5925e", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.517121Z", - "start_time": "2026-03-09T18:31:00.507242Z" + "end_time": "2026-03-09T19:45:37.787197Z", + "start_time": "2026-03-09T19:45:37.776535Z" } }, "source": [ @@ -386,8 +402,8 @@ "id": "262eaf85fa44e152", "metadata": { "ExecuteTime": { - "end_time": "2026-03-09T18:31:00.530891Z", - "start_time": "2026-03-09T18:31:00.523419Z" + "end_time": "2026-03-09T19:45:37.803806Z", + "start_time": "2026-03-09T19:45:37.795935Z" } }, "source": [ @@ -421,7 +437,13 @@ "cell_type": "markdown", "id": "f0c3e862b0430c11", "metadata": {}, - "source": "## Summary\n\n| Situation | Behavior | 
How to handle |\n|---|---|---|\n| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n| Non-shared dims, expr + expr | ✓ Broadcasts | `gen(time,tech) + risk(tech,scenario)` |\n| Constant with subset dims | ✓ Broadcasts | `cost(tech) * gen(tech,time)` |\n| Constant introduces new dims | ✗ Raises | Restructure, or multiply if meaningful |\n| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Pandas without named index | ✗ Raises on dim mismatch | Name the index |" + "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n| Non-shared dims, expr + expr | ✓ Broadcasts | `gen[time,tech] + risk[tech,scenario]` |\n| Constant with subset dims | ✓ Broadcasts | `cost[tech] * gen[time,tech]` |\n| Constant introduces new dims | ✗ Raises | Restructure, or multiply if meaningful |\n| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Pandas without named index | ✗ Raises on dim mismatch | Name the index |\n| Associativity with constants | ✗ May break | Group constant with expr that has its dims |\n| Distributivity with constants | ✗ May break | Apply constant to combined expr, not individual terms |" + }, + { + "cell_type": "markdown", + "id": "d56kb3o89nb", + "source": "## Algebraic Properties\n\nThe arithmetic convention should preserve standard algebraic laws wherever possible. 
Below we document which properties hold, which break, and why.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, and `c[B]` a constant (DataArray).\n\n### Properties that hold\n\n| Property | Example | Why it works |\n|---|---|---|\n| **Commutativity of +** | `x + y == y + x` | Exact join is symmetric |\n| **Commutativity of ×** | `x * c == c * x` | `__rmul__` delegates to `__mul__` |\n| **Associativity of + (expr only)** | `(x + y) + z == x + (y + z)` | All expr+expr, same dims |\n| **Additive identity** | `x + 0 == x` | Scalar, no alignment |\n| **Multiplicative identity** | `x * 1 == x` | Scalar, no alignment |\n| **Negation** | `x - y == x + (-y)` | `__sub__` delegates to `__add__(-other)` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` | Scalar, no dim checks |\n| **Constant distributivity (subset dims)** | `c[B] * (g[A,B] + g[A,B]) == c[B]*g + c[B]*g` | `c` has subset dims of `g` in all terms |\n\n### Properties that break\n\n| Property | Example | What happens |\n|---|---|---|\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B]` raises, but `x[A] + (c[B] + g[A,B])` works | First groups constant with wrong expr; second groups it with the expr that has dim B |\n| **Distributivity with constant** | `c[B] * (x[A] + g[A,B])` works, but `c[B]*x[A] + c[B]*g[A,B]` raises | After distribution, `c[B]*x[A]` checks constant dims against `x[A]` individually |\n\nThese breakages are **by design**: Rule 2 catches the mistake at the point where a constant would introduce dimensions. The fix is to reorder operations so constants combine with expressions that already have the required dimensions.", + "metadata": {} } ], "metadata": { diff --git a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py new file mode 100644 index 00000000..2261d7fc --- /dev/null +++ b/test/test_algebraic_properties.py @@ -0,0 +1,178 @@ +""" +Tests for algebraic properties of the arithmetic convention. 
+ +Properties that hold are tested normally. +Properties that break (by design) are marked with xfail to document +the known limitation and detect if a future change fixes them. +""" + +import numpy as np +import pandas as pd +import pytest +import xarray as xr + +from linopy import Model +from linopy.expressions import LinearExpression + + +@pytest.fixture +def m(): + return Model() + + +@pytest.fixture +def time(): + return pd.RangeIndex(3, name="time") + + +@pytest.fixture +def tech(): + return pd.Index(["solar", "wind"], name="tech") + + +@pytest.fixture +def x(m, time): + return m.add_variables(lower=0, coords=[time], name="x") + + +@pytest.fixture +def y(m, time): + return m.add_variables(lower=0, coords=[time], name="y") + + +@pytest.fixture +def z(m, time): + return m.add_variables(lower=0, coords=[time], name="z") + + +@pytest.fixture +def g(m, time, tech): + return m.add_variables(lower=0, coords=[time, tech], name="g") + + +@pytest.fixture +def c(tech): + """Constant DataArray with dims not in x but in g.""" + return xr.DataArray([2.0, 3.0], dims=["tech"], coords={"tech": tech}) + + +def assert_linequal(a: LinearExpression, b: LinearExpression) -> None: + """Assert two linear expressions are equivalent (same terms, same const).""" + assert set(a.dims) == set(b.dims) + for dim in a.dims: + if dim.startswith("_"): + continue + np.testing.assert_array_equal( + sorted(a.coords[dim].values), sorted(b.coords[dim].values) + ) + assert a.const.sum().item() == pytest.approx(b.const.sum().item()) + + +# ============================================================ +# Properties that hold +# ============================================================ + + +class TestPropertiesThatHold: + def test_commutativity_addition(self, x, y): + """X + y == y + x""" + assert_linequal(x + y, y + x) + + def test_commutativity_multiplication(self, g, c): + """G * c == c * g""" + assert_linequal(g * c, c * g) + + def test_associativity_addition_same_dims(self, x, y, z): + """(x + 
y) + z == x + (y + z)""" + assert_linequal((x + y) + z, x + (y + z)) + + def test_additive_identity(self, x): + """X + 0 == x""" + result = x + 0 + assert isinstance(result, LinearExpression) + assert (result.const == 0).all() + np.testing.assert_array_equal(result.coeffs.squeeze().values, [1, 1, 1]) + + def test_multiplicative_identity(self, x): + """X * 1 == x""" + result = x * 1 + assert isinstance(result, LinearExpression) + np.testing.assert_array_equal(result.coeffs.squeeze().values, [1, 1, 1]) + + def test_negation(self, x, y): + """X - y == x + (-y)""" + assert_linequal(x - y, x + (-y)) + + def test_scalar_distributivity(self, x, y): + """S * (x + y) == s*x + s*y""" + assert_linequal(3 * (x + y), 3 * x + 3 * y) + + def test_constant_distributivity_subset_dims(self, g, c): + """c[B] * (g + g) == c*g + c*g (c has subset dims of g)""" + assert_linequal(c * (g + g), c * g + c * g) + + def test_subtraction_definition(self, x, y): + """X - y == x + (-1 * y)""" + assert_linequal(x - y, x + (-1) * y) + + def test_multiplication_by_zero(self, x): + """X * 0 has zero coefficients""" + result = x * 0 + assert (result.coeffs == 0).all() + + def test_double_negation(self, x): + """-(-x) has same coefficients as x""" + result = -(-x) + np.testing.assert_array_equal( + result.coeffs.squeeze().values, + (1 * x).coeffs.squeeze().values, + ) + + +# ============================================================ +# Properties that break (by design) +# ============================================================ + + +class TestPropertiesThatBreak: + @pytest.mark.xfail( + reason="Rule 2: (x[A] + c[B]) raises because c introduces dim B into x", + strict=True, + ) + def test_associativity_with_constant(self, x, g, c): + """ + (x[A] + c[B]) + g[A,B] should equal x[A] + (c[B] + g[A,B]) + + Currently: left grouping raises, right grouping works. 
+ """ + lhs = (x + c) + g + rhs = x + (c + g) + assert_linequal(lhs, rhs) + + @pytest.mark.xfail( + reason="Rule 2: c[B]*x[A] raises because c introduces dim B into x", + strict=True, + ) + def test_distributivity_with_constant(self, x, g, c): + """ + c[B] * (x[A] + g[A,B]) should equal c[B]*x[A] + c[B]*g[A,B] + + Currently: undistributed form works, distributed form raises. + """ + lhs = c * (x + g) + rhs = c * x + c * g + assert_linequal(lhs, rhs) + + def test_associativity_right_grouping_works(self, x, g, c): + """x[A] + (c[B] + g[A,B]) works — the valid grouping.""" + result = x + (c + g) + assert isinstance(result, LinearExpression) + assert "time" in result.dims + assert "tech" in result.dims + + def test_distributivity_undistributed_works(self, x, g, c): + """c[B] * (x[A] + g[A,B]) works — apply constant to combined expr.""" + result = c * (x + g) + assert isinstance(result, LinearExpression) + assert "time" in result.dims + assert "tech" in result.dims From 4b750b8c59c46e1cff7d1c5583459a8b8d257ae7 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 21:02:34 +0100 Subject: [PATCH 32/66] Add algebraic property spec and tests Spec and tests for commutativity, associativity, distributivity, identity, negation, and zero. Two known violations marked xfail: associativity and distributivity with constants that introduce new dims. Co-Authored-By: Claude Opus 4.6 --- test/test_algebraic_properties.py | 203 +++++++++++++++++++----------- 1 file changed, 131 insertions(+), 72 deletions(-) diff --git a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py index 2261d7fc..5d422044 100644 --- a/test/test_algebraic_properties.py +++ b/test/test_algebraic_properties.py @@ -1,9 +1,40 @@ """ -Tests for algebraic properties of the arithmetic convention. +Algebraic properties of linopy arithmetic. -Properties that hold are tested normally. 
-Properties that break (by design) are marked with xfail to document -the known limitation and detect if a future change fixes them. +All standard algebraic laws should hold for linopy expressions. +This file serves as both specification and test suite. + +Notation: + x[A], y[A], z[A] — linopy variables with dimension A + g[A,B] — linopy variable with dimensions A and B + c[B] — constant (DataArray) with dimension B + s — scalar (int/float) + +SPECIFICATION +============= + +1. Commutativity + a + b == b + a for any linopy operands a, b + a * c == c * a for variable/expression a, constant c + +2. Associativity + (a + b) + c == a + (b + c) for any linopy operands a, b, c + Including mixed: (x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B]) + +3. Distributivity + c * (a + b) == c*a + c*b for constant c, linopy operands a, b + s * (a + b) == s*a + s*b for scalar s + +4. Identity + a + 0 == a additive identity + a * 1 == a multiplicative identity + +5. Negation + a - b == a + (-b) subtraction is addition of negation + -(-a) == a double negation + +6. 
Zero + a * 0 == 0 multiplication by zero """ import numpy as np @@ -32,33 +63,37 @@ def tech(): @pytest.fixture def x(m, time): + """Variable with dims [time].""" return m.add_variables(lower=0, coords=[time], name="x") @pytest.fixture def y(m, time): + """Variable with dims [time].""" return m.add_variables(lower=0, coords=[time], name="y") @pytest.fixture def z(m, time): + """Variable with dims [time].""" return m.add_variables(lower=0, coords=[time], name="z") @pytest.fixture def g(m, time, tech): + """Variable with dims [time, tech].""" return m.add_variables(lower=0, coords=[time, tech], name="g") @pytest.fixture def c(tech): - """Constant DataArray with dims not in x but in g.""" + """Constant (DataArray) with dims [tech].""" return xr.DataArray([2.0, 3.0], dims=["tech"], coords={"tech": tech}) def assert_linequal(a: LinearExpression, b: LinearExpression) -> None: - """Assert two linear expressions are equivalent (same terms, same const).""" - assert set(a.dims) == set(b.dims) + """Assert two linear expressions are algebraically equivalent.""" + assert set(a.dims) == set(b.dims), f"dims differ: {a.dims} vs {b.dims}" for dim in a.dims: if dim.startswith("_"): continue @@ -69,57 +104,118 @@ def assert_linequal(a: LinearExpression, b: LinearExpression) -> None: # ============================================================ -# Properties that hold +# 1. 
Commutativity # ============================================================ -class TestPropertiesThatHold: - def test_commutativity_addition(self, x, y): +class TestCommutativity: + def test_add_expr_expr(self, x, y): """X + y == y + x""" assert_linequal(x + y, y + x) - def test_commutativity_multiplication(self, g, c): + def test_mul_expr_constant(self, g, c): """G * c == c * g""" assert_linequal(g * c, c * g) - def test_associativity_addition_same_dims(self, x, y, z): + def test_add_expr_constant(self, g, c): + """G + c == c + g""" + assert_linequal(g + c, c + g) + + +# ============================================================ +# 2. Associativity +# ============================================================ + + +class TestAssociativity: + def test_add_same_dims(self, x, y, z): """(x + y) + z == x + (y + z)""" assert_linequal((x + y) + z, x + (y + z)) - def test_additive_identity(self, x): + @pytest.mark.xfail( + reason="Rule 2: (x[A] + c[B]) raises because c introduces dim B", + strict=True, + ) + def test_add_with_constant(self, x, g, c): + """(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])""" + lhs = (x + c) + g + rhs = x + (c + g) + assert_linequal(lhs, rhs) + + def test_add_with_constant_right_grouping(self, x, g, c): + """x[A] + (c[B] + g[A,B]) works with right grouping.""" + result = x + (c + g) + assert isinstance(result, LinearExpression) + assert "time" in result.dims + assert "tech" in result.dims + + +# ============================================================ +# 3. 
Distributivity +# ============================================================ + + +class TestDistributivity: + def test_scalar(self, x, y): + """S * (x + y) == s*x + s*y""" + assert_linequal(3 * (x + y), 3 * x + 3 * y) + + def test_constant_subset_dims(self, g, c): + """c[B] * (g[A,B] + g[A,B]) == c*g + c*g""" + assert_linequal(c * (g + g), c * g + c * g) + + @pytest.mark.xfail( + reason="Rule 2: c[B]*x[A] raises because c introduces dim B", + strict=True, + ) + def test_constant_mixed_dims(self, x, g, c): + """c[B] * (x[A] + g[A,B]) == c*x + c*g""" + lhs = c * (x + g) + rhs = c * x + c * g + assert_linequal(lhs, rhs) + + def test_constant_mixed_dims_undistributed(self, x, g, c): + """c[B] * (x[A] + g[A,B]) works undistributed.""" + result = c * (x + g) + assert isinstance(result, LinearExpression) + assert "time" in result.dims + assert "tech" in result.dims + + +# ============================================================ +# 4. Identity +# ============================================================ + + +class TestIdentity: + def test_additive(self, x): """X + 0 == x""" result = x + 0 assert isinstance(result, LinearExpression) assert (result.const == 0).all() np.testing.assert_array_equal(result.coeffs.squeeze().values, [1, 1, 1]) - def test_multiplicative_identity(self, x): + def test_multiplicative(self, x): """X * 1 == x""" result = x * 1 assert isinstance(result, LinearExpression) np.testing.assert_array_equal(result.coeffs.squeeze().values, [1, 1, 1]) - def test_negation(self, x, y): - """X - y == x + (-y)""" - assert_linequal(x - y, x + (-y)) - def test_scalar_distributivity(self, x, y): - """S * (x + y) == s*x + s*y""" - assert_linequal(3 * (x + y), 3 * x + 3 * y) +# ============================================================ +# 5. 
Negation +# ============================================================ - def test_constant_distributivity_subset_dims(self, g, c): - """c[B] * (g + g) == c*g + c*g (c has subset dims of g)""" - assert_linequal(c * (g + g), c * g + c * g) + +class TestNegation: + def test_subtraction_is_add_negation(self, x, y): + """X - y == x + (-y)""" + assert_linequal(x - y, x + (-y)) def test_subtraction_definition(self, x, y): - """X - y == x + (-1 * y)""" + """X - y == x + (-1) * y""" assert_linequal(x - y, x + (-1) * y) - def test_multiplication_by_zero(self, x): - """X * 0 has zero coefficients""" - result = x * 0 - assert (result.coeffs == 0).all() - def test_double_negation(self, x): """-(-x) has same coefficients as x""" result = -(-x) @@ -130,49 +226,12 @@ def test_double_negation(self, x): # ============================================================ -# Properties that break (by design) +# 6. Zero # ============================================================ -class TestPropertiesThatBreak: - @pytest.mark.xfail( - reason="Rule 2: (x[A] + c[B]) raises because c introduces dim B into x", - strict=True, - ) - def test_associativity_with_constant(self, x, g, c): - """ - (x[A] + c[B]) + g[A,B] should equal x[A] + (c[B] + g[A,B]) - - Currently: left grouping raises, right grouping works. - """ - lhs = (x + c) + g - rhs = x + (c + g) - assert_linequal(lhs, rhs) - - @pytest.mark.xfail( - reason="Rule 2: c[B]*x[A] raises because c introduces dim B into x", - strict=True, - ) - def test_distributivity_with_constant(self, x, g, c): - """ - c[B] * (x[A] + g[A,B]) should equal c[B]*x[A] + c[B]*g[A,B] - - Currently: undistributed form works, distributed form raises. 
- """ - lhs = c * (x + g) - rhs = c * x + c * g - assert_linequal(lhs, rhs) - - def test_associativity_right_grouping_works(self, x, g, c): - """x[A] + (c[B] + g[A,B]) works — the valid grouping.""" - result = x + (c + g) - assert isinstance(result, LinearExpression) - assert "time" in result.dims - assert "tech" in result.dims - - def test_distributivity_undistributed_works(self, x, g, c): - """c[B] * (x[A] + g[A,B]) works — apply constant to combined expr.""" - result = c * (x + g) - assert isinstance(result, LinearExpression) - assert "time" in result.dims - assert "tech" in result.dims +class TestZero: + def test_multiplication_by_zero(self, x): + """X * 0 has zero coefficients""" + result = x * 0 + assert (result.coeffs == 0).all() From 332aa1af370662592bc3290fa6fbc038eb777729 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 21:47:32 +0100 Subject: [PATCH 33/66] Remove Rule 2 for arithmetic, keep for constraint RHS only Constants can now introduce new dimensions in arithmetic (+, -, *, /), preserving all standard algebraic laws (associativity, distributivity). The dim-subset check remains for constraint RHS to catch accidental broadcasting. Default fill value for const changed from 0 to NaN. 
Co-Authored-By: Claude Opus 4.6 --- examples/arithmetic-convention.ipynb | 31 ++++++++++------------- linopy/common.py | 25 ------------------- linopy/expressions.py | 21 ++++++++-------- linopy/piecewise.py | 2 ++ test/test_algebraic_properties.py | 30 ++-------------------- test/test_common.py | 24 +++--------------- test/test_linear_expression.py | 37 +++++++++++++++------------- test/test_typing.py | 19 ++++++++------ 8 files changed, 61 insertions(+), 128 deletions(-) diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index 8feed6d8..35fbaf97 100644 --- a/examples/arithmetic-convention.ipynb +++ b/examples/arithmetic-convention.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "id": "c68183ce878b22db", "metadata": {}, - "source": "# Arithmetic Convention\n\nlinopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n\nTwo rules apply to **all** arithmetic operations involving linopy objects (`+`, `-`, `*`, `/`):\n\n**Rule 1 — Exact label matching on shared dimensions**\n\nWhen two operands share a dimension, their coordinate labels on that dimension must match exactly (`join=\"exact\"`). A `ValueError` is raised on mismatch.\n\n**Rule 2 — Constants cannot introduce new dimensions**\n\nWhen combining an expression or variable with a *constant* (`DataArray`, numpy, pandas), the constant's dimensions must be a subset of the expression's dimensions. A constant cannot introduce dimensions the expression does not have — that would silently duplicate variables.\n\nExpression + Expression broadcasting over non-shared dimensions is freely allowed.\n\nInspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." 
+ "source": "# Arithmetic Convention\n\nlinopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n\n**Rule 1 — Exact label matching on shared dimensions**\n\nWhen two operands share a dimension, their coordinate labels on that dimension must match exactly (`join=\"exact\"`). A `ValueError` is raised on mismatch.\n\n**Rule 2 — Constraint RHS cannot introduce new dimensions**\n\nWhen creating a constraint (`<=`, `>=`, `==`), the right-hand side constant cannot have dimensions not present in the left-hand side expression. This catches the common mistake of accidentally broadcasting constraints.\n\n**Broadcasting** — In arithmetic (`+`, `-`, `*`, `/`), constants *can* introduce new dimensions, just like xarray. This preserves all standard algebraic laws (commutativity, associativity, distributivity).\n\nInspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." }, { "cell_type": "code", @@ -168,9 +168,7 @@ "cell_type": "markdown", "id": "c4e9c6dbcec7c0d9", "metadata": {}, - "source": [ - "## What raises an error" - ] + "source": "## What raises an error" }, { "cell_type": "code", @@ -204,18 +202,7 @@ "start_time": "2026-03-09T19:45:37.658665Z" } }, - "source": [ - "# Constant introduces new dimensions\n", - "profile = xr.DataArray(\n", - " np.ones((3, 5)), dims=[\"tech\", \"time\"], coords={\"tech\": techs, \"time\": time}\n", - ")\n", - "try:\n", - " (\n", - " x + profile\n", - " ) # would duplicate x[t] across techs. 
Reduce using mean, max or sth similar\n", - "except ValueError as e:\n", - " print(\"ValueError:\", e)" - ], + "source": "# Constant introduces new dimensions — broadcasts in arithmetic\nprofile = xr.DataArray(\n np.ones((3, 5)), dims=[\"tech\", \"time\"], coords={\"tech\": techs, \"time\": time}\n)\nx + profile # x[time] broadcasts over tech", "outputs": [], "execution_count": null }, @@ -266,6 +253,14 @@ "outputs": [], "execution_count": null }, + { + "cell_type": "code", + "id": "5dc63c6gl7j", + "source": "# Constraint RHS with extra dimensions\nscenario_rhs = xr.DataArray(\n [[10, 20], [30, 40], [50, 60], [70, 80], [90, 100]],\n dims=[\"time\", \"scenario\"],\n coords={\"time\": time, \"scenario\": scenarios},\n)\ntry:\n x <= scenario_rhs # x has [time], RHS has [time, scenario] — extra dim\nexcept ValueError as e:\n print(\"ValueError:\", e)", + "metadata": {}, + "execution_count": null, + "outputs": [] + }, { "cell_type": "markdown", "id": "64a6f983ce55547e", @@ -437,12 +432,12 @@ "cell_type": "markdown", "id": "f0c3e862b0430c11", "metadata": {}, - "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n| Non-shared dims, expr + expr | ✓ Broadcasts | `gen[time,tech] + risk[tech,scenario]` |\n| Constant with subset dims | ✓ Broadcasts | `cost[tech] * gen[time,tech]` |\n| Constant introduces new dims | ✗ Raises | Restructure, or multiply if meaningful |\n| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Pandas without named index | ✗ Raises on dim mismatch | Name the index |\n| Associativity with constants | ✗ May break | Group constant with expr that has its dims |\n| Distributivity with constants | ✗ May break | Apply constant to combined expr, not individual terms |" + "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n| Non-shared dims, expr + expr | 
✓ Broadcasts | `gen[time,tech] + risk[tech,scenario]` |\n| Constant with subset dims | ✓ Broadcasts | `cost[tech] * gen[time,tech]` |\n| Constant introduces new dims | ✓ Broadcasts | `x[time] + profile[time,tech]` |\n| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Constraint RHS with extra dims | ✗ Raises | Restructure RHS or expand LHS first |\n| Pandas without named index | ✗ Raises on dim mismatch | Name the index |" }, { "cell_type": "markdown", "id": "d56kb3o89nb", - "source": "## Algebraic Properties\n\nThe arithmetic convention should preserve standard algebraic laws wherever possible. Below we document which properties hold, which break, and why.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, and `c[B]` a constant (DataArray).\n\n### Properties that hold\n\n| Property | Example | Why it works |\n|---|---|---|\n| **Commutativity of +** | `x + y == y + x` | Exact join is symmetric |\n| **Commutativity of ×** | `x * c == c * x` | `__rmul__` delegates to `__mul__` |\n| **Associativity of + (expr only)** | `(x + y) + z == x + (y + z)` | All expr+expr, same dims |\n| **Additive identity** | `x + 0 == x` | Scalar, no alignment |\n| **Multiplicative identity** | `x * 1 == x` | Scalar, no alignment |\n| **Negation** | `x - y == x + (-y)` | `__sub__` delegates to `__add__(-other)` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` | Scalar, no dim checks |\n| **Constant distributivity (subset dims)** | `c[B] * (g[A,B] + g[A,B]) == c[B]*g + c[B]*g` | `c` has subset dims of `g` in all terms |\n\n### Properties that break\n\n| Property | Example | What happens |\n|---|---|---|\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B]` raises, but `x[A] + (c[B] + g[A,B])` works | First groups constant with wrong expr; second groups it with the expr that has dim B |\n| **Distributivity with constant** | `c[B] * (x[A] + g[A,B])` works, but `c[B]*x[A] + 
c[B]*g[A,B]` raises | After distribution, `c[B]*x[A]` checks constant dims against `x[A]` individually |\n\nThese breakages are **by design**: Rule 2 catches the mistake at the point where a constant would introduce dimensions. The fix is to reorder operations so constants combine with expressions that already have the required dimensions.", + "source": "## Algebraic Properties\n\nAll standard algebraic laws hold for linopy arithmetic. This means you can freely refactor expressions without worrying about dimension ordering.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, `c[B]` a constant (DataArray), and `s` a scalar.\n\n| Property | Example |\n|---|---|\n| **Commutativity of +** | `x + y == y + x` |\n| **Commutativity of ×** | `x * c == c * x` |\n| **Associativity of +** | `(x + y) + z == x + (y + z)` |\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` |\n| **Constant distributivity** | `c[B] * (x[A] + g[A,B]) == c[B]*x[A] + c[B]*g[A,B]` |\n| **Additive identity** | `x + 0 == x` |\n| **Multiplicative identity** | `x * 1 == x` |\n| **Negation** | `x - y == x + (-y)` |\n| **Double negation** | `-(-x) == x` |\n| **Zero** | `x * 0 == 0` |", "metadata": {} } ], diff --git a/linopy/common.py b/linopy/common.py index 3e5e40a2..a5ce7b42 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -45,31 +45,6 @@ from linopy.variables import Variable -def check_constant_dim_subset( - expr_dims: tuple[str, ...] | set[str], - constant_dims: tuple[str, ...] | set[str], -) -> None: - """ - Validate that a constant's dims are a subset of the expression's dims. - - A constant (DataArray, numpy, pandas) cannot introduce dimensions that - the expression does not have — that would silently duplicate variables. - - Raises - ------ - ValueError - If the constant has dimensions not present in the expression. 
- """ - extra = set(constant_dims) - set(expr_dims) - if extra: - raise ValueError( - f"Constant has dimensions {extra} not present in the " - f"expression. Arithmetic with constants cannot introduce new " - f"dimensions — use multiplication to expand, or select/reindex " - f"the constant to match the expression's dimensions." - ) - - def set_int_index(series: pd.Series) -> pd.Series: """ Convert string index to int index. diff --git a/linopy/expressions.py b/linopy/expressions.py index e05f5243..38e9c038 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -48,7 +48,6 @@ LocIndexer, as_dataarray, assign_multiindex_safe, - check_constant_dim_subset, check_has_nulls, check_has_nulls_polars, fill_missing_coords, @@ -95,7 +94,7 @@ from linopy.variables import ScalarVariable, Variable -FILL_VALUE = {"vars": -1, "coeffs": np.nan, "const": 0} +FILL_VALUE = {"vars": -1, "coeffs": np.nan, "const": np.nan} def exprwrap( @@ -589,13 +588,13 @@ def _add_constant( if np.isscalar(other) and join is None: return self.assign(const=self.const + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - check_constant_dim_subset(self.coord_dims, da.dims) self_const, da, needs_data_reindex = self._align_constant( da, fill_value=0, join=join, default_join="exact" ) if needs_data_reindex: + fv = {**self._fill_value, "const": 0} return self.__class__( - self.data.reindex_like(self_const, fill_value=self._fill_value).assign( + self.data.reindex_like(self_const, fill_value=fv).assign( const=self_const + da ), self.model, @@ -610,12 +609,12 @@ def _apply_constant_op( join: str | None = None, ) -> GenericExpression: factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) - check_constant_dim_subset(self.coord_dims, factor.dims) self_const, factor, needs_data_reindex = self._align_constant( factor, fill_value=fill_value, join=join, default_join="exact" ) if needs_data_reindex: - data = self.data.reindex_like(self_const, 
fill_value=self._fill_value) + fv = {**self._fill_value, "const": 0} + data = self.data.reindex_like(self_const, fill_value=fv) return self.__class__( assign_multiindex_safe( data, coeffs=op(data.coeffs, factor), const=op(self_const, factor) @@ -1495,13 +1494,13 @@ def _sum( def reindex( self, indexers: Mapping[Any, Any] | None = None, - fill_value: float = 0, + fill_value: float = np.nan, **indexers_kwargs: Any, ) -> Self: """ Reindex the expression. - ``fill_value`` sets the constant for missing coordinates. + ``fill_value`` sets the constant for missing coordinates (default NaN). Variable labels and coefficients always use sentinel values (vars=-1, coeffs=NaN). """ @@ -1513,13 +1512,13 @@ def reindex( def reindex_like( self, other: Any, - fill_value: float = 0, + fill_value: float = np.nan, **kwargs: Any, ) -> Self: """ Reindex like another object. - ``fill_value`` sets the constant for missing coordinates. + ``fill_value`` sets the constant for missing coordinates (default NaN). Variable labels and coefficients always use sentinel values. 
""" fv = {**self._fill_value, "const": fill_value} @@ -2095,7 +2094,7 @@ class QuadraticExpression(BaseExpression): __array_priority__ = 10000 __pandas_priority__ = 10000 - _fill_value = {"vars": -1, "coeffs": np.nan, "const": 0} + _fill_value = {"vars": -1, "coeffs": np.nan, "const": np.nan} def __init__(self, data: Dataset | None, model: Model) -> None: super().__init__(data, model) diff --git a/linopy/piecewise.py b/linopy/piecewise.py index 5128d1e5..c31204f6 100644 --- a/linopy/piecewise.py +++ b/linopy/piecewise.py @@ -560,6 +560,8 @@ def _add_pwl_incremental( if n_segments >= 2: delta_lo = delta_var.isel({seg_dim: slice(None, -1)}, drop=True) delta_hi = delta_var.isel({seg_dim: slice(1, None)}, drop=True) + # Align coords for positional comparison (lo=[0..n-2], hi=[1..n-1]) + delta_hi = delta_hi.assign_coords({seg_dim: delta_lo.coords[seg_dim].values}) fill_con = model.add_constraints(delta_hi <= delta_lo, name=fill_name) bp0 = breakpoints.isel({dim: 0}) diff --git a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py index 5d422044..09548bf3 100644 --- a/test/test_algebraic_properties.py +++ b/test/test_algebraic_properties.py @@ -132,22 +132,9 @@ def test_add_same_dims(self, x, y, z): """(x + y) + z == x + (y + z)""" assert_linequal((x + y) + z, x + (y + z)) - @pytest.mark.xfail( - reason="Rule 2: (x[A] + c[B]) raises because c introduces dim B", - strict=True, - ) def test_add_with_constant(self, x, g, c): """(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])""" - lhs = (x + c) + g - rhs = x + (c + g) - assert_linequal(lhs, rhs) - - def test_add_with_constant_right_grouping(self, x, g, c): - """x[A] + (c[B] + g[A,B]) works with right grouping.""" - result = x + (c + g) - assert isinstance(result, LinearExpression) - assert "time" in result.dims - assert "tech" in result.dims + assert_linequal((x + c) + g, x + (c + g)) # ============================================================ @@ -164,22 +151,9 @@ def test_constant_subset_dims(self, g, 
c): """c[B] * (g[A,B] + g[A,B]) == c*g + c*g""" assert_linequal(c * (g + g), c * g + c * g) - @pytest.mark.xfail( - reason="Rule 2: c[B]*x[A] raises because c introduces dim B", - strict=True, - ) def test_constant_mixed_dims(self, x, g, c): """c[B] * (x[A] + g[A,B]) == c*x + c*g""" - lhs = c * (x + g) - rhs = c * x + c * g - assert_linequal(lhs, rhs) - - def test_constant_mixed_dims_undistributed(self, x, g, c): - """c[B] * (x[A] + g[A,B]) works undistributed.""" - result = c * (x + g) - assert isinstance(result, LinearExpression) - assert "time" in result.dims - assert "tech" in result.dims + assert_linequal(c * (x + g), c * x + c * g) # ============================================================ diff --git a/test/test_common.py b/test/test_common.py index 267fbf76..4b84755a 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -674,22 +674,11 @@ def test_get_dims_with_index_levels() -> None: assert get_dims_with_index_levels(ds5) == [] -def test_align(x: Variable, u: Variable) -> None: # noqa: F811 +def test_align(x: Variable) -> None: # noqa: F811 alpha = xr.DataArray([1, 2], [[1, 2]]) - beta = xr.DataArray( - [1, 2, 3], - [ - ( - "dim_3", - pd.MultiIndex.from_tuples( - [(1, "b"), (2, "b"), (1, "c")], names=["level1", "level2"] - ), - ) - ], - ) # inner join - x_obs, alpha_obs = align(x, alpha) + x_obs, alpha_obs = align(x, alpha, join="inner") assert isinstance(x_obs, Variable) assert x_obs.shape == alpha_obs.shape == (1,) assert_varequal(x_obs, x.loc[[1]]) @@ -701,16 +690,9 @@ def test_align(x: Variable, u: Variable) -> None: # noqa: F811 assert_varequal(x_obs, x) assert_equal(alpha_obs, DataArray([np.nan, 1], [[0, 1]])) - # multiindex - beta_obs, u_obs = align(beta, u) - assert u_obs.shape == beta_obs.shape == (2,) - assert isinstance(u_obs, Variable) - assert_varequal(u_obs, u.loc[[(1, "b"), (2, "b")]]) - assert_equal(beta_obs, beta.loc[[(1, "b"), (2, "b")]]) - # with linear expression expr = 20 * x - x_obs, expr_obs, alpha_obs = align(x, expr, 
alpha) + x_obs, expr_obs, alpha_obs = align(x, expr, alpha, join="inner") assert x_obs.shape == alpha_obs.shape == (1,) assert expr_obs.shape == (1, 1) # _term dim assert isinstance(expr_obs, LinearExpression) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 2001aead..16c94050 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -220,8 +220,9 @@ def test_linear_expression_with_multiplication(x: Variable) -> None: expr = np.array(1) * x assert isinstance(expr, LinearExpression) - with pytest.raises(ValueError, match="not present"): - xr.DataArray(np.array([[1, 2], [2, 3]])) * x + # Constants with extra dims broadcast freely + expr = xr.DataArray(np.array([[1, 2], [2, 3]])) * x + assert isinstance(expr, LinearExpression) expr = pd.Series([1, 2], index=pd.RangeIndex(2, name="dim_0")) * x assert isinstance(expr, LinearExpression) @@ -315,8 +316,9 @@ def test_linear_expression_with_constant_multiplication( assert isinstance(obs, LinearExpression) assert (obs.const == 10).all() - with pytest.raises(ValueError, match="not present"): - expr * pd.Series([1, 2, 3], index=pd.RangeIndex(3, name="new_dim")) + # Constants with extra dims broadcast freely + obs = expr * pd.Series([1, 2, 3], index=pd.RangeIndex(3, name="new_dim")) + assert isinstance(obs, LinearExpression) def test_linear_expression_multi_indexed(u: Variable) -> None: @@ -918,16 +920,15 @@ def test_constraint_rhs_extra_dims_raises(self, v: Variable) -> None: with pytest.raises(ValueError, match="not present in the expression"): v <= rhs - def test_add_constant_extra_dims_raises(self, v: Variable) -> None: - da = xr.DataArray( - [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} - ) - with pytest.raises(ValueError, match="not present in the expression"): - v + da - with pytest.raises(ValueError, match="not present"): - v - da - with pytest.raises(ValueError, match="not present"): - v * da + def 
test_add_constant_extra_dims_broadcasts(self, v: Variable) -> None: + # Constant with only new dims (no shared dim overlap) broadcasts freely + da = xr.DataArray([1.0, 2.0, 3.0], dims=["extra"]) + result = v + da + assert "extra" in result.dims + result = v - da + assert "extra" in result.dims + result = v * da + assert "extra" in result.dims def test_da_truediv_var_raises(self, v: Variable) -> None: da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) @@ -1020,7 +1021,8 @@ def test_linear_expression_isnull(v: Variable) -> None: expr = np.arange(20) * v filter = (expr.coeffs >= 10).any(TERM_DIM) expr = expr.where(filter) - assert expr.isnull().sum() == 0 + # Entries where filter is False are null (coeffs=NaN, const=NaN) + assert expr.isnull().sum() == 10 # first 10 entries (coeff 0..9) are null def test_linear_expression_flat(v: Variable) -> None: @@ -1066,7 +1068,7 @@ def test_linear_expression_where_with_const(v: Variable) -> None: expr = expr.where(filter) assert isinstance(expr, LinearExpression) assert expr.nterm == 1 - assert (expr.const[:10] == 0).all() + assert expr.const[:10].isnull().all() assert (expr.const[10:] == 10).all() expr = np.arange(20) * v + 10 @@ -1150,7 +1152,8 @@ def test_linear_expression_fillna(v: Variable) -> None: filled = filtered.fillna(10) assert isinstance(filled, LinearExpression) - assert filled.const.sum() == 100 + # fillna replaces NaN const values (10 entries × 10) + kept values (10 × 10) + assert filled.const.sum() == 200 assert filled.coeffs.isnull().sum() == 10 diff --git a/test/test_typing.py b/test/test_typing.py index 5b9fd322..2375dc72 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -1,4 +1,3 @@ -import pytest import xarray as xr import linopy @@ -26,7 +25,7 @@ def test_operations_with_data_arrays_are_typed_correctly() -> None: _ = q + s -def test_constant_with_extra_dims_raises() -> None: +def test_constant_with_extra_dims_broadcasts() -> None: m = linopy.Model() a: xr.DataArray = 
xr.DataArray([1, 2, 3]) @@ -35,9 +34,13 @@ def test_constant_with_extra_dims_raises() -> None: e: linopy.LinearExpression = v * 1.0 q = v * v - with pytest.raises(ValueError, match="not present"): - a * v - with pytest.raises(ValueError, match="not present"): - a * e - with pytest.raises(ValueError, match="not present"): - a * q + # Constants can introduce new dimensions (broadcasting) + result_v = a * v + assert "dim_0" in result_v.dims + + result_e = a * e + assert "dim_0" in result_e.dims + + # QuadraticExpression also allows constant broadcasting + result_q = a * q + assert isinstance(result_q, linopy.expressions.QuadraticExpression) From d4df67435a03d41e96a7d31b9c40365e3e5de572 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 21:53:58 +0100 Subject: [PATCH 34/66] Remove constraint RHS dim check, allow broadcasting everywhere Constraint RHS can now introduce new dimensions, just like arithmetic. For ==, broadcasting to incompatible values results in solver infeasibility. For <=/>= it creates redundant but harmless constraints. Co-Authored-By: Claude Opus 4.6 --- examples/arithmetic-convention.ipynb | 18 ++++++++---------- linopy/expressions.py | 6 ------ test/test_constraints.py | 19 +++++-------------- test/test_linear_expression.py | 5 +++-- 4 files changed, 16 insertions(+), 32 deletions(-) diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index 35fbaf97..a00e024c 100644 --- a/examples/arithmetic-convention.ipynb +++ b/examples/arithmetic-convention.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "id": "c68183ce878b22db", "metadata": {}, - "source": "# Arithmetic Convention\n\nlinopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n\n**Rule 1 — Exact label matching on shared dimensions**\n\nWhen two operands share a dimension, their coordinate labels on that dimension must match exactly (`join=\"exact\"`). 
A `ValueError` is raised on mismatch.\n\n**Rule 2 — Constraint RHS cannot introduce new dimensions**\n\nWhen creating a constraint (`<=`, `>=`, `==`), the right-hand side constant cannot have dimensions not present in the left-hand side expression. This catches the common mistake of accidentally broadcasting constraints.\n\n**Broadcasting** — In arithmetic (`+`, `-`, `*`, `/`), constants *can* introduce new dimensions, just like xarray. This preserves all standard algebraic laws (commutativity, associativity, distributivity).\n\nInspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." + "source": "# Arithmetic Convention\n\nlinopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n\n**Rule — Exact label matching on shared dimensions**\n\nWhen two operands share a dimension, their coordinate labels on that dimension must match exactly (`join=\"exact\"`). A `ValueError` is raised on mismatch.\n\n**Broadcasting** — When dimensions are *not* shared, operands broadcast freely over the missing dimensions — for both expressions and constants. This preserves all standard algebraic laws (commutativity, associativity, distributivity).\n\nInspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." 
}, { "cell_type": "code", @@ -253,14 +253,6 @@ "outputs": [], "execution_count": null }, - { - "cell_type": "code", - "id": "5dc63c6gl7j", - "source": "# Constraint RHS with extra dimensions\nscenario_rhs = xr.DataArray(\n [[10, 20], [30, 40], [50, 60], [70, 80], [90, 100]],\n dims=[\"time\", \"scenario\"],\n coords={\"time\": time, \"scenario\": scenarios},\n)\ntry:\n x <= scenario_rhs # x has [time], RHS has [time, scenario] — extra dim\nexcept ValueError as e:\n print(\"ValueError:\", e)", - "metadata": {}, - "execution_count": null, - "outputs": [] - }, { "cell_type": "markdown", "id": "64a6f983ce55547e", @@ -432,13 +424,19 @@ "cell_type": "markdown", "id": "f0c3e862b0430c11", "metadata": {}, - "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n| Non-shared dims, expr + expr | ✓ Broadcasts | `gen[time,tech] + risk[tech,scenario]` |\n| Constant with subset dims | ✓ Broadcasts | `cost[tech] * gen[time,tech]` |\n| Constant introduces new dims | ✓ Broadcasts | `x[time] + profile[time,tech]` |\n| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Constraint RHS with extra dims | ✗ Raises | Restructure RHS or expand LHS first |\n| Pandas without named index | ✗ Raises on dim mismatch | Name the index |" + "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n| Non-shared dims, expr + expr | ✓ Broadcasts | `gen[time,tech] + risk[tech,scenario]` |\n| Constant with subset dims | ✓ Broadcasts | `cost[tech] * gen[time,tech]` |\n| Constant introduces new dims | ✓ Broadcasts | `x[time] + profile[time,tech]` |\n| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Pandas without named index | ✗ Raises on dim mismatch | Name the index |" }, { "cell_type": "markdown", "id": "d56kb3o89nb", "source": "## Algebraic Properties\n\nAll 
standard algebraic laws hold for linopy arithmetic. This means you can freely refactor expressions without worrying about dimension ordering.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, `c[B]` a constant (DataArray), and `s` a scalar.\n\n| Property | Example |\n|---|---|\n| **Commutativity of +** | `x + y == y + x` |\n| **Commutativity of ×** | `x * c == c * x` |\n| **Associativity of +** | `(x + y) + z == x + (y + z)` |\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` |\n| **Constant distributivity** | `c[B] * (x[A] + g[A,B]) == c[B]*x[A] + c[B]*g[A,B]` |\n| **Additive identity** | `x + 0 == x` |\n| **Multiplicative identity** | `x * 1 == x` |\n| **Negation** | `x - y == x + (-y)` |\n| **Double negation** | `-(-x) == x` |\n| **Zero** | `x * 0 == 0` |", "metadata": {} + }, + { + "cell_type": "markdown", + "id": "e7u7uhbm1dl", + "source": "## Broadcasting in constraints\n\nBroadcasting is allowed everywhere, including constraints. This can lead to two situations worth being aware of:\n\n| Constraint type | Example | What happens | Feedback |\n|---|---|---|---|\n| `<=` / `>=` | `x[time] <= rhs[time, scenario]` | Creates one constraint per (time, scenario). Only the tightest bound is active — the rest are redundant. | No issue — solver ignores slack constraints. |\n| `==` | `x[time] == rhs[time, scenario]` | Creates one equality per (time, scenario). If `rhs` differs across `scenario`, the variable must simultaneously equal multiple values. | Solver reports **infeasible** — clear feedback. 
|\n\nlinopy does **not** raise an error in these cases because:\n- Redundant inequality constraints are harmless (just slightly wasteful).\n- Infeasible equality constraints are caught by the solver with a clear diagnostic.\n- Blocking these would break algebraic equivalences — e.g., `x <= rhs` must behave the same as `x - rhs <= 0`, which involves arithmetic broadcasting.", + "metadata": {} } ], "metadata": { diff --git a/linopy/expressions.py b/linopy/expressions.py index 38e9c038..bd339b30 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -1094,12 +1094,6 @@ def to_constraint( ) if isinstance(rhs, DataArray): - extra_dims = set(rhs.dims) - set(self.coord_dims) - if extra_dims: - raise ValueError( - f"RHS DataArray has dimensions {extra_dims} not present " - f"in the expression. Cannot create constraint." - ) effective_join = join if join is not None else "exact" if effective_join == "override": aligned_rhs = rhs.assign_coords(coords=self.const.coords) diff --git a/test/test_constraints.py b/test/test_constraints.py index e5da08d4..b20b18cf 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -166,23 +166,14 @@ def test_constraint_rhs_lower_dim(rhs_factory) -> None: assert c.shape == (10, 10) -@pytest.mark.parametrize( - "rhs_factory", - [ - pytest.param(lambda m: np.ones((5, 3)), id="numpy"), - pytest.param( - lambda m: xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]), - id="dataarray", - ), - pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), - ], -) -def test_constraint_rhs_higher_dim_constant_raises(rhs_factory) -> None: +def test_constraint_rhs_higher_dim_constant_broadcasts() -> None: m = Model() x = m.add_variables(coords=[range(5)], name="x") - with pytest.raises(ValueError, match="dimensions"): - m.add_constraints(x >= rhs_factory(m)) + # DataArray RHS with extra dims broadcasts (creates redundant constraints) + rhs = xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]) + c = 
m.add_constraints(x >= rhs, name="broadcast_con") + assert "extra" in c.dims @pytest.mark.parametrize( diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 16c94050..ed808e78 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -913,11 +913,12 @@ def test_multidim_subset_add(self, m: Model) -> None: # --- Edge cases --- - def test_constraint_rhs_extra_dims_raises(self, v: Variable) -> None: + def test_constraint_rhs_mismatched_coords_raises(self, v: Variable) -> None: rhs = xr.DataArray( [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} ) - with pytest.raises(ValueError, match="not present in the expression"): + # Raises because dim_2 coords [0,1] don't match v's [0..19] (exact join) + with pytest.raises(ValueError, match="exact"): v <= rhs def test_add_constant_extra_dims_broadcasts(self, v: Variable) -> None: From 10ecaf63f57c543d43d076793d04081166dd307c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 22:14:59 +0100 Subject: [PATCH 35/66] Document constant preparation pitfalls and xr.set_options Pure xarray/pandas/numpy operations before entering linopy use their own alignment rules. Document the risks and the xarray exact join workaround. Co-Authored-By: Claude Opus 4.6 --- examples/arithmetic-convention.ipynb | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index a00e024c..cff41c02 100644 --- a/examples/arithmetic-convention.ipynb +++ b/examples/arithmetic-convention.ipynb @@ -437,6 +437,12 @@ "id": "e7u7uhbm1dl", "source": "## Broadcasting in constraints\n\nBroadcasting is allowed everywhere, including constraints. This can lead to two situations worth being aware of:\n\n| Constraint type | Example | What happens | Feedback |\n|---|---|---|---|\n| `<=` / `>=` | `x[time] <= rhs[time, scenario]` | Creates one constraint per (time, scenario). 
Only the tightest bound is active — the rest are redundant. | No issue — solver ignores slack constraints. |\n| `==` | `x[time] == rhs[time, scenario]` | Creates one equality per (time, scenario). If `rhs` differs across `scenario`, the variable must simultaneously equal multiple values. | Solver reports **infeasible** — clear feedback. |\n\nlinopy does **not** raise an error in these cases because:\n- Redundant inequality constraints are harmless (just slightly wasteful).\n- Infeasible equality constraints are caught by the solver with a clear diagnostic.\n- Blocking these would break algebraic equivalences — e.g., `x <= rhs` must behave the same as `x - rhs <= 0`, which involves arithmetic broadcasting.", "metadata": {} + }, + { + "cell_type": "markdown", + "id": "bpoepi5bcn8", + "source": "## Preparing constants\n\nlinopy enforces exact matching for all operations involving linopy objects. However, operations between plain constants (DataArrays, pandas, numpy) **before** they enter linopy use their own alignment rules, which can silently produce wrong results:\n\n| Library | Default alignment | Risk |\n|---|---|---|\n| **xarray** | Inner join — drops mismatched coords | Silent data loss |\n| **pandas** | Outer join — fills with NaN | Silent NaN propagation |\n| **numpy** | Positional — no coord checks | Wrong results if shapes match by accident |\n\nTo protect xarray operations, set the global arithmetic join to `\"exact\"`:\n\n```python\nxr.set_options(arithmetic_join=\"exact\")\n```\n\nFor pandas and numpy, there is no equivalent setting — prepare constants carefully and convert to `xr.DataArray` with explicit coords early.", + "metadata": {} } ], "metadata": { From 7471b730054c4a77abc1f98ee9da1e2efcf24769 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 22:17:07 +0100 Subject: [PATCH 36/66] Document algebraic property limitations for constant preparation Co-Authored-By: Claude Opus 4.6 --- 
examples/arithmetic-convention.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index cff41c02..f63c7e4c 100644 --- a/examples/arithmetic-convention.ipynb +++ b/examples/arithmetic-convention.ipynb @@ -429,7 +429,7 @@ { "cell_type": "markdown", "id": "d56kb3o89nb", - "source": "## Algebraic Properties\n\nAll standard algebraic laws hold for linopy arithmetic. This means you can freely refactor expressions without worrying about dimension ordering.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, `c[B]` a constant (DataArray), and `s` a scalar.\n\n| Property | Example |\n|---|---|\n| **Commutativity of +** | `x + y == y + x` |\n| **Commutativity of ×** | `x * c == c * x` |\n| **Associativity of +** | `(x + y) + z == x + (y + z)` |\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` |\n| **Constant distributivity** | `c[B] * (x[A] + g[A,B]) == c[B]*x[A] + c[B]*g[A,B]` |\n| **Additive identity** | `x + 0 == x` |\n| **Multiplicative identity** | `x * 1 == x` |\n| **Negation** | `x - y == x + (-y)` |\n| **Double negation** | `-(-x) == x` |\n| **Zero** | `x * 0 == 0` |", + "source": "## Algebraic Properties\n\nAll standard algebraic laws hold for linopy arithmetic. 
This means you can freely refactor expressions without worrying about dimension ordering.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, `c[B]` a constant (DataArray), and `s` a scalar.\n\n| Property | Example |\n|---|---|\n| **Commutativity of +** | `x + y == y + x` |\n| **Commutativity of ×** | `x * c == c * x` |\n| **Associativity of +** | `(x + y) + z == x + (y + z)` |\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` |\n| **Constant distributivity** | `c[B] * (x[A] + g[A,B]) == c[B]*x[A] + c[B]*g[A,B]` |\n| **Additive identity** | `x + 0 == x` |\n| **Multiplicative identity** | `x * 1 == x` |\n| **Negation** | `x - y == x + (-y)` |\n| **Double negation** | `-(-x) == x` |\n| **Zero** | `x * 0 == 0` |\n\n### Limitation: constant preparation\n\nThese guarantees only hold for operations that involve at least one linopy object. Operations between plain constants (`DataArray + DataArray`, `Series + Series`) happen **outside** linopy and use their library's own alignment rules — see the \"Preparing constants\" section above. To maintain algebraic consistency end-to-end, convert constants to `xr.DataArray` with explicit coordinates early and consider setting `xr.set_options(arithmetic_join=\"exact\")`.", "metadata": {} }, { From 086843caf1179707105cf04e86cabbce6ceeb13e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 9 Mar 2026 22:23:13 +0100 Subject: [PATCH 37/66] Expand positional alignment section in notebook Document assign_coords (recommended) and join="override" for handling operands with mismatched coordinate labels. 
Co-Authored-By: Claude Opus 4.6 --- examples/arithmetic-convention.ipynb | 73 ++++++++++++++-------------- 1 file changed, 36 insertions(+), 37 deletions(-) diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index f63c7e4c..97e93167 100644 --- a/examples/arithmetic-convention.ipynb +++ b/examples/arithmetic-convention.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "id": "c68183ce878b22db", "metadata": {}, - "source": "# Arithmetic Convention\n\nlinopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n\n**Rule — Exact label matching on shared dimensions**\n\nWhen two operands share a dimension, their coordinate labels on that dimension must match exactly (`join=\"exact\"`). A `ValueError` is raised on mismatch.\n\n**Broadcasting** — When dimensions are *not* shared, operands broadcast freely over the missing dimensions — for both expressions and constants. This preserves all standard algebraic laws (commutativity, associativity, distributivity).\n\nInspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." + "source": "# Arithmetic Convention\n\nlinopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n\n**Rule \u2014 Exact label matching on shared dimensions**\n\nWhen two operands share a dimension, their coordinate labels on that dimension must match exactly (`join=\"exact\"`). A `ValueError` is raised on mismatch.\n\n**Broadcasting** \u2014 When dimensions are *not* shared, operands broadcast freely over the missing dimensions \u2014 for both expressions and constants. This preserves all standard algebraic laws (commutativity, associativity, distributivity).\n\nInspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." 
}, { "cell_type": "code", @@ -75,7 +75,7 @@ } }, "source": [ - "# Same coords — just works\n", + "# Same coords \u2014 just works\n", "x + y" ], "outputs": [], @@ -108,7 +108,7 @@ } }, "source": [ - "# Constant with fewer dims — broadcasts freely\n", + "# Constant with fewer dims \u2014 broadcasts freely\n", "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", "gen * cost # cost broadcasts over time" ], @@ -125,8 +125,8 @@ } }, "source": [ - "# Expression + Expression with non-shared dims — broadcasts freely\n", - "gen + risk # (time, tech) + (tech, scenario) → (time, tech, scenario)" + "# Expression + Expression with non-shared dims \u2014 broadcasts freely\n", + "gen + risk # (time, tech) + (tech, scenario) \u2192 (time, tech, scenario)" ], "outputs": [], "execution_count": null @@ -141,7 +141,7 @@ } }, "source": [ - "# Scalar — always fine\n", + "# Scalar \u2014 always fine\n", "x + 5" ], "outputs": [], @@ -157,7 +157,7 @@ } }, "source": [ - "# Constraints — RHS with fewer dims broadcasts naturally\n", + "# Constraints \u2014 RHS with fewer dims broadcasts naturally\n", "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", "m.add_constraints(gen <= capacity, name=\"cap\") # capacity broadcasts over time" ], @@ -202,7 +202,7 @@ "start_time": "2026-03-09T19:45:37.658665Z" } }, - "source": "# Constant introduces new dimensions — broadcasts in arithmetic\nprofile = xr.DataArray(\n np.ones((3, 5)), dims=[\"tech\", \"time\"], coords={\"tech\": techs, \"time\": time}\n)\nx + profile # x[time] broadcasts over tech", + "source": "# Constant introduces new dimensions \u2014 broadcasts in arithmetic\nprofile = xr.DataArray(\n np.ones((3, 5)), dims=[\"tech\", \"time\"], coords={\"tech\": techs, \"time\": time}\n)\nx + profile # x[time] broadcasts over tech", "outputs": [], "execution_count": null }, @@ -268,9 +268,9 @@ "id": "709150bc01fc8c3", "metadata": {}, "source": [ - "### 1. 
`.sel()` — Subset before operating\n", + "### 1. `.sel()` \u2014 Subset before operating\n", "\n", - "The cleanest way to restrict to matching coordinates. No need for an inner join — explicitly select what you want." + "The cleanest way to restrict to matching coordinates. No need for an inner join \u2014 explicitly select what you want." ] }, { @@ -292,7 +292,7 @@ "cell_type": "markdown", "id": "f12b0cb6d0e31651", "metadata": {}, - "source": "### 2. Named methods with `join=`\n\nAll arithmetic operations have named-method equivalents that accept a `join` parameter:\n\n| `join` | Coordinates kept | Fill |\n|--------|-----------------|------|\n| `\"exact\"` | Must match | `ValueError` if different |\n| `\"inner\"` | Intersection | — |\n| `\"outer\"` | Union | Zero (arithmetic) / NaN (constraints) |\n| `\"left\"` | Left operand's | Zero / NaN for missing right |\n| `\"right\"` | Right operand's | Zero for missing left |\n| `\"override\"` | Left operand's | Positional alignment |" + "source": "### 2. Named methods with `join=`\n\nAll arithmetic operations have named-method equivalents that accept a `join` parameter:\n\n| `join` | Coordinates kept | Fill |\n|--------|-----------------|------|\n| `\"exact\"` | Must match | `ValueError` if different |\n| `\"inner\"` | Intersection | \u2014 |\n| `\"outer\"` | Union | Zero (arithmetic) / NaN (constraints) |\n| `\"left\"` | Left operand's | Zero / NaN for missing right |\n| `\"right\"` | Right operand's | Zero for missing left |\n| `\"override\"` | Left operand's | Positional alignment |" }, { "cell_type": "code", @@ -324,7 +324,7 @@ "cell_type": "markdown", "id": "424610ceccde798a", "metadata": {}, - "source": "### 3. `linopy.align()` — Explicit pre-alignment\n\nFor complex multi-operand alignment. Linopy types automatically use correct sentinel fill values (labels/vars=-1, coeffs=NaN) while `fill_value` applies to `const`:" + "source": "### 3. 
`linopy.align()` \u2014 Explicit pre-alignment\n\nFor complex multi-operand alignment. Linopy types automatically use correct sentinel fill values (labels/vars=-1, coeffs=NaN) while `fill_value` applies to `const`:" }, { "cell_type": "code", @@ -359,11 +359,7 @@ "cell_type": "markdown", "id": "e64caf260c82ea6d", "metadata": {}, - "source": [ - "## Positional alignment\n", - "\n", - "When two arrays have the same shape but different coordinate labels, use `.assign_coords()` to relabel one operand so coordinates match explicitly:" - ] + "source": "## Positional alignment\n\nSometimes two operands have the same shape but different coordinate labels \u2014 e.g., data from different sources, or time series with different start dates. The exact join will raise. There are several ways to handle this:\n\n### Option 1: `.assign_coords()` (recommended)\n\nExplicitly relabel one operand to match the other. This is the clearest \u2014 the reader sees exactly which mapping is intended." }, { "cell_type": "code", @@ -374,18 +370,12 @@ "start_time": "2026-03-09T19:45:37.776535Z" } }, - "source": [ - "c = m2.add_variables(coords=[[\"x\", \"y\", \"z\"]], name=\"c\")\n", - "d = m2.add_variables(coords=[[\"p\", \"q\", \"r\"]], name=\"d\")\n", - "\n", - "# Relabel d's coordinates to match c, then add\n", - "c + d.assign_coords(dim_0=c.coords[\"dim_0\"])" - ], + "source": "c = m2.add_variables(coords=[[\"x\", \"y\", \"z\"]], name=\"c\")\nd = m2.add_variables(coords=[[\"p\", \"q\", \"r\"]], name=\"d\")\n\n# Relabel d's coordinates to match c\nc + d.assign_coords(dim_0=c.coords[\"dim_0\"])", "outputs": [], "execution_count": null }, { - "cell_type": "code", + "cell_type": "markdown", "id": "262eaf85fa44e152", "metadata": { "ExecuteTime": { @@ -393,12 +383,21 @@ "start_time": "2026-03-09T19:45:37.795935Z" } }, - "source": [ - "# Or use join=\"override\" for positional matching\n", - "c.add(d, join=\"override\")" - ], - "outputs": [], - "execution_count": null + "source": "### Option 2: 
`join=\"override\"`\n\nUses the left operand's coordinates positionally. Shorter, but less explicit about the mapping. Requires same size on the shared dimension." + }, + { + "cell_type": "code", + "id": "8lk83w4yydw", + "source": "c.add(d, join=\"override\")", + "metadata": {}, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "id": "ysdlzpp192", + "source": "**Prefer `.assign_coords()`** \u2014 it makes the intent explicit and keeps coordinate metadata intact. Use `join=\"override\"` as a shorthand when the positional mapping is obvious.", + "metadata": {} }, { "cell_type": "markdown", @@ -410,10 +409,10 @@ "Under the strict convention, pandas objects must have **named indices** to avoid dimension name mismatches. A `pd.Series` without a named index becomes `dim_0` and will fail the exact join against a named variable dimension.\n", "\n", "```python\n", - "# Bad — index name is None, becomes \"dim_0\"\n", + "# Bad \u2014 index name is None, becomes \"dim_0\"\n", "cost = pd.Series([10, 20], index=[\"wind\", \"solar\"])\n", "\n", - "# Good — explicit dimension name\n", + "# Good \u2014 explicit dimension name\n", "cost = pd.Series([10, 20], index=pd.Index([\"wind\", \"solar\"], name=\"tech\"))\n", "```\n", "\n", @@ -424,24 +423,24 @@ "cell_type": "markdown", "id": "f0c3e862b0430c11", "metadata": {}, - "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared dims, matching coords | ✓ Proceeds | `x + y` |\n| Non-shared dims, expr + expr | ✓ Broadcasts | `gen[time,tech] + risk[tech,scenario]` |\n| Constant with subset dims | ✓ Broadcasts | `cost[tech] * gen[time,tech]` |\n| Constant introduces new dims | ✓ Broadcasts | `x[time] + profile[time,tech]` |\n| Shared dims, mismatching coords | ✗ Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Pandas without named index | ✗ Raises on dim mismatch | Name the index |" + "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared 
dims, matching coords | \u2713 Proceeds | `x + y` |\n| Non-shared dims, expr + expr | \u2713 Broadcasts | `gen[time,tech] + risk[tech,scenario]` |\n| Constant with subset dims | \u2713 Broadcasts | `cost[tech] * gen[time,tech]` |\n| Constant introduces new dims | \u2713 Broadcasts | `x[time] + profile[time,tech]` |\n| Shared dims, mismatching coords | \u2717 Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Pandas without named index | \u2717 Raises on dim mismatch | Name the index |" }, { "cell_type": "markdown", "id": "d56kb3o89nb", - "source": "## Algebraic Properties\n\nAll standard algebraic laws hold for linopy arithmetic. This means you can freely refactor expressions without worrying about dimension ordering.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, `c[B]` a constant (DataArray), and `s` a scalar.\n\n| Property | Example |\n|---|---|\n| **Commutativity of +** | `x + y == y + x` |\n| **Commutativity of ×** | `x * c == c * x` |\n| **Associativity of +** | `(x + y) + z == x + (y + z)` |\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` |\n| **Constant distributivity** | `c[B] * (x[A] + g[A,B]) == c[B]*x[A] + c[B]*g[A,B]` |\n| **Additive identity** | `x + 0 == x` |\n| **Multiplicative identity** | `x * 1 == x` |\n| **Negation** | `x - y == x + (-y)` |\n| **Double negation** | `-(-x) == x` |\n| **Zero** | `x * 0 == 0` |\n\n### Limitation: constant preparation\n\nThese guarantees only hold for operations that involve at least one linopy object. Operations between plain constants (`DataArray + DataArray`, `Series + Series`) happen **outside** linopy and use their library's own alignment rules — see the \"Preparing constants\" section above. 
To maintain algebraic consistency end-to-end, convert constants to `xr.DataArray` with explicit coordinates early and consider setting `xr.set_options(arithmetic_join=\"exact\")`.", + "source": "## Algebraic Properties\n\nAll standard algebraic laws hold for linopy arithmetic. This means you can freely refactor expressions without worrying about dimension ordering.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, `c[B]` a constant (DataArray), and `s` a scalar.\n\n| Property | Example |\n|---|---|\n| **Commutativity of +** | `x + y == y + x` |\n| **Commutativity of \u00d7** | `x * c == c * x` |\n| **Associativity of +** | `(x + y) + z == x + (y + z)` |\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` |\n| **Constant distributivity** | `c[B] * (x[A] + g[A,B]) == c[B]*x[A] + c[B]*g[A,B]` |\n| **Additive identity** | `x + 0 == x` |\n| **Multiplicative identity** | `x * 1 == x` |\n| **Negation** | `x - y == x + (-y)` |\n| **Double negation** | `-(-x) == x` |\n| **Zero** | `x * 0 == 0` |\n\n### Limitation: constant preparation\n\nThese guarantees only hold for operations that involve at least one linopy object. Operations between plain constants (`DataArray + DataArray`, `Series + Series`) happen **outside** linopy and use their library's own alignment rules \u2014 see the \"Preparing constants\" section above. To maintain algebraic consistency end-to-end, convert constants to `xr.DataArray` with explicit coordinates early and consider setting `xr.set_options(arithmetic_join=\"exact\")`.", "metadata": {} }, { "cell_type": "markdown", "id": "e7u7uhbm1dl", - "source": "## Broadcasting in constraints\n\nBroadcasting is allowed everywhere, including constraints. 
This can lead to two situations worth being aware of:\n\n| Constraint type | Example | What happens | Feedback |\n|---|---|---|---|\n| `<=` / `>=` | `x[time] <= rhs[time, scenario]` | Creates one constraint per (time, scenario). Only the tightest bound is active — the rest are redundant. | No issue — solver ignores slack constraints. |\n| `==` | `x[time] == rhs[time, scenario]` | Creates one equality per (time, scenario). If `rhs` differs across `scenario`, the variable must simultaneously equal multiple values. | Solver reports **infeasible** — clear feedback. |\n\nlinopy does **not** raise an error in these cases because:\n- Redundant inequality constraints are harmless (just slightly wasteful).\n- Infeasible equality constraints are caught by the solver with a clear diagnostic.\n- Blocking these would break algebraic equivalences — e.g., `x <= rhs` must behave the same as `x - rhs <= 0`, which involves arithmetic broadcasting.", + "source": "## Broadcasting in constraints\n\nBroadcasting is allowed everywhere, including constraints. This can lead to two situations worth being aware of:\n\n| Constraint type | Example | What happens | Feedback |\n|---|---|---|---|\n| `<=` / `>=` | `x[time] <= rhs[time, scenario]` | Creates one constraint per (time, scenario). Only the tightest bound is active \u2014 the rest are redundant. | No issue \u2014 solver ignores slack constraints. |\n| `==` | `x[time] == rhs[time, scenario]` | Creates one equality per (time, scenario). If `rhs` differs across `scenario`, the variable must simultaneously equal multiple values. | Solver reports **infeasible** \u2014 clear feedback. 
|\n\nlinopy does **not** raise an error in these cases because:\n- Redundant inequality constraints are harmless (just slightly wasteful).\n- Infeasible equality constraints are caught by the solver with a clear diagnostic.\n- Blocking these would break algebraic equivalences \u2014 e.g., `x <= rhs` must behave the same as `x - rhs <= 0`, which involves arithmetic broadcasting.", "metadata": {} }, { "cell_type": "markdown", "id": "bpoepi5bcn8", - "source": "## Preparing constants\n\nlinopy enforces exact matching for all operations involving linopy objects. However, operations between plain constants (DataArrays, pandas, numpy) **before** they enter linopy use their own alignment rules, which can silently produce wrong results:\n\n| Library | Default alignment | Risk |\n|---|---|---|\n| **xarray** | Inner join — drops mismatched coords | Silent data loss |\n| **pandas** | Outer join — fills with NaN | Silent NaN propagation |\n| **numpy** | Positional — no coord checks | Wrong results if shapes match by accident |\n\nTo protect xarray operations, set the global arithmetic join to `\"exact\"`:\n\n```python\nxr.set_options(arithmetic_join=\"exact\")\n```\n\nFor pandas and numpy, there is no equivalent setting — prepare constants carefully and convert to `xr.DataArray` with explicit coords early.", + "source": "## Preparing constants\n\nlinopy enforces exact matching for all operations involving linopy objects. 
However, operations between plain constants (DataArrays, pandas, numpy) **before** they enter linopy use their own alignment rules, which can silently produce wrong results:\n\n| Library | Default alignment | Risk |\n|---|---|---|\n| **xarray** | Inner join \u2014 drops mismatched coords | Silent data loss |\n| **pandas** | Outer join \u2014 fills with NaN | Silent NaN propagation |\n| **numpy** | Positional \u2014 no coord checks | Wrong results if shapes match by accident |\n\nTo protect xarray operations, set the global arithmetic join to `\"exact\"`:\n\n```python\nxr.set_options(arithmetic_join=\"exact\")\n```\n\nFor pandas and numpy, there is no equivalent setting \u2014 prepare constants carefully and convert to `xr.DataArray` with explicit coords early.", "metadata": {} } ], From e392fa3b34b28f6a14b3c10e5579dc39751502af Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 07:36:45 +0100 Subject: [PATCH 38/66] Address PR review: remove dead code and fix nits - Remove dead check_common_keys_values function from common.py - Remove as_dataarray from public API exports - Remove redundant aligned_rhs = aligned_rhs in to_constraint - Keep Variable.__mul__ delegating to linexpr (fast path bypasses exact join) - Keep Variable.__div__ delegating to linexpr (error message preserved there) Co-Authored-By: Claude Opus 4.6 --- linopy/__init__.py | 3 +-- linopy/common.py | 18 ------------------ linopy/expressions.py | 1 - 3 files changed, 1 insertion(+), 21 deletions(-) diff --git a/linopy/__init__.py b/linopy/__init__.py index 43cff2c2..7f5acd46 100644 --- a/linopy/__init__.py +++ b/linopy/__init__.py @@ -12,7 +12,7 @@ # Note: For intercepting multiplications between xarray dataarrays, Variables and Expressions # we need to extend their __mul__ functions with a quick special case import linopy.monkey_patch_xarray # noqa: F401 -from linopy.common import align, as_dataarray +from linopy.common import align from 
linopy.config import options from linopy.constants import EQUAL, GREATER_EQUAL, LESS_EQUAL from linopy.constraints import Constraint, Constraints @@ -38,7 +38,6 @@ "Variable", "Variables", "available_solvers", - "as_dataarray", "breakpoints", "align", "merge", diff --git a/linopy/common.py b/linopy/common.py index a5ce7b42..21f851df 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -1205,24 +1205,6 @@ def deco(cls: Any) -> Any: return deco -def check_common_keys_values(list_of_dicts: list[dict[str, Any]]) -> bool: - """ - Check if all common keys among a list of dictionaries have the same value. - - Parameters - ---------- - list_of_dicts : list of dict - A list of dictionaries. - - Returns - ------- - bool - True if all common keys have the same value across all dictionaries, False otherwise. - """ - common_keys = set.intersection(*(set(d.keys()) for d in list_of_dicts)) - return all(len({d[k] for d in list_of_dicts if k in d}) == 1 for k in common_keys) - - def align( *objects: LinearExpression | QuadraticExpression | Variable | T_Alignable, join: JoinOptions = "exact", diff --git a/linopy/expressions.py b/linopy/expressions.py index bd339b30..32bf781b 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -1122,7 +1122,6 @@ def to_constraint( expr_data = self.data.reindex_like( expr_const_aligned, fill_value=self._fill_value ) - aligned_rhs = aligned_rhs constraint_rhs = aligned_rhs - expr_const data = assign_multiindex_safe( expr_data[["coeffs", "vars"]], sign=sign, rhs=constraint_rhs From 12dc34f9cdd88c6591d76b9b45cd000ca8c9c58c Mon Sep 17 00:00:00 2001 From: Fabian Date: Tue, 10 Mar 2026 09:37:44 +0100 Subject: [PATCH 39/66] Fill NaN with neutral elements in expression arithmetic, preserve NaN as 'no constraint' in RHS - Fill NaN with 0 (add/sub) or fill_value (mul/div) in _add_constant/_apply_constant_op - Fill NaN coefficients with 0 in Variable.to_linexpr - Restore NaN mask in to_constraint() so subset RHS still signals unconstrained 
positions --- linopy/expressions.py | 38 +++++++++- linopy/variables.py | 1 + test/test_linear_expression.py | 129 ++++++++++++++++++++++++--------- 3 files changed, 129 insertions(+), 39 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index e29a39b9..d2ae9022 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -593,12 +593,16 @@ def _align_constant( def _add_constant( self: GenericExpression, other: ConstantLike, join: str | None = None ) -> GenericExpression: + # NaN values in self.const or other are filled with 0 (additive identity) + # so that missing data does not silently propagate through arithmetic. if np.isscalar(other) and join is None: - return self.assign(const=self.const + other) + return self.assign(const=self.const.fillna(0) + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, da, needs_data_reindex = self._align_constant( da, fill_value=0, join=join ) + da = da.fillna(0) + self_const = self_const.fillna(0) if needs_data_reindex: return self.__class__( self.data.reindex_like(self_const, fill_value=self._fill_value).assign( @@ -615,19 +619,30 @@ def _apply_constant_op( fill_value: float, join: str | None = None, ) -> GenericExpression: + """ + Apply a constant operation (mul, div, etc.) to this expression with a scalar or array. 
+ + NaN values are filled with neutral elements before the operation: + - factor (other) is filled with fill_value (0 for mul, 1 for div) + - coeffs and const are filled with 0 (additive identity) + """ factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, factor, needs_data_reindex = self._align_constant( factor, fill_value=fill_value, join=join ) + factor = factor.fillna(fill_value) + self_const = self_const.fillna(0) if needs_data_reindex: data = self.data.reindex_like(self_const, fill_value=self._fill_value) + coeffs = data.coeffs.fillna(0) return self.__class__( assign_multiindex_safe( - data, coeffs=op(data.coeffs, factor), const=op(self_const, factor) + data, coeffs=op(coeffs, factor), const=op(self_const, factor) ), self.model, ) - return self.assign(coeffs=op(self.coeffs, factor), const=op(self_const, factor)) + coeffs = self.coeffs.fillna(0) + return self.assign(coeffs=op(coeffs, factor), const=op(self_const, factor)) def _multiply_by_constant( self: GenericExpression, other: ConstantLike, join: str | None = None @@ -1138,9 +1153,24 @@ def to_constraint( ) rhs = rhs.reindex_like(self.const, fill_value=np.nan) + # Remember where RHS is NaN (meaning "no constraint") before the + # subtraction, which may fill NaN with 0 as part of normal + # expression arithmetic. + if isinstance(rhs, DataArray): + rhs_nan_mask = rhs.isnull() + else: + rhs_nan_mask = None + all_to_lhs = self.sub(rhs, join=join).data + computed_rhs = -all_to_lhs.const + + # Restore NaN at positions where the original constant RHS had no + # value so that downstream code still treats them as unconstrained. 
+ if rhs_nan_mask is not None and rhs_nan_mask.any(): + computed_rhs = xr.where(rhs_nan_mask, np.nan, computed_rhs) + data = assign_multiindex_safe( - all_to_lhs[["coeffs", "vars"]], sign=sign, rhs=-all_to_lhs.const + all_to_lhs[["coeffs", "vars"]], sign=sign, rhs=computed_rhs ) return constraints.Constraint(data, model=self.model) diff --git a/linopy/variables.py b/linopy/variables.py index 06df8cbc..f99fb938 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -317,6 +317,7 @@ def to_linexpr( """ coefficient = as_dataarray(coefficient, coords=self.coords, dims=self.dims) coefficient = coefficient.reindex_like(self.labels, fill_value=0) + coefficient = coefficient.fillna(0) ds = Dataset({"coeffs": coefficient, "vars": self.labels}).expand_dims( TERM_DIM, -1 ) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 2bb1d359..d3b8d426 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -806,41 +806,51 @@ def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: assert_quadequal(subset + qexpr, qexpr + subset) class TestMissingValues: - """Same shape as variable but with NaN entries in the constant.""" + """ + Same shape as variable but with NaN entries in the constant. 
- EXPECTED_NAN_MASK = np.zeros(20, dtype=bool) - EXPECTED_NAN_MASK[[0, 5, 19]] = True + NaN values are filled with operation-specific neutral elements: + - Addition/subtraction: NaN -> 0 (additive identity) + - Multiplication: NaN -> 0 (zeroes out the variable) + - Division: NaN -> 1 (multiplicative identity, no scaling) + """ + + NAN_POSITIONS = [0, 5, 19] @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_nan_propagates( + def test_add_nan_filled( self, v: Variable, nan_constant: xr.DataArray | pd.Series, operand: str, ) -> None: + base_const = 0.0 if operand == "var" else 5.0 target = v if operand == "var" else v + 5 result = target + nan_constant assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - np.isnan(result.const.values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.const.values).any() + # At NaN positions, const should be unchanged (added 0) + for i in self.NAN_POSITIONS: + assert result.const.values[i] == base_const @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_nan_propagates( + def test_sub_nan_filled( self, v: Variable, nan_constant: xr.DataArray | pd.Series, operand: str, ) -> None: + base_const = 0.0 if operand == "var" else 5.0 target = v if operand == "var" else v + 5 result = target - nan_constant assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - np.isnan(result.const.values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.const.values).any() + # At NaN positions, const should be unchanged (subtracted 0) + for i in self.NAN_POSITIONS: + assert result.const.values[i] == base_const @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_nan_propagates( + def test_mul_nan_filled( self, v: Variable, nan_constant: xr.DataArray | pd.Series, @@ -849,12 +859,13 @@ def test_mul_nan_propagates( target = v if operand == "var" else 1 * v result = target * nan_constant assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - 
np.isnan(result.coeffs.squeeze().values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.coeffs.squeeze().values).any() + # At NaN positions, coeffs should be 0 (variable zeroed out) + for i in self.NAN_POSITIONS: + assert result.coeffs.squeeze().values[i] == 0.0 @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_nan_propagates( + def test_div_nan_filled( self, v: Variable, nan_constant: xr.DataArray | pd.Series, @@ -863,9 +874,11 @@ def test_div_nan_propagates( target = v if operand == "var" else 1 * v result = target / nan_constant assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - np.isnan(result.coeffs.squeeze().values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.coeffs.squeeze().values).any() + # At NaN positions, coeffs should be unchanged (divided by 1) + original_coeffs = (1 * v).coeffs.squeeze().values + for i in self.NAN_POSITIONS: + assert result.coeffs.squeeze().values[i] == original_coeffs[i] def test_add_commutativity( self, @@ -874,14 +887,9 @@ def test_add_commutativity( ) -> None: result_a = v + nan_constant result_b = nan_constant + v - # Compare non-NaN values are equal and NaN positions match - nan_mask_a = np.isnan(result_a.const.values) - nan_mask_b = np.isnan(result_b.const.values) - np.testing.assert_array_equal(nan_mask_a, nan_mask_b) - np.testing.assert_array_equal( - result_a.const.values[~nan_mask_a], - result_b.const.values[~nan_mask_b], - ) + assert not np.isnan(result_a.const.values).any() + assert not np.isnan(result_b.const.values).any() + np.testing.assert_array_equal(result_a.const.values, result_b.const.values) np.testing.assert_array_equal( result_a.coeffs.values, result_b.coeffs.values ) @@ -893,12 +901,10 @@ def test_mul_commutativity( ) -> None: result_a = v * nan_constant result_b = nan_constant * v - nan_mask_a = np.isnan(result_a.coeffs.values) - nan_mask_b = np.isnan(result_b.coeffs.values) - np.testing.assert_array_equal(nan_mask_a, nan_mask_b) + assert not 
np.isnan(result_a.coeffs.values).any() + assert not np.isnan(result_b.coeffs.values).any() np.testing.assert_array_equal( - result_a.coeffs.values[~nan_mask_a], - result_b.coeffs.values[~nan_mask_b], + result_a.coeffs.values, result_b.coeffs.values ) def test_quadexpr_add_nan( @@ -910,9 +916,62 @@ def test_quadexpr_add_nan( result = qexpr + nan_constant assert isinstance(result, QuadraticExpression) assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - np.isnan(result.const.values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.const.values).any() + + class TestExpressionWithNaN: + """Test that NaN in expression's own const/coeffs doesn't propagate.""" + + def test_shifted_expr_add_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr + 5 + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == 5.0 + + def test_shifted_expr_mul_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr * 2 + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_add_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + expr = (1 * v).shift(dim_2=1) + result = expr + arr + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == 0.0 + + def test_shifted_expr_mul_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr * arr + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_div_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr / 2 + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_sub_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr - 3 
+ assert not np.isnan(result.const.values).any() + assert result.const.values[0] == -3.0 + + def test_shifted_expr_div_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr / arr + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: + nan_coeff = np.ones(v.sizes["dim_2"]) + nan_coeff[0] = np.nan + result = v.to_linexpr(nan_coeff) + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 class TestMultiDim: def test_multidim_subset_mul(self, m: Model) -> None: From e7e0be304b66bae6dec0e1808ccb5c3f09d7220d Mon Sep 17 00:00:00 2001 From: Fabian Date: Tue, 10 Mar 2026 10:05:53 +0100 Subject: [PATCH 40/66] Fix CI doctest collection by deferring linopy import in test/conftest.py --- .gitignore | 1 + test/conftest.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 7b962a6b..10ac8e45 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,4 @@ benchmark/scripts/leftovers/ # direnv .envrc AGENTS.md +coverage.xml diff --git a/test/conftest.py b/test/conftest.py index b0a846ba..ee20cdc2 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,11 +1,15 @@ """Pytest configuration and fixtures.""" +from __future__ import annotations + import os +from typing import TYPE_CHECKING import pandas as pd import pytest -from linopy import Model, Variable +if TYPE_CHECKING: + from linopy import Model, Variable def pytest_addoption(parser: pytest.Parser) -> None: @@ -55,6 +59,8 @@ def pytest_collection_modifyitems( @pytest.fixture def m() -> Model: + from linopy import Model + m = Model() m.add_variables(pd.Series([0, 0]), 1, name="x") m.add_variables(4, pd.Series([8, 10]), name="y") From eee7eb8fcb5e28f981f1eff7a973b32bd200867b Mon Sep 17 00:00:00 2001 From: FBumann 
<117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 10:56:14 +0100 Subject: [PATCH 41/66] Fix Self import for Python 3.10 compatibility Move Self to TYPE_CHECKING block with typing_extensions fallback, since typing.Self is only available in Python 3.11+. Co-Authored-By: Claude Opus 4.6 --- linopy/expressions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 32bf781b..b89f86fa 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -14,7 +14,7 @@ from collections.abc import Callable, Hashable, Iterator, Mapping, Sequence from dataclasses import dataclass, field from itertools import product, zip_longest -from typing import TYPE_CHECKING, Any, Self, TypeVar, cast, overload +from typing import TYPE_CHECKING, Any, TypeVar, cast, overload from warnings import warn import numpy as np @@ -89,6 +89,8 @@ ) if TYPE_CHECKING: + from typing_extensions import Self + from linopy.constraints import AnonymousScalarConstraint, Constraint from linopy.model import Model from linopy.variables import ScalarVariable, Variable From c0c2d05a5894df1f83117fefb43821494091326c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 11:27:11 +0100 Subject: [PATCH 42/66] Fix mypy errors: use JoinOptions Literal type for join parameters - Replace `join: str | None` with `join: JoinOptions | None` in expressions.py and variables.py to satisfy mypy's xr.align call-overload checking - Add assertion for termination_condition in solvers.py SCIP solver - Add type: ignore comments for xarray merge and reindex edge cases Co-Authored-By: Claude Opus 4.6 --- linopy/common.py | 2 +- linopy/expressions.py | 37 ++++++++++++++++++++----------------- linopy/solvers.py | 1 + linopy/variables.py | 15 ++++++++------- 4 files changed, 30 insertions(+), 25 deletions(-) diff --git a/linopy/common.py b/linopy/common.py index 21f851df..d0ca3ad9 100644 --- 
a/linopy/common.py +++ b/linopy/common.py @@ -1300,7 +1300,7 @@ def align( if isinstance(obj, Variable): results.append(obj.reindex(indexers)) else: - results.append(obj.reindex(indexers, **reindex_kwargs)) + results.append(obj.reindex(indexers, **reindex_kwargs)) # type: ignore[union-attr] return tuple(results) diff --git a/linopy/expressions.py b/linopy/expressions.py index b89f86fa..af76e4f9 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -30,6 +30,7 @@ from xarray import Coordinates, DataArray, Dataset, IndexVariable from xarray.core.coordinates import DataArrayCoordinates, DatasetCoordinates from xarray.core.indexes import Indexes +from xarray.core.types import JoinOptions from xarray.core.utils import Frozen try: @@ -528,8 +529,8 @@ def _align_constant( self: GenericExpression, other: DataArray, fill_value: float = 0, - join: str | None = None, - default_join: str = "exact", + join: JoinOptions | None = None, + default_join: JoinOptions = "exact", ) -> tuple[DataArray, DataArray, bool]: """ Align a constant DataArray with self.const. 
@@ -585,7 +586,7 @@ def _align_constant( return self_const, aligned, True def _add_constant( - self: GenericExpression, other: ConstantLike, join: str | None = None + self: GenericExpression, other: ConstantLike, join: JoinOptions | None = None ) -> GenericExpression: if np.isscalar(other) and join is None: return self.assign(const=self.const + other) @@ -608,7 +609,7 @@ def _apply_constant_op( other: ConstantLike, op: Callable[[DataArray, DataArray], DataArray], fill_value: float, - join: str | None = None, + join: JoinOptions | None = None, ) -> GenericExpression: factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, factor, needs_data_reindex = self._align_constant( @@ -626,12 +627,12 @@ def _apply_constant_op( return self.assign(coeffs=op(self.coeffs, factor), const=op(self_const, factor)) def _multiply_by_constant( - self: GenericExpression, other: ConstantLike, join: str | None = None + self: GenericExpression, other: ConstantLike, join: JoinOptions | None = None ) -> GenericExpression: return self._apply_constant_op(other, operator.mul, fill_value=0, join=join) def _divide_by_constant( - self: GenericExpression, other: ConstantLike, join: str | None = None + self: GenericExpression, other: ConstantLike, join: JoinOptions | None = None ) -> GenericExpression: return self._apply_constant_op(other, operator.truediv, fill_value=1, join=join) @@ -672,7 +673,7 @@ def __lt__(self, other: Any) -> NotImplementedType: def add( self: GenericExpression, other: SideLike, - join: str | None = None, + join: JoinOptions | None = None, ) -> GenericExpression | QuadraticExpression: """ Add an expression to others. 
@@ -695,12 +696,12 @@ def add( self, QuadraticExpression ): other = other.to_quadexpr() - return merge([self, other], cls=self.__class__, join=join) + return merge([self, other], cls=self.__class__, join=join) # type: ignore[list-item] def sub( self: GenericExpression, other: SideLike, - join: str | None = None, + join: JoinOptions | None = None, ) -> GenericExpression | QuadraticExpression: """ Subtract others from expression. @@ -719,7 +720,7 @@ def sub( def mul( self: GenericExpression, other: SideLike, - join: str | None = None, + join: JoinOptions | None = None, ) -> GenericExpression | QuadraticExpression: """ Multiply the expr by a factor. @@ -744,7 +745,7 @@ def mul( def div( self: GenericExpression, other: VariableLike | ConstantLike, - join: str | None = None, + join: JoinOptions | None = None, ) -> GenericExpression | QuadraticExpression: """ Divide the expr by a factor. @@ -771,7 +772,7 @@ def div( def le( self: GenericExpression, rhs: SideLike, - join: str | None = None, + join: JoinOptions | None = None, ) -> Constraint: """ Less than or equal constraint. @@ -790,7 +791,7 @@ def le( def ge( self: GenericExpression, rhs: SideLike, - join: str | None = None, + join: JoinOptions | None = None, ) -> Constraint: """ Greater than or equal constraint. @@ -809,7 +810,7 @@ def ge( def eq( self: GenericExpression, rhs: SideLike, - join: str | None = None, + join: JoinOptions | None = None, ) -> Constraint: """ Equality constraint. @@ -1065,7 +1066,7 @@ def cumsum( return self.rolling(dim=dim_dict).sum(keep_attrs=keep_attrs, skipna=skipna) def to_constraint( - self, sign: SignLike, rhs: SideLike, join: str | None = None + self, sign: SignLike, rhs: SideLike, join: JoinOptions | None = None ) -> Constraint: """ Convert a linear expression to a constraint. 
@@ -2212,7 +2213,9 @@ def solution(self) -> DataArray: sol = (self.coeffs * vals.prod(FACTOR_DIM)).sum(TERM_DIM) + self.const return sol.rename("solution") - def to_constraint(self, sign: SignLike, rhs: SideLike) -> NotImplementedType: + def to_constraint( + self, sign: SignLike, rhs: SideLike, join: JoinOptions | None = None + ) -> NotImplementedType: raise NotImplementedError( "Quadratic expressions cannot be used in constraints." ) @@ -2344,7 +2347,7 @@ def merge( ], dim: str = TERM_DIM, cls: type[GenericExpression] = None, # type: ignore - join: str | None = None, + join: JoinOptions | None = None, **kwargs: Any, ) -> GenericExpression: """ diff --git a/linopy/solvers.py b/linopy/solvers.py index 16c07932..f1617fc0 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -1542,6 +1542,7 @@ def solve_problem_from_file( condition = m.getStatus() termination_condition = CONDITION_MAP.get(condition, condition) + assert termination_condition is not None status = Status.from_termination_condition(termination_condition) status.legacy_status = condition diff --git a/linopy/variables.py b/linopy/variables.py index c11f52cf..2f8daf8d 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -27,6 +27,7 @@ from xarray import DataArray, Dataset, broadcast from xarray.core.coordinates import DatasetCoordinates from xarray.core.indexes import Indexes +from xarray.core.types import JoinOptions from xarray.core.utils import Frozen import linopy.expressions as expressions @@ -537,7 +538,7 @@ def __contains__(self, value: str) -> bool: return self.data.__contains__(value) def add( - self, other: SideLike, join: str | None = None + self, other: SideLike, join: JoinOptions | None = None ) -> LinearExpression | QuadraticExpression: """ Add variables to linear expressions or other variables. 
@@ -554,7 +555,7 @@ def add( return self.to_linexpr().add(other, join=join) def sub( - self, other: SideLike, join: str | None = None + self, other: SideLike, join: JoinOptions | None = None ) -> LinearExpression | QuadraticExpression: """ Subtract linear expressions or other variables from the variables. @@ -571,7 +572,7 @@ def sub( return self.to_linexpr().sub(other, join=join) def mul( - self, other: ConstantLike, join: str | None = None + self, other: ConstantLike, join: JoinOptions | None = None ) -> LinearExpression | QuadraticExpression: """ Multiply variables with a coefficient. @@ -588,7 +589,7 @@ def mul( return self.to_linexpr().mul(other, join=join) def div( - self, other: ConstantLike, join: str | None = None + self, other: ConstantLike, join: JoinOptions | None = None ) -> LinearExpression | QuadraticExpression: """ Divide variables with a coefficient. @@ -604,7 +605,7 @@ def div( """ return self.to_linexpr().div(other, join=join) - def le(self, rhs: SideLike, join: str | None = None) -> Constraint: + def le(self, rhs: SideLike, join: JoinOptions | None = None) -> Constraint: """ Less than or equal constraint. @@ -619,7 +620,7 @@ def le(self, rhs: SideLike, join: str | None = None) -> Constraint: """ return self.to_linexpr().le(rhs, join=join) - def ge(self, rhs: SideLike, join: str | None = None) -> Constraint: + def ge(self, rhs: SideLike, join: JoinOptions | None = None) -> Constraint: """ Greater than or equal constraint. @@ -634,7 +635,7 @@ def ge(self, rhs: SideLike, join: str | None = None) -> Constraint: """ return self.to_linexpr().ge(rhs, join=join) - def eq(self, rhs: SideLike, join: str | None = None) -> Constraint: + def eq(self, rhs: SideLike, join: JoinOptions | None = None) -> Constraint: """ Equality constraint. 
From 577ee741808a455b558ddc12bd982b9279c38b95 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 11:36:48 +0100 Subject: [PATCH 43/66] Merge master into harmonize-linopy-operations-mixed --- .github/workflows/release.yml | 2 +- .github/workflows/test-models.yml | 2 +- .github/workflows/test.yml | 6 +- doc/api.rst | 3 +- doc/piecewise-linear-constraints.rst | 439 ++- doc/release_notes.rst | 15 +- examples/piecewise-linear-constraints.ipynb | 878 ++++-- linopy/__init__.py | 12 +- linopy/common.py | 20 - linopy/constants.py | 16 +- linopy/expressions.py | 70 +- linopy/model.py | 11 +- linopy/piecewise.py | 1611 ++++++----- linopy/remote/__init__.py | 6 +- linopy/remote/oetc.py | 18 +- linopy/solver_capabilities.py | 10 - linopy/solvers.py | 18 +- linopy/types.py | 5 +- linopy/variables.py | 27 +- pyproject.toml | 7 +- test/conftest.py | 41 + test/remote/test_oetc.py | 7 +- test/remote/test_oetc_job_polling.py | 6 +- test/test_common.py | 25 - test/test_constraints.py | 110 +- test/test_linear_expression.py | 1366 ++++----- test/test_optimization.py | 2 +- test/test_piecewise_constraints.py | 2876 ++++++++----------- 28 files changed, 4052 insertions(+), 3557 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 54d9a211..defdcf5a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -36,7 +36,7 @@ jobs: permissions: id-token: write steps: - - uses: actions/download-artifact@v7 + - uses: actions/download-artifact@v8 with: name: Packages path: dist diff --git a/.github/workflows/test-models.yml b/.github/workflows/test-models.yml index d5c14d4a..ded75685 100644 --- a/.github/workflows/test-models.yml +++ b/.github/workflows/test-models.yml @@ -101,7 +101,7 @@ jobs: - name: Upload artifacts if: env.pinned == 'false' - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: results-pypsa-eur-${{ matrix.version }} path: | diff 
--git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2253d2cf..6484ef3e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -64,7 +64,7 @@ jobs: - name: Set up windows package manager if: matrix.os == 'windows-latest' - uses: crazy-max/ghaction-chocolatey@v3 + uses: crazy-max/ghaction-chocolatey@v4 with: args: -h @@ -74,7 +74,7 @@ jobs: choco install glpk - name: Download package - uses: actions/download-artifact@v7 + uses: actions/download-artifact@v8 with: name: Packages path: dist @@ -112,7 +112,7 @@ jobs: python-version: 3.12 - name: Download package - uses: actions/download-artifact@v7 + uses: actions/download-artifact@v8 with: name: Packages path: dist diff --git a/doc/api.rst b/doc/api.rst index 57a61e3e..20958857 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -19,8 +19,9 @@ Creating a model model.Model.add_constraints model.Model.add_objective model.Model.add_piecewise_constraints - model.Model.add_disjunctive_piecewise_constraints + piecewise.piecewise piecewise.breakpoints + piecewise.segments model.Model.linexpr model.Model.remove_constraints diff --git a/doc/piecewise-linear-constraints.rst b/doc/piecewise-linear-constraints.rst index b4c6336d..9278248a 100644 --- a/doc/piecewise-linear-constraints.rst +++ b/doc/piecewise-linear-constraints.rst @@ -7,17 +7,44 @@ Piecewise linear (PWL) constraints approximate nonlinear functions as connected linear segments, allowing you to model cost curves, efficiency curves, or production functions within a linear programming framework. -Linopy provides two methods: - -- :py:meth:`~linopy.model.Model.add_piecewise_constraints` -- for - **continuous** piecewise linear functions (segments connected end-to-end). -- :py:meth:`~linopy.model.Model.add_disjunctive_piecewise_constraints` -- for - **disconnected** segments (with gaps between them). 
+Use :py:func:`~linopy.piecewise.piecewise` to describe the function and +:py:meth:`~linopy.model.Model.add_piecewise_constraints` to add it to a model. .. contents:: :local: :depth: 2 +Quick Start +----------- + +.. code-block:: python + + import linopy + + m = linopy.Model() + x = m.add_variables(name="x", lower=0, upper=100) + y = m.add_variables(name="y") + + # y equals a piecewise linear function of x + x_pts = linopy.breakpoints([0, 30, 60, 100]) + y_pts = linopy.breakpoints([0, 36, 84, 170]) + + m.add_piecewise_constraints(linopy.piecewise(x, x_pts, y_pts) == y) + +The ``piecewise()`` call creates a lazy descriptor. Comparing it with a +variable (``==``, ``<=``, ``>=``) produces a +:class:`~linopy.piecewise.PiecewiseConstraintDescriptor` that +``add_piecewise_constraints`` knows how to process. + +.. note:: + + The ``piecewise(...)`` expression can appear on either side of the + comparison operator. These forms are equivalent:: + + piecewise(x, x_pts, y_pts) == y + y == piecewise(x, x_pts, y_pts) + + Formulations ------------ @@ -36,22 +63,18 @@ introduces interpolation variables :math:`\lambda_i` such that: The SOS2 constraint ensures that **at most two adjacent** :math:`\lambda_i` can be non-zero, so :math:`x` is interpolated within one segment. -**Dict (multi-variable) case.** When multiple variables share the same lambdas, -breakpoints carry an extra *link* dimension :math:`v \in V` and linking becomes -:math:`x_v = \sum_i \lambda_i \, b_{v,i}` for all :math:`v`. - .. note:: SOS2 is a combinatorial constraint handled via branch-and-bound, similar to - integer variables. It cannot be reformulated as a pure LP. Prefer the - incremental method (``method="incremental"`` or ``method="auto"``) when - breakpoints are monotonic. + integer variables. Prefer the incremental method + (``method="incremental"`` or ``method="auto"``) when breakpoints are + monotonic. 
Incremental (Delta) Formulation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For **strictly monotonic** breakpoints :math:`b_0 < b_1 < \cdots < b_n`, the -incremental formulation is a **pure LP** (no SOS2 or binary variables): +incremental formulation uses fill-fraction variables: .. math:: @@ -60,12 +83,27 @@ incremental formulation is a **pure LP** (no SOS2 or binary variables): x = b_0 + \sum_{i=1}^{n} \delta_i \, (b_i - b_{i-1}) The filling-order constraints enforce that segment :math:`i+1` cannot be -partially filled unless segment :math:`i` is completely filled. +partially filled unless segment :math:`i` is completely filled. Binary +indicator variables enforce integrality. + +**Limitation:** Breakpoints must be strictly monotonic. For non-monotonic +curves, use SOS2. -**Limitation:** Breakpoints must be strictly monotonic for every linked -variable. In the dict case, each variable is checked independently -- e.g. -power increasing while fuel decreases is fine, but a curve that rises then -falls is not. For non-monotonic curves, use SOS2. +LP (Tangent-Line) Formulation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For **inequality** constraints where the function is **convex** (for ``>=``) +or **concave** (for ``<=``), a pure LP formulation adds one tangent-line +constraint per segment — no SOS2 or binary variables needed. + +.. math:: + + y \le m_k \, x + c_k \quad \text{for each segment } k \text{ (concave case)} + +Domain bounds :math:`x_{\min} \le x \le x_{\max}` are added automatically. + +**Limitation:** Only valid for inequality constraints with the correct +convexity; not valid for equality constraints. Disjunctive (Disaggregated Convex Combination) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -84,228 +122,332 @@ Given :math:`K` segments, each with breakpoints :math:`b_{k,0}, \ldots, b_{k,n_k \sum_{i} \lambda_{k,i} = y_k, \quad x = \sum_{k} \sum_{i} \lambda_{k,i} \, b_{k,i} + .. 
_choosing-a-formulation: Choosing a Formulation ~~~~~~~~~~~~~~~~~~~~~~ -The incremental method is the fastest to solve (pure LP), but requires strictly -monotonic breakpoints. Pass ``method="auto"`` to use it automatically when -applicable, falling back to SOS2 otherwise. +Pass ``method="auto"`` (the default) and linopy will pick the best +formulation automatically: + +- **Equality + monotonic x** → incremental +- **Inequality + correct convexity** → LP +- Otherwise → SOS2 +- Disjunctive (segments) → always SOS2 with binary selection .. list-table:: :header-rows: 1 - :widths: 25 25 25 25 + :widths: 25 20 20 15 20 * - Property - SOS2 - Incremental + - LP - Disjunctive * - Segments - Connected - Connected - - Disconnected (gaps allowed) + - Connected + - Disconnected + * - Constraint type + - ``==``, ``<=``, ``>=`` + - ``==``, ``<=``, ``>=`` + - ``<=``, ``>=`` only + - ``==``, ``<=``, ``>=`` * - Breakpoint order - Any - Strictly monotonic + - Strictly increasing - Any (per segment) + * - Convexity requirement + - None + - None + - Concave (≤) or convex (≥) + - None * - Variable types - Continuous + SOS2 - - Continuous only (pure LP) + - Continuous + binary + - Continuous only - Binary + SOS2 * - Solver support - - Solvers with SOS2 support + - SOS2-capable + - MIP-capable - **Any LP solver** - - Solvers with SOS2 + MIP support + - SOS2 + MIP + Basic Usage ----------- -Single variable -~~~~~~~~~~~~~~~ +Equality constraint +~~~~~~~~~~~~~~~~~~~ + +Link ``y`` to a piecewise linear function of ``x``: .. 
code-block:: python import linopy m = linopy.Model() - x = m.add_variables(name="x") + x = m.add_variables(name="x", lower=0, upper=100) + y = m.add_variables(name="y") - bp = linopy.breakpoints([0, 10, 50, 100]) - m.add_piecewise_constraints(x, bp, dim="breakpoint") + x_pts = linopy.breakpoints([0, 30, 60, 100]) + y_pts = linopy.breakpoints([0, 36, 84, 170]) -Dict of variables -~~~~~~~~~~~~~~~~~~ + m.add_piecewise_constraints(linopy.piecewise(x, x_pts, y_pts) == y) + +Inequality constraints +~~~~~~~~~~~~~~~~~~~~~~ -Link multiple variables through shared interpolation weights. For example, a -turbine where power input determines power output (via a nonlinear efficiency -factor): +Use ``<=`` or ``>=`` to bound ``y`` by the piecewise function: .. code-block:: python - m = linopy.Model() + pw = linopy.piecewise(x, x_pts, y_pts) - power_in = m.add_variables(name="power_in") - power_out = m.add_variables(name="power_out") + # y must be at most the piecewise function of x (pw >= y ↔ y <= pw) + m.add_piecewise_constraints(pw >= y) - bp = linopy.breakpoints( - power_in=[0, 50, 100], - power_out=[0, 47.5, 90], - ) + # y must be at least the piecewise function of x (pw <= y ↔ y >= pw) + m.add_piecewise_constraints(pw <= y) - m.add_piecewise_constraints( - {"power_in": power_in, "power_out": power_out}, - bp, - dim="breakpoint", - ) - -Incremental method -~~~~~~~~~~~~~~~~~~~ +Choosing a method +~~~~~~~~~~~~~~~~~ .. code-block:: python - m.add_piecewise_constraints(x, bp, dim="breakpoint", method="incremental") + pw = linopy.piecewise(x, x_pts, y_pts) + + # Explicit SOS2 + m.add_piecewise_constraints(pw == y, method="sos2") + + # Explicit incremental (requires monotonic x_pts) + m.add_piecewise_constraints(pw == y, method="incremental") -Pass ``method="auto"`` to automatically select incremental when breakpoints are -strictly monotonic, falling back to SOS2 otherwise. 
+ # Explicit LP (requires inequality + correct convexity + increasing x_pts) + m.add_piecewise_constraints(pw >= y, method="lp") + + # Auto-select best method (default) + m.add_piecewise_constraints(pw == y, method="auto") Disjunctive (disconnected segments) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use :func:`~linopy.piecewise.segments` to define breakpoints with gaps: + .. code-block:: python m = linopy.Model() - x = m.add_variables(name="x") + x = m.add_variables(name="x", lower=0, upper=100) + y = m.add_variables(name="y") + + # Two disconnected segments: [0,10] and [50,100] + x_seg = linopy.segments([(0, 10), (50, 100)]) + y_seg = linopy.segments([(0, 15), (60, 130)]) + + m.add_piecewise_constraints(linopy.piecewise(x, x_seg, y_seg) == y) + +The disjunctive formulation is selected automatically when +``x_points`` / ``y_points`` have a segment dimension (created by +:func:`~linopy.piecewise.segments`). - bp = linopy.breakpoints.segments([(0, 10), (50, 100)]) - m.add_disjunctive_piecewise_constraints(x, bp) Breakpoints Factory ------------------- -The ``linopy.breakpoints()`` factory simplifies creating breakpoint DataArrays -with correct dimensions and coordinates. +The :func:`~linopy.piecewise.breakpoints` factory creates DataArrays with +the correct ``_breakpoint`` dimension. It accepts several input types +(``BreaksLike``): From a list ~~~~~~~~~~~ .. code-block:: python - # 1D breakpoints (dims: [breakpoint]) + # 1D breakpoints (dims: [_breakpoint]) bp = linopy.breakpoints([0, 50, 100]) -From keyword arguments (multi-variable) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +From a pandas Series +~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + import pandas as pd + + bp = linopy.breakpoints(pd.Series([0, 50, 100])) + +From a DataFrame (per-entity, requires ``dim``) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
code-block:: python - # 2D breakpoints (dims: [var, breakpoint]) - bp = linopy.breakpoints(power=[0, 50, 100], fuel=[0, 60, 140]) + # rows = entities, columns = breakpoints + df = pd.DataFrame( + {"bp0": [0, 0], "bp1": [50, 80], "bp2": [100, float("nan")]}, + index=["gen1", "gen2"], + ) + bp = linopy.breakpoints(df, dim="generator") From a dict (per-entity, ragged lengths allowed) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python - # 2D breakpoints (dims: [generator, breakpoint]), NaN-padded + # NaN-padded to the longest entry bp = linopy.breakpoints( {"gen1": [0, 50, 100], "gen2": [0, 80]}, dim="generator", ) -Per-entity with multiple variables -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +From a DataArray (pass-through) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python - # 3D breakpoints (dims: [generator, var, breakpoint]) - bp = linopy.breakpoints( - power={"gen1": [0, 50, 100], "gen2": [0, 80]}, - fuel={"gen1": [0, 60, 140], "gen2": [0, 100]}, - dim="generator", + import xarray as xr + + arr = xr.DataArray([0, 50, 100], dims=["_breakpoint"]) + bp = linopy.breakpoints(arr) # returned as-is + +Slopes mode +~~~~~~~~~~~ + +Compute y-breakpoints from segment slopes and an initial y-value: + +.. code-block:: python + + y_pts = linopy.breakpoints( + slopes=[1.2, 1.4, 1.7], + x_points=[0, 30, 60, 100], + y0=0, ) + # Equivalent to breakpoints([0, 36, 78, 146]) -Segments (for disjunctive constraints) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Segments Factory +---------------- + +The :func:`~linopy.piecewise.segments` factory creates DataArrays with both +``_segment`` and ``_breakpoint`` dimensions (``SegmentsLike``): + +From a list of sequences +~~~~~~~~~~~~~~~~~~~~~~~~ .. 
code-block:: python - # 2D breakpoints (dims: [segment, breakpoint]) - bp = linopy.breakpoints.segments([(0, 10), (50, 100)]) + # dims: [_segment, _breakpoint] + seg = linopy.segments([(0, 10), (50, 100)]) - # Per-entity segments - bp = linopy.breakpoints.segments( +From a dict (per-entity) +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + seg = linopy.segments( {"gen1": [(0, 10), (50, 100)], "gen2": [(0, 80)]}, dim="generator", ) +From a DataFrame +~~~~~~~~~~~~~~~~ + +.. code-block:: python + + # rows = segments, columns = breakpoints + seg = linopy.segments(pd.DataFrame([[0, 10], [50, 100]])) + + Auto-broadcasting ----------------- Breakpoints are automatically broadcast to match the dimensions of the -expression or variable. This means you don't need to manually call -``expand_dims`` when your variables have extra dimensions (e.g. ``time``): +expressions. You don't need ``expand_dims`` when your variables have extra +dimensions (e.g. ``time``): .. code-block:: python + import pandas as pd + import linopy + m = linopy.Model() time = pd.Index([1, 2, 3], name="time") - x = m.add_variables(name="x", coords=[time]) + x = m.add_variables(name="x", lower=0, upper=100, coords=[time]) + y = m.add_variables(name="y", coords=[time]) - # 1D breakpoints are auto-expanded to match x's time dimension - bp = linopy.breakpoints([0, 50, 100]) - m.add_piecewise_constraints(x, bp, dim="breakpoint") + # 1D breakpoints auto-expand to match x's time dimension + x_pts = linopy.breakpoints([0, 50, 100]) + y_pts = linopy.breakpoints([0, 70, 150]) + m.add_piecewise_constraints(linopy.piecewise(x, x_pts, y_pts) == y) -This also works for ``add_disjunctive_piecewise_constraints`` and dict -expressions. Method Signatures ----------------- +``piecewise`` +~~~~~~~~~~~~~ + +.. code-block:: python + + linopy.piecewise(expr, x_points, y_points) + +- ``expr`` -- ``Variable`` or ``LinearExpression``. The "x" side expression. +- ``x_points`` -- ``BreaksLike``. Breakpoint x-coordinates. 
+- ``y_points`` -- ``BreaksLike``. Breakpoint y-coordinates. + +Returns a :class:`~linopy.piecewise.PiecewiseExpression` that supports +``==``, ``<=``, ``>=`` comparison with another expression. + ``add_piecewise_constraints`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python Model.add_piecewise_constraints( - expr, - breakpoints, - dim="breakpoint", - mask=None, + descriptor, + method="auto", name=None, skip_nan_check=False, - method="sos2", ) -- ``expr`` -- ``Variable``, ``LinearExpression``, or ``dict`` of these. -- ``breakpoints`` -- ``xr.DataArray`` with breakpoint values. Must have ``dim`` - as a dimension. For the dict case, must also have a dimension whose - coordinates match the dict keys. -- ``dim`` -- ``str``, default ``"breakpoint"``. Breakpoint-index dimension. -- ``mask`` -- ``xr.DataArray``, optional. Boolean mask for valid constraints. +- ``descriptor`` -- :class:`~linopy.piecewise.PiecewiseConstraintDescriptor`. + Created by comparing a ``PiecewiseExpression`` with an expression, e.g. + ``piecewise(x, x_pts, y_pts) == y``. +- ``method`` -- ``"auto"`` (default), ``"sos2"``, ``"incremental"``, or ``"lp"``. - ``name`` -- ``str``, optional. Base name for generated variables/constraints. - ``skip_nan_check`` -- ``bool``, default ``False``. -- ``method`` -- ``"sos2"`` (default), ``"incremental"``, or ``"auto"``. -``add_disjunctive_piecewise_constraints`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Returns a :class:`~linopy.constraints.Constraint`, but the returned object is +formulation-dependent: typically ``{name}_convex`` (SOS2), ``{name}_fill`` or +``{name}_y_link`` (incremental), and ``{name}_select`` (disjunctive). For +inequality constraints, the returned constraint is the core piecewise +formulation constraint, not ``{name}_ineq``. + +``breakpoints`` +~~~~~~~~~~~~~~~~ .. 
code-block:: python - Model.add_disjunctive_piecewise_constraints( - expr, - breakpoints, - dim="breakpoint", - segment_dim="segment", - mask=None, - name=None, - skip_nan_check=False, - ) + linopy.breakpoints(values, dim=None) + linopy.breakpoints(slopes, x_points, y0, dim=None) -Same as above, plus: +- ``values`` -- ``BreaksLike`` (list, Series, DataFrame, DataArray, or dict). +- ``slopes``, ``x_points``, ``y0`` -- for slopes mode (mutually exclusive with + ``values``). +- ``dim`` -- ``str``, required when ``values`` or ``slopes`` is a DataFrame or dict. + +``segments`` +~~~~~~~~~~~~~ + +.. code-block:: python + + linopy.segments(values, dim=None) + +- ``values`` -- ``SegmentsLike`` (list of sequences, DataFrame, DataArray, or + dict). +- ``dim`` -- ``str``, required when ``values`` is a dict. -- ``segment_dim`` -- ``str``, default ``"segment"``. Dimension indexing - segments. Use NaN in breakpoints to pad segments with fewer breakpoints. Generated Variables and Constraints ------------------------------------ @@ -327,9 +469,18 @@ Given base name ``name``, the following objects are created: * - ``{name}_convex`` - Constraint - :math:`\sum_i \lambda_i = 1`. - * - ``{name}_link`` + * - ``{name}_x_link`` + - Constraint + - :math:`x = \sum_i \lambda_i \, x_i`. + * - ``{name}_y_link`` + - Constraint + - :math:`y = \sum_i \lambda_i \, y_i`. + * - ``{name}_aux`` + - Variable + - Auxiliary variable :math:`z` (inequality constraints only). + * - ``{name}_ineq`` - Constraint - - :math:`x = \sum_i \lambda_i \, b_i`. + - :math:`y \le z` or :math:`y \ge z` (inequality only). **Incremental method:** @@ -343,12 +494,49 @@ Given base name ``name``, the following objects are created: * - ``{name}_delta`` - Variable - Fill-fraction variables :math:`\delta_i \in [0, 1]`. + * - ``{name}_inc_binary`` + - Variable + - Binary indicators for each segment. + * - ``{name}_inc_link`` + - Constraint + - :math:`\delta_i \le y_i` (delta bounded by binary). 
* - ``{name}_fill`` - Constraint - - :math:`\delta_{i+1} \le \delta_i` (only if 3+ breakpoints). - * - ``{name}_link`` + - :math:`\delta_{i+1} \le \delta_i` (fill order, 3+ breakpoints). + * - ``{name}_inc_order`` + - Constraint + - :math:`y_{i+1} \le \delta_i` (binary ordering, 3+ breakpoints). + * - ``{name}_x_link`` + - Constraint + - :math:`x = x_0 + \sum_i \delta_i \, \Delta x_i`. + * - ``{name}_y_link`` - Constraint - - :math:`x = b_0 + \sum_i \delta_i \, s_i`. + - :math:`y = y_0 + \sum_i \delta_i \, \Delta y_i`. + * - ``{name}_aux`` + - Variable + - Auxiliary variable :math:`z` (inequality constraints only). + * - ``{name}_ineq`` + - Constraint + - :math:`y \le z` or :math:`y \ge z` (inequality only). + +**LP method:** + +.. list-table:: + :header-rows: 1 + :widths: 30 15 55 + + * - Name + - Type + - Description + * - ``{name}_lp`` + - Constraint + - Tangent-line constraints (one per segment). + * - ``{name}_lp_domain_lo`` + - Constraint + - :math:`x \ge x_{\min}`. + * - ``{name}_lp_domain_hi`` + - Constraint + - :math:`x \le x_{\max}`. **Disjunctive method:** @@ -371,14 +559,23 @@ Given base name ``name``, the following objects are created: * - ``{name}_convex`` - Constraint - :math:`\sum_i \lambda_{k,i} = y_k`. - * - ``{name}_link`` + * - ``{name}_x_link`` + - Constraint + - :math:`x = \sum_k \sum_i \lambda_{k,i} \, x_{k,i}`. + * - ``{name}_y_link`` + - Constraint + - :math:`y = \sum_k \sum_i \lambda_{k,i} \, y_{k,i}`. + * - ``{name}_aux`` + - Variable + - Auxiliary variable :math:`z` (inequality constraints only). + * - ``{name}_ineq`` - Constraint - - :math:`x = \sum_k \sum_i \lambda_{k,i} \, b_{k,i}`. + - :math:`y \le z` or :math:`y \ge z` (inequality only). 
See Also -------- -- :doc:`piecewise-linear-constraints-tutorial` -- Worked examples with all three formulations +- :doc:`piecewise-linear-constraints-tutorial` -- Worked examples covering SOS2, incremental, LP, and disjunctive usage - :doc:`sos-constraints` -- Low-level SOS1/SOS2 constraint API - :doc:`creating-constraints` -- General constraint creation - :doc:`user-guide` -- Overall linopy usage patterns diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 324e34be..0697e8a2 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -10,11 +10,20 @@ Upcoming Version - Comparison operators (``==``, ``<=``, ``>=``) fill missing RHS coords with NaN (no constraint created) - Fixes crash on ``subset + var`` / ``subset + expr`` reverse addition - Fixes superset DataArrays expanding result coords beyond the variable's coordinate space -* Add ``add_piecewise_constraints()`` for piecewise linear constraints with SOS2 and incremental (pure LP) formulations. -* Add ``add_disjunctive_piecewise_constraints()`` for disconnected piecewise linear segments (e.g. forbidden operating zones). -* Add ``linopy.breakpoints()`` factory for convenient breakpoint construction from lists, dicts, or keyword arguments. Includes ``breakpoints.segments()`` for disjunctive formulations. +* Add ``add_piecewise_constraints()`` with SOS2, incremental, LP, and disjunctive formulations (``linopy.piecewise(x, x_pts, y_pts) == y``). +* Add ``linopy.piecewise()`` to create piecewise linear function descriptors (`PiecewiseExpression`) from separate x/y breakpoint arrays. +* Add ``linopy.breakpoints()`` factory for convenient breakpoint construction from lists, Series, DataFrames, DataArrays, or dicts. Supports slopes mode. +* Add ``linopy.segments()`` factory for disjunctive (disconnected) breakpoints. +* Add ``active`` parameter to ``piecewise()`` for gating piecewise linear functions with a binary variable (e.g. unit commitment). 
Supported for incremental, SOS2, and disjunctive methods. * Add the `sphinx-copybutton` to the documentation * Add SOS1 and SOS2 reformulations for solvers not supporting them. +* Enable quadratic problems with SCIP on windows. + + +Version 0.6.5 +------------- + +* Expose the knitro context to allow for more flexible use of the knitro python API. Version 0.6.4 diff --git a/examples/piecewise-linear-constraints.ipynb b/examples/piecewise-linear-constraints.ipynb index dd9192b3..4646e87d 100644 --- a/examples/piecewise-linear-constraints.ipynb +++ b/examples/piecewise-linear-constraints.ipynb @@ -2,39 +2,24 @@ "cells": [ { "cell_type": "markdown", - "id": "intro", "metadata": {}, - "source": [ - "# Piecewise Linear Constraints\n", - "\n", - "This notebook demonstrates linopy's three PWL formulations. Each example\n", - "builds a separate dispatch model where a single power plant must meet\n", - "a time-varying demand.\n", - "\n", - "| Example | Plant | Limitation | Formulation |\n", - "|---------|-------|------------|-------------|\n", - "| 1 | Gas turbine (0–100 MW) | Convex heat rate | SOS2 |\n", - "| 2 | Coal plant (0–150 MW) | Monotonic heat rate | Incremental |\n", - "| 3 | Diesel generator (off or 50–80 MW) | Forbidden zone | Disjunctive |" - ] + "source": "# Piecewise Linear Constraints Tutorial\n\nThis notebook demonstrates linopy's piecewise linear (PWL) constraint formulations.\nEach example builds a separate dispatch model where a single power plant must meet\na time-varying demand.\n\n| Example | Plant | Limitation | Formulation |\n|---------|-------|------------|-------------|\n| 1 | Gas turbine (0–100 MW) | Convex heat rate | SOS2 |\n| 2 | Coal plant (0–150 MW) | Monotonic heat rate | Incremental |\n| 3 | Diesel generator (off or 50–80 MW) | Forbidden zone | Disjunctive |\n| 4 | Concave efficiency curve | Inequality bound | LP |\n| 5 | Gas unit with commitment | On/off + min load | Incremental + `active` |\n\n**Note:** The `piecewise(...)` expression 
can appear on either side of\nthe comparison operator (`==`, `<=`, `>=`). For example, both\n`linopy.piecewise(x, x_pts, y_pts) == y` and `y == linopy.piecewise(...)` work." }, { "cell_type": "code", - "execution_count": null, - "id": "imports", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.511970Z", - "start_time": "2026-02-09T19:21:33.501473Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:41.350637Z", - "iopub.status.busy": "2026-02-09T19:21:41.350440Z", - "iopub.status.idle": "2026-02-09T19:21:42.583457Z", - "shell.execute_reply": "2026-02-09T19:21:42.583146Z" + "iopub.execute_input": "2026-03-06T11:51:29.167007Z", + "iopub.status.busy": "2026-03-06T11:51:29.166576Z", + "iopub.status.idle": "2026-03-06T11:51:29.185103Z", + "shell.execute_reply": "2026-03-06T11:51:29.184712Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.166974Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:27.800436Z", + "start_time": "2026-03-09T10:17:27.796927Z" } }, - "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "import pandas as pd\n", @@ -45,56 +30,32 @@ "time = pd.Index([1, 2, 3], name=\"time\")\n", "\n", "\n", - "def plot_pwl_results(model, breakpoints, demand, color=\"C0\", fuel_rate=None):\n", + "def plot_pwl_results(\n", + " model, x_pts, y_pts, demand, x_name=\"power\", y_name=\"fuel\", color=\"C0\"\n", + "):\n", " \"\"\"Plot PWL curve with operating points and dispatch vs demand.\"\"\"\n", " sol = model.solution\n", - " bp = breakpoints.to_pandas()\n", " fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3.5))\n", "\n", " # Left: PWL curve with operating points\n", - " if \"var\" in breakpoints.dims:\n", - " # Connected: power-fuel curve from var dimension\n", + " ax1.plot(\n", + " x_pts.values.flat, y_pts.values.flat, \"o-\", color=color, label=\"Breakpoints\"\n", + " )\n", + " for t in time:\n", " ax1.plot(\n", - " bp.loc[\"power\"], bp.loc[\"fuel\"], \"o-\", color=color, label=\"Breakpoints\"\n", - " 
)\n", - " for t in time:\n", - " ax1.plot(\n", - " sol[\"power\"].sel(time=t),\n", - " sol[\"fuel\"].sel(time=t),\n", - " \"s\",\n", - " ms=10,\n", - " label=f\"t={t}\",\n", - " )\n", - " ax1.set(xlabel=\"Power (MW)\", ylabel=\"Fuel (MWh)\", title=\"Heat rate curve\")\n", - " else:\n", - " # Disconnected: segments with linear cost\n", - " for seg in bp.index:\n", - " lo, hi = bp.loc[seg]\n", - " pw = [lo, hi] if lo != hi else [lo]\n", - " ax1.plot(\n", - " pw,\n", - " [fuel_rate * p for p in pw],\n", - " \"o-\",\n", - " color=color,\n", - " label=\"Breakpoints\" if seg == 0 else None,\n", - " )\n", - " ax1.axvspan(\n", - " bp.iloc[0, 1] + 0.5,\n", - " bp.iloc[1, 0] - 0.5,\n", - " color=\"red\",\n", - " alpha=0.1,\n", - " label=\"Forbidden zone\",\n", + " sol[x_name].sel(time=t),\n", + " sol[y_name].sel(time=t),\n", + " \"s\",\n", + " ms=10,\n", + " label=f\"t={t}\",\n", " )\n", - " for t in time:\n", - " p = float(sol[\"power\"].sel(time=t))\n", - " ax1.plot(p, fuel_rate * p, \"s\", ms=10, label=f\"t={t}\")\n", - " ax1.set(xlabel=\"Power (MW)\", ylabel=\"Cost\", title=\"Cost curve\")\n", + " ax1.set(xlabel=x_name.title(), ylabel=y_name.title(), title=\"Heat rate curve\")\n", " ax1.legend()\n", "\n", " # Right: dispatch vs demand\n", " x = list(range(len(time)))\n", - " power_vals = sol[\"power\"].values\n", - " ax2.bar(x, power_vals, color=color, label=\"Power\")\n", + " power_vals = sol[x_name].values\n", + " ax2.bar(x, power_vals, color=color, label=x_name.title())\n", " if \"backup\" in sol:\n", " ax2.bar(\n", " x,\n", @@ -113,74 +74,78 @@ " label=\"Demand\",\n", " )\n", " ax2.set(\n", - " xlabel=\"Time\", ylabel=\"MW\", title=\"Dispatch\", xticks=x, xticklabels=time.values\n", + " xlabel=\"Time\",\n", + " ylabel=\"MW\",\n", + " title=\"Dispatch\",\n", + " xticks=x,\n", + " xticklabels=time.values,\n", " )\n", " ax2.legend()\n", " plt.tight_layout()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "sos2-md", "metadata": 
{}, "source": [ "## 1. SOS2 formulation — Gas turbine\n", "\n", "The gas turbine has a **convex** heat rate: efficient at moderate load,\n", "increasingly fuel-hungry at high output. We use the **SOS2** formulation\n", - "to link power output and fuel consumption." + "to link power output and fuel consumption via separate x/y breakpoints." ] }, { "cell_type": "code", - "execution_count": null, - "id": "sos2-setup", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.525641Z", - "start_time": "2026-02-09T19:21:33.516874Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:42.585470Z", - "iopub.status.busy": "2026-02-09T19:21:42.585263Z", - "iopub.status.idle": "2026-02-09T19:21:42.639106Z", - "shell.execute_reply": "2026-02-09T19:21:42.638745Z" + "iopub.execute_input": "2026-03-06T11:51:29.185693Z", + "iopub.status.busy": "2026-03-06T11:51:29.185601Z", + "iopub.status.idle": "2026-03-06T11:51:29.199760Z", + "shell.execute_reply": "2026-03-06T11:51:29.199416Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.185683Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:27.808870Z", + "start_time": "2026-03-09T10:17:27.806626Z" } }, - "outputs": [], "source": [ - "breakpoints = linopy.breakpoints(power=[0, 30, 60, 100], fuel=[0, 36, 84, 170])\n", - "breakpoints.to_pandas()" - ] + "x_pts1 = linopy.breakpoints([0, 30, 60, 100])\n", + "y_pts1 = linopy.breakpoints([0, 36, 84, 170])\n", + "print(\"x_pts:\", x_pts1.values)\n", + "print(\"y_pts:\", y_pts1.values)" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "df198d44e962132f", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.584017Z", - "start_time": "2026-02-09T19:21:33.548479Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:42.640305Z", - "iopub.status.busy": "2026-02-09T19:21:42.640145Z", - "iopub.status.idle": "2026-02-09T19:21:42.676689Z", - "shell.execute_reply": 
"2026-02-09T19:21:42.676404Z" + "iopub.execute_input": "2026-03-06T11:51:29.200170Z", + "iopub.status.busy": "2026-03-06T11:51:29.200087Z", + "iopub.status.idle": "2026-03-06T11:51:29.266847Z", + "shell.execute_reply": "2026-03-06T11:51:29.266379Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.200161Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:27.851223Z", + "start_time": "2026-03-09T10:17:27.811464Z" } }, - "outputs": [], "source": [ "m1 = linopy.Model()\n", "\n", "power = m1.add_variables(name=\"power\", lower=0, upper=100, coords=[time])\n", "fuel = m1.add_variables(name=\"fuel\", lower=0, coords=[time])\n", "\n", + "# piecewise(...) can be written on either side of the comparison\n", "# breakpoints are auto-broadcast to match the time dimension\n", "m1.add_piecewise_constraints(\n", - " {\"power\": power, \"fuel\": fuel},\n", - " breakpoints,\n", - " dim=\"breakpoint\",\n", + " linopy.piecewise(power, x_pts1, y_pts1) == fuel,\n", " name=\"pwl\",\n", " method=\"sos2\",\n", ")\n", @@ -188,122 +153,123 @@ "demand1 = xr.DataArray([50, 80, 30], coords=[time])\n", "m1.add_constraints(power >= demand1, name=\"demand\")\n", "m1.add_objective(fuel.sum())" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "sos2-solve", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.646228Z", - "start_time": "2026-02-09T19:21:33.602890Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:42.678723Z", - "iopub.status.busy": "2026-02-09T19:21:42.678455Z", - "iopub.status.idle": "2026-02-09T19:21:42.729810Z", - "shell.execute_reply": "2026-02-09T19:21:42.729268Z" + "iopub.execute_input": "2026-03-06T11:51:29.267522Z", + "iopub.status.busy": "2026-03-06T11:51:29.267433Z", + "iopub.status.idle": "2026-03-06T11:51:29.326758Z", + "shell.execute_reply": "2026-03-06T11:51:29.326518Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.267514Z" + }, + "ExecuteTime": { + 
"end_time": "2026-03-09T10:17:27.899254Z", + "start_time": "2026-03-09T10:17:27.854515Z" } }, - "outputs": [], "source": [ "m1.solve()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "sos2-results", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.671517Z", - "start_time": "2026-02-09T19:21:33.665702Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:42.732333Z", - "iopub.status.busy": "2026-02-09T19:21:42.732173Z", - "iopub.status.idle": "2026-02-09T19:21:42.737877Z", - "shell.execute_reply": "2026-02-09T19:21:42.737648Z" + "iopub.execute_input": "2026-03-06T11:51:29.327139Z", + "iopub.status.busy": "2026-03-06T11:51:29.327044Z", + "iopub.status.idle": "2026-03-06T11:51:29.339334Z", + "shell.execute_reply": "2026-03-06T11:51:29.338974Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.327130Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:27.914316Z", + "start_time": "2026-03-09T10:17:27.909570Z" } }, - "outputs": [], "source": [ "m1.solution[[\"power\", \"fuel\"]].to_pandas()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "hcqytsfoaa", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.802613Z", - "start_time": "2026-02-09T19:21:33.695925Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:42.739144Z", - "iopub.status.busy": "2026-02-09T19:21:42.738977Z", - "iopub.status.idle": "2026-02-09T19:21:42.983660Z", - "shell.execute_reply": "2026-02-09T19:21:42.982758Z" + "iopub.execute_input": "2026-03-06T11:51:29.339689Z", + "iopub.status.busy": "2026-03-06T11:51:29.339608Z", + "iopub.status.idle": "2026-03-06T11:51:29.489677Z", + "shell.execute_reply": "2026-03-06T11:51:29.489280Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.339680Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.025921Z", + "start_time": "2026-03-09T10:17:27.922945Z" } 
}, - "outputs": [], "source": [ - "plot_pwl_results(m1, breakpoints, demand1, color=\"C0\")" - ] + "plot_pwl_results(m1, x_pts1, y_pts1, demand1, color=\"C0\")" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "incremental-md", "metadata": {}, "source": [ "## 2. Incremental formulation — Coal plant\n", "\n", "The coal plant has a **monotonically increasing** heat rate. Since all\n", "breakpoints are strictly monotonic, we can use the **incremental**\n", - "formulation — a pure LP with no SOS2 or binary variables." + "formulation — which uses fill-fraction variables with binary indicators." ] }, { "cell_type": "code", - "execution_count": null, - "id": "incremental-setup", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.829667Z", - "start_time": "2026-02-09T19:21:33.825683Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:42.987305Z", - "iopub.status.busy": "2026-02-09T19:21:42.986204Z", - "iopub.status.idle": "2026-02-09T19:21:43.003874Z", - "shell.execute_reply": "2026-02-09T19:21:42.998265Z" + "iopub.execute_input": "2026-03-06T11:51:29.490092Z", + "iopub.status.busy": "2026-03-06T11:51:29.490011Z", + "iopub.status.idle": "2026-03-06T11:51:29.500894Z", + "shell.execute_reply": "2026-03-06T11:51:29.500558Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.490084Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.039245Z", + "start_time": "2026-03-09T10:17:28.035712Z" } }, - "outputs": [], "source": [ - "breakpoints = linopy.breakpoints(power=[0, 50, 100, 150], fuel=[0, 55, 130, 225])\n", - "breakpoints.to_pandas()" - ] + "x_pts2 = linopy.breakpoints([0, 50, 100, 150])\n", + "y_pts2 = linopy.breakpoints([0, 55, 130, 225])\n", + "print(\"x_pts:\", x_pts2.values)\n", + "print(\"y_pts:\", y_pts2.values)" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "8nq1zqvq9re", "metadata": { - "ExecuteTime": { - "end_time": 
"2026-02-09T19:21:33.913679Z", - "start_time": "2026-02-09T19:21:33.855910Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.009748Z", - "iopub.status.busy": "2026-02-09T19:21:43.009216Z", - "iopub.status.idle": "2026-02-09T19:21:43.067070Z", - "shell.execute_reply": "2026-02-09T19:21:43.066402Z" + "iopub.execute_input": "2026-03-06T11:51:29.501317Z", + "iopub.status.busy": "2026-03-06T11:51:29.501216Z", + "iopub.status.idle": "2026-03-06T11:51:29.604024Z", + "shell.execute_reply": "2026-03-06T11:51:29.603543Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.501307Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.121499Z", + "start_time": "2026-03-09T10:17:28.052395Z" } }, - "outputs": [], "source": [ "m2 = linopy.Model()\n", "\n", @@ -312,9 +278,7 @@ "\n", "# breakpoints are auto-broadcast to match the time dimension\n", "m2.add_piecewise_constraints(\n", - " {\"power\": power, \"fuel\": fuel},\n", - " breakpoints,\n", - " dim=\"breakpoint\",\n", + " linopy.piecewise(power, x_pts2, y_pts2) == fuel,\n", " name=\"pwl\",\n", " method=\"incremental\",\n", ")\n", @@ -322,199 +286,577 @@ "demand2 = xr.DataArray([80, 120, 50], coords=[time])\n", "m2.add_constraints(power >= demand2, name=\"demand\")\n", "m2.add_objective(fuel.sum())" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "incremental-solve", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.981694Z", - "start_time": "2026-02-09T19:21:33.933519Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.070384Z", - "iopub.status.busy": "2026-02-09T19:21:43.070023Z", - "iopub.status.idle": "2026-02-09T19:21:43.124118Z", - "shell.execute_reply": "2026-02-09T19:21:43.123883Z" + "iopub.execute_input": "2026-03-06T11:51:29.604434Z", + "iopub.status.busy": "2026-03-06T11:51:29.604359Z", + "iopub.status.idle": "2026-03-06T11:51:29.680947Z", + "shell.execute_reply": 
"2026-03-06T11:51:29.680667Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.604427Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.174903Z", + "start_time": "2026-03-09T10:17:28.124418Z" } }, - "outputs": [], "source": [ "m2.solve();" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "incremental-results", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:33.991781Z", - "start_time": "2026-02-09T19:21:33.986137Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.125356Z", - "iopub.status.busy": "2026-02-09T19:21:43.125291Z", - "iopub.status.idle": "2026-02-09T19:21:43.129072Z", - "shell.execute_reply": "2026-02-09T19:21:43.128850Z" + "iopub.execute_input": "2026-03-06T11:51:29.681833Z", + "iopub.status.busy": "2026-03-06T11:51:29.681725Z", + "iopub.status.idle": "2026-03-06T11:51:29.698558Z", + "shell.execute_reply": "2026-03-06T11:51:29.698011Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.681822Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.182912Z", + "start_time": "2026-03-09T10:17:28.178226Z" } }, - "outputs": [], "source": [ "m2.solution[[\"power\", \"fuel\"]].to_pandas()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "fua98r986pl", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:34.116658Z", - "start_time": "2026-02-09T19:21:34.021992Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.130293Z", - "iopub.status.busy": "2026-02-09T19:21:43.130221Z", - "iopub.status.idle": "2026-02-09T19:21:43.281657Z", - "shell.execute_reply": "2026-02-09T19:21:43.281256Z" + "iopub.execute_input": "2026-03-06T11:51:29.699350Z", + "iopub.status.busy": "2026-03-06T11:51:29.699116Z", + "iopub.status.idle": "2026-03-06T11:51:29.852000Z", + "shell.execute_reply": "2026-03-06T11:51:29.851741Z", + "shell.execute_reply.started": 
"2026-03-06T11:51:29.699334Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.285938Z", + "start_time": "2026-03-09T10:17:28.191498Z" } }, - "outputs": [], "source": [ - "plot_pwl_results(m2, breakpoints, demand2, color=\"C1\")" - ] + "plot_pwl_results(m2, x_pts2, y_pts2, demand2, color=\"C1\")" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", - "id": "disjunctive-md", "metadata": {}, "source": [ "## 3. Disjunctive formulation — Diesel generator\n", "\n", "The diesel generator has a **forbidden operating zone**: it must either\n", - "be off (0 MW) or run between 50–80 MW. Because of this gap, we add a\n", - "high-cost **backup** source to cover demand when the diesel is off or at\n", - "its maximum." + "be off (0 MW) or run between 50–80 MW. Because of this gap, we use\n", + "**disjunctive** piecewise constraints via `linopy.segments()` and add a\n", + "high-cost **backup** source to cover demand when the diesel is off or\n", + "at its maximum.\n", + "\n", + "The disjunctive formulation is selected automatically when the breakpoint\n", + "arrays have a segment dimension (created by `linopy.segments()`)." 
] }, { "cell_type": "code", - "execution_count": null, - "id": "disjunctive-setup", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:34.147920Z", - "start_time": "2026-02-09T19:21:34.142740Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.283679Z", - "iopub.status.busy": "2026-02-09T19:21:43.283490Z", - "iopub.status.idle": "2026-02-09T19:21:43.290429Z", - "shell.execute_reply": "2026-02-09T19:21:43.289665Z" + "iopub.execute_input": "2026-03-06T11:51:29.852397Z", + "iopub.status.busy": "2026-03-06T11:51:29.852305Z", + "iopub.status.idle": "2026-03-06T11:51:29.866500Z", + "shell.execute_reply": "2026-03-06T11:51:29.866141Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.852387Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.301657Z", + "start_time": "2026-03-09T10:17:28.294924Z" } }, - "outputs": [], "source": [ - "breakpoints = linopy.breakpoints.segments([(0, 0), (50, 80)])\n", - "breakpoints.to_pandas()" - ] + "# x-breakpoints define where each segment lives on the power axis\n", + "# y-breakpoints define the corresponding cost values\n", + "x_seg = linopy.segments([(0, 0), (50, 80)])\n", + "y_seg = linopy.segments([(0, 0), (125, 200)])\n", + "print(\"x segments:\\n\", x_seg.to_pandas())\n", + "print(\"y segments:\\n\", y_seg.to_pandas())" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "reevc7ood3", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:34.234326Z", - "start_time": "2026-02-09T19:21:34.188461Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.293229Z", - "iopub.status.busy": "2026-02-09T19:21:43.292936Z", - "iopub.status.idle": "2026-02-09T19:21:43.363049Z", - "shell.execute_reply": "2026-02-09T19:21:43.362442Z" + "iopub.execute_input": "2026-03-06T11:51:29.866940Z", + "iopub.status.busy": "2026-03-06T11:51:29.866839Z", + "iopub.status.idle": "2026-03-06T11:51:29.955272Z", + "shell.execute_reply": 
"2026-03-06T11:51:29.954810Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.866931Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.381180Z", + "start_time": "2026-03-09T10:17:28.308026Z" } }, - "outputs": [], "source": [ "m3 = linopy.Model()\n", "\n", "power = m3.add_variables(name=\"power\", lower=0, upper=80, coords=[time])\n", + "cost = m3.add_variables(name=\"cost\", lower=0, coords=[time])\n", "backup = m3.add_variables(name=\"backup\", lower=0, coords=[time])\n", "\n", "# breakpoints are auto-broadcast to match the time dimension\n", - "m3.add_disjunctive_piecewise_constraints(power, breakpoints, name=\"pwl\")\n", + "m3.add_piecewise_constraints(\n", + " linopy.piecewise(power, x_seg, y_seg) == cost,\n", + " name=\"pwl\",\n", + ")\n", "\n", "demand3 = xr.DataArray([10, 70, 90], coords=[time])\n", "m3.add_constraints(power + backup >= demand3, name=\"demand\")\n", - "m3.add_objective((2.5 * power + 10 * backup).sum())" - ] + "m3.add_objective((cost + 10 * backup).sum())" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, - "id": "disjunctive-solve", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:34.322383Z", - "start_time": "2026-02-09T19:21:34.260066Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.366552Z", - "iopub.status.busy": "2026-02-09T19:21:43.366148Z", - "iopub.status.idle": "2026-02-09T19:21:43.457707Z", - "shell.execute_reply": "2026-02-09T19:21:43.457113Z" + "iopub.execute_input": "2026-03-06T11:51:29.955750Z", + "iopub.status.busy": "2026-03-06T11:51:29.955667Z", + "iopub.status.idle": "2026-03-06T11:51:30.027311Z", + "shell.execute_reply": "2026-03-06T11:51:30.026945Z", + "shell.execute_reply.started": "2026-03-06T11:51:29.955741Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.437326Z", + "start_time": "2026-03-09T10:17:28.384629Z" } }, - "outputs": [], "source": [ "m3.solve()" - ] + ], + "outputs": [], + "execution_count": 
null }, { "cell_type": "code", - "execution_count": null, - "id": "disjunctive-results", "metadata": { - "ExecuteTime": { - "end_time": "2026-02-09T19:21:34.333489Z", - "start_time": "2026-02-09T19:21:34.327107Z" - }, "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.459934Z", - "iopub.status.busy": "2026-02-09T19:21:43.459654Z", - "iopub.status.idle": "2026-02-09T19:21:43.468110Z", - "shell.execute_reply": "2026-02-09T19:21:43.465566Z" + "iopub.execute_input": "2026-03-06T11:51:30.028114Z", + "iopub.status.busy": "2026-03-06T11:51:30.027864Z", + "iopub.status.idle": "2026-03-06T11:51:30.043138Z", + "shell.execute_reply": "2026-03-06T11:51:30.042813Z", + "shell.execute_reply.started": "2026-03-06T11:51:30.028095Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.449248Z", + "start_time": "2026-03-09T10:17:28.444065Z" } }, + "source": [ + "m3.solution[[\"power\", \"cost\", \"backup\"]].to_pandas()" + ], "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ - "m3.solution[[\"power\", \"backup\"]].to_pandas()" + "## 4. LP formulation — Concave efficiency bound\n", + "\n", + "When the piecewise function is **concave** and we use a `>=` constraint\n", + "(i.e. `pw >= y`, meaning y is bounded above by pw), linopy can use a\n", + "pure **LP** formulation with tangent-line constraints — no SOS2 or\n", + "binary variables needed. 
This is the fastest to solve.\n", + "\n", + "For this formulation, the x-breakpoints must be in **strictly increasing**\n", + "order.\n", + "\n", + "Here we bound fuel consumption *below* a concave efficiency envelope.\n" ] }, { "cell_type": "code", - "execution_count": null, - "id": "g32vxea6jwe", "metadata": { + "execution": { + "iopub.execute_input": "2026-03-06T11:51:30.043492Z", + "iopub.status.busy": "2026-03-06T11:51:30.043410Z", + "iopub.status.idle": "2026-03-06T11:51:30.113382Z", + "shell.execute_reply": "2026-03-06T11:51:30.112320Z", + "shell.execute_reply.started": "2026-03-06T11:51:30.043484Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.503165Z", + "start_time": "2026-03-09T10:17:28.458328Z" + } + }, + "source": [ + "x_pts4 = linopy.breakpoints([0, 40, 80, 120])\n", + "# Concave curve: decreasing marginal fuel per MW\n", + "y_pts4 = linopy.breakpoints([0, 50, 90, 120])\n", + "\n", + "m4 = linopy.Model()\n", + "\n", + "power = m4.add_variables(name=\"power\", lower=0, upper=120, coords=[time])\n", + "fuel = m4.add_variables(name=\"fuel\", lower=0, coords=[time])\n", + "\n", + "# pw >= fuel means fuel <= concave_function(power) → auto-selects LP method\n", + "m4.add_piecewise_constraints(\n", + " linopy.piecewise(power, x_pts4, y_pts4) >= fuel,\n", + " name=\"pwl\",\n", + ")\n", + "\n", + "demand4 = xr.DataArray([30, 80, 100], coords=[time])\n", + "m4.add_constraints(power == demand4, name=\"demand\")\n", + "# Maximize fuel (to push against the upper bound)\n", + "m4.add_objective(-fuel.sum())" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": { + "execution": { + "iopub.execute_input": "2026-03-06T11:51:30.113818Z", + "iopub.status.busy": "2026-03-06T11:51:30.113727Z", + "iopub.status.idle": "2026-03-06T11:51:30.171329Z", + "shell.execute_reply": "2026-03-06T11:51:30.170942Z", + "shell.execute_reply.started": "2026-03-06T11:51:30.113810Z" + }, "ExecuteTime": { - "end_time": 
"2026-02-09T19:21:34.545650Z", - "start_time": "2026-02-09T19:21:34.425456Z" + "end_time": "2026-03-09T10:17:28.554560Z", + "start_time": "2026-03-09T10:17:28.520243Z" + } + }, + "source": [ + "m4.solve()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": { + "execution": { + "iopub.execute_input": "2026-03-06T11:51:30.172009Z", + "iopub.status.busy": "2026-03-06T11:51:30.171791Z", + "iopub.status.idle": "2026-03-06T11:51:30.191956Z", + "shell.execute_reply": "2026-03-06T11:51:30.191556Z", + "shell.execute_reply.started": "2026-03-06T11:51:30.171993Z" }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.563539Z", + "start_time": "2026-03-09T10:17:28.559654Z" + } + }, + "source": [ + "m4.solution[[\"power\", \"fuel\"]].to_pandas()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": { "execution": { - "iopub.execute_input": "2026-02-09T19:21:43.475302Z", - "iopub.status.busy": "2026-02-09T19:21:43.475060Z", - "iopub.status.idle": "2026-02-09T19:21:43.697893Z", - "shell.execute_reply": "2026-02-09T19:21:43.697398Z" + "iopub.execute_input": "2026-03-06T11:51:30.192604Z", + "iopub.status.busy": "2026-03-06T11:51:30.192376Z", + "iopub.status.idle": "2026-03-06T11:51:30.345074Z", + "shell.execute_reply": "2026-03-06T11:51:30.344642Z", + "shell.execute_reply.started": "2026-03-06T11:51:30.192590Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.665419Z", + "start_time": "2026-03-09T10:17:28.575163Z" } }, + "source": [ + "plot_pwl_results(m4, x_pts4, y_pts4, demand4, color=\"C4\")" + ], "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ - "plot_pwl_results(m3, breakpoints, demand3, color=\"C2\", fuel_rate=2.5)" + "## 5. Slopes mode — Building breakpoints from slopes\n", + "\n", + "Sometimes you know the **slope** of each segment rather than the y-values\n", + "at each breakpoint. 
The `breakpoints()` factory can compute y-values from\n", + "slopes, x-coordinates, and an initial y-value." ] + }, + { + "cell_type": "code", + "metadata": { + "execution": { + "iopub.execute_input": "2026-03-06T11:51:30.345523Z", + "iopub.status.busy": "2026-03-06T11:51:30.345404Z", + "iopub.status.idle": "2026-03-06T11:51:30.357312Z", + "shell.execute_reply": "2026-03-06T11:51:30.356954Z", + "shell.execute_reply.started": "2026-03-06T11:51:30.345513Z" + }, + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.673673Z", + "start_time": "2026-03-09T10:17:28.668792Z" + } + }, + "source": [ + "# Marginal costs: $1.1/MW for 0-50, $1.5/MW for 50-100, $1.9/MW for 100-150\n", + "x_pts5 = linopy.breakpoints([0, 50, 100, 150])\n", + "y_pts5 = linopy.breakpoints(slopes=[1.1, 1.5, 1.9], x_points=[0, 50, 100, 150], y0=0)\n", + "print(\"y breakpoints from slopes:\", y_pts5.values)" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": "## 6. Active parameter — Unit commitment with piecewise efficiency\n\nIn unit commitment problems, a binary variable $u_t$ controls whether a\nunit is **on** or **off**. When off, both power output and fuel consumption\nmust be zero. 
When on, the unit operates within its piecewise-linear\nefficiency curve between $P_{min}$ and $P_{max}$.\n\nThe `active` parameter on `piecewise()` handles this by gating the\ninternal PWL formulation with the commitment binary:\n\n- **Incremental:** delta bounds tighten from $\\delta_i \\leq 1$ to\n $\\delta_i \\leq u$, and base terms are multiplied by $u$\n- **SOS2:** convexity constraint becomes $\\sum \\lambda_i = u$\n- **Disjunctive:** segment selection becomes $\\sum z_k = u$\n\nThis is the only gating behavior expressible with pure linear constraints.\nSelectively *relaxing* the PWL (letting x, y float freely when off) would\nrequire big-M or indicator constraints.", + "metadata": {} + }, + { + "cell_type": "code", + "source": "# Unit parameters: operates between 30-100 MW when on\np_min, p_max = 30, 100\nfuel_min, fuel_max = 40, 170\nstartup_cost = 50\n\nx_pts6 = linopy.breakpoints([p_min, 60, p_max])\ny_pts6 = linopy.breakpoints([fuel_min, 90, fuel_max])\nprint(\"Power breakpoints:\", x_pts6.values)\nprint(\"Fuel breakpoints: \", y_pts6.values)", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.685034Z", + "start_time": "2026-03-09T10:17:28.681601Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Power breakpoints: [ 30. 60. 100.]\n", + "Fuel breakpoints: [ 40. 90. 
170.]\n" + ] + } + ], + "execution_count": null + }, + { + "cell_type": "code", + "source": "m6 = linopy.Model()\n\npower = m6.add_variables(name=\"power\", lower=0, upper=p_max, coords=[time])\nfuel = m6.add_variables(name=\"fuel\", lower=0, coords=[time])\ncommit = m6.add_variables(name=\"commit\", binary=True, coords=[time])\n\n# The active parameter gates the PWL with the commitment binary:\n# - commit=1: power in [30, 100], fuel = f(power)\n# - commit=0: power = 0, fuel = 0\nm6.add_piecewise_constraints(\n linopy.piecewise(power, x_pts6, y_pts6, active=commit) == fuel,\n name=\"pwl\",\n method=\"incremental\",\n)\n\n# Demand: low at t=1 (cheaper to stay off), high at t=2,3\ndemand6 = xr.DataArray([15, 70, 50], coords=[time])\nbackup = m6.add_variables(name=\"backup\", lower=0, coords=[time])\nm6.add_constraints(power + backup >= demand6, name=\"demand\")\n\n# Objective: fuel + startup cost + backup at $5/MW (cheap enough that\n# staying off at low demand beats committing at minimum load)\nm6.add_objective((fuel + startup_cost * commit + 5 * backup).sum())", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.787328Z", + "start_time": "2026-03-09T10:17:28.697214Z" + } + }, + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "source": "m6.solve()", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T10:17:28.878112Z", + "start_time": "2026-03-09T10:17:28.791383Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Set parameter Username\n", + "Academic license - for non-commercial use only - expires 2026-12-18\n", + "Read LP format model from file /private/var/folders/7j/18_93__x4wl2px44pq3f570m0000gn/T/linopy-problem-fm9ucuy2.lp\n", + "Reading time = 0.00 seconds\n", + "obj: 27 rows, 24 columns, 66 nonzeros\n", + "Gurobi Optimizer version 13.0.1 build v13.0.1rc0 (mac64[arm] - Darwin 25.2.0 25C56)\n", + "\n", + "CPU model: Apple M3\n", + "Thread count: 8 physical cores, 8 
logical processors, using up to 8 threads\n", + "\n", + "Optimize a model with 27 rows, 24 columns and 66 nonzeros (Min)\n", + "Model fingerprint: 0x4b0d5f70\n", + "Model has 9 linear objective coefficients\n", + "Variable types: 15 continuous, 9 integer (9 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 8e+01]\n", + " Objective range [1e+00, 5e+01]\n", + " Bounds range [1e+00, 1e+02]\n", + " RHS range [2e+01, 7e+01]\n", + "\n", + "Found heuristic solution: objective 675.0000000\n", + "Presolve removed 24 rows and 19 columns\n", + "Presolve time: 0.00s\n", + "Presolved: 3 rows, 5 columns, 10 nonzeros\n", + "Found heuristic solution: objective 485.0000000\n", + "Variable types: 3 continuous, 2 integer (2 binary)\n", + "\n", + "Root relaxation: objective 3.516667e+02, 3 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 351.66667 0 1 485.00000 351.66667 27.5% - 0s\n", + "* 0 0 0 358.3333333 358.33333 0.00% - 0s\n", + "\n", + "Explored 1 nodes (5 simplex iterations) in 0.01 seconds (0.00 work units)\n", + "Thread count was 8 (of 8 available processors)\n", + "\n", + "Solution count 3: 358.333 485 675 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 3.583333333333e+02, best bound 3.583333333333e+02, gap 0.0000%\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Dual values of MILP couldn't be parsed\n" + ] + }, + { + "data": { + "text/plain": [ + "('ok', 'optimal')" + ] + }, + "execution_count": 47, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null + }, + { + "cell_type": "code", + "source": "m6.solution[[\"commit\", \"power\", \"fuel\", \"backup\"]].to_pandas()", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T10:17:29.079925Z", + "start_time": "2026-03-09T10:17:29.069821Z" + } + }, + 
"outputs": [ + { + "data": { + "text/plain": [ + " commit power fuel backup\n", + "time \n", + "1 0.0 0.0 0.000000 15.0\n", + "2 1.0 70.0 110.000000 0.0\n", + "3 1.0 50.0 73.333333 0.0" + ], + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
commitpowerfuelbackup
time
10.00.00.00000015.0
21.070.0110.0000000.0
31.050.073.3333330.0
\n", + "
" + ] + }, + "execution_count": 48, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null + }, + { + "cell_type": "code", + "source": "plot_pwl_results(m6, x_pts6, y_pts6, demand6, color=\"C2\")", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-09T10:17:29.226034Z", + "start_time": "2026-03-09T10:17:29.097467Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA90AAAFUCAYAAAA57l+/AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAABq3ElEQVR4nO3dB3hU1fbw4ZVeKKETeu9SpDeRJoiCIlxRBKliowiIUqQLBlABQYpYKCqCKKCgYqFKbyId6b1Jh0DqfM/afjP/mZBAEjKZZOb33mducs6cmZycieyz9l57bS+LxWIRAAAAAACQ4rxT/i0BAAAAAABBNwAAAAAATsRINwAAAAAATkLQDQAAAACAkxB0AwAAAADgJATdAAAAAAA4CUE3AAAAAABOQtANAAAAAICTEHQDAAAAAOAkBN0AAABAGjJ8+HDx8vKS9E5/hx49erj6NACXI+gGnGTWrFmmsdm6dWu8z9evX18eeughp17/n3/+2TTcqWnq1KnmdwcAAI73BNZHYGCg5M2bV5o2bSqTJk2SGzdupMlL5Yr7CMAdEXQDbkwbyxEjRqTqzyToBgAgfiNHjpQvv/xSpk2bJj179jT7evfuLeXLl5edO3fajhs8eLDcvn3bI+8jAHfk6+oTAJB2WSwWuXPnjgQFBYm709/T399fvL3piwQAOEezZs2katWqtu2BAwfKihUrpHnz5vLUU0/Jvn37TJvr6+trHgDcA3eXQBrz1VdfSZUqVUyjmy1bNnn++efl5MmTDsf8+eef8uyzz0rBggUlICBAChQoIH369HHoFe/UqZNMmTLFfG+f0nYvhQsXNg3/r7/+am4K9Bw++eQT89zMmTOlYcOGkitXLvMzy5Yta3rq475+z549snr1atvP0zR6q6tXr5oefT1ffY/ixYvL2LFjJTY2NlHX5pdffpFHH31UMmXKJJkzZ5Zq1arJ3LlzHX6+/t5x6TnYn8eqVavMuc2bN8+MJuTLl0+Cg4Nl+/btZv/s2bPveg+9Jvrc0qVLbftOnz4tXbp0kdy5c5vfp1y5cvLFF18k6ncBAEBp2zpkyBA5fvy4uQdIaE7377//LnXr1pUsWbJIxowZpVSpUjJo0KC72rb58+eb/aGhoZIhQwYTzDvjPkLb7o8++siM0mu6fM6cOeXxxx+Pd1rd4sWLzZQ6a1u5bNkyPnx4FLrQACe7du2a/Pvvv3ftj4qKumvf6NGjTcPbpk0beemll+TixYsyefJkqVevnvz111+moVULFiyQ8PBwee211yR79uyyefNmc9ypU6fMc+qVV16RM2fOmEZaU9kS68CBA9K2bVvz+m7duplGXWmArQ2lNt7a+75kyRJ5/fXXTaPbvXt3c8zEiRNNupzeDLzzzjtmnwakSs9XA2YNVPW9taFfv3696eU/e/asee395sNpgKvnoK/Ra6HXRBvuF154QZLj3XffNaPb/fr1k4iICNORULRoUfn222+lY8eODsfqTUzWrFnN/Dt1/vx5qVmzpq1IjN5saKdA165d5fr166ZzAQCAxHjxxRdNoPzbb7+Ztjcu7dDWTvEKFSqYFHUNXg8dOiTr1q2L915C26b+/fvLhQsXTPvauHFj2bFjhy1zLSXuI7S907ZZR+/1niU6OtoE8xs3bnQYzV+7dq0sXLjQ3DNop7nOYW/durWcOHHC/GzAI1gAOMXMmTMt+p/YvR7lypWzHX/s2DGLj4+PZfTo0Q7vs2vXLouvr6/D/vDw8Lt+XlhYmMXLy8ty/Phx277u3bubn5NYhQoVMscvW7bsrufi+5lNmza1FC1a1GGf/k6PPvroXce+++67lgwZMlj++ecfh/0DBgwwv/eJEycSPK+rV69aMmXKZKlRo4bl9u3bDs/FxsY6nH/Hjh3ver2ej/05rVy50vyeeu5xf6+BAwda/Pz8LJcvX7bti4iIsGTJksXSpUsX276uXbta8uTJY/n3338dXv/88
89bQkJC4r1eAADPvifYsmVLgsdo2/Hwww+b74cNG+bQfk+YMMFsX7x4McHXW9u2fPnyWa5fv27b/+2335r9H330UYrdR6xYscLs79Wr113P2bfLeoy/v7/l0KFDtn1///232T958uQEfxfA3ZBeDjiZpmZpL3Hch/ZW29NeYB011lFuHRm3PjQ9rESJErJy5UrbsfZzrG/dumWOq127tpmDraO/D6JIkSK20Vx79j/TOnqvI9dHjhwx2/ejPeePPPKIGS22//209z0mJkbWrFmT4Gv1emll1wEDBpgUNnsPsqSKjmbHna/+3HPPmSwE/TysdORBU+P1OaXX+fvvv5cWLVqY7+1/H712ej00VR0AgMTSLLGEqphbM91++OGH+07J6tChgxlRtvrf//4nefLkMUXRUuo+QttAbX+HDRt213Nx22Vt54sVK2bb1vsfnSKm9w+ApyC9HHCy6tWrO6RZWVmDT6uDBw+axk4D7Pj4+fnZvteUrKFDh8qPP/4oV65ccTguMQHw/YLu+GgKmzauGzZsMClpcX9mSEjIPd9Xfz+tzKpp2PHRFLiEHD582HxN6SXW4vtdK1asKKVLlzbp5Jo6p/T7HDlymHl3StP+NQifMWOGeST19wEAIK6bN2+auinx0U7fzz77zKRxawd0o0aNpFWrViagjlsANO59hAbBWkPl2LFjKXYfoe2yLnmmtWfuR6eTxXcPFPfnAu6MoBtII7TnWhtGnRfs4+MTbw+40lHhxx57TC5fvmzma2mAqIVSdK60Fj1JbFGyhMRXqVwbV23g9WeNHz/eFFzRudDaaz5hwoRE/Uw9Rs/77bffjvf5kiVLyoNKaNRbr1l81zShqux6c6Nz4rRTREcL9KZE57lbK8laf9/27dvfNffbKm4mAwAACdG51BrsanAcH22vNCNMs95++uknU89EO4S1M1izseJr4xLi7PuIuBI6t/+yzwHPQNANpBGaeqUNkI6+3isA3bVrl/zzzz+mwramkNmnYMf1IKnX9rRomhYa0+DTvsfaPuX9fj9Tfz/txdc0s6SypqXt3r07wRsSa8+5jkDHpRVhtUBaYmnQreuSavqcFoLTwmhaRd5KR+s1GNcbl+T8PgAA2LMWKotvepeVjmhrB7g+tAP8vffeM0VLtS22b4s0s8ye3lto0TVrZ3BK3Edou6yremjgnpjRbsDTMacbSCM0TUx7gzXYi9v7q9uXLl1y6DG2P0a/12U74tKeaxVfIJoU8f1M7ZHXZcTi+5nx/Tydq66p6dpIx6XHa9XThDRp0sQEuWFhYWY9bXv256Q3AVo1NTIy0rZPl/iKu1TK/ZQpU8YsgaKjCPrQuXBaQd7+emjlVQ3KtSMgLk0/BwAgMXSdbl1NQzvd27VrF+8xGtzGValSJfNVO8XtzZkzx2Fu+HfffWdWCdEq49Y27EHvI7QN1NfoPUtcjGADd2OkG0gjNGAcNWqUWQ5L5121bNnSBJpHjx6VRYsWycsvv2yWttI0MD1Wv9dUMC1GosFffHOjdL1v1atXL9N7rg2t/YhtYmnQq+nkWjhMlxDREetPP/3UzD3Thjzuz9TlxfR30VFpPUbT39566y0zUq5Lnmj6mh6nxVu0x11vCPR31nnT8dHfUdPYdS6brs2tS4TpqPbff/9t5pdb19XW5/W9dJ1QDfI1LV7XPLUv4JKU0W6d76aF23Rud9w5c2PGjDGjCzVq1DDLu+hyY3pTpAXU/vjjj3hvkAAAnk2nkO3fv990NOvSkxpw6whzoUKFTBsZt1iolS4TpunlTz75pDlW64ZMnTpV8ufPb9butqcjz7qvc+fO5mfokmHaHluXIkuJ+4gGDRqYZc50+S8dWdd2V9PSdckwfU6X0gRgx9Xl0wFPXR5El7CyXzLM6vvvv7fUrVvXLK+lj9KlS5slOw4cOGA7Zu/evZbGjRtbMmbMaMmRI4elW7dutiU49OdaRUdHW3r27GnJmTOnWQbkfv/J65JbTz75Z
LzP/fjjj5YKFSpYAgMDLYULF7aMHTvW8sUXX5j3PHr0qO24c+fOmffQJb70Ofulum7cuGGW5CpevLhZQkTPvXbt2pYPPvjAEhkZeZ8r+t856PFBQUGWzJkzW6pXr2755ptvHI758MMPzXIpAQEBljp16li2bt2a4JJhCxYsSPBnHTx40La029q1a+M95vz58+azKVCggFlmLDQ01NKoUSPLjBkz7vu7AAA8dxlRbQO1zXjsscfMUl72S3zFt2TY8uXLLU8//bQlb9685rX6tW3btg7LcFrbNm0Xta3NlSuXaS+1TbZfBiyl7iP0uffff9/cp+g56THNmjWzbNu2zXaMHq/tZFwJLfEJuCsv/T/7IBwAAABA+rJq1SozyqxLdGpVcwBpB3O6AQAAAABwEoJuAAAAAACchKAbAAAAAAAnIegGAABJVrhwYbOGb9xH9+7dzfO6vJ9+nz17dsmYMaNZYkgrKQNwjvr165vlupjPDaQ9FFIDAABJpuvRx8TE2LZ1zfrHHnvMLKWnN/+vvfaa/PTTTzJr1iwJCQkxSwjp0nvr1q3jagMAPApBNwAAeGC9e/eWpUuXmjV7r1+/Ljlz5pS5c+faRt10beIyZcrIhg0bpGbNmlxxAIDH8HX1CaQFsbGxcubMGcmUKZNJjQMAIC3RlNEbN25I3rx5zWhxWhMZGSlfffWV9O3b17Sj27Ztk6ioKGncuLHtmNKlS0vBggXvGXRHRESYh337fPnyZZOiTvsMAEiv7TNBt4gJuAsUKJCanw8AAEl28uRJyZ8/f5q7cosXL5arV69Kp06dzPa5c+fE399fsmTJ4nBc7ty5zXMJCQsLkxEjRjj9fAEASM322aVB95o1a+T99983PeJnz56VRYsWScuWLW3PJ9SrPW7cOHnrrbdshVyOHz9+V6M9YMCARJ+HjnBbL1bmzJmT+dsAAOAcmq6tncPW9iqt+fzzz6VZs2amp/9BDBw40IyWW127ds2MjtM+IyVotoXeb+r9ZWhoaKJfd+H2BT6ANCxXUK5EH6udfjoymSdPHjPlBUit9tmlQfetW7ekYsWK0qVLF2nVqtVdz+s/jPZ++eUX6dq1q6mAam/kyJHSrVs323ZSb0qswb0G3ATdAIC0Ki2mWGvH9x9//CELFy607dOARlPOdfTbfrRbq5ffK9gJCAgwj7hon5ESrKmf2jl06tSpRL+u/OzyfABp2K6OuxJ9rI5Enj592vwtcM+P1GyfXRp0a6+4PhISt2H+4YcfpEGDBlK0aFGH/RpkJ6XHEgAApIyZM2dKrly55Mknn7Ttq1Klivj5+cny5cttHeUHDhyQEydOSK1atbj0AACPkvaqsSRAe8d16REd6Y5rzJgxpsjKww8/bNLVo6Oj7/leWqRFUwHsHwAAIGm00JkG3R07dhRf3//rx9clwrS91lRxXUJMp5F17tzZBNxULgcAeJp0U0ht9uzZZkQ7bhp6r169pHLlypItWzZZv369mQ+maenjx49P8L0o1AIAwIPTtHIdvdZpYnFNmDDBpHDqSLd2djdt2lSmTp3KZQcAeJx0E3R/8cUX0q5dOwkMDHTYb19wpUKFCqZa6iuvvGIC6/jmhcVXqMU6Af5+vfk6Pw2eTdMlfXx8XH0aAJAmNGnSxBQlio+211OmTDEPZ4uJiTFLlCHtov0E4MnSRdD9559/mrlg8+fPv++xNWrUMOnlx44dk1KlSiWpUEtCNNg+evSoCbwBLQqkNQTSYkEjAGlDTGyMbL+wXS6GX5ScwTmlcq7K4uNNh11K04BfqxFrwTakfbSfADxVugi6dSkSLcqilc7vZ8eOHSadTYu6pFSDrunqOrqpo+H3WvQc7k3/FsLDw+XChf+WDtHlJgAgrj+O/yFjNo+R8+HnbftyB+eWAdUHSONCjblgKcgacGubHxwcTGdoGkX7CcDTuTTovnnzphw6dMi2raPJGjTr/Gxdl9Oa+r1gwQL58MMP73r9hg0bZNOmTaaiu
c731u0+ffpI+/btJWvWrClyjjpqroGWLi+hDTo8W1BQkPmqgbfe5JFqDiBuwN13VV+xiGPK9YXwC2b/+PrjCbxTMKXcGnBrMVWkbbSfADyZS4PurVu3moDZyjrPWqugzpo1y3w/b94800Patm3bu16vKeL6/PDhw02RliJFipig236+dko06krnigPK2vmi8wcJugHY2ovYGDPCHTfgVrrPS7xk7Oax0qBAA1LNU4B1Djcd4ukH7ScAT+XSoLt+/foJFmCxevnll80jPlq1fOPGjZIamL8L/hYA3IvO4bZPKY8v8D4Xfs4cVy20GheT9tnjcC8FwFMxQRkAgBSgRdNS8jgAAOAeCLqR4jTdv1KlSqnSY7548WKn/xwAuJ870Xfkt2O/JepCaTVzwB116tRJWrZs6erTAIA0h6A7Fef6bTm3RX4+8rP5qtvObvg0KLU+tMjM448/Ljt37hR3oVXlmzVrlujjtU6ALlcCAClp36V98tzS52T5yeX3PE7ndIcGh5rlw+DZ7NtoXb86d+7c8thjj8kXX3zB8qQA4IYIulOpmm3T75tKl1+7SP8/+5uvuq37nUmDbA1M9bF8+XLx9fWV5s2b37coTXqha2UnZb11AEhJ2nn6+a7P5YWfX5Aj145IjqAc8kqFV0xwrf+zZ93uX70/RdTg0EYfO3ZMfvnlF1NY9o033jDttK6cAgBwHwTdqbR8TNziOtblY5wZeGtAqoGpPjTde8CAAXLy5Em5ePGiaeS1h33+/Pny6KOPSmBgoHz99dfmdZ999pmUKVPG7CtdurRMnTrV4X379+8vJUuWNFVIixYtKkOGDLlnwH748GFzXI8ePUzhPOuIs6aGlyhRwvycpk2bmnOzN23aNClWrJipHF+qVCn58ssvE0wvt/4+CxcuNDcuem66rrsuI6dWrVolnTt3lmvXrtlGFzQNXunvZz0PHW343//+l0KfAAB3debmGen6W1eZuH2iRMdGS6OCjWThUwulx8M9zLJguYJzORyv63SzXBjia6Pz5ctnCsMOGjRIfvjhBxOAW1dw0SXRXnrpJcmZM6dkzpxZGjZsKH///fdd07l0hFyXWs2YMaO8/vrrZuWVcePGmffXJdVGjx7t8LPHjx8v5cuXlwwZMkiBAgXMa3QZVytrO/3rr7+a+wF9X2sngZX+DF0tRo/TbLq33377vsVxAcBTubR6eXqkDcrt6NuJHgUJ2xyW4PIxSpeXqRFaI1EjH0G+Qcmu/KmN6VdffSXFixc3jeOtW7fMfg3EdQ30hx9+2BZ4Dx06VD7++GOz76+//pJu3bqZhlmXclO6Jro2yLp2+a5du8zzuk8b3Lg0nV0D6q5du8qoUaNs+3Xtc70JmDNnjgmqtcF//vnnZd26deb5RYsWmR7/iRMnSuPGjWXp0qUmaM6fP7/DMnNxvfPOO/LBBx+YIFq/16XmdC342rVrm/fS3+3AgQPmWL2J0GXrevXqZQJ6Peby5cvy559/JusaA/CMNmDpkaXy3qb35GbUTQn2DZYB1QdIy+Itbf8+Ny7U2CwLplXKtWiazuHWlPLE/DsPz6ZBtXYYaweyBtvPPvusWd9aA/GQkBD55JNPpFGjRvLPP/9ItmzZbB3b+vyyZcvM99pxfOTIEdM5vnr1alm/fr106dLFtKU1atQwr/H29pZJkyaZpVb1WG2DtQ2372TXdlrbU20f9fj27dtLv379bB30eu+g9wIa8GtgrtvaduvvAABwRNCdRBpw15j7X6OVEnQEvPa82ok6dtMLmyTY7781ohNDA1UNLJUG2Xny5DH7tPG06t27t7Rq1cq2PWzYMNNwWvdpg7x3717T0FuD7sGDB9uOL1y4sGmEdb30uEG3NvSaJqfB75tvvunwnI6Ma2BvvQGYPXu2abQ3b94s1atXNw29znnTGwGlvem6PJzuv1fQrefy5JNPmu9HjBgh5cqVM0G3jtjrDYveFGvPv9WJEydMh4Kep3YcFCpUyHQ2A
EBc1yKuyaiNo2TZsWVmu2LOihJWN0wKZC5w17EaYLMsWOqrWrWqnDt3LtV/rrYr2ombErS90g7rtWvXmjbxwoULtqlU2gZqhtd3331nW041NjbWBL7ahpUtW9a0kdq5/PPPP5v2XjPFxo4dKytXrrS1udr227fj2in+6quvOgTd2k5Pnz7dZJwpzVYbOXKk7XntyB44cKDtfkGP1ZFxAMDdCLrdmDa8mqKtrly5YhpTLTymjbj9DYqVBubaS66j0jp6baVzyzRgtdKUdO0h12N1BF2f17Q3exrMalEYHc22b9ytdH55tWrVHG4yNEVt3759JujWr3HXZ69Tp4589NFH9/ydK1SoYPteOxmU3rDo+8dHz1EDbU1/19Q5fTzzzDMmPR0ArDad3STvrH3HdJT6ePnIqxVflZfKvyS+3jSjaYkG3KdPn5b0nk2hHcSaRq5trGan2bt9+7Zpf+2DZg24rXSalI+Pj0MHu+7TttDqjz/+kLCwMNm/f79cv37dtON37twxo9vW9k+/WgNua5tqfQ+dqqWp5tYg3tqu6z0FKeYAcDfuFpJIU7x1xDkxtp3fJq8v/2+k9l6mNpoqVXJXSdTPTgodwdV0ciudq63B86effmrS1qzHWFnnc+nz9g2p0gZc6Rzpdu3amVFkTRvX99NRbh0dt6fzzzT9/JtvvjFpbXGDcmfRKrBW1lRPHQVIiN6obN++3cz5/u2330z6uc6R27JlC5XOAUhETIRM2j5J5uydY65GocyFzOh2+ZzluTppkH0mU3r9udrprFlm2iZroKvtU1z2K3HYt3vKWhE97j5rW6g1UDS767XXXjMd45qmrqPq2uEeGRlpC7rjew8CagBIHoLuJNJGJ7Ep3rXz1jbFc7RoWnzzurWarT6vx6XGXD89d+351l7y+GhPuAbKOr9LA+v4aMq4jgxryrjV8ePH7zpO56BpKvsTTzxhgnMNaO174rVXXVPxdFRbaSqcFozRFHOlX3V+tzWlXem2ps4ll84d18IvcWnvvM5104em1+vNzIoVKxzS7gF4nn+u/CMD/hwgB68cNNttSraRN6u+maRpPkhdKZXi7Sra9mitlD59+pgaJjpyr22UjmanlG3btpkAXDvLraPh3377bZLeQzvctUNg06ZNUq9ePVu7ru+tReEAAI4Iup1IA2ktsKNVyjXAtg+8U2P5mIiICNvcNk0v1znU2nPeokWLBF+jI9haWEwbVE211vfQmxh9vc6r1gJlmjquo9uaHv7TTz+Zwinx0VF0fV5T2vWhRV6sc8y1B71nz54mTV1vKHSuWM2aNW1B+FtvvSVt2rQx86s1GF6yZIkpLKMpccmlNy36++vyaVqoRnvz9QZHOxn0piFr1qxmDpzejOgcOACeKdYSK1/u/VI+2v6RRMVGSbbAbDKi9gipX6C+q08NbsTaRmtn8Pnz500bqSnfOgrdoUMHExDXqlVLWrZsaSqRa2G0M2fOmHZVp0HZTw9LCs2A0/nakydPNvcD2qGt87GTSoudjhkzxtwX6BQurYiunecAgLuxZJiTaRVbVy0fow249kTrQ9PFNWV6wYIFUr9+wjeOmnauaegzZ840y4nocmJanVRT3dRTTz1leuA1SNZlSnTkW5cMS4gG2VpVVVPStMCZtWq6Bry69NgLL7xg5mrrcTpX3EpvMnT+thaN0WJoWshNz+le534/Wp1cC8U899xzJv1db2J0VFuDea22qqPreuOhKfH6MwF4nnO3zsnLv70sH2z9wATcj+Z/VL5/6nsCbjitjdYOYe3k1kJn2hGty4bplC7NTtOOYO0U1tU7NOjWVT40u0wz05JLO501QNbiag899JCpRq7BflJpgdQXX3zRZKRp54Bms2lnAADgbl4WJuiYIiI6squFQeLOPdbCIkePHjVBpy6plVy6fBjLx/xHg3gtrpZee8RT6m8CQNqiVclHbhgpNyJvmBoa/ar2k2dLPpvspRpTq51yZ6nRPiP1uPoz0
5R9LbSna6OfOnUq0a8rP5saDmnZro67nP43ADxo+0x6eSph+RgASJs0yA7bFCZLjiwx2w9lf0jCHgmTwiEpN48WAAB4LoJuAIDH0lUmBv05SM7cOiPeXt7SrXw3eaXiK+Ln7Vi5GQAAILmY041U16lTp3SbWg7APUTFRMnEbROl87LOJuDOnzG/zH58tvR4uAcBNwAASFGMdAMAPMqRq0fMUmD7Lu8z288Uf8asJJHBL4OrTw0AALghgm4AgEfQuqHf7P9Gxm8bLxExEZIlIIsMqzXMqatIAAAAEHQDANzexfCLMmT9EFl3ep3ZrpO3jrxb513JGZzT1acGAADcHEE3AMCtLT++XIZvGC5XI65KgE+A9K3SV9qWbpsmlgIDAADuj6DbCU5fvS1XbkUm+XVZM/hLvixBzjglAPA4t6JuydjNY2XRoUVmu3S20jLmkTFSLEsxV5+aW9C1bvv37y+//PKLhIeHS/HixWXmzJlStWpVWzr/sGHD5NNPPzXFM+vUqSPTpk2TEiVKuPrUAQBIVQTdTgi4G36wSiKiY5P82gBfb1nRrz6BNwA8oB0XdsjAPwfKqZunxEu8pMtDXaR7pe7i58NSYCnhypUrJohu0KCBCbpz5swpBw8elKxZs9qOGTdunEyaNElmz54tRYoUkSFDhkjTpk1l7969EhgYmCLnAQBAekDQncJ0hDs5AbfS1+nrGe0GgOSJio2ST/7+RD7d9anEWmIlT4Y88l7d96Rq6H+jr0gZY8eOlQIFCpiRbSsNrK10lHvixIkyePBgefrpp82+OXPmSO7cuWXx4sXy/PPP81EAADyGS4PuNWvWyPvvvy/btm2Ts2fPyqJFi6Rly5YO6zlrD7k97SVftmyZbfvy5cvSs2dPWbJkiXh7e0vr1q3lo48+kowZM4onq1+/vlSqVMnc9CTHnj17ZOjQoeazOX78uEyYMEF69+6d4ucJACnl2LVjZnR796XdZrt50eYyqMYgyeSfiYucwn788UfTHj/77LOyevVqyZcvn7z++uvSrVs38/zRo0fl3Llz0rjx/1WGDwkJkRo1asiGDRucGnSXn11eUtOujruS/Br7+xs/Pz8pWLCgdOjQQQYNGiS+voyHAIC78XblD79165ZUrFhRpkyZkuAxjz/+uAnIrY9vvvnG4fl27dqZAPH333+XpUuXmkD+5ZdfToWzd286P69o0aIyZswYCQ0NdfXpAECCdFR1wT8LpM3SNibg1iD7/XrvS9gjYQTcTnLkyBHb/Oxff/1VXnvtNenVq5ctkNSAW+nItj3dtj4Xn4iICLl+/brDw11Z7280Lf/NN9+U4cOHm4EIV4uMTHpNGgBAGg66mzVrJqNGjZJnnnkmwWMCAgJM0Gd92M8X27dvnxn1/uyzz0zved26dWXy5Mkyb948OXPmjHgq7UHXkQcd8dfqvPo4duxYkt6jWrVqpvHX0Qj9DAAgLbp0+5L0WtFLRm4YKbejb0uN0Bqy8KmF8niRx119am4tNjZWKleuLO+99548/PDDprNbR7mnT5/+QO8bFhZmRsStD01hd1fW+5tChQqZTgvNCtAMAp0vr6Peer8THBxs7pU0MLd2MOn8+e+++872PprVlidPHtv22rVrzXtr57nSInYvvfSSeV3mzJmlYcOG8vfff9uO12Bf30PvpXSKAPPtAcDNgu7EWLVqleTKlUtKlSplGqVLly7ZntMUtSxZstgqpSpttDTNfNOmTR7bk67Bdq1atcwNkDVDQG9cNOX+Xo9XX33V1acOAIm2+uRqafVjK1l1apX4efvJW1XfkhlNZkhoBrJznE2DvLJlyzrsK1OmjJw4ccJ8b82QOn/+vMMxun2v7KmBAwfKtWvXbI+TJ0+KpwgKCjKjzNpxvnXrVhOA632OBtpPPPGEREVFmU70evXqmXsjpQG6DkDcvn1b9u/fb/Zpp7t2nGvArnQKwIULF0zBO50ypp0ljRo1MtPzrA4dOiTff/+9LFy4U
Hbs2OGiKwAA7ss3radetWrVyvS8Hj582Mx10h5fbYR8fHxMipoG5PZ0LlS2bNnumb6mPekjRowQd6WjA/7+/qbBtb+5uV9Dqj3gAJDWhUeFywdbPzAp5apE1hISVjdMSmUr5epT8xhaufzAgQMO+/755x8zaqu03db2Z/ny5WYUVWkHt3aIawd6QnSE1tOyqzSo1uukafp6j6OF5tatWye1a9c2z3/99dem41z3awCtNVs++eQT85xOqdNMA73WGoiXLl3afH300Udto96bN282Qbf1un7wwQfmvXS03DodT4N9LXSno+EAAA8Luu0LrZQvX14qVKggxYoVMw2K9tIml/ak9+3b17atNwLunMJmpWuoAkB6tvvf3TLgzwFy/Ppxs92hbAfpVbmXBPh4VqDman369DFBoaaXt2nTxgR2M2bMMA+lI7JafFOnkOm8b+uSYXnz5nUomOrJtA6NZpnpCLam67/wwgtmoEH365Q5q+zZs5tsPx3RVhpQv/HGG3Lx4kUzqq1BuDXo7tq1q6xfv17efvttc6ymkd+8edO8hz0dGdfBDCvtLCHgBgAPDbrj0sJeOXLkMGlQGnRrI6O9t/aio6NNytS90tc8sSdd3a+ie/v27R94Ph4AOEN0bLR8vutzmf73dIm2REuu4Fwyuu5oqZmnJhfcBTR9WVcc0U7skSNHmqBaV8vQ4qZWGvhpwVQdTdV5xVp3ReuwMGf4P7rGuRaj08w07YzQTD1NKb8fHYTQjD4NuPUxevRoc8+jy7ht2bLFBPHWUXINuHUqgDUd3Z5Oz7PKkCFDivxdAADcIOg+deqUmdNtLRii85a1Idc5SlWqVDH7VqxYYXqM7XuJPZE24jExMQ77SC8HkB6dvHFSBv05SHZc/G+KTNPCTWVIzSESEhDi6lPzaM2bNzePhOhotwbk+sDdNNCNm4Gm8+J18EDT8K2Bs973aCq/dQ69XtdHHnlEfvjhB7N6i3Zm6HQyrVejaeda58YaROv8bZ1upwF94cKF+RgAwBODbu2B1VFrK13XUwND7cHVh8671nW3tQdX06C011wbKF0b1No46bxva8VU7d3t0aOHSUvXXmNPpo2rNtpatVxHuPV6JiW9XOd37d271/b96dOnzWej70WaOoDUmuu6+NBiGbN5jIRHh0tGv4xm3W1df1sDD8DdaCr+008/be5rNIDOlCmTDBgwwKyDrvutNKVclxnTANuaxaYF1nT+91tvveVQXFYHKDSlf9y4cVKyZEmzustPP/1kVo6xL0QLAHDT6uVanVMLgOhD6Txr/X7o0KGmUNrOnTvlqaeeMo2EzlPS0ew///zTITVcGxgtHKLp5lrdU3t8rXPKPFm/fv3MNdSecZ2nZa0om1jaKFs/G61+roVX9HtddgQAnO3qnavSd1VfGbp+qAm4q+SuIt8/9b20KNaCgBtubebMmeZ+R7MINGDWzqeff/5Z/Pz8bMfovG7NZtPg20q/j7tPO6f0tRqQd+7c2dxP6cDE8ePH71pDHQDgPF4W/dfcw2khNa34rcuTxK3gfefOHTMCn9i1K3efvibNJ69N9rks7VlXHspHymRaltS/CQBJs+70OhmybohcvH1RfL19pUelHtKpXCfx8fbx2Et5r3bKnaVk+wzXc/Vnlj9/fpO5p5kDOmUxscrPLu/U88KD2dVxl9P/BoAHbZ/T1ZxuAID7uhN9RyZsmyBz988120VDikrYI2FSNrvjetAAAADpCUF3CsuawV8CfL0lIjo2ya/V1+nrAcDT7Lu0zywFduTaEbPdtnRb6VulrwT6MoIJAADSN4LuFJYvS5Cs6FdfrtyKTPJrNeDW1wOAp4iJjZFZe2bJxzs+NsuC5QjKIe/WeVfq5qvr6lMDAABIEQTdTqCBM8EzANzbmZtnZNDaQbLt/Daz3ahgIxlWa5hkDczKpQMAAG6DoBsAkKq0fudPR3+S0RtHy82omxLsGywDqg+QlsVbUpkcAAC4HYJuAECqu
RZxTUZtHCXLji0z2xVzVpSwumFSIHMBPgUAAOCWCLoBAKli09lN8s7ad+R8+Hnx8fKRVyu+Ki+Vf8ksCwYAAOCuuNNxhqsnRcIvJf11wdlFsjDaA8C9RMZEyqTtk2T23tlmu1DmQmZ0u3xO1r4FAADuj6DbGQH3x1VEoiOS8WkEiPTYRuANwG38c+UfsxTYwSsHzfazJZ+VflX7SbBfsKtPDQAAIFV4p86P8SA6wp2cgFvp65IzQg4AaUysJVbm7JkjbZe2NQF3tsBsMrnhZBlaaygBN5AKChcuLBMnTuRaA0AawEi3m6pfv75UqlQp2Q3up59+KnPmzJHdu3eb7SpVqsh7770n1atXT+EzBeBuzt06J4PXDTZzuNWj+R+V4bWHmzW4AWe7OPnjVL3IOXv2SPJrOnXqJLNn/zfdQmXLlk2qVasm48aNkwoVKqTwGQIAXI2RbsRr1apV0rZtW1m5cqVs2LBBChQoIE2aNJHTp09zxQAkSKuSt/6xtQm4A30CZUjNIWaEm4AbcPT444/L2bNnzWP58uXi6+srzZs35zIBgBsi6HZD2oO+evVq+eijj8yat/o4duxYkt7j66+/ltdff92MlpcuXVo+++wziY2NNTcGABDXjcgbMujPQfLW6rfkeuR1KZe9nHzb4ltpU6oNa28D8QgICJDQ0FDz0LZ2wIABcvLkSbl48aJ5vn///lKyZEkJDg6WokWLypAhQyQqKsrhPZYsWWJGyAMDAyVHjhzyzDPPJHittR3PkiWLace1Y13vDa5evWp7fseOHQ73C7NmzTLHL168WEqUKGF+RtOmTc05AgCShqDbDWmwXatWLenWrZutF11HqjNmzHjPx6uvvprge4aHh5vGXlPgAMDetvPb5H8//k+WHFki3l7e8kqFV+TLJ76UIiFFuFBAIty8eVO++uorKV68uGTPnt3sy5Qpkwl89+7da9p1nfY1YcIE22t++uknE2Q/8cQT8tdff5lgOqEpYJq2rkH9b7/9Jo0aNUr0Z6Jt/+jRo810s3Xr1pkg/fnnn+czBYAkYk63GwoJCRF/f3/TO6496Pa92PeSOXPmBJ/THve8efNK48aNU/RcAaRfUTFRMmXHFPli9xdiEYvky5hPxjwyRirlquTqUwPSvKVLl5oOb3Xr1i3JkyeP2eft/d94yODBgx2KovXr10/mzZsnb7/9ttmnwbAGwCNGjLAdV7FixXjb7y+//NJkwJUrVy5J56id7R9//LHUqFHDbOs89DJlysjmzZup8QIASUDQ7UG0Bz05xowZYxp6TUfT9DIAOHL1iFkKbN/lfeZitCzeUgZUHyAZ/DJwcYBEaNCggUybNs18f+XKFZk6dao0a9bMBLSFChWS+fPny6RJk+Tw4cNmJDw6Otqhc1w70jWj7V4+/PBDE9Bv3brVpKgnlc4z1/R1K51upinn+/btI+gGgCQgvdyDJCe9/IMPPjBBt6akUVEVgMVikW/2fyNtlrYxAXdIQIhMqD9B3q3zLgE3kAQZMmQwneH60MBW51xrgKxp5FrAtF27diZ1XEe/NX38nXfekcjISNvrg4KC7vszHnnkEYmJiZFvv/3WYb91NF3/e7aKO18cAJByGOl2U5perg2tvaSml+scME1f+/XXX6Vq1apOOU8A6cfF8IsyZP0QWXd6ndmuk7eOjKwzUnIF53L1qQHpnhYx02D49u3bsn79ejParYG21fHjxx2O145wncfduXPnBN9T53j36NHDVErXUWtNUVc5c+Y0X7XmS9asWRO8R9DRdR0lt84VP3DggJnXrSnmAIDEI+h2Uzr/a9OmTaYKqY5iawG0pKSXjx07VoYOHSpz584173Xu3Dmz3zoqDsCzLD++XIZvGC5XI65KgE+A9KnSR14o/QKVyYFkioiIsLWtml6uc6c1jbxFixZy/fp1OXHihJnapaPgWjRt0aJFDq8fNmyYKYpWrFgxM7dbA+Sff/7ZzOG2V7t2bbNfU9c18O7du
7e5H9ACq8OHDzed6//8849JRY/Lz89PevbsadLc9bUawNesWZPUcgBIItLL3ZT2Zvv4+EjZsmVNj7Y23kmh88w0je1///ufKe5ifWi6OQDPcSvqlgxbP0x6r+ptAu7S2UrL/ObzpV2ZdgTcwANYtmyZrW3VQmVbtmyRBQsWSP369eWpp56SPn36mCBXlxPTkW9dMsyeHqfH//jjj+aYhg0bmvng8albt64J3LU42+TJk00w/c0338j+/fvNiLl2tI8aNequ12lBVg3iX3jhBalTp47pdNe55gCApGGk203p2p46Jyy5krquNwD3s+PCDhn450A5dfOUeImXdH6os/So1EP8fPxcfWpAgnL27JHmr44uBaaPe9EpXvqwp6PU9lq1amUeiWnH69WrZ0bSrTSI3rlzp8Mx9nO8E/MzAACJQ9ANAHAQFRslM3bOMI9YS6zkyZBH3qv7nlQNpbYDAABAUhF0p7Tg7CK+ASLREUl/rb5OXw8ALnL8+nEzur3r311mu3nR5jKoxiDJ5J+JzwQAACC9zeles2aNKRiSN29eMzdw8eLFDktX6Dyi8uXLm2U19JgOHTrImTNnHN5Di3zpa+0fusSVy2QpINJjm8jLq5P+0Nfp6wEglWla6YJ/FsizS541AbcG2ePqjZOwR8IIuBEvLcIVt/3VdZyt7ty5I927d5fs2bObucCtW7eW8+fPczXTiU6dOplK5QCAdD7SretRVqxYUbp06XLXfKHw8HDZvn27KRyix2hlzzfeeMMUF9HlK+yNHDlSunXrZtvOlMnFIzIaOBM8A0gnLt2+JMPXD5dVp1aZ7RqhNWRU3VESmiHU1aeGNK5cuXLyxx9/2La1wrWVFgLT4l1a7CskJMQUBdO2ft26/5acAwDAU7g06NblK/QRH22gf//9d4d9upyGrhWplbgLFizoEGSHhnJzCABJtebUGhmybohcvnNZ/Lz95I3Kb8iLZV8Uby8Wt8D9aZAdX/t77do1+fzzz82yk1pVW82cOdOs77xx40az7BQAAJ4iXc3p1kZc09eyZMnisF/Tyd99910TiOuyFtq7bt/bHt/amPqw0vUwAcCThEeFy4dbP5Rv//nWbBfPUlzGPDJGSmUr5epTQzpy8OBBM/0rMDBQatWqJWFhYaYt3rZtm5km1rhxY9uxmnquz+nKGgkF3clpn2NjY1Pot4Gz8VkhrTh79qzkz5/f1acBF9NO47gZ1OLpQbfODdM53m3btpXMmTPb9vfq1UsqV64s2bJlM+tYDhw40PyHNH78+ATfS28KRowYkUpnDgBpy55/98iAPwfIsev/LSnUoWwH6VW5lwT4BLj61JCO6NrSuuxVqVKlTLur7eojjzwiu3fvlnPnzom/v/9dneS5c+c2z6VE+6zv7+3tbWq95MyZ02xrxzzSZs2IyMhIuXjxovnM9LMCXME6BVU7gE6fPs2HgFSTLoJu7S1v06aN+Ud72rRpDs/17dvX9n2FChXMP+SvvPKKabgDAuK/gdTA3P512pNeoEDKFTA7e/OsXIm4kuTXZQ3IKnky5kmx8wAAe9Gx0fLF7i9k2o5pEm2JllzBuWR03dFSMw+pvkg6++lh2v5qEF6oUCH59ttvJSgoKFmXNCntswZvRYoUMQF/3CKrSJuCg4NNtoN+doAraGas1ou6ceNGkl53PpwikGlZ7uDcyXpdak5P9k0vAffx48dlxYoVDqPc8dFGPzo6Wo4dO2Z63+OjwXhCAXlKBNzNFzeXyJjIJL/W38dflrZcSuANIMWdvHFSBv05SHZc3GG2mxZuKkNqDpGQgBCuNlKEjmqXLFlSDh06JI899pgZ2dTq1/aj3Vq9/F43OUltn7WjXYM4bfdjYmIe+HeA8/j4+Jipf2QjwJX+97//mUdSlZ9d3inng5Sxq+N/y5ymZb7pIeDWOWMrV640y47cz44dO0wPaq5cucQVdIQ7OQG30tfp6xntBpBSNEPoh8M/SNimMAmPDpeMfhnNu
tu6/jY3v0hJN2/elMOHD8uLL74oVapUET8/P1m+fLlZKkwdOHDAFELVud8pSf+O9WfpAwCAtMjX1Q209ohbHT161ATNOj87T548pidKlw1bunSp6cG2zgPT57V3W4uxbNq0SRo0aGDmaOi2FlFr3769ZM2aVTxZ/fr1pVKlSjJx4sRkvX7hwoXy3nvvmc9HOz9KlCghb775prmZApA+XL1zVUZsGCF/nPhvSafKuSrLe4+8J/ky5nP1qcEN9OvXT1q0aGFSyjW9e9iwYWY0U2uv6AokXbt2Nani2mZrllrPnj1NwE3lcgCAp3Fp0K3V4jRgtrLO4+rYsaMMHz5cfvzxR7OtwaM9HfXWoFJT0ObNm2eO1WqnOrdLg277+WBIHr1Jeuedd0y1We3g0I6Pzp07mwyCpk2bclmBNG796fUyeN1guXj7ovh6+0r3St2lc7nO4uPt4+pTg5s4deqUCbAvXbpkCpnVrVvXLAem36sJEyaYzDMd6dY2WtuOqVOnuvq0AQDwrKBbA2dNfUzIvZ5TWrVcG3g46tSpk6xevdo8PvroI1sWQeHChZP02dh74403ZPbs2bJ27VqCbiANuxN9RyZunyhf7/vabBcJKWKWAiubvayrTw1uRju970WXEZsyZYp5AADgySgf6YY00NYUvm7dupmqrvrQ6q8ZM2a85+PVV19NsPND5+XpfLx69eql+u8DIHH2X94vzy993hZwty3dVuY3n0/ADQAA4EJpupAakkfn0mlKuC7NYV8lVufL30vcyvDXrl2TfPnymbRAnaenaYFakRZA2hITGyOz986WyX9NNsuC5QjKISNrj5RH8j/i6lMDAADweATdHqR48eJJOl6L02mgrgXvdKRb58oXLVr0rtRzAK5z5uYZeWftO7L1/Faz3bBAQxlee7hkDfTsYpIAAABpBUG3B9EU8nvRqu/Tp0+3bWsBHGugrsXs9u3bJ2FhYQTdQBqx9MhSGb1xtNyMuinBvsEyoPoAaVm8JUuBAQAApCEE3W5K08t1mTV7SU0vjys2NtakmgNwrWsR10yw/cuxX8x2xZwVJaxumBTIXICPBgAAII0h6HZTWqlc1zA/duyYGeHWJcCSkl6uI9pVq1aVYsWKmUD7559/li+//FKmTZvm1PMGcG+bz26WQWsHyfnw8+Lj5SOvVnxVXir/klkWDAAAAGkPd2luql+/fma987Jly8rt27eTvGTYrVu35PXXXzfrsAYFBZn1ur/66it57rnnnHregNu7elIk/FKSXxYZmFkmHV4oc/bOEYtYpGCmgmYpsPI5yzvlNAEAAJAyCLrdVMmSJWXDhg3Jfv2oUaPMA0AKB9wfVxGJTsY0DS9v+TV/qFh8feV/Jf8nb1V9S4L9gvl4AAAA0jiCbgBILTrCnZyAW+s0WGKlkE9Geafh+1K/ACsIAAAApBcE3Sksa0BW8ffxl8iYyCS/Vl+nrweA+Lz/6PuSlYAbAAAgXSHoTmF5MuaRpS2XypWIK0l+rQbc+noAiP/fiCxcGAAAgHSGoNsJNHAmeAYAAAAAeHMJEsdisXCpwN8CAAAAgCQh6L4PHx8f8zUyMulztOGewsPDzVc/Pz9XnwoAAACANI708vtdIF9fCQ4OlosXL5ogy9ubfgpPznbQgPvChQuSJUsWW4cMAAAAACSEoPs+vLy8JE+ePHL06FE5fvz4/Q6HB9CAOzQ01NWnAQAAACAdIOhOBH9/fylRogQp5jDZDoxwAwAAAEgsgu5E0rTywMDARF9YAIhr87nNUp3LAgAA4FEIugHAycKjwmXslrGyb/c8+ZarDQAA4FEIugHAif6++LcM/HOgnLxxUspypQEAADwOpbgBwAmiYqNk6o6p0vGXjibgzpMhjwytNYxrDQAA4GEY6QaAFHb8+nEzur3r311m+8miT8qgGoMkc/g1Ed8AkeiIpL+pvi44O58VAABAOkPQDQApuJb79we/l3Fbxsnt6NuSyT+TDKk5RJoVafbfA
f6ZRXpsEwm/lPQ314A7SwE+KwAAgHTGpenla9askRYtWkjevHnNetiLFy++6wZ26NChZp3soKAgady4sRw8eNDhmMuXL0u7du0kc+bMZv3krl27ys2bN1P5NwHg6S7dviS9VvaSERtGmIC7emh1WfjUwv8LuK00cM5bKekPAm4AAIB0yaVB961bt6RixYoyZcqUeJ8fN26cTJo0SaZPny6bNm2SDBkySNOmTeXOnTu2YzTg3rNnj/z++++ydOlSE8i//PLLqfhbAPB0a06tkVY/tpJVJ1eJn7ef9KvaTz5t8qmEZgh19akBAADAk4PuZs2ayahRo+SZZ5656zkd5Z44caIMHjxYnn76aalQoYLMmTNHzpw5YxsR37dvnyxbtkw+++wzqVGjhtStW1cmT54s8+bNM8cBgDPpiPaojaOk+/LucvnOZSmepbh88+Q30rFcR/H2ok4lPMeYMWNMxlrv3r1t+7SDvHv37pI9e3bJmDGjtG7dWs6fP+/S8wQAwBXS7F3h0aNH5dy5cyal3CokJMQE1xs2bDDb+lVTyqtWrWo7Ro/39vY2I+MJiYiIkOvXrzs8ACAp9vy7R9osaSPzD8w32y+WfVHmNZ8npbKV4kLCo2zZskU++eQT0zlur0+fPrJkyRJZsGCBrF692nSGt2rVymXnCQCAq6TZoFsDbpU7d26H/bptfU6/5sqVy+F5X19fyZYtm+2Y+ISFhZkA3vooUIDiRAASJyY2RmbsnCHtf24vx64fk1zBuWTGYzPk7WpvS4BPAJcRHkVrqOg0r08//VSyZs1q23/t2jX5/PPPZfz48dKwYUOpUqWKzJw5U9avXy8bN2506TkDAJDa0mzQ7UwDBw40NwTWx8mTJ119SgDSgVM3TknnXzvL5L8mS7QlWpoUamKKpdXKW8vVpwa4hKaPP/nkkw5ZaWrbtm0SFRXlsL906dJSsGBBW7ZafMhEAwC4ozS7ZFho6H8FiHT+l1Yvt9LtSpUq2Y65cOGCw+uio6NNRXPr6+MTEBBgHgCQGFpj4sfDP0rY5jC5FXVLMvhlkHdqvCPNizY381gBT6T1U7Zv327Sy+PSbDN/f38zBSyhbLWEMtFGjBjhlPMFAMBV0uxId5EiRUzgvHz5cts+nXutc7Vr1fpvVEm/Xr161fSoW61YsUJiY2PN3G8AeFBX71yVN1e/KYPXDTYBd+VcleX7p76XFsVaEHDDY2mG2BtvvCFff/21BAYGptj7kokGAHBHvq6eC3bo0CGH4mk7duwwc7I1BU2roGp18xIlSpggfMiQIWZN75YtW5rjy5QpI48//rh069bNLCumqWw9evSQ559/3hwHAA9i/en1Jti+ePui+Hr5SveHu0vncp3Fx9uHCwuPpp3dmmlWuXJl276YmBizbOfHH38sv/76q0RGRpqOcfvRbs1WIxMNAOBpXBp0b926VRo0aGDb7tu3r/nasWNHmTVrlrz99ttmLW9dd1sbbl0STJcIs+9V1152DbQbNWpkqpbrkiS6tjcAJNed6DsycftE+Xrf12a7SEgRGfPIGCmbvSwXFRAxbe6uXbscrkXnzp3NvO3+/fubAqV+fn4mW03bZXXgwAE5ceKELVsNAABP4dKgu379+mauZEJ0ruTIkSPNIyE6Kj537lwnnSEAT7P/8n4ZsGaAHL522Gw/X+p56Vu1rwT5Brn61IA0I1OmTPLQQw857MuQIYNZk9u6v2vXrqYzXdvpzJkzS8+ePU3AXbNmTRedNQAArpFmC6kBQGovBTZn7xyZ9NckiY6NlhxBOWRk7ZHySP5H+CCAZJgwYYItA02rkjdt2lSmTp3KtQQAeByCbgAe7+zNszJo7SDZen6ruRYNCzSUYbWHSbbAbB5/bYDEWrVqlcO2TgWbMmWKeQAA4MkIugF41Gj29gvb5WL4RckZnNNUIl92bJmM3jhabkTdMCnkA6oPkGeKP0NlcgAAAKSIRAfdulxXYuncLQBIS/44/oeM2TxGz
oeft+0L9AmUOzF3zPcVclaQMXXHSIHMBVx4loDz6AohuhIIAABIo0G3Lvmhhc3uRYui6TG6bAgApKWAu++qvmIRx8KN1oC7aeGmpjq5rzfJP3BfxYoVk0KFCplVQ6yP/Pnzu/q0AABwe4m+w1y5cqVzzwQAnJRSriPccQNue39f+Fu85N6dikB6t2LFCjPvWh/ffPONWUe7aNGi0rBhQ1sQnjt3blefJgAAnht0P/roo849EwBwAp3DbZ9SHp9z4efMcdVCq/EZwG3pMp36UHfu3JH169fbgvDZs2dLVFSUWWd7z549rj5VAADcindyX/jnn39K+/btpXbt2nL69Gmz78svv5S1a9em5PkBwAPZ82/iAggtrgZ4Cq0sriPcgwcPlhEjRkivXr0kY8aMsn//flefGgAAbidZQff3339v1tsMCgqS7du3m/U31bVr1+S9995L6XMEgCS7EXlDxm0ZJxO2TUjU8VrNHHB3mlK+Zs0aE2hrOrnWa3n11VflypUr8vHHH5tiawAAIGUlq2rQqFGjZPr06dKhQweZN2+ebX+dOnXMcwDgKrGWWPnh0A8ycftEuXznstkX4BMgETH/dQ7GpXO5cwfnNsuHAe5MR7Y3bdpkKpjrlLFXXnlF5s6dK3ny5HH1qQEA4NaSFXQfOHBA6tWrd9f+kJAQuXr1akqcFwAk2d8X/5Yxm8bI7ku7zXbhzIXNutu3o2+b6uXKvqCatXha/+r9xcfbhysOt6bTwjTA1uBb53Zr4J09e3ZXnxYAAG4vWenloaGhcujQobv263xurYQKAKlJ52O/s/Ydaf9zexNwZ/DLIP2q9pOFTy2UOvnqSONCjWV8/fGSKziXw+t0hFv36/OAu9NO8RkzZkhwcLCMHTtW8ubNK+XLl5cePXrId999JxcvUtcAAIA0M9LdrVs3eeONN+SLL74w63KfOXNGNmzYIP369ZMhQ4ak/FkCQDyiYqLkq31fyfS/p0t4dLjZ17J4S3mj8huSIyiHw7EaWDco0MBUKdcgXedwa0o5I9zwFBkyZJDHH3/cPNSNGzdMZ7kuCTpu3Dhp166dlChRQnbv/i9TBAAAuDDoHjBggMTGxkqjRo0kPDzcpJoHBASYoLtnz54pdGoAkLA/T/1pCqUdu37MbFfIUcGkkpfPWT7B12iAzbJgwP8F4dmyZTOPrFmziq+vr+zbt4/LAwBAWgi6dXT7nXfekbfeesukmd+8eVPKli1rlhsBAGc6fv24CbbXnFpjtrMHZpc+VfpIi2ItxNsr2asgAm5PO8u3bt1q1uXW0e1169bJrVu3JF++fKaS+ZQpU8xXAACQBoJuK39/fxNsA4Cz3Yq6JTN2zpA5e+dIdGy0+Hr7Svsy7eWVCq9IRn86/ID70eXBNMjWuiwaXE+YMMEUVCtWrBgXDwCAtBZ0a2Oto90JWbFixYOcEwDYWCwWWXpkqVlv++Lt/wo9aXG0/tX6S5GQIlwpIJHef/99036XLFmSawYAQFoPuitVquSwHRUVJTt27DDFVzp27JhS5wbAw+25tEfCNoWZpcBUgUwFTLBdL3+9e3b8AbibrtGtj/vRIqkAAMDFQbempMVn+PDhZn43ADyIS7cvyeS/JsvCgwvNutpBvkHycoWXpUPZDuLv48/FBZJh1qxZUqhQIXn44YdNBgkAAEgHc7rjat++vVSvXl0++OCDlHxbAB4iKjZK5u+fL1N3TJUbUTfMvuZFm0vvyr0ld4bcrj49IF177bXX5JtvvpGjR49K586dTZutlcsBAIBzpWipX12rOzAwMCXfEoCH2HBmgzz747MydstYE3CXyVZG5jSbI2GPhBFwAylAq5OfPXtW3n77bVmyZIkUKFBA2rRpI7/++isj3wAApLWR7latWjlsa5qaNuS6FMmQIUNS6twApHFnb56VKxFXkvy6rAFZJU/GPOb7UzdOyQdbP5DlJ5bbnutVuZc8U/wZs642gJQTEBAgbdu2NY/jx4+blPPXX
39doqOjZc+ePSz9CQCAq4PuI0eOSOHChSUkJMRhv7e3t5QqVUpGjhwpTZo0SelzBJBGA+7mi5tLZExkkl+r87IXNF8gPx/9WWbunimRsZHi4+UjbUu3lVcrviohAY7/xgBIedp2a0FC7TiPiYnhEgMAkBaC7hIlSpgR7ZkzZ5rt5557TiZNmiS5cztvrqUG+dobH5f2zGuqnK4xunr1aofnXnnlFZk+fbrTzgmAmBHu5ATcSl/X+dfOcvnOZbNdI08NGVBtgBTPWpxLCzhRRESELFy40FQoX7t2rTRv3lw+/vhjefzxx00QDgAAXBx0x612+ssvv8itW7fEmbZs2eLQA6/Lkj322GPy7LPP2vZ169bNjLJbBQcHO/WcADw4DbjzZcwn/ar2k0YFG7EEGOBk2lk9b948M5e7S5cupqhajhw5uO4AAKTl6uWpseRIzpw5HbbHjBkjxYoVk0cffdQhyA4NDXX6uQBIOW1KtpG3qr0lgb4UXwRSg2aAFSxYUIoWLWoyxOJmiVnpSDgAAHBR0K1zv/QRd19qiYyMlK+++kr69u3r8HO//vprs18D7xYtWphibvca7db0On1YXb9+3ennDsBR65KtCbiBVNShQwcySgAASA/p5Z06dTLVT9WdO3fk1VdflQwZMqRKL/nixYvl6tWr5hysXnjhBSlUqJDkzZtXdu7cKf3795cDBw7c8xzCwsJkxIgRTjlHAADSIq1UnpKmTZtmHseOHTPb5cqVk6FDh0qzZs1s9whvvvmmSWnXju6mTZvK1KlTnVoHBgCAdB90d+zY0WG7ffv2kpo+//xz05hrgG318ssv274vX7685MmTRxo1aiSHDx82aejxGThwoBkttx/p1jluAAAgcfLnz2+mfGmRVe2Unz17tjz99NPy119/mQC8T58+8tNPP8mCBQvMqic9evQwS46uW7eOSwwA8ChJCrqtVctdQSuY//HHH/cdRa9Ro4b5eujQoQSDbh2pt47WAwCApNPpXPZGjx5tRr43btxoAnLtKJ87d640bNjQdg9RpkwZ83zNmjW55AAAj5Fu1gfRxjpXrlzy5JNP3vO4HTt2mK864g0AAJxPVxnRNHJd0aRWrVqybds2iYqKksaNG9uOKV26tCnktmHDBj4SAIBHeaDq5aklNjbWBN2a3u7r+3+nrCnk2ov+xBNPSPbs2c2cbk1nq1evnlSoUMGl5wwAgLvbtWuXCbJ1/nbGjBll0aJFUrZsWdMB7u/vL1myZHE4Xudznzt3LsH3o9ApAMAdpYugW9PKT5w4YdYVtacNuj43ceJE07uu87Jbt24tgwcPdtm5Ap7geuR1mbU7ZYsyAUh/SpUqZQLsa9euyXfffWc6xxNaiiwxKHQKAHBH6SLobtKkSbxrgmuQ/SCNO4CkiYmNkcWHFstH2z+SKxFXuHyAh9PO7+LFi5vvq1SpIlu2bJGPPvpInnvuObPMp644Yj/aff78ebO8Z0IodAoAcEfpIugG4Ho7LuyQsM1hsvfSXrOdL2M+OX3ztKtPC0Aamw6mKeIagPv5+cny5ctNBprS5Tw1a03T0RNCoVMAgDsi6AZwTxfCL8iEbRNk6ZGlZjujX0Z5vdLrUjFnRWn3czuuHuChdFRal/HU4mg3btwwNVZWrVolv/76q1kirGvXrmZ5zmzZsknmzJmlZ8+eJuCmcjkAwNMQdAOIV2RMpHy590v5ZOcncjv6tniJl7Qq0Up6PtxTsgdll7M3z4q/j785Lqn0dVkDsnLlgXTswoUL0qFDBzl79qwJsrWAqQbcjz32mHl+woQJ4u3tbUa6dfS7adOmMnXqVFefNgAAqY6gG8Bd1pxaI2M3j5UTN06YbR3VHlh9oJTLUc52TJ6MeWRpy6XJmtutAbe+HkD6petw30tgYKBMmTLFPAAA8GQE3QBsjl47KuO2jJO1p9ea7RxBOaRvlb7yZNEnxdvL+64rpYEzwTMAAACQMIJuAHIz8qbM2DlDvtz3pUTHR
ouvt690KNtBXq7wsmTwy8AVAgAAAJKJoBvwYLGWWFlyeIlM3D5R/r39r9lXL389ebva21IocyFXnx4AAACQ7hF0Ax5q97+7JWxTmOz8d6fZ1iBbg20NugEAAACkDIJuwMPoiPak7ZNk0aFFZjvYN1herfiqtC/TXvx8/Fx9egAAAIBbIegGPERUbJTM3TdXpv89XW5G3TT7nir2lPSu3FtyBud09ekBAAAAbomgG/AA60+vlzFbxpjq5Kps9rJmCbBKuSq5+tQAAAAAt0bQDbixkzdOyvtb3peVJ1ea7WyB2czI9tPFn453CTAAAAAAKYugG3BD4VHh8tmuz2T2ntkSGRspvl6+0rZMWzN3O7N/ZlefHgAAAOAxCLoBN2KxWOSXo7/Ih9s+lAvhF8y+WnlqSf/q/aVYlmKuPj0AAADA4xB0A25i36V9MmbzGNl+YbvZzpcxn1kCrEGBBuLl5eXq0wMAAAA8EkE3kM5duXNFJv81Wb775zuxiEWCfIPkpfIvScdyHSXAJ8DVpwcAAAB4NIJuIJ2Kjo2Wbw98Kx/v+FhuRN4w+5oVaSZ9q/SV0Ayhrj49AAAAAATdQPq0+exmCdscJoeuHjLbpbKWkgHVB0jV0KquPjUAAAAAdhjpBtKRMzfPyAdbP5Dfj/9utkMCQqTXw72kdYnW4uPt4+rTAwAAABAHQTeQDtyJviMzd8+Uz3d/LhExEWaN7TYl20iPh3uYwBsAAABA2kTQDaTxJcB0VPvDrR/KmVtnzL5qodWkf7X+UipbKVefHgAAAID7IOgG0qiDVw6aJcA2n9tstrU4Wr+q/aRJoSYsAQYAAACkEwTdQBpzLeKaTN0xVeYfmC8xlhiz7FeXh7pI54c6m+XAAAAAAKQf3pKGDR8+3Izo2T9Kly5te/7OnTvSvXt3yZ49u2TMmFFat24t58+fd+k5A8kVExsjC/5ZIM0XNZe5++eagPuxQo/JDy1/kNcrvU7ADQAAAKRDaX6ku1y5cvLHH3/Ytn19/++U+/TpIz/99JMsWLBAQkJCpEePHtKqVStZt26di84WSJ7t57ebVPJ9l/eZ7eJZikv/6v2lZp6aXFIAAAAgHUvzQbcG2aGhoXftv3btmnz++ecyd+5cadiwodk3c+ZMKVOmjGzcuFFq1iRYQdp3/tZ5Gb9tvPx89Geznck/k3Sv1F3alGojft5+rj49AAAAAO4edB88eFDy5s0rgYGBUqtWLQkLC5OCBQvKtm3bJCoqSho3bmw7VlPP9bkNGzbcM+iOiIgwD6vr1687/fcAHP4GYyJkzp458umuT+V29G3xEi9pXbK19Hy4p2QLzMbFAgAAANxEmg66a9SoIbNmzZJSpUrJ2bNnZcSIEfLII4/I7t275dy5c+Lv7y9ZsmRxeE3u3LnNc/eigbu+F+CKJcBWnVwl47aMk1M3T5l9lXJWkoE1BkrZ7GX5QAAAAAA3k6aD7mbNmtm+r1ChggnCCxUqJN9++60EBSW/ivPAgQOlb9++DiPdBQoUeODzBe7lyLUjMm7zOFl35r+aA7mCcknfqn3liSJPsAQYAAAA4KbSdNAdl45qlyxZUg4dOiSPPfaYREZGytWrVx1Gu7V6eXxzwO0FBASYB5AabkTekOl/T5e5++ZKtCXazNXuWK6jdCvfTYL9gvkQAAAAADeWppcMi+vmzZty+PBhyZMnj1SpUkX8/Pxk+fLltucPHDggJ06cMHO/AVeLtcTKooOLzBJgc/bOMQF3/fz1ZfHTi+WNym8QcANI13SqVrVq1SRTpkySK1cuadmypWmH7bG0JwAAaTzo7tevn6xevVqOHTsm69evl2eeeUZ8fHykbdu2Zomwrl27mjTxlStXmsJqnTt3NgE3lcvhajsv7pR2P7WToeuHyuU7l6Vw5sIyrfE0mdxoshTMXNDVpwcAD0zb5+7du5sVQ37//XdT3LRJkyZy69Yth6U9lyxZYpb21OPPnDljlvYEAMCTpOn08lOnTpkA+
9KlS5IzZ06pW7euadz1ezVhwgTx9vaW1q1bm2rkTZs2lalTp7r6tOHB/r39r0zYNkF+PPyj2c7gl0Feq/iavFD6BfHzYQkwAO5j2bJlDtta+FRHvLUTvF69eiztCQBAegi6582bd8/ndRmxKVOmmAfgSlExUfL1vq9l+s7pcivqv1Gep4s9Lb2r9JYcQTn4cAC4vWvXrpmv2bL9t+xhcpb2ZElPAIA7StNBN5Ae/HnqT7ME2LHrx8x2+RzlZUD1AVIhZwVXnxoApIrY2Fjp3bu31KlTRx566CGzLzlLe7KkJwDAHRF0A8l04voJE2yvPrXabGcPzG5Gtp8q9pR4e6XpcgkAkKJ0bvfu3btl7dq1D/Q+LOkJAHBHBN1AAmJiY2T7he1yMfyi5AzOKZVzVRYfbx8JjwqXGTtnmIrkUbFR4uvlK+3KtJNXKr4imfwzcT0BeJQePXrI0qVLZc2aNZI/f37bfl2+M6lLe7KkJwDAHRF0A/H44/gfMmbzGDkfft62L3dwbmlcqLH8fux3uXD7gtlXJ28debv621I0pCjXEYBHsVgs0rNnT1m0aJGsWrVKihQp4vC8/dKeWvBUsbQnAMATEXQD8QTcfVf1FYtYHPZrAK7F0lSBTAXk7Wpvy6P5HxUvLy+uIQCPTCmfO3eu/PDDD2atbus8bV3SMygoyGFpTy2uljlzZhOks7QnAMDTEHQDcVLKdYQ7bsBtL6NfRvm+xfcS5BfEtQPgsaZNm2a+1q9f32H/zJkzpVOnTuZ7lvYEAICgG3Cgc7jtU8rjczPqpuy+tFuqhVbj6gHw6PTy+2FpTwAARCixDNjRomkpeRwAAAAAz0bQDdjRKuUpeRwAAAAAz0bQDdjRZcG0SrmXxF8cTfeHBoea4wAAAADgfgi6ATu6DveA6gPM93EDb+t2/+r9zXEAAAAAcD8E3UAcuhb3+PrjJVdwLof9OgKu+/V5AAAAAEgMlgwD4qGBdYMCDUw1cy2apnO4NaWcEW4AAAAASUHQDSRAA2yWBQMAAADwIEgvBwAAAADASQi6AQAAAABwEoJuAAAAAACchDndAADA7VWtWlXOnTvn6tOAC509e5brD8AlCLoBAIDb04D79OnTrj4NpAGZMmVy9SkA8DAE3QAAwO2FhoYm63WxN2+l+LkgZXhnzJCsgPvdd9/lIwCQqgi6AQCA29u6dWuyXndx8scpfi5IGTl79uBSAkgXKKQGAAAAAICTEHQDAAAAAOCJQXdYWJhUq1bNzL/JlSuXtGzZUg4cOOBwTP369cXLy8vh8eqrr7rsnAEAAAAASBdB9+rVq6V79+6yceNG+f333yUqKkqaNGkit245FjXp1q2bWQbC+hg3bpzLzhkAAAAAgHRRSG3ZsmUO27NmzTIj3tu2bZN69erZ9gcHBye7KikAAAAAAB450h3XtWvXzNds2bI57P/6668lR44c8tBDD8nAgQMlPDz8nu8TEREh169fd3gAAAAAAOBRI932YmNjpXfv3lKnTh0TXFu98MILUqhQIcmbN6/s3LlT+vfvb+Z9L1y48J5zxUeMGJFKZw4AAAAA8FTpJujWud27d++WtWvXOux/+eWXbd+XL19e8uTJI40aNZLDhw9LsWLF4n0vHQ3v27evbVtHugsUKODEswcAAAAAeKJ0EXT36NFDli5dKmvWrJH8+fPf89gaNWqYr4cOHUow6A4ICDAPAAAAAAA8Nui2WCzSs2dPWbRokaxatUqKFCly39fs2LHDfNURbwAAAAAAXMk3raeUz507V3744QezVve5c+fM/pCQEAkKCjIp5Pr8E088IdmzZzdzuvv06WMqm1eoUMHVpw8AAAAA8HBpunr5tGnTTMXy+vXrm5Fr62P+/PnmeX9/f/njjz/M2t2lS5eWN998U1q3bi1Llixx9akDAODWdMpXixYtTCFTLy8vWbx48V3ZakOHDjXttnaUN27cWA4ePOiy8wUAwFXS9Ei3Ntj3osXPVq9enWrnA
wAA/nPr1i2pWLGidOnSRVq1anXXZRk3bpxMmjRJZs+ebaaHDRkyRJo2bSp79+6VwMBALiMAwGOk6aAbAACkTc2aNTOPhDrNJ06cKIMHD5ann37a7JszZ47kzp3bjIg///zzqXy2AAC4TppOLwcAAOnP0aNHTR0WTSm30nosusLIhg0bEnxdRESEWcbT/gEAQHpH0A0AAFKUtfCpjmzb023rc/EJCwszwbn1odPIAABI7wi6AQBAmjBw4EBTQNX6OHnypKtPCQCAB0bQDQAAUlRoaKj5ev78eYf9um19Lj4BAQGSOXNmhwcAAOkdQTcAAEhRWq1cg+vly5fb9un87E2bNkmtWrW42gAAj0L1cgAAkGQ3b96UQ4cOORRP27Fjh2TLlk0KFiwovXv3llGjRkmJEiVsS4bpmt4tW7bkagMAPApBNwAASLKtW7dKgwYNbNt9+/Y1Xzt27CizZs2St99+26zl/fLLL8vVq1elbt26smzZMtboBgB4HIJuAACQZPXr1zfrcSfEy8tLRo4caR4AAHgy5nQDAAAAAOAkBN0AAAAAADgJQTcAAAAAAE5C0A0AAAAAgJMQdAMAAAAA4CQE3QAAAAAAOAlBNwAAAAAATkLQDQAAAAAAQTcAAAAAAOkLI90AAAAAADiJr7Pe2N2dvnpbrtyKTPLrsmbwl3xZgpxyTgAAAACAtIWgO5kBd8MPVklEdGySXxvg6y0r+tUn8AYAAAAAD0B6eTLoCHdyAm6lr0vOCDkAAAAAIP0h6AYAAAAAwEncJuieMmWKFC5cWAIDA6VGjRqyefNmV58SAAAAAMDDuUXQPX/+fOnbt68MGzZMtm/fLhUrVpSmTZvKhQsXXH1qAAAAAAAP5hZB9/jx46Vbt27SuXNnKVu2rEyfPl2Cg4Pliy++cPWpAQAAAAA8WLoPuiMjI2Xbtm3SuHFj2z5vb2+zvWHDhnhfExERIdevX3d4AAAAAACQ0tJ90P3vv/9KTEyM5M6d22G/bp87dy7e14SFhUlISIjtUaBAgVQ6WwAAAACAJ0n3QXdyDBw4UK5du2Z7nDx50tWnBAAAAABwQ76SzuXIkUN8fHzk/PnzDvt1OzQ0NN7XBAQEmAcAAAAAAM6U7ke6/f39pUqVKrJ8+XLbvtjYWLNdq1Ytl54bAAAAAMCzpfuRbqXLhXXs2FGqVq0q1atXl4kTJ8qtW7dMNXMAAAAAAFzFLYLu5557Ti5evChDhw41xdMqVaoky5Ytu6u4GgAAAAAAqcktgm7Vo0cP8wAAAAAAIK1I93O6XSFrBn8J8E3epdPX6esBAPAEU6ZMkcKFC0tgYKDUqFFDNm/e7OpTAgAgVbnNSHdqypclSFb0qy9XbkUm+bUacOvrAQBwd/Pnzzd1V6ZPn24Cbq250rRpUzlw4IDkypXL1acHAECqIOhOJg2cCZ4BAEjY+PHjpVu3brbCphp8//TTT/LFF1/IgAEDuHQAAI9AejkAAEhxkZGRsm3bNmncuPH/3XR4e5vtDRs2cMUBAB6DkW4RsVgs5mJcv37d1Z8HAAB3sbZP1vYqPfj3338lJibmrpVEdHv//v3xviYiIsI8rK5du+by9vnG7dsu+9m4t4BU+ruIuR3DR5GGpca/D/wNpG3XXdhGJLZ9JujWBvXGDXMxChQokBqfDQAAyW6vQkJC3PbqhYWFyYgRI+7aT/uMePV/mwsDCXnNff9NRPr5G7hf+0zQLSJ58+aVkydPSqZMmcTLy+uBezv05kDfL3PmzA/0Xp6E68Y1428t7eK/T9dfN+1B1wZd26v0IkeOHOLj4yPnz5932K/boaGh8b5m4MCBpvCaVWxsrFy+fFmyZ8/+wO0z+G8Z/A2Av4GUltj2maD7/88xy58/f4p+AHqDRdDNdUsN/K1x3VILf2uuvW7pbYTb399fqlSpIsuXL5eWLVvagmjd7tGjR7yvCQgIMA97WbJkSZXz9ST8twz+BsDfQMpJTPtM0A0AAJxCR607duwoV
atWlerVq5slw27dumWrZg4AgCcg6AYAAE7x3HPPycWLF2Xo0KFy7tw5qVSpkixbtuyu4moAALgzgu4Upmlxw4YNuys9Dlw3/tbSBv4b5Zrxt5a6NJU8oXRypC7+/QN/A+BvwDW8LOlp/REAAAAAANIRb1efAAAAAAAA7oqgGwAAAAAAJyHoBgAAAADASQi6U9iUKVOkcOHCEhgYKDVq1JDNmzen9I9It8LCwqRatWqSKVMmyZUrl1m39cCBAw7H3LlzR7p37y7Zs2eXjBkzSuvWreX8+fMuO+e0ZsyYMeLl5SW9e/e27eOaxe/06dPSvn1787cUFBQk5cuXl61bt9qe13IWWlE5T5485vnGjRvLwYMHxVPFxMTIkCFDpEiRIuZ6FCtWTN59911znay4ZiJr1qyRFi1aSN68ec1/i4sXL3a4jom5RpcvX5Z27dqZNVJ1DequXbvKzZs3U+2zhue5398t3F9i7sHg3qZNmyYVKlSwrc9dq1Yt+eWXX1x9Wh6DoDsFzZ8/36xJqtXLt2/fLhUrVpSmTZvKhQsXUvLHpFurV682AfXGjRvl999/l6ioKGnSpIlZs9WqT58+smTJElmwYIE5/syZM9KqVSuXnndasWXLFvnkk0/MP5j2uGZ3u3LlitSpU0f8/PxMg7J371758MMPJWvWrLZjxo0bJ5MmTZLp06fLpk2bJEOGDOa/V+3E8ERjx441DfLHH38s+/btM9t6jSZPnmw7hmsm5t8r/bddO1jjk5hrpAH3nj17zL+DS5cuNQHRyy+/nCqfMzzT/f5u4f4Scw8G95Y/f34zeLNt2zYzCNGwYUN5+umnTXuEVKDVy5EyqlevbunevbttOyYmxpI3b15LWFgYlzgeFy5c0CE0y+rVq8321atXLX5+fpYFCxbYjtm3b585ZsOGDR59DW/cuGEpUaKE5ffff7c8+uijljfeeMPs55rFr3///pa6desmeD1jY2MtoaGhlvfff9+2T69lQECA5ZtvvrF4oieffNLSpUsXh32tWrWytGvXznzPNbub/tu0aNEi23ZirtHevXvN67Zs2WI75pdffrF4eXlZTp8+7YRPFrj33y08U9x7MHimrFmzWj777DNXn4ZHYKQ7hURGRpqeI00ltPL29jbbGzZsSKkf41auXbtmvmbLls181eunPa/217B06dJSsGBBj7+G2jv95JNPOlwbrlnCfvzxR6latao8++yzJo3u4Ycflk8//dT2/NGjR+XcuXMO1zMkJMRMCfHU/15r164ty5cvl3/++cds//3337J27Vpp1qyZ2eaa3V9irpF+1ZRy/fu00uO1vdCRcQBwxT0YPG9K2bx580ymg6aZw/l8U+FneIR///3X/AHnzp3bYb9u79+/32XnlVbFxsaaecmaAvzQQw+ZfXqz6u/vb25I415Dfc5T6T+KOl1B08vj4prF78iRIyZVWqd7DBo0yFy7Xr16mb+vjh072v6e4vvv1VP/1gYMGCDXr183HV0+Pj7m37PRo0ebVGjFNbu/xFwj/aodQfZ8fX3Nja+n/u0BcP09GDzDrl27TJCtU560dtKiRYukbNmyrj4tj0DQDZeN3O7evduMpCFhJ0+elDfeeMPMv9LifEj8DYWOJL733ntmW0e69e9N59lq0I27ffvtt/L111/L3LlzpVy5crJjxw5zU6aFl7hmAOA+uAfzXKVKlTLtu2Y6fPfdd6Z91/n+BN7OR3p5CsmRI4cZHYpbaVu3Q0NDU+rHuIUePXqY4kErV640RR2s9Dppmv7Vq1cdjvfka6gp91qIr3LlymY0TB/6j6MWatLvdQSNa3Y3rRwdtwEpU6aMnDhxwnxv/Xviv9f/89Zbb5nR7ueff95Uen/xxRdNkT6teMs1S5zE/F3p17jFNaOjo01Fc0/9dw6A6+/B4Bk046948eJSpUoV075rgcWPPvrI1aflEQi6U/CPWP+AdU6k/WibbjNX4j9av0X/sddUlhUrVpiliezp9
dNq0/bXUJez0EDJU69ho0aNTCqQ9kpaHzqCqym/1u+5ZnfTlLm4S6HoXOVChQqZ7/VvTwMc+781Ta3WObWe+rcWHh5u5hXb045E/XdMcc3uLzHXSL9qx6J2qFnpv4d6nXXuNwC44h4MnknbnoiICFefhkcgvTwF6fxRTdPQQKh69eoyceJEU6Cgc+fOKflj0nU6k6au/vDDD2adSOv8RS00pOvZ6lddr1avo85v1DUEe/bsaW5Sa9asKZ5Ir1Pc+Va6BJGuPW3dzzW7m47QamEwTS9v06aNbN68WWbMmGEeyrrW+ahRo6REiRLm5kPXqNZUal271BPpGr46h1sLF2p6+V9//SXjx4+XLl26mOe5Zv/R9bQPHTrkUDxNO8D03yy9dvf7u9KMi8cff1y6detmpjto8Ui9EdYMAz0OcMXfLdzf/e7B4P4GDhxoiqPqf/M3btwwfw+rVq2SX3/91dWn5hlcXT7d3UyePNlSsGBBi7+/v1lCbOPGja4+pTRD/9zie8ycOdN2zO3bty2vv/66WcIgODjY8swzz1jOnj3r0vNOa+yXDFNcs/gtWbLE8tBDD5nlmkqXLm2ZMWOGw/O6vNOQIUMsuXPnNsc0atTIcuDAASd/emnX9evXzd+V/vsVGBhoKVq0qOWdd96xRERE2I7hmlksK1eujPffsY4dOyb6Gl26dMnStm1bS8aMGS2ZM2e2dO7c2SwLCLjq7xbuLzH3YHBvuixooUKFTIySM2dO0z799ttvrj4tj+Gl/+fqwB8AAAAAAHfEnG4AAAAAAJyEoBsAAAAAACch6AYAAAAAwEkIugEAAAAAcBKCbgAAAAAAnISgGwAAAAAAJyHoBgAAAADASQi6AQAAAABwEoJuAAAAwE106tRJWrZs6erTAGCHoBuArZH28vIyD39/fylevLiMHDlSoqOjuUIAAKQB1nY6ocfw4cPlo48+klmzZrn6VAHY8bXfAODZHn/8cZk5c6ZERETIzz//LN27dxc/Pz8ZOHCgS88rMjLSdAQAAODJzp49a/t+/vz5MnToUDlw4IBtX8aMGc0DQNrCSDcAm4CAAAkNDZVChQrJa6+9Jo0bN5Yff/xRrly5Ih06dJCsWbNKcHCwNGvWTA4ePGheY7FYJGfOnPLdd9/Z3qdSpUqSJ08e2/batWvNe4eHh5vtq1evyksvvWRelzlzZmnYsKH8/ffftuO1p17f47PPPpMiRYpIYGAgnxIAwONpG219hISEmNFt+30acMdNL69fv7707NlTevfubdrx3Llzy6effiq3bt2Szp07S6ZMmUx22y+//OJwfXfv3m3ae31Pfc2LL74o//77r8d/BkByEHQDSFBQUJAZZdYGfOvWrSYA37Bhgwm0n3jiCYmKijINfr169WTVqlXmNRqg79u3T27fvi379+83+1avXi3VqlUzAbt69tln5cKFC6aB37Ztm1SuXFkaNWokly9ftv3sQ4cOyffffy8LFy6UHTt28CkBAJBMs2fPlhw5csjmzZtNAK4d69oW165dW7Zv3y5NmjQxQbV957h2iD/88MOm/V+2bJmcP39e2rRpw2cAJANBN4C7aFD9xx9/yK+//ioFCxY0wbaOOj/yyCNSsWJF+frrr+X06dOyePFiWy+6Nehes2aNaaTt9+nXRx991DbqrY3+ggULpGrVqlKiRAn54IMPJEuWLA6j5Rrsz5kzx7xXhQoV+JQAAEgmbbsHDx5s2lydMqYZZBqEd+vWzezTNPVLly7Jzp07zfEff/yxaX/fe+89KV26tPn+iy++kJUrV8o///zD5wAkEUE3AJulS5eaNDJtjDWl7LnnnjOj3L6+vlKjRg3bcdmzZ5dSpUqZEW2lAfXevXvl4sWLZlRbA25r0K2j4evXrzfbStPIb968ad7DOvdMH0ePHpXDhw/bfoamuGv6OQAAeDD2ndc+Pj6mDS5fvrxtn6aPK81Cs7bVGmDbt9MafCv7thpA4lBIDYBNgwYNZNq0a
aZoWd68eU2wraPc96MNd7Zs2UzArY/Ro0ebuWVjx46VLVu2mMBbU9iUBtw639s6Cm5PR7utMmTIwCcDAEAK0KKo9nRqmP0+3VaxsbG2trpFixamHY/LvmYLgMQh6AbgEOhqMRV7ZcqUMcuGbdq0yRY4awqaVkstW7asrbHW1PMffvhB9uzZI3Xr1jXzt7UK+ieffGLSyK1BtM7fPnfunAnoCxcuzNUHACCN0bZa66poO63tNYAHQ3o5gHvSuV5PP/20mfel87E15ax9+/aSL18+s99K08e/+eYbU3Vc09C8vb1NgTWd/22dz620InqtWrVMZdXffvtNjh07ZtLP33nnHVOsBQAAuJYuGarFTdu2bWsy1jSlXOu8aLXzmJgYPh4giQi6AdyXrt1dpUoVad68uQmYtdCaruNtn5qmgbU2xNa520q/j7tPR8X1tRqQa+NdsmRJef755+X48eO2OWUAAMB1dIrZunXrTBuulc11GpkuOabTwLRTHUDSeFn07hkAAAAAAKQ4uqoAAAAAAHASgm4AAAAAAJyEoBsAAAAAACch6AYAAAAAwEkIugEAAAAAcBKCbgAAAAAAnISgGwAAAAAAJyHoBgAAAADASQi6AQAAAABwEoJuAAAAAACchKAbAAAAAAAnIegGAAAAAECc4/8BG6hf5E6PdMwAAAAASUVORK5CYII=" + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + } + ], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": "At **t=1**, demand (15 MW) is below the minimum load (30 MW). The solver\nkeeps the unit off (`commit=0`), so `power=0` and `fuel=0` — the `active`\nparameter enforces this. 
Demand is met by the backup source.\n\nAt **t=2** and **t=3**, the unit commits and operates on the PWL curve.", + "metadata": {} } ], "metadata": { @@ -533,9 +875,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.11" + "version": "3.12.3" } }, "nbformat": 4, - "nbformat_minor": 5 + "nbformat_minor": 4 } diff --git a/linopy/__init__.py b/linopy/__init__.py index 7f5acd46..b1dc33b9 100644 --- a/linopy/__init__.py +++ b/linopy/__init__.py @@ -20,8 +20,13 @@ from linopy.io import read_netcdf from linopy.model import Model, Variable, Variables, available_solvers from linopy.objective import Objective -from linopy.piecewise import breakpoints -from linopy.remote import OetcHandler, RemoteHandler +from linopy.piecewise import breakpoints, piecewise, segments, slopes_to_points +from linopy.remote import RemoteHandler + +try: + from linopy.remote import OetcCredentials, OetcHandler, OetcSettings # noqa: F401 +except ImportError: + pass __all__ = ( "Constraint", @@ -39,6 +44,9 @@ "Variables", "available_solvers", "breakpoints", + "piecewise", + "segments", + "slopes_to_points", "align", "merge", "options", diff --git a/linopy/common.py b/linopy/common.py index d0ca3ad9..db309fa4 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -161,26 +161,6 @@ def pandas_to_dataarray( axis.name or get_from_iterable(dims, i) or f"dim_{i}" for i, axis in enumerate(arr.axes) ] - if coords is not None: - pandas_coords = dict(zip(dims, arr.axes)) - if isinstance(coords, Sequence): - coords = dict(zip(dims, coords)) - shared_dims = set(pandas_coords.keys()) & set(coords.keys()) - non_aligned = [] - for dim in shared_dims: - coord = coords[dim] - if not isinstance(coord, pd.Index): - coord = pd.Index(coord) - if not pandas_coords[dim].equals(coord): - non_aligned.append(dim) - if any(non_aligned): - warn( - f"coords for dimension(s) {non_aligned} is not aligned with the pandas object. 
" - "Previously, the indexes of the pandas were ignored and overwritten in " - "these cases. Now, the pandas object's coordinates are taken considered" - " for alignment." - ) - return DataArray(arr, coords=None, dims=dims, **kwargs) diff --git a/linopy/constants.py b/linopy/constants.py index c2467b83..00bbd705 100644 --- a/linopy/constants.py +++ b/linopy/constants.py @@ -38,14 +38,22 @@ PWL_LAMBDA_SUFFIX = "_lambda" PWL_CONVEX_SUFFIX = "_convex" -PWL_LINK_SUFFIX = "_link" +PWL_X_LINK_SUFFIX = "_x_link" +PWL_Y_LINK_SUFFIX = "_y_link" PWL_DELTA_SUFFIX = "_delta" PWL_FILL_SUFFIX = "_fill" PWL_BINARY_SUFFIX = "_binary" PWL_SELECT_SUFFIX = "_select" -DEFAULT_BREAKPOINT_DIM = "breakpoint" -DEFAULT_SEGMENT_DIM = "segment" -DEFAULT_LINK_DIM = "var" +PWL_AUX_SUFFIX = "_aux" +PWL_LP_SUFFIX = "_lp" +PWL_LP_DOMAIN_SUFFIX = "_lp_domain" +PWL_INC_BINARY_SUFFIX = "_inc_binary" +PWL_INC_LINK_SUFFIX = "_inc_link" +PWL_INC_ORDER_SUFFIX = "_inc_order" +PWL_ACTIVE_BOUND_SUFFIX = "_active_bound" +BREAKPOINT_DIM = "_breakpoint" +SEGMENT_DIM = "_segment" +LP_SEG_DIM = f"{BREAKPOINT_DIM}_seg" GROUPED_TERM_DIM = "_grouped_term" GROUP_DIM = "_group" FACTOR_DIM = "_factor" diff --git a/linopy/expressions.py b/linopy/expressions.py index af76e4f9..5070cbe7 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -94,12 +94,33 @@ from linopy.constraints import AnonymousScalarConstraint, Constraint from linopy.model import Model + from linopy.piecewise import PiecewiseConstraintDescriptor, PiecewiseExpression from linopy.variables import ScalarVariable, Variable FILL_VALUE = {"vars": -1, "coeffs": np.nan, "const": np.nan} +def _to_piecewise_constraint_descriptor( + lhs: Any, rhs: Any, operator: str +) -> PiecewiseConstraintDescriptor | None: + """Build a piecewise descriptor for reversed RHS syntax if applicable.""" + from linopy.piecewise import PiecewiseExpression + + if not isinstance(rhs, PiecewiseExpression): + return None + + if operator == "<=": + return rhs.__ge__(lhs) + if 
operator == ">=": + return rhs.__le__(lhs) + if operator == "==": + return rhs.__eq__(lhs) + + msg = f"Unsupported operator '{operator}' for piecewise dispatch." + raise ValueError(msg) + + def exprwrap( method: Callable, *default_args: Any, **new_default_kwargs: Any ) -> Callable: @@ -651,13 +672,40 @@ def __div__(self: GenericExpression, other: SideLike) -> GenericExpression: def __truediv__(self: GenericExpression, other: SideLike) -> GenericExpression: return self.__div__(other) - def __le__(self, rhs: SideLike) -> Constraint: + @overload + def __le__(self, rhs: PiecewiseExpression) -> PiecewiseConstraintDescriptor: ... + + @overload + def __le__(self, rhs: SideLike) -> Constraint: ... + + def __le__(self, rhs: SideLike) -> Constraint | PiecewiseConstraintDescriptor: + descriptor = _to_piecewise_constraint_descriptor(self, rhs, "<=") + if descriptor is not None: + return descriptor return self.to_constraint(LESS_EQUAL, rhs) - def __ge__(self, rhs: SideLike) -> Constraint: + @overload + def __ge__(self, rhs: PiecewiseExpression) -> PiecewiseConstraintDescriptor: ... + + @overload + def __ge__(self, rhs: SideLike) -> Constraint: ... + + def __ge__(self, rhs: SideLike) -> Constraint | PiecewiseConstraintDescriptor: + descriptor = _to_piecewise_constraint_descriptor(self, rhs, ">=") + if descriptor is not None: + return descriptor return self.to_constraint(GREATER_EQUAL, rhs) - def __eq__(self, rhs: SideLike) -> Constraint: # type: ignore + @overload # type: ignore[override] + def __eq__(self, rhs: PiecewiseExpression) -> PiecewiseConstraintDescriptor: ... + + @overload + def __eq__(self, rhs: SideLike) -> Constraint: ... 
+ + def __eq__(self, rhs: SideLike) -> Constraint | PiecewiseConstraintDescriptor: + descriptor = _to_piecewise_constraint_descriptor(self, rhs, "==") + if descriptor is not None: + return descriptor return self.to_constraint(EQUAL, rhs) def __gt__(self, other: Any) -> NotImplementedType: @@ -2588,6 +2636,10 @@ def __truediv__(self, other: float | int) -> ScalarLinearExpression: return self.__div__(other) def __le__(self, other: int | float) -> AnonymousScalarConstraint: + descriptor = _to_piecewise_constraint_descriptor(self, other, "<=") + if descriptor is not None: + return descriptor # type: ignore[return-value] + if not isinstance(other, int | float | np.number): raise TypeError( f"unsupported operand type(s) for <=: {type(self)} and {type(other)}" @@ -2596,6 +2648,10 @@ def __le__(self, other: int | float) -> AnonymousScalarConstraint: return constraints.AnonymousScalarConstraint(self, LESS_EQUAL, other) def __ge__(self, other: int | float) -> AnonymousScalarConstraint: + descriptor = _to_piecewise_constraint_descriptor(self, other, ">=") + if descriptor is not None: + return descriptor # type: ignore[return-value] + if not isinstance(other, int | float | np.number): raise TypeError( f"unsupported operand type(s) for >=: {type(self)} and {type(other)}" @@ -2603,7 +2659,13 @@ def __ge__(self, other: int | float) -> AnonymousScalarConstraint: return constraints.AnonymousScalarConstraint(self, GREATER_EQUAL, other) - def __eq__(self, other: int | float) -> AnonymousScalarConstraint: # type: ignore + def __eq__( # type: ignore[override] + self, other: int | float + ) -> AnonymousScalarConstraint: + descriptor = _to_piecewise_constraint_descriptor(self, other, "==") + if descriptor is not None: + return descriptor # type: ignore[return-value] + if not isinstance(other, int | float | np.number): raise TypeError( f"unsupported operand type(s) for ==: {type(self)} and {type(other)}" diff --git a/linopy/model.py b/linopy/model.py index 3c06bbec..f1d7e5ef 100644 --- 
a/linopy/model.py +++ b/linopy/model.py @@ -64,10 +64,14 @@ from linopy.matrices import MatrixAccessor from linopy.objective import Objective from linopy.piecewise import ( - add_disjunctive_piecewise_constraints, add_piecewise_constraints, ) -from linopy.remote import OetcHandler, RemoteHandler +from linopy.remote import RemoteHandler + +try: + from linopy.remote import OetcHandler +except ImportError: + OetcHandler = None # type: ignore from linopy.solver_capabilities import SolverFeature, solver_supports from linopy.solvers import ( IO_APIS, @@ -660,7 +664,6 @@ def add_sos_constraints( variable.attrs.update(attrs_update) add_piecewise_constraints = add_piecewise_constraints - add_disjunctive_piecewise_constraints = add_disjunctive_piecewise_constraints def add_constraints( self, @@ -780,7 +783,7 @@ def add_constraints( rhs_nan = data.rhs.isnull() if rhs_nan.any(): - data["rhs"] = data.rhs.fillna(0) + data = assign_multiindex_safe(data, rhs=data.rhs.fillna(0)) rhs_mask = ~rhs_nan mask = ( rhs_mask diff --git a/linopy/piecewise.py b/linopy/piecewise.py index c31204f6..63bc3f4a 100644 --- a/linopy/piecewise.py +++ b/linopy/piecewise.py @@ -1,14 +1,16 @@ """ Piecewise linear constraint formulations. -Provides SOS2, incremental, and disjunctive piecewise linear constraint -methods for use with linopy.Model. +Provides SOS2, incremental, pure LP, and disjunctive piecewise linear +constraint methods for use with linopy.Model. 
""" from __future__ import annotations -from collections.abc import Mapping, Sequence -from typing import TYPE_CHECKING, Literal +from collections.abc import Sequence +from dataclasses import dataclass +from numbers import Real +from typing import TYPE_CHECKING, Literal, TypeAlias import numpy as np import pandas as pd @@ -16,17 +18,25 @@ from xarray import DataArray from linopy.constants import ( - DEFAULT_BREAKPOINT_DIM, - DEFAULT_LINK_DIM, - DEFAULT_SEGMENT_DIM, + BREAKPOINT_DIM, HELPER_DIMS, + LP_SEG_DIM, + PWL_ACTIVE_BOUND_SUFFIX, + PWL_AUX_SUFFIX, PWL_BINARY_SUFFIX, PWL_CONVEX_SUFFIX, PWL_DELTA_SUFFIX, PWL_FILL_SUFFIX, + PWL_INC_BINARY_SUFFIX, + PWL_INC_LINK_SUFFIX, + PWL_INC_ORDER_SUFFIX, PWL_LAMBDA_SUFFIX, - PWL_LINK_SUFFIX, + PWL_LP_DOMAIN_SUFFIX, + PWL_LP_SUFFIX, PWL_SELECT_SUFFIX, + PWL_X_LINK_SUFFIX, + PWL_Y_LINK_SUFFIX, + SEGMENT_DIM, ) if TYPE_CHECKING: @@ -35,15 +45,38 @@ from linopy.model import Model from linopy.types import LinExprLike +# Accepted input types for breakpoint-like data +BreaksLike: TypeAlias = ( + Sequence[float] | DataArray | pd.Series | pd.DataFrame | dict[str, Sequence[float]] +) + +# Accepted input types for segment-like data (2D: segments × breakpoints) +SegmentsLike: TypeAlias = ( + Sequence[Sequence[float]] + | DataArray + | pd.DataFrame + | dict[str, Sequence[Sequence[float]]] +) + + +# --------------------------------------------------------------------------- +# DataArray construction helpers +# --------------------------------------------------------------------------- -def _list_to_array(values: list[float], bp_dim: str) -> DataArray: + +def _sequence_to_array(values: Sequence[float]) -> DataArray: arr = np.asarray(values, dtype=float) if arr.ndim != 1: - raise ValueError(f"Expected a 1D list of numeric values, got shape {arr.shape}") - return DataArray(arr, dims=[bp_dim], coords={bp_dim: np.arange(len(arr))}) + raise ValueError( + f"Expected a 1D sequence of numeric values, got shape {arr.shape}" + ) + return DataArray( 
+ arr, dims=[BREAKPOINT_DIM], coords={BREAKPOINT_DIM: np.arange(len(arr))} + ) -def _dict_to_array(d: dict[str, list[float]], dim: str, bp_dim: str) -> DataArray: +def _dict_to_array(d: dict[str, Sequence[float]], dim: str) -> DataArray: + """Convert a dict of ragged sequences to a NaN-padded 2D DataArray.""" max_len = max(len(v) for v in d.values()) keys = list(d.keys()) data = np.full((len(keys), max_len), np.nan) @@ -52,323 +85,478 @@ def _dict_to_array(d: dict[str, list[float]], dim: str, bp_dim: str) -> DataArra data[i, : len(vals)] = vals return DataArray( data, - dims=[dim, bp_dim], - coords={dim: keys, bp_dim: np.arange(max_len)}, + dims=[dim, BREAKPOINT_DIM], + coords={dim: keys, BREAKPOINT_DIM: np.arange(max_len)}, ) -def _segments_list_to_array( - values: list[Sequence[float]], bp_dim: str, seg_dim: str -) -> DataArray: +def _dataframe_to_array(df: pd.DataFrame, dim: str) -> DataArray: + # rows = entities (index), columns = breakpoints + data = np.asarray(df.values, dtype=float) + return DataArray( + data, + dims=[dim, BREAKPOINT_DIM], + coords={dim: list(df.index), BREAKPOINT_DIM: np.arange(df.shape[1])}, + ) + + +def _coerce_breaks(values: BreaksLike, dim: str | None = None) -> DataArray: + """Convert any BreaksLike input to a DataArray with BREAKPOINT_DIM.""" + if isinstance(values, DataArray): + if BREAKPOINT_DIM not in values.dims: + raise ValueError( + f"DataArray must have a '{BREAKPOINT_DIM}' dimension, " + f"got dims {list(values.dims)}" + ) + return values + if isinstance(values, pd.DataFrame): + if dim is None: + raise ValueError("'dim' is required when input is a DataFrame") + return _dataframe_to_array(values, dim) + if isinstance(values, pd.Series): + return _sequence_to_array(values) + if isinstance(values, dict): + if dim is None: + raise ValueError("'dim' is required when input is a dict") + return _dict_to_array(values, dim) + # Sequence (list, tuple, etc.) 
+ return _sequence_to_array(values) + + +def _segments_list_to_array(values: Sequence[Sequence[float]]) -> DataArray: max_len = max(len(seg) for seg in values) data = np.full((len(values), max_len), np.nan) for i, seg in enumerate(values): data[i, : len(seg)] = seg return DataArray( data, - dims=[seg_dim, bp_dim], - coords={seg_dim: np.arange(len(values)), bp_dim: np.arange(max_len)}, + dims=[SEGMENT_DIM, BREAKPOINT_DIM], + coords={ + SEGMENT_DIM: np.arange(len(values)), + BREAKPOINT_DIM: np.arange(max_len), + }, ) def _dict_segments_to_array( - d: dict[str, list[Sequence[float]]], dim: str, bp_dim: str, seg_dim: str + d: dict[str, Sequence[Sequence[float]]], dim: str ) -> DataArray: parts = [] for key, seg_list in d.items(): - arr = _segments_list_to_array(seg_list, bp_dim, seg_dim) + arr = _segments_list_to_array(seg_list) parts.append(arr.expand_dims({dim: [key]})) combined = xr.concat(parts, dim=dim) max_bp = max(max(len(seg) for seg in sl) for sl in d.values()) max_seg = max(len(sl) for sl in d.values()) - if combined.sizes[bp_dim] < max_bp or combined.sizes[seg_dim] < max_seg: + if combined.sizes[BREAKPOINT_DIM] < max_bp or combined.sizes[SEGMENT_DIM] < max_seg: combined = combined.reindex( - {bp_dim: np.arange(max_bp), seg_dim: np.arange(max_seg)}, + {BREAKPOINT_DIM: np.arange(max_bp), SEGMENT_DIM: np.arange(max_seg)}, fill_value=np.nan, ) return combined -def _get_entity_keys( - kwargs: Mapping[str, object], -) -> list[str]: - first_dict = next(v for v in kwargs.values() if isinstance(v, dict)) - return list(first_dict.keys()) +# --------------------------------------------------------------------------- +# Public factory functions +# --------------------------------------------------------------------------- -def _validate_factory_args( - values: list | dict | None, - kwargs: dict, -) -> None: - if values is not None and kwargs: - raise ValueError("Cannot pass both positional 'values' and keyword arguments") - if values is None and not kwargs: - raise 
ValueError("Must pass either positional 'values' or keyword arguments") +def slopes_to_points( + x_points: list[float], slopes: list[float], y0: float +) -> list[float]: + """ + Convert segment slopes + initial y-value to y-coordinates at each breakpoint. + Parameters + ---------- + x_points : list[float] + Breakpoint x-coordinates (length n). + slopes : list[float] + Slope of each segment (length n-1). + y0 : float + y-value at the first breakpoint. -def _resolve_kwargs( - kwargs: dict[str, list[float] | dict[str, list[float]] | DataArray], - dim: str | None, - bp_dim: str, - link_dim: str, + Returns + ------- + list[float] + y-coordinates at each breakpoint (length n). + + Raises + ------ + ValueError + If ``len(slopes) != len(x_points) - 1``. + """ + if len(slopes) != len(x_points) - 1: + raise ValueError( + f"len(slopes) must be len(x_points) - 1, " + f"got {len(slopes)} slopes and {len(x_points)} x_points" + ) + y_points: list[float] = [y0] + for i, s in enumerate(slopes): + y_points.append(y_points[-1] + s * (x_points[i + 1] - x_points[i])) + return y_points + + +def breakpoints( + values: BreaksLike | None = None, + *, + slopes: BreaksLike | None = None, + x_points: BreaksLike | None = None, + y0: float | dict[str, float] | pd.Series | DataArray | None = None, + dim: str | None = None, ) -> DataArray: - has_dict = any(isinstance(v, dict) for v in kwargs.values()) - if has_dict and dim is None: - raise ValueError("'dim' is required when any kwarg value is a dict") - - arrays: dict[str, DataArray] = {} - for name, val in kwargs.items(): - if isinstance(val, DataArray): - arrays[name] = val - elif isinstance(val, dict): - assert dim is not None - arrays[name] = _dict_to_array(val, dim, bp_dim) - elif isinstance(val, list): - base = _list_to_array(val, bp_dim) - if has_dict: - base = base.expand_dims({dim: _get_entity_keys(kwargs)}) - arrays[name] = base - else: + """ + Create a breakpoint DataArray for piecewise linear constraints. 
+ + Two modes (mutually exclusive): + + **Points mode**: ``breakpoints(values, ...)`` + + **Slopes mode**: ``breakpoints(slopes=..., x_points=..., y0=...)`` + + Parameters + ---------- + values : BreaksLike, optional + Breakpoint values. Accepted types: ``Sequence[float]``, + ``pd.Series``, ``pd.DataFrame``, or ``xr.DataArray``. + A 1D input (list, Series) creates 1D breakpoints. + A 2D input (DataFrame, multi-dim DataArray) creates per-entity + breakpoints (``dim`` is required for DataFrame). + slopes : BreaksLike, optional + Segment slopes. Mutually exclusive with ``values``. + x_points : BreaksLike, optional + Breakpoint x-coordinates. Required with ``slopes``. + y0 : float, dict, pd.Series, or DataArray, optional + Initial y-value. Required with ``slopes``. A scalar broadcasts to + all entities. A dict/Series/DataArray provides per-entity values. + dim : str, optional + Entity dimension name. Required when ``values`` or ``slopes`` is a + ``pd.DataFrame`` or ``dict``. + + Returns + ------- + DataArray + """ + # Validate mutual exclusivity + if values is not None and slopes is not None: + raise ValueError("'values' and 'slopes' are mutually exclusive") + if values is not None and (x_points is not None or y0 is not None): + raise ValueError("'x_points' and 'y0' are forbidden when 'values' is given") + if slopes is not None: + if x_points is None or y0 is None: + raise ValueError("'slopes' requires both 'x_points' and 'y0'") + + # Slopes mode: convert to points, then fall through to coerce + if slopes is not None: + if x_points is None or y0 is None: + raise ValueError("'slopes' requires both 'x_points' and 'y0'") + slopes_arr = _coerce_breaks(slopes, dim) + xp_arr = _coerce_breaks(x_points, dim) + + # 1D case: single set of breakpoints + if slopes_arr.ndim == 1: + if not isinstance(y0, Real): + raise TypeError("When 'slopes' is 1D, 'y0' must be a scalar float") + pts = slopes_to_points( + list(xp_arr.values), list(slopes_arr.values), float(y0) + ) + return 
_sequence_to_array(pts) + + # Multi-dim case: per-entity slopes + # Identify the entity dimension (not BREAKPOINT_DIM) + entity_dims = [d for d in slopes_arr.dims if d != BREAKPOINT_DIM] + if len(entity_dims) != 1: raise ValueError( - f"kwarg '{name}' must be a list, dict, or DataArray, got {type(val)}" + f"Expected exactly one entity dimension in slopes, got {entity_dims}" + ) + entity_dim = str(entity_dims[0]) + entity_keys = slopes_arr.coords[entity_dim].values + + # Resolve y0 per entity + if isinstance(y0, Real): + y0_map: dict[str, float] = {str(k): float(y0) for k in entity_keys} + elif isinstance(y0, dict): + y0_map = {str(k): float(y0[k]) for k in entity_keys} + elif isinstance(y0, pd.Series): + y0_map = {str(k): float(y0[k]) for k in entity_keys} + elif isinstance(y0, DataArray): + y0_map = { + str(k): float(y0.sel({entity_dim: k}).item()) for k in entity_keys + } + else: + raise TypeError( + f"'y0' must be a float, Series, DataArray, or dict, got {type(y0)}" ) - parts = [arr.expand_dims({link_dim: [name]}) for name, arr in arrays.items()] - return xr.concat(parts, dim=link_dim) + # Compute points per entity + computed: dict[str, Sequence[float]] = {} + for key in entity_keys: + sk = str(key) + sl = list(slopes_arr.sel({entity_dim: key}).values) + # Remove trailing NaN from slopes + sl = [v for v in sl if not np.isnan(v)] + if entity_dim in xp_arr.dims: + xp = list(xp_arr.sel({entity_dim: key}).values) + xp = [v for v in xp if not np.isnan(v)] + else: + xp = [v for v in xp_arr.values if not np.isnan(v)] + computed[sk] = slopes_to_points(xp, sl, y0_map[sk]) + + return _dict_to_array(computed, entity_dim) + # Points mode + if values is None: + raise ValueError("Must pass either 'values' or 'slopes'") -def _resolve_segment_kwargs( - kwargs: dict[ - str, list[Sequence[float]] | dict[str, list[Sequence[float]]] | DataArray - ], - dim: str | None, - bp_dim: str, - seg_dim: str, - link_dim: str, -) -> DataArray: - has_dict = any(isinstance(v, dict) for v in 
kwargs.values()) - if has_dict and dim is None: - raise ValueError("'dim' is required when any kwarg value is a dict") - - arrays: dict[str, DataArray] = {} - for name, val in kwargs.items(): - if isinstance(val, DataArray): - arrays[name] = val - elif isinstance(val, dict): - assert dim is not None - arrays[name] = _dict_segments_to_array(val, dim, bp_dim, seg_dim) - elif isinstance(val, list): - base = _segments_list_to_array(val, bp_dim, seg_dim) - if has_dict: - base = base.expand_dims({dim: _get_entity_keys(kwargs)}) - arrays[name] = base - else: + return _coerce_breaks(values, dim) + + +def _coerce_segments(values: SegmentsLike, dim: str | None = None) -> DataArray: + """Convert any SegmentsLike input to a DataArray with SEGMENT_DIM and BREAKPOINT_DIM.""" + if isinstance(values, DataArray): + if SEGMENT_DIM not in values.dims or BREAKPOINT_DIM not in values.dims: raise ValueError( - f"kwarg '{name}' must be a list, dict, or DataArray, got {type(val)}" + f"DataArray must have both '{SEGMENT_DIM}' and '{BREAKPOINT_DIM}' " + f"dimensions, got dims {list(values.dims)}" ) - - parts = [arr.expand_dims({link_dim: [name]}) for name, arr in arrays.items()] - combined = xr.concat(parts, dim=link_dim) - max_bp = max(a.sizes.get(bp_dim, 0) for a in arrays.values()) - max_seg = max(a.sizes.get(seg_dim, 0) for a in arrays.values()) - if ( - combined.sizes.get(bp_dim, 0) < max_bp - or combined.sizes.get(seg_dim, 0) < max_seg - ): - combined = combined.reindex( - {bp_dim: np.arange(max_bp), seg_dim: np.arange(max_seg)}, - fill_value=np.nan, + return values + if isinstance(values, pd.DataFrame): + data = np.asarray(values.values, dtype=float) + return DataArray( + data, + dims=[SEGMENT_DIM, BREAKPOINT_DIM], + coords={ + SEGMENT_DIM: np.arange(data.shape[0]), + BREAKPOINT_DIM: np.arange(data.shape[1]), + }, ) - return combined + if isinstance(values, dict): + if dim is None: + raise ValueError("'dim' is required when 'values' is a dict") + return 
_dict_segments_to_array(values, dim) + # Sequence[Sequence[float]] + return _segments_list_to_array(list(values)) + + +def segments( + values: SegmentsLike, + *, + dim: str | None = None, +) -> DataArray: + """ + Create a segmented breakpoint DataArray for disjunctive piecewise constraints. + Parameters + ---------- + values : SegmentsLike + Segment breakpoints. Accepted types: ``Sequence[Sequence[float]]``, + ``pd.DataFrame`` (rows=segments, columns=breakpoints), + ``xr.DataArray`` (must have ``SEGMENT_DIM`` and ``BREAKPOINT_DIM``), + or ``dict[str, Sequence[Sequence[float]]]`` (requires ``dim``). + dim : str, optional + Entity dimension name. Required when ``values`` is a dict. -class _BreakpointFactory: + Returns + ------- + DataArray """ - Factory for creating breakpoint DataArrays for piecewise linear constraints. + return _coerce_segments(values, dim) + - Use ``linopy.breakpoints(...)`` for continuous breakpoints and - ``linopy.breakpoints.segments(...)`` for disjunctive (disconnected) segments. +# --------------------------------------------------------------------------- +# Piecewise expression and descriptor types +# --------------------------------------------------------------------------- + + +class PiecewiseExpression: """ + Lazy descriptor representing a piecewise linear function of an expression. - def __call__( - self, - values: list[float] | dict[str, list[float]] | None = None, - *, - dim: str | None = None, - bp_dim: str = DEFAULT_BREAKPOINT_DIM, - link_dim: str = DEFAULT_LINK_DIM, - **kwargs: list[float] | dict[str, list[float]] | DataArray, - ) -> DataArray: - """ - Create a breakpoint DataArray for piecewise linear constraints. - - Parameters - ---------- - values : list or dict, optional - Breakpoint values. A list creates 1D breakpoints. A dict creates - per-entity breakpoints (requires ``dim``). Cannot be used with kwargs. - dim : str, optional - Entity dimension name. Required when ``values`` is a dict. 
- bp_dim : str, default "breakpoint" - Name for the breakpoint dimension. - link_dim : str, default "var" - Name for the link dimension when using kwargs. - **kwargs : list, dict, or DataArray - Per-variable breakpoints. Each kwarg becomes a coordinate on the - link dimension. - - Returns - ------- - DataArray - Breakpoint array with appropriate dimensions and coordinates. - """ - _validate_factory_args(values, kwargs) - - if values is not None: - if isinstance(values, list): - return _list_to_array(values, bp_dim) - if isinstance(values, dict): - if dim is None: - raise ValueError("'dim' is required when 'values' is a dict") - return _dict_to_array(values, dim, bp_dim) - raise TypeError(f"'values' must be a list or dict, got {type(values)}") - - return _resolve_kwargs(kwargs, dim, bp_dim, link_dim) - - def segments( - self, - values: list[Sequence[float]] | dict[str, list[Sequence[float]]] | None = None, - *, - dim: str | None = None, - bp_dim: str = DEFAULT_BREAKPOINT_DIM, - seg_dim: str = DEFAULT_SEGMENT_DIM, - link_dim: str = DEFAULT_LINK_DIM, - **kwargs: list[Sequence[float]] | dict[str, list[Sequence[float]]] | DataArray, - ) -> DataArray: - """ - Create a segmented breakpoint DataArray for disjunctive piecewise constraints. - - Parameters - ---------- - values : list or dict, optional - Segment breakpoints. A list of lists creates 2D breakpoints - ``[segment, breakpoint]``. A dict creates per-entity segments - (requires ``dim``). Cannot be used with kwargs. - dim : str, optional - Entity dimension name. Required when ``values`` is a dict. - bp_dim : str, default "breakpoint" - Name for the breakpoint dimension. - seg_dim : str, default "segment" - Name for the segment dimension. - link_dim : str, default "var" - Name for the link dimension when using kwargs. - **kwargs : list, dict, or DataArray - Per-variable segment breakpoints. - - Returns - ------- - DataArray - Breakpoint array with segment and breakpoint dimensions. 
- """ - _validate_factory_args(values, kwargs) - - if values is not None: - if isinstance(values, list): - return _segments_list_to_array(values, bp_dim, seg_dim) - if isinstance(values, dict): - if dim is None: - raise ValueError("'dim' is required when 'values' is a dict") - return _dict_segments_to_array(values, dim, bp_dim, seg_dim) - raise TypeError(f"'values' must be a list or dict, got {type(values)}") - - return _resolve_segment_kwargs(kwargs, dim, bp_dim, seg_dim, link_dim) - - -breakpoints = _BreakpointFactory() - - -def _auto_broadcast_breakpoints( - bp: DataArray, - expr: LinExprLike | dict[str, LinExprLike], - dim: str, - link_dim: str | None = None, - exclude_dims: set[str] | None = None, -) -> DataArray: - _, target_dims = _validate_piecewise_expr(expr) + Created by :func:`piecewise`. Supports comparison operators so that + ``piecewise(x, ...) >= y`` produces a + :class:`PiecewiseConstraintDescriptor`. + """ - skip = {dim} | set(HELPER_DIMS) - if link_dim is not None: - skip.add(link_dim) - if exclude_dims is not None: - skip.update(exclude_dims) + __slots__ = ("active", "disjunctive", "expr", "x_points", "y_points") - target_dims -= skip - missing = target_dims - {str(d) for d in bp.dims} + def __init__( + self, + expr: LinExprLike, + x_points: DataArray, + y_points: DataArray, + disjunctive: bool, + active: LinExprLike | None = None, + ) -> None: + self.expr = expr + self.x_points = x_points + self.y_points = y_points + self.disjunctive = disjunctive + self.active = active + + # y <= pw → Python tries y.__le__(pw) → NotImplemented → pw.__ge__(y) + def __ge__(self, other: LinExprLike) -> PiecewiseConstraintDescriptor: + return PiecewiseConstraintDescriptor(lhs=other, sign="<=", piecewise_func=self) + + # y >= pw → Python tries y.__ge__(pw) → NotImplemented → pw.__le__(y) + def __le__(self, other: LinExprLike) -> PiecewiseConstraintDescriptor: + return PiecewiseConstraintDescriptor(lhs=other, sign=">=", piecewise_func=self) + + # y == pw → Python 
tries y.__eq__(pw) → NotImplemented → pw.__eq__(y) + def __eq__(self, other: object) -> PiecewiseConstraintDescriptor: # type: ignore[override] + from linopy.expressions import LinearExpression + from linopy.variables import Variable + + if not isinstance(other, Variable | LinearExpression): + return NotImplemented + return PiecewiseConstraintDescriptor(lhs=other, sign="==", piecewise_func=self) + + +@dataclass +class PiecewiseConstraintDescriptor: + """Holds all information needed to add a piecewise constraint to a model.""" + + lhs: LinExprLike + sign: str # "<=", ">=", "==" + piecewise_func: PiecewiseExpression + + +def _detect_disjunctive(x_points: DataArray, y_points: DataArray) -> bool: + """ + Detect whether point arrays represent a disjunctive formulation. - if not missing: - return bp + Both ``x_points`` and ``y_points`` **must** use the well-known dimension + names ``BREAKPOINT_DIM`` and, for disjunctive formulations, + ``SEGMENT_DIM``. Use the :func:`breakpoints` / :func:`segments` factory + helpers to build arrays with the correct dimension names. + """ + x_has_bp = BREAKPOINT_DIM in x_points.dims + y_has_bp = BREAKPOINT_DIM in y_points.dims + if not x_has_bp and not y_has_bp: + raise ValueError( + "x_points and y_points must have a breakpoint dimension. " + f"Got x_points dims {list(x_points.dims)} and y_points dims " + f"{list(y_points.dims)}. Use the breakpoints() or segments() " + f"factory to create correctly-dimensioned arrays." + ) + if not x_has_bp: + raise ValueError( + "x_points is missing the breakpoint dimension, " + f"got dims {list(x_points.dims)}. " + "Use the breakpoints() or segments() factory." + ) + if not y_has_bp: + raise ValueError( + "y_points is missing the breakpoint dimension, " + f"got dims {list(y_points.dims)}. " + "Use the breakpoints() or segments() factory." 
+ ) - expand_map: dict[str, list] = {} - all_exprs = expr.values() if isinstance(expr, dict) else [expr] - for d in missing: - for e in all_exprs: - if d in e.coords: - expand_map[str(d)] = list(e.coords[d].values) - break + x_has_seg = SEGMENT_DIM in x_points.dims + y_has_seg = SEGMENT_DIM in y_points.dims + if x_has_seg != y_has_seg: + raise ValueError( + "If one of x_points/y_points has a segment dimension, " + f"both must. x_points dims: {list(x_points.dims)}, " + f"y_points dims: {list(y_points.dims)}." + ) - if expand_map: - bp = bp.expand_dims(expand_map) + return x_has_seg - return bp +def piecewise( + expr: LinExprLike, + x_points: BreaksLike, + y_points: BreaksLike, + active: LinExprLike | None = None, +) -> PiecewiseExpression: + """ + Create a piecewise linear function descriptor. -def _extra_coords(breakpoints: DataArray, *exclude_dims: str | None) -> list[pd.Index]: - excluded = {d for d in exclude_dims if d is not None} - return [ - pd.Index(breakpoints.coords[d].values, name=d) - for d in breakpoints.dims - if d not in excluded - ] + Parameters + ---------- + expr : Variable or LinearExpression + The "x" side expression. + x_points : BreaksLike + Breakpoint x-coordinates. + y_points : BreaksLike + Breakpoint y-coordinates. + active : Variable or LinearExpression, optional + Binary variable that scales the piecewise function. When + ``active=0``, all auxiliary variables are forced to zero, which + in turn forces the reconstructed x and y to zero. When + ``active=1``, the normal piecewise domain ``[x₀, xₙ]`` is + active. This is the only behavior the linear formulation + supports — selectively *relaxing* the constraint (letting x and + y float freely when off) would require big-M or indicator + constraints. 
+ Returns + ------- + PiecewiseExpression + """ + if not isinstance(x_points, DataArray): + x_points = _coerce_breaks(x_points) + if not isinstance(y_points, DataArray): + y_points = _coerce_breaks(y_points) -def _validate_breakpoints(breakpoints: DataArray, dim: str) -> None: - if dim not in breakpoints.dims: + disjunctive = _detect_disjunctive(x_points, y_points) + + # Validate compatible shapes along breakpoint dimension + if x_points.sizes[BREAKPOINT_DIM] != y_points.sizes[BREAKPOINT_DIM]: raise ValueError( - f"breakpoints must have dimension '{dim}', " - f"but only has dimensions {list(breakpoints.dims)}" + f"x_points and y_points must have same size along '{BREAKPOINT_DIM}', " + f"got {x_points.sizes[BREAKPOINT_DIM]} and " + f"{y_points.sizes[BREAKPOINT_DIM]}" ) + # Validate compatible shapes along segment dimension + if disjunctive: + if x_points.sizes[SEGMENT_DIM] != y_points.sizes[SEGMENT_DIM]: + raise ValueError( + f"x_points and y_points must have same size along '{SEGMENT_DIM}'" + ) + + return PiecewiseExpression(expr, x_points, y_points, disjunctive, active) + + +# --------------------------------------------------------------------------- +# Internal validation and utility functions +# --------------------------------------------------------------------------- -def _validate_numeric_breakpoint_coords(breakpoints: DataArray, dim: str) -> None: - if not pd.api.types.is_numeric_dtype(breakpoints.coords[dim]): + +def _validate_numeric_breakpoint_coords(bp: DataArray) -> None: + if not pd.api.types.is_numeric_dtype(bp.coords[BREAKPOINT_DIM]): raise ValueError( - f"Breakpoint dimension '{dim}' must have numeric coordinates " - f"for SOS2 weights, but got {breakpoints.coords[dim].dtype}" + f"Breakpoint dimension '{BREAKPOINT_DIM}' must have numeric coordinates " + f"for SOS2 weights, but got {bp.coords[BREAKPOINT_DIM].dtype}" ) -def _check_strict_monotonicity(breakpoints: DataArray, dim: str) -> bool: - """ - Check if breakpoints are strictly monotonic along 
dim. - - Each slice along non-dim dimensions is checked independently, - allowing different slices to have opposite directions (e.g., one - increasing and another decreasing). NaN values are ignored. - """ - diffs = breakpoints.diff(dim) +def _check_strict_monotonicity(bp: DataArray) -> bool: + """Check if breakpoints are strictly monotonic along BREAKPOINT_DIM (ignoring NaN).""" + diffs = bp.diff(BREAKPOINT_DIM) pos = (diffs > 0) | diffs.isnull() neg = (diffs < 0) | diffs.isnull() - all_pos_per_slice = pos.all(dim) - all_neg_per_slice = neg.all(dim) - has_non_nan = (~diffs.isnull()).any(dim) + all_pos_per_slice = pos.all(BREAKPOINT_DIM) + all_neg_per_slice = neg.all(BREAKPOINT_DIM) + has_non_nan = (~diffs.isnull()).any(BREAKPOINT_DIM) monotonic = (all_pos_per_slice | all_neg_per_slice) & has_non_nan return bool(monotonic.all()) -def _has_trailing_nan_only(breakpoints: DataArray, dim: str) -> bool: - """Check that NaN values in breakpoints only appear as trailing entries along dim.""" - valid = ~breakpoints.isnull() - cummin = np.minimum.accumulate(valid.values, axis=valid.dims.index(dim)) +def _check_strict_increasing(bp: DataArray) -> bool: + """Check if breakpoints are strictly increasing along BREAKPOINT_DIM.""" + diffs = bp.diff(BREAKPOINT_DIM) + pos = (diffs > 0) | diffs.isnull() + has_non_nan = (~diffs.isnull()).any(BREAKPOINT_DIM) + increasing = pos.all(BREAKPOINT_DIM) & has_non_nan + return bool(increasing.all()) + + +def _has_trailing_nan_only(bp: DataArray) -> bool: + """Check that NaN values only appear as trailing entries along BREAKPOINT_DIM.""" + valid = ~bp.isnull() + cummin = np.minimum.accumulate(valid.values, axis=valid.dims.index(BREAKPOINT_DIM)) cummin_da = DataArray(cummin, coords=valid.coords, dims=valid.dims) return not bool((valid & ~cummin_da).any()) @@ -381,523 +569,658 @@ def _to_linexpr(expr: LinExprLike) -> LinearExpression: return expr.to_linexpr() -def _validate_piecewise_expr( - expr: LinExprLike | dict[str, LinExprLike], -) -> 
tuple[bool, set[str]]: - from linopy.expressions import LinearExpression - from linopy.variables import Variable +def _extra_coords(points: DataArray, *exclude_dims: str | None) -> list[pd.Index]: + excluded = {d for d in exclude_dims if d is not None} + return [ + pd.Index(points.coords[d].values, name=d) + for d in points.dims + if d not in excluded + ] - _types = (Variable, LinearExpression) - if isinstance(expr, _types): - return True, {str(d) for d in expr.coord_dims} +def _broadcast_points( + points: DataArray, + *exprs: LinExprLike, + disjunctive: bool = False, +) -> DataArray: + """Broadcast points to cover all dimensions from exprs.""" + skip: set[str] = {BREAKPOINT_DIM} | set(HELPER_DIMS) + if disjunctive: + skip.add(SEGMENT_DIM) - if isinstance(expr, dict): - dims: set[str] = set() - for key, val in expr.items(): - if not isinstance(val, _types): - raise TypeError( - f"dict value for key '{key}' must be a Variable or " - f"LinearExpression, got {type(val)}" - ) - dims.update(str(d) for d in val.coord_dims) - return False, dims + target_dims: set[str] = set() + for e in exprs: + le = _to_linexpr(e) + target_dims.update(str(d) for d in le.coord_dims) - raise TypeError( - f"'expr' must be a Variable, LinearExpression, or dict of these, " - f"got {type(expr)}" - ) + missing = target_dims - skip - {str(d) for d in points.dims} + if not missing: + return points + expand_map: dict[str, list] = {} + for d in missing: + for e in exprs: + le = _to_linexpr(e) + if d in le.coords: + expand_map[str(d)] = list(le.coords[d].values) + break -def _compute_mask( - mask: DataArray | None, - breakpoints: DataArray, + if expand_map: + points = points.expand_dims(expand_map) + return points + + +def _compute_combined_mask( + x_points: DataArray, + y_points: DataArray, skip_nan_check: bool, ) -> DataArray | None: - if mask is not None: - return mask if skip_nan_check: + if bool(x_points.isnull().any()) or bool(y_points.isnull().any()): + raise ValueError( + 
"skip_nan_check=True but breakpoints contain NaN. " + "Either remove NaN values or set skip_nan_check=False." + ) return None - return ~breakpoints.isnull() - - -def _resolve_link_dim( - breakpoints: DataArray, - expr_keys: set[str], - exclude_dims: set[str], -) -> str: - for d in breakpoints.dims: - if d in exclude_dims: - continue - coord_set = {str(c) for c in breakpoints.coords[d].values} - if coord_set == expr_keys: - return str(d) - raise ValueError( - "Could not auto-detect linking dimension from breakpoints. " - "Ensure breakpoints have a dimension whose coordinates match " - f"the expression dict keys. " - f"Breakpoint dimensions: {list(breakpoints.dims)}, " - f"expression keys: {list(expr_keys)}" - ) + return ~(x_points.isnull() | y_points.isnull()) -def _build_stacked_expr( - model: Model, - expr_dict: dict[str, LinExprLike], - breakpoints: DataArray, - link_dim: str, -) -> LinearExpression: - from linopy.expressions import LinearExpression +def _detect_convexity( + x_points: DataArray, + y_points: DataArray, +) -> Literal["convex", "concave", "linear", "mixed"]: + """ + Detect convexity of the piecewise function. + + Requires strictly increasing x breakpoints and computes slopes and + second differences in the given order. + """ + if not _check_strict_increasing(x_points): + raise ValueError( + "Convexity detection requires strictly increasing x_points. " + "Pass breakpoints in increasing x-order or use method='sos2'." 
+ ) - link_coords = list(breakpoints.coords[link_dim].values) + dx = x_points.diff(BREAKPOINT_DIM) + dy = y_points.diff(BREAKPOINT_DIM) - expr_data_list = [] - for k in link_coords: - e = expr_dict[str(k)] - linexpr = _to_linexpr(e) - expr_data_list.append(linexpr.data.expand_dims({link_dim: [k]})) + valid = ~(dx.isnull() | dy.isnull() | (dx == 0)) + slopes = dy / dx - stacked_data = xr.concat(expr_data_list, dim=link_dim) - return LinearExpression(stacked_data, model) + if slopes.sizes[BREAKPOINT_DIM] < 2: + return "linear" + slope_diffs = slopes.diff(BREAKPOINT_DIM) -def _resolve_expr( + valid_diffs = valid.isel({BREAKPOINT_DIM: slice(None, -1)}) + valid_diffs_hi = valid.isel({BREAKPOINT_DIM: slice(1, None)}) + valid_diffs_combined = valid_diffs.values & valid_diffs_hi.values + + sd_values = slope_diffs.values + if valid_diffs_combined.size == 0 or not valid_diffs_combined.any(): + return "linear" + + valid_sd = sd_values[valid_diffs_combined] + all_nonneg = bool(np.all(valid_sd >= -1e-10)) + all_nonpos = bool(np.all(valid_sd <= 1e-10)) + + if all_nonneg and all_nonpos: + return "linear" + if all_nonneg: + return "convex" + if all_nonpos: + return "concave" + return "mixed" + + +# --------------------------------------------------------------------------- +# Internal formulation functions +# --------------------------------------------------------------------------- + + +def _add_pwl_lp( model: Model, - expr: LinExprLike | dict[str, LinExprLike], - breakpoints: DataArray, - dim: str, - mask: DataArray | None, - skip_nan_check: bool, - exclude_dims: set[str] | None = None, -) -> tuple[LinearExpression, str | None, DataArray | None, DataArray | None]: - is_single, _ = _validate_piecewise_expr(expr) - - computed_mask = _compute_mask(mask, breakpoints, skip_nan_check) - - if is_single: - target_expr = _to_linexpr(expr) # type: ignore[arg-type] - return target_expr, None, computed_mask, computed_mask - - expr_dict: dict[str, LinExprLike] = expr # type: 
ignore[assignment] - expr_keys = set(expr_dict.keys()) - all_exclude = {dim} | (exclude_dims or set()) - resolved_link_dim = _resolve_link_dim(breakpoints, expr_keys, all_exclude) - lambda_mask = None - if computed_mask is not None: - if resolved_link_dim not in computed_mask.dims: - computed_mask = computed_mask.broadcast_like(breakpoints) - lambda_mask = computed_mask.any(dim=resolved_link_dim) - target_expr = _build_stacked_expr(model, expr_dict, breakpoints, resolved_link_dim) - return target_expr, resolved_link_dim, computed_mask, lambda_mask - - -def _add_pwl_sos2( + name: str, + x_expr: LinearExpression, + y_expr: LinearExpression, + sign: str, + x_points: DataArray, + y_points: DataArray, +) -> Constraint: + """Add pure LP tangent-line constraints.""" + dx = x_points.diff(BREAKPOINT_DIM) + dy = y_points.diff(BREAKPOINT_DIM) + slopes = dy / dx + + slopes = slopes.rename({BREAKPOINT_DIM: LP_SEG_DIM}) + n_seg = slopes.sizes[LP_SEG_DIM] + slopes[LP_SEG_DIM] = np.arange(n_seg) + + x_base = x_points.isel({BREAKPOINT_DIM: slice(None, -1)}) + y_base = y_points.isel({BREAKPOINT_DIM: slice(None, -1)}) + x_base = x_base.rename({BREAKPOINT_DIM: LP_SEG_DIM}) + y_base = y_base.rename({BREAKPOINT_DIM: LP_SEG_DIM}) + x_base[LP_SEG_DIM] = np.arange(n_seg) + y_base[LP_SEG_DIM] = np.arange(n_seg) + + rhs = y_base - slopes * x_base + lhs = y_expr - slopes * x_expr + + if sign == "<=": + con = model.add_constraints(lhs <= rhs, name=f"{name}{PWL_LP_SUFFIX}") + else: + con = model.add_constraints(lhs >= rhs, name=f"{name}{PWL_LP_SUFFIX}") + + # Domain bound constraints to keep x within [x_min, x_max] + x_lo = x_points.min(dim=BREAKPOINT_DIM) + x_hi = x_points.max(dim=BREAKPOINT_DIM) + model.add_constraints(x_expr >= x_lo, name=f"{name}{PWL_LP_DOMAIN_SUFFIX}_lo") + model.add_constraints(x_expr <= x_hi, name=f"{name}{PWL_LP_DOMAIN_SUFFIX}_hi") + + return con + + +def _add_pwl_sos2_core( model: Model, name: str, - breakpoints: DataArray, - dim: str, + x_expr: LinearExpression, 
target_expr: LinearExpression, - lambda_coords: list[pd.Index], + x_points: DataArray, + y_points: DataArray, lambda_mask: DataArray | None, + active: LinearExpression | None = None, ) -> Constraint: + """ + Core SOS2 formulation linking x_expr and target_expr via breakpoints. + + Creates lambda variables, SOS2 constraint, convexity constraint, + and linking constraints for both x and target. + + When ``active`` is provided, the convexity constraint becomes + ``sum(lambda) == active`` instead of ``== 1``, forcing all lambda + (and thus x, y) to zero when ``active=0``. + """ + extra = _extra_coords(x_points, BREAKPOINT_DIM) + lambda_coords = extra + [ + pd.Index(x_points.coords[BREAKPOINT_DIM].values, name=BREAKPOINT_DIM) + ] + lambda_name = f"{name}{PWL_LAMBDA_SUFFIX}" convex_name = f"{name}{PWL_CONVEX_SUFFIX}" - link_name = f"{name}{PWL_LINK_SUFFIX}" + x_link_name = f"{name}{PWL_X_LINK_SUFFIX}" + y_link_name = f"{name}{PWL_Y_LINK_SUFFIX}" lambda_var = model.add_variables( lower=0, upper=1, coords=lambda_coords, name=lambda_name, mask=lambda_mask ) - model.add_sos_constraints(lambda_var, sos_type=2, sos_dim=dim) + model.add_sos_constraints(lambda_var, sos_type=2, sos_dim=BREAKPOINT_DIM) - convex_con = model.add_constraints(lambda_var.sum(dim=dim) == 1, name=convex_name) + # Convexity constraint: sum(lambda) == 1 or sum(lambda) == active + rhs = active if active is not None else 1 + convex_con = model.add_constraints( + lambda_var.sum(dim=BREAKPOINT_DIM) == rhs, name=convex_name + ) + + x_weighted = (lambda_var * x_points).sum(dim=BREAKPOINT_DIM) + model.add_constraints(x_expr == x_weighted, name=x_link_name) - weighted_sum = (lambda_var * breakpoints).sum(dim=dim) - model.add_constraints(target_expr == weighted_sum, name=link_name) + y_weighted = (lambda_var * y_points).sum(dim=BREAKPOINT_DIM) + model.add_constraints(target_expr == y_weighted, name=y_link_name) return convex_con -def _add_pwl_incremental( +def _add_pwl_incremental_core( model: Model, name: str, - 
breakpoints: DataArray, - dim: str, + x_expr: LinearExpression, target_expr: LinearExpression, - extra_coords: list[pd.Index], - breakpoint_mask: DataArray | None, - link_dim: str | None, + x_points: DataArray, + y_points: DataArray, + bp_mask: DataArray | None, + active: LinearExpression | None = None, ) -> Constraint: + """ + Core incremental formulation linking x_expr and target_expr. + + Creates delta variables, fill-order constraints, and x/target link constraints. + + When ``active`` is provided, delta bounds are tightened to + ``δ_i ≤ active`` and base terms become ``x₀ * active``, + ``y₀ * active``, forcing x and y to zero when ``active=0``. + """ delta_name = f"{name}{PWL_DELTA_SUFFIX}" fill_name = f"{name}{PWL_FILL_SUFFIX}" - link_name = f"{name}{PWL_LINK_SUFFIX}" - - n_segments = breakpoints.sizes[dim] - 1 - seg_dim = f"{dim}_seg" - seg_index = pd.Index(range(n_segments), name=seg_dim) - delta_coords = extra_coords + [seg_index] - - steps = breakpoints.diff(dim).rename({dim: seg_dim}) - steps[seg_dim] = seg_index - - if breakpoint_mask is not None: - bp_mask = breakpoint_mask - if link_dim is not None: - bp_mask = bp_mask.all(dim=link_dim) - mask_lo = bp_mask.isel({dim: slice(None, -1)}).rename({dim: seg_dim}) - mask_hi = bp_mask.isel({dim: slice(1, None)}).rename({dim: seg_dim}) - mask_lo[seg_dim] = seg_index - mask_hi[seg_dim] = seg_index + x_link_name = f"{name}{PWL_X_LINK_SUFFIX}" + y_link_name = f"{name}{PWL_Y_LINK_SUFFIX}" + + n_segments = x_points.sizes[BREAKPOINT_DIM] - 1 + seg_index = pd.Index(range(n_segments), name=LP_SEG_DIM) + extra = _extra_coords(x_points, BREAKPOINT_DIM) + delta_coords = extra + [seg_index] + + x_steps = x_points.diff(BREAKPOINT_DIM).rename({BREAKPOINT_DIM: LP_SEG_DIM}) + x_steps[LP_SEG_DIM] = seg_index + y_steps = y_points.diff(BREAKPOINT_DIM).rename({BREAKPOINT_DIM: LP_SEG_DIM}) + y_steps[LP_SEG_DIM] = seg_index + + if bp_mask is not None: + mask_lo = bp_mask.isel({BREAKPOINT_DIM: slice(None, -1)}).rename( + 
{BREAKPOINT_DIM: LP_SEG_DIM} + ) + mask_hi = bp_mask.isel({BREAKPOINT_DIM: slice(1, None)}).rename( + {BREAKPOINT_DIM: LP_SEG_DIM} + ) + mask_lo[LP_SEG_DIM] = seg_index + mask_hi[LP_SEG_DIM] = seg_index delta_mask: DataArray | None = mask_lo & mask_hi else: delta_mask = None + # When active is provided, upper bound is active (binary) instead of 1 + delta_upper = 1 delta_var = model.add_variables( - lower=0, upper=1, coords=delta_coords, name=delta_name, mask=delta_mask + lower=0, + upper=delta_upper, + coords=delta_coords, + name=delta_name, + mask=delta_mask, ) + if active is not None: + # Tighten delta bounds: δ_i ≤ active + active_bound_name = f"{name}{PWL_ACTIVE_BOUND_SUFFIX}" + model.add_constraints(delta_var <= active, name=active_bound_name) + + # Binary indicator variables: y_i for each segment + inc_binary_name = f"{name}{PWL_INC_BINARY_SUFFIX}" + inc_link_name = f"{name}{PWL_INC_LINK_SUFFIX}" + inc_order_name = f"{name}{PWL_INC_ORDER_SUFFIX}" + + binary_var = model.add_variables( + binary=True, coords=delta_coords, name=inc_binary_name, mask=delta_mask + ) + + # Link constraints: δ_i ≤ y_i for all segments + model.add_constraints(delta_var <= binary_var, name=inc_link_name) + + # Order constraints: y_{i+1} ≤ δ_i for i = 0..n-2 fill_con: Constraint | None = None if n_segments >= 2: - delta_lo = delta_var.isel({seg_dim: slice(None, -1)}, drop=True) - delta_hi = delta_var.isel({seg_dim: slice(1, None)}, drop=True) + delta_lo = delta_var.isel({LP_SEG_DIM: slice(None, -1)}, drop=True) + delta_hi = delta_var.isel({LP_SEG_DIM: slice(1, None)}, drop=True) + # Keep existing fill constraint as LP relaxation tightener # Align coords for positional comparison (lo=[0..n-2], hi=[1..n-1]) - delta_hi = delta_hi.assign_coords({seg_dim: delta_lo.coords[seg_dim].values}) + delta_hi = delta_hi.assign_coords( + {LP_SEG_DIM: delta_lo.coords[LP_SEG_DIM].values} + ) fill_con = model.add_constraints(delta_hi <= delta_lo, name=fill_name) - bp0 = breakpoints.isel({dim: 0}) - 
weighted_sum = (delta_var * steps).sum(dim=seg_dim) + bp0 - link_con = model.add_constraints(target_expr == weighted_sum, name=link_name) + binary_hi = binary_var.isel({LP_SEG_DIM: slice(1, None)}, drop=True) + model.add_constraints(binary_hi <= delta_lo, name=inc_order_name) + + x0 = x_points.isel({BREAKPOINT_DIM: 0}) + y0 = y_points.isel({BREAKPOINT_DIM: 0}) + + # When active is provided, multiply base terms by active + x_base: DataArray | LinearExpression = x0 + y_base: DataArray | LinearExpression = y0 + if active is not None: + x_base = x0 * active + y_base = y0 * active - return fill_con if fill_con is not None else link_con + x_weighted = (delta_var * x_steps).sum(dim=LP_SEG_DIM) + x_base + model.add_constraints(x_expr == x_weighted, name=x_link_name) + y_weighted = (delta_var * y_steps).sum(dim=LP_SEG_DIM) + y_base + model.add_constraints(target_expr == y_weighted, name=y_link_name) -def _add_dpwl_sos2( + return fill_con if fill_con is not None else model.constraints[y_link_name] + + +def _add_dpwl_sos2_core( model: Model, name: str, - breakpoints: DataArray, - dim: str, - segment_dim: str, + x_expr: LinearExpression, target_expr: LinearExpression, - lambda_coords: list[pd.Index], + x_points: DataArray, + y_points: DataArray, lambda_mask: DataArray | None, - binary_coords: list[pd.Index], - binary_mask: DataArray | None, + active: LinearExpression | None = None, ) -> Constraint: + """ + Core disjunctive SOS2 formulation with separate x/y points. + + When ``active`` is provided, the segment selection becomes + ``sum(z_k) == active`` instead of ``== 1``, forcing all segment + binaries, lambdas, and thus x and y to zero when ``active=0``. 
+ """ binary_name = f"{name}{PWL_BINARY_SUFFIX}" select_name = f"{name}{PWL_SELECT_SUFFIX}" lambda_name = f"{name}{PWL_LAMBDA_SUFFIX}" convex_name = f"{name}{PWL_CONVEX_SUFFIX}" - link_name = f"{name}{PWL_LINK_SUFFIX}" + x_link_name = f"{name}{PWL_X_LINK_SUFFIX}" + y_link_name = f"{name}{PWL_Y_LINK_SUFFIX}" + + extra = _extra_coords(x_points, BREAKPOINT_DIM, SEGMENT_DIM) + lambda_coords = extra + [ + pd.Index(x_points.coords[SEGMENT_DIM].values, name=SEGMENT_DIM), + pd.Index(x_points.coords[BREAKPOINT_DIM].values, name=BREAKPOINT_DIM), + ] + binary_coords = extra + [ + pd.Index(x_points.coords[SEGMENT_DIM].values, name=SEGMENT_DIM), + ] + + binary_mask = ( + lambda_mask.any(dim=BREAKPOINT_DIM) if lambda_mask is not None else None + ) binary_var = model.add_variables( binary=True, coords=binary_coords, name=binary_name, mask=binary_mask ) + # Segment selection: sum(z_k) == 1 or sum(z_k) == active + rhs = active if active is not None else 1 select_con = model.add_constraints( - binary_var.sum(dim=segment_dim) == 1, name=select_name + binary_var.sum(dim=SEGMENT_DIM) == rhs, name=select_name ) lambda_var = model.add_variables( lower=0, upper=1, coords=lambda_coords, name=lambda_name, mask=lambda_mask ) - model.add_sos_constraints(lambda_var, sos_type=2, sos_dim=dim) + model.add_sos_constraints(lambda_var, sos_type=2, sos_dim=BREAKPOINT_DIM) + + model.add_constraints( + lambda_var.sum(dim=BREAKPOINT_DIM) == binary_var, name=convex_name + ) - model.add_constraints(lambda_var.sum(dim=dim) == binary_var, name=convex_name) + x_weighted = (lambda_var * x_points).sum(dim=[SEGMENT_DIM, BREAKPOINT_DIM]) + model.add_constraints(x_expr == x_weighted, name=x_link_name) - weighted_sum = (lambda_var * breakpoints).sum(dim=[segment_dim, dim]) - model.add_constraints(target_expr == weighted_sum, name=link_name) + y_weighted = (lambda_var * y_points).sum(dim=[SEGMENT_DIM, BREAKPOINT_DIM]) + model.add_constraints(target_expr == y_weighted, name=y_link_name) return select_con +# 
--------------------------------------------------------------------------- +# Main entry point +# --------------------------------------------------------------------------- + + def add_piecewise_constraints( model: Model, - expr: LinExprLike | dict[str, LinExprLike], - breakpoints: DataArray, - dim: str = DEFAULT_BREAKPOINT_DIM, - mask: DataArray | None = None, + descriptor: PiecewiseConstraintDescriptor | Constraint, + method: Literal["sos2", "incremental", "auto", "lp"] = "auto", name: str | None = None, skip_nan_check: bool = False, - method: Literal["sos2", "incremental", "auto"] = "sos2", ) -> Constraint: """ - Add a piecewise linear constraint using SOS2 or incremental formulation. + Add a piecewise linear constraint from a :class:`PiecewiseConstraintDescriptor`. - This method creates a piecewise linear constraint that links one or more - variables/expressions together via a set of breakpoints. It supports two - formulations: + Typically called as:: - - **SOS2** (default): Uses SOS2 (Special Ordered Set of type 2) with lambda - (interpolation) variables. Works for any breakpoints. - - **Incremental**: Uses delta variables with filling-order constraints. - Pure LP formulation (no SOS2 or binary variables), but requires strictly - monotonic breakpoints. + m.add_piecewise_constraints(piecewise(x, x_points, y_points) >= y) Parameters ---------- model : Model - The linopy model to add the constraint to. - expr : Variable, LinearExpression, or dict of these - The variable(s) or expression(s) to be linked by the piecewise constraint. - - If a single Variable/LinearExpression is passed, the breakpoints - directly specify the piecewise points for that expression. - - If a dict is passed, the keys must match coordinates of a dimension - of the breakpoints, allowing multiple expressions to be linked. - breakpoints : xr.DataArray - The breakpoint values defining the piecewise linear function. - Must have `dim` as one of its dimensions. 
If `expr` is a dict, - must also have a dimension with coordinates matching the dict keys. - dim : str, default "breakpoint" - The dimension in breakpoints that represents the breakpoint index. - This dimension's coordinates must be numeric (used as SOS2 weights - for the SOS2 method). - mask : xr.DataArray, optional - Boolean mask indicating which piecewise constraints are valid. - If None, auto-detected from NaN values in breakpoints (unless - skip_nan_check is True). + The linopy model. + descriptor : PiecewiseConstraintDescriptor + Created by comparing a variable/expression with a :class:`PiecewiseExpression`. + method : {"auto", "sos2", "incremental", "lp"}, default "auto" + Formulation method. name : str, optional - Base name for the generated variables and constraints. - If None, auto-generates names like "pwl0", "pwl1", etc. + Base name for generated variables/constraints. skip_nan_check : bool, default False - If True, skip automatic NaN detection in breakpoints. Use this - when you know breakpoints contain no NaN values for better performance. - method : Literal["sos2", "incremental", "auto"], default "sos2" - Formulation method. One of: - - ``"sos2"``: SOS2 formulation with lambda variables (default). - - ``"incremental"``: Incremental (delta) formulation. Requires strictly - monotonic breakpoints. Pure LP, no SOS2 or binary variables. - - ``"auto"``: Automatically selects ``"incremental"`` if breakpoints are - strictly monotonic, otherwise falls back to ``"sos2"``. + If True, skip NaN detection. Returns ------- Constraint - For SOS2: the convexity constraint (sum of lambda = 1). - For incremental: the filling-order constraint (or the link - constraint if only 2 breakpoints). - - Raises - ------ - ValueError - If expr is not a Variable, LinearExpression, or dict of these. - If breakpoints doesn't have the required dim dimension. - If the linking dimension cannot be auto-detected when expr is a dict. 
- If dim coordinates are not numeric (SOS2 method only). - If breakpoints are not strictly monotonic (incremental method). - If method is not one of 'sos2', 'incremental', 'auto'. - - Examples - -------- - Single variable piecewise constraint: - - >>> from linopy import Model - >>> import xarray as xr - >>> m = Model() - >>> x = m.add_variables(name="x") - >>> breakpoints = xr.DataArray([0, 10, 50, 100], dims=["bp"]) - >>> _ = m.add_piecewise_constraints(x, breakpoints, dim="bp") - - Notes - ----- - **SOS2 formulation:** - - 1. Lambda variables λ_i with bounds [0, 1] are created for each breakpoint - 2. SOS2 constraint ensures at most two adjacent λ_i can be non-zero - 3. Convexity constraint: Σ λ_i = 1 - 4. Linking constraints: expr = Σ λ_i × breakpoint_i (for each expression) - - **Incremental formulation** (for strictly monotonic breakpoints bp₀ < bp₁ < ... < bpₙ): - - 1. Delta variables δᵢ ∈ [0, 1] for i = 1, ..., n (one per segment) - 2. Filling-order constraints: δᵢ₊₁ ≤ δᵢ for i = 1, ..., n-1 - 3. Linking constraint: expr = bp₀ + Σᵢ δᵢ × (bpᵢ - bpᵢ₋₁) """ - if method not in ("sos2", "incremental", "auto"): + if not isinstance(descriptor, PiecewiseConstraintDescriptor): + raise TypeError( + f"Expected PiecewiseConstraintDescriptor, got {type(descriptor)}. 
" + f"Use: m.add_piecewise_constraints(piecewise(x, x_points, y_points) >= y)" + ) + + if method not in ("sos2", "incremental", "auto", "lp"): raise ValueError( - f"method must be 'sos2', 'incremental', or 'auto', got '{method}'" + f"method must be 'sos2', 'incremental', 'auto', or 'lp', got '{method}'" ) - _validate_breakpoints(breakpoints, dim) - breakpoints = _auto_broadcast_breakpoints(breakpoints, expr, dim) + pw = descriptor.piecewise_func + sign = descriptor.sign + y_lhs = descriptor.lhs + x_expr_raw = pw.expr + x_points = pw.x_points + y_points = pw.y_points + disjunctive = pw.disjunctive + active = pw.active - if method in ("incremental", "auto"): - is_monotonic = _check_strict_monotonicity(breakpoints, dim) - trailing_nan_only = _has_trailing_nan_only(breakpoints, dim) - if method == "auto": - if is_monotonic and trailing_nan_only: - method = "incremental" - else: - method = "sos2" - elif not is_monotonic: - raise ValueError( - "Incremental method requires strictly monotonic breakpoints " - "along the breakpoint dimension." - ) - if method == "incremental" and not trailing_nan_only: - raise ValueError( - "Incremental method does not support non-trailing NaN breakpoints. " - "NaN values must only appear at the end of the breakpoint sequence. " - "Use method='sos2' for breakpoints with gaps." 
- ) + # Broadcast points to match expression dimensions + x_points = _broadcast_points(x_points, x_expr_raw, y_lhs, disjunctive=disjunctive) + y_points = _broadcast_points(y_points, x_expr_raw, y_lhs, disjunctive=disjunctive) - if method == "sos2": - _validate_numeric_breakpoint_coords(breakpoints, dim) + # Compute mask + mask = _compute_combined_mask(x_points, y_points, skip_nan_check) + # Name if name is None: name = f"pwl{model._pwlCounter}" model._pwlCounter += 1 - target_expr, resolved_link_dim, computed_mask, lambda_mask = _resolve_expr( - model, expr, breakpoints, dim, mask, skip_nan_check - ) + # Convert to LinearExpressions + x_expr = _to_linexpr(x_expr_raw) + y_expr = _to_linexpr(y_lhs) - extra_coords = _extra_coords(breakpoints, dim, resolved_link_dim) - lambda_coords = extra_coords + [pd.Index(breakpoints.coords[dim].values, name=dim)] + # Convert active to LinearExpression if provided + active_expr = _to_linexpr(active) if active is not None else None - if method == "sos2": - return _add_pwl_sos2( - model, name, breakpoints, dim, target_expr, lambda_coords, lambda_mask + # Validate: active is not supported with LP method + if active_expr is not None and method == "lp": + raise ValueError( + "The 'active' parameter is not supported with method='lp'. " + "Use method='incremental' or method='sos2'." 
+ ) + + if disjunctive: + return _add_disjunctive( + model, + name, + x_expr, + y_expr, + sign, + x_points, + y_points, + mask, + method, + active_expr, ) else: - return _add_pwl_incremental( + return _add_continuous( model, name, - breakpoints, - dim, - target_expr, - extra_coords, - computed_mask, - resolved_link_dim, + x_expr, + y_expr, + sign, + x_points, + y_points, + mask, + method, + skip_nan_check, + active_expr, ) -def add_disjunctive_piecewise_constraints( +def _add_continuous( model: Model, - expr: LinExprLike | dict[str, LinExprLike], - breakpoints: DataArray, - dim: str = DEFAULT_BREAKPOINT_DIM, - segment_dim: str = DEFAULT_SEGMENT_DIM, - mask: DataArray | None = None, - name: str | None = None, - skip_nan_check: bool = False, + name: str, + x_expr: LinearExpression, + y_expr: LinearExpression, + sign: str, + x_points: DataArray, + y_points: DataArray, + mask: DataArray | None, + method: str, + skip_nan_check: bool, + active: LinearExpression | None = None, ) -> Constraint: - """ - Add a disjunctive piecewise linear constraint for disconnected segments. + """Handle continuous (non-disjunctive) piecewise constraints.""" + convexity: Literal["convex", "concave", "linear", "mixed"] | None = None + + # Determine actual method + if method == "auto": + if sign == "==": + if _check_strict_monotonicity(x_points) and _has_trailing_nan_only( + x_points + ): + method = "incremental" + else: + method = "sos2" + else: + if not _check_strict_increasing(x_points): + raise ValueError( + "Automatic method selection for piecewise inequalities requires " + "strictly increasing x_points. Pass breakpoints in increasing " + "x-order or use method='sos2'." 
+ ) + convexity = _detect_convexity(x_points, y_points) + if convexity == "linear": + method = "lp" + elif (sign == "<=" and convexity == "concave") or ( + sign == ">=" and convexity == "convex" + ): + method = "lp" + else: + method = "sos2" + elif method == "lp": + if sign == "==": + raise ValueError("Pure LP method is not supported for equality constraints") + convexity = _detect_convexity(x_points, y_points) + if convexity != "linear": + if sign == "<=" and convexity != "concave": + raise ValueError( + f"Pure LP method for '<=' requires concave or linear function, " + f"got {convexity}" + ) + if sign == ">=" and convexity != "convex": + raise ValueError( + f"Pure LP method for '>=' requires convex or linear function, " + f"got {convexity}" + ) + elif method == "incremental": + if not _check_strict_monotonicity(x_points): + raise ValueError("Incremental method requires strictly monotonic x_points") + if not _has_trailing_nan_only(x_points): + raise ValueError( + "Incremental method does not support non-trailing NaN breakpoints. " + "NaN values must only appear at the end of the breakpoint sequence." + ) - Unlike ``add_piecewise_constraints``, which models continuous piecewise - linear functions (all segments connected end-to-end), this method handles - **disconnected segments** (with gaps between them). The variable must lie - on exactly one segment, selected by binary indicator variables. + if method == "sos2": + _validate_numeric_breakpoint_coords(x_points) + if not _has_trailing_nan_only(x_points): + raise ValueError( + "SOS2 method does not support non-trailing NaN breakpoints. " + "NaN values must only appear at the end of the breakpoint sequence." + ) - Uses the disaggregated convex combination formulation (no big-M needed, - tight LP relaxation): + # LP formulation + if method == "lp": + if active is not None: + raise ValueError( + "The 'active' parameter is not supported with method='lp'. " + "Use method='incremental' or method='sos2'." 
+ ) + return _add_pwl_lp(model, name, x_expr, y_expr, sign, x_points, y_points) + + # SOS2 or incremental formulation + if sign == "==": + # Direct linking: y = f(x) + if method == "sos2": + return _add_pwl_sos2_core( + model, name, x_expr, y_expr, x_points, y_points, mask, active + ) + else: # incremental + return _add_pwl_incremental_core( + model, name, x_expr, y_expr, x_points, y_points, mask, active + ) + else: + # Inequality: create aux variable z, enforce z = f(x), then y <= z or y >= z + aux_name = f"{name}{PWL_AUX_SUFFIX}" + aux_coords = _extra_coords(x_points, BREAKPOINT_DIM) + z = model.add_variables(coords=aux_coords, name=aux_name) + z_expr = _to_linexpr(z) + + if method == "sos2": + result = _add_pwl_sos2_core( + model, name, x_expr, z_expr, x_points, y_points, mask, active + ) + else: # incremental + result = _add_pwl_incremental_core( + model, name, x_expr, z_expr, x_points, y_points, mask, active + ) - 1. Binary ``y_k ∈ {0,1}`` per segment, ``Σ y_k = 1`` - 2. Lambda ``λ_{k,i} ∈ [0,1]`` per breakpoint in each segment - 3. Convexity: ``Σ_i λ_{k,i} = y_k`` - 4. SOS2 within each segment (along breakpoint dim) - 5. Linking: ``expr = Σ_k Σ_i λ_{k,i} × bp_{k,i}`` + # Add inequality + ineq_name = f"{name}_ineq" + if sign == "<=": + model.add_constraints(y_expr <= z_expr, name=ineq_name) + else: + model.add_constraints(y_expr >= z_expr, name=ineq_name) - Parameters - ---------- - model : Model - The linopy model to add the constraint to. - expr : Variable, LinearExpression, or dict of these - The variable(s) or expression(s) to be linked by the piecewise - constraint. - breakpoints : xr.DataArray - Breakpoint values with at least ``dim`` and ``segment_dim`` - dimensions. Each slice along ``segment_dim`` defines one segment. - Use NaN to pad segments with fewer breakpoints. - dim : str, default "breakpoint" - Dimension for breakpoint indices within each segment. - Must have numeric coordinates. 
- segment_dim : str, default "segment" - Dimension indexing the segments. - mask : xr.DataArray, optional - Boolean mask. If None, auto-detected from NaN values. - name : str, optional - Base name for generated variables/constraints. Auto-generated - if None using the shared ``_pwlCounter``. - skip_nan_check : bool, default False - If True, skip NaN detection in breakpoints. + return result - Returns - ------- - Constraint - The selection constraint (``Σ y_k = 1``). - Raises - ------ - ValueError - If ``dim`` or ``segment_dim`` not in breakpoints dimensions. - If ``dim == segment_dim``. - If ``dim`` coordinates are not numeric. - If ``expr`` is not a Variable, LinearExpression, or dict. - - Examples - -------- - Two disconnected segments [0,10] and [50,100]: - - >>> from linopy import Model - >>> import xarray as xr - >>> m = Model() - >>> x = m.add_variables(name="x") - >>> breakpoints = xr.DataArray( - ... [[0, 10], [50, 100]], - ... dims=["segment", "breakpoint"], - ... coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ... 
) - >>> _ = m.add_disjunctive_piecewise_constraints(x, breakpoints) - """ - _validate_breakpoints(breakpoints, dim) - if segment_dim not in breakpoints.dims: +def _add_disjunctive( + model: Model, + name: str, + x_expr: LinearExpression, + y_expr: LinearExpression, + sign: str, + x_points: DataArray, + y_points: DataArray, + mask: DataArray | None, + method: str, + active: LinearExpression | None = None, +) -> Constraint: + """Handle disjunctive piecewise constraints.""" + if method == "lp": + raise ValueError("Pure LP method is not supported for disjunctive constraints") + if method == "incremental": raise ValueError( - f"breakpoints must have dimension '{segment_dim}', " - f"but only has dimensions {list(breakpoints.dims)}" + "Incremental method is not supported for disjunctive constraints" ) - if dim == segment_dim: - raise ValueError(f"dim and segment_dim must be different, both are '{dim}'") - _validate_numeric_breakpoint_coords(breakpoints, dim) - breakpoints = _auto_broadcast_breakpoints( - breakpoints, expr, dim, exclude_dims={segment_dim} - ) - if name is None: - name = f"pwl{model._pwlCounter}" - model._pwlCounter += 1 + _validate_numeric_breakpoint_coords(x_points) + if not _has_trailing_nan_only(x_points): + raise ValueError( + "Disjunctive SOS2 does not support non-trailing NaN breakpoints. " + "NaN values must only appear at the end of the breakpoint sequence." 
+ ) - target_expr, resolved_link_dim, computed_mask, lambda_mask = _resolve_expr( - model, - expr, - breakpoints, - dim, - mask, - skip_nan_check, - exclude_dims={segment_dim}, - ) + if sign == "==": + return _add_dpwl_sos2_core( + model, name, x_expr, y_expr, x_points, y_points, mask, active + ) + else: + # Create aux variable z, disjunctive SOS2 for z = f(x), then y <= z or y >= z + aux_name = f"{name}{PWL_AUX_SUFFIX}" + aux_coords = _extra_coords(x_points, BREAKPOINT_DIM, SEGMENT_DIM) + z = model.add_variables(coords=aux_coords, name=aux_name) + z_expr = _to_linexpr(z) + + result = _add_dpwl_sos2_core( + model, name, x_expr, z_expr, x_points, y_points, mask, active + ) - extra_coords = _extra_coords(breakpoints, dim, segment_dim, resolved_link_dim) - lambda_coords = extra_coords + [ - pd.Index(breakpoints.coords[segment_dim].values, name=segment_dim), - pd.Index(breakpoints.coords[dim].values, name=dim), - ] - binary_coords = extra_coords + [ - pd.Index(breakpoints.coords[segment_dim].values, name=segment_dim), - ] + ineq_name = f"{name}_ineq" + if sign == "<=": + model.add_constraints(y_expr <= z_expr, name=ineq_name) + else: + model.add_constraints(y_expr >= z_expr, name=ineq_name) - binary_mask = lambda_mask.any(dim=dim) if lambda_mask is not None else None - - return _add_dpwl_sos2( - model, - name, - breakpoints, - dim, - segment_dim, - target_expr, - lambda_coords, - lambda_mask, - binary_coords, - binary_mask, - ) + return result diff --git a/linopy/remote/__init__.py b/linopy/remote/__init__.py index 0ae1df26..d3d5e162 100644 --- a/linopy/remote/__init__.py +++ b/linopy/remote/__init__.py @@ -8,9 +8,13 @@ - OetcHandler: Cloud-based execution via OET Cloud service """ -from linopy.remote.oetc import OetcCredentials, OetcHandler, OetcSettings from linopy.remote.ssh import RemoteHandler +try: + from linopy.remote.oetc import OetcCredentials, OetcHandler, OetcSettings +except ImportError: + pass + __all__ = [ "RemoteHandler", "OetcHandler", diff --git 
a/linopy/remote/oetc.py b/linopy/remote/oetc.py index 5bea9c7c..ee94fd43 100644 --- a/linopy/remote/oetc.py +++ b/linopy/remote/oetc.py @@ -9,10 +9,15 @@ from datetime import datetime, timedelta from enum import Enum -import requests -from google.cloud import storage -from google.oauth2 import service_account -from requests import RequestException +try: + import requests + from google.cloud import storage + from google.oauth2 import service_account + from requests import RequestException + + _oetc_deps_available = True +except ImportError: + _oetc_deps_available = False import linopy @@ -85,6 +90,11 @@ class JobResult: class OetcHandler: def __init__(self, settings: OetcSettings) -> None: + if not _oetc_deps_available: + raise ImportError( + "The 'google-cloud-storage' and 'requests' packages are required " + "for OetcHandler. Install them with: pip install linopy[oetc]" + ) self.settings = settings self.jwt = self.__sign_in() self.cloud_provider_credentials = self.__get_cloud_provider_credentials() diff --git a/linopy/solver_capabilities.py b/linopy/solver_capabilities.py index f0507317..030659de 100644 --- a/linopy/solver_capabilities.py +++ b/linopy/solver_capabilities.py @@ -7,7 +7,6 @@ from __future__ import annotations -import platform from dataclasses import dataclass from enum import Enum, auto from importlib.metadata import PackageNotFoundError @@ -179,21 +178,12 @@ def supports(self, feature: SolverFeature) -> bool: display_name="SCIP", features=frozenset( { - SolverFeature.INTEGER_VARIABLES, - SolverFeature.LP_FILE_NAMES, - SolverFeature.READ_MODEL_FROM_FILE, - SolverFeature.SOLUTION_FILE_NOT_NEEDED, - } - if platform.system() == "Windows" - else { SolverFeature.INTEGER_VARIABLES, SolverFeature.QUADRATIC_OBJECTIVE, SolverFeature.LP_FILE_NAMES, SolverFeature.READ_MODEL_FROM_FILE, SolverFeature.SOLUTION_FILE_NOT_NEEDED, } - # SCIP has a bug with quadratic models on Windows, see: - # 
https://github.com/PyPSA/linopy/actions/runs/7615240686/job/20739454099?pr=78 ), ), "mosek": SolverInfo( diff --git a/linopy/solvers.py b/linopy/solvers.py index f1617fc0..73eb9339 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -1746,7 +1746,7 @@ def get_solver_solution() -> Solution: return Result(status, solution, m) -KnitroResult = namedtuple("KnitroResult", "reported_runtime") +KnitroResult = namedtuple("KnitroResult", "knitro_context reported_runtime") class Knitro(Solver[None]): @@ -1809,7 +1809,13 @@ def _extract_values( if n == 0: return pd.Series(dtype=float) - values = get_values_fn(kc, n - 1) + try: + # Compatible with KNITRO >= 15 + values = get_values_fn(kc) + except TypeError: + # Fallback for older wrappers requiring explicit indices + values = get_values_fn(kc, list(range(n))) + names = list(get_names_fn(kc)) return pd.Series(values, index=names, dtype=float) @@ -1932,12 +1938,14 @@ def get_solver_solution() -> Solution: knitro.KN_write_mps_file(kc, path_to_string(solution_fn)) return Result( - status, solution, KnitroResult(reported_runtime=reported_runtime) + status, + solution, + KnitroResult(knitro_context=kc, reported_runtime=reported_runtime), ) finally: - with contextlib.suppress(Exception): - knitro.KN_free(kc) + # Intentionally keep the Knitro context alive; do not free `kc` here. 
+ pass mosek_bas_re = re.compile(r" (XL|XU)\s+([^ \t]+)\s+([^ \t]+)| (LL|UL|BS)\s+([^ \t]+)") diff --git a/linopy/types.py b/linopy/types.py index 0e3662bf..7238c552 100644 --- a/linopy/types.py +++ b/linopy/types.py @@ -17,6 +17,7 @@ QuadraticExpression, ScalarLinearExpression, ) + from linopy.piecewise import PiecewiseConstraintDescriptor from linopy.variables import ScalarVariable, Variable # Type aliases using Union for Python 3.9 compatibility @@ -46,7 +47,9 @@ "LinearExpression", "QuadraticExpression", ] -ConstraintLike = Union["Constraint", "AnonymousScalarConstraint"] +ConstraintLike = Union[ + "Constraint", "AnonymousScalarConstraint", "PiecewiseConstraintDescriptor" +] LinExprLike = Union["Variable", "LinearExpression"] MaskLike = Union[numpy.ndarray, DataArray, Series, DataFrame] # noqa: UP007 SideLike = Union[ConstantLike, VariableLike, ExpressionLike] # noqa: UP007 diff --git a/linopy/variables.py b/linopy/variables.py index 2f8daf8d..d80a14bd 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -74,6 +74,7 @@ ScalarLinearExpression, ) from linopy.model import Model + from linopy.piecewise import PiecewiseConstraintDescriptor, PiecewiseExpression logger = logging.getLogger(__name__) @@ -515,13 +516,31 @@ def __rsub__(self, other: ConstantLike) -> LinearExpression: except TypeError: return NotImplemented - def __le__(self, other: SideLike) -> Constraint: + @overload + def __le__(self, other: PiecewiseExpression) -> PiecewiseConstraintDescriptor: ... + + @overload + def __le__(self, other: SideLike) -> Constraint: ... + + def __le__(self, other: SideLike) -> Constraint | PiecewiseConstraintDescriptor: return self.to_linexpr().__le__(other) - def __ge__(self, other: SideLike) -> Constraint: + @overload + def __ge__(self, other: PiecewiseExpression) -> PiecewiseConstraintDescriptor: ... + + @overload + def __ge__(self, other: SideLike) -> Constraint: ... 
+ + def __ge__(self, other: SideLike) -> Constraint | PiecewiseConstraintDescriptor: return self.to_linexpr().__ge__(other) - def __eq__(self, other: SideLike) -> Constraint: # type: ignore + @overload # type: ignore[override] + def __eq__(self, other: PiecewiseExpression) -> PiecewiseConstraintDescriptor: ... + + @overload + def __eq__(self, other: SideLike) -> Constraint: ... + + def __eq__(self, other: SideLike) -> Constraint | PiecewiseConstraintDescriptor: return self.to_linexpr().__eq__(other) def __gt__(self, other: Any) -> NotImplementedType: @@ -1770,7 +1789,7 @@ def __le__(self, other: int | float) -> AnonymousScalarConstraint: def __ge__(self, other: int) -> AnonymousScalarConstraint: return self.to_scalar_linexpr(1).__ge__(other) - def __eq__(self, other: int | float) -> AnonymousScalarConstraint: # type: ignore + def __eq__(self, other: int | float) -> AnonymousScalarConstraint: # type: ignore[override] return self.to_scalar_linexpr(1).__eq__(other) def __gt__(self, other: Any) -> None: diff --git a/pyproject.toml b/pyproject.toml index 0f5bd326..14a53a22 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,8 +37,6 @@ dependencies = [ "tqdm", "deprecation", "packaging", - "google-cloud-storage", - "requests", ] [project.urls] @@ -46,6 +44,10 @@ Homepage = "https://github.com/PyPSA/linopy" Source = "https://github.com/PyPSA/linopy" [project.optional-dependencies] +oetc = [ + "google-cloud-storage", + "requests", +] docs = [ "ipython==8.26.0", "numpydoc==1.7.0", @@ -157,6 +159,7 @@ ignore = [ 'D101', # Missing docstring in public class 'D102', # Missing docstring in public method 'D103', # Missing docstring in public function + 'D106', # Missing docstring in public nested class 'D107', # Missing docstring in __init__ 'D202', # No blank lines allowed after function docstring 'D203', # 1 blank line required before class docstring diff --git a/test/conftest.py b/test/conftest.py index 3197689b..b0a846ba 100644 --- a/test/conftest.py +++ 
b/test/conftest.py @@ -2,8 +2,11 @@ import os +import pandas as pd import pytest +from linopy import Model, Variable + def pytest_addoption(parser: pytest.Parser) -> None: """Add custom command line options.""" @@ -48,3 +51,41 @@ def pytest_collection_modifyitems( if solver_supports(solver, SolverFeature.GPU_ACCELERATION): item.add_marker(skip_gpu) item.add_marker(pytest.mark.gpu) + + +@pytest.fixture +def m() -> Model: + m = Model() + m.add_variables(pd.Series([0, 0]), 1, name="x") + m.add_variables(4, pd.Series([8, 10]), name="y") + m.add_variables(0, pd.DataFrame([[1, 2], [3, 4], [5, 6]]).T, name="z") + m.add_variables(coords=[pd.RangeIndex(20, name="dim_2")], name="v") + idx = pd.MultiIndex.from_product([[1, 2], ["a", "b"]], names=("level1", "level2")) + idx.name = "dim_3" + m.add_variables(coords=[idx], name="u") + return m + + +@pytest.fixture +def x(m: Model) -> Variable: + return m.variables["x"] + + +@pytest.fixture +def y(m: Model) -> Variable: + return m.variables["y"] + + +@pytest.fixture +def z(m: Model) -> Variable: + return m.variables["z"] + + +@pytest.fixture +def v(m: Model) -> Variable: + return m.variables["v"] + + +@pytest.fixture +def u(m: Model) -> Variable: + return m.variables["u"] diff --git a/test/remote/test_oetc.py b/test/remote/test_oetc.py index d937e376..0704d24d 100644 --- a/test/remote/test_oetc.py +++ b/test/remote/test_oetc.py @@ -5,10 +5,11 @@ from unittest.mock import Mock, patch import pytest -import requests -from requests import RequestException -from linopy.remote.oetc import ( +requests = pytest.importorskip("requests") +from requests import RequestException # noqa: E402 + +from linopy.remote.oetc import ( # noqa: E402 AuthenticationResult, ComputeProvider, GcpCredentials, diff --git a/test/remote/test_oetc_job_polling.py b/test/remote/test_oetc_job_polling.py index 96ec98b4..4b2681f9 100644 --- a/test/remote/test_oetc_job_polling.py +++ b/test/remote/test_oetc_job_polling.py @@ -9,9 +9,11 @@ from unittest.mock import 
Mock, patch import pytest -from requests import RequestException -from linopy.remote.oetc import ( +requests = pytest.importorskip("requests") +from requests import RequestException # noqa: E402 + +from linopy.remote.oetc import ( # noqa: E402 AuthenticationResult, ComputeProvider, OetcCredentials, diff --git a/test/test_common.py b/test/test_common.py index 4b84755a..64e4bf6f 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -10,7 +10,6 @@ import polars as pl import pytest import xarray as xr -from test_linear_expression import m, u, x # noqa: F401 from xarray import DataArray from xarray.testing.assertions import assert_equal @@ -96,17 +95,6 @@ def test_as_dataarray_with_series_dims_superset() -> None: assert list(da.coords[target_dim].values) == target_index -def test_as_dataarray_with_series_override_coords() -> None: - target_dim = "dim_0" - target_index = ["a", "b", "c"] - s = pd.Series([1, 2, 3], index=target_index) - with pytest.warns(UserWarning): - da = as_dataarray(s, coords=[[1, 2, 3]]) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - def test_as_dataarray_with_series_aligned_coords() -> None: """This should not give out a warning even though coords are given.""" target_dim = "dim_0" @@ -214,19 +202,6 @@ def test_as_dataarray_dataframe_dims_superset() -> None: assert list(da.coords[target_dims[1]].values) == target_columns -def test_as_dataarray_dataframe_override_coords() -> None: - target_dims = ("dim_0", "dim_1") - target_index = ["a", "b"] - target_columns = ["A", "B"] - df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) - with pytest.warns(UserWarning): - da = as_dataarray(df, coords=[[1, 2], [2, 3]]) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - def 
test_as_dataarray_dataframe_aligned_coords() -> None: """This should not give out a warning even though coords are given.""" target_dims = ("dim_0", "dim_1") diff --git a/test/test_constraints.py b/test/test_constraints.py index b20b18cf..9fc0086b 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -5,6 +5,8 @@ @author: fabulous """ +from typing import Any + import dask import dask.array.core import numpy as np @@ -12,7 +14,7 @@ import pytest import xarray as xr -from linopy import EQUAL, GREATER_EQUAL, LESS_EQUAL, Model +from linopy import EQUAL, GREATER_EQUAL, LESS_EQUAL, Model, Variable, available_solvers from linopy.testing import assert_conequal # Test model functions @@ -155,7 +157,7 @@ def test_constraint_assignment_with_reindex() -> None: ), ], ) -def test_constraint_rhs_lower_dim(rhs_factory) -> None: +def test_constraint_rhs_lower_dim(rhs_factory: Any) -> None: m = Model() naxis = np.arange(10, dtype=float) maxis = np.arange(10).astype(str) @@ -189,7 +191,7 @@ def test_constraint_rhs_higher_dim_constant_broadcasts() -> None: ), ], ) -def test_constraint_rhs_higher_dim_expression(rhs_factory) -> None: +def test_constraint_rhs_higher_dim_expression(rhs_factory: Any) -> None: m = Model() x = m.add_variables(coords=[range(5)], name="x") @@ -324,3 +326,105 @@ def test_sanitize_infinities() -> None: m.add_constraints(x >= np.inf, name="con_wrong_inf") with pytest.raises(ValueError): m.add_constraints(y <= -np.inf, name="con_wrong_neg_inf") + + +class TestConstraintCoordinateAlignment: + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def subset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "xarray": + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) + + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def superset(self, request: Any) -> xr.DataArray | pd.Series: + if 
request.param == "xarray": + return xr.DataArray( + np.arange(25, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + return pd.Series( + np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") + ) + + def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + con = v <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert con.rhs.sel(dim_2=3).item() == 30.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_var_comparison_subset( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = v <= subset + elif sign == GREATER_EQUAL: + con = v >= subset + else: + con = v == subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + expr = v + 5 + con = expr <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) + assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_subset_comparison_var( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = subset <= v + elif sign == GREATER_EQUAL: + con = subset >= v + else: + con = subset == v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert np.isnan(con.rhs.sel(dim_2=0).item()) + assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) + def test_superset_comparison_var( + self, v: Variable, superset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = superset <= v + else: + con = superset >= v + assert 
con.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(con.lhs.coeffs.values).any() + assert not np.isnan(con.rhs.values).any() + + def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: + rhs = xr.DataArray( + [[1.0, 2.0]], + dims=["extra", "dim_2"], + coords={"dim_2": [0, 1]}, + ) + c = v <= rhs + assert "extra" in c.dims + + def test_subset_constraint_solve_integration(self) -> None: + if not available_solvers: + pytest.skip("No solver available") + solver = "highs" if "highs" in available_solvers else available_solvers[0] + m = Model() + coords = pd.RangeIndex(5, name="i") + x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") + subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) + m.add_constraints(x <= subset_ub, name="subset_ub") + m.add_objective(x.sum(), sense="max") + m.solve(solver_name=solver) + sol = m.solution["x"] + assert sol.sel(i=1).item() == pytest.approx(10.0) + assert sol.sel(i=3).item() == pytest.approx(20.0) + assert sol.sel(i=0).item() == pytest.approx(100.0) + assert sol.sel(i=2).item() == pytest.approx(100.0) + assert sol.sel(i=4).item() == pytest.approx(100.0) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index ed808e78..4a54e6d7 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -7,6 +7,8 @@ from __future__ import annotations +from typing import Any + import numpy as np import pandas as pd import polars as pl @@ -21,46 +23,6 @@ from linopy.variables import ScalarVariable -@pytest.fixture -def m() -> Model: - m = Model() - - m.add_variables(pd.Series([0, 0]), 1, name="x") - m.add_variables(4, pd.Series([8, 10]), name="y") - m.add_variables(0, pd.DataFrame([[1, 2], [3, 4], [5, 6]]).T, name="z") - m.add_variables(coords=[pd.RangeIndex(20, name="dim_2")], name="v") - - idx = pd.MultiIndex.from_product([[1, 2], ["a", "b"]], names=("level1", "level2")) - idx.name = "dim_3" - m.add_variables(coords=[idx], name="u") - 
return m - - -@pytest.fixture -def x(m: Model) -> Variable: - return m.variables["x"] - - -@pytest.fixture -def y(m: Model) -> Variable: - return m.variables["y"] - - -@pytest.fixture -def z(m: Model) -> Variable: - return m.variables["z"] - - -@pytest.fixture -def v(m: Model) -> Variable: - return m.variables["v"] - - -@pytest.fixture -def u(m: Model) -> Variable: - return m.variables["u"] - - def test_empty_linexpr(m: Model) -> None: LinearExpression(None, m) @@ -584,26 +546,23 @@ def test_linear_expression_multiplication_invalid( expr / x -class TestExactAlignmentDefault: - """ - Test the alignment convention: exact for all operations (+, -, *, /). - - v has dim_2=[0..19] (20 entries). - subset has dim_2=[1, 3] (2 entries, subset of v's coords). - superset has dim_2=[0..24] (25 entries, superset of v's coords). - - Each test shows the operation, verifies the exact default (raises), - then shows the explicit join= that recovers the desired result. - """ - - @pytest.fixture - def subset(self) -> xr.DataArray: - return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - - @pytest.fixture - def superset(self) -> xr.DataArray: - return xr.DataArray( - np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} +class TestCoordinateAlignment: + @pytest.fixture(params=["da", "series"]) + def subset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) + + @pytest.fixture(params=["da", "series"]) + def superset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray( + np.arange(25, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + return pd.Series( + np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") ) @pytest.fixture @@ -622,350 +581,411 @@ def expected_fill(self) -> np.ndarray: arr[3] = 
30.0 return arr - # --- Addition / subtraction with subset constant --- - - def test_var_add_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - # now raises - with pytest.raises(ValueError, match="exact"): - v + subset - - # explicit join="left" recovers old behavior: 20 entries, fill 0 - result = v.add(subset, join="left") - assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal(result.const.values, expected_fill) - - def test_var_sub_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - with pytest.raises(ValueError, match="exact"): - v - subset - - result = v.sub(subset, join="left") - assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal(result.const.values, -expected_fill) - - def test_expr_add_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - with pytest.raises(ValueError, match="exact"): - (v + 5) + subset - - result = (v + 5).add(subset, join="left") - assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal(result.const.values, expected_fill + 5) - - # --- Addition with superset constant --- - - def test_var_add_superset(self, v: Variable, superset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v + superset - - result = v.add(superset, join="left") - assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.const.values).any() - - # --- Addition / multiplication with disjoint coords --- - - def test_disjoint_add(self, v: Variable) -> None: - disjoint = xr.DataArray( - [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - with pytest.raises(ValueError, match="exact"): - v + disjoint - - result = v.add(disjoint, join="outer") - assert result.sizes["dim_2"] == 22 # union of [0..19] and [50, 60] - - def test_disjoint_mul(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - with 
pytest.raises(ValueError, match="exact"): - v * disjoint - - # explicit join="left": 20 entries, all zeros - result = v.mul(disjoint, join="left") - assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) - - def test_disjoint_div(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - with pytest.raises(ValueError, match="exact"): - v / disjoint - - # --- Multiplication / division with subset constant --- - - def test_var_mul_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - with pytest.raises(ValueError, match="exact"): - v * subset - - # explicit join="inner": 2 entries (intersection) - result = v.mul(subset, join="inner") - assert result.sizes["dim_2"] == 2 - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(10.0) - assert result.coeffs.squeeze().sel(dim_2=3).item() == pytest.approx(30.0) - - # explicit join="left" recovers old behavior: 20 entries, fill 0 - result = v.mul(subset, join="left") - assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - def test_expr_mul_subset(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - (1 * v) * subset - - result = (1 * v).mul(subset, join="inner") - assert result.sizes["dim_2"] == 2 - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(10.0) - - def test_var_mul_superset(self, v: Variable, superset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v * superset - - result = v.mul(superset, join="inner") - assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.coeffs.values).any() - - def test_var_div_subset(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v / subset - - # explicit join="inner": 2 entries - result = v.div(subset, 
join="inner") - assert result.sizes["dim_2"] == 2 - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) - assert result.coeffs.squeeze().sel(dim_2=3).item() == pytest.approx(1.0 / 30) - - # explicit join="left": 20 entries, fill 1 - result = v.div(subset, join="left") - assert result.sizes["dim_2"] == 20 - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) - assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) - - # --- Constraints with subset RHS --- - - def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v <= subset - - # explicit join="left": 20 entries, NaN where RHS missing - con = v.to_linexpr().le(subset, join="left") - assert con.sizes["dim_2"] == 20 - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert con.rhs.sel(dim_2=3).item() == 30.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - expr = v + 5 - with pytest.raises(ValueError, match="exact"): - expr <= subset - - con = expr.le(subset, join="left") - assert con.sizes["dim_2"] == 20 - assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) - assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - @pytest.mark.parametrize("sign", ["<=", ">=", "=="]) - def test_var_comparison_subset( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - with pytest.raises(ValueError, match="exact"): - if sign == "<=": - v <= subset - elif sign == ">=": - v >= subset + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + vals[5] = np.nan + vals[19] = np.nan + if request.param == "xarray": + return xr.DataArray(vals, dims=["dim_2"], coords={"dim_2": range(20)}) + return pd.Series(vals, 
index=pd.Index(range(20), name="dim_2")) + + class TestSubset: + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_fills_zeros( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target * subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_subset_fills_zeros( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + if operand == "var": + result = v + subset + expected = expected_fill else: - v == subset - - def test_constraint_le_join_inner(self, v: Variable, subset: xr.DataArray) -> None: - con = v.to_linexpr().le(subset, join="inner") - assert con.sizes["dim_2"] == 2 - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert con.rhs.sel(dim_2=3).item() == 30.0 - - # --- Matching coordinates: unchanged behavior --- - - def test_add_matching_unchanged(self, v: Variable, matching: xr.DataArray) -> None: - result = v + matching - assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.const.values).any() - - def test_mul_matching_unchanged(self, v: Variable, matching: xr.DataArray) -> None: - result = v * matching - assert result.sizes["dim_2"] == 20 - - def test_le_matching_unchanged(self, v: Variable, matching: xr.DataArray) -> None: - con = v <= matching - assert con.sizes["dim_2"] == 20 - - def test_add_commutativity_matching( - self, v: Variable, matching: xr.DataArray - ) -> None: - assert_linequal(v + matching, matching + v) - - def test_mul_commutativity(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v * subset - with pytest.raises(ValueError, match="exact"): - subset * v - - # --- Explicit join modes --- - - def 
test_add_join_inner(self, v: Variable, subset: xr.DataArray) -> None: - result = v.add(subset, join="inner") - assert result.sizes["dim_2"] == 2 - assert result.const.sel(dim_2=1).item() == 10.0 - assert result.const.sel(dim_2=3).item() == 30.0 - - def test_add_join_outer(self, v: Variable, subset: xr.DataArray) -> None: - result = v.add(subset, join="outer") - assert result.sizes["dim_2"] == 20 - assert result.const.sel(dim_2=1).item() == 10.0 - assert result.const.sel(dim_2=0).item() == 0.0 - - def test_add_positional_assign_coords(self, v: Variable) -> None: - disjoint = xr.DataArray( - np.ones(20), dims=["dim_2"], coords={"dim_2": range(50, 70)} - ) - result = v + disjoint.assign_coords(dim_2=v.coords["dim_2"]) - assert result.sizes["dim_2"] == 20 - assert list(result.coords["dim_2"].values) == list(range(20)) - - # --- Quadratic expressions --- - - def test_quadexpr_add_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - qexpr = v * v - with pytest.raises(ValueError, match="exact"): - qexpr + subset - - result = qexpr.add(subset, join="left") - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal(result.const.values, expected_fill) - - def test_quadexpr_mul_subset( - self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray - ) -> None: - qexpr = v * v - with pytest.raises(ValueError, match="exact"): - qexpr * subset - - # explicit join="inner": 2 entries - result = qexpr.mul(subset, join="inner") - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == 2 - - # explicit join="left": 20 entries - result = qexpr.mul(subset, join="left") - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - # --- Multi-dimensional --- - - def test_multidim_subset_mul(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") 
- coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - - with pytest.raises(ValueError, match="exact"): - w * subset_2d - - # explicit join="inner": 2x2 - result = w.mul(subset_2d, join="inner") - assert result.sizes["a"] == 2 - assert result.sizes["b"] == 2 - - # explicit join="left": 4x5, zeros at non-subset positions - result = w.mul(subset_2d, join="left") - assert result.sizes["a"] == 4 - assert result.sizes["b"] == 5 - assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) - assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) - assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) - - def test_multidim_subset_add(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") - coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - - with pytest.raises(ValueError, match="exact"): - w + subset_2d - - # --- Edge cases --- - - def test_constraint_rhs_mismatched_coords_raises(self, v: Variable) -> None: - rhs = xr.DataArray( - [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} + result = (v + 5) + subset + expected = expected_fill + 5 + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_subset_fills_negated( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + if operand == "var": + result = v - subset + expected = -expected_fill + else: + result = (v + 5) - subset + expected = 5 - expected_fill + assert result.sizes["dim_2"] == 
v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_subset_inverts_nonzero( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else 1 * v + result = target / subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) + assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + + def test_subset_add_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset + v + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + def test_subset_sub_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset - v + np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) + + class TestSuperset: + def test_add_superset_pins_to_lhs_coords( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v + superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + + def test_add_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset + v, v + superset) + + def test_sub_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset - v, -v + superset) + + def test_mul_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset * v, v * superset) + + def test_mul_superset_pins_to_lhs_coords( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v * superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: + superset_nonzero = xr.DataArray( + 
np.arange(1, 26, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + result = v / superset_nonzero + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + class TestDisjoint: + def test_add_disjoint_fills_zeros(self, v: Variable) -> None: + disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v + disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, np.zeros(20)) + + def test_mul_disjoint_fills_zeros(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v * disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) + + def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v / disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + class TestCommutativity: + @pytest.mark.parametrize( + "make_lhs,make_rhs", + [ + (lambda v, s: s * v, lambda v, s: v * s), + (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), + (lambda v, s: s + v, lambda v, s: v + s), + (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), + ], + ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], ) - # Raises because dim_2 coords [0,1] don't match v's [0..19] (exact join) - with pytest.raises(ValueError, match="exact"): - v <= rhs - - def test_add_constant_extra_dims_broadcasts(self, v: Variable) -> None: - # Constant with only new dims (no shared dim overlap) broadcasts freely - da = xr.DataArray([1.0, 2.0, 3.0], 
dims=["extra"]) - result = v + da - assert "extra" in result.dims - result = v - da - assert "extra" in result.dims - result = v * da - assert "extra" in result.dims - - def test_da_truediv_var_raises(self, v: Variable) -> None: - da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) - with pytest.raises(TypeError): - da / v # type: ignore[operator] - - def test_da_eq_da_still_works(self) -> None: - da1 = xr.DataArray([1, 2, 3]) - da2 = xr.DataArray([1, 2, 3]) - result = da1 == da2 - assert result.values.all() - - def test_da_eq_scalar_still_works(self) -> None: - da = xr.DataArray([1, 2, 3]) - result = da == 2 - np.testing.assert_array_equal(result.values, [False, True, False]) - - def test_subset_constraint_solve_integration(self) -> None: - from linopy import available_solvers - - if not available_solvers: - pytest.skip("No solver available") - m = Model() - coords = pd.RangeIndex(5, name="i") - x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") - subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) - # exact default raises — use explicit join="left" (NaN = no constraint) - m.add_constraints(x.to_linexpr().le(subset_ub, join="left"), name="subset_ub") - m.add_objective(x.sum(), sense="max") - m.solve(solver_name=available_solvers[0]) - sol = m.solution["x"] - assert sol.sel(i=1).item() == pytest.approx(10.0) - assert sol.sel(i=3).item() == pytest.approx(20.0) - assert sol.sel(i=0).item() == pytest.approx(100.0) - assert sol.sel(i=2).item() == pytest.approx(100.0) - assert sol.sel(i=4).item() == pytest.approx(100.0) + def test_commutativity( + self, + v: Variable, + subset: xr.DataArray, + make_lhs: Any, + make_rhs: Any, + ) -> None: + assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) + + def test_sub_var_anticommutative( + self, v: Variable, subset: xr.DataArray + ) -> None: + assert_linequal(subset - v, -v + subset) + + def test_sub_expr_anticommutative( + self, v: Variable, subset: xr.DataArray 
+ ) -> None: + expr = v + 5 + assert_linequal(subset - expr, -(expr - subset)) + + def test_add_commutativity_full_coords(self, v: Variable) -> None: + full = xr.DataArray( + np.arange(20, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(20)}, + ) + assert_linequal(v + full, full + v) + + class TestQuadratic: + def test_quadexpr_add_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr + subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected_fill) + + def test_quadexpr_sub_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr - subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, -expected_fill) + + def test_quadexpr_mul_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr * subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_mul_quadexpr( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = subset * qexpr + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: + qexpr = v * v + assert_quadequal(subset + 
qexpr, qexpr + subset) + + class TestMissingValues: + """Same shape as variable but with NaN entries in the constant.""" + + EXPECTED_NAN_MASK = np.zeros(20, dtype=bool) + EXPECTED_NAN_MASK[[0, 5, 19]] = True + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_nan_propagates( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else v + 5 + result = target + nan_constant + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.const.values), self.EXPECTED_NAN_MASK + ) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_nan_propagates( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else v + 5 + result = target - nan_constant + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.const.values), self.EXPECTED_NAN_MASK + ) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_nan_propagates( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target * nan_constant + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.coeffs.squeeze().values), self.EXPECTED_NAN_MASK + ) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_nan_propagates( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target / nan_constant + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.coeffs.squeeze().values), self.EXPECTED_NAN_MASK + ) + + def test_add_commutativity( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + result_a = v + nan_constant + result_b = nan_constant + v + # Compare non-NaN values are equal and 
NaN positions match + nan_mask_a = np.isnan(result_a.const.values) + nan_mask_b = np.isnan(result_b.const.values) + np.testing.assert_array_equal(nan_mask_a, nan_mask_b) + np.testing.assert_array_equal( + result_a.const.values[~nan_mask_a], + result_b.const.values[~nan_mask_b], + ) + np.testing.assert_array_equal( + result_a.coeffs.values, result_b.coeffs.values + ) + + def test_mul_commutativity( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + result_a = v * nan_constant + result_b = nan_constant * v + nan_mask_a = np.isnan(result_a.coeffs.values) + nan_mask_b = np.isnan(result_b.coeffs.values) + np.testing.assert_array_equal(nan_mask_a, nan_mask_b) + np.testing.assert_array_equal( + result_a.coeffs.values[~nan_mask_a], + result_b.coeffs.values[~nan_mask_b], + ) + + def test_quadexpr_add_nan( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + qexpr = v * v + result = qexpr + nan_constant + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == 20 + np.testing.assert_array_equal( + np.isnan(result.const.values), self.EXPECTED_NAN_MASK + ) + + class TestMultiDim: + def test_multidim_subset_mul(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w * subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) + assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) + + def test_multidim_subset_add(self, m: Model) -> None: + coords_a = 
pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w + subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.const.values).any() + assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) + + class TestXarrayCompat: + def test_da_eq_da_still_works(self) -> None: + da1 = xr.DataArray([1, 2, 3]) + da2 = xr.DataArray([1, 2, 3]) + result = da1 == da2 + assert result.values.all() + + def test_da_eq_scalar_still_works(self) -> None: + da = xr.DataArray([1, 2, 3]) + result = da == 2 + np.testing.assert_array_equal(result.values, [False, True, False]) + + def test_da_truediv_var_raises(self, v: Variable) -> None: + da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) + with pytest.raises(TypeError): + da / v # type: ignore[operator] def test_expression_inherited_properties(x: Variable, y: Variable) -> None: @@ -1817,269 +1837,285 @@ def b(self, m2: Model) -> Variable: def c(self, m2: Model) -> Variable: return m2.variables["c"] - def test_add_join_none_raises_on_mismatch(self, a: Variable, b: Variable) -> None: - # a has i=[0,1,2], b has i=[1,2,3] — exact default raises - with pytest.raises(ValueError, match="exact"): - a.to_linexpr() + b.to_linexpr() - with pytest.raises(ValueError, match="exact"): - a.to_linexpr().add(b.to_linexpr(), join=None) - - def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_add_expr_join_outer(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="outer") - assert 
list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_add_expr_join_left(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="left") - assert list(result.data.indexes["i"]) == [0, 1, 2] - - def test_add_expr_join_right(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="right") - assert list(result.data.indexes["i"]) == [1, 2, 3] - - def test_add_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_add_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().add(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_add_constant_positional(self, a: Variable) -> None: - expr = a.to_linexpr() - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr + const.assign_coords(i=expr.coords["i"]) - assert list(result.data.indexes["i"]) == [0, 1, 2] - assert (result.const.values == const.values).all() - - def test_sub_expr_join_inner(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().sub(b.to_linexpr(), join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_mul_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_mul_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().mul(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - assert result.coeffs.sel(i=0).item() == 0 - assert result.coeffs.sel(i=1).item() == 2 - assert 
result.coeffs.sel(i=2).item() == 3 - - def test_div_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().div(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_div_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().div(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_variable_add_join(self, a: Variable, b: Variable) -> None: - result = a.add(b, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_sub_join(self, a: Variable, b: Variable) -> None: - result = a.sub(b, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_mul_join(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_div_join(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.div(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_mul_expr_with_join_raises(self, a: Variable, b: Variable) -> None: - with pytest.raises(TypeError, match="join parameter is not supported"): - a.to_linexpr().mul(b.to_linexpr(), join="inner") - - def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_same_shape_add_assign_coords(self, a: Variable, c: Variable) -> None: - result = a.to_linexpr() + c.to_linexpr().assign_coords(i=a.coords["i"]) - assert list(result.data.indexes["i"]) == [0, 1, 2] - - def test_add_expr_outer_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 
2 * b + 10 - result = expr_a.add(expr_b, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 15 - assert result.const.sel(i=3).item() == 10 - - def test_add_expr_inner_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 2 * b + 10 - result = expr_a.add(expr_b, join="inner") - assert list(result.coords["i"].values) == [1, 2] - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 15 - - def test_add_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) - result = expr.add(const, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 5 - assert result.const.sel(i=3).item() == 20 - - def test_add_constant_inner_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) - result = expr.add(const, join="inner") - assert list(result.coords["i"].values) == [1] - assert result.const.sel(i=1).item() == 15 - - def test_add_constant_positional_different_coords(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr + other.assign_coords(i=expr.coords["i"]) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [15, 25, 35]) - - def test_sub_constant_positional(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr - other.assign_coords(i=expr.coords["i"]) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [-5, 
-15, -25]) - - def test_sub_expr_outer_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 2 * b + 10 - result = expr_a.sub(expr_b, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == -5 - assert result.const.sel(i=2).item() == -5 - assert result.const.sel(i=3).item() == -10 - - def test_mul_constant_positional(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr * other.assign_coords(i=expr.coords["i"]) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [10, 15, 20]) - np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - - def test_mul_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([2, 3], dims=["i"], coords={"i": [1, 3]}) - result = expr.mul(other, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 0 - assert result.const.sel(i=1).item() == 10 - assert result.const.sel(i=2).item() == 0 - assert result.const.sel(i=3).item() == 0 - assert result.coeffs.squeeze().sel(i=1).item() == 2 - assert result.coeffs.squeeze().sel(i=0).item() == 0 - - def test_div_constant_positional(self, a: Variable) -> None: - expr = 1 * a + 10 - other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr / other.assign_coords(i=expr.coords["i"]) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [5.0, 2.0, 1.0]) - - def test_div_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 10 - other = xr.DataArray([2.0, 5.0], dims=["i"], coords={"i": [1, 3]}) - result = expr.div(other, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=1).item() == 
pytest.approx(5.0) - assert result.coeffs.squeeze().sel(i=1).item() == pytest.approx(0.5) - assert result.const.sel(i=0).item() == pytest.approx(10.0) - assert result.coeffs.squeeze().sel(i=0).item() == pytest.approx(1.0) - - def test_div_expr_with_join_raises(self, a: Variable, b: Variable) -> None: - with pytest.raises(TypeError): - a.to_linexpr().div(b.to_linexpr(), join="outer") - - def test_variable_add_outer_values(self, a: Variable, b: Variable) -> None: - result = a.add(b, join="outer") - assert isinstance(result, LinearExpression) - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.nterm == 2 - - def test_variable_mul_positional(self, a: Variable) -> None: - other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) - result = a * other.assign_coords(i=a.coords["i"]) - assert isinstance(result, LinearExpression) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - - def test_variable_div_positional(self, a: Variable) -> None: - other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) - result = a / other.assign_coords(i=a.coords["i"]) - assert isinstance(result, LinearExpression) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_almost_equal( - result.coeffs.squeeze().values, [0.5, 0.2, 0.1] - ) + class TestAddition: + def test_add_join_none_preserves_default( + self, a: Variable, b: Variable + ) -> None: + result_default = a.to_linexpr() + b.to_linexpr() + result_none = a.to_linexpr().add(b.to_linexpr(), join=None) + assert_linequal(result_default, result_none) + + def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_add_expr_join_outer(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="outer") + assert 
list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_add_expr_join_left(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="left") + assert list(result.data.indexes["i"]) == [0, 1, 2] + + def test_add_expr_join_right(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="right") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_add_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_add_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().add(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: + expr = a.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [0, 1, 2]}) + result = expr.add(const, join="override") + assert list(result.data.indexes["i"]) == [0, 1, 2] + assert (result.const.values == const.values).all() + + def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: + expr_a = 1 * a + 5 + const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) + for join in ["override", "outer", "inner"]: + result = expr_a.add(const, join=join) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [6, 7, 8]) + + def test_add_scalar_with_explicit_join(self, a: Variable) -> None: + expr = 1 * a + 5 + result = expr.add(10, join="override") + np.testing.assert_array_equal(result.const.values, [15, 15, 15]) + assert list(result.coords["i"].values) == [0, 1, 2] + + class TestSubtraction: + def test_sub_expr_join_inner(self, a: Variable, b: Variable) -> None: + result = 
a.to_linexpr().sub(b.to_linexpr(), join="inner") + assert list(result.data.indexes["i"]) == [1, 2] - def test_merge_outer_join(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} + def test_sub_constant_override(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.sub(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [-5, -15, -25]) + + class TestMultiplication: + def test_mul_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_mul_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().mul(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + assert result.coeffs.sel(i=0).item() == 0 + assert result.coeffs.sel(i=1).item() == 2 + assert result.coeffs.sel(i=2).item() == 3 + + def test_mul_expr_with_join_raises(self, a: Variable, b: Variable) -> None: + with pytest.raises(TypeError, match="join parameter is not supported"): + a.to_linexpr().mul(b.to_linexpr(), join="inner") + + class TestDivision: + def test_div_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().div(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_div_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().div(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def 
test_div_expr_with_join_raises(self, a: Variable, b: Variable) -> None: + with pytest.raises(TypeError): + a.to_linexpr().div(b.to_linexpr(), join="outer") + + class TestVariableOperations: + def test_variable_add_join(self, a: Variable, b: Variable) -> None: + result = a.add(b, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_sub_join(self, a: Variable, b: Variable) -> None: + result = a.sub(b, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_mul_join(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_div_join(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.div(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_add_outer_values(self, a: Variable, b: Variable) -> None: + result = a.add(b, join="outer") + assert isinstance(result, LinearExpression) + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.nterm == 2 + + def test_variable_mul_override(self, a: Variable) -> None: + other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) + result = a.mul(other, join="override") + assert isinstance(result, LinearExpression) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: - expr_a = 1 * a + 5 - const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) - for join in ["outer", "inner"]: - result = expr_a.add(const, join=join) + def test_variable_div_override(self, a: Variable) -> None: + other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) + result = a.div(other, join="override") + assert isinstance(result, LinearExpression) 
+ assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_almost_equal( + result.coeffs.squeeze().values, [0.5, 0.2, 0.1] + ) + + def test_same_shape_add_join_override(self, a: Variable, c: Variable) -> None: + result = a.to_linexpr().add(c.to_linexpr(), join="override") + assert list(result.data.indexes["i"]) == [0, 1, 2] + + class TestMerge: + def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="inner" + ) + assert list(result.data.indexes["i"]) == [1, 2] + + def test_merge_outer_join(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="outer" + ) + assert set(result.coords["i"].values) == {0, 1, 2, 3} + + def test_merge_join_left(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="left" + ) + assert list(result.data.indexes["i"]) == [0, 1, 2] + + def test_merge_join_right(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="right" + ) + assert list(result.data.indexes["i"]) == [1, 2, 3] + + class TestValueVerification: + def test_add_expr_outer_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.add(expr_b, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 15 + assert result.const.sel(i=3).item() == 10 + + def test_add_expr_inner_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.add(expr_b, join="inner") + assert list(result.coords["i"].values) == [1, 2] + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 15 + + def 
test_add_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = expr.add(const, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 5 + assert result.const.sel(i=3).item() == 20 + + def test_add_constant_inner_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = expr.add(const, join="inner") + assert list(result.coords["i"].values) == [1] + assert result.const.sel(i=1).item() == 15 + + def test_add_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.add(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [15, 25, 35]) + + def test_sub_expr_outer_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.sub(expr_b, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == -5 + assert result.const.sel(i=2).item() == -5 + assert result.const.sel(i=3).item() == -10 + + def test_mul_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.mul(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [10, 15, 20]) + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) + + def test_mul_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([2, 3], dims=["i"], coords={"i": [1, 
3]}) + result = expr.mul(other, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 0 + assert result.const.sel(i=1).item() == 10 + assert result.const.sel(i=2).item() == 0 + assert result.const.sel(i=3).item() == 0 + assert result.coeffs.squeeze().sel(i=1).item() == 2 + assert result.coeffs.squeeze().sel(i=0).item() == 0 + + def test_div_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 10 + other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.div(other, join="override") assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [6, 7, 8]) - # assign_coords also works when coords already match - result = expr_a + const.assign_coords(i=expr_a.coords["i"]) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [6, 7, 8]) - - def test_add_scalar(self, a: Variable) -> None: - expr = 1 * a + 5 - result = expr + 10 - np.testing.assert_array_equal(result.const.values, [15, 15, 15]) - assert list(result.coords["i"].values) == [0, 1, 2] - - def test_quadratic_add_constant_join_inner(self, a: Variable, c: Variable) -> None: - quad = a.to_linexpr() * c.to_linexpr() - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_quadratic_add_expr_join_inner(self, a: Variable, c: Variable) -> None: - quad = a.to_linexpr() * c.to_linexpr() - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) - result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [0, 1] - - def test_quadratic_mul_constant_join_inner(self, a: Variable, c: Variable) -> None: - quad = a.to_linexpr() * c.to_linexpr() - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = quad.mul(const, join="inner") - assert 
list(result.data.indexes["i"]) == [1, 2] - - def test_merge_join_left(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="left") - assert list(result.data.indexes["i"]) == [0, 1, 2] - - def test_merge_join_right(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge([a.to_linexpr(), b.to_linexpr()], join="right") - assert list(result.data.indexes["i"]) == [1, 2, 3] + np.testing.assert_array_equal(result.const.values, [5.0, 2.0, 1.0]) + + def test_div_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 10 + other = xr.DataArray([2.0, 5.0], dims=["i"], coords={"i": [1, 3]}) + result = expr.div(other, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=1).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(i=1).item() == pytest.approx(0.5) + assert result.const.sel(i=0).item() == pytest.approx(10.0) + assert result.coeffs.squeeze().sel(i=0).item() == pytest.approx(1.0) + + class TestQuadratic: + def test_quadratic_add_constant_join_inner( + self, a: Variable, b: Variable + ) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: + quad = a.to_linexpr() * a.to_linexpr() + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [0, 1] + + def test_quadratic_mul_constant_join_inner( + self, a: Variable, b: Variable + ) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] diff --git a/test/test_optimization.py 
b/test/test_optimization.py index 6bcb1627..d9ecc39e 100644 --- a/test/test_optimization.py +++ b/test/test_optimization.py @@ -55,7 +55,7 @@ params.append(("mosek", "lp", True)) -# Note: Platform-specific solver bugs (e.g., SCIP quadratic on Windows) are now +# Note: Platform-specific solver bugs are now # handled in linopy/solver_capabilities.py by adjusting the registry at import time. feasible_quadratic_solvers: list[str] = list(quadratic_solvers) diff --git a/test/test_piecewise_constraints.py b/test/test_piecewise_constraints.py index aeb76ec7..ab8e1f09 100644 --- a/test/test_piecewise_constraints.py +++ b/test/test_piecewise_constraints.py @@ -1,4 +1,4 @@ -"""Tests for piecewise linear constraints.""" +"""Tests for the new piecewise linear constraints API.""" from __future__ import annotations @@ -9,2119 +9,1485 @@ import pytest import xarray as xr -from linopy import Model, available_solvers, breakpoints +from linopy import ( + Model, + available_solvers, + breakpoints, + piecewise, + segments, + slopes_to_points, +) from linopy.constants import ( + BREAKPOINT_DIM, + LP_SEG_DIM, + PWL_ACTIVE_BOUND_SUFFIX, + PWL_AUX_SUFFIX, PWL_BINARY_SUFFIX, PWL_CONVEX_SUFFIX, PWL_DELTA_SUFFIX, PWL_FILL_SUFFIX, + PWL_INC_BINARY_SUFFIX, + PWL_INC_LINK_SUFFIX, + PWL_INC_ORDER_SUFFIX, PWL_LAMBDA_SUFFIX, - PWL_LINK_SUFFIX, + PWL_LP_DOMAIN_SUFFIX, + PWL_LP_SUFFIX, PWL_SELECT_SUFFIX, + PWL_X_LINK_SUFFIX, + PWL_Y_LINK_SUFFIX, + SEGMENT_DIM, +) +from linopy.piecewise import ( + PiecewiseConstraintDescriptor, + PiecewiseExpression, ) from linopy.solver_capabilities import SolverFeature, get_available_solvers_with_feature +_sos2_solvers = get_available_solvers_with_feature( + SolverFeature.SOS_CONSTRAINTS, available_solvers +) +_any_solvers = [ + s for s in ["highs", "gurobi", "glpk", "cplex"] if s in available_solvers +] -class TestBasicSingleVariable: - """Tests for single variable piecewise constraints.""" - def test_basic_single_variable(self) -> None: - """Test basic piecewise 
constraint with a single variable.""" - m = Model() - x = m.add_variables(name="x") +# =========================================================================== +# slopes_to_points +# =========================================================================== - breakpoints = xr.DataArray( - [0, 10, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]} - ) - m.add_piecewise_constraints(x, breakpoints, dim="bp") +class TestSlopesToPoints: + def test_basic(self) -> None: + assert slopes_to_points([0, 1, 2], [1, 2], 0) == [0, 1, 3] - # Check lambda variables were created - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + def test_negative_slopes(self) -> None: + result = slopes_to_points([0, 10, 20], [-0.5, -1.0], 10) + assert result == [10, 5, -5] - # Check constraints were created - assert f"pwl0{PWL_CONVEX_SUFFIX}" in m.constraints - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + def test_wrong_length_raises(self) -> None: + with pytest.raises(ValueError, match="len\\(slopes\\)"): + slopes_to_points([0, 1, 2], [1], 0) - # Check SOS2 constraint was added - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert lambda_var.attrs.get("sos_type") == 2 - assert lambda_var.attrs.get("sos_dim") == "bp" - def test_single_variable_with_coords(self) -> None: - """Test piecewise constraint with a variable that has coordinates.""" - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - x = m.add_variables(coords=[generators], name="x") +# =========================================================================== +# breakpoints() factory +# =========================================================================== - bp_coords = [0, 1, 2] - breakpoints = xr.DataArray( - [[0, 50, 100], [0, 30, 80]], - dims=["generator", "bp"], - coords={"generator": generators, "bp": bp_coords}, - ) - m.add_piecewise_constraints(x, breakpoints, dim="bp") +class TestBreakpointsFactory: + def test_list(self) -> None: + bp = breakpoints([0, 50, 100]) + assert bp.dims 
== (BREAKPOINT_DIM,) + assert list(bp.values) == [0.0, 50.0, 100.0] - # Lambda should have both generator and bp dimensions - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert "generator" in lambda_var.dims - assert "bp" in lambda_var.dims + def test_dict(self) -> None: + bp = breakpoints({"gen1": [0, 50, 100], "gen2": [0, 30]}, dim="generator") + assert set(bp.dims) == {"generator", BREAKPOINT_DIM} + assert bp.sizes[BREAKPOINT_DIM] == 3 + assert np.isnan(bp.sel(generator="gen2").sel({BREAKPOINT_DIM: 2})) + def test_dict_without_dim_raises(self) -> None: + with pytest.raises(ValueError, match="'dim' is required"): + breakpoints({"a": [0, 50], "b": [0, 30]}) -class TestDictOfVariables: - """Tests for dict of variables (multiple linked variables).""" + def test_slopes_list(self) -> None: + bp = breakpoints(slopes=[1, 2], x_points=[0, 1, 2], y0=0) + expected = breakpoints([0, 1, 3]) + xr.testing.assert_equal(bp, expected) - def test_dict_of_variables(self) -> None: - """Test piecewise constraint with multiple linked variables.""" - m = Model() - power = m.add_variables(name="power") - efficiency = m.add_variables(name="efficiency") + def test_slopes_dict(self) -> None: + bp = breakpoints( + slopes={"a": [1, 0.5], "b": [2, 1]}, + x_points={"a": [0, 10, 50], "b": [0, 20, 80]}, + y0={"a": 0, "b": 10}, + dim="gen", + ) + assert set(bp.dims) == {"gen", BREAKPOINT_DIM} + # a: [0, 10, 30], b: [10, 50, 110] + np.testing.assert_allclose(bp.sel(gen="a").values, [0, 10, 30]) + np.testing.assert_allclose(bp.sel(gen="b").values, [10, 50, 110]) - breakpoints = xr.DataArray( - [[0, 50, 100], [0.8, 0.95, 0.9]], - dims=["var", "bp"], - coords={"var": ["power", "efficiency"], "bp": [0, 1, 2]}, + def test_slopes_dict_shared_xpoints(self) -> None: + bp = breakpoints( + slopes={"a": [1, 2], "b": [3, 4]}, + x_points=[0, 1, 2], + y0={"a": 0, "b": 0}, + dim="gen", ) + np.testing.assert_allclose(bp.sel(gen="a").values, [0, 1, 3]) + 
np.testing.assert_allclose(bp.sel(gen="b").values, [0, 3, 7]) - m.add_piecewise_constraints( - {"power": power, "efficiency": efficiency}, - breakpoints, - dim="bp", + def test_slopes_dict_shared_y0(self) -> None: + bp = breakpoints( + slopes={"a": [1, 2], "b": [3, 4]}, + x_points={"a": [0, 1, 2], "b": [0, 1, 2]}, + y0=5.0, + dim="gen", ) + np.testing.assert_allclose(bp.sel(gen="a").values, [5, 6, 8]) - # Check single linking constraint was created for all variables - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + def test_values_and_slopes_raises(self) -> None: + with pytest.raises(ValueError, match="mutually exclusive"): + breakpoints([0, 1], slopes=[1], x_points=[0, 1], y0=0) - def test_dict_with_coordinates(self) -> None: - """Test dict of variables with additional coordinates.""" - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - power = m.add_variables(coords=[generators], name="power") - efficiency = m.add_variables(coords=[generators], name="efficiency") + def test_slopes_without_xpoints_raises(self) -> None: + with pytest.raises(ValueError, match="requires both"): + breakpoints(slopes=[1], y0=0) - breakpoints = xr.DataArray( - [[[0, 50, 100], [0.8, 0.95, 0.9]], [[0, 30, 80], [0.75, 0.9, 0.85]]], - dims=["generator", "var", "bp"], - coords={ - "generator": generators, - "var": ["power", "efficiency"], - "bp": [0, 1, 2], - }, - ) + def test_slopes_without_y0_raises(self) -> None: + with pytest.raises(ValueError, match="requires both"): + breakpoints(slopes=[1], x_points=[0, 1]) - m.add_piecewise_constraints( - {"power": power, "efficiency": efficiency}, - breakpoints, - dim="bp", - ) + def test_xpoints_with_values_raises(self) -> None: + with pytest.raises(ValueError, match="forbidden"): + breakpoints([0, 1], x_points=[0, 1]) - # Lambda should have generator and bp dimensions (not var) - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert "generator" in lambda_var.dims - assert "bp" in lambda_var.dims - assert "var" not 
in lambda_var.dims + def test_y0_with_values_raises(self) -> None: + with pytest.raises(ValueError, match="forbidden"): + breakpoints([0, 1], y0=5) + # --- pandas and xarray inputs --- -class TestAutoDetectLinkDim: - """Tests for auto-detection of linking dimension.""" + def test_series(self) -> None: + bp = breakpoints(pd.Series([0, 50, 100])) + assert bp.dims == (BREAKPOINT_DIM,) + assert list(bp.values) == [0.0, 50.0, 100.0] - def test_auto_detect_linking_dim(self) -> None: - """Test that linking dimension is auto-detected from breakpoints.""" - m = Model() - power = m.add_variables(name="power") - efficiency = m.add_variables(name="efficiency") + def test_dataframe(self) -> None: + df = pd.DataFrame( + {"gen1": [0, 50, 100], "gen2": [0, 30, np.nan]} + ).T # rows=entities, cols=breakpoints + bp = breakpoints(df, dim="generator") + assert set(bp.dims) == {"generator", BREAKPOINT_DIM} + assert bp.sizes[BREAKPOINT_DIM] == 3 + np.testing.assert_allclose(bp.sel(generator="gen1").values, [0, 50, 100]) + assert np.isnan(bp.sel(generator="gen2").values[2]) + + def test_dataframe_without_dim_raises(self) -> None: + df = pd.DataFrame({"a": [0, 50], "b": [0, 30]}).T + with pytest.raises(ValueError, match="'dim' is required"): + breakpoints(df) - breakpoints = xr.DataArray( - [[0, 50, 100], [0.8, 0.95, 0.9]], - dims=["var", "bp"], - coords={"var": ["power", "efficiency"], "bp": [0, 1, 2]}, + def test_dataarray_passthrough(self) -> None: + da = xr.DataArray( + [0, 50, 100], + dims=[BREAKPOINT_DIM], + coords={BREAKPOINT_DIM: np.arange(3)}, ) + bp = breakpoints(da) + xr.testing.assert_equal(bp, da) - # Should auto-detect linking dim="var" - m.add_piecewise_constraints( - {"power": power, "efficiency": efficiency}, - breakpoints, - dim="bp", - ) + def test_dataarray_missing_dim_raises(self) -> None: + da = xr.DataArray([0, 50, 100], dims=["foo"]) + with pytest.raises(ValueError, match="must have a"): + breakpoints(da) - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + def 
test_slopes_series(self) -> None: + bp = breakpoints( + slopes=pd.Series([1, 2]), + x_points=pd.Series([0, 1, 2]), + y0=0, + ) + expected = breakpoints([0, 1, 3]) + xr.testing.assert_equal(bp, expected) + + def test_slopes_dataarray(self) -> None: + slopes_da = xr.DataArray( + [[1, 2], [3, 4]], + dims=["gen", BREAKPOINT_DIM], + coords={"gen": ["a", "b"], BREAKPOINT_DIM: [0, 1]}, + ) + xp_da = xr.DataArray( + [[0, 1, 2], [0, 1, 2]], + dims=["gen", BREAKPOINT_DIM], + coords={"gen": ["a", "b"], BREAKPOINT_DIM: [0, 1, 2]}, + ) + y0_da = xr.DataArray([0, 5], dims=["gen"], coords={"gen": ["a", "b"]}) + bp = breakpoints(slopes=slopes_da, x_points=xp_da, y0=y0_da, dim="gen") + np.testing.assert_allclose(bp.sel(gen="a").values, [0, 1, 3]) + np.testing.assert_allclose(bp.sel(gen="b").values, [5, 8, 12]) + + def test_slopes_dataframe(self) -> None: + slopes_df = pd.DataFrame({"a": [1, 0.5], "b": [2, 1]}).T + xp_df = pd.DataFrame({"a": [0, 10, 50], "b": [0, 20, 80]}).T + y0_series = pd.Series({"a": 0, "b": 10}) + bp = breakpoints(slopes=slopes_df, x_points=xp_df, y0=y0_series, dim="gen") + np.testing.assert_allclose(bp.sel(gen="a").values, [0, 10, 30]) + np.testing.assert_allclose(bp.sel(gen="b").values, [10, 50, 110]) + + +# =========================================================================== +# segments() factory +# =========================================================================== + + +class TestSegmentsFactory: + def test_list(self) -> None: + bp = segments([[0, 10], [50, 100]]) + assert set(bp.dims) == {SEGMENT_DIM, BREAKPOINT_DIM} + assert bp.sizes[SEGMENT_DIM] == 2 + assert bp.sizes[BREAKPOINT_DIM] == 2 + + def test_dict(self) -> None: + bp = segments( + {"a": [[0, 10], [50, 100]], "b": [[0, 20], [60, 90]]}, + dim="gen", + ) + assert "gen" in bp.dims + assert SEGMENT_DIM in bp.dims + assert BREAKPOINT_DIM in bp.dims + + def test_ragged(self) -> None: + bp = segments([[0, 5, 10], [50, 100]]) + assert bp.sizes[BREAKPOINT_DIM] == 3 + assert 
np.isnan(bp.sel({SEGMENT_DIM: 1, BREAKPOINT_DIM: 2})) + + def test_dict_without_dim_raises(self) -> None: + with pytest.raises(ValueError, match="'dim' is required"): + segments({"a": [[0, 10]], "b": [[50, 100]]}) + + def test_dataframe(self) -> None: + df = pd.DataFrame([[0, 10], [50, 100]]) # rows=segments, cols=breakpoints + bp = segments(df) + assert set(bp.dims) == {SEGMENT_DIM, BREAKPOINT_DIM} + assert bp.sizes[SEGMENT_DIM] == 2 + assert bp.sizes[BREAKPOINT_DIM] == 2 + np.testing.assert_allclose(bp.sel({SEGMENT_DIM: 0}).values, [0, 10]) + np.testing.assert_allclose(bp.sel({SEGMENT_DIM: 1}).values, [50, 100]) + + def test_dataarray_passthrough(self) -> None: + da = xr.DataArray( + [[0, 10], [50, 100]], + dims=[SEGMENT_DIM, BREAKPOINT_DIM], + coords={SEGMENT_DIM: [0, 1], BREAKPOINT_DIM: [0, 1]}, + ) + bp = segments(da) + xr.testing.assert_equal(bp, da) - def test_auto_detect_fails_with_no_match(self) -> None: - """Test that auto-detection fails when no dimension matches keys.""" - m = Model() - power = m.add_variables(name="power") - efficiency = m.add_variables(name="efficiency") + def test_dataarray_missing_dim_raises(self) -> None: + da_no_seg = xr.DataArray( + [[0, 10], [50, 100]], + dims=["foo", BREAKPOINT_DIM], + ) + with pytest.raises(ValueError, match="must have both"): + segments(da_no_seg) - # Dimension 'wrong' doesn't match variable keys - breakpoints = xr.DataArray( - [[0, 50, 100], [0.8, 0.95, 0.9]], - dims=["wrong", "bp"], - coords={"wrong": ["a", "b"], "bp": [0, 1, 2]}, + da_no_bp = xr.DataArray( + [[0, 10], [50, 100]], + dims=[SEGMENT_DIM, "bar"], ) + with pytest.raises(ValueError, match="must have both"): + segments(da_no_bp) - with pytest.raises(ValueError, match="Could not auto-detect linking dimension"): - m.add_piecewise_constraints( - {"power": power, "efficiency": efficiency}, - breakpoints, - dim="bp", - ) +# =========================================================================== +# piecewise() and operator overloading +# 
=========================================================================== -class TestMasking: - """Tests for masking functionality.""" - def test_nan_masking(self) -> None: - """Test that NaN values in breakpoints create masked constraints.""" +class TestPiecewiseFunction: + def test_returns_expression(self) -> None: m = Model() x = m.add_variables(name="x") + pw = piecewise(x, x_points=[0, 10, 50], y_points=[5, 2, 20]) + assert isinstance(pw, PiecewiseExpression) - # Third breakpoint is NaN - breakpoints = xr.DataArray( - [0, 10, np.nan, 100], - dims=["bp"], - coords={"bp": [0, 1, 2, 3]}, - ) - - m.add_piecewise_constraints(x, breakpoints, dim="bp") - - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - # Non-NaN breakpoints (0, 1, 3) should have valid labels - assert int(lambda_var.labels.sel(bp=0)) != -1 - assert int(lambda_var.labels.sel(bp=1)) != -1 - assert int(lambda_var.labels.sel(bp=3)) != -1 - # NaN breakpoint (2) should be masked - assert int(lambda_var.labels.sel(bp=2)) == -1 - - def test_explicit_mask(self) -> None: - """Test user-provided mask.""" + def test_series_inputs(self) -> None: m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - x = m.add_variables(coords=[generators], name="x") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0, 30, 80]], - dims=["generator", "bp"], - coords={"generator": generators, "bp": [0, 1, 2]}, - ) - - # Mask out gen2 - mask = xr.DataArray( - [[True, True, True], [False, False, False]], - dims=["generator", "bp"], - coords={"generator": generators, "bp": [0, 1, 2]}, - ) - - m.add_piecewise_constraints(x, breakpoints, dim="bp", mask=mask) - - # Should still create variables and constraints - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + x = m.add_variables(name="x") + pw = piecewise(x, pd.Series([0, 10, 50]), pd.Series([5, 2, 20])) + assert isinstance(pw, PiecewiseExpression) - def test_skip_nan_check(self) -> None: - """Test skip_nan_check parameter for performance.""" + def 
test_tuple_inputs(self) -> None: m = Model() x = m.add_variables(name="x") + pw = piecewise(x, (0, 10, 50), (5, 2, 20)) + assert isinstance(pw, PiecewiseExpression) - # Breakpoints with no NaNs - breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) - - # Should work with skip_nan_check=True - m.add_piecewise_constraints(x, breakpoints, dim="bp", skip_nan_check=True) - - # All lambda variables should be valid (no masking) - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert (lambda_var.labels != -1).all() - - def test_dict_mask_without_linking_dim(self) -> None: - """Test dict case accepts broadcastable mask without linking dimension.""" + def test_eq_returns_descriptor(self) -> None: m = Model() - power = m.add_variables(name="power") - efficiency = m.add_variables(name="efficiency") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0.8, 0.95, 0.9]], - dims=["var", "bp"], - coords={"var": ["power", "efficiency"], "bp": [0, 1, 2]}, - ) - - # Mask over bp only; should broadcast across var - mask = xr.DataArray([True, False, True], dims=["bp"], coords={"bp": [0, 1, 2]}) - - m.add_piecewise_constraints( - {"power": power, "efficiency": efficiency}, - breakpoints, - dim="bp", - mask=mask, - ) - - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert (lambda_var.labels.sel(bp=0) != -1).all() - assert (lambda_var.labels.sel(bp=1) == -1).all() - assert (lambda_var.labels.sel(bp=2) != -1).all() - - -class TestMultiDimensional: - """Tests for multi-dimensional piecewise constraints.""" + x = m.add_variables(name="x") + y = m.add_variables(name="y") + desc = piecewise(x, [0, 10, 50], [5, 2, 20]) == y + assert isinstance(desc, PiecewiseConstraintDescriptor) + assert desc.sign == "==" - def test_multi_dimensional(self) -> None: - """Test piecewise constraint with multiple loop dimensions.""" + def test_ge_returns_le_descriptor(self) -> None: + """Pw >= y means y <= pw""" m = Model() - generators = pd.Index(["gen1", "gen2"], 
name="generator") - timesteps = pd.Index([0, 1, 2], name="time") - x = m.add_variables(coords=[generators, timesteps], name="x") - - rng = np.random.default_rng(42) - breakpoints = xr.DataArray( - rng.random((2, 3, 4)) * 100, - dims=["generator", "time", "bp"], - coords={"generator": generators, "time": timesteps, "bp": [0, 1, 2, 3]}, - ) + x = m.add_variables(name="x") + y = m.add_variables(name="y") + desc = piecewise(x, [0, 10, 50], [5, 2, 20]) >= y + assert isinstance(desc, PiecewiseConstraintDescriptor) + assert desc.sign == "<=" - m.add_piecewise_constraints(x, breakpoints, dim="bp") + def test_le_returns_ge_descriptor(self) -> None: + """Pw <= y means y >= pw""" + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + desc = piecewise(x, [0, 10, 50], [5, 2, 20]) <= y + assert isinstance(desc, PiecewiseConstraintDescriptor) + assert desc.sign == ">=" + + @pytest.mark.parametrize( + ("operator", "expected_sign"), + [("==", "=="), ("<=", "<="), (">=", ">=")], + ) + def test_rhs_piecewise_returns_descriptor( + self, operator: str, expected_sign: str + ) -> None: + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + pw = piecewise(x, [0, 10, 50], [5, 2, 20]) + + if operator == "==": + desc = y == pw + elif operator == "<=": + desc = y <= pw + else: + desc = y >= pw + + assert isinstance(desc, PiecewiseConstraintDescriptor) + assert desc.sign == expected_sign + assert desc.piecewise_func is pw + + @pytest.mark.parametrize( + ("operator", "expected_sign"), + [("==", "=="), ("<=", "<="), (">=", ">=")], + ) + def test_rhs_piecewise_linear_expression_returns_descriptor( + self, operator: str, expected_sign: str + ) -> None: + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + z = m.add_variables(name="z") + lhs = 2 * y + z + pw = piecewise(x, [0, 10, 50], [5, 2, 20]) - # Lambda should have all dimensions - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert "generator" in 
lambda_var.dims - assert "time" in lambda_var.dims - assert "bp" in lambda_var.dims + if operator == "==": + desc = lhs == pw + elif operator == "<=": + desc = lhs <= pw + else: + desc = lhs >= pw + assert isinstance(desc, PiecewiseConstraintDescriptor) + assert desc.sign == expected_sign + assert desc.lhs is lhs + assert desc.piecewise_func is pw -class TestValidationErrors: - """Tests for input validation.""" + def test_rhs_piecewise_add_constraint(self) -> None: + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + m.add_piecewise_constraints(y == piecewise(x, [0, 10, 50], [5, 2, 20])) + assert len(m.constraints) > 0 - def test_invalid_vars_type(self) -> None: - """Test error when expr is not Variable, LinearExpression, or dict.""" + def test_mismatched_sizes_raises(self) -> None: m = Model() + x = m.add_variables(name="x") + with pytest.raises(ValueError, match="same size"): + piecewise(x, [0, 10, 50, 100], [5, 2, 20]) - breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) + def test_missing_breakpoint_dim_raises(self) -> None: + m = Model() + x = m.add_variables(name="x") + xp = xr.DataArray([0, 10, 50], dims=["knot"]) + yp = xr.DataArray([5, 2, 20], dims=["knot"]) + with pytest.raises(ValueError, match="must have a breakpoint dimension"): + piecewise(x, xp, yp) + def test_missing_breakpoint_dim_x_only_raises(self) -> None: + m = Model() + x = m.add_variables(name="x") + xp = xr.DataArray([0, 10, 50], dims=["knot"]) + yp = xr.DataArray([5, 2, 20], dims=[BREAKPOINT_DIM]) with pytest.raises( - TypeError, match="must be a Variable, LinearExpression, or dict" + ValueError, match="x_points is missing the breakpoint dimension" ): - m.add_piecewise_constraints("invalid", breakpoints, dim="bp") # type: ignore + piecewise(x, xp, yp) - def test_invalid_dict_value_type(self) -> None: + def test_missing_breakpoint_dim_y_only_raises(self) -> None: m = Model() - bp = xr.DataArray( - [[0, 50], [0, 10]], - dims=["var", 
"bp"], - coords={"var": ["x", "y"], "bp": [0, 1]}, - ) - with pytest.raises(TypeError, match="dict value for key 'x'"): - m.add_piecewise_constraints({"x": "bad", "y": "bad"}, bp, dim="bp") # type: ignore + x = m.add_variables(name="x") + xp = xr.DataArray([0, 10, 50], dims=[BREAKPOINT_DIM]) + yp = xr.DataArray([5, 2, 20], dims=["knot"]) + with pytest.raises( + ValueError, match="y_points is missing the breakpoint dimension" + ): + piecewise(x, xp, yp) - def test_missing_dim(self) -> None: - """Test error when breakpoints don't have the required dim.""" + def test_segment_dim_mismatch_raises(self) -> None: m = Model() x = m.add_variables(name="x") + xp = segments([[0, 10], [50, 100]]) + yp = xr.DataArray([0, 5], dims=[BREAKPOINT_DIM]) + with pytest.raises(ValueError, match="segment.*dimension.*both must"): + piecewise(x, xp, yp) - breakpoints = xr.DataArray([0, 10, 50], dims=["wrong"]) - - with pytest.raises(ValueError, match="must have dimension"): - m.add_piecewise_constraints(x, breakpoints, dim="bp") - - def test_non_numeric_dim(self) -> None: - """Test error when dim coordinates are not numeric.""" + def test_detects_disjunctive(self) -> None: m = Model() x = m.add_variables(name="x") + pw = piecewise(x, segments([[0, 10], [50, 100]]), segments([[0, 5], [20, 80]])) + assert pw.disjunctive is True - breakpoints = xr.DataArray( - [0, 10, 50], - dims=["bp"], - coords={"bp": ["a", "b", "c"]}, # Non-numeric - ) - - with pytest.raises(ValueError, match="numeric coordinates"): - m.add_piecewise_constraints(x, breakpoints, dim="bp") - - def test_expression_support(self) -> None: - """Test that LinearExpression is supported as input.""" + def test_detects_continuous(self) -> None: m = Model() x = m.add_variables(name="x") - y = m.add_variables(name="y") + pw = piecewise(x, [0, 10, 50], [5, 2, 20]) + assert pw.disjunctive is False - breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) - # Should work with a LinearExpression - 
m.add_piecewise_constraints(x + y, breakpoints, dim="bp") +# =========================================================================== +# Continuous piecewise – equality +# =========================================================================== - # Check constraints were created - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - def test_no_matching_linking_dim(self) -> None: - """Test error when no breakpoints dimension matches dict keys.""" +class TestContinuousEquality: + def test_sos2(self) -> None: m = Model() - power = m.add_variables(name="power") - efficiency = m.add_variables(name="efficiency") - - breakpoints = xr.DataArray([0, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2]}) - - with pytest.raises(ValueError, match="Could not auto-detect linking dimension"): - m.add_piecewise_constraints( - {"power": power, "efficiency": efficiency}, - breakpoints, - dim="bp", - ) + x = m.add_variables(name="x") + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0, 10, 50, 100], [5, 2, 20, 80]) == y, + method="sos2", + ) + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"pwl0{PWL_CONVEX_SUFFIX}" in m.constraints + assert f"pwl0{PWL_X_LINK_SUFFIX}" in m.constraints + assert f"pwl0{PWL_Y_LINK_SUFFIX}" in m.constraints + lam = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert lam.attrs.get("sos_type") == 2 - def test_linking_dim_coords_mismatch(self) -> None: - """Test error when breakpoint dimension coords don't match dict keys.""" + def test_auto_selects_incremental_for_monotonic(self) -> None: m = Model() - power = m.add_variables(name="power") - efficiency = m.add_variables(name="efficiency") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0.8, 0.95, 0.9]], - dims=["var", "bp"], - coords={"var": ["wrong1", "wrong2"], "bp": [0, 1, 2]}, + x = m.add_variables(name="x") + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0, 10, 50, 100], [5, 2, 20, 80]) == y, ) + assert f"pwl0{PWL_DELTA_SUFFIX}" in 
m.variables + assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in m.variables - with pytest.raises(ValueError, match="Could not auto-detect linking dimension"): - m.add_piecewise_constraints( - {"power": power, "efficiency": efficiency}, - breakpoints, - dim="bp", - ) - - -class TestNameGeneration: - """Tests for automatic name generation.""" - - def test_auto_name_generation(self) -> None: - """Test that names are auto-generated correctly.""" + def test_auto_nonmonotonic_falls_back_to_sos2(self) -> None: m = Model() x = m.add_variables(name="x") y = m.add_variables(name="y") - - bp1 = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) - bp2 = xr.DataArray([0, 20, 80], dims=["bp"], coords={"bp": [0, 1, 2]}) - - m.add_piecewise_constraints(x, bp1, dim="bp") - m.add_piecewise_constraints(y, bp2, dim="bp") - + m.add_piecewise_constraints( + piecewise(x, [0, 50, 30, 100], [5, 20, 15, 80]) == y, + ) assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"pwl1{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"pwl0{PWL_DELTA_SUFFIX}" not in m.variables - def test_custom_name(self) -> None: - """Test using a custom name.""" + def test_multi_dimensional(self) -> None: m = Model() - x = m.add_variables(name="x") - - breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) - - m.add_piecewise_constraints(x, breakpoints, dim="bp", name="my_pwl") - - assert f"my_pwl{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"my_pwl{PWL_CONVEX_SUFFIX}" in m.constraints - assert f"my_pwl{PWL_LINK_SUFFIX}" in m.constraints - - -class TestLPFileOutput: - """Tests for LP file output with piecewise constraints.""" + gens = pd.Index(["gen_a", "gen_b"], name="generator") + x = m.add_variables(coords=[gens], name="x") + y = m.add_variables(coords=[gens], name="y") + m.add_piecewise_constraints( + piecewise( + x, + breakpoints( + {"gen_a": [0, 10, 50], "gen_b": [0, 20, 80]}, dim="generator" + ), + breakpoints( + {"gen_a": [0, 5, 30], "gen_b": [0, 8, 50]}, dim="generator" + ), 
+ ) + == y, + ) + delta = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"] + assert "generator" in delta.dims - def test_piecewise_written_to_lp(self, tmp_path: Path) -> None: - """Test that piecewise constraints are properly written to LP file.""" + def test_with_slopes(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [0.0, 10.0, 50.0], - dims=["bp"], - coords={"bp": [0, 1, 2]}, + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise( + x, + [0, 10, 50, 100], + breakpoints(slopes=[-0.3, 0.45, 1.2], x_points=[0, 10, 50, 100], y0=5), + ) + == y, ) - - m.add_piecewise_constraints(x, breakpoints, dim="bp") - - # Add a simple objective to make it a valid LP - m.add_objective(x) - - fn = tmp_path / "pwl.lp" - m.to_file(fn, io_api="lp") - content = fn.read_text() - - # Should contain SOS2 section - assert "\nsos\n" in content.lower() - assert "s2" in content.lower() + assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables -@pytest.mark.skipif("gurobi" not in available_solvers, reason="Gurobi not installed") -class TestSolverIntegration: - """Integration tests with Gurobi solver.""" +# =========================================================================== +# Continuous piecewise – inequality +# =========================================================================== - def test_solve_single_variable(self) -> None: - """Test solving a model with piecewise constraint.""" - gurobipy = pytest.importorskip("gurobipy") +class TestContinuousInequality: + def test_concave_le_uses_lp(self) -> None: + """Y <= concave f(x) → LP tangent lines""" m = Model() - # Variable that should be between 0 and 100 - x = m.add_variables(lower=0, upper=100, name="x") - - # Piecewise linear cost function: cost = f(x) - # f(0) = 0, f(50) = 10, f(100) = 50 - cost = m.add_variables(name="cost") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0, 10, 50]], - dims=["var", "bp"], - coords={"var": ["x", "cost"], "bp": [0, 1, 2]}, + x = 
m.add_variables(name="x") + y = m.add_variables(name="y") + # Concave: slopes 0.8, 0.4 (decreasing) + # pw >= y means y <= pw (sign="<=") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 40, 60]) >= y, ) + assert f"pwl0{PWL_LP_SUFFIX}" in m.constraints + assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in m.variables + assert f"pwl0{PWL_AUX_SUFFIX}" not in m.variables - m.add_piecewise_constraints({"x": x, "cost": cost}, breakpoints, dim="bp") - - # Minimize cost, but need x >= 50 to make it interesting - m.add_constraints(x >= 50, name="x_min") - m.add_objective(cost) - - try: - status, cond = m.solve(solver_name="gurobi", io_api="direct") - except gurobipy.GurobiError as exc: - pytest.skip(f"Gurobi environment unavailable: {exc}") - - assert status == "ok" - # At x=50, cost should be 10 - assert np.isclose(x.solution.values, 50, atol=1e-5) - assert np.isclose(cost.solution.values, 10, atol=1e-5) - - def test_solve_efficiency_curve(self) -> None: - """Test solving with a realistic efficiency curve.""" - gurobipy = pytest.importorskip("gurobipy") - + def test_convex_le_uses_sos2_aux(self) -> None: + """Y <= convex f(x) → SOS2 + aux""" m = Model() - power = m.add_variables(lower=0, upper=100, name="power") - efficiency = m.add_variables(name="efficiency") - - # Efficiency curve: starts low, peaks, then decreases - # power: 0 25 50 75 100 - # efficiency: 0.7 0.85 0.95 0.9 0.8 - breakpoints = xr.DataArray( - [[0, 25, 50, 75, 100], [0.7, 0.85, 0.95, 0.9, 0.8]], - dims=["var", "bp"], - coords={"var": ["power", "efficiency"], "bp": [0, 1, 2, 3, 4]}, - ) - + x = m.add_variables(name="x") + y = m.add_variables(name="y") + # Convex: slopes 0.2, 1.0 (increasing) m.add_piecewise_constraints( - {"power": power, "efficiency": efficiency}, - breakpoints, - dim="bp", + piecewise(x, [0, 50, 100], [0, 10, 60]) >= y, ) + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"pwl0{PWL_AUX_SUFFIX}" in m.variables - # Maximize efficiency - m.add_objective(efficiency, 
sense="max") - - try: - status, cond = m.solve(solver_name="gurobi", io_api="direct") - except gurobipy.GurobiError as exc: - pytest.skip(f"Gurobi environment unavailable: {exc}") - - assert status == "ok" - # Maximum efficiency is at power=50 - assert np.isclose(power.solution.values, 50, atol=1e-5) - assert np.isclose(efficiency.solution.values, 0.95, atol=1e-5) - - def test_solve_multi_generator(self) -> None: - """Test with multiple generators each with different curves.""" - gurobipy = pytest.importorskip("gurobipy") - - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - power = m.add_variables(lower=0, upper=100, coords=[generators], name="power") - cost = m.add_variables(coords=[generators], name="cost") - - # Different cost curves for each generator - # gen1: cheaper at low power, expensive at high - # gen2: more expensive at low power, cheaper at high - breakpoints = xr.DataArray( - [ - [[0, 50, 100], [0, 5, 30]], # gen1: power, cost - [[0, 50, 100], [0, 15, 20]], # gen2: power, cost - ], - dims=["generator", "var", "bp"], - coords={ - "generator": generators, - "var": ["power", "cost"], - "bp": [0, 1, 2], - }, + def test_convex_ge_uses_lp(self) -> None: + """Y >= convex f(x) → LP tangent lines""" + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + # Convex: slopes 0.2, 1.0 (increasing) + # pw <= y means y >= pw (sign=">=") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 60]) <= y, ) + assert f"pwl0{PWL_LP_SUFFIX}" in m.constraints + assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in m.variables + assert f"pwl0{PWL_AUX_SUFFIX}" not in m.variables + def test_concave_ge_uses_sos2_aux(self) -> None: + """Y >= concave f(x) → SOS2 + aux""" + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + # Concave: slopes 0.8, 0.4 (decreasing) m.add_piecewise_constraints( - {"power": power, "cost": cost}, breakpoints, dim="bp" + piecewise(x, [0, 50, 100], [0, 40, 60]) <= y, ) + assert 
f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"pwl0{PWL_AUX_SUFFIX}" in m.variables - # Need total power of 120 - m.add_constraints(power.sum() >= 120, name="demand") - - # Minimize total cost - m.add_objective(cost.sum()) - - try: - status, cond = m.solve(solver_name="gurobi", io_api="direct") - except gurobipy.GurobiError as exc: - pytest.skip(f"Gurobi environment unavailable: {exc}") - - assert status == "ok" - # gen1 should provide ~50 (cheap up to 50), gen2 provides rest - total_power = power.solution.sum().values - assert np.isclose(total_power, 120, atol=1e-5) - - -class TestIncrementalFormulation: - """Tests for the incremental (delta) piecewise formulation.""" - - def test_single_variable_incremental(self) -> None: - """Test incremental formulation with a single variable.""" + def test_mixed_uses_sos2(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [0, 10, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + y = m.add_variables(name="y") + # Mixed: slopes 0.5, 0.3, 0.9 (down then up) + m.add_piecewise_constraints( + piecewise(x, [0, 30, 60, 100], [0, 15, 24, 60]) >= y, ) + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"pwl0{PWL_AUX_SUFFIX}" in m.variables - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") - - # Check delta variables created - assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - # 3 segments → 3 delta vars - delta_var = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"] - assert "bp_seg" in delta_var.dims - assert len(delta_var.coords["bp_seg"]) == 3 - - # Check filling-order constraint (single vectorized constraint) - assert f"pwl0{PWL_FILL_SUFFIX}" in m.constraints - - # Check link constraint - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + def test_method_lp_wrong_convexity_raises(self) -> None: + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + # Convex function + y <= pw + method="lp" should fail + with pytest.raises(ValueError, 
match="convex"): + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 60]) >= y, + method="lp", + ) - # No SOS2 or lambda variables - assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in m.variables + def test_method_lp_decreasing_breakpoints_raises(self) -> None: + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + with pytest.raises(ValueError, match="strictly increasing x_points"): + m.add_piecewise_constraints( + piecewise(x, [100, 50, 0], [60, 10, 0]) <= y, + method="lp", + ) - def test_two_breakpoints_incremental(self) -> None: - """Test incremental with only 2 breakpoints (1 segment, no fill constraints).""" + def test_auto_inequality_decreasing_breakpoints_raises(self) -> None: m = Model() x = m.add_variables(name="x") + y = m.add_variables(name="y") + with pytest.raises(ValueError, match="strictly increasing x_points"): + m.add_piecewise_constraints( + piecewise(x, [100, 50, 0], [60, 10, 0]) <= y, + ) - breakpoints = xr.DataArray([0, 100], dims=["bp"], coords={"bp": [0, 1]}) + def test_method_lp_equality_raises(self) -> None: + m = Model() + x = m.add_variables(name="x") + y = m.add_variables(name="y") + with pytest.raises(ValueError, match="equality"): + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 40, 60]) == y, + method="lp", + ) - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") - # 1 segment → 1 delta var, no filling constraints - delta_var = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"] - assert len(delta_var.coords["bp_seg"]) == 1 +# =========================================================================== +# Incremental formulation +# =========================================================================== - # Link constraint should exist - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - def test_dict_incremental(self) -> None: - """Test incremental formulation with dict of variables.""" +class TestIncremental: + def test_creates_delta_vars(self) -> None: m = Model() - power 
= m.add_variables(name="power") - cost = m.add_variables(name="cost") - - # Both power and cost breakpoints are strictly increasing - breakpoints = xr.DataArray( - [[0, 50, 100], [0, 10, 50]], - dims=["var", "bp"], - coords={"var": ["power", "cost"], "bp": [0, 1, 2]}, - ) - + x = m.add_variables(name="x") + y = m.add_variables(name="y") m.add_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, - dim="bp", + piecewise(x, [0, 10, 50, 100], [5, 2, 20, 80]) == y, method="incremental", ) - assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints + delta = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"] + assert delta.labels.sizes[LP_SEG_DIM] == 3 + assert f"pwl0{PWL_FILL_SUFFIX}" in m.constraints + assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in m.variables - def test_non_monotonic_raises(self) -> None: - """Test that non-monotonic breakpoints raise ValueError for incremental.""" + def test_nonmonotonic_raises(self) -> None: m = Model() x = m.add_variables(name="x") - - # Not monotonic: 0, 50, 30 - breakpoints = xr.DataArray([0, 50, 30], dims=["bp"], coords={"bp": [0, 1, 2]}) - + y = m.add_variables(name="y") with pytest.raises(ValueError, match="strictly monotonic"): - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 30, 100], [5, 20, 15, 80]) == y, + method="incremental", + ) - def test_decreasing_monotonic_works(self) -> None: - """Test that strictly decreasing breakpoints work for incremental.""" + def test_sos2_nonmonotonic_succeeds(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [100, 50, 10, 0], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 30, 100], [5, 20, 15, 80]) == y, + method="sos2", ) + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + assert f"pwl0{PWL_DELTA_SUFFIX}" not in m.variables - 
m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") - assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - - def test_opposite_directions_in_dict(self) -> None: - """Test that dict with opposite monotonic directions works.""" + def test_two_breakpoints_no_fill(self) -> None: m = Model() - power = m.add_variables(name="power") - eff = m.add_variables(name="eff") - - # power increasing, efficiency decreasing - breakpoints = xr.DataArray( - [[0, 50, 100], [0.95, 0.9, 0.8]], - dims=["var", "bp"], - coords={"var": ["power", "eff"], "bp": [0, 1, 2]}, - ) - + x = m.add_variables(name="x") + y = m.add_variables(name="y") m.add_piecewise_constraints( - {"power": power, "eff": eff}, - breakpoints, - dim="bp", + piecewise(x, [0, 100], [5, 80]) == y, method="incremental", ) + delta = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"] + assert delta.labels.sizes[LP_SEG_DIM] == 1 + assert f"pwl0{PWL_X_LINK_SUFFIX}" in m.constraints + assert f"pwl0{PWL_Y_LINK_SUFFIX}" in m.constraints - assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - - def test_nan_breakpoints_monotonic(self) -> None: - """Test that trailing NaN breakpoints don't break monotonicity check.""" + def test_creates_binary_indicator_vars(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [0, 10, 100, np.nan], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0, 10, 50, 100], [5, 2, 20, 80]) == y, + method="incremental", ) + assert f"pwl0{PWL_INC_BINARY_SUFFIX}" in m.variables + binary = m.variables[f"pwl0{PWL_INC_BINARY_SUFFIX}"] + assert binary.labels.sizes[LP_SEG_DIM] == 3 + assert f"pwl0{PWL_INC_LINK_SUFFIX}" in m.constraints - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="auto") - assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - - def test_auto_selects_incremental(self) -> None: - """Test method='auto' selects incremental for 
monotonic breakpoints.""" + def test_creates_order_constraints(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [0, 10, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0, 10, 50, 100], [5, 2, 20, 80]) == y, + method="incremental", ) + assert f"pwl0{PWL_INC_ORDER_SUFFIX}" in m.constraints - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="auto") - - # Should use incremental (delta vars, no lambda) - assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in m.variables - - def test_auto_selects_sos2(self) -> None: - """Test method='auto' falls back to sos2 for non-monotonic breakpoints.""" + def test_two_breakpoints_no_order_constraint(self) -> None: + """With only one segment, there's no order constraint needed.""" m = Model() x = m.add_variables(name="x") + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0, 100], [5, 80]) == y, + method="incremental", + ) + assert f"pwl0{PWL_INC_BINARY_SUFFIX}" in m.variables + assert f"pwl0{PWL_INC_LINK_SUFFIX}" in m.constraints + assert f"pwl0{PWL_INC_ORDER_SUFFIX}" not in m.constraints - # Non-monotonic across the full array (dict case would have linking dimension) - # For single expr, breakpoints along dim are [0, 50, 30] - breakpoints = xr.DataArray([0, 50, 30], dims=["bp"], coords={"bp": [0, 1, 2]}) - - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="auto") - - # Should use sos2 (lambda vars, no delta) - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"pwl0{PWL_DELTA_SUFFIX}" not in m.variables - - def test_invalid_method_raises(self) -> None: - """Test that an invalid method raises ValueError.""" + def test_decreasing_monotonic(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) - - with pytest.raises(ValueError, 
match="method must be"): - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="invalid") # type: ignore[arg-type] - - def test_incremental_with_coords(self) -> None: - """Test incremental formulation with extra coordinates.""" - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - x = m.add_variables(coords=[generators], name="x") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0, 30, 80]], - dims=["generator", "bp"], - coords={"generator": generators, "bp": [0, 1, 2]}, + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [100, 50, 10, 0], [80, 20, 2, 5]) == y, + method="incremental", ) + assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") - - delta_var = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"] - assert "generator" in delta_var.dims - assert "bp_seg" in delta_var.dims - - -# ===== Disjunctive Piecewise Linear Constraint Tests ===== +# =========================================================================== +# Disjunctive piecewise +# =========================================================================== -class TestDisjunctiveBasicSingleVariable: - """Tests for single variable disjunctive piecewise constraints.""" - def test_two_equal_segments(self) -> None: - """Test with two equal-length segments.""" +class TestDisjunctive: + def test_equality_creates_binary(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, segments([[0, 10], [50, 100]]), segments([[0, 5], [20, 80]])) + == y, ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - # Binary variables created assert f"pwl0{PWL_BINARY_SUFFIX}" in m.variables - # Selection constraint assert f"pwl0{PWL_SELECT_SUFFIX}" in m.constraints - # Lambda 
variables assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - # Convexity constraint assert f"pwl0{PWL_CONVEX_SUFFIX}" in m.constraints - # Link constraint - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - # SOS2 on lambda - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert lambda_var.attrs.get("sos_type") == 2 - assert lambda_var.attrs.get("sos_dim") == "breakpoint" + lam = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert lam.attrs.get("sos_type") == 2 - def test_uneven_segments_with_nan(self) -> None: - """Test segments of different lengths with NaN padding.""" + def test_inequality_creates_aux(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [[0, 5, 10], [50, 100, np.nan]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1, 2]}, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - # Lambda for NaN breakpoint should be masked - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert "segment" in lambda_var.dims - assert "breakpoint" in lambda_var.dims - - def test_single_breakpoint_segment(self) -> None: - """Test with a segment that has only one valid breakpoint (point segment).""" - m = Model() - x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [[0, 10], [42, np.nan]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, segments([[0, 10], [50, 100]]), segments([[0, 5], [20, 80]])) + >= y, ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) + assert f"pwl0{PWL_AUX_SUFFIX}" in m.variables assert f"pwl0{PWL_BINARY_SUFFIX}" in m.variables + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - def test_single_variable_with_coords(self) -> None: - """Test coordinates are preserved on binary and lambda variables.""" - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - x = 
m.add_variables(coords=[generators], name="x") - - breakpoints = xr.DataArray( - [ - [[0, 10], [50, 100]], - [[0, 20], [60, 90]], - ], - dims=["generator", "segment", "breakpoint"], - coords={ - "generator": generators, - "segment": [0, 1], - "breakpoint": [0, 1], - }, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - - # Both should preserve generator coordinates - assert list(binary_var.coords["generator"].values) == ["gen1", "gen2"] - assert list(lambda_var.coords["generator"].values) == ["gen1", "gen2"] - - # Binary has (generator, segment), lambda has (generator, segment, breakpoint) - assert set(binary_var.dims) == {"generator", "segment"} - assert set(lambda_var.dims) == {"generator", "segment", "breakpoint"} - - def test_return_value_is_selection_constraint(self) -> None: - """Test the return value is the selection constraint.""" + def test_method_lp_raises(self) -> None: m = Model() x = m.add_variables(name="x") + y = m.add_variables(name="y") + with pytest.raises(ValueError, match="disjunctive"): + m.add_piecewise_constraints( + piecewise( + x, segments([[0, 10], [50, 100]]), segments([[0, 5], [20, 80]]) + ) + >= y, + method="lp", + ) - breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ) - - result = m.add_disjunctive_piecewise_constraints(x, breakpoints) - - # Return value should be the selection constraint - assert result is not None - select_name = f"pwl0{PWL_SELECT_SUFFIX}" - assert select_name in m.constraints - - -class TestDisjunctiveDictOfVariables: - """Tests for dict of variables with disjunctive constraints.""" - - def test_dict_with_two_segments(self) -> None: - """Test dict of variables with two segments.""" - m = Model() - power = m.add_variables(name="power") - cost = m.add_variables(name="cost") - - breakpoints = 
xr.DataArray( - [[[0, 50], [0, 10]], [[80, 100], [20, 50]]], - dims=["segment", "var", "breakpoint"], - coords={ - "segment": [0, 1], - "var": ["power", "cost"], - "breakpoint": [0, 1], - }, - ) - - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, - ) - - assert f"pwl0{PWL_BINARY_SUFFIX}" in m.variables - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - - def test_auto_detect_linking_dim_with_segment_dim(self) -> None: - """Test auto-detection of linking dimension when segment_dim is also present.""" - m = Model() - power = m.add_variables(name="power") - cost = m.add_variables(name="cost") - - breakpoints = xr.DataArray( - [[[0, 50], [0, 10]], [[80, 100], [20, 50]]], - dims=["segment", "var", "breakpoint"], - coords={ - "segment": [0, 1], - "var": ["power", "cost"], - "breakpoint": [0, 1], - }, - ) - - # Should auto-detect linking dim="var" (not segment) - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, - ) - - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - - -class TestDisjunctiveExtraDimensions: - """Tests for extra dimensions on disjunctive constraints.""" - - def test_extra_generator_dimension(self) -> None: - """Test with an extra generator dimension.""" - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - x = m.add_variables(coords=[generators], name="x") - - breakpoints = xr.DataArray( - [ - [[0, 10], [50, 100]], - [[0, 20], [60, 90]], - ], - dims=["generator", "segment", "breakpoint"], - coords={ - "generator": generators, - "segment": [0, 1], - "breakpoint": [0, 1], - }, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - # Binary and lambda should have generator dimension - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert "generator" in binary_var.dims - assert "generator" in lambda_var.dims - assert "segment" in binary_var.dims - assert "segment" in 
lambda_var.dims - - def test_multi_dimensional_generator_time(self) -> None: - """Test variable with generator + time coords, verify all dims present.""" + def test_method_incremental_raises(self) -> None: m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - timesteps = pd.Index([0, 1, 2], name="time") - x = m.add_variables(coords=[generators, timesteps], name="x") - - rng = np.random.default_rng(42) - bp_data = rng.random((2, 3, 2, 2)) * 100 - # Sort breakpoints within each segment - bp_data = np.sort(bp_data, axis=-1) - - breakpoints = xr.DataArray( - bp_data, - dims=["generator", "time", "segment", "breakpoint"], - coords={ - "generator": generators, - "time": timesteps, - "segment": [0, 1], - "breakpoint": [0, 1], - }, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - - # All extra dims should be present - for dim_name in ["generator", "time", "segment"]: - assert dim_name in binary_var.dims - for dim_name in ["generator", "time", "segment", "breakpoint"]: - assert dim_name in lambda_var.dims + x = m.add_variables(name="x") + y = m.add_variables(name="y") + with pytest.raises(ValueError, match="disjunctive"): + m.add_piecewise_constraints( + piecewise( + x, segments([[0, 10], [50, 100]]), segments([[0, 5], [20, 80]]) + ) + == y, + method="incremental", + ) - def test_dict_with_additional_coords(self) -> None: - """Test dict of variables with extra generator dim, binary/lambda exclude linking dimension.""" + def test_multi_dimensional(self) -> None: m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - power = m.add_variables(coords=[generators], name="power") - cost = m.add_variables(coords=[generators], name="cost") - - breakpoints = xr.DataArray( - [ - [[[0, 50], [0, 10]], [[80, 100], [20, 30]]], - [[[0, 40], [0, 8]], [[70, 90], [15, 25]]], - ], - dims=["generator", "segment", "var", 
"breakpoint"], - coords={ - "generator": generators, - "segment": [0, 1], - "var": ["power", "cost"], - "breakpoint": [0, 1], - }, - ) - - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, + gens = pd.Index(["gen_a", "gen_b"], name="generator") + x = m.add_variables(coords=[gens], name="x") + y = m.add_variables(coords=[gens], name="y") + m.add_piecewise_constraints( + piecewise( + x, + segments( + {"gen_a": [[0, 10], [50, 100]], "gen_b": [[0, 20], [60, 90]]}, + dim="generator", + ), + segments( + {"gen_a": [[0, 5], [20, 80]], "gen_b": [[0, 8], [30, 70]]}, + dim="generator", + ), + ) + == y, ) + binary = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] + lam = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert "generator" in binary.dims + assert "generator" in lam.dims - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - - # linking dimension (var) should NOT be in binary or lambda dims - assert "var" not in binary_var.dims - assert "var" not in lambda_var.dims - # generator should be present - assert "generator" in binary_var.dims - assert "generator" in lambda_var.dims +# =========================================================================== +# Validation +# =========================================================================== -class TestDisjunctiveMasking: - """Tests for masking functionality in disjunctive constraints.""" - - def test_nan_masking_labels(self) -> None: - """Test NaN breakpoints mask lambda labels to -1.""" +class TestValidation: + def test_non_descriptor_raises(self) -> None: m = Model() x = m.add_variables(name="x") + with pytest.raises(TypeError, match="PiecewiseConstraintDescriptor"): + m.add_piecewise_constraints(x) # type: ignore - breakpoints = xr.DataArray( - [[0, 5, 10], [50, 100, np.nan]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1, 2]}, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) 
- - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - # Segment 0: all 3 breakpoints valid (labels != -1) - seg0_labels = lambda_var.labels.sel(segment=0) - assert (seg0_labels != -1).all() - # Segment 1: breakpoint 2 is NaN → masked (label == -1) - seg1_bp2_label = lambda_var.labels.sel(segment=1, breakpoint=2) - assert int(seg1_bp2_label) == -1 - - # Binary: both segments have at least one valid breakpoint - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - assert (binary_var.labels != -1).all() - - def test_nan_masking_partial_segment(self) -> None: - """Test partial NaN — lambda masked but segment binary still valid.""" + def test_invalid_method_raises(self) -> None: m = Model() x = m.add_variables(name="x") + y = m.add_variables(name="y") + with pytest.raises(ValueError, match="method must be"): + m.add_piecewise_constraints( + piecewise(x, [0, 10, 50], [5, 2, 20]) == y, + method="invalid", # type: ignore + ) - # Segment 0 has 3 valid breakpoints, segment 1 has 2 valid + 1 NaN - breakpoints = xr.DataArray( - [[0, 5, 10], [50, 100, np.nan]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1, 2]}, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - # Segment 1 binary is still valid (has 2 valid breakpoints) - assert int(binary_var.labels.sel(segment=1)) != -1 +# =========================================================================== +# Name generation +# =========================================================================== - # Segment 1 valid lambdas (breakpoint 0, 1) should be valid - assert int(lambda_var.labels.sel(segment=1, breakpoint=0)) != -1 - assert int(lambda_var.labels.sel(segment=1, breakpoint=1)) != -1 - def test_explicit_mask(self) -> None: - """Test user-provided mask disables specific entries.""" +class TestNameGeneration: + def test_auto_name(self) -> None: m = Model() 
x = m.add_variables(name="x") + y = m.add_variables(name="y") + z = m.add_variables(name="z") + m.add_piecewise_constraints(piecewise(x, [0, 10, 50], [5, 2, 20]) == y) + m.add_piecewise_constraints(piecewise(x, [0, 20, 80], [10, 15, 50]) == z) + assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables + assert f"pwl1{PWL_DELTA_SUFFIX}" in m.variables - breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ) - - # Mask out entire segment 1 - mask = xr.DataArray( - [[True, True], [False, False]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints, mask=mask) - - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - - # Segment 0 lambdas should be valid - assert (lambda_var.labels.sel(segment=0) != -1).all() - # Segment 1 lambdas should be masked - assert (lambda_var.labels.sel(segment=1) == -1).all() - # Segment 1 binary should be masked (no valid breakpoints) - assert int(binary_var.labels.sel(segment=1)) == -1 - - def test_skip_nan_check(self) -> None: - """Test skip_nan_check=True treats all breakpoints as valid.""" + def test_custom_name(self) -> None: m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [[0, 5, 10], [50, 100, np.nan]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1, 2]}, + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0, 10, 50], [5, 2, 20]) == y, + name="my_pwl", ) + assert f"my_pwl{PWL_DELTA_SUFFIX}" in m.variables + assert f"my_pwl{PWL_X_LINK_SUFFIX}" in m.constraints + assert f"my_pwl{PWL_Y_LINK_SUFFIX}" in m.constraints - m.add_disjunctive_piecewise_constraints(x, breakpoints, skip_nan_check=True) - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - # All labels should be valid (no masking) - assert (lambda_var.labels != 
-1).all() +# =========================================================================== +# Broadcasting +# =========================================================================== - def test_dict_mask_without_linking_dim(self) -> None: - """Test dict case accepts mask that omits linking dimension but is broadcastable.""" - m = Model() - power = m.add_variables(name="power") - cost = m.add_variables(name="cost") - - breakpoints = xr.DataArray( - [[[0, 50], [0, 10]], [[80, 100], [20, 30]]], - dims=["segment", "var", "breakpoint"], - coords={ - "segment": [0, 1], - "var": ["power", "cost"], - "breakpoint": [0, 1], - }, - ) - - # Mask over segment/breakpoint only; should broadcast across var - mask = xr.DataArray( - [[True, True], [False, False]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ) - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, - mask=mask, +class TestBroadcasting: + def test_broadcast_over_extra_dims(self) -> None: + m = Model() + gens = pd.Index(["gen_a", "gen_b"], name="generator") + times = pd.Index([0, 1, 2], name="time") + x = m.add_variables(coords=[gens, times], name="x") + y = m.add_variables(coords=[gens, times], name="y") + # Points only have generator dim → broadcast over time + m.add_piecewise_constraints( + piecewise( + x, + breakpoints( + {"gen_a": [0, 10, 50], "gen_b": [0, 20, 80]}, dim="generator" + ), + breakpoints( + {"gen_a": [0, 5, 30], "gen_b": [0, 8, 50]}, dim="generator" + ), + ) + == y, ) + delta = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"] + assert "generator" in delta.dims + assert "time" in delta.dims - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert (lambda_var.labels.sel(segment=0) != -1).all() - assert (lambda_var.labels.sel(segment=1) == -1).all() +# =========================================================================== +# NaN masking +# =========================================================================== -class 
TestDisjunctiveValidationErrors: - """Tests for validation errors in disjunctive constraints.""" - - def test_missing_dim(self) -> None: - """Test error when breakpoints don't have dim.""" - m = Model() - x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "wrong"], - coords={"segment": [0, 1], "wrong": [0, 1]}, - ) - - with pytest.raises(ValueError, match="must have dimension"): - m.add_disjunctive_piecewise_constraints(x, breakpoints, dim="breakpoint") - def test_missing_segment_dim(self) -> None: - """Test error when breakpoints don't have segment_dim.""" +class TestNaNMasking: + def test_nan_masks_lambda_labels(self) -> None: + """NaN in y_points produces masked labels in SOS2 formulation.""" m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [0, 10, 50], - dims=["breakpoint"], - coords={"breakpoint": [0, 1, 2]}, + y = m.add_variables(name="y") + x_pts = xr.DataArray([0, 10, 50, np.nan], dims=[BREAKPOINT_DIM]) + y_pts = xr.DataArray([0, 5, 20, np.nan], dims=[BREAKPOINT_DIM]) + m.add_piecewise_constraints( + piecewise(x, x_pts, y_pts) == y, + method="sos2", ) + lam = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + # First 3 should be valid, last masked + assert (lam.labels.isel({BREAKPOINT_DIM: slice(None, 3)}) != -1).all() + assert int(lam.labels.isel({BREAKPOINT_DIM: 3})) == -1 - with pytest.raises(ValueError, match="must have dimension"): - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - def test_same_dim_segment_dim(self) -> None: - """Test error when dim == segment_dim.""" + def test_skip_nan_check_with_nan_raises(self) -> None: + """skip_nan_check=True with NaN breakpoints raises ValueError.""" m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ) - - with pytest.raises(ValueError, match="must be different"): - 
m.add_disjunctive_piecewise_constraints( - x, breakpoints, dim="segment", segment_dim="segment" + y = m.add_variables(name="y") + x_pts = xr.DataArray([0, 10, 50, np.nan], dims=[BREAKPOINT_DIM]) + y_pts = xr.DataArray([0, 5, 20, np.nan], dims=[BREAKPOINT_DIM]) + with pytest.raises(ValueError, match="skip_nan_check=True but breakpoints"): + m.add_piecewise_constraints( + piecewise(x, x_pts, y_pts) == y, + method="sos2", + skip_nan_check=True, ) - def test_non_numeric_coords(self) -> None: - """Test error when dim coordinates are not numeric.""" + def test_skip_nan_check_without_nan(self) -> None: + """skip_nan_check=True without NaN works fine (no mask computed).""" m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": ["a", "b"]}, - ) - - with pytest.raises(ValueError, match="numeric coordinates"): - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - def test_invalid_expr(self) -> None: - """Test error when expr is invalid type.""" - m = Model() - - breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, + y = m.add_variables(name="y") + x_pts = xr.DataArray([0, 10, 50, 100], dims=[BREAKPOINT_DIM]) + y_pts = xr.DataArray([0, 5, 20, 40], dims=[BREAKPOINT_DIM]) + m.add_piecewise_constraints( + piecewise(x, x_pts, y_pts) == y, + method="sos2", + skip_nan_check=True, ) + lam = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] + assert (lam.labels != -1).all() - with pytest.raises( - TypeError, match="must be a Variable, LinearExpression, or dict" - ): - m.add_disjunctive_piecewise_constraints("invalid", breakpoints) # type: ignore - - def test_expression_support(self) -> None: - """Test that LinearExpression (x + y) works as input.""" + def test_sos2_interior_nan_raises(self) -> None: + """SOS2 with interior NaN breakpoints raises ValueError.""" m = Model() x = 
m.add_variables(name="x") y = m.add_variables(name="y") + x_pts = xr.DataArray([0, np.nan, 50, 100], dims=[BREAKPOINT_DIM]) + y_pts = xr.DataArray([0, np.nan, 20, 40], dims=[BREAKPOINT_DIM]) + with pytest.raises(ValueError, match="non-trailing NaN"): + m.add_piecewise_constraints( + piecewise(x, x_pts, y_pts) == y, + method="sos2", + ) - breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ) - m.add_disjunctive_piecewise_constraints(x + y, breakpoints) +# =========================================================================== +# Convexity detection edge cases +# =========================================================================== - assert f"pwl0{PWL_BINARY_SUFFIX}" in m.variables - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - def test_no_matching_linking_dim(self) -> None: - """Test error when no breakpoints dimension matches dict keys.""" +class TestConvexityDetection: + def test_linear_uses_lp_both_directions(self) -> None: + """Linear function uses LP for both <= and >= inequalities.""" m = Model() - power = m.add_variables(name="power") - cost = m.add_variables(name="cost") - - breakpoints = xr.DataArray( - [[0, 50], [80, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, + x = m.add_variables(lower=0, upper=100, name="x") + y1 = m.add_variables(name="y1") + y2 = m.add_variables(name="y2") + # y1 >= f(x) → LP + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 25, 50]) <= y1, ) - - with pytest.raises(ValueError, match="Could not auto-detect linking dimension"): - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, - ) - - def test_linking_dim_coords_mismatch(self) -> None: - """Test error when breakpoint dimension coords don't match dict keys.""" - m = Model() - power = m.add_variables(name="power") - cost = 
m.add_variables(name="cost") - - breakpoints = xr.DataArray( - [[[0, 50], [0, 10]], [[80, 100], [20, 30]]], - dims=["segment", "var", "breakpoint"], - coords={ - "segment": [0, 1], - "var": ["wrong1", "wrong2"], - "breakpoint": [0, 1], - }, + assert f"pwl0{PWL_LP_SUFFIX}" in m.constraints + # y2 <= f(x) → also LP (linear is both convex and concave) + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 25, 50]) >= y2, ) + assert f"pwl1{PWL_LP_SUFFIX}" in m.constraints - with pytest.raises(ValueError, match="Could not auto-detect linking dimension"): - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, - ) - - -class TestDisjunctiveNameGeneration: - """Tests for name generation in disjunctive constraints.""" - - def test_shared_counter_with_continuous(self) -> None: - """Test that disjunctive and continuous PWL share the counter.""" + def test_single_segment_uses_lp(self) -> None: + """A single segment (2 breakpoints) is linear; uses LP.""" m = Model() - x = m.add_variables(name="x") + x = m.add_variables(lower=0, upper=100, name="x") y = m.add_variables(name="y") - - bp_continuous = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) - m.add_piecewise_constraints(x, bp_continuous, dim="bp") - - bp_disjunctive = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, + m.add_piecewise_constraints( + piecewise(x, [0, 100], [0, 50]) <= y, ) - m.add_disjunctive_piecewise_constraints(y, bp_disjunctive) - - # First is pwl0, second is pwl1 - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"pwl1{PWL_BINARY_SUFFIX}" in m.variables + assert f"pwl0{PWL_LP_SUFFIX}" in m.constraints - def test_custom_name(self) -> None: - """Test custom name for disjunctive constraints.""" + def test_mixed_convexity_uses_sos2(self) -> None: + """Mixed convexity should fall back to SOS2 for inequalities.""" m = Model() - x = m.add_variables(name="x") - - 
breakpoints = xr.DataArray( - [[0, 10], [50, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, + x = m.add_variables(lower=0, upper=100, name="x") + y = m.add_variables(name="y") + # Mixed: slope goes up then down → neither convex nor concave + # y <= f(x) → piecewise >= y → sign="<=" internally + m.add_piecewise_constraints( + piecewise(x, [0, 30, 60, 100], [0, 40, 30, 50]) >= y, ) + assert f"pwl0{PWL_AUX_SUFFIX}" in m.variables + assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - m.add_disjunctive_piecewise_constraints(x, breakpoints, name="my_dpwl") - - assert f"my_dpwl{PWL_BINARY_SUFFIX}" in m.variables - assert f"my_dpwl{PWL_SELECT_SUFFIX}" in m.constraints - assert f"my_dpwl{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"my_dpwl{PWL_CONVEX_SUFFIX}" in m.constraints - assert f"my_dpwl{PWL_LINK_SUFFIX}" in m.constraints +# =========================================================================== +# LP file output +# =========================================================================== -class TestDisjunctiveLPFileOutput: - """Tests for LP file output with disjunctive piecewise constraints.""" - def test_lp_contains_sos2_and_binary(self, tmp_path: Path) -> None: - """Test LP file contains SOS2 section and binary variables.""" +class TestLPFileOutput: + def test_sos2_equality(self, tmp_path: Path) -> None: m = Model() - x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [[0.0, 10.0], [50.0, 100.0]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, + x = m.add_variables(name="x", lower=0, upper=100) + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0.0, 10.0, 50.0, 100.0], [5.0, 2.0, 20.0, 80.0]) == y, + method="sos2", ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - m.add_objective(x) - - fn = tmp_path / "dpwl.lp" + m.add_objective(y) + fn = tmp_path / "pwl_eq.lp" m.to_file(fn, io_api="lp") - content = fn.read_text() - - # 
Should contain SOS2 section - assert "\nsos\n" in content.lower() - assert "s2" in content.lower() - - # Should contain binary section - assert "binary" in content.lower() or "binaries" in content.lower() - + content = fn.read_text().lower() + assert "sos" in content + assert "s2" in content -class TestDisjunctiveMultiBreakpointSegments: - """Tests for segments with multiple breakpoints (unique to disjunctive formulation).""" - - def test_three_breakpoints_per_segment(self) -> None: - """Test segments with 3 breakpoints each — verify lambda shape.""" + def test_lp_formulation_no_sos2(self, tmp_path: Path) -> None: m = Model() - x = m.add_variables(name="x") - - # 2 segments, each with 3 breakpoints - breakpoints = xr.DataArray( - [[0, 5, 10], [50, 75, 100]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1, 2]}, + x = m.add_variables(name="x", lower=0, upper=100) + y = m.add_variables(name="y") + # Concave: pw >= y uses LP + m.add_piecewise_constraints( + piecewise(x, [0.0, 50.0, 100.0], [0.0, 40.0, 60.0]) >= y, ) + m.add_objective(y) + fn = tmp_path / "pwl_lp.lp" + m.to_file(fn, io_api="lp") + content = fn.read_text().lower() + assert "s2" not in content - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - # Lambda should have shape (2 segments, 3 breakpoints) - assert lambda_var.labels.sizes["segment"] == 2 - assert lambda_var.labels.sizes["breakpoint"] == 3 - # All labels valid (no NaN) - assert (lambda_var.labels != -1).all() - - def test_mixed_segment_lengths_nan_padding(self) -> None: - """Test one segment with 4 breakpoints, another with 2 (NaN-padded).""" + def test_disjunctive_sos2_and_binary(self, tmp_path: Path) -> None: m = Model() - x = m.add_variables(name="x") - - # Segment 0: 4 valid breakpoints - # Segment 1: 2 valid breakpoints + 2 NaN - breakpoints = xr.DataArray( - [[0, 5, 10, 15], [50, 100, np.nan, np.nan]], - dims=["segment", "breakpoint"], - 
coords={"segment": [0, 1], "breakpoint": [0, 1, 2, 3]}, + x = m.add_variables(name="x", lower=0, upper=100) + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise( + x, + segments([[0.0, 10.0], [50.0, 100.0]]), + segments([[0.0, 5.0], [20.0, 80.0]]), + ) + == y, ) + m.add_objective(y) + fn = tmp_path / "pwl_disj.lp" + m.to_file(fn, io_api="lp") + content = fn.read_text().lower() + assert "s2" in content + assert "binary" in content or "binaries" in content - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - - # Lambda shape: (2 segments, 4 breakpoints) - assert lambda_var.labels.sizes["segment"] == 2 - assert lambda_var.labels.sizes["breakpoint"] == 4 - - # Segment 0: all 4 lambdas valid - assert (lambda_var.labels.sel(segment=0) != -1).all() - - # Segment 1: first 2 valid, last 2 masked - assert (lambda_var.labels.sel(segment=1, breakpoint=0) != -1).item() - assert (lambda_var.labels.sel(segment=1, breakpoint=1) != -1).item() - assert (lambda_var.labels.sel(segment=1, breakpoint=2) == -1).item() - assert (lambda_var.labels.sel(segment=1, breakpoint=3) == -1).item() - - # Both segment binaries valid (both have at least one valid breakpoint) - assert (binary_var.labels != -1).all() - - -_disjunctive_solvers = get_available_solvers_with_feature( - SolverFeature.SOS_CONSTRAINTS, available_solvers -) +# =========================================================================== +# Solver integration – SOS2 capable +# =========================================================================== -@pytest.mark.skipif( - len(_disjunctive_solvers) == 0, - reason="No solver with SOS constraint support installed", -) -class TestDisjunctiveSolverIntegration: - """Integration tests for disjunctive piecewise constraints.""" - @pytest.fixture(params=_disjunctive_solvers) +@pytest.mark.skipif(len(_sos2_solvers) == 0, reason="No solver with SOS2 
support") +class TestSolverSOS2: + @pytest.fixture(params=_sos2_solvers) def solver_name(self, request: pytest.FixtureRequest) -> str: return request.param - def test_minimize_picks_low_segment(self, solver_name: str) -> None: - """Test minimizing x picks the lower segment.""" - m = Model() - x = m.add_variables(name="x") - - # Two segments: [0, 10] and [50, 100] - breakpoints = xr.DataArray( - [[0.0, 10.0], [50.0, 100.0]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - m.add_objective(x) - - status, cond = m.solve(solver_name=solver_name) - - assert status == "ok" - # Should pick x=0 (minimum of low segment) - assert np.isclose(x.solution.values, 0.0, atol=1e-5) - - def test_maximize_picks_high_segment(self, solver_name: str) -> None: - """Test maximizing x picks the upper segment.""" + def test_equality_minimize_cost(self, solver_name: str) -> None: m = Model() - x = m.add_variables(name="x") - - # Two segments: [0, 10] and [50, 100] - breakpoints = xr.DataArray( - [[0.0, 10.0], [50.0, 100.0]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, - ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - m.add_objective(x, sense="max") - - status, cond = m.solve(solver_name=solver_name) - - assert status == "ok" - # Should pick x=100 (maximum of high segment) - assert np.isclose(x.solution.values, 100.0, atol=1e-5) - - def test_dict_case_solver(self, solver_name: str) -> None: - """Test disjunctive with dict of variables and solver.""" - m = Model() - power = m.add_variables(name="power") + x = m.add_variables(lower=0, upper=100, name="x") cost = m.add_variables(name="cost") - - # Two operating regions: - # Region 0: power [0,50], cost [0,10] - # Region 1: power [80,100], cost [20,30] - breakpoints = xr.DataArray( - [[[0.0, 50.0], [0.0, 10.0]], [[80.0, 100.0], [20.0, 30.0]]], - dims=["segment", "var", "breakpoint"], - 
coords={ - "segment": [0, 1], - "var": ["power", "cost"], - "breakpoint": [0, 1], - }, - ) - - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 50]) == cost, ) - - # Minimize cost + m.add_constraints(x >= 50, name="x_min") m.add_objective(cost) - - status, cond = m.solve(solver_name=solver_name) - + status, _ = m.solve(solver_name=solver_name) assert status == "ok" - # Should pick region 0, minimum cost = 0 - assert np.isclose(cost.solution.values, 0.0, atol=1e-5) - assert np.isclose(power.solution.values, 0.0, atol=1e-5) + np.testing.assert_allclose(x.solution.values, 50, atol=1e-4) + np.testing.assert_allclose(cost.solution.values, 10, atol=1e-4) - def test_three_segments_min(self, solver_name: str) -> None: - """Test 3 segments, minimize picks lowest.""" + def test_equality_maximize_efficiency(self, solver_name: str) -> None: m = Model() - x = m.add_variables(name="x") - - # Three segments: [0, 10], [30, 50], [80, 100] - breakpoints = xr.DataArray( - [[0.0, 10.0], [30.0, 50.0], [80.0, 100.0]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1, 2], "breakpoint": [0, 1]}, + power = m.add_variables(lower=0, upper=100, name="power") + eff = m.add_variables(name="eff") + m.add_piecewise_constraints( + piecewise(power, [0, 25, 50, 75, 100], [0.7, 0.85, 0.95, 0.9, 0.8]) == eff, ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - m.add_objective(x) - - status, cond = m.solve(solver_name=solver_name) - + m.add_objective(eff, sense="max") + status, _ = m.solve(solver_name=solver_name) assert status == "ok" - assert np.isclose(x.solution.values, 0.0, atol=1e-5) + np.testing.assert_allclose(power.solution.values, 50, atol=1e-4) + np.testing.assert_allclose(eff.solution.values, 0.95, atol=1e-4) - def test_constrained_mid_segment(self, solver_name: str) -> None: - """Test constraint forcing x into middle of a segment, verify interpolation.""" + 
def test_disjunctive_solve(self, solver_name: str) -> None: m = Model() x = m.add_variables(name="x") - - # Two segments: [0, 10] and [50, 100] - breakpoints = xr.DataArray( - [[0.0, 10.0], [50.0, 100.0]], - dims=["segment", "breakpoint"], - coords={"segment": [0, 1], "breakpoint": [0, 1]}, + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise( + x, + segments([[0.0, 10.0], [50.0, 100.0]]), + segments([[0.0, 5.0], [20.0, 80.0]]), + ) + == y, ) - - m.add_disjunctive_piecewise_constraints(x, breakpoints) - - # Force x >= 60, so must be in segment 1 - m.add_constraints(x >= 60, name="x_lower") - m.add_objective(x) - - status, cond = m.solve(solver_name=solver_name) - + m.add_constraints(x >= 60, name="x_min") + m.add_objective(y) + status, _ = m.solve(solver_name=solver_name) assert status == "ok" - # Minimum in segment 1 with x >= 60 → x = 60 - assert np.isclose(x.solution.values, 60.0, atol=1e-5) - - def test_multi_breakpoint_segment_solver(self, solver_name: str) -> None: - """Test segment with 3 breakpoints, verify correct interpolated value.""" - m = Model() - power = m.add_variables(name="power") - cost = m.add_variables(name="cost") + # x=60 on second segment: y = 20 + (80-20)/(100-50)*(60-50) = 32 + np.testing.assert_allclose(float(x.solution.values), 60, atol=1e-4) + np.testing.assert_allclose(float(y.solution.values), 32, atol=1e-4) - # Both segments have 3 breakpoints (no NaN padding needed) - # Segment 0: 3-breakpoint curve (power [0,50,100], cost [0,10,50]) - # Segment 1: 3-breakpoint curve (power [200,250,300], cost [80,90,100]) - breakpoints = xr.DataArray( - [ - [[0.0, 50.0, 100.0], [0.0, 10.0, 50.0]], - [[200.0, 250.0, 300.0], [80.0, 90.0, 100.0]], - ], - dims=["segment", "var", "breakpoint"], - coords={ - "segment": [0, 1], - "var": ["power", "cost"], - "breakpoint": [0, 1, 2], - }, - ) - - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, - ) - - # Constraint: power >= 50, minimize cost → 
picks segment 0, power=50, cost=10 - m.add_constraints(power >= 50, name="power_min") - m.add_constraints(power <= 150, name="power_max") - m.add_objective(cost) - status, cond = m.solve(solver_name=solver_name) +# =========================================================================== +# Solver integration – LP formulation (any solver) +# =========================================================================== - assert status == "ok" - assert np.isclose(power.solution.values, 50.0, atol=1e-5) - assert np.isclose(cost.solution.values, 10.0, atol=1e-5) - - def test_multi_generator_solver(self, solver_name: str) -> None: - """Test multiple generators with different disjunctive segments.""" - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - power = m.add_variables(lower=0, coords=[generators], name="power") - cost = m.add_variables(coords=[generators], name="cost") - - # gen1: two operating regions - # Region 0: power [0,50], cost [0,15] - # Region 1: power [80,100], cost [30,50] - # gen2: two operating regions - # Region 0: power [0,60], cost [0,10] - # Region 1: power [70,100], cost [12,40] - breakpoints = xr.DataArray( - [ - [[[0.0, 50.0], [0.0, 15.0]], [[80.0, 100.0], [30.0, 50.0]]], - [[[0.0, 60.0], [0.0, 10.0]], [[70.0, 100.0], [12.0, 40.0]]], - ], - dims=["generator", "segment", "var", "breakpoint"], - coords={ - "generator": generators, - "segment": [0, 1], - "var": ["power", "cost"], - "breakpoint": [0, 1], - }, - ) - - m.add_disjunctive_piecewise_constraints( - {"power": power, "cost": cost}, - breakpoints, - ) - - # Total power demand >= 100 - m.add_constraints(power.sum() >= 100, name="demand") - m.add_objective(cost.sum()) - - status, cond = m.solve(solver_name=solver_name) - - assert status == "ok" - total_power = power.solution.sum().values - assert total_power >= 100 - 1e-5 - - -_incremental_solvers = [s for s in ["gurobi", "highs"] if s in available_solvers] - - -@pytest.mark.skipif( - len(_incremental_solvers) == 0, - 
reason="No supported solver (gurobi/highs) installed", -) -class TestIncrementalSolverIntegrationMultiSolver: - """Integration tests for incremental formulation across solvers.""" - @pytest.fixture(params=_incremental_solvers) +@pytest.mark.skipif(len(_any_solvers) == 0, reason="No solver available") +class TestSolverLP: + @pytest.fixture(params=_any_solvers) def solver_name(self, request: pytest.FixtureRequest) -> str: return request.param - def test_solve_incremental_single(self, solver_name: str) -> None: + def test_concave_le(self, solver_name: str) -> None: + """Y <= concave f(x), maximize y""" m = Model() x = m.add_variables(lower=0, upper=100, name="x") - cost = m.add_variables(name="cost") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0, 10, 50]], - dims=["var", "bp"], - coords={"var": ["x", "cost"], "bp": [0, 1, 2]}, - ) - + y = m.add_variables(name="y") + # Concave: [0,0],[50,40],[100,60] m.add_piecewise_constraints( - {"x": x, "cost": cost}, - breakpoints, - dim="bp", - method="incremental", + piecewise(x, [0, 50, 100], [0, 40, 60]) >= y, ) - - m.add_constraints(x >= 50, name="x_min") - m.add_objective(cost) - - status, cond = m.solve(solver_name=solver_name) - + m.add_constraints(x <= 75, name="x_max") + m.add_objective(y, sense="max") + status, _ = m.solve(solver_name=solver_name) assert status == "ok" - assert np.isclose(x.solution.values, 50, atol=1e-5) - assert np.isclose(cost.solution.values, 10, atol=1e-5) - - -class TestIncrementalDecreasingBreakpointsSolver: - """Solver test for incremental formulation with decreasing breakpoints.""" - - @pytest.fixture(params=_incremental_solvers) - def solver_name(self, request: pytest.FixtureRequest) -> str: - return request.param + # At x=75: y = 40 + 0.4*(75-50) = 50 + np.testing.assert_allclose(float(x.solution.values), 75, atol=1e-4) + np.testing.assert_allclose(float(y.solution.values), 50, atol=1e-4) - def test_decreasing_breakpoints_solver(self, solver_name: str) -> None: + def test_convex_ge(self, 
solver_name: str) -> None: + """Y >= convex f(x), minimize y""" m = Model() x = m.add_variables(lower=0, upper=100, name="x") - cost = m.add_variables(name="cost") - - breakpoints = xr.DataArray( - [[100, 50, 0], [50, 10, 0]], - dims=["var", "bp"], - coords={"var": ["x", "cost"], "bp": [0, 1, 2]}, - ) - + y = m.add_variables(name="y") + # Convex: [0,0],[50,10],[100,60] m.add_piecewise_constraints( - {"x": x, "cost": cost}, - breakpoints, - dim="bp", - method="incremental", + piecewise(x, [0, 50, 100], [0, 10, 60]) <= y, ) - - m.add_constraints(x >= 50, name="x_min") - m.add_objective(cost) - - status, cond = m.solve(solver_name=solver_name) - + m.add_constraints(x >= 25, name="x_min") + m.add_objective(y) + status, _ = m.solve(solver_name=solver_name) assert status == "ok" - assert np.isclose(x.solution.values, 50, atol=1e-5) - assert np.isclose(cost.solution.values, 10, atol=1e-5) - - -class TestIncrementalNonMonotonicDictRaises: - """Test that non-monotonic breakpoints in a dict raise ValueError.""" - - def test_non_monotonic_in_dict_raises(self) -> None: - m = Model() - x = m.add_variables(name="x") - y = m.add_variables(name="y") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0, 30, 10]], - dims=["var", "bp"], - coords={"var": ["x", "y"], "bp": [0, 1, 2]}, - ) - - with pytest.raises(ValueError, match="strictly monotonic"): - m.add_piecewise_constraints( - {"x": x, "y": y}, - breakpoints, - dim="bp", - method="incremental", + # At x=25: y = 0.2*25 = 5 + np.testing.assert_allclose(float(x.solution.values), 25, atol=1e-4) + np.testing.assert_allclose(float(y.solution.values), 5, atol=1e-4) + + def test_slopes_equivalence(self, solver_name: str) -> None: + """Same model with y_points vs slopes produces identical solutions.""" + # Model 1: direct y_points + m1 = Model() + x1 = m1.add_variables(lower=0, upper=100, name="x") + y1 = m1.add_variables(name="y") + m1.add_piecewise_constraints( + piecewise(x1, [0, 50, 100], [0, 40, 60]) >= y1, + ) + 
m1.add_constraints(x1 <= 75, name="x_max") + m1.add_objective(y1, sense="max") + s1, _ = m1.solve(solver_name=solver_name) + + # Model 2: slopes + m2 = Model() + x2 = m2.add_variables(lower=0, upper=100, name="x") + y2 = m2.add_variables(name="y") + m2.add_piecewise_constraints( + piecewise( + x2, + [0, 50, 100], + breakpoints(slopes=[0.8, 0.4], x_points=[0, 50, 100], y0=0), ) + >= y2, + ) + m2.add_constraints(x2 <= 75, name="x_max") + m2.add_objective(y2, sense="max") + s2, _ = m2.solve(solver_name=solver_name) - -class TestAdditionalEdgeCases: - """Additional edge case tests identified in review.""" - - def test_nan_breakpoints_delta_mask(self) -> None: - """Verify delta mask correctly masks segments adjacent to trailing NaN breakpoints.""" - m = Model() - x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [0, 10, np.nan, np.nan], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + assert s1 == "ok" + assert s2 == "ok" + np.testing.assert_allclose( + float(y1.solution.values), float(y2.solution.values), atol=1e-4 ) - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") - delta_var = m.variables[f"pwl0{PWL_DELTA_SUFFIX}"] - assert delta_var.labels.sel(bp_seg=0).values != -1 - assert delta_var.labels.sel(bp_seg=1).values == -1 - assert delta_var.labels.sel(bp_seg=2).values == -1 +class TestLPDomainConstraints: + """Tests for LP domain bound constraints.""" - def test_dict_with_linear_expressions(self) -> None: - """Test _build_stacked_expr with LinearExpression values (not just Variable).""" + def test_lp_domain_constraints_created(self) -> None: + """LP method creates domain bound constraints.""" m = Model() x = m.add_variables(name="x") y = m.add_variables(name="y") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0, 10, 50]], - dims=["var", "bp"], - coords={"var": ["expr_a", "expr_b"], "bp": [0, 1, 2]}, - ) - + # Concave: slopes decreasing → y <= pw uses LP m.add_piecewise_constraints( - {"expr_a": 2 * x, "expr_b": 3 * y}, - 
breakpoints, - dim="bp", + piecewise(x, [0, 50, 100], [0, 40, 60]) >= y, ) + assert f"pwl0{PWL_LP_DOMAIN_SUFFIX}_lo" in m.constraints + assert f"pwl0{PWL_LP_DOMAIN_SUFFIX}_hi" in m.constraints - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - - def test_pwl_counter_increments(self) -> None: - """Test that _pwlCounter increments and produces unique names.""" + def test_lp_domain_constraints_multidim(self) -> None: + """Domain constraints have entity dimension for per-entity breakpoints.""" m = Model() - x = m.add_variables(name="x") - y = m.add_variables(name="y") - breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) - - m.add_piecewise_constraints(x, breakpoints, dim="bp") - assert m._pwlCounter == 1 - - m.add_piecewise_constraints(y, breakpoints, dim="bp") - assert m._pwlCounter == 2 - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"pwl1{PWL_LAMBDA_SUFFIX}" in m.variables - - def test_auto_with_mixed_monotonicity_dict(self) -> None: - """Test method='auto' with opposite-direction slices in dict.""" - m = Model() - power = m.add_variables(name="power") - eff = m.add_variables(name="eff") - - breakpoints = xr.DataArray( - [[0, 50, 100], [0.95, 0.9, 0.8]], - dims=["var", "bp"], - coords={"var": ["power", "eff"], "bp": [0, 1, 2]}, - ) - + x = m.add_variables(coords=[pd.Index(["a", "b"], name="entity")], name="x") + y = m.add_variables(coords=[pd.Index(["a", "b"], name="entity")], name="y") + x_pts = breakpoints({"a": [0, 50, 100], "b": [10, 60, 110]}, dim="entity") + y_pts = breakpoints({"a": [0, 40, 60], "b": [5, 35, 55]}, dim="entity") m.add_piecewise_constraints( - {"power": power, "eff": eff}, - breakpoints, - dim="bp", - method="auto", + piecewise(x, x_pts, y_pts) >= y, ) + lo_name = f"pwl0{PWL_LP_DOMAIN_SUFFIX}_lo" + hi_name = f"pwl0{PWL_LP_DOMAIN_SUFFIX}_hi" + assert lo_name in m.constraints + assert hi_name in m.constraints + # Domain constraints should have the entity 
dimension + assert "entity" in m.constraints[lo_name].labels.dims + assert "entity" in m.constraints[hi_name].labels.dims - assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - assert f"pwl0{PWL_LAMBDA_SUFFIX}" not in m.variables - def test_custom_segment_dim(self) -> None: - """Test disjunctive with custom segment_dim name.""" - m = Model() - x = m.add_variables(name="x") +# =========================================================================== +# Active parameter (commitment binary) +# =========================================================================== - breakpoints = xr.DataArray( - [[0.0, 10.0], [50.0, 100.0]], - dims=["zone", "breakpoint"], - coords={"zone": [0, 1], "breakpoint": [0, 1]}, - ) - m.add_disjunctive_piecewise_constraints(x, breakpoints, segment_dim="zone") +class TestActiveParameter: + """Tests for the ``active`` parameter in piecewise constraints.""" - assert f"pwl0{PWL_BINARY_SUFFIX}" in m.variables - assert f"pwl0{PWL_SELECT_SUFFIX}" in m.constraints - - def test_sos2_return_value_is_convexity_constraint(self) -> None: - """Test that add_piecewise_constraints (SOS2) returns the convexity constraint.""" + def test_incremental_creates_active_bound(self) -> None: m = Model() x = m.add_variables(name="x") + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") + m.add_piecewise_constraints( + piecewise(x, [0, 10, 50, 100], [5, 2, 20, 80], active=u) == y, + method="incremental", + ) + assert f"pwl0{PWL_ACTIVE_BOUND_SUFFIX}" in m.constraints + assert f"pwl0{PWL_DELTA_SUFFIX}" in m.variables - breakpoints = xr.DataArray([0, 10, 50], dims=["bp"], coords={"bp": [0, 1, 2]}) - - result = m.add_piecewise_constraints(x, breakpoints, dim="bp") - assert result.name == f"pwl0{PWL_CONVEX_SUFFIX}" - - def test_incremental_lp_no_sos2(self, tmp_path: Path) -> None: - """Test that incremental formulation LP file has no SOS2 section.""" + def test_active_none_is_default(self) -> None: + """Without active, formulation is identical to 
before.""" m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [0.0, 10.0, 50.0], dims=["bp"], coords={"bp": [0, 1, 2]} + y = m.add_variables(name="y") + m.add_piecewise_constraints( + piecewise(x, [0, 10, 50], [0, 5, 30]) == y, + method="incremental", ) + assert f"pwl0{PWL_ACTIVE_BOUND_SUFFIX}" not in m.constraints - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") - m.add_objective(x) - - fn = tmp_path / "inc.lp" - m.to_file(fn, io_api="lp") - content = fn.read_text() - - assert "\nsos\n" not in content.lower() - assert "s2" not in content.lower() - - def test_two_breakpoints_no_fill_constraint(self) -> None: - """Test 2-breakpoint incremental produces no fill constraint.""" + def test_active_with_lp_method_raises(self) -> None: m = Model() x = m.add_variables(name="x") + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") + with pytest.raises(ValueError, match="not supported with method='lp'"): + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 40, 60], active=u) >= y, + method="lp", + ) - breakpoints = xr.DataArray([0, 100], dims=["bp"], coords={"bp": [0, 1]}) - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") - - assert f"pwl0{PWL_FILL_SUFFIX}" not in m.constraints - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - - def test_non_trailing_nan_incremental_raises(self) -> None: - """Non-trailing NaN breakpoints raise ValueError with method='incremental'.""" + def test_active_with_auto_lp_raises(self) -> None: + """Auto selects LP for concave >=, but active is incompatible.""" m = Model() x = m.add_variables(name="x") + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") + with pytest.raises(ValueError, match="not supported with method='lp'"): + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 40, 60], active=u) >= y, + ) - breakpoints = xr.DataArray( - [0, np.nan, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 
3]} - ) - - with pytest.raises(ValueError, match="non-trailing NaN"): - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="incremental") - - def test_non_trailing_nan_incremental_dict_raises(self) -> None: - """Dict case with one variable having non-trailing NaN raises.""" + def test_incremental_inequality_with_active(self) -> None: + """Inequality + active creates aux variable and active bound.""" m = Model() x = m.add_variables(name="x") y = m.add_variables(name="y") - - breakpoints = xr.DataArray( - [[0, 50, np.nan, 100], [0, 10, 50, 80]], - dims=["var", "bp"], - coords={"var": ["x", "y"], "bp": [0, 1, 2, 3]}, + u = m.add_variables(binary=True, name="u") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 50], active=u) >= y, + method="incremental", ) + assert f"pwl0{PWL_AUX_SUFFIX}" in m.variables + assert f"pwl0{PWL_ACTIVE_BOUND_SUFFIX}" in m.constraints + assert "pwl0_ineq" in m.constraints - with pytest.raises(ValueError, match="non-trailing NaN"): - m.add_piecewise_constraints( - {"x": x, "y": y}, - breakpoints, - dim="bp", - method="incremental", - ) - - def test_non_trailing_nan_falls_back_to_sos2(self) -> None: - """method='auto' falls back to SOS2 for non-trailing NaN.""" + def test_active_with_linear_expression(self) -> None: + """Active can be a LinearExpression, not just a Variable.""" m = Model() x = m.add_variables(name="x") - - breakpoints = xr.DataArray( - [0, np.nan, 50, 100], dims=["bp"], coords={"bp": [0, 1, 2, 3]} + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 50], active=1 * u) == y, + method="incremental", ) + assert f"pwl0{PWL_ACTIVE_BOUND_SUFFIX}" in m.constraints - m.add_piecewise_constraints(x, breakpoints, dim="bp", method="auto") - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables - assert f"pwl0{PWL_DELTA_SUFFIX}" not in m.variables +# =========================================================================== 
+# Solver integration – active parameter +# =========================================================================== -class TestBreakpointsFactory: - def test_positional_list(self) -> None: - bp = breakpoints([0, 50, 100]) - assert bp.dims == ("breakpoint",) - assert list(bp.values) == [0.0, 50.0, 100.0] - assert list(bp.coords["breakpoint"].values) == [0, 1, 2] - - def test_positional_dict(self) -> None: - bp = breakpoints({"gen1": [0, 50, 100], "gen2": [0, 30]}, dim="generator") - assert set(bp.dims) == {"generator", "breakpoint"} - assert bp.sizes["generator"] == 2 - assert bp.sizes["breakpoint"] == 3 - assert np.isnan(bp.sel(generator="gen2", breakpoint=2)) - - def test_positional_dict_without_dim_raises(self) -> None: - with pytest.raises(ValueError, match="'dim' is required"): - breakpoints({"gen1": [0, 50], "gen2": [0, 30]}) +@pytest.mark.skipif(len(_any_solvers) == 0, reason="No solver available") +class TestSolverActive: + @pytest.fixture(params=_any_solvers) + def solver_name(self, request: pytest.FixtureRequest) -> str: + return request.param - def test_kwargs_uniform(self) -> None: - bp = breakpoints(power=[0, 50, 100], fuel=[10, 20, 30]) - assert "var" in bp.dims - assert "breakpoint" in bp.dims - assert list(bp.coords["var"].values) == ["power", "fuel"] - assert bp.sizes["breakpoint"] == 3 + def test_incremental_active_on(self, solver_name: str) -> None: + """When u=1 (forced on), normal PWL domain is active.""" + m = Model() + x = m.add_variables(lower=0, upper=100, name="x") + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 50], active=u) == y, + method="incremental", + ) + m.add_constraints(u >= 1, name="force_on") + m.add_constraints(x >= 50, name="x_min") + m.add_objective(y) + status, _ = m.solve(solver_name=solver_name) + assert status == "ok" + np.testing.assert_allclose(float(x.solution.values), 50, atol=1e-4) + 
np.testing.assert_allclose(float(y.solution.values), 10, atol=1e-4) - def test_kwargs_per_entity(self) -> None: - bp = breakpoints( - power={"gen1": [0, 50, 100], "gen2": [0, 30]}, - cost={"gen1": [0, 10, 50], "gen2": [0, 8]}, - dim="generator", + def test_incremental_active_off(self, solver_name: str) -> None: + """When u=0 (forced off), x and y must be zero.""" + m = Model() + x = m.add_variables(lower=0, upper=100, name="x") + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 50], active=u) == y, + method="incremental", ) - assert "generator" in bp.dims - assert "var" in bp.dims - assert "breakpoint" in bp.dims + m.add_constraints(u <= 0, name="force_off") + m.add_objective(y, sense="max") + status, _ = m.solve(solver_name=solver_name) + assert status == "ok" + np.testing.assert_allclose(float(x.solution.values), 0, atol=1e-4) + np.testing.assert_allclose(float(y.solution.values), 0, atol=1e-4) - def test_kwargs_mixed_list_and_dict(self) -> None: - bp = breakpoints( - power={"gen1": [0, 50], "gen2": [0, 30]}, - fuel=[10, 20], - dim="generator", - ) - assert "generator" in bp.dims - assert "var" in bp.dims - assert bp.sel(var="fuel", generator="gen1", breakpoint=0) == 10 - assert bp.sel(var="fuel", generator="gen2", breakpoint=0) == 10 - - def test_kwargs_dataarray_passthrough(self) -> None: - power_da = xr.DataArray([0, 50, 100], dims=["breakpoint"]) - bp = breakpoints(power=power_da, fuel=[10, 20, 30]) - assert "var" in bp.dims - assert bp.sel(var="power", breakpoint=0) == 0 - - def test_both_positional_and_kwargs_raises(self) -> None: - with pytest.raises(ValueError, match="Cannot pass both"): - breakpoints([0, 50], power=[10, 20]) - - def test_neither_raises(self) -> None: - with pytest.raises(ValueError, match="Must pass either"): - breakpoints() - - def test_invalid_values_type_raises(self) -> None: - with pytest.raises(TypeError, match="must be a list or dict"): - 
breakpoints(42) # type: ignore - - def test_invalid_kwarg_type_raises(self) -> None: - with pytest.raises(ValueError, match="must be a list, dict, or DataArray"): - breakpoints(power=42) # type: ignore - - def test_kwargs_dict_without_dim_raises(self) -> None: - with pytest.raises(ValueError, match="'dim' is required"): - breakpoints(power={"gen1": [0, 50]}, cost=[10, 20]) + def test_incremental_nonzero_base_active_off(self, solver_name: str) -> None: + """ + Non-zero base (x₀=20, y₀=5) with u=0 must still force zero. - def test_factory_output_works_with_piecewise(self) -> None: + Tests the x₀*u / y₀*u base term multiplication — would fail if + base terms aren't multiplied by active. + """ m = Model() - x = m.add_variables(name="x") - bp = breakpoints([0, 10, 50]) - m.add_piecewise_constraints(x, bp, dim="breakpoint") - assert f"pwl0{PWL_LAMBDA_SUFFIX}" in m.variables + x = m.add_variables(lower=0, upper=100, name="x") + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") + m.add_piecewise_constraints( + piecewise(x, [20, 60, 100], [5, 20, 50], active=u) == y, + method="incremental", + ) + m.add_constraints(u <= 0, name="force_off") + m.add_objective(y, sense="max") + status, _ = m.solve(solver_name=solver_name) + assert status == "ok" + np.testing.assert_allclose(float(x.solution.values), 0, atol=1e-4) + np.testing.assert_allclose(float(y.solution.values), 0, atol=1e-4) - def test_factory_dict_output_works_with_piecewise(self) -> None: + def test_incremental_inequality_active_off(self, solver_name: str) -> None: + """Inequality with active=0: aux variable is 0, so y <= 0.""" m = Model() - power = m.add_variables(name="power") - cost = m.add_variables(name="cost") - bp = breakpoints(power=[0, 50, 100], cost=[0, 10, 50]) + x = m.add_variables(lower=0, upper=100, name="x") + y = m.add_variables(lower=0, name="y") + u = m.add_variables(binary=True, name="u") m.add_piecewise_constraints( - {"power": power, "cost": cost}, bp, dim="breakpoint" + 
piecewise(x, [0, 50, 100], [0, 10, 50], active=u) >= y, + method="incremental", ) - assert f"pwl0{PWL_LINK_SUFFIX}" in m.constraints - + m.add_constraints(u <= 0, name="force_off") + m.add_objective(y, sense="max") + status, _ = m.solve(solver_name=solver_name) + assert status == "ok" + np.testing.assert_allclose(float(y.solution.values), 0, atol=1e-4) -class TestBreakpointsSegments: - def test_list_of_tuples(self) -> None: - bp = breakpoints.segments([(0, 10), (50, 100)]) - assert set(bp.dims) == {"segment", "breakpoint"} - assert bp.sizes["segment"] == 2 - assert bp.sizes["breakpoint"] == 2 + def test_unit_commitment_pattern(self, solver_name: str) -> None: + """Solver decides to commit: verifies correct fuel at operating point.""" + m = Model() + p_min, p_max = 20.0, 100.0 + fuel_at_pmin, fuel_at_pmax = 10.0, 60.0 - def test_ragged_segments(self) -> None: - bp = breakpoints.segments([(0, 5, 10), (50, 100)]) - assert bp.sizes["breakpoint"] == 3 - assert np.isnan(bp.sel(segment=1, breakpoint=2)) + power = m.add_variables(lower=0, upper=p_max, name="power") + fuel = m.add_variables(name="fuel") + u = m.add_variables(binary=True, name="commit") - def test_per_entity_dict(self) -> None: - bp = breakpoints.segments( - {"gen1": [(0, 10), (50, 100)], "gen2": [(0, 20), (60, 90)]}, - dim="generator", + m.add_piecewise_constraints( + piecewise(power, [p_min, p_max], [fuel_at_pmin, fuel_at_pmax], active=u) + == fuel, + method="incremental", ) - assert "generator" in bp.dims - assert "segment" in bp.dims - assert "breakpoint" in bp.dims + m.add_constraints(power >= 50, name="demand") + m.add_objective(fuel + 5 * u) - def test_kwargs_multi_variable(self) -> None: - bp = breakpoints.segments( - power=[(0, 50), (80, 100)], - cost=[(0, 10), (20, 30)], + status, _ = m.solve(solver_name=solver_name) + assert status == "ok" + np.testing.assert_allclose(float(u.solution.values), 1, atol=1e-4) + np.testing.assert_allclose(float(power.solution.values), 50, atol=1e-4) + # fuel = 10 + 
(60-10)/(100-20) * (50-20) = 28.75 + np.testing.assert_allclose(float(fuel.solution.values), 28.75, atol=1e-4) + + def test_multi_dimensional_solver(self, solver_name: str) -> None: + """Per-entity on/off: gen_a on at x=50, gen_b off at x=0.""" + m = Model() + gens = pd.Index(["a", "b"], name="gen") + x = m.add_variables(lower=0, upper=100, coords=[gens], name="x") + y = m.add_variables(coords=[gens], name="y") + u = m.add_variables(binary=True, coords=[gens], name="u") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 50], active=u) == y, + method="incremental", ) - assert "segment" in bp.dims - assert "var" in bp.dims - assert "breakpoint" in bp.dims - - def test_segments_invalid_values_type_raises(self) -> None: - with pytest.raises(TypeError, match="must be a list or dict"): - breakpoints.segments(42) # type: ignore - - def test_segments_both_positional_and_kwargs_raises(self) -> None: - with pytest.raises(ValueError, match="Cannot pass both"): - breakpoints.segments([(0, 10)], power=[(0, 10)]) - - def test_segments_neither_raises(self) -> None: - with pytest.raises(ValueError, match="Must pass either"): - breakpoints.segments() - - def test_segments_invalid_kwarg_type_raises(self) -> None: - with pytest.raises(ValueError, match="must be a list, dict, or DataArray"): - breakpoints.segments(power=42) # type: ignore - - def test_segments_kwargs_dict_without_dim_raises(self) -> None: - with pytest.raises(ValueError, match="'dim' is required"): - breakpoints.segments(power={"gen1": [(0, 50)]}, cost=[(10, 20)]) - - def test_segments_dict_without_dim_raises(self) -> None: - with pytest.raises(ValueError, match="'dim' is required"): - breakpoints.segments({"gen1": [(0, 10)], "gen2": [(50, 100)]}) - - def test_segments_works_with_disjunctive(self) -> None: - m = Model() - x = m.add_variables(name="x") - bp = breakpoints.segments([(0, 10), (50, 100)]) - m.add_disjunctive_piecewise_constraints(x, bp) - assert f"pwl0{PWL_BINARY_SUFFIX}" in m.variables + 
m.add_constraints(u.sel(gen="a") >= 1, name="a_on") + m.add_constraints(u.sel(gen="b") <= 0, name="b_off") + m.add_constraints(x.sel(gen="a") >= 50, name="a_min") + m.add_objective(y.sum()) + status, _ = m.solve(solver_name=solver_name) + assert status == "ok" + np.testing.assert_allclose(float(x.solution.sel(gen="a")), 50, atol=1e-4) + np.testing.assert_allclose(float(y.solution.sel(gen="a")), 10, atol=1e-4) + np.testing.assert_allclose(float(x.solution.sel(gen="b")), 0, atol=1e-4) + np.testing.assert_allclose(float(y.solution.sel(gen="b")), 0, atol=1e-4) -class TestAutobroadcast: - def test_1d_breakpoints_2d_variable(self) -> None: - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - x = m.add_variables(coords=[generators], name="x") - bp = breakpoints([0, 10, 50]) - m.add_piecewise_constraints(x, bp, dim="breakpoint") - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert "generator" in lambda_var.dims - assert "breakpoint" in lambda_var.dims +@pytest.mark.skipif(len(_sos2_solvers) == 0, reason="No SOS2-capable solver") +class TestSolverActiveSOS2: + @pytest.fixture(params=_sos2_solvers) + def solver_name(self, request: pytest.FixtureRequest) -> str: + return request.param - def test_already_matching_dims_noop(self) -> None: + def test_sos2_active_off(self, solver_name: str) -> None: + """SOS2: u=0 forces Σλ=0, collapsing x=0, y=0.""" m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - x = m.add_variables(coords=[generators], name="x") - bp = xr.DataArray( - [[0, 50, 100], [0, 30, 80]], - dims=["generator", "bp"], - coords={"generator": generators, "bp": [0, 1, 2]}, + x = m.add_variables(lower=0, upper=100, name="x") + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") + m.add_piecewise_constraints( + piecewise(x, [0, 50, 100], [0, 10, 50], active=u) == y, + method="sos2", ) - m.add_piecewise_constraints(x, bp, dim="bp") - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert 
"generator" in lambda_var.dims + m.add_constraints(u <= 0, name="force_off") + m.add_objective(y, sense="max") + status, _ = m.solve(solver_name=solver_name) + assert status == "ok" + np.testing.assert_allclose(float(x.solution.values), 0, atol=1e-4) + np.testing.assert_allclose(float(y.solution.values), 0, atol=1e-4) - def test_dict_expr_broadcast(self) -> None: + def test_disjunctive_active_off(self, solver_name: str) -> None: + """Disjunctive: u=0 forces Σz_k=0, collapsing x=0, y=0.""" m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - power = m.add_variables(coords=[generators], name="power") - cost = m.add_variables(coords=[generators], name="cost") - bp = breakpoints(power=[0, 50, 100], cost=[0, 10, 50]) + x = m.add_variables(lower=0, upper=100, name="x") + y = m.add_variables(name="y") + u = m.add_variables(binary=True, name="u") m.add_piecewise_constraints( - {"power": power, "cost": cost}, bp, dim="breakpoint" - ) - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert "generator" in lambda_var.dims - - def test_disjunctive_broadcast(self) -> None: - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - x = m.add_variables(coords=[generators], name="x") - bp = breakpoints.segments([(0, 10), (50, 100)]) - m.add_disjunctive_piecewise_constraints(x, bp) - binary_var = m.variables[f"pwl0{PWL_BINARY_SUFFIX}"] - assert "generator" in binary_var.dims - - def test_broadcast_multi_dim(self) -> None: - m = Model() - generators = pd.Index(["gen1", "gen2"], name="generator") - timesteps = pd.Index([0, 1, 2], name="time") - x = m.add_variables(coords=[generators, timesteps], name="x") - bp = breakpoints([0, 10, 50]) - m.add_piecewise_constraints(x, bp, dim="breakpoint") - lambda_var = m.variables[f"pwl0{PWL_LAMBDA_SUFFIX}"] - assert "generator" in lambda_var.dims - assert "time" in lambda_var.dims + piecewise( + x, + segments([[0.0, 10.0], [50.0, 100.0]]), + segments([[0.0, 5.0], [20.0, 80.0]]), + active=u, + ) + == y, 
+ ) + m.add_constraints(u <= 0, name="force_off") + m.add_objective(y, sense="max") + status, _ = m.solve(solver_name=solver_name) + assert status == "ok" + np.testing.assert_allclose(float(x.solution.values), 0, atol=1e-4) + np.testing.assert_allclose(float(y.solution.values), 0, atol=1e-4) From 501850f3c91f24887c62695c26ad8c72f8843b82 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 12:10:03 +0100 Subject: [PATCH 44/66] Restore rhs_nan_mask in to_constraint fallthrough path Capture NaN positions in constant RHS before sub() fills them with 0, then restore NaN afterward so they still signal unconstrained positions. Co-Authored-By: Claude Opus 4.6 --- linopy/expressions.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 2e1c80c7..a30d5e5d 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -1202,9 +1202,28 @@ def to_constraint( f"has {len(self.coord_dims)}. Cannot create constraint." ) + # Remember where RHS is NaN (meaning "no constraint") before the + # subtraction, which may fill NaN with 0 as part of normal + # expression arithmetic. + if isinstance(rhs, DataArray): + rhs_nan_mask = rhs.isnull() + elif isinstance(rhs, np.ndarray | pd.Series | pd.DataFrame): + rhs_nan_mask = as_dataarray( + rhs, coords=self.coords, dims=self.coord_dims + ).isnull() + else: + rhs_nan_mask = None + all_to_lhs = self.sub(rhs, join=join).data + computed_rhs = -all_to_lhs.const + + # Restore NaN at positions where the original constant RHS had no + # value so that downstream code still treats them as unconstrained. 
+ if rhs_nan_mask is not None and rhs_nan_mask.any(): + computed_rhs = xr.where(rhs_nan_mask, np.nan, computed_rhs) + data = assign_multiindex_safe( - all_to_lhs[["coeffs", "vars"]], sign=sign, rhs=-all_to_lhs.const + all_to_lhs[["coeffs", "vars"]], sign=sign, rhs=computed_rhs ) return constraints.Constraint(data, model=self.model) From ae760b1dde379019c942206f1bbb19d40b6ce416 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 13:46:53 +0100 Subject: [PATCH 45/66] Restore size-aware alignment defaults from harmonize-linopy-operations - _align_constant: use override when sizes match, reindex_like otherwise (instead of strict exact join default) - merge: restore check_common_keys_values override/outer logic - to_constraint: restore SUPPORTED_CONSTANT_TYPES conversion with reindex_like and rhs_nan_mask preservation - Sync test_linear_expression.py with origin/harmonize-linopy-operations - Re-add check_common_keys_values to common.py Co-Authored-By: Claude Opus 4.6 --- linopy/common.py | 18 ++++ linopy/expressions.py | 165 ++++++++++----------------------- test/test_linear_expression.py | 161 ++++++++++++++++++++------------ 3 files changed, 168 insertions(+), 176 deletions(-) diff --git a/linopy/common.py b/linopy/common.py index db309fa4..48755d6e 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -1185,6 +1185,24 @@ def deco(cls: Any) -> Any: return deco +def check_common_keys_values(list_of_dicts: list[dict[str, Any]]) -> bool: + """ + Check if all common keys among a list of dictionaries have the same value. + + Parameters + ---------- + list_of_dicts : list of dict + A list of dictionaries. + + Returns + ------- + bool + True if all common keys have the same value across all dictionaries, False otherwise. 
+ """ + common_keys = set.intersection(*(set(d.keys()) for d in list_of_dicts)) + return all(len({d[k] for d in list_of_dicts if k in d}) == 1 for k in common_keys) + + def align( *objects: LinearExpression | QuadraticExpression | Variable | T_Alignable, join: JoinOptions = "exact", diff --git a/linopy/expressions.py b/linopy/expressions.py index a30d5e5d..22a1fb1d 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -49,6 +49,7 @@ LocIndexer, as_dataarray, assign_multiindex_safe, + check_common_keys_values, check_has_nulls, check_has_nulls_polars, fill_missing_coords, @@ -551,7 +552,6 @@ def _align_constant( other: DataArray, fill_value: float = 0, join: JoinOptions | None = None, - default_join: JoinOptions = "exact", ) -> tuple[DataArray, DataArray, bool]: """ Align a constant DataArray with self.const. @@ -563,10 +563,7 @@ def _align_constant( fill_value : float, default: 0 Fill value for missing coordinates. join : str, optional - Alignment method. If None, uses default_join. - default_join : str, default: "exact" - Default join mode when join is None. Use "exact" for add/sub, - "inner" for mul/div. + Alignment method. If None, uses size-aware default behavior. Returns ------- @@ -578,32 +575,22 @@ def _align_constant( Whether the expression's data needs reindexing. 
""" if join is None: - join = default_join - - if join == "override": - return self.const, other.assign_coords(coords=self.coords), False - elif join == "left": + if other.sizes == self.const.sizes: + return self.const, other.assign_coords(coords=self.coords), False return ( self.const, other.reindex_like(self.const, fill_value=fill_value), False, ) + elif join == "override": + return self.const, other.assign_coords(coords=self.coords), False else: - try: - self_const, aligned = xr.align( - self.const, other, join=join, fill_value=fill_value - ) - except ValueError as e: - if "exact" in str(e): - raise ValueError( - f"{e}\n" - "Use .add()/.sub()/.mul()/.div() with an explicit join= parameter:\n" - ' .add(other, join="inner") # intersection of coordinates\n' - ' .add(other, join="outer") # union of coordinates (with fill)\n' - ' .add(other, join="left") # keep left operand\'s coordinates\n' - ' .add(other, join="override") # positional alignment' - ) from None - raise + self_const, aligned = xr.align( + self.const, + other, + join=join, + fill_value=fill_value, + ) return self_const, aligned, True def _add_constant( @@ -615,7 +602,7 @@ def _add_constant( return self.assign(const=self.const.fillna(0) + other) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, da, needs_data_reindex = self._align_constant( - da, fill_value=0, join=join, default_join="exact" + da, fill_value=0, join=join ) da = da.fillna(0) self_const = self_const.fillna(0) @@ -645,7 +632,7 @@ def _apply_constant_op( """ factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, factor, needs_data_reindex = self._align_constant( - factor, fill_value=fill_value, join=join, default_join="exact" + factor, fill_value=fill_value, join=join ) factor = factor.fillna(fill_value) self_const = self_const.fillna(0) @@ -1160,57 +1147,23 @@ def to_constraint( f"Both sides of the constraint are constant. At least one side must contain variables. 
{self} {rhs}" ) - if isinstance(rhs, DataArray): - effective_join = join if join is not None else "exact" - if effective_join == "override": - aligned_rhs = rhs.assign_coords(coords=self.const.coords) - expr_const = self.const - expr_data = self.data - elif effective_join == "left": - aligned_rhs = rhs.reindex_like(self.const, fill_value=np.nan) - expr_const = self.const - expr_data = self.data - else: - try: - expr_const_aligned, aligned_rhs = xr.align( - self.const, rhs, join=effective_join, fill_value=np.nan - ) - except ValueError as e: - if "exact" in str(e): - raise ValueError( - f"{e}\n" - "Use .le()/.ge()/.eq() with an explicit join= parameter:\n" - ' .le(rhs, join="inner") # intersection of coordinates\n' - ' .le(rhs, join="left") # keep expression coordinates (NaN fill)\n' - ' .le(rhs, join="override") # positional alignment' - ) from None - raise - expr_const = expr_const_aligned.fillna(0) - expr_data = self.data.reindex_like( - expr_const_aligned, fill_value=self._fill_value + if isinstance(rhs, SUPPORTED_CONSTANT_TYPES): + rhs = as_dataarray(rhs, coords=self.coords, dims=self.coord_dims) + + extra_dims = set(rhs.dims) - set(self.coord_dims) + if extra_dims: + logger.warning( + f"Constant RHS contains dimensions {extra_dims} not present " + f"in the expression, which might lead to inefficiencies. " + f"Consider collapsing the dimensions by taking min/max." ) - constraint_rhs = aligned_rhs - expr_const - data = assign_multiindex_safe( - expr_data[["coeffs", "vars"]], sign=sign, rhs=constraint_rhs - ) - return constraints.Constraint(data, model=self.model) - elif isinstance(rhs, np.ndarray | pd.Series | pd.DataFrame) and rhs.ndim > len( - self.coord_dims - ): - raise ValueError( - f"RHS has {rhs.ndim} dimensions, but the expression only " - f"has {len(self.coord_dims)}. Cannot create constraint." 
- ) + rhs = rhs.reindex_like(self.const, fill_value=np.nan) # Remember where RHS is NaN (meaning "no constraint") before the # subtraction, which may fill NaN with 0 as part of normal # expression arithmetic. if isinstance(rhs, DataArray): rhs_nan_mask = rhs.isnull() - elif isinstance(rhs, np.ndarray | pd.Series | pd.DataFrame): - rhs_nan_mask = as_dataarray( - rhs, coords=self.coords, dims=self.coord_dims - ).isnull() else: rhs_nan_mask = None @@ -2488,6 +2441,16 @@ def merge( model = exprs[0].model + if join is not None: + override = join == "override" + elif cls in linopy_types and dim in HELPER_DIMS: + coord_dims = [ + {k: v for k, v in e.sizes.items() if k not in HELPER_DIMS} for e in exprs + ] + override = check_common_keys_values(coord_dims) # type: ignore + else: + override = False + data = [e.data if isinstance(e, linopy_types) else e for e in exprs] data = [fill_missing_coords(ds, fill_helper_dims=True) for ds in data] @@ -2503,51 +2466,23 @@ def merge( if join is not None: kwargs["join"] = join + elif override: + kwargs["join"] = "override" else: - kwargs["join"] = "exact" - - try: - if dim == TERM_DIM: - ds = xr.concat([d[["coeffs", "vars"]] for d in data], dim, **kwargs) - subkwargs = {**kwargs, "fill_value": 0} - const = xr.concat([d["const"] for d in data], dim, **subkwargs).sum( - TERM_DIM - ) - ds = assign_multiindex_safe(ds, const=const) - elif dim == FACTOR_DIM: - ds = xr.concat([d[["vars"]] for d in data], dim, **kwargs) - coeffs = xr.concat([d["coeffs"] for d in data], dim, **kwargs).prod( - FACTOR_DIM - ) - const = xr.concat([d["const"] for d in data], dim, **kwargs).prod( - FACTOR_DIM - ) - ds = assign_multiindex_safe(ds, coeffs=coeffs, const=const) - else: - # Pre-pad helper dims to same size before concat - fill = kwargs.get("fill_value", FILL_VALUE) - for helper_dim in HELPER_DIMS: - sizes = [d.sizes.get(helper_dim, 0) for d in data] - max_size = max(sizes) if sizes else 0 - if max_size > 0 and min(sizes) < max_size: - data = [ - 
d.reindex({helper_dim: range(max_size)}, fill_value=fill) - if d.sizes.get(helper_dim, 0) < max_size - else d - for d in data - ] - ds = xr.concat(data, dim, **kwargs) - except ValueError as e: - if "exact" in str(e): - raise ValueError( - f"{e}\n" - "Use .add()/.sub()/.mul()/.div() with an explicit join= parameter:\n" - ' .add(other, join="inner") # intersection of coordinates\n' - ' .add(other, join="outer") # union of coordinates (with fill)\n' - ' .add(other, join="left") # keep left operand\'s coordinates\n' - ' .add(other, join="override") # positional alignment' - ) from None - raise + kwargs.setdefault("join", "outer") + + if dim == TERM_DIM: + ds = xr.concat([d[["coeffs", "vars"]] for d in data], dim, **kwargs) + subkwargs = {**kwargs, "fill_value": 0} + const = xr.concat([d["const"] for d in data], dim, **subkwargs).sum(TERM_DIM) + ds = assign_multiindex_safe(ds, const=const) + elif dim == FACTOR_DIM: + ds = xr.concat([d[["vars"]] for d in data], dim, **kwargs) + coeffs = xr.concat([d["coeffs"] for d in data], dim, **kwargs).prod(FACTOR_DIM) + const = xr.concat([d["const"] for d in data], dim, **kwargs).prod(FACTOR_DIM) + ds = assign_multiindex_safe(ds, coeffs=coeffs, const=const) + else: + ds = xr.concat(data, dim, **kwargs) for d in set(HELPER_DIMS) & set(ds.coords): ds = ds.reset_index(d, drop=True) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 4a54e6d7..d3b8d426 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -182,7 +182,6 @@ def test_linear_expression_with_multiplication(x: Variable) -> None: expr = np.array(1) * x assert isinstance(expr, LinearExpression) - # Constants with extra dims broadcast freely expr = xr.DataArray(np.array([[1, 2], [2, 3]])) * x assert isinstance(expr, LinearExpression) @@ -278,9 +277,9 @@ def test_linear_expression_with_constant_multiplication( assert isinstance(obs, LinearExpression) assert (obs.const == 10).all() - # Constants with extra dims 
broadcast freely
     obs = expr * pd.Series([1, 2, 3], index=pd.RangeIndex(3, name="new_dim"))
     assert isinstance(obs, LinearExpression)
+    assert obs.shape == (2, 3, 1)
 
 
 def test_linear_expression_multi_indexed(u: Variable) -> None:
@@ -404,12 +403,8 @@ def test_linear_expression_sum(
 
     assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0"))
 
-    # disjoint coords now raise with exact default
-    with pytest.raises(ValueError, match="exact"):
-        v.loc[:9] + v.loc[10:]
-
-    # positional alignment via assign_coords
-    expr = v.loc[:9] + v.loc[10:].assign_coords(dim_2=v.loc[:9].coords["dim_2"])
+    # test special case override coords
+    expr = v.loc[:9] + v.loc[10:]
     assert expr.nterm == 2
     assert len(expr.coords["dim_2"]) == 10
 
@@ -432,12 +427,8 @@ def test_linear_expression_sum_with_const(
 
     assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0"))
 
-    # disjoint coords now raise with exact default
-    with pytest.raises(ValueError, match="exact"):
-        v.loc[:9] + v.loc[10:]
-
-    # positional alignment via assign_coords
-    expr = v.loc[:9] + v.loc[10:].assign_coords(dim_2=v.loc[:9].coords["dim_2"])
+    # test special case override coords
+    expr = v.loc[:9] + v.loc[10:]
     assert expr.nterm == 2
     assert len(expr.coords["dim_2"]) == 10
 
@@ -565,17 +556,8 @@ def superset(self, request: Any) -> xr.DataArray | pd.Series:
             np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2")
         )
 
-    @pytest.fixture
-    def matching(self) -> xr.DataArray:
-        return xr.DataArray(
-            np.arange(20, dtype=float),
-            dims=["dim_2"],
-            coords={"dim_2": range(20)},
-        )
-
     @pytest.fixture
     def expected_fill(self) -> np.ndarray:
-        """Old expected result: 20-entry array with values at positions 1,3."""
         arr = np.zeros(20)
         arr[1] = 10.0
         arr[3] = 30.0
@@ -824,41 +806,51 @@ def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None:
         assert_quadequal(subset + qexpr, qexpr + subset)
 
     class TestMissingValues:
-        """Same shape as variable but with NaN entries in the constant."""
+        """
+        Same shape as 
variable but with NaN entries in the constant. + + NaN values are filled with operation-specific neutral elements: + - Addition/subtraction: NaN -> 0 (additive identity) + - Multiplication: NaN -> 0 (zeroes out the variable) + - Division: NaN -> 1 (multiplicative identity, no scaling) + """ - EXPECTED_NAN_MASK = np.zeros(20, dtype=bool) - EXPECTED_NAN_MASK[[0, 5, 19]] = True + NAN_POSITIONS = [0, 5, 19] @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_nan_propagates( + def test_add_nan_filled( self, v: Variable, nan_constant: xr.DataArray | pd.Series, operand: str, ) -> None: + base_const = 0.0 if operand == "var" else 5.0 target = v if operand == "var" else v + 5 result = target + nan_constant assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - np.isnan(result.const.values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.const.values).any() + # At NaN positions, const should be unchanged (added 0) + for i in self.NAN_POSITIONS: + assert result.const.values[i] == base_const @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_nan_propagates( + def test_sub_nan_filled( self, v: Variable, nan_constant: xr.DataArray | pd.Series, operand: str, ) -> None: + base_const = 0.0 if operand == "var" else 5.0 target = v if operand == "var" else v + 5 result = target - nan_constant assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - np.isnan(result.const.values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.const.values).any() + # At NaN positions, const should be unchanged (subtracted 0) + for i in self.NAN_POSITIONS: + assert result.const.values[i] == base_const @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_nan_propagates( + def test_mul_nan_filled( self, v: Variable, nan_constant: xr.DataArray | pd.Series, @@ -867,12 +859,13 @@ def test_mul_nan_propagates( target = v if operand == "var" else 1 * v result = target * nan_constant assert result.sizes["dim_2"] == 20 - 
np.testing.assert_array_equal( - np.isnan(result.coeffs.squeeze().values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.coeffs.squeeze().values).any() + # At NaN positions, coeffs should be 0 (variable zeroed out) + for i in self.NAN_POSITIONS: + assert result.coeffs.squeeze().values[i] == 0.0 @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_nan_propagates( + def test_div_nan_filled( self, v: Variable, nan_constant: xr.DataArray | pd.Series, @@ -881,9 +874,11 @@ def test_div_nan_propagates( target = v if operand == "var" else 1 * v result = target / nan_constant assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - np.isnan(result.coeffs.squeeze().values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.coeffs.squeeze().values).any() + # At NaN positions, coeffs should be unchanged (divided by 1) + original_coeffs = (1 * v).coeffs.squeeze().values + for i in self.NAN_POSITIONS: + assert result.coeffs.squeeze().values[i] == original_coeffs[i] def test_add_commutativity( self, @@ -892,14 +887,9 @@ def test_add_commutativity( ) -> None: result_a = v + nan_constant result_b = nan_constant + v - # Compare non-NaN values are equal and NaN positions match - nan_mask_a = np.isnan(result_a.const.values) - nan_mask_b = np.isnan(result_b.const.values) - np.testing.assert_array_equal(nan_mask_a, nan_mask_b) - np.testing.assert_array_equal( - result_a.const.values[~nan_mask_a], - result_b.const.values[~nan_mask_b], - ) + assert not np.isnan(result_a.const.values).any() + assert not np.isnan(result_b.const.values).any() + np.testing.assert_array_equal(result_a.const.values, result_b.const.values) np.testing.assert_array_equal( result_a.coeffs.values, result_b.coeffs.values ) @@ -911,12 +901,10 @@ def test_mul_commutativity( ) -> None: result_a = v * nan_constant result_b = nan_constant * v - nan_mask_a = np.isnan(result_a.coeffs.values) - nan_mask_b = np.isnan(result_b.coeffs.values) - 
np.testing.assert_array_equal(nan_mask_a, nan_mask_b) + assert not np.isnan(result_a.coeffs.values).any() + assert not np.isnan(result_b.coeffs.values).any() np.testing.assert_array_equal( - result_a.coeffs.values[~nan_mask_a], - result_b.coeffs.values[~nan_mask_b], + result_a.coeffs.values, result_b.coeffs.values ) def test_quadexpr_add_nan( @@ -928,9 +916,62 @@ def test_quadexpr_add_nan( result = qexpr + nan_constant assert isinstance(result, QuadraticExpression) assert result.sizes["dim_2"] == 20 - np.testing.assert_array_equal( - np.isnan(result.const.values), self.EXPECTED_NAN_MASK - ) + assert not np.isnan(result.const.values).any() + + class TestExpressionWithNaN: + """Test that NaN in expression's own const/coeffs doesn't propagate.""" + + def test_shifted_expr_add_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr + 5 + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == 5.0 + + def test_shifted_expr_mul_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr * 2 + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_add_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + expr = (1 * v).shift(dim_2=1) + result = expr + arr + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == 0.0 + + def test_shifted_expr_mul_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr * arr + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_div_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr / 2 + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_sub_scalar(self, v: 
Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr - 3 + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == -3.0 + + def test_shifted_expr_div_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr / arr + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: + nan_coeff = np.ones(v.sizes["dim_2"]) + nan_coeff[0] = np.nan + result = v.to_linexpr(nan_coeff) + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 class TestMultiDim: def test_multidim_subset_mul(self, m: Model) -> None: @@ -1042,8 +1083,7 @@ def test_linear_expression_isnull(v: Variable) -> None: expr = np.arange(20) * v filter = (expr.coeffs >= 10).any(TERM_DIM) expr = expr.where(filter) - # Entries where filter is False are null (coeffs=NaN, const=NaN) - assert expr.isnull().sum() == 10 # first 10 entries (coeff 0..9) are null + assert expr.isnull().sum() == 10 def test_linear_expression_flat(v: Variable) -> None: @@ -1173,7 +1213,6 @@ def test_linear_expression_fillna(v: Variable) -> None: filled = filtered.fillna(10) assert isinstance(filled, LinearExpression) - # fillna replaces NaN const values (10 entries × 10) + kept values (10 × 10) assert filled.const.sum() == 200 assert filled.coeffs.isnull().sum() == 10 From 2bdb49b05da150712b71df264aa44618d20d8119 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 13:56:22 +0100 Subject: [PATCH 46/66] Fix mypy errors in test files: add type annotations and fix Hashable check Co-Authored-By: Claude Opus 4.6 --- test/test_algebraic_properties.py | 51 +++++++++++++++++-------------- test/test_linear_expression.py | 2 +- 2 files changed, 29 insertions(+), 24 deletions(-) diff --git 
a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py index 09548bf3..c0f04f22 100644 --- a/test/test_algebraic_properties.py +++ b/test/test_algebraic_properties.py @@ -37,6 +37,8 @@ a * 0 == 0 multiplication by zero """ +from __future__ import annotations + import numpy as np import pandas as pd import pytest @@ -44,49 +46,50 @@ from linopy import Model from linopy.expressions import LinearExpression +from linopy.variables import Variable @pytest.fixture -def m(): +def m() -> Model: return Model() @pytest.fixture -def time(): +def time() -> pd.RangeIndex: return pd.RangeIndex(3, name="time") @pytest.fixture -def tech(): +def tech() -> pd.Index: return pd.Index(["solar", "wind"], name="tech") @pytest.fixture -def x(m, time): +def x(m: Model, time: pd.RangeIndex) -> Variable: """Variable with dims [time].""" return m.add_variables(lower=0, coords=[time], name="x") @pytest.fixture -def y(m, time): +def y(m: Model, time: pd.RangeIndex) -> Variable: """Variable with dims [time].""" return m.add_variables(lower=0, coords=[time], name="y") @pytest.fixture -def z(m, time): +def z(m: Model, time: pd.RangeIndex) -> Variable: """Variable with dims [time].""" return m.add_variables(lower=0, coords=[time], name="z") @pytest.fixture -def g(m, time, tech): +def g(m: Model, time: pd.RangeIndex, tech: pd.Index) -> Variable: """Variable with dims [time, tech].""" return m.add_variables(lower=0, coords=[time, tech], name="g") @pytest.fixture -def c(tech): +def c(tech: pd.Index) -> xr.DataArray: """Constant (DataArray) with dims [tech].""" return xr.DataArray([2.0, 3.0], dims=["tech"], coords={"tech": tech}) @@ -95,7 +98,7 @@ def assert_linequal(a: LinearExpression, b: LinearExpression) -> None: """Assert two linear expressions are algebraically equivalent.""" assert set(a.dims) == set(b.dims), f"dims differ: {a.dims} vs {b.dims}" for dim in a.dims: - if dim.startswith("_"): + if isinstance(dim, str) and dim.startswith("_"): continue np.testing.assert_array_equal( 
sorted(a.coords[dim].values), sorted(b.coords[dim].values) @@ -109,15 +112,15 @@ def assert_linequal(a: LinearExpression, b: LinearExpression) -> None: class TestCommutativity: - def test_add_expr_expr(self, x, y): + def test_add_expr_expr(self, x: Variable, y: Variable) -> None: """X + y == y + x""" assert_linequal(x + y, y + x) - def test_mul_expr_constant(self, g, c): + def test_mul_expr_constant(self, g: Variable, c: xr.DataArray) -> None: """G * c == c * g""" assert_linequal(g * c, c * g) - def test_add_expr_constant(self, g, c): + def test_add_expr_constant(self, g: Variable, c: xr.DataArray) -> None: """G + c == c + g""" assert_linequal(g + c, c + g) @@ -128,11 +131,11 @@ def test_add_expr_constant(self, g, c): class TestAssociativity: - def test_add_same_dims(self, x, y, z): + def test_add_same_dims(self, x: Variable, y: Variable, z: Variable) -> None: """(x + y) + z == x + (y + z)""" assert_linequal((x + y) + z, x + (y + z)) - def test_add_with_constant(self, x, g, c): + def test_add_with_constant(self, x: Variable, g: Variable, c: xr.DataArray) -> None: """(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])""" assert_linequal((x + c) + g, x + (c + g)) @@ -143,15 +146,17 @@ def test_add_with_constant(self, x, g, c): class TestDistributivity: - def test_scalar(self, x, y): + def test_scalar(self, x: Variable, y: Variable) -> None: """S * (x + y) == s*x + s*y""" assert_linequal(3 * (x + y), 3 * x + 3 * y) - def test_constant_subset_dims(self, g, c): + def test_constant_subset_dims(self, g: Variable, c: xr.DataArray) -> None: """c[B] * (g[A,B] + g[A,B]) == c*g + c*g""" assert_linequal(c * (g + g), c * g + c * g) - def test_constant_mixed_dims(self, x, g, c): + def test_constant_mixed_dims( + self, x: Variable, g: Variable, c: xr.DataArray + ) -> None: """c[B] * (x[A] + g[A,B]) == c*x + c*g""" assert_linequal(c * (x + g), c * x + c * g) @@ -162,14 +167,14 @@ def test_constant_mixed_dims(self, x, g, c): class TestIdentity: - def test_additive(self, x): + def 
test_additive(self, x: Variable) -> None: """X + 0 == x""" result = x + 0 assert isinstance(result, LinearExpression) assert (result.const == 0).all() np.testing.assert_array_equal(result.coeffs.squeeze().values, [1, 1, 1]) - def test_multiplicative(self, x): + def test_multiplicative(self, x: Variable) -> None: """X * 1 == x""" result = x * 1 assert isinstance(result, LinearExpression) @@ -182,15 +187,15 @@ def test_multiplicative(self, x): class TestNegation: - def test_subtraction_is_add_negation(self, x, y): + def test_subtraction_is_add_negation(self, x: Variable, y: Variable) -> None: """X - y == x + (-y)""" assert_linequal(x - y, x + (-y)) - def test_subtraction_definition(self, x, y): + def test_subtraction_definition(self, x: Variable, y: Variable) -> None: """X - y == x + (-1) * y""" assert_linequal(x - y, x + (-1) * y) - def test_double_negation(self, x): + def test_double_negation(self, x: Variable) -> None: """-(-x) has same coefficients as x""" result = -(-x) np.testing.assert_array_equal( @@ -205,7 +210,7 @@ def test_double_negation(self, x): class TestZero: - def test_multiplication_by_zero(self, x): + def test_multiplication_by_zero(self, x: Variable) -> None: """X * 0 has zero coefficients""" result = x * 0 assert (result.coeffs == 0).all() diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index d3b8d426..1378f48d 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -1920,7 +1920,7 @@ def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: expr_a = 1 * a + 5 const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) - for join in ["override", "outer", "inner"]: + for join in ("override", "outer", "inner"): result = expr_a.add(const, join=join) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.const.values, [6, 7, 8]) From 
0b4605ae20f64143330e53e35bab60801792a049 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 17:02:06 +0100 Subject: [PATCH 47/66] Add legacy/v1 arithmetic convention with deprecation transition (#607) * Add legacy arithmetic join mode with deprecation warning for transition - Add `options["arithmetic_join"]` setting (default: "legacy") to control coordinate alignment in arithmetic operations, merge, and constraints - Legacy mode reproduces old behavior: override when shapes match, outer otherwise for merge; reindex_like for constants; inner for align() - All legacy codepaths emit FutureWarning guiding users to opt in to "exact" - Move shared test fixtures (m, x, y, z, v, u) to conftest.py - Exact-behavior tests use autouse fixture to set arithmetic_join="exact" - Legacy test files (test_*_legacy.py) validate old behavior is preserved - All 2736 tests pass Co-Authored-By: Claude Opus 4.6 * Simplify global setting to 'legacy'/'v1', add LinopyDeprecationWarning - Restrict options["arithmetic_join"] to {"legacy", "v1"} instead of exposing all xarray join values (explicit join= parameter still accepts any) - "v1" maps to "exact" join internally - Add LinopyDeprecationWarning class (subclass of FutureWarning) with centralized message including how to silence - Export LinopyDeprecationWarning from linopy.__init__ Co-Authored-By: Claude Opus 4.6 * Rename arithmetic_join to arithmetic_convention, mention v1 removal - Rename setting from 'arithmetic_join' to 'arithmetic_convention' - Update deprecation message: "will be removed in linopy v1" Co-Authored-By: Claude Opus 4.6 * Rename test fixtures from exact_join to v1_convention Co-Authored-By: Claude Opus 4.6 * Update legacy tests * Merge harmonize-linopy-operations-mixed, restore NaN filling and align function - Resolve merge conflicts keeping transition layer logic - Restore NaN fillna(0) in _add_constant and _apply_constant_op - Restore simple finisher-based align() 
function (fixes MultiIndex) - Use check_common_keys_values in merge legacy path - Update legacy test files to match origin/harmonize-linopy-operations Co-Authored-By: Claude Opus 4.6 * Clean up obsolete code and fix convention-awareness in arithmetic - Remove dead check_common_keys_values function from common.py - Remove redundant default_join parameter from _align_constant, use options["arithmetic_convention"] directly - Gate fillna(0) calls in _add_constant and _apply_constant_op behind legacy convention check so NaN values propagate correctly under v1 - Fix legacy to_constraint path to compute constraint RHS directly instead of routing through sub() which re-applies fillna - Restore Variable.__mul__ scalar fast path via to_linexpr(other) - Restore Variable.__div__ explicit TypeError for non-linear division - Update v1 tests to expect ValueError on mismatched coords and test explicit join= escape hatches Co-Authored-By: Claude Opus 4.6 * Fix mypy errors and pytest importmode for CI - Add return type annotations (Generator) to all v1_convention fixtures - Add importmode = "importlib" to pytest config to fix import mismatch when linopy is installed from wheel and source dir is also present - Use tuple literal in loop to fix arg-type error Co-Authored-By: Claude Opus 4.6 * Fix CI: move import linopy to lazy in conftest.py Top-level `import linopy` in conftest.py caused pytest to import the package from site-packages before collecting doctests from the source directory, triggering import file mismatch errors on all platforms. Move the import inside fixture functions where it's actually needed. Also revert the unnecessary test.yml and importmode changes. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .gitignore | 1 - linopy/__init__.py | 3 +- linopy/common.py | 67 +- linopy/config.py | 36 +- linopy/expressions.py | 229 ++- linopy/variables.py | 15 +- test/conftest.py | 11 + test/test_algebraic_properties.py | 11 + test/test_common.py | 14 + test/test_common_legacy.py | 734 +++++++++ test/test_constraints.py | 93 +- test/test_constraints_legacy.py | 448 +++++ test/test_linear_expression.py | 407 +++-- test/test_linear_expression_legacy.py | 2160 +++++++++++++++++++++++++ test/test_typing.py | 11 + test/test_typing_legacy.py | 25 + 16 files changed, 3968 insertions(+), 297 deletions(-) create mode 100644 test/test_common_legacy.py create mode 100644 test/test_constraints_legacy.py create mode 100644 test/test_linear_expression_legacy.py create mode 100644 test/test_typing_legacy.py diff --git a/.gitignore b/.gitignore index 10ac8e45..7b962a6b 100644 --- a/.gitignore +++ b/.gitignore @@ -50,4 +50,3 @@ benchmark/scripts/leftovers/ # direnv .envrc AGENTS.md -coverage.xml diff --git a/linopy/__init__.py b/linopy/__init__.py index b1dc33b9..a372c087 100644 --- a/linopy/__init__.py +++ b/linopy/__init__.py @@ -13,7 +13,7 @@ # we need to extend their __mul__ functions with a quick special case import linopy.monkey_patch_xarray # noqa: F401 from linopy.common import align -from linopy.config import options +from linopy.config import LinopyDeprecationWarning, options from linopy.constants import EQUAL, GREATER_EQUAL, LESS_EQUAL from linopy.constraints import Constraint, Constraints from linopy.expressions import LinearExpression, QuadraticExpression, merge @@ -34,6 +34,7 @@ "EQUAL", "GREATER_EQUAL", "LESS_EQUAL", + "LinopyDeprecationWarning", "LinearExpression", "Model", "Objective", diff --git a/linopy/common.py b/linopy/common.py index 48755d6e..4b3f84d6 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -10,7 +10,7 @@ import operator import os from collections.abc import Callable, 
Generator, Hashable, Iterable, Sequence -from functools import reduce, wraps +from functools import partial, reduce, wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Generic, TypeVar, overload from warnings import warn @@ -1205,7 +1205,7 @@ def check_common_keys_values(list_of_dicts: list[dict[str, Any]]) -> bool: def align( *objects: LinearExpression | QuadraticExpression | Variable | T_Alignable, - join: JoinOptions = "exact", + join: JoinOptions | None = None, copy: bool = True, indexes: Any = None, exclude: str | Iterable[Hashable] = frozenset(), @@ -1265,41 +1265,56 @@ def align( """ + from linopy.config import options from linopy.expressions import LinearExpression, QuadraticExpression from linopy.variables import Variable - # Extract underlying Datasets for index computation. + if join is None: + join = options["arithmetic_convention"] + + if join == "legacy": + from linopy.config import LEGACY_DEPRECATION_MESSAGE, LinopyDeprecationWarning + + warn( + LEGACY_DEPRECATION_MESSAGE, + LinopyDeprecationWarning, + stacklevel=2, + ) + join = "inner" + + elif join == "v1": + join = "exact" + + finisher: list[partial[Any] | Callable[[Any], Any]] = [] das: list[Any] = [] for obj in objects: - if isinstance(obj, LinearExpression | QuadraticExpression | Variable): + if isinstance(obj, LinearExpression | QuadraticExpression): + finisher.append(partial(obj.__class__, model=obj.model)) + das.append(obj.data) + elif isinstance(obj, Variable): + finisher.append( + partial( + obj.__class__, + model=obj.model, + name=obj.data.attrs["name"], + skip_broadcast=True, + ) + ) das.append(obj.data) else: + finisher.append(lambda x: x) das.append(obj) exclude = frozenset(exclude).union(HELPER_DIMS) - - # Compute target indexes. 
- target_aligned = xr_align( - *das, join=join, copy=False, indexes=indexes, exclude=exclude + aligned = xr_align( + *das, + join=join, + copy=copy, + indexes=indexes, + exclude=exclude, + fill_value=fill_value, ) - - # Reindex each object to target indexes. - reindex_kwargs: dict[str, Any] = {} - if fill_value is not dtypes.NA: - reindex_kwargs["fill_value"] = fill_value - results: list[Any] = [] - for obj, target in zip(objects, target_aligned): - indexers = { - dim: target.indexes[dim] - for dim in target.dims - if dim not in exclude and dim in target.indexes - } - # Variable.reindex has no fill_value — it always uses sentinels - if isinstance(obj, Variable): - results.append(obj.reindex(indexers)) - else: - results.append(obj.reindex(indexers, **reindex_kwargs)) # type: ignore[union-attr] - return tuple(results) + return tuple([f(da) for f, da in zip(finisher, aligned)]) LocT = TypeVar( diff --git a/linopy/config.py b/linopy/config.py index c098709d..9f04ce17 100644 --- a/linopy/config.py +++ b/linopy/config.py @@ -9,28 +9,46 @@ from typing import Any +VALID_ARITHMETIC_JOINS = {"legacy", "v1"} + +LEGACY_DEPRECATION_MESSAGE = ( + "The 'legacy' arithmetic convention is deprecated and will be removed in " + "linopy v1. 
Set linopy.options['arithmetic_convention'] = 'v1' to opt in " + "to the new behavior, or filter this warning with:\n" + " import warnings; warnings.filterwarnings('ignore', category=LinopyDeprecationWarning)" +) + + +class LinopyDeprecationWarning(FutureWarning): + """Warning for deprecated linopy features scheduled for removal.""" + class OptionSettings: - def __init__(self, **kwargs: int) -> None: + def __init__(self, **kwargs: Any) -> None: self._defaults = kwargs self._current_values = kwargs.copy() - def __call__(self, **kwargs: int) -> None: + def __call__(self, **kwargs: Any) -> None: self.set_value(**kwargs) - def __getitem__(self, key: str) -> int: + def __getitem__(self, key: str) -> Any: return self.get_value(key) - def __setitem__(self, key: str, value: int) -> None: + def __setitem__(self, key: str, value: Any) -> None: return self.set_value(**{key: value}) - def set_value(self, **kwargs: int) -> None: + def set_value(self, **kwargs: Any) -> None: for k, v in kwargs.items(): if k not in self._defaults: raise KeyError(f"{k} is not a valid setting.") + if k == "arithmetic_convention" and v not in VALID_ARITHMETIC_JOINS: + raise ValueError( + f"Invalid arithmetic_convention: {v!r}. " + f"Must be one of {VALID_ARITHMETIC_JOINS}." 
+ ) self._current_values[k] = v - def get_value(self, name: str) -> int: + def get_value(self, name: str) -> Any: if name in self._defaults: return self._current_values[name] else: @@ -57,4 +75,8 @@ def __repr__(self) -> str: return f"OptionSettings:\n {settings}" -options = OptionSettings(display_max_rows=14, display_max_terms=6) +options = OptionSettings( + display_max_rows=14, + display_max_terms=6, + arithmetic_convention="legacy", +) diff --git a/linopy/expressions.py b/linopy/expressions.py index 22a1fb1d..64e2ecb7 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -32,6 +32,7 @@ from xarray.core.indexes import Indexes from xarray.core.types import JoinOptions from xarray.core.utils import Frozen +from xarray.structure.alignment import AlignmentError try: # resolve breaking change in xarray 2025.03.0 @@ -49,7 +50,6 @@ LocIndexer, as_dataarray, assign_multiindex_safe, - check_common_keys_values, check_has_nulls, check_has_nulls_polars, fill_missing_coords, @@ -68,7 +68,7 @@ to_dataframe, to_polars, ) -from linopy.config import options +from linopy.config import LEGACY_DEPRECATION_MESSAGE, LinopyDeprecationWarning, options from linopy.constants import ( CV_DIM, EQUAL, @@ -563,7 +563,7 @@ def _align_constant( fill_value : float, default: 0 Fill value for missing coordinates. join : str, optional - Alignment method. If None, uses size-aware default behavior. + Alignment method. If None, uses ``options["arithmetic_convention"]``. Returns ------- @@ -575,6 +575,15 @@ def _align_constant( Whether the expression's data needs reindexing. 
""" if join is None: + join = options["arithmetic_convention"] + + if join == "legacy": + warn( + LEGACY_DEPRECATION_MESSAGE, + LinopyDeprecationWarning, + stacklevel=4, + ) + # Old behavior: override when same sizes, left join otherwise if other.sizes == self.const.sizes: return self.const, other.assign_coords(coords=self.coords), False return ( @@ -582,30 +591,52 @@ def _align_constant( other.reindex_like(self.const, fill_value=fill_value), False, ) - elif join == "override": + + elif join == "v1": + join = "exact" + + if join == "override": return self.const, other.assign_coords(coords=self.coords), False - else: - self_const, aligned = xr.align( + elif join == "left": + return ( self.const, - other, - join=join, - fill_value=fill_value, + other.reindex_like(self.const, fill_value=fill_value), + False, ) + else: + try: + self_const, aligned = xr.align( + self.const, other, join=join, fill_value=fill_value + ) + except ValueError as e: + if "exact" in str(e): + raise ValueError( + f"{e}\n" + "Use .add()/.sub()/.mul()/.div() with an explicit join= parameter:\n" + ' .add(other, join="inner") # intersection of coordinates\n' + ' .add(other, join="outer") # union of coordinates (with fill)\n' + ' .add(other, join="left") # keep left operand\'s coordinates\n' + ' .add(other, join="override") # positional alignment' + ) from None + raise return self_const, aligned, True def _add_constant( self: GenericExpression, other: ConstantLike, join: JoinOptions | None = None ) -> GenericExpression: - # NaN values in self.const or other are filled with 0 (additive identity) - # so that missing data does not silently propagate through arithmetic. 
+ is_legacy = ( + join is None and options["arithmetic_convention"] == "legacy" + ) or join == "legacy" if np.isscalar(other) and join is None: - return self.assign(const=self.const.fillna(0) + other) + const = self.const.fillna(0) + other if is_legacy else self.const + other + return self.assign(const=const) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, da, needs_data_reindex = self._align_constant( da, fill_value=0, join=join ) - da = da.fillna(0) - self_const = self_const.fillna(0) + if is_legacy: + da = da.fillna(0) + self_const = self_const.fillna(0) if needs_data_reindex: fv = {**self._fill_value, "const": 0} return self.__class__( @@ -623,31 +654,29 @@ def _apply_constant_op( fill_value: float, join: JoinOptions | None = None, ) -> GenericExpression: - """ - Apply a constant operation (mul, div, etc.) to this expression with a scalar or array. - - NaN values are filled with neutral elements before the operation: - - factor (other) is filled with fill_value (0 for mul, 1 for div) - - coeffs and const are filled with 0 (additive identity) - """ + is_legacy = ( + join is None and options["arithmetic_convention"] == "legacy" + ) or join == "legacy" factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, factor, needs_data_reindex = self._align_constant( factor, fill_value=fill_value, join=join ) - factor = factor.fillna(fill_value) - self_const = self_const.fillna(0) + if is_legacy: + factor = factor.fillna(fill_value) + self_const = self_const.fillna(0) if needs_data_reindex: fv = {**self._fill_value, "const": 0} data = self.data.reindex_like(self_const, fill_value=fv) + coeffs = data.coeffs.fillna(0) if is_legacy else data.coeffs return self.__class__( assign_multiindex_safe( data, - coeffs=op(data.coeffs.fillna(0), factor), + coeffs=op(coeffs, factor), const=op(self_const, factor), ), self.model, ) - coeffs = self.coeffs.fillna(0) + coeffs = self.coeffs.fillna(0) if is_legacy else self.coeffs return 
self.assign(coeffs=op(coeffs, factor), const=op(self_const, factor)) def _multiply_by_constant( @@ -1147,34 +1176,74 @@ def to_constraint( f"Both sides of the constraint are constant. At least one side must contain variables. {self} {rhs}" ) - if isinstance(rhs, SUPPORTED_CONSTANT_TYPES): - rhs = as_dataarray(rhs, coords=self.coords, dims=self.coord_dims) + effective_join = join if join is not None else options["arithmetic_convention"] - extra_dims = set(rhs.dims) - set(self.coord_dims) - if extra_dims: - logger.warning( - f"Constant RHS contains dimensions {extra_dims} not present " - f"in the expression, which might lead to inefficiencies. " - f"Consider collapsing the dimensions by taking min/max." + if effective_join == "legacy": + warn( + LEGACY_DEPRECATION_MESSAGE, + LinopyDeprecationWarning, + stacklevel=3, + ) + # Old behavior: convert to DataArray, warn about extra dims, + # reindex_like (left join), then sub + if isinstance(rhs, SUPPORTED_CONSTANT_TYPES): + rhs = as_dataarray(rhs, coords=self.coords, dims=self.coord_dims) + extra_dims = set(rhs.dims) - set(self.coord_dims) + if extra_dims: + logger.warning( + f"Constant RHS contains dimensions {extra_dims} not present " + f"in the expression, which might lead to inefficiencies. " + f"Consider collapsing the dimensions by taking min/max." + ) + rhs = rhs.reindex_like(self.const, fill_value=np.nan) + # Alignment already done — compute constraint directly + constraint_rhs = rhs - self.const + data = assign_multiindex_safe( + self.data[["coeffs", "vars"]], sign=sign, rhs=constraint_rhs ) - rhs = rhs.reindex_like(self.const, fill_value=np.nan) + return constraints.Constraint(data, model=self.model) + # Non-constant rhs (Variable/Expression) — fall through to sub path + + if effective_join == "v1": + effective_join = "exact" - # Remember where RHS is NaN (meaning "no constraint") before the - # subtraction, which may fill NaN with 0 as part of normal - # expression arithmetic. 
if isinstance(rhs, DataArray): - rhs_nan_mask = rhs.isnull() - else: - rhs_nan_mask = None + if effective_join == "override": + aligned_rhs = rhs.assign_coords(coords=self.const.coords) + expr_const = self.const + expr_data = self.data + elif effective_join == "left": + aligned_rhs = rhs.reindex_like(self.const, fill_value=np.nan) + expr_const = self.const + expr_data = self.data + else: + try: + expr_const_aligned, aligned_rhs = xr.align( + self.const, rhs, join=effective_join, fill_value=np.nan + ) + except ValueError as e: + if "exact" in str(e): + raise ValueError( + f"{e}\n" + "Use .le()/.ge()/.eq() with an explicit join= parameter:\n" + ' .le(rhs, join="inner") # intersection of coordinates\n' + ' .le(rhs, join="left") # keep expression coordinates (NaN fill)\n' + ' .le(rhs, join="override") # positional alignment' + ) from None + raise + expr_const = expr_const_aligned.fillna(0) + expr_data = self.data.reindex_like( + expr_const_aligned, fill_value=self._fill_value + ) + constraint_rhs = aligned_rhs - expr_const + data = assign_multiindex_safe( + expr_data[["coeffs", "vars"]], sign=sign, rhs=constraint_rhs + ) + return constraints.Constraint(data, model=self.model) all_to_lhs = self.sub(rhs, join=join).data computed_rhs = -all_to_lhs.const - # Restore NaN at positions where the original constant RHS had no - # value so that downstream code still treats them as unconstrained. 
- if rhs_nan_mask is not None and rhs_nan_mask.any(): - computed_rhs = xr.where(rhs_nan_mask, np.nan, computed_rhs) - data = assign_multiindex_safe( all_to_lhs[["coeffs", "vars"]], sign=sign, rhs=computed_rhs ) @@ -1650,6 +1719,18 @@ def __add__( return self._add_constant(other) else: other = as_expression(other, model=self.model, dims=self.coord_dims) + if options["arithmetic_convention"] == "v1": + # Enforce exact coordinate alignment before merge + try: + xr.align(self.const, other.const, join="exact") + except (ValueError, AlignmentError) as e: + raise ValueError( + f"{e}\n" + "Use .add()/.sub() with an explicit join= parameter:\n" + ' .add(other, join="inner") # intersection\n' + ' .add(other, join="outer") # union with fill\n' + ' .add(other, join="left") # keep left coordinates' + ) from None return merge([self, other], cls=self.__class__) except TypeError: return NotImplemented @@ -2188,6 +2269,18 @@ def __add__(self, other: SideLike) -> QuadraticExpression: if isinstance(other, LinearExpression): other = other.to_quadexpr() + if options["arithmetic_convention"] == "v1": + try: + xr.align(self.const, other.const, join="exact") + except (ValueError, AlignmentError) as e: + raise ValueError( + f"{e}\n" + "Use .add()/.sub() with an explicit join= parameter:\n" + ' .add(other, join="inner") # intersection\n' + ' .add(other, join="outer") # union with fill\n' + ' .add(other, join="left") # keep left coordinates' + ) from None + return merge([self, other], cls=self.__class__) except TypeError: return NotImplemented @@ -2441,16 +2534,6 @@ def merge( model = exprs[0].model - if join is not None: - override = join == "override" - elif cls in linopy_types and dim in HELPER_DIMS: - coord_dims = [ - {k: v for k, v in e.sizes.items() if k not in HELPER_DIMS} for e in exprs - ] - override = check_common_keys_values(coord_dims) # type: ignore - else: - override = False - data = [e.data if isinstance(e, linopy_types) else e for e in exprs] data = [fill_missing_coords(ds, 
fill_helper_dims=True) for ds in data] @@ -2464,12 +2547,38 @@ def merge( elif cls == variables.Variable: kwargs["fill_value"] = variables.FILL_VALUE - if join is not None: - kwargs["join"] = join - elif override: - kwargs["join"] = "override" + effective_join = join if join is not None else options["arithmetic_convention"] + + if effective_join == "legacy": + warn( + LEGACY_DEPRECATION_MESSAGE, + LinopyDeprecationWarning, + stacklevel=2, + ) + # Reproduce old behavior: override when all shared dims have + # matching sizes, outer otherwise. + if cls in linopy_types and dim in HELPER_DIMS: + coord_dims = [ + {k: v for k, v in e.sizes.items() if k not in HELPER_DIMS} + for e in exprs + ] + common_keys = set.intersection(*(set(d.keys()) for d in coord_dims)) + override = all( + len({d[k] for d in coord_dims if k in d}) == 1 for k in common_keys + ) + else: + override = False + + kwargs["join"] = "override" if override else "outer" + elif effective_join == "v1": + # Merge uses outer join for xr.concat since helper dims + # (_term, _factor) commonly have different sizes and + # expressions may have different user dimensions. + # Coordinate enforcement for v1 is done at the operator + # level (__add__, __sub__, etc.) before calling merge. + kwargs["join"] = "outer" else: - kwargs.setdefault("join", "outer") + kwargs["join"] = effective_join if dim == TERM_DIM: ds = xr.concat([d[["coeffs", "vars"]] for d in data], dim, **kwargs) diff --git a/linopy/variables.py b/linopy/variables.py index 396703fb..1e2ea6ae 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -401,6 +401,13 @@ def __mul__(self, other: SideLike) -> ExpressionLike: Multiply variables with a coefficient, variable, or expression. 
""" try: + if isinstance(other, Variable | ScalarVariable): + return self.to_linexpr() * other + + # Fast path for scalars: build expression directly with coefficient + if np.isscalar(other): + return self.to_linexpr(other) + return self.to_linexpr() * other except TypeError: return NotImplemented @@ -449,7 +456,13 @@ def __div__( """ Divide variables with a coefficient. """ - return self.to_linexpr() / other + if isinstance(other, expressions.LinearExpression | Variable): + raise TypeError( + "unsupported operand type(s) for /: " + f"{type(self)} and {type(other)}. " + "Non-linear expressions are not yet supported." + ) + return self.to_linexpr()._divide_by_constant(other) def __truediv__( self, coefficient: ConstantLike | LinearExpression | Variable diff --git a/test/conftest.py b/test/conftest.py index ee20cdc2..5e2170a3 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -3,6 +3,7 @@ from __future__ import annotations import os +from collections.abc import Generator from typing import TYPE_CHECKING import pandas as pd @@ -57,6 +58,16 @@ def pytest_collection_modifyitems( item.add_marker(pytest.mark.gpu) +@pytest.fixture +def v1_convention() -> Generator[None, None, None]: + """Set arithmetic_convention to 'v1' for the duration of a test.""" + import linopy + + linopy.options["arithmetic_convention"] = "v1" + yield + linopy.options["arithmetic_convention"] = "legacy" + + @pytest.fixture def m() -> Model: from linopy import Model diff --git a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py index c0f04f22..04103b61 100644 --- a/test/test_algebraic_properties.py +++ b/test/test_algebraic_properties.py @@ -39,16 +39,27 @@ from __future__ import annotations +from collections.abc import Generator + import numpy as np import pandas as pd import pytest import xarray as xr +import linopy from linopy import Model from linopy.expressions import LinearExpression from linopy.variables import Variable +@pytest.fixture(autouse=True) +def 
_use_v1_convention() -> Generator[None, None, None]: + """Use v1 arithmetic convention for all tests in this module.""" + linopy.options["arithmetic_convention"] = "v1" + yield + linopy.options["arithmetic_convention"] = "legacy" + + @pytest.fixture def m() -> Model: return Model() diff --git a/test/test_common.py b/test/test_common.py index 64e4bf6f..719ab093 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -5,6 +5,8 @@ @author: fabian """ +from collections.abc import Generator + import numpy as np import pandas as pd import polars as pl @@ -13,6 +15,7 @@ from xarray import DataArray from xarray.testing.assertions import assert_equal +import linopy from linopy import LinearExpression, Model, Variable from linopy.common import ( align, @@ -27,6 +30,17 @@ from linopy.testing import assert_linequal, assert_varequal +@pytest.fixture(autouse=True) +def _use_v1_convention() -> Generator[None, None, None]: + """Use v1 arithmetic convention for all tests in this module.""" + linopy.options["arithmetic_convention"] = "v1" + yield + linopy.options["arithmetic_convention"] = "legacy" + + +# Fixtures m, u, x are provided by conftest.py + + def test_as_dataarray_with_series_dims_default() -> None: target_dim = "dim_0" target_index = [0, 1, 2] diff --git a/test/test_common_legacy.py b/test/test_common_legacy.py new file mode 100644 index 00000000..f1190024 --- /dev/null +++ b/test/test_common_legacy.py @@ -0,0 +1,734 @@ +#!/usr/bin/env python3 +""" +Created on Mon Jun 19 12:11:03 2023 + +@author: fabian +""" + +import numpy as np +import pandas as pd +import polars as pl +import pytest +import xarray as xr +from xarray import DataArray +from xarray.testing.assertions import assert_equal + +from linopy import LinearExpression, Model, Variable +from linopy.common import ( + align, + as_dataarray, + assign_multiindex_safe, + best_int, + get_dims_with_index_levels, + is_constant, + iterate_slices, + maybe_group_terms_polars, +) +from linopy.testing import 
assert_linequal, assert_varequal + + +def test_as_dataarray_with_series_dims_default() -> None: + target_dim = "dim_0" + target_index = [0, 1, 2] + s = pd.Series([1, 2, 3]) + da = as_dataarray(s) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + +def test_as_dataarray_with_series_dims_set() -> None: + target_dim = "dim1" + target_index = ["a", "b", "c"] + s = pd.Series([1, 2, 3], index=target_index) + dims = [target_dim] + da = as_dataarray(s, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + +def test_as_dataarray_with_series_dims_given() -> None: + target_dim = "dim1" + target_index = ["a", "b", "c"] + index = pd.Index(target_index, name=target_dim) + s = pd.Series([1, 2, 3], index=index) + dims: list[str] = [] + da = as_dataarray(s, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + +def test_as_dataarray_with_series_dims_priority() -> None: + """The dimension name from the pandas object should have priority.""" + target_dim = "dim1" + target_index = ["a", "b", "c"] + index = pd.Index(target_index, name=target_dim) + s = pd.Series([1, 2, 3], index=index) + dims = ["other"] + da = as_dataarray(s, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + +def test_as_dataarray_with_series_dims_subset() -> None: + target_dim = "dim_0" + target_index = ["a", "b", "c"] + s = pd.Series([1, 2, 3], index=target_index) + dims: list[str] = [] + da = as_dataarray(s, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + +def test_as_dataarray_with_series_dims_superset() -> None: + target_dim = "dim_a" + target_index = ["a", "b", "c"] + 
s = pd.Series([1, 2, 3], index=target_index) + dims = [target_dim, "other"] + da = as_dataarray(s, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + +def test_as_dataarray_with_series_aligned_coords() -> None: + """This should not give out a warning even though coords are given.""" + target_dim = "dim_0" + target_index = ["a", "b", "c"] + s = pd.Series([1, 2, 3], index=target_index) + da = as_dataarray(s, coords=[target_index]) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + da = as_dataarray(s, coords={target_dim: target_index}) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + +def test_as_dataarray_with_pl_series_dims_default() -> None: + target_dim = "dim_0" + target_index = [0, 1, 2] + s = pl.Series([1, 2, 3]) + da = as_dataarray(s) + assert isinstance(da, DataArray) + assert da.dims == (target_dim,) + assert list(da.coords[target_dim].values) == target_index + + +def test_as_dataarray_dataframe_dims_default() -> None: + target_dims = ("dim_0", "dim_1") + target_index = [0, 1] + target_columns = ["A", "B"] + df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) + da = as_dataarray(df) + assert isinstance(da, DataArray) + assert da.dims == target_dims + assert list(da.coords[target_dims[0]].values) == target_index + assert list(da.coords[target_dims[1]].values) == target_columns + + +def test_as_dataarray_dataframe_dims_set() -> None: + target_dims = ("dim1", "dim2") + target_index = ["a", "b"] + target_columns = ["A", "B"] + df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) + da = as_dataarray(df, dims=target_dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + assert list(da.coords[target_dims[0]].values) == target_index + 
assert list(da.coords[target_dims[1]].values) == target_columns + + +def test_as_dataarray_dataframe_dims_given() -> None: + target_dims = ("dim1", "dim2") + target_index = ["a", "b"] + target_columns = ["A", "B"] + index = pd.Index(target_index, name=target_dims[0]) + columns = pd.Index(target_columns, name=target_dims[1]) + df = pd.DataFrame([[1, 2], [3, 4]], index=index, columns=columns) + dims: list[str] = [] + da = as_dataarray(df, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + assert list(da.coords[target_dims[0]].values) == target_index + assert list(da.coords[target_dims[1]].values) == target_columns + + +def test_as_dataarray_dataframe_dims_priority() -> None: + """The dimension name from the pandas object should have priority.""" + target_dims = ("dim1", "dim2") + target_index = ["a", "b"] + target_columns = ["A", "B"] + index = pd.Index(target_index, name=target_dims[0]) + columns = pd.Index(target_columns, name=target_dims[1]) + df = pd.DataFrame([[1, 2], [3, 4]], index=index, columns=columns) + dims = ["other"] + da = as_dataarray(df, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + assert list(da.coords[target_dims[0]].values) == target_index + assert list(da.coords[target_dims[1]].values) == target_columns + + +def test_as_dataarray_dataframe_dims_subset() -> None: + target_dims = ("dim_0", "dim_1") + target_index = ["a", "b"] + target_columns = ["A", "B"] + df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) + dims: list[str] = [] + da = as_dataarray(df, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + assert list(da.coords[target_dims[0]].values) == target_index + assert list(da.coords[target_dims[1]].values) == target_columns + + +def test_as_dataarray_dataframe_dims_superset() -> None: + target_dims = ("dim_a", "dim_b") + target_index = ["a", "b"] + target_columns = ["A", "B"] + df = pd.DataFrame([[1, 2], [3, 4]], 
index=target_index, columns=target_columns) + dims = [*target_dims, "other"] + da = as_dataarray(df, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + assert list(da.coords[target_dims[0]].values) == target_index + assert list(da.coords[target_dims[1]].values) == target_columns + + +def test_as_dataarray_dataframe_aligned_coords() -> None: + """This should not give out a warning even though coords are given.""" + target_dims = ("dim_0", "dim_1") + target_index = ["a", "b"] + target_columns = ["A", "B"] + df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) + da = as_dataarray(df, coords=[target_index, target_columns]) + assert isinstance(da, DataArray) + assert da.dims == target_dims + assert list(da.coords[target_dims[0]].values) == target_index + assert list(da.coords[target_dims[1]].values) == target_columns + + coords = dict(zip(target_dims, [target_index, target_columns])) + da = as_dataarray(df, coords=coords) + assert isinstance(da, DataArray) + assert da.dims == target_dims + assert list(da.coords[target_dims[0]].values) == target_index + assert list(da.coords[target_dims[1]].values) == target_columns + + +def test_as_dataarray_with_ndarray_no_coords_no_dims() -> None: + target_dims = ("dim_0", "dim_1") + target_coords = [[0, 1], [0, 1]] + arr = np.array([[1, 2], [3, 4]]) + da = as_dataarray(arr) + assert isinstance(da, DataArray) + assert da.dims == target_dims + for i, dim in enumerate(target_dims): + assert list(da.coords[dim]) == target_coords[i] + + +def test_as_dataarray_with_ndarray_coords_list_no_dims() -> None: + target_dims = ("dim_0", "dim_1") + target_coords = [["a", "b"], ["A", "B"]] + arr = np.array([[1, 2], [3, 4]]) + da = as_dataarray(arr, coords=target_coords) + assert isinstance(da, DataArray) + assert da.dims == target_dims + for i, dim in enumerate(target_dims): + assert list(da.coords[dim]) == target_coords[i] + + +def test_as_dataarray_with_ndarray_coords_indexes_no_dims() -> 
None: + target_dims = ("dim1", "dim2") + target_coords = [ + pd.Index(["a", "b"], name="dim1"), + pd.Index(["A", "B"], name="dim2"), + ] + arr = np.array([[1, 2], [3, 4]]) + da = as_dataarray(arr, coords=target_coords) + assert isinstance(da, DataArray) + assert da.dims == target_dims + for i, dim in enumerate(target_dims): + assert list(da.coords[dim]) == list(target_coords[i]) + + +def test_as_dataarray_with_ndarray_coords_dict_set_no_dims() -> None: + """If no dims are given and coords are a dict, the keys of the dict should be used as dims.""" + target_dims = ("dim_0", "dim_2") + target_coords = {"dim_0": ["a", "b"], "dim_2": ["A", "B"]} + arr = np.array([[1, 2], [3, 4]]) + da = as_dataarray(arr, coords=target_coords) + assert isinstance(da, DataArray) + assert da.dims == target_dims + for dim in target_dims: + assert list(da.coords[dim]) == target_coords[dim] + + +def test_as_dataarray_with_ndarray_coords_list_dims() -> None: + target_dims = ("dim1", "dim2") + target_coords = [["a", "b"], ["A", "B"]] + arr = np.array([[1, 2], [3, 4]]) + da = as_dataarray(arr, coords=target_coords, dims=target_dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + for i, dim in enumerate(target_dims): + assert list(da.coords[dim]) == target_coords[i] + + +def test_as_dataarray_with_ndarray_coords_list_dims_superset() -> None: + target_dims = ("dim1", "dim2") + target_coords = [["a", "b"], ["A", "B"]] + arr = np.array([[1, 2], [3, 4]]) + dims = [*target_dims, "dim3"] + da = as_dataarray(arr, coords=target_coords, dims=dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + for i, dim in enumerate(target_dims): + assert list(da.coords[dim]) == target_coords[i] + + +def test_as_dataarray_with_ndarray_coords_list_dims_subset() -> None: + target_dims = ("dim0", "dim_1") + target_coords = [["a", "b"], ["A", "B"]] + arr = np.array([[1, 2], [3, 4]]) + dims = ["dim0"] + da = as_dataarray(arr, coords=target_coords, dims=dims) + assert isinstance(da, 
DataArray) + assert da.dims == target_dims + for i, dim in enumerate(target_dims): + assert list(da.coords[dim]) == target_coords[i] + + +def test_as_dataarray_with_ndarray_coords_indexes_dims_aligned() -> None: + target_dims = ("dim1", "dim2") + target_coords = [ + pd.Index(["a", "b"], name="dim1"), + pd.Index(["A", "B"], name="dim2"), + ] + arr = np.array([[1, 2], [3, 4]]) + da = as_dataarray(arr, coords=target_coords, dims=target_dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + for i, dim in enumerate(target_dims): + assert list(da.coords[dim]) == list(target_coords[i]) + + +def test_as_dataarray_with_ndarray_coords_indexes_dims_not_aligned() -> None: + target_dims = ("dim3", "dim4") + target_coords = [ + pd.Index(["a", "b"], name="dim1"), + pd.Index(["A", "B"], name="dim2"), + ] + arr = np.array([[1, 2], [3, 4]]) + with pytest.raises(ValueError): + as_dataarray(arr, coords=target_coords, dims=target_dims) + + +def test_as_dataarray_with_ndarray_coords_dict_dims_aligned() -> None: + target_dims = ("dim_0", "dim_1") + target_coords = {"dim_0": ["a", "b"], "dim_1": ["A", "B"]} + arr = np.array([[1, 2], [3, 4]]) + da = as_dataarray(arr, coords=target_coords, dims=target_dims) + assert isinstance(da, DataArray) + assert da.dims == target_dims + for dim in target_dims: + assert list(da.coords[dim]) == target_coords[dim] + + +def test_as_dataarray_with_ndarray_coords_dict_set_dims_not_aligned() -> None: + target_dims = ("dim_0", "dim_1") + target_coords = {"dim_0": ["a", "b"], "dim_2": ["A", "B"]} + arr = np.array([[1, 2], [3, 4]]) + da = as_dataarray(arr, coords=target_coords, dims=target_dims) + assert da.dims == target_dims + assert list(da.coords["dim_0"].values) == ["a", "b"] + assert "dim_2" not in da.coords + + +def test_as_dataarray_with_number() -> None: + num = 1 + da = as_dataarray(num, dims=["dim1"], coords=[["a"]]) + assert isinstance(da, DataArray) + assert da.dims == ("dim1",) + assert list(da.coords["dim1"].values) == ["a"] + 
+ +def test_as_dataarray_with_np_number() -> None: + num = np.float64(1) + da = as_dataarray(num, dims=["dim1"], coords=[["a"]]) + assert isinstance(da, DataArray) + assert da.dims == ("dim1",) + assert list(da.coords["dim1"].values) == ["a"] + + +def test_as_dataarray_with_number_default_dims_coords() -> None: + num = 1 + da = as_dataarray(num) + assert isinstance(da, DataArray) + assert da.dims == () + assert da.coords == {} + + +def test_as_dataarray_with_number_and_coords() -> None: + num = 1 + da = as_dataarray(num, coords=[pd.RangeIndex(10, name="a")]) + assert isinstance(da, DataArray) + assert da.dims == ("a",) + assert list(da.coords["a"].values) == list(range(10)) + + +def test_as_dataarray_with_dataarray() -> None: + da_in = DataArray( + data=[[1, 2], [3, 4]], + dims=["dim1", "dim2"], + coords={"dim1": ["a", "b"], "dim2": ["A", "B"]}, + ) + da_out = as_dataarray(da_in, dims=["dim1", "dim2"], coords=[["a", "b"], ["A", "B"]]) + assert isinstance(da_out, DataArray) + assert da_out.dims == da_in.dims + assert list(da_out.coords["dim1"].values) == list(da_in.coords["dim1"].values) + assert list(da_out.coords["dim2"].values) == list(da_in.coords["dim2"].values) + + +def test_as_dataarray_with_dataarray_default_dims_coords() -> None: + da_in = DataArray( + data=[[1, 2], [3, 4]], + dims=["dim1", "dim2"], + coords={"dim1": ["a", "b"], "dim2": ["A", "B"]}, + ) + da_out = as_dataarray(da_in) + assert isinstance(da_out, DataArray) + assert da_out.dims == da_in.dims + assert list(da_out.coords["dim1"].values) == list(da_in.coords["dim1"].values) + assert list(da_out.coords["dim2"].values) == list(da_in.coords["dim2"].values) + + +def test_as_dataarray_with_unsupported_type() -> None: + with pytest.raises(TypeError): + as_dataarray(lambda x: 1, dims=["dim1"], coords=[["a"]]) + + +def test_best_int() -> None: + # Test for int8 + assert best_int(127) == np.int8 + # Test for int16 + assert best_int(128) == np.int16 + assert best_int(32767) == np.int16 + # Test for int32 
+ assert best_int(32768) == np.int32 + assert best_int(2147483647) == np.int32 + # Test for int64 + assert best_int(2147483648) == np.int64 + assert best_int(9223372036854775807) == np.int64 + + # Test for value too large + with pytest.raises( + ValueError, match=r"Value 9223372036854775808 is too large for int64." + ): + best_int(9223372036854775808) + + +def test_assign_multiindex_safe() -> None: + # Create a multi-indexed dataset + index = pd.MultiIndex.from_product([["A", "B"], [1, 2]], names=["letter", "number"]) + data = xr.DataArray([1, 2, 3, 4], dims=["index"], coords={"index": index}) + ds = xr.Dataset({"value": data}) + + # This would now warn about the index deletion of single index level + # ds["humidity"] = data + + # Case 1: Assigning a single DataArray + result = assign_multiindex_safe(ds, humidity=data) + assert "humidity" in result + assert "value" in result + assert result["humidity"].equals(data) + + # Case 2: Assigning a Dataset + result = assign_multiindex_safe(ds, **xr.Dataset({"humidity": data})) # type: ignore + assert "humidity" in result + assert "value" in result + assert result["humidity"].equals(data) + + # Case 3: Assigning multiple DataArrays + result = assign_multiindex_safe(ds, humidity=data, pressure=data) + assert "humidity" in result + assert "pressure" in result + assert "value" in result + assert result["humidity"].equals(data) + assert result["pressure"].equals(data) + + +def test_iterate_slices_basic() -> None: + ds = xr.Dataset( + {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 + coords={"x": np.arange(10), "y": np.arange(10)}, + ) + slices = list(iterate_slices(ds, slice_size=20)) + assert len(slices) == 5 + for s in slices: + assert isinstance(s, xr.Dataset) + assert set(s.dims) == set(ds.dims) + + +def test_iterate_slices_with_exclude_dims() -> None: + ds = xr.Dataset( + {"var": (("x", "y"), np.random.rand(10, 20))}, # noqa: NPY002 + coords={"x": np.arange(10), "y": np.arange(20)}, + ) + slices = 
list(iterate_slices(ds, slice_size=20, slice_dims=["x"])) + assert len(slices) == 10 + for s in slices: + assert isinstance(s, xr.Dataset) + assert set(s.dims) == set(ds.dims) + + +def test_iterate_slices_large_max_size() -> None: + ds = xr.Dataset( + {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 + coords={"x": np.arange(10), "y": np.arange(10)}, + ) + slices = list(iterate_slices(ds, slice_size=200)) + assert len(slices) == 1 + for s in slices: + assert isinstance(s, xr.Dataset) + assert set(s.dims) == set(ds.dims) + + +def test_iterate_slices_small_max_size() -> None: + ds = xr.Dataset( + {"var": (("x", "y"), np.random.rand(10, 20))}, # noqa: NPY002 + coords={"x": np.arange(10), "y": np.arange(20)}, + ) + slices = list(iterate_slices(ds, slice_size=8, slice_dims=["x"])) + assert ( + len(slices) == 10 + ) # goes to the smallest slice possible which is 1 for the x dimension + for s in slices: + assert isinstance(s, xr.Dataset) + assert set(s.dims) == set(ds.dims) + + +def test_iterate_slices_slice_size_none() -> None: + ds = xr.Dataset( + {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 + coords={"x": np.arange(10), "y": np.arange(10)}, + ) + slices = list(iterate_slices(ds, slice_size=None)) + assert len(slices) == 1 + for s in slices: + assert ds.equals(s) + + +def test_iterate_slices_includes_last_slice() -> None: + ds = xr.Dataset( + {"var": (("x"), np.random.rand(10))}, # noqa: NPY002 + coords={"x": np.arange(10)}, + ) + slices = list(iterate_slices(ds, slice_size=3, slice_dims=["x"])) + assert len(slices) == 4 # 10 slices for dimension 'x' with size 10 + total_elements = sum(s.sizes["x"] for s in slices) + assert total_elements == ds.sizes["x"] # Ensure all elements are included + for s in slices: + assert isinstance(s, xr.Dataset) + assert set(s.dims) == set(ds.dims) + + +def test_iterate_slices_empty_slice_dims() -> None: + ds = xr.Dataset( + {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 + coords={"x": 
np.arange(10), "y": np.arange(10)}, + ) + slices = list(iterate_slices(ds, slice_size=50, slice_dims=[])) + assert len(slices) == 1 + for s in slices: + assert ds.equals(s) + + +def test_iterate_slices_invalid_slice_dims() -> None: + ds = xr.Dataset( + {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 + coords={"x": np.arange(10), "y": np.arange(10)}, + ) + with pytest.raises(ValueError): + list(iterate_slices(ds, slice_size=50, slice_dims=["z"])) + + +def test_iterate_slices_empty_dataset() -> None: + ds = xr.Dataset( + {"var": (("x", "y"), np.array([]).reshape(0, 0))}, coords={"x": [], "y": []} + ) + slices = list(iterate_slices(ds, slice_size=10, slice_dims=["x"])) + assert len(slices) == 1 + assert ds.equals(slices[0]) + + +def test_iterate_slices_single_element() -> None: + ds = xr.Dataset({"var": (("x", "y"), np.array([[1]]))}, coords={"x": [0], "y": [0]}) + slices = list(iterate_slices(ds, slice_size=1, slice_dims=["x"])) + assert len(slices) == 1 + assert ds.equals(slices[0]) + + +def test_get_dims_with_index_levels() -> None: + # Create test data + + # Case 1: Simple dataset with regular dimensions + ds1 = xr.Dataset( + {"temp": (("time", "lat"), np.random.rand(3, 2))}, # noqa: NPY002 + coords={"time": pd.date_range("2024-01-01", periods=3), "lat": [0, 1]}, + ) + + # Case 2: Dataset with a multi-index dimension + stations_index = pd.MultiIndex.from_product( + [["USA", "Canada"], ["NYC", "Toronto"]], names=["country", "city"] + ) + stations_coords = xr.Coordinates.from_pandas_multiindex(stations_index, "station") + ds2 = xr.Dataset( + {"temp": (("time", "station"), np.random.rand(3, 4))}, # noqa: NPY002 + coords={"time": pd.date_range("2024-01-01", periods=3), **stations_coords}, + ) + + # Case 3: Dataset with unnamed multi-index levels + unnamed_stations_index = pd.MultiIndex.from_product( + [["USA", "Canada"], ["NYC", "Toronto"]] + ) + unnamed_stations_coords = xr.Coordinates.from_pandas_multiindex( + unnamed_stations_index, "station" + ) + 
ds3 = xr.Dataset( + {"temp": (("time", "station"), np.random.rand(3, 4))}, # noqa: NPY002 + coords={ + "time": pd.date_range("2024-01-01", periods=3), + **unnamed_stations_coords, + }, + ) + + # Case 4: Dataset with multiple multi-indexed dimensions + locations_index = pd.MultiIndex.from_product( + [["North", "South"], ["A", "B"]], names=["region", "site"] + ) + locations_coords = xr.Coordinates.from_pandas_multiindex( + locations_index, "location" + ) + + ds4 = xr.Dataset( + {"temp": (("time", "station", "location"), np.random.rand(2, 4, 4))}, # noqa: NPY002 + coords={ + "time": pd.date_range("2024-01-01", periods=2), + **stations_coords, + **locations_coords, + }, + ) + + # Run tests + + # Test case 1: Regular dimensions + assert get_dims_with_index_levels(ds1) == ["time", "lat"] + + # Test case 2: Named multi-index + assert get_dims_with_index_levels(ds2) == ["time", "station (country, city)"] + + # Test case 3: Unnamed multi-index + assert get_dims_with_index_levels(ds3) == [ + "time", + "station (station_level_0, station_level_1)", + ] + + # Test case 4: Multiple multi-indices + expected = ["time", "station (country, city)", "location (region, site)"] + assert get_dims_with_index_levels(ds4) == expected + + # Test case 5: Empty dataset + ds5 = xr.Dataset() + assert get_dims_with_index_levels(ds5) == [] + + +def test_align(x: Variable, u: Variable) -> None: # noqa: F811 + alpha = xr.DataArray([1, 2], [[1, 2]]) + beta = xr.DataArray( + [1, 2, 3], + [ + ( + "dim_3", + pd.MultiIndex.from_tuples( + [(1, "b"), (2, "b"), (1, "c")], names=["level1", "level2"] + ), + ) + ], + ) + + # inner join + x_obs, alpha_obs = align(x, alpha) + assert isinstance(x_obs, Variable) + assert x_obs.shape == alpha_obs.shape == (1,) + assert_varequal(x_obs, x.loc[[1]]) + + # left-join + x_obs, alpha_obs = align(x, alpha, join="left") + assert x_obs.shape == alpha_obs.shape == (2,) + assert isinstance(x_obs, Variable) + assert_varequal(x_obs, x) + assert_equal(alpha_obs, 
DataArray([np.nan, 1], [[0, 1]])) + + # multiindex + beta_obs, u_obs = align(beta, u) + assert u_obs.shape == beta_obs.shape == (2,) + assert isinstance(u_obs, Variable) + assert_varequal(u_obs, u.loc[[(1, "b"), (2, "b")]]) + assert_equal(beta_obs, beta.loc[[(1, "b"), (2, "b")]]) + + # with linear expression + expr = 20 * x + x_obs, expr_obs, alpha_obs = align(x, expr, alpha) + assert x_obs.shape == alpha_obs.shape == (1,) + assert expr_obs.shape == (1, 1) # _term dim + assert isinstance(expr_obs, LinearExpression) + assert_linequal(expr_obs, expr.loc[[1]]) + + +def test_is_constant() -> None: + model = Model() + index = pd.Index(range(10), name="t") + a = model.add_variables(name="a", coords=[index]) + b = a.sel(t=1) + c = a * 2 + d = a * a + + non_constant = [a, b, c, d] + for nc in non_constant: + assert not is_constant(nc) + + constant_values = [ + 5, + 3.14, + np.int32(7), + np.float64(2.71), + pd.Series([1, 2, 3]), + np.array([4, 5, 6]), + xr.DataArray([k for k in range(10)], coords=[index]), + ] + for cv in constant_values: + assert is_constant(cv) + + +def test_maybe_group_terms_polars_no_duplicates() -> None: + """Fast path: distinct (labels, vars) pairs skip group_by.""" + df = pl.DataFrame({"labels": [0, 0], "vars": [1, 2], "coeffs": [3.0, 4.0]}) + result = maybe_group_terms_polars(df) + assert result.shape == (2, 3) + assert result.columns == ["labels", "vars", "coeffs"] + assert result["coeffs"].to_list() == [3.0, 4.0] + + +def test_maybe_group_terms_polars_with_duplicates() -> None: + """Slow path: duplicate (labels, vars) pairs trigger group_by.""" + df = pl.DataFrame({"labels": [0, 0], "vars": [1, 1], "coeffs": [3.0, 4.0]}) + result = maybe_group_terms_polars(df) + assert result.shape == (1, 3) + assert result["coeffs"].to_list() == [7.0] diff --git a/test/test_constraints.py b/test/test_constraints.py index 9fc0086b..e94f0152 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -5,6 +5,7 @@ @author: fabulous """ +from 
collections.abc import Generator from typing import Any import dask @@ -14,9 +15,19 @@ import pytest import xarray as xr +import linopy from linopy import EQUAL, GREATER_EQUAL, LESS_EQUAL, Model, Variable, available_solvers from linopy.testing import assert_conequal + +@pytest.fixture(autouse=True) +def _use_v1_convention() -> Generator[None, None, None]: + """Use v1 arithmetic convention for all tests in this module.""" + linopy.options["arithmetic_convention"] = "v1" + yield + linopy.options["arithmetic_convention"] = "legacy" + + # Test model functions @@ -347,67 +358,72 @@ def superset(self, request: Any) -> xr.DataArray | pd.Series: np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") ) - def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - con = v <= subset + def test_var_le_subset_raises(self, v: Variable, subset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + v <= subset + + def test_var_le_subset_join_left(self, v: Variable) -> None: + subset_da = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + con = v.to_linexpr().le(subset_da, join="left") assert con.sizes["dim_2"] == v.sizes["dim_2"] assert con.rhs.sel(dim_2=1).item() == 10.0 assert con.rhs.sel(dim_2=3).item() == 30.0 assert np.isnan(con.rhs.sel(dim_2=0).item()) @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) - def test_var_comparison_subset( + def test_var_comparison_subset_raises( self, v: Variable, subset: xr.DataArray, sign: str ) -> None: - if sign == LESS_EQUAL: - con = v <= subset - elif sign == GREATER_EQUAL: - con = v >= subset - else: - con = v == subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) + with pytest.raises(ValueError, match="exact"): + if sign == LESS_EQUAL: + v <= subset + elif sign == GREATER_EQUAL: + v >= subset + else: + v == subset + + def test_expr_le_subset_raises(self, v: 
Variable, subset: xr.DataArray) -> None: + expr = v + 5 + with pytest.raises(ValueError, match="exact"): + expr <= subset - def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + def test_expr_le_subset_join_left(self, v: Variable) -> None: + subset_da = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) expr = v + 5 - con = expr <= subset + con = expr.le(subset_da, join="left") assert con.sizes["dim_2"] == v.sizes["dim_2"] assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) assert np.isnan(con.rhs.sel(dim_2=0).item()) - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) - def test_subset_comparison_var( - self, v: Variable, subset: xr.DataArray, sign: str + def test_subset_comparison_var_raises( + self, v: Variable, subset: xr.DataArray ) -> None: - if sign == LESS_EQUAL: - con = subset <= v - elif sign == GREATER_EQUAL: - con = subset >= v - else: - con = subset == v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert np.isnan(con.rhs.sel(dim_2=0).item()) - assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) + with pytest.raises(ValueError, match="exact"): + subset <= v - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) - def test_superset_comparison_var( - self, v: Variable, superset: xr.DataArray, sign: str + def test_superset_comparison_var_raises( + self, v: Variable, superset: xr.DataArray ) -> None: - if sign == LESS_EQUAL: - con = superset <= v - else: - con = superset >= v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(con.lhs.coeffs.values).any() - assert not np.isnan(con.rhs.values).any() + with pytest.raises(ValueError, match="exact"): + superset <= v - def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: + def test_constraint_rhs_extra_dims_raises_on_mismatch(self, v: Variable) -> None: rhs = xr.DataArray( [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 
1]}, ) + # dim_2 coords [0,1] don't match v's [0..19] under exact join + with pytest.raises(ValueError, match="exact"): + v <= rhs + + def test_constraint_rhs_extra_dims_broadcasts_matching(self, v: Variable) -> None: + rhs = xr.DataArray( + np.ones((2, 20)), + dims=["extra", "dim_2"], + coords={"dim_2": range(20)}, + ) c = v <= rhs assert "extra" in c.dims @@ -419,7 +435,8 @@ def test_subset_constraint_solve_integration(self) -> None: coords = pd.RangeIndex(5, name="i") x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) - m.add_constraints(x <= subset_ub, name="subset_ub") + # exact default raises — use explicit join="left" (NaN = no constraint) + m.add_constraints(x.to_linexpr().le(subset_ub, join="left"), name="subset_ub") m.add_objective(x.sum(), sense="max") m.solve(solver_name=solver) sol = m.solution["x"] diff --git a/test/test_constraints_legacy.py b/test/test_constraints_legacy.py new file mode 100644 index 00000000..9a467c8c --- /dev/null +++ b/test/test_constraints_legacy.py @@ -0,0 +1,448 @@ +#!/usr/bin/env python3 +""" +Created on Wed Mar 10 11:23:13 2021. 
+ +@author: fabulous +""" + +from typing import Any + +import dask +import dask.array.core +import numpy as np +import pandas as pd +import pytest +import xarray as xr + +from linopy import EQUAL, GREATER_EQUAL, LESS_EQUAL, Model, Variable, available_solvers +from linopy.testing import assert_conequal + +# Test model functions + + +def test_constraint_assignment() -> None: + m: Model = Model() + + lower: xr.DataArray = xr.DataArray( + np.zeros((10, 10)), coords=[range(10), range(10)] + ) + upper: xr.DataArray = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper, name="x") + y = m.add_variables(name="y") + + con0 = m.add_constraints(1 * x + 10 * y, EQUAL, 0) + + for attr in m.constraints.dataset_attrs: + assert "con0" in getattr(m.constraints, attr) + + assert m.constraints.labels.con0.shape == (10, 10) + assert m.constraints.labels.con0.dtype == int + assert m.constraints.coeffs.con0.dtype in (int, float) + assert m.constraints.vars.con0.dtype in (int, float) + assert m.constraints.rhs.con0.dtype in (int, float) + + assert_conequal(m.constraints.con0, con0) + + +def test_constraint_equality() -> None: + m: Model = Model() + + lower: xr.DataArray = xr.DataArray( + np.zeros((10, 10)), coords=[range(10), range(10)] + ) + upper: xr.DataArray = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper, name="x") + y = m.add_variables(name="y") + + con0 = m.add_constraints(1 * x + 10 * y, EQUAL, 0) + + assert_conequal(con0, 1 * x + 10 * y == 0, strict=False) + assert_conequal(1 * x + 10 * y == 0, 1 * x + 10 * y == 0, strict=False) + + with pytest.raises(AssertionError): + assert_conequal(con0, 1 * x + 10 * y <= 0, strict=False) + + with pytest.raises(AssertionError): + assert_conequal(con0, 1 * x + 10 * y >= 0, strict=False) + + with pytest.raises(AssertionError): + assert_conequal(10 * y + 2 * x == 0, 1 * x + 10 * y == 0, strict=False) + + +def test_constraints_getattr_formatted() -> 
None: + m: Model = Model() + x = m.add_variables(0, 10, name="x") + m.add_constraints(1 * x == 0, name="con-0") + assert_conequal(m.constraints.con_0, m.constraints["con-0"]) + + +def test_anonymous_constraint_assignment() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper, name="x") + y = m.add_variables(name="y") + con = 1 * x + 10 * y == 0 + m.add_constraints(con) + + for attr in m.constraints.dataset_attrs: + assert "con0" in getattr(m.constraints, attr) + + assert m.constraints.labels.con0.shape == (10, 10) + assert m.constraints.labels.con0.dtype == int + assert m.constraints.coeffs.con0.dtype in (int, float) + assert m.constraints.vars.con0.dtype in (int, float) + assert m.constraints.rhs.con0.dtype in (int, float) + + +def test_constraint_assignment_with_tuples() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper) + y = m.add_variables() + + m.add_constraints([(1, x), (10, y)], EQUAL, 0, name="c") + for attr in m.constraints.dataset_attrs: + assert "c" in getattr(m.constraints, attr) + assert m.constraints.labels.c.shape == (10, 10) + + +def test_constraint_assignment_chunked() -> None: + # setting bounds with one pd.DataFrame and one pd.Series + m: Model = Model(chunk=5) + lower = pd.DataFrame(np.zeros((10, 10))) + upper = pd.Series(np.ones(10)) + x = m.add_variables(lower, upper) + m.add_constraints(x, GREATER_EQUAL, 0, name="c") + assert m.constraints.coeffs.c.data.shape == ( + 10, + 10, + 1, + ) + assert isinstance(m.constraints.coeffs.c.data, dask.array.core.Array) + + +def test_constraint_assignment_with_reindex() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = 
xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper, name="x") + y = m.add_variables(name="y") + + m.add_constraints(1 * x + 10 * y, EQUAL, 0) + + shuffled_coords = [2, 1, 3, 4, 6, 5, 7, 9, 8, 0] + + con = x.loc[shuffled_coords] + y >= 10 + assert (con.coords["dim_0"].values == shuffled_coords).all() + + +@pytest.mark.parametrize( + "rhs_factory", + [ + pytest.param(lambda m, v: v, id="numpy"), + pytest.param(lambda m, v: xr.DataArray(v, dims=["dim_0"]), id="dataarray"), + pytest.param(lambda m, v: pd.Series(v, index=v), id="series"), + pytest.param( + lambda m, v: m.add_variables(coords=[v]), + id="variable", + ), + pytest.param( + lambda m, v: 2 * m.add_variables(coords=[v]) + 1, + id="linexpr", + ), + ], +) +def test_constraint_rhs_lower_dim(rhs_factory: Any) -> None: + m = Model() + naxis = np.arange(10, dtype=float) + maxis = np.arange(10).astype(str) + x = m.add_variables(coords=[naxis, maxis]) + y = m.add_variables(coords=[naxis, maxis]) + + c = m.add_constraints(x - y >= rhs_factory(m, naxis)) + assert c.shape == (10, 10) + + +@pytest.mark.parametrize( + "rhs_factory", + [ + pytest.param(lambda m: np.ones((5, 3)), id="numpy"), + pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), + ], +) +def test_constraint_rhs_higher_dim_constant_warns( + rhs_factory: Any, caplog: Any +) -> None: + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + + with caplog.at_level("WARNING", logger="linopy.expressions"): + m.add_constraints(x >= rhs_factory(m)) + assert "dimensions" in caplog.text + + +def test_constraint_rhs_higher_dim_dataarray_reindexes() -> None: + """DataArray RHS with extra dims reindexes to expression coords (no raise).""" + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + rhs = xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]) + + c = m.add_constraints(x >= rhs) + assert c.shape == (5, 3) + + +@pytest.mark.parametrize( + "rhs_factory", + [ + pytest.param( + 
lambda m: m.add_variables(coords=[range(5), range(3)]), + id="variable", + ), + pytest.param( + lambda m: 2 * m.add_variables(coords=[range(5), range(3)]) + 1, + id="linexpr", + ), + ], +) +def test_constraint_rhs_higher_dim_expression(rhs_factory: Any) -> None: + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + + c = m.add_constraints(x >= rhs_factory(m)) + assert c.shape == (5, 3) + + +def test_wrong_constraint_assignment_repeated() -> None: + # repeated variable assignment is forbidden + m: Model = Model() + x = m.add_variables() + m.add_constraints(x, LESS_EQUAL, 0, name="con") + with pytest.raises(ValueError): + m.add_constraints(x, LESS_EQUAL, 0, name="con") + + +def test_masked_constraints() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper) + y = m.add_variables() + + mask = pd.Series([True] * 5 + [False] * 5) + m.add_constraints(1 * x + 10 * y, EQUAL, 0, mask=mask) + assert (m.constraints.labels.con0[0:5, :] != -1).all() + assert (m.constraints.labels.con0[5:10, :] == -1).all() + + +def test_masked_constraints_broadcast() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper) + y = m.add_variables() + + mask = pd.Series([True] * 5 + [False] * 5) + m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc1", mask=mask) + assert (m.constraints.labels.bc1[0:5, :] != -1).all() + assert (m.constraints.labels.bc1[5:10, :] == -1).all() + + mask2 = xr.DataArray([True] * 5 + [False] * 5, dims=["dim_1"]) + m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc2", mask=mask2) + assert (m.constraints.labels.bc2[:, 0:5] != -1).all() + assert (m.constraints.labels.bc2[:, 5:10] == -1).all() + + mask3 = xr.DataArray( + [True, True, False, False, 
False], + dims=["dim_0"], + coords={"dim_0": range(5)}, + ) + with pytest.warns(FutureWarning, match="Missing values will be filled"): + m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc3", mask=mask3) + assert (m.constraints.labels.bc3[0:2, :] != -1).all() + assert (m.constraints.labels.bc3[2:5, :] == -1).all() + assert (m.constraints.labels.bc3[5:10, :] == -1).all() + + # Mask with extra dimension not in data should raise + mask4 = xr.DataArray([True, False], dims=["extra_dim"]) + with pytest.raises(AssertionError, match="not a subset"): + m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc4", mask=mask4) + + +def test_non_aligned_constraints() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros(10), coords=[range(10)]) + x = m.add_variables(lower, name="x") + + lower = xr.DataArray(np.zeros(8), coords=[range(8)]) + y = m.add_variables(lower, name="y") + + m.add_constraints(x == 0.0) + m.add_constraints(y == 0.0) + + with pytest.warns(UserWarning): + m.constraints.labels + + for dtype in m.constraints.labels.dtypes.values(): + assert np.issubdtype(dtype, np.integer) + + for dtype in m.constraints.coeffs.dtypes.values(): + assert np.issubdtype(dtype, np.floating) + + for dtype in m.constraints.vars.dtypes.values(): + assert np.issubdtype(dtype, np.integer) + + for dtype in m.constraints.rhs.dtypes.values(): + assert np.issubdtype(dtype, np.floating) + + +def test_constraints_flat() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper) + y = m.add_variables() + + assert isinstance(m.constraints.flat, pd.DataFrame) + assert m.constraints.flat.empty + with pytest.raises(ValueError): + m.constraints.to_matrix() + + m.add_constraints(1 * x + 10 * y, EQUAL, 0) + m.add_constraints(1 * x + 10 * y, LESS_EQUAL, 0) + m.add_constraints(1 * x + 10 * y, GREATER_EQUAL, 0) + + assert 
isinstance(m.constraints.flat, pd.DataFrame) + assert not m.constraints.flat.empty + + +def test_sanitize_infinities() -> None: + m: Model = Model() + + lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) + upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) + x = m.add_variables(lower, upper, name="x") + y = m.add_variables(name="y") + + # Test correct infinities + m.add_constraints(x <= np.inf, name="con_inf") + m.add_constraints(y >= -np.inf, name="con_neg_inf") + m.constraints.sanitize_infinities() + assert (m.constraints["con_inf"].labels == -1).all() + assert (m.constraints["con_neg_inf"].labels == -1).all() + + # Test incorrect infinities + with pytest.raises(ValueError): + m.add_constraints(x >= np.inf, name="con_wrong_inf") + with pytest.raises(ValueError): + m.add_constraints(y <= -np.inf, name="con_wrong_neg_inf") + + +class TestConstraintCoordinateAlignment: + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def subset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "xarray": + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) + + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def superset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "xarray": + return xr.DataArray( + np.arange(25, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + return pd.Series( + np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") + ) + + def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + con = v <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert con.rhs.sel(dim_2=3).item() == 30.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def 
test_var_comparison_subset( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = v <= subset + elif sign == GREATER_EQUAL: + con = v >= subset + else: + con = v == subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + expr = v + 5 + con = expr <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) + assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_subset_comparison_var( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = subset <= v + elif sign == GREATER_EQUAL: + con = subset >= v + else: + con = subset == v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert np.isnan(con.rhs.sel(dim_2=0).item()) + assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) + def test_superset_comparison_var( + self, v: Variable, superset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = superset <= v + else: + con = superset >= v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(con.lhs.coeffs.values).any() + assert not np.isnan(con.rhs.values).any() + + def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: + rhs = xr.DataArray( + [[1.0, 2.0]], + dims=["extra", "dim_2"], + coords={"dim_2": [0, 1]}, + ) + c = v <= rhs + assert "extra" in c.dims + + def test_subset_constraint_solve_integration(self) -> None: + if not available_solvers: + pytest.skip("No solver available") + solver = "highs" if "highs" in available_solvers else available_solvers[0] + m = Model() + coords = pd.RangeIndex(5, 
name="i") + x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") + subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) + m.add_constraints(x <= subset_ub, name="subset_ub") + m.add_objective(x.sum(), sense="max") + m.solve(solver_name=solver) + sol = m.solution["x"] + assert sol.sel(i=1).item() == pytest.approx(10.0) + assert sol.sel(i=3).item() == pytest.approx(20.0) + assert sol.sel(i=0).item() == pytest.approx(100.0) + assert sol.sel(i=2).item() == pytest.approx(100.0) + assert sol.sel(i=4).item() == pytest.approx(100.0) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 1378f48d..a4e4abfa 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -7,6 +7,7 @@ from __future__ import annotations +from collections.abc import Generator from typing import Any import numpy as np @@ -16,6 +17,7 @@ import xarray as xr from xarray.testing import assert_equal +import linopy from linopy import LinearExpression, Model, QuadraticExpression, Variable, merge from linopy.constants import HELPER_DIMS, TERM_DIM from linopy.expressions import ScalarLinearExpression @@ -23,6 +25,14 @@ from linopy.variables import ScalarVariable +@pytest.fixture(autouse=True) +def _use_v1_convention() -> Generator[None, None, None]: + """Use v1 arithmetic convention for all tests in this module.""" + linopy.options["arithmetic_convention"] = "v1" + yield + linopy.options["arithmetic_convention"] = "legacy" + + def test_empty_linexpr(m: Model) -> None: LinearExpression(None, m) @@ -403,8 +413,10 @@ def test_linear_expression_sum( assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - # test special case otherride coords - expr = v.loc[:9] + v.loc[10:] + # test special case override coords using assign_coords + a = v.loc[:9] + b = v.loc[10:].assign_coords(dim_2=a.coords["dim_2"]) + expr = a + b assert expr.nterm == 2 assert len(expr.coords["dim_2"]) == 10 @@ -427,8 +439,10 @@ def 
test_linear_expression_sum_with_const( assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - # test special case otherride coords - expr = v.loc[:9] + v.loc[10:] + # test special case override coords using assign_coords + a = v.loc[:9] + b = v.loc[10:].assign_coords(dim_2=a.coords["dim_2"]) + expr = a + b assert expr.nterm == 2 assert len(expr.coords["dim_2"]) == 10 @@ -538,6 +552,12 @@ def test_linear_expression_multiplication_invalid( class TestCoordinateAlignment: + @pytest.fixture + def matching(self) -> xr.DataArray: + return xr.DataArray( + np.arange(20, dtype=float), dims=["dim_2"], coords={"dim_2": range(20)} + ) + @pytest.fixture(params=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: if request.param == "da": @@ -574,8 +594,24 @@ def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: return pd.Series(vals, index=pd.Index(range(20), name="dim_2")) class TestSubset: + """ + Under v1, subset operations raise ValueError (exact join). + Use explicit join= to recover desired behavior. 
+ """ + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_raises( + self, + v: Variable, + subset: xr.DataArray, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + with pytest.raises(ValueError, match="exact"): + target * subset + @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_subset_fills_zeros( + def test_mul_subset_join_left( self, v: Variable, subset: xr.DataArray, @@ -583,13 +619,24 @@ def test_mul_subset_fills_zeros( operand: str, ) -> None: target = v if operand == "var" else 1 * v - result = target * subset + result = target.mul(subset, join="left") assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_subset_fills_zeros( + def test_add_subset_raises( + self, + v: Variable, + subset: xr.DataArray, + operand: str, + ) -> None: + target = v if operand == "var" else v + 5 + with pytest.raises(ValueError, match="exact"): + target + subset + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_subset_join_left( self, v: Variable, subset: xr.DataArray, @@ -597,17 +644,28 @@ def test_add_subset_fills_zeros( operand: str, ) -> None: if operand == "var": - result = v + subset + result = v.add(subset, join="left") expected = expected_fill else: - result = (v + 5) + subset + result = (v + 5).add(subset, join="left") expected = expected_fill + 5 assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, expected) @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_subset_fills_negated( + def test_sub_subset_raises( + self, + v: Variable, + subset: xr.DataArray, + operand: str, + ) -> None: + target = v if operand == "var" else v + 5 + with pytest.raises(ValueError, match="exact"): + target - 
subset + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_subset_join_left( self, v: Variable, subset: xr.DataArray, @@ -615,242 +673,264 @@ def test_sub_subset_fills_negated( operand: str, ) -> None: if operand == "var": - result = v - subset + result = v.sub(subset, join="left") expected = -expected_fill else: - result = (v + 5) - subset + result = (v + 5).sub(subset, join="left") expected = 5 - expected_fill assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, expected) @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_subset_inverts_nonzero( + def test_div_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else 1 * v + with pytest.raises(ValueError, match="exact"): + target / subset + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_subset_join_left( self, v: Variable, subset: xr.DataArray, operand: str ) -> None: target = v if operand == "var" else 1 * v - result = target / subset + result = target.div(subset, join="left") assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.coeffs.values).any() assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) - def test_subset_add_var_coefficients( - self, v: Variable, subset: xr.DataArray - ) -> None: - result = subset + v - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + def test_subset_add_var_raises(self, v: Variable, subset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + subset + v - def test_subset_sub_var_coefficients( - self, v: Variable, subset: xr.DataArray - ) -> None: - result = subset - v - np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) + def test_subset_sub_var_raises(self, v: Variable, subset: 
xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + subset - v class TestSuperset: - def test_add_superset_pins_to_lhs_coords( + """Under v1, superset operations raise ValueError (exact join).""" + + def test_add_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + v + superset + + def test_add_superset_join_left( self, v: Variable, superset: xr.DataArray ) -> None: - result = v + superset + result = v.add(superset, join="left") assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.const.values).any() - def test_add_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset + v, v + superset) + def test_mul_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + v * superset - def test_sub_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset - v, -v + superset) - - def test_mul_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset * v, v * superset) - - def test_mul_superset_pins_to_lhs_coords( + def test_mul_superset_join_inner( self, v: Variable, superset: xr.DataArray ) -> None: - result = v * superset + result = v.mul(superset, join="inner") assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.coeffs.values).any() - def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: + def test_div_superset_raises(self, v: Variable) -> None: superset_nonzero = xr.DataArray( np.arange(1, 26, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)}, ) - result = v / superset_nonzero + with pytest.raises(ValueError, match="exact"): + v / superset_nonzero + + def test_div_superset_join_inner(self, v: Variable) -> None: + superset_nonzero = xr.DataArray( + np.arange(1, 26, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + result = 
v.div(superset_nonzero, join="inner") assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.coeffs.values).any() class TestDisjoint: - def test_add_disjoint_fills_zeros(self, v: Variable) -> None: + """Under v1, disjoint operations raise ValueError (exact join).""" + + def test_add_disjoint_raises(self, v: Variable) -> None: disjoint = xr.DataArray( [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} ) - result = v + disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, np.zeros(20)) + with pytest.raises(ValueError, match="exact"): + v + disjoint + + def test_add_disjoint_join_outer(self, v: Variable) -> None: + disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v.add(disjoint, join="outer") + assert result.sizes["dim_2"] == 22 # union of [0..19] and [50, 60] + + def test_mul_disjoint_raises(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + with pytest.raises(ValueError, match="exact"): + v * disjoint - def test_mul_disjoint_fills_zeros(self, v: Variable) -> None: + def test_mul_disjoint_join_left(self, v: Variable) -> None: disjoint = xr.DataArray( [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} ) - result = v * disjoint + result = v.mul(disjoint, join="left") assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) - def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: + def test_div_disjoint_raises(self, v: Variable) -> None: disjoint = xr.DataArray( [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} ) - result = v / disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - 
np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + with pytest.raises(ValueError, match="exact"): + v / disjoint class TestCommutativity: - @pytest.mark.parametrize( - "make_lhs,make_rhs", - [ - (lambda v, s: s * v, lambda v, s: v * s), - (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), - (lambda v, s: s + v, lambda v, s: v + s), - (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), - ], - ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], - ) - def test_commutativity( - self, - v: Variable, - subset: xr.DataArray, - make_lhs: Any, - make_rhs: Any, + """Commutativity tests with matching coordinates under v1.""" + + def test_add_commutativity_matching_coords( + self, v: Variable, matching: xr.DataArray ) -> None: - assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) + assert_linequal(v + matching, matching + v) - def test_sub_var_anticommutative( - self, v: Variable, subset: xr.DataArray + def test_mul_commutativity_matching_coords( + self, v: Variable, matching: xr.DataArray ) -> None: - assert_linequal(subset - v, -v + subset) + assert_linequal(v * matching, matching * v) - def test_sub_expr_anticommutative( + def test_subset_raises_both_sides( self, v: Variable, subset: xr.DataArray ) -> None: - expr = v + 5 - assert_linequal(subset - expr, -(expr - subset)) + """Subset operations raise regardless of operand order.""" + with pytest.raises(ValueError, match="exact"): + v * subset + with pytest.raises(ValueError, match="exact"): + subset * v - def test_add_commutativity_full_coords(self, v: Variable) -> None: - full = xr.DataArray( - np.arange(20, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(20)}, + def test_commutativity_with_join( + self, v: Variable, subset: xr.DataArray + ) -> None: + """Commutativity holds with explicit join.""" + assert_linequal( + v.add(subset, join="inner"), + subset + v.reindex({"dim_2": [1, 3]}), ) - assert_linequal(v + full, full + v) class TestQuadratic: - def 
test_quadexpr_add_subset( + """Under v1, subset operations on quadratic expressions raise.""" + + def test_quadexpr_add_subset_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + qexpr + subset + + def test_quadexpr_add_subset_join_left( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray, ) -> None: qexpr = v * v - result = qexpr + subset + result = qexpr.add(subset, join="left") assert isinstance(result, QuadraticExpression) assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, expected_fill) - def test_quadexpr_sub_subset( + def test_quadexpr_sub_subset_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + qexpr - subset + + def test_quadexpr_sub_subset_join_left( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray, ) -> None: qexpr = v * v - result = qexpr - subset + result = qexpr.sub(subset, join="left") assert isinstance(result, QuadraticExpression) assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, -expected_fill) - def test_quadexpr_mul_subset( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, + def test_quadexpr_mul_subset_raises( + self, v: Variable, subset: xr.DataArray ) -> None: qexpr = v * v - result = qexpr * subset - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + with pytest.raises(ValueError, match="exact"): + qexpr * subset - def test_subset_mul_quadexpr( + def test_quadexpr_mul_subset_join_left( self, v: Variable, subset: xr.DataArray, expected_fill: np.ndarray, ) -> None: 
qexpr = v * v - result = subset * qexpr + result = qexpr.mul(subset, join="left") assert isinstance(result, QuadraticExpression) assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: + def test_quadexpr_add_matching( + self, v: Variable, matching: xr.DataArray + ) -> None: qexpr = v * v - assert_quadequal(subset + qexpr, qexpr + subset) + assert_quadequal(matching + qexpr, qexpr + matching) class TestMissingValues: """ Same shape as variable but with NaN entries in the constant. - NaN values are filled with operation-specific neutral elements: - - Addition/subtraction: NaN -> 0 (additive identity) - - Multiplication: NaN -> 0 (zeroes out the variable) - - Division: NaN -> 1 (multiplicative identity, no scaling) + Under v1 convention, NaN values propagate through arithmetic + (no implicit fillna). 
""" NAN_POSITIONS = [0, 5, 19] @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_nan_filled( + def test_add_nan_propagates( self, v: Variable, nan_constant: xr.DataArray | pd.Series, operand: str, ) -> None: - base_const = 0.0 if operand == "var" else 5.0 target = v if operand == "var" else v + 5 result = target + nan_constant assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.const.values).any() - # At NaN positions, const should be unchanged (added 0) for i in self.NAN_POSITIONS: - assert result.const.values[i] == base_const + assert np.isnan(result.const.values[i]) @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_nan_filled( + def test_sub_nan_propagates( self, v: Variable, nan_constant: xr.DataArray | pd.Series, operand: str, ) -> None: - base_const = 0.0 if operand == "var" else 5.0 target = v if operand == "var" else v + 5 result = target - nan_constant assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.const.values).any() - # At NaN positions, const should be unchanged (subtracted 0) for i in self.NAN_POSITIONS: - assert result.const.values[i] == base_const + assert np.isnan(result.const.values[i]) @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_nan_filled( + def test_mul_nan_propagates( self, v: Variable, nan_constant: xr.DataArray | pd.Series, @@ -859,13 +939,11 @@ def test_mul_nan_filled( target = v if operand == "var" else 1 * v result = target * nan_constant assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.coeffs.squeeze().values).any() - # At NaN positions, coeffs should be 0 (variable zeroed out) for i in self.NAN_POSITIONS: - assert result.coeffs.squeeze().values[i] == 0.0 + assert np.isnan(result.coeffs.squeeze().values[i]) @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_nan_filled( + def test_div_nan_propagates( self, v: Variable, nan_constant: xr.DataArray | pd.Series, @@ -874,11 +952,8 @@ def test_div_nan_filled( target = v if 
operand == "var" else 1 * v result = target / nan_constant assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.coeffs.squeeze().values).any() - # At NaN positions, coeffs should be unchanged (divided by 1) - original_coeffs = (1 * v).coeffs.squeeze().values for i in self.NAN_POSITIONS: - assert result.coeffs.squeeze().values[i] == original_coeffs[i] + assert np.isnan(result.coeffs.squeeze().values[i]) def test_add_commutativity( self, @@ -887,8 +962,6 @@ def test_add_commutativity( ) -> None: result_a = v + nan_constant result_b = nan_constant + v - assert not np.isnan(result_a.const.values).any() - assert not np.isnan(result_b.const.values).any() np.testing.assert_array_equal(result_a.const.values, result_b.const.values) np.testing.assert_array_equal( result_a.coeffs.values, result_b.coeffs.values @@ -901,13 +974,11 @@ def test_mul_commutativity( ) -> None: result_a = v * nan_constant result_b = nan_constant * v - assert not np.isnan(result_a.coeffs.values).any() - assert not np.isnan(result_b.coeffs.values).any() np.testing.assert_array_equal( result_a.coeffs.values, result_b.coeffs.values ) - def test_quadexpr_add_nan( + def test_quadexpr_add_nan_propagates( self, v: Variable, nan_constant: xr.DataArray | pd.Series, @@ -916,75 +987,89 @@ def test_quadexpr_add_nan( result = qexpr + nan_constant assert isinstance(result, QuadraticExpression) assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.const.values).any() + for i in self.NAN_POSITIONS: + assert np.isnan(result.const.values[i]) class TestExpressionWithNaN: - """Test that NaN in expression's own const/coeffs doesn't propagate.""" + """ + Under v1, NaN in expression's own const/coeffs propagates through + arithmetic (no implicit fillna). 
+ """ def test_shifted_expr_add_scalar(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) result = expr + 5 - assert not np.isnan(result.const.values).any() - assert result.const.values[0] == 5.0 + # Position 0 has NaN from shift, NaN + 5 = NaN under v1 + assert np.isnan(result.const.values[0]) def test_shifted_expr_mul_scalar(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) result = expr * 2 - assert not np.isnan(result.coeffs.squeeze().values).any() - assert result.coeffs.squeeze().values[0] == 0.0 + # Position 0 has NaN coeffs from shift, NaN * 2 = NaN under v1 + assert np.isnan(result.coeffs.squeeze().values[0]) def test_shifted_expr_add_array(self, v: Variable) -> None: arr = np.arange(v.sizes["dim_2"], dtype=float) expr = (1 * v).shift(dim_2=1) result = expr + arr - assert not np.isnan(result.const.values).any() - assert result.const.values[0] == 0.0 + # Position 0 has NaN const from shift, NaN + 0 = NaN under v1 + assert np.isnan(result.const.values[0]) def test_shifted_expr_mul_array(self, v: Variable) -> None: arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 expr = (1 * v).shift(dim_2=1) result = expr * arr - assert not np.isnan(result.coeffs.squeeze().values).any() - assert result.coeffs.squeeze().values[0] == 0.0 + # Position 0 has NaN coeffs from shift, NaN * 1 = NaN under v1 + assert np.isnan(result.coeffs.squeeze().values[0]) def test_shifted_expr_div_scalar(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) result = expr / 2 - assert not np.isnan(result.coeffs.squeeze().values).any() - assert result.coeffs.squeeze().values[0] == 0.0 + assert np.isnan(result.coeffs.squeeze().values[0]) def test_shifted_expr_sub_scalar(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) result = expr - 3 - assert not np.isnan(result.const.values).any() - assert result.const.values[0] == -3.0 + assert np.isnan(result.const.values[0]) def test_shifted_expr_div_array(self, v: Variable) -> None: arr = np.arange(v.sizes["dim_2"], dtype=float) 
+ 1 expr = (1 * v).shift(dim_2=1) result = expr / arr - assert not np.isnan(result.coeffs.squeeze().values).any() - assert result.coeffs.squeeze().values[0] == 0.0 + assert np.isnan(result.coeffs.squeeze().values[0]) def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: + """to_linexpr always fills NaN coefficients with 0 (not convention-aware).""" nan_coeff = np.ones(v.sizes["dim_2"]) nan_coeff[0] = np.nan result = v.to_linexpr(nan_coeff) - assert not np.isnan(result.coeffs.squeeze().values).any() assert result.coeffs.squeeze().values[0] == 0.0 class TestMultiDim: - def test_multidim_subset_mul(self, m: Model) -> None: + """Under v1, multi-dim subset operations raise.""" + + def test_multidim_subset_mul_raises(self, m: Model) -> None: coords_a = pd.RangeIndex(4, name="a") coords_b = pd.RangeIndex(5, name="b") w = m.add_variables(coords=[coords_a, coords_b], name="w") + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + with pytest.raises(ValueError, match="exact"): + w * subset_2d + def test_multidim_subset_mul_join_left(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") subset_2d = xr.DataArray( [[2.0, 3.0], [4.0, 5.0]], dims=["a", "b"], coords={"a": [1, 3], "b": [0, 4]}, ) - result = w * subset_2d + result = w.mul(subset_2d, join="left") assert result.sizes["a"] == 4 assert result.sizes["b"] == 5 assert not np.isnan(result.coeffs.values).any() @@ -993,23 +1078,17 @@ def test_multidim_subset_mul(self, m: Model) -> None: assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) - def test_multidim_subset_add(self, m: Model) -> None: + def test_multidim_subset_add_raises(self, m: Model) -> None: coords_a = pd.RangeIndex(4, name="a") coords_b = pd.RangeIndex(5, name="b") w = 
m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( [[2.0, 3.0], [4.0, 5.0]], dims=["a", "b"], coords={"a": [1, 3], "b": [0, 4]}, ) - result = w + subset_2d - assert result.sizes["a"] == 4 - assert result.sizes["b"] == 5 - assert not np.isnan(result.const.values).any() - assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0) - assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) - assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) + with pytest.raises(ValueError, match="exact"): + w + subset_2d class TestXarrayCompat: def test_da_eq_da_still_works(self) -> None: @@ -1877,12 +1956,14 @@ def c(self, m2: Model) -> Variable: return m2.variables["c"] class TestAddition: - def test_add_join_none_preserves_default( + def test_add_join_none_raises_on_mismatch( self, a: Variable, b: Variable ) -> None: - result_default = a.to_linexpr() + b.to_linexpr() - result_none = a.to_linexpr().add(b.to_linexpr(), join=None) - assert_linequal(result_default, result_none) + # a has i=[0,1,2], b has i=[1,2,3] — exact default raises + with pytest.raises(ValueError, match="exact"): + a.to_linexpr() + b.to_linexpr() + with pytest.raises(ValueError, match="exact"): + a.to_linexpr().add(b.to_linexpr(), join=None) def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: result = a.to_linexpr().add(b.to_linexpr(), join="inner") @@ -2138,12 +2219,12 @@ def test_div_constant_outer_fill_values(self, a: Variable) -> None: class TestQuadratic: def test_quadratic_add_constant_join_inner( - self, a: Variable, b: Variable + self, a: Variable, c: Variable ) -> None: - quad = a.to_linexpr() * b.to_linexpr() + quad = a.to_linexpr() * c.to_linexpr() const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] + assert list(result.data.indexes["i"]) == [1, 2] def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: quad = 
a.to_linexpr() * a.to_linexpr() @@ -2152,9 +2233,9 @@ def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: assert list(result.data.indexes["i"]) == [0, 1] def test_quadratic_mul_constant_join_inner( - self, a: Variable, b: Variable + self, a: Variable, c: Variable ) -> None: - quad = a.to_linexpr() * b.to_linexpr() + quad = a.to_linexpr() * c.to_linexpr() const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] + assert list(result.data.indexes["i"]) == [1, 2] diff --git a/test/test_linear_expression_legacy.py b/test/test_linear_expression_legacy.py new file mode 100644 index 00000000..1378f48d --- /dev/null +++ b/test/test_linear_expression_legacy.py @@ -0,0 +1,2160 @@ +#!/usr/bin/env python3 +""" +Created on Wed Mar 17 17:06:36 2021. + +@author: fabian +""" + +from __future__ import annotations + +from typing import Any + +import numpy as np +import pandas as pd +import polars as pl +import pytest +import xarray as xr +from xarray.testing import assert_equal + +from linopy import LinearExpression, Model, QuadraticExpression, Variable, merge +from linopy.constants import HELPER_DIMS, TERM_DIM +from linopy.expressions import ScalarLinearExpression +from linopy.testing import assert_linequal, assert_quadequal +from linopy.variables import ScalarVariable + + +def test_empty_linexpr(m: Model) -> None: + LinearExpression(None, m) + + +def test_linexpr_with_wrong_data(m: Model) -> None: + with pytest.raises(ValueError): + LinearExpression(xr.Dataset({"a": [1]}), m) + + coeffs = xr.DataArray([1, 2], dims=["a"]) + vars = xr.DataArray([1, 2], dims=["a"]) + data = xr.Dataset({"coeffs": coeffs, "vars": vars}) + with pytest.raises(ValueError): + LinearExpression(data, m) + + # with model as None + coeffs = xr.DataArray(np.array([1, 2]), dims=[TERM_DIM]) + vars = xr.DataArray(np.array([1, 2]), dims=[TERM_DIM]) + data = xr.Dataset({"coeffs": coeffs, "vars": 
vars}) + with pytest.raises(ValueError): + LinearExpression(data, None) # type: ignore + + +def test_linexpr_with_helper_dims_as_coords(m: Model) -> None: + coords = [pd.Index([0], name="a"), pd.Index([1, 2], name=TERM_DIM)] + coeffs = xr.DataArray(np.array([[1, 2]]), coords=coords) + vars = xr.DataArray(np.array([[1, 2]]), coords=coords) + + data = xr.Dataset({"coeffs": coeffs, "vars": vars}) + assert set(HELPER_DIMS).intersection(set(data.coords)) + + expr = LinearExpression(data, m) + assert not set(HELPER_DIMS).intersection(set(expr.data.coords)) + + +def test_linexpr_with_data_without_coords(m: Model) -> None: + lhs = 1 * m["x"] + vars = xr.DataArray(lhs.vars.values, dims=["dim_0", TERM_DIM]) + coeffs = xr.DataArray(lhs.coeffs.values, dims=["dim_0", TERM_DIM]) + data = xr.Dataset({"vars": vars, "coeffs": coeffs}) + expr = LinearExpression(data, m) + assert_linequal(expr, lhs) + + +def test_linexpr_from_constant_dataarray(m: Model) -> None: + const = xr.DataArray([1, 2], dims=["dim_0"]) + expr = LinearExpression(const, m) + assert (expr.const == const).all() + assert expr.nterm == 0 + + +def test_linexpr_from_constant_pl_series(m: Model) -> None: + const = pl.Series([1, 2]) + expr = LinearExpression(const, m) + assert (expr.const == const.to_numpy()).all() + assert expr.nterm == 0 + + +def test_linexpr_from_constant_pandas_series(m: Model) -> None: + const = pd.Series([1, 2], index=pd.RangeIndex(2, name="dim_0")) + expr = LinearExpression(const, m) + assert (expr.const == const).all() + assert expr.nterm == 0 + + +def test_linexpr_from_constant_pandas_dataframe(m: Model) -> None: + const = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + expr = LinearExpression(const, m) + assert (expr.const == const).all() + assert expr.nterm == 0 + + +def test_linexpr_from_constant_numpy_array(m: Model) -> None: + const = np.array([1, 2]) + expr = LinearExpression(const, m) + assert (expr.const == const).all() + assert expr.nterm == 0 + + +def 
test_linexpr_from_constant_scalar(m: Model) -> None: + const = 1 + expr = LinearExpression(const, m) + assert (expr.const == const).all() + assert expr.nterm == 0 + + +def test_repr(m: Model) -> None: + expr = m.linexpr((10, "x"), (1, "y")) + expr.__repr__() + + +def test_fill_value() -> None: + isinstance(LinearExpression._fill_value, dict) + + +def test_linexpr_with_scalars(m: Model) -> None: + expr = m.linexpr((10, "x"), (1, "y")) + target = xr.DataArray( + [[10, 1], [10, 1]], coords={"dim_0": [0, 1]}, dims=["dim_0", TERM_DIM] + ) + assert_equal(expr.coeffs, target) + + +def test_linexpr_with_variables_and_constants( + m: Model, x: Variable, y: Variable +) -> None: + expr = m.linexpr((10, x), (1, y), 2) + assert (expr.const == 2).all() + + +def test_linexpr_with_series(m: Model, v: Variable) -> None: + lhs = pd.Series(np.arange(20)), v + expr = m.linexpr(lhs) + isinstance(expr, LinearExpression) + + +def test_linexpr_with_dataframe(m: Model, z: Variable) -> None: + lhs = pd.DataFrame(z.labels), z + expr = m.linexpr(lhs) + isinstance(expr, LinearExpression) + + +def test_linexpr_duplicated_index(m: Model) -> None: + expr = m.linexpr((10, "x"), (-1, "x")) + assert (expr.data._term == [0, 1]).all() + + +def test_linear_expression_with_multiplication(x: Variable) -> None: + expr = 1 * x + assert isinstance(expr, LinearExpression) + assert expr.nterm == 1 + assert len(expr.vars.dim_0) == x.shape[0] + + expr = x * 1 + assert isinstance(expr, LinearExpression) + + expr2 = x.mul(1) + assert_linequal(expr, expr2) + + expr3 = expr.mul(1) + assert_linequal(expr, expr3) + + expr = x / 1 + assert isinstance(expr, LinearExpression) + + expr = x / 1.0 + assert isinstance(expr, LinearExpression) + + expr2 = x.div(1) + assert_linequal(expr, expr2) + + expr3 = expr.div(1) + assert_linequal(expr, expr3) + + expr = np.array([1, 2]) * x + assert isinstance(expr, LinearExpression) + + expr = np.array(1) * x + assert isinstance(expr, LinearExpression) + + expr = 
xr.DataArray(np.array([[1, 2], [2, 3]])) * x + assert isinstance(expr, LinearExpression) + + expr = pd.Series([1, 2], index=pd.RangeIndex(2, name="dim_0")) * x + assert isinstance(expr, LinearExpression) + + quad = x * x + assert isinstance(quad, QuadraticExpression) + + with pytest.raises(TypeError): + quad * quad + + expr = x * 1 + assert isinstance(expr, LinearExpression) + assert expr.__mul__(object()) is NotImplemented + assert expr.__rmul__(object()) is NotImplemented + + +def test_linear_expression_with_addition(m: Model, x: Variable, y: Variable) -> None: + expr = 10 * x + y + assert isinstance(expr, LinearExpression) + assert_linequal(expr, m.linexpr((10, "x"), (1, "y"))) + + expr = x + 8 * y + assert isinstance(expr, LinearExpression) + assert_linequal(expr, m.linexpr((1, "x"), (8, "y"))) + + expr = x + y + assert isinstance(expr, LinearExpression) + assert_linequal(expr, m.linexpr((1, "x"), (1, "y"))) + + expr2 = x.add(y) + assert_linequal(expr, expr2) + + expr3 = (x * 1).add(y) + assert_linequal(expr, expr3) + + expr3 = x + (x * x) + assert isinstance(expr3, QuadraticExpression) + + +def test_linear_expression_with_raddition(m: Model, x: Variable) -> None: + expr = x * 1.0 + expr_2: LinearExpression = 10.0 + expr + assert isinstance(expr, LinearExpression) + expr_3: LinearExpression = expr + 10.0 + assert_linequal(expr_2, expr_3) + + +def test_linear_expression_with_subtraction(m: Model, x: Variable, y: Variable) -> None: + expr = x - y + assert isinstance(expr, LinearExpression) + assert_linequal(expr, m.linexpr((1, "x"), (-1, "y"))) + + expr2 = x.sub(y) + assert_linequal(expr, expr2) + + expr3: LinearExpression = x * 1 + expr4 = expr3.sub(y) + assert_linequal(expr, expr4) + + expr = -x - 8 * y + assert isinstance(expr, LinearExpression) + assert_linequal(expr, m.linexpr((-1, "x"), (-8, "y"))) + + +def test_linear_expression_rsubtraction(x: Variable, y: Variable) -> None: + expr = x * 1.0 + expr_2: LinearExpression = 10.0 - expr + assert 
isinstance(expr_2, LinearExpression) + expr_3: LinearExpression = (expr - 10.0) * -1 + assert_linequal(expr_2, expr_3) + assert expr.__rsub__(object()) is NotImplemented + + +def test_linear_expression_with_constant(m: Model, x: Variable, y: Variable) -> None: + expr = x + 1 + assert isinstance(expr, LinearExpression) + assert (expr.const == 1).all() + + expr = -x - 8 * y - 10 + assert isinstance(expr, LinearExpression) + assert (expr.const == -10).all() + assert expr.nterm == 2 + + +def test_linear_expression_with_constant_multiplication( + m: Model, x: Variable, y: Variable +) -> None: + expr = x + 1 + + obs = expr * 10 + assert isinstance(obs, LinearExpression) + assert (obs.const == 10).all() + + obs = expr * pd.Series([1, 2, 3], index=pd.RangeIndex(3, name="new_dim")) + assert isinstance(obs, LinearExpression) + assert obs.shape == (2, 3, 1) + + +def test_linear_expression_multi_indexed(u: Variable) -> None: + expr = 3 * u + 1 * u + assert isinstance(expr, LinearExpression) + + +def test_linear_expression_with_errors(m: Model, x: Variable) -> None: + with pytest.raises(TypeError): + x / x + + with pytest.raises(TypeError): + x / (1 * x) + + with pytest.raises(TypeError): + m.linexpr((10, x.labels), (1, "y")) + + with pytest.raises(TypeError): + m.linexpr(a=2) # type: ignore + + +def test_linear_expression_from_rule(m: Model, x: Variable, y: Variable) -> None: + def bound(m: Model, i: int) -> ScalarLinearExpression: + return ( + (i - 1) * x.at[i - 1] + y.at[i] + 1 * x.at[i] + if i == 1 + else i * x.at[i] - y.at[i] + ) + + expr = LinearExpression.from_rule(m, bound, x.coords) + assert isinstance(expr, LinearExpression) + assert expr.nterm == 3 + repr(expr) # test repr + + +def test_linear_expression_from_rule_with_return_none( + m: Model, x: Variable, y: Variable +) -> None: + # with return type None + def bound(m: Model, i: int) -> ScalarLinearExpression | None: + if i == 1: + return (i - 1) * x.at[i - 1] + y.at[i] + return None + + expr = 
LinearExpression.from_rule(m, bound, x.coords) + assert isinstance(expr, LinearExpression) + assert (expr.vars[0] == -1).all() + assert (expr.vars[1] != -1).all() + assert expr.coeffs[0].isnull().all() + assert expr.coeffs[1].notnull().all() + repr(expr) # test repr + + +def test_linear_expression_addition(x: Variable, y: Variable, z: Variable) -> None: + expr = 10 * x + y + other = 2 * y + z + res = expr + other + + assert res.nterm == expr.nterm + other.nterm + assert (res.coords["dim_0"] == expr.coords["dim_0"]).all() + assert (res.coords["dim_1"] == other.coords["dim_1"]).all() + assert res.data.notnull().all().to_array().all() + + res2 = expr.add(other) + assert_linequal(res, res2) + + assert isinstance(x - expr, LinearExpression) + assert isinstance(x + expr, LinearExpression) + + +def test_linear_expression_addition_with_constant( + x: Variable, y: Variable, z: Variable +) -> None: + expr = 10 * x + y + 10 + assert (expr.const == 10).all() + + expr = 10 * x + y + np.array([2, 3]) + assert list(expr.const) == [2, 3] + + expr = 10 * x + y + pd.Series([2, 3]) + assert list(expr.const) == [2, 3] + + +def test_linear_expression_subtraction(x: Variable, y: Variable, z: Variable) -> None: + expr = 10 * x + y - 10 + assert (expr.const == -10).all() + + expr = 10 * x + y - np.array([2, 3]) + assert list(expr.const) == [-2, -3] + + expr = 10 * x + y - pd.Series([2, 3]) + assert list(expr.const) == [-2, -3] + + +def test_linear_expression_substraction( + x: Variable, y: Variable, z: Variable, v: Variable +) -> None: + expr = 10 * x + y + other = 2 * y - z + res = expr - other + + assert res.nterm == expr.nterm + other.nterm + assert (res.coords["dim_0"] == expr.coords["dim_0"]).all() + assert (res.coords["dim_1"] == other.coords["dim_1"]).all() + assert res.data.notnull().all().to_array().all() + + +def test_linear_expression_sum( + x: Variable, y: Variable, z: Variable, v: Variable +) -> None: + expr = 10 * x + y + z + res = expr.sum("dim_0") + + assert res.size == 
expr.size + assert res.nterm == expr.nterm * len(expr.data.dim_0) + + res = expr.sum() + assert res.size == expr.size + assert res.nterm == expr.size + assert res.data.notnull().all().to_array().all() + + assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) + + # test special case override coords + expr = v.loc[:9] + v.loc[10:] + assert expr.nterm == 2 + assert len(expr.coords["dim_2"]) == 10 + + +def test_linear_expression_sum_with_const( + x: Variable, y: Variable, z: Variable, v: Variable +) -> None: + expr = 10 * x + y + z + 10 + res = expr.sum("dim_0") + + assert res.size == expr.size + assert res.nterm == expr.nterm * len(expr.data.dim_0) + assert (res.const == 20).all() + + res = expr.sum() + assert res.size == expr.size + assert res.nterm == expr.size + assert res.data.notnull().all().to_array().all() + assert (res.const == 60).item() + + assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) + + # test special case override coords + expr = v.loc[:9] + v.loc[10:] + assert expr.nterm == 2 + assert len(expr.coords["dim_2"]) == 10 + + +def test_linear_expression_sum_drop_zeros(z: Variable) -> None: + coeff = xr.zeros_like(z.labels) + coeff[1, 0] = 3 + coeff[0, 2] = 5 + expr = coeff * z + + res = expr.sum("dim_0", drop_zeros=True) + assert res.nterm == 1 + + res = expr.sum("dim_1", drop_zeros=True) + assert res.nterm == 1 + + coeff[1, 2] = 4 + expr.data["coeffs"] = coeff + res = expr.sum() + + res = expr.sum("dim_0", drop_zeros=True) + assert res.nterm == 2 + + res = expr.sum("dim_1", drop_zeros=True) + assert res.nterm == 2 + + +def test_linear_expression_sum_warn_using_dims(z: Variable) -> None: + with pytest.warns(DeprecationWarning): + (1 * z).sum(dims="dim_0") + + +def test_linear_expression_sum_warn_unknown_kwargs(z: Variable) -> None: + with pytest.raises(ValueError): + (1 * z).sum(unknown_kwarg="dim_0") + + +def test_linear_expression_power(x: Variable) -> None: + expr: LinearExpression = x * 1.0 + qd_expr = expr**2 + assert 
isinstance(qd_expr, QuadraticExpression) + + qd_expr2 = expr.pow(2) + assert_quadequal(qd_expr, qd_expr2) + + with pytest.raises(ValueError): + expr**3 + + +def test_linear_expression_multiplication( + x: Variable, y: Variable, z: Variable +) -> None: + expr = 10 * x + y + z + mexpr = expr * 10 + assert (mexpr.coeffs.sel(dim_1=0, dim_0=0, _term=0) == 100).item() + + mexpr = 10 * expr + assert (mexpr.coeffs.sel(dim_1=0, dim_0=0, _term=0) == 100).item() + + mexpr = expr / 100 + assert (mexpr.coeffs.sel(dim_1=0, dim_0=0, _term=0) == 1 / 10).item() + + mexpr = expr / 100.0 + assert (mexpr.coeffs.sel(dim_1=0, dim_0=0, _term=0) == 1 / 10).item() + + +def test_matmul_variable_and_const(x: Variable, y: Variable) -> None: + const = np.array([1, 2]) + expr = x @ const + assert expr.nterm == 2 + assert_linequal(expr, (x * const).sum()) + + assert_linequal(x @ const, (x * const).sum()) + + assert_linequal(x.dot(const), x @ const) + + +def test_matmul_expr_and_const(x: Variable, y: Variable) -> None: + expr = 10 * x + y + const = np.array([1, 2]) + res = expr @ const + target = (10 * x) @ const + y @ const + assert res.nterm == 4 + assert_linequal(res, target) + + assert_linequal(expr.dot(const), target) + + +def test_matmul_wrong_input(x: Variable, y: Variable, z: Variable) -> None: + expr = 10 * x + y + z + with pytest.raises(TypeError): + expr @ expr + + +def test_linear_expression_multiplication_invalid( + x: Variable, y: Variable, z: Variable +) -> None: + expr = 10 * x + y + z + + with pytest.raises(TypeError): + expr = 10 * x + y + z + expr * expr + + with pytest.raises(TypeError): + expr = 10 * x + y + z + expr / x + + +class TestCoordinateAlignment: + @pytest.fixture(params=["da", "series"]) + def subset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) + + @pytest.fixture(params=["da", 
"series"]) + def superset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray( + np.arange(25, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + return pd.Series( + np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") + ) + + @pytest.fixture + def expected_fill(self) -> np.ndarray: + arr = np.zeros(20) + arr[1] = 10.0 + arr[3] = 30.0 + return arr + + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + vals[5] = np.nan + vals[19] = np.nan + if request.param == "xarray": + return xr.DataArray(vals, dims=["dim_2"], coords={"dim_2": range(20)}) + return pd.Series(vals, index=pd.Index(range(20), name="dim_2")) + + class TestSubset: + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_fills_zeros( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target * subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_subset_fills_zeros( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + if operand == "var": + result = v + subset + expected = expected_fill + else: + result = (v + 5) + subset + expected = expected_fill + 5 + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_subset_fills_negated( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) 
-> None: + if operand == "var": + result = v - subset + expected = -expected_fill + else: + result = (v + 5) - subset + expected = 5 - expected_fill + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_subset_inverts_nonzero( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else 1 * v + result = target / subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) + assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + + def test_subset_add_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset + v + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + def test_subset_sub_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset - v + np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) + + class TestSuperset: + def test_add_superset_pins_to_lhs_coords( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v + superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + + def test_add_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset + v, v + superset) + + def test_sub_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset - v, -v + superset) + + def test_mul_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset * v, v * superset) + + def test_mul_superset_pins_to_lhs_coords( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v * superset + assert result.sizes["dim_2"] == 
v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: + superset_nonzero = xr.DataArray( + np.arange(1, 26, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + result = v / superset_nonzero + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + class TestDisjoint: + def test_add_disjoint_fills_zeros(self, v: Variable) -> None: + disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v + disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, np.zeros(20)) + + def test_mul_disjoint_fills_zeros(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v * disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) + + def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v / disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + class TestCommutativity: + @pytest.mark.parametrize( + "make_lhs,make_rhs", + [ + (lambda v, s: s * v, lambda v, s: v * s), + (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), + (lambda v, s: s + v, lambda v, s: v + s), + (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), + ], + ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], + ) + def test_commutativity( + self, + v: Variable, + subset: xr.DataArray, + make_lhs: Any, + make_rhs: Any, + ) -> None: + assert_linequal(make_lhs(v, 
subset), make_rhs(v, subset)) + + def test_sub_var_anticommutative( + self, v: Variable, subset: xr.DataArray + ) -> None: + assert_linequal(subset - v, -v + subset) + + def test_sub_expr_anticommutative( + self, v: Variable, subset: xr.DataArray + ) -> None: + expr = v + 5 + assert_linequal(subset - expr, -(expr - subset)) + + def test_add_commutativity_full_coords(self, v: Variable) -> None: + full = xr.DataArray( + np.arange(20, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(20)}, + ) + assert_linequal(v + full, full + v) + + class TestQuadratic: + def test_quadexpr_add_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr + subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected_fill) + + def test_quadexpr_sub_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr - subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, -expected_fill) + + def test_quadexpr_mul_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr * subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_mul_quadexpr( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = subset * qexpr + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert 
not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: + qexpr = v * v + assert_quadequal(subset + qexpr, qexpr + subset) + + class TestMissingValues: + """ + Same shape as variable but with NaN entries in the constant. + + NaN values are filled with operation-specific neutral elements: + - Addition/subtraction: NaN -> 0 (additive identity) + - Multiplication: NaN -> 0 (zeroes out the variable) + - Division: NaN -> 1 (multiplicative identity, no scaling) + """ + + NAN_POSITIONS = [0, 5, 19] + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_nan_filled( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + base_const = 0.0 if operand == "var" else 5.0 + target = v if operand == "var" else v + 5 + result = target + nan_constant + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.const.values).any() + # At NaN positions, const should be unchanged (added 0) + for i in self.NAN_POSITIONS: + assert result.const.values[i] == base_const + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_nan_filled( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + base_const = 0.0 if operand == "var" else 5.0 + target = v if operand == "var" else v + 5 + result = target - nan_constant + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.const.values).any() + # At NaN positions, const should be unchanged (subtracted 0) + for i in self.NAN_POSITIONS: + assert result.const.values[i] == base_const + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_nan_filled( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target * nan_constant + assert result.sizes["dim_2"] == 20 + assert 
not np.isnan(result.coeffs.squeeze().values).any() + # At NaN positions, coeffs should be 0 (variable zeroed out) + for i in self.NAN_POSITIONS: + assert result.coeffs.squeeze().values[i] == 0.0 + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_nan_filled( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target / nan_constant + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.coeffs.squeeze().values).any() + # At NaN positions, coeffs should be unchanged (divided by 1) + original_coeffs = (1 * v).coeffs.squeeze().values + for i in self.NAN_POSITIONS: + assert result.coeffs.squeeze().values[i] == original_coeffs[i] + + def test_add_commutativity( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + result_a = v + nan_constant + result_b = nan_constant + v + assert not np.isnan(result_a.const.values).any() + assert not np.isnan(result_b.const.values).any() + np.testing.assert_array_equal(result_a.const.values, result_b.const.values) + np.testing.assert_array_equal( + result_a.coeffs.values, result_b.coeffs.values + ) + + def test_mul_commutativity( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + result_a = v * nan_constant + result_b = nan_constant * v + assert not np.isnan(result_a.coeffs.values).any() + assert not np.isnan(result_b.coeffs.values).any() + np.testing.assert_array_equal( + result_a.coeffs.values, result_b.coeffs.values + ) + + def test_quadexpr_add_nan( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + qexpr = v * v + result = qexpr + nan_constant + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.const.values).any() + + class TestExpressionWithNaN: + """Test that NaN in expression's own const/coeffs doesn't propagate.""" + + def test_shifted_expr_add_scalar(self, v: 
Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr + 5 + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == 5.0 + + def test_shifted_expr_mul_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr * 2 + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_add_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + expr = (1 * v).shift(dim_2=1) + result = expr + arr + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == 0.0 + + def test_shifted_expr_mul_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr * arr + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_div_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr / 2 + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_sub_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr - 3 + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == -3.0 + + def test_shifted_expr_div_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr / arr + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: + nan_coeff = np.ones(v.sizes["dim_2"]) + nan_coeff[0] = np.nan + result = v.to_linexpr(nan_coeff) + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + class TestMultiDim: + def test_multidim_subset_mul(self, m: Model) -> 
None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w * subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) + assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) + + def test_multidim_subset_add(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w + subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.const.values).any() + assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) + + class TestXarrayCompat: + def test_da_eq_da_still_works(self) -> None: + da1 = xr.DataArray([1, 2, 3]) + da2 = xr.DataArray([1, 2, 3]) + result = da1 == da2 + assert result.values.all() + + def test_da_eq_scalar_still_works(self) -> None: + da = xr.DataArray([1, 2, 3]) + result = da == 2 + np.testing.assert_array_equal(result.values, [False, True, False]) + + def test_da_truediv_var_raises(self, v: Variable) -> None: + da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) + with pytest.raises(TypeError): + da / v # type: ignore[operator] + + +def test_expression_inherited_properties(x: Variable, y: Variable) 
-> None: + expr = 10 * x + y + assert isinstance(expr.attrs, dict) + assert isinstance(expr.coords, xr.Coordinates) + assert isinstance(expr.indexes, xr.core.indexes.Indexes) + assert isinstance(expr.sizes, xr.core.utils.Frozen) + + +def test_linear_expression_getitem_single(x: Variable, y: Variable) -> None: + expr = 10 * x + y + 3 + sel = expr[0] + assert isinstance(sel, LinearExpression) + assert sel.nterm == 2 + # one expression with two terms (constant is not counted) + assert sel.size == 2 + + +def test_linear_expression_getitem_slice(x: Variable, y: Variable) -> None: + expr = 10 * x + y + 3 + sel = expr[:1] + + assert isinstance(sel, LinearExpression) + assert sel.nterm == 2 + # one expression with two terms (constant is not counted) + assert sel.size == 2 + + +def test_linear_expression_getitem_list(x: Variable, y: Variable, z: Variable) -> None: + expr = 10 * x + z + 10 + sel = expr[:, [0, 2]] + assert isinstance(sel, LinearExpression) + assert sel.nterm == 2 + # four expressions with two terms (constant is not counted) + assert sel.size == 8 + + +def test_linear_expression_loc(x: Variable, y: Variable) -> None: + expr = x + y + assert expr.loc[0].size < expr.loc[:5].size + + +def test_linear_expression_empty(v: Variable) -> None: + expr = 7 * v + assert not expr.empty + assert expr.loc[[]].empty + + with pytest.warns(DeprecationWarning, match="use `.empty` property instead"): + assert expr.loc[[]].empty() + + +def test_linear_expression_isnull(v: Variable) -> None: + expr = np.arange(20) * v + filter = (expr.coeffs >= 10).any(TERM_DIM) + expr = expr.where(filter) + assert expr.isnull().sum() == 10 + + +def test_linear_expression_flat(v: Variable) -> None: + coeff = np.arange(1, 21) # use non-zero coefficients + expr = coeff * v + df = expr.flat + assert isinstance(df, pd.DataFrame) + assert (df.coeffs == coeff).all() + + +def test_iterate_slices(x: Variable, y: Variable) -> None: + expr = x + 10 * y + for s in expr.iterate_slices(slice_size=2): + assert 
isinstance(s, LinearExpression) + assert s.nterm == expr.nterm + assert s.coord_dims == expr.coord_dims + + +def test_linear_expression_to_polars(v: Variable) -> None: + coeff = np.arange(1, 21) # use non-zero coefficients + expr = coeff * v + df = expr.to_polars() + assert isinstance(df, pl.DataFrame) + assert (df["coeffs"].to_numpy() == coeff).all() + + +def test_linear_expression_where(v: Variable) -> None: + expr = np.arange(20) * v + filter = (expr.coeffs >= 10).any(TERM_DIM) + expr = expr.where(filter) + assert isinstance(expr, LinearExpression) + assert expr.nterm == 1 + + expr = np.arange(20) * v + expr = expr.where(filter, drop=True).sum() + assert isinstance(expr, LinearExpression) + assert expr.nterm == 10 + + +def test_linear_expression_where_with_const(v: Variable) -> None: + expr = np.arange(20) * v + 10 + filter = (expr.coeffs >= 10).any(TERM_DIM) + expr = expr.where(filter) + assert isinstance(expr, LinearExpression) + assert expr.nterm == 1 + assert expr.const[:10].isnull().all() + assert (expr.const[10:] == 10).all() + + expr = np.arange(20) * v + 10 + expr = expr.where(filter, drop=True).sum() + assert isinstance(expr, LinearExpression) + assert expr.nterm == 10 + assert expr.const == 100 + + +def test_linear_expression_where_scalar_fill_value(v: Variable) -> None: + expr = np.arange(20) * v + 10 + filter = (expr.coeffs >= 10).any(TERM_DIM) + expr = expr.where(filter, 200) + assert isinstance(expr, LinearExpression) + assert expr.nterm == 1 + assert (expr.const[:10] == 200).all() + assert (expr.const[10:] == 10).all() + + +def test_linear_expression_where_array_fill_value(v: Variable) -> None: + expr = np.arange(20) * v + 10 + filter = (expr.coeffs >= 10).any(TERM_DIM) + other = expr.coeffs + expr = expr.where(filter, other) + assert isinstance(expr, LinearExpression) + assert expr.nterm == 1 + assert (expr.const[:10] == other[:10]).all() + assert (expr.const[10:] == 10).all() + + +def test_linear_expression_where_expr_fill_value(v: Variable) -> 
None: + expr = np.arange(20) * v + 10 + expr2 = np.arange(20) * v + 5 + filter = (expr.coeffs >= 10).any(TERM_DIM) + res = expr.where(filter, expr2) + assert isinstance(res, LinearExpression) + assert res.nterm == 1 + assert (res.const[:10] == expr2.const[:10]).all() + assert (res.const[10:] == 10).all() + + +def test_where_with_helper_dim_false(v: Variable) -> None: + expr = np.arange(20) * v + with pytest.raises(ValueError): + filter = expr.coeffs >= 10 + expr.where(filter) + + +def test_linear_expression_shift(v: Variable) -> None: + shifted = v.to_linexpr().shift(dim_2=2) + assert shifted.nterm == 1 + assert shifted.coeffs.loc[:1].isnull().all() + assert (shifted.vars.loc[:1] == -1).all() + + +def test_linear_expression_swap_dims(v: Variable) -> None: + expr = v.to_linexpr() + expr = expr.assign_coords({"second": ("dim_2", expr.indexes["dim_2"] + 100)}) + expr = expr.swap_dims({"dim_2": "second"}) + assert isinstance(expr, LinearExpression) + assert expr.coord_dims == ("second",) + + +def test_linear_expression_set_index(v: Variable) -> None: + expr = v.to_linexpr() + expr = expr.assign_coords({"second": ("dim_2", expr.indexes["dim_2"] + 100)}) + expr = expr.set_index({"multi": ["dim_2", "second"]}) + assert isinstance(expr, LinearExpression) + assert expr.coord_dims == ("multi",) + assert isinstance(expr.indexes["multi"], pd.MultiIndex) + + +def test_linear_expression_fillna(v: Variable) -> None: + expr = np.arange(20) * v + 10 + assert expr.const.sum() == 200 + + filter = (expr.coeffs >= 10).any(TERM_DIM) + filtered = expr.where(filter) + assert isinstance(filtered, LinearExpression) + assert filtered.const.sum() == 100 + + filled = filtered.fillna(10) + assert isinstance(filled, LinearExpression) + assert filled.const.sum() == 200 + assert filled.coeffs.isnull().sum() == 10 + + +def test_variable_expand_dims(v: Variable) -> None: + result = v.to_linexpr().expand_dims("new_dim") + assert isinstance(result, LinearExpression) + assert result.coord_dims == 
("dim_2", "new_dim") + + +def test_variable_stack(v: Variable) -> None: + result = v.to_linexpr().expand_dims("new_dim").stack(new=("new_dim", "dim_2")) + assert isinstance(result, LinearExpression) + assert result.coord_dims == ("new",) + + +def test_linear_expression_unstack(v: Variable) -> None: + result = v.to_linexpr().expand_dims("new_dim").stack(new=("new_dim", "dim_2")) + result = result.unstack("new") + assert isinstance(result, LinearExpression) + assert result.coord_dims == ("new_dim", "dim_2") + + +def test_linear_expression_diff(v: Variable) -> None: + diff = v.to_linexpr().diff("dim_2") + assert diff.nterm == 2 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby(v: Variable, use_fallback: bool) -> None: + expr = 1 * v + dim = v.dims[0] + groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords, name=dim) + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert dim in grouped.dims + assert (grouped.data[dim] == [1, 2]).all() + assert grouped.nterm == 10 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_on_same_name_as_target_dim( + v: Variable, use_fallback: bool +) -> None: + expr = 1 * v + groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords) + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "group" in grouped.dims + assert (grouped.data.group == [1, 2]).all() + assert grouped.nterm == 10 + + +@pytest.mark.parametrize("use_fallback", [True]) +def test_linear_expression_groupby_ndim(z: Variable, use_fallback: bool) -> None: + # TODO: implement fallback for n-dim groupby, see https://github.com/PyPSA/linopy/issues/299 + expr = 1 * z + groups = xr.DataArray([[1, 1, 2], [1, 3, 3]], coords=z.coords) + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "group" in grouped.dims + # there are three groups, 1, 2 and 3, the largest group has 3 elements + assert (grouped.data.group == [1, 2, 3]).all() + 
assert grouped.nterm == 3 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_name(v: Variable, use_fallback: bool) -> None: + expr = 1 * v + groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords, name="my_group") + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "my_group" in grouped.dims + assert (grouped.data.my_group == [1, 2]).all() + assert grouped.nterm == 10 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_series(v: Variable, use_fallback: bool) -> None: + expr = 1 * v + groups = pd.Series([1] * 10 + [2] * 10, index=v.indexes["dim_2"]) + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "group" in grouped.dims + assert (grouped.data.group == [1, 2]).all() + assert grouped.nterm == 10 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_series_with_name( + v: Variable, use_fallback: bool +) -> None: + expr = 1 * v + groups = pd.Series([1] * 10 + [2] * 10, index=v.indexes[v.dims[0]], name="my_group") + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "my_group" in grouped.dims + assert (grouped.data.my_group == [1, 2]).all() + assert grouped.nterm == 10 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_series_with_same_group_name( + v: Variable, use_fallback: bool +) -> None: + """ + Test that the group by works with a series whose name is the same as + the dimension to group. 
+ """ + expr = 1 * v + groups = pd.Series([1] * 10 + [2] * 10, index=v.indexes["dim_2"]) + groups.name = "dim_2" + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "dim_2" in grouped.dims + assert (grouped.data.dim_2 == [1, 2]).all() + assert grouped.nterm == 10 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_series_on_multiindex( + u: Variable, use_fallback: bool +) -> None: + expr = 1 * u + len_grouped_dim = len(u.data["dim_3"]) + groups = pd.Series([1] * len_grouped_dim, index=u.indexes["dim_3"]) + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "group" in grouped.dims + assert (grouped.data.group == [1]).all() + assert grouped.nterm == len_grouped_dim + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_dataframe( + v: Variable, use_fallback: bool +) -> None: + expr = 1 * v + groups = pd.DataFrame( + {"a": [1] * 10 + [2] * 10, "b": list(range(4)) * 5}, index=v.indexes["dim_2"] + ) + if use_fallback: + with pytest.raises(ValueError): + expr.groupby(groups).sum(use_fallback=use_fallback) + return + + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + index = pd.MultiIndex.from_frame(groups) + assert "group" in grouped.dims + assert set(grouped.data.group.values) == set(index.values) + assert grouped.nterm == 3 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_dataframe_with_same_group_name( + v: Variable, use_fallback: bool +) -> None: + """ + Test that the group by works with a dataframe whose column name is the same as + the dimension to group. 
+ """ + expr = 1 * v + groups = pd.DataFrame( + {"dim_2": [1] * 10 + [2] * 10, "b": list(range(4)) * 5}, + index=v.indexes["dim_2"], + ) + if use_fallback: + with pytest.raises(ValueError): + expr.groupby(groups).sum(use_fallback=use_fallback) + return + + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + index = pd.MultiIndex.from_frame(groups) + assert "group" in grouped.dims + assert set(grouped.data.group.values) == set(index.values) + assert grouped.nterm == 3 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_dataframe_on_multiindex( + u: Variable, use_fallback: bool +) -> None: + expr = 1 * u + len_grouped_dim = len(u.data["dim_3"]) + groups = pd.DataFrame({"a": [1] * len_grouped_dim}, index=u.indexes["dim_3"]) + + if use_fallback: + with pytest.raises(ValueError): + expr.groupby(groups).sum(use_fallback=use_fallback) + return + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "group" in grouped.dims + assert isinstance(grouped.indexes["group"], pd.MultiIndex) + assert grouped.nterm == len_grouped_dim + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_dataarray( + v: Variable, use_fallback: bool +) -> None: + expr = 1 * v + df = pd.DataFrame( + {"a": [1] * 10 + [2] * 10, "b": list(range(4)) * 5}, index=v.indexes["dim_2"] + ) + groups = xr.DataArray(df) + + # this should not be the case, see https://github.com/PyPSA/linopy/issues/351 + if use_fallback: + with pytest.raises((KeyError, IndexError)): + expr.groupby(groups).sum(use_fallback=use_fallback) + return + + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + index = pd.MultiIndex.from_frame(df) + assert "group" in grouped.dims + assert set(grouped.data.group.values) == set(index.values) + assert grouped.nterm == 3 + + +def test_linear_expression_groupby_with_dataframe_non_aligned(v: Variable) -> None: + expr = 1 * v + groups = pd.DataFrame( + {"a": [1] * 
10 + [2] * 10, "b": list(range(4)) * 5}, index=v.indexes["dim_2"] + ) + target = expr.groupby(groups).sum() + + groups_non_aligned = groups[::-1] + grouped = expr.groupby(groups_non_aligned).sum() + assert_linequal(grouped, target) + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_with_const(v: Variable, use_fallback: bool) -> None: + expr = 1 * v + 15 + groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords) + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "group" in grouped.dims + assert (grouped.data.group == [1, 2]).all() + assert grouped.nterm == 10 + assert (grouped.const == 150).all() + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_asymmetric(v: Variable, use_fallback: bool) -> None: + expr = 1 * v + # now asymmetric groups which result in different nterms + groups = xr.DataArray([1] * 12 + [2] * 8, coords=v.coords) + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "group" in grouped.dims + # first group must be full with vars + assert (grouped.data.sel(group=1) > 0).all() + # the last 4 entries of the second group must be empty, i.e. -1 + assert (grouped.data.sel(group=2).isel(_term=slice(None, -4)).vars >= 0).all() + assert (grouped.data.sel(group=2).isel(_term=slice(-4, None)).vars == -1).all() + assert grouped.nterm == 12 + + +@pytest.mark.parametrize("use_fallback", [True, False]) +def test_linear_expression_groupby_asymmetric_with_const( + v: Variable, use_fallback: bool +) -> None: + expr = 1 * v + 15 + # now asymmetric groups which result in different nterms + groups = xr.DataArray([1] * 12 + [2] * 8, coords=v.coords) + grouped = expr.groupby(groups).sum(use_fallback=use_fallback) + assert "group" in grouped.dims + # first group must be full with vars + assert (grouped.data.sel(group=1) > 0).all() + # the last 4 entries of the second group must be empty, i.e.
-1 + assert (grouped.data.sel(group=2).isel(_term=slice(None, -4)).vars >= 0).all() + assert (grouped.data.sel(group=2).isel(_term=slice(-4, None)).vars == -1).all() + assert grouped.nterm == 12 + assert list(grouped.const) == [180, 120] + + +def test_linear_expression_groupby_roll(v: Variable) -> None: + expr = 1 * v + groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords) + grouped = expr.groupby(groups).roll(dim_2=1) + assert grouped.nterm == 1 + assert grouped.vars[0].item() == 19 + + +def test_linear_expression_groupby_roll_with_const(v: Variable) -> None: + expr = 1 * v + np.arange(20) + groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords) + grouped = expr.groupby(groups).roll(dim_2=1) + assert grouped.nterm == 1 + assert grouped.vars[0].item() == 19 + assert grouped.const[0].item() == 9 + + +def test_linear_expression_groupby_from_variable(v: Variable) -> None: + groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords) + grouped = v.groupby(groups).sum() + assert "group" in grouped.dims + assert (grouped.data.group == [1, 2]).all() + assert grouped.nterm == 10 + + +def test_linear_expression_rolling(v: Variable) -> None: + expr = 1 * v + rolled = expr.rolling(dim_2=2).sum() + assert rolled.nterm == 2 + + rolled = expr.rolling(dim_2=3).sum() + assert rolled.nterm == 3 + + with pytest.raises(ValueError): + expr.rolling().sum() + + +def test_linear_expression_rolling_with_const(v: Variable) -> None: + expr = 1 * v + 15 + rolled = expr.rolling(dim_2=2).sum() + assert rolled.nterm == 2 + assert rolled.const[0].item() == 15 + assert (rolled.const[1:] == 30).all() + + rolled = expr.rolling(dim_2=3).sum() + assert rolled.nterm == 3 + assert rolled.const[0].item() == 15 + assert rolled.const[1].item() == 30 + assert (rolled.const[2:] == 45).all() + + +def test_linear_expression_rolling_from_variable(v: Variable) -> None: + rolled = v.rolling(dim_2=2).sum() + assert rolled.nterm == 2 + + +def test_linear_expression_from_tuples(x: Variable, y: Variable) 
-> None: + expr = LinearExpression.from_tuples((10, x), (1, y)) + assert isinstance(expr, LinearExpression) + + with pytest.warns(DeprecationWarning): + expr2 = LinearExpression.from_tuples((10, x), (1,)) + assert isinstance(expr2, LinearExpression) + assert (expr2.const == 1).all() + + expr3 = LinearExpression.from_tuples((10, x), 1) + assert isinstance(expr3, LinearExpression) + assert_linequal(expr2, expr3) + + expr4 = LinearExpression.from_tuples((10, x), (1, y), 1) + assert isinstance(expr4, LinearExpression) + assert (expr4.const == 1).all() + + expr5 = LinearExpression.from_tuples(1, model=x.model) + assert isinstance(expr5, LinearExpression) + + +def test_linear_expression_from_tuples_bad_calls( + m: Model, x: Variable, y: Variable +) -> None: + with pytest.raises(ValueError): + LinearExpression.from_tuples((10, x), (1, y), x) + + with pytest.raises(ValueError): + LinearExpression.from_tuples((10, x, 3), (1, y), 1) + + sv = ScalarVariable(label=0, model=m) + with pytest.raises(TypeError): + LinearExpression.from_tuples((np.array([1, 1]), sv)) + + with pytest.raises(TypeError): + LinearExpression.from_tuples((x, x)) + + with pytest.raises(ValueError): + LinearExpression.from_tuples(10) + + +def test_linear_expression_from_constant_scalar(m: Model) -> None: + expr = LinearExpression.from_constant(model=m, constant=10) + assert expr.is_constant + assert isinstance(expr, LinearExpression) + assert (expr.const == 10).all() + + +def test_linear_expression_from_constant_1D(m: Model) -> None: + arr = pd.Series(index=pd.Index([0, 1], name="t"), data=[10, 20]) + expr = LinearExpression.from_constant(model=m, constant=arr) + assert isinstance(expr, LinearExpression) + assert list(expr.coords.keys())[0] == "t" + assert expr.nterm == 0 + assert (expr.const.values == [10, 20]).all() + assert expr.is_constant + + +def test_constant_linear_expression_to_polars_2D(m: Model) -> None: + index_a = pd.Index([0, 1], name="a") + index_b = pd.Index([0, 1, 2], name="b") + arr = 
np.array([[10, 20, 30], [40, 50, 60]]) + const = xr.DataArray(data=arr, coords=[index_a, index_b]) + + le_variable = m.add_variables(name="var", coords=[index_a, index_b]) * 1 + const + assert not le_variable.is_constant + le_const = LinearExpression.from_constant(model=m, constant=const) + assert le_const.is_constant + + var_pol = le_variable.to_polars() + const_pol = le_const.to_polars() + assert var_pol.shape == const_pol.shape + assert var_pol.columns == const_pol.columns + assert all(const_pol["const"] == var_pol["const"]) + assert all(const_pol["coeffs"].is_null()) + assert all(const_pol["vars"].is_null()) + + +def test_linear_expression_sanitize(x: Variable, y: Variable, z: Variable) -> None: + expr = 10 * x + y + z + assert isinstance(expr.sanitize(), LinearExpression) + + +def test_merge(x: Variable, y: Variable, z: Variable) -> None: + expr1 = (10 * x + y).sum("dim_0") + expr2 = z.sum("dim_0") + + res = merge([expr1, expr2], cls=LinearExpression) + assert res.nterm == 6 + + res: LinearExpression = merge([expr1, expr2]) # type: ignore + assert isinstance(res, LinearExpression) + + # now concat with same length of terms + expr1 = z.sel(dim_0=0).sum("dim_1") + expr2 = z.sel(dim_0=1).sum("dim_1") + + res = merge([expr1, expr2], dim="dim_1", cls=LinearExpression) + assert res.nterm == 3 + + # now with different length of terms + expr1 = z.sel(dim_0=0, dim_1=slice(0, 1)).sum("dim_1") + expr2 = z.sel(dim_0=1).sum("dim_1") + + res = merge([expr1, expr2], dim="dim_1", cls=LinearExpression) + assert res.nterm == 3 + assert res.sel(dim_1=0).vars[2].item() == -1 + + with pytest.warns(DeprecationWarning): + merge(expr1, expr2) + + +def test_linear_expression_outer_sum(x: Variable, y: Variable) -> None: + expr = x + y + expr2: LinearExpression = sum([x, y]) # type: ignore + assert_linequal(expr, expr2) + + expr = 1 * x + 2 * y + expr2: LinearExpression = sum([1 * x, 2 * y]) # type: ignore + assert_linequal(expr, expr2) + + assert isinstance(expr.sum(), 
LinearExpression) + + +def test_rename(x: Variable, y: Variable, z: Variable) -> None: + expr = 10 * x + y + z + renamed = expr.rename({"dim_0": "dim_5"}) + assert set(renamed.dims) == {"dim_1", "dim_5", TERM_DIM} + assert renamed.nterm == 3 + + renamed = expr.rename({"dim_0": "dim_1", "dim_1": "dim_2"}) + assert set(renamed.dims) == {"dim_1", "dim_2", TERM_DIM} + assert renamed.nterm == 3 + + +@pytest.mark.parametrize("multiple", [1.0, 0.5, 2.0, 0.0]) +def test_cumsum(m: Model, multiple: float) -> None: + # Test cumsum on variable x + var = m.variables["x"] + cumsum = (multiple * var).cumsum() + cumsum.nterm == 2 + + # Test cumsum on sum of variables + expr = m.variables["x"] + m.variables["y"] + cumsum = (multiple * expr).cumsum() + cumsum.nterm == 2 + + +def test_simplify_basic(x: Variable) -> None: + """Test basic simplification with duplicate terms.""" + expr = 2 * x + 3 * x + 1 * x + simplified = expr.simplify() + assert simplified.nterm == 1, f"Expected 1 term, got {simplified.nterm}" + + x_len = len(x.coords["dim_0"]) + # Check that the coefficient is 6 (2 + 3 + 1) + coeffs: np.ndarray = simplified.coeffs.values + assert len(coeffs) == x_len, f"Expected {x_len} coefficients, got {len(coeffs)}" + assert all(coeffs == 6.0), f"Expected coefficient 6.0, got {coeffs[0]}" + + +def test_simplify_multiple_dimensions() -> None: + model = Model() + a_index = pd.Index([0, 1, 2, 3], name="a") + b_index = pd.Index([0, 1, 2], name="b") + coords = [a_index, b_index] + x = model.add_variables(name="x", coords=coords) + + expr = 2 * x + 3 * x + x + # Simplify + simplified = expr.simplify() + assert simplified.nterm == 1, f"Expected 1 term, got {simplified.nterm}" + assert simplified.ndim == 2, f"Expected 2 dimensions, got {simplified.ndim}" + assert all(simplified.coeffs.values.reshape(-1) == 6), ( + f"Expected coefficients of 6, got {simplified.coeffs.values}" + ) + + +def test_simplify_with_different_variables(x: Variable, y: Variable) -> None: + """Test that different 
variables are kept separate.""" + # Create expression: 2*x + 3*x + 4*y + expr = 2 * x + 3 * x + 4 * y + + # Simplify + simplified = expr.simplify() + # Should have 2 terms (one for x with coeff 5, one for y with coeff 4) + assert simplified.nterm == 2, f"Expected 2 terms, got {simplified.nterm}" + + coeffs: list[float] = simplified.coeffs.values.flatten().tolist() + assert set(coeffs) == {5.0, 4.0}, ( + f"Expected coefficients {{5.0, 4.0}}, got {set(coeffs)}" + ) + + +def test_simplify_with_constant(x: Variable) -> None: + """Test that constants are preserved.""" + expr = 2 * x + 3 * x + 10 + + # Simplify + simplified = expr.simplify() + + # Check constant is preserved + assert all(simplified.const.values == 10.0), ( + f"Expected constant 10.0, got {simplified.const.values}" + ) + + # Check coefficients + assert all(simplified.coeffs.values == 5.0), ( + f"Expected coefficient 5.0, got {simplified.coeffs.values}" + ) + + +def test_simplify_cancellation(x: Variable) -> None: + """Test that terms cancel out correctly when coefficients sum to zero.""" + expr = x - x + simplified = expr.simplify() + + assert simplified.nterm == 0, f"Expected 0 terms, got {simplified.nterm}" + assert simplified.coeffs.values.size == 0 + assert simplified.vars.values.size == 0 + + +def test_simplify_partial_cancellation(x: Variable, y: Variable) -> None: + """Test partial cancellation where some terms cancel but others remain.""" + expr = 2 * x - 2 * x + 3 * y + simplified = expr.simplify() + + assert simplified.nterm == 1, f"Expected 1 term, got {simplified.nterm}" + assert all(simplified.coeffs.values == 3.0), ( + f"Expected coefficient 3.0, got {simplified.coeffs.values}" + ) + + +def test_constant_only_expression_mul_dataarray(m: Model) -> None: + const_arr = xr.DataArray([2, 3], dims=["dim_0"]) + const_expr = LinearExpression(const_arr, m) + assert const_expr.is_constant + assert const_expr.nterm == 0 + + data_arr = xr.DataArray([10, 20], dims=["dim_0"]) + expected_const = const_arr 
* data_arr + + result = const_expr * data_arr + assert isinstance(result, LinearExpression) + assert result.is_constant + assert (result.const == expected_const).all() + + result_rev = data_arr * const_expr + assert isinstance(result_rev, LinearExpression) + assert result_rev.is_constant + assert (result_rev.const == expected_const).all() + + +def test_constant_only_expression_mul_linexpr_with_vars(m: Model, x: Variable) -> None: + const_arr = xr.DataArray([2, 3], dims=["dim_0"]) + const_expr = LinearExpression(const_arr, m) + assert const_expr.is_constant + assert const_expr.nterm == 0 + + expr_with_vars = 1 * x + 5 + expected_coeffs = const_arr + expected_const = const_arr * 5 + + result = const_expr * expr_with_vars + assert isinstance(result, LinearExpression) + assert (result.coeffs == expected_coeffs).all() + assert (result.const == expected_const).all() + + result_rev = expr_with_vars * const_expr + assert isinstance(result_rev, LinearExpression) + assert (result_rev.coeffs == expected_coeffs).all() + assert (result_rev.const == expected_const).all() + + +def test_constant_only_expression_mul_constant_only(m: Model) -> None: + const_arr = xr.DataArray([2, 3], dims=["dim_0"]) + const_arr2 = xr.DataArray([4, 5], dims=["dim_0"]) + const_expr = LinearExpression(const_arr, m) + const_expr2 = LinearExpression(const_arr2, m) + assert const_expr.is_constant + assert const_expr2.is_constant + + expected_const = const_arr * const_arr2 + + result = const_expr * const_expr2 + assert isinstance(result, LinearExpression) + assert result.is_constant + assert (result.const == expected_const).all() + + result_rev = const_expr2 * const_expr + assert isinstance(result_rev, LinearExpression) + assert result_rev.is_constant + assert (result_rev.const == expected_const).all() + + +def test_constant_only_expression_mul_linexpr_with_vars_and_const( + m: Model, x: Variable +) -> None: + const_arr = xr.DataArray([2, 3], dims=["dim_0"]) + const_expr = LinearExpression(const_arr, m) + 
assert const_expr.is_constant + + expr_with_vars_and_const = 4 * x + 10 + expected_coeffs = const_arr * 4 + expected_const = const_arr * 10 + + result = const_expr * expr_with_vars_and_const + assert isinstance(result, LinearExpression) + assert not result.is_constant + assert (result.coeffs == expected_coeffs).all() + assert (result.const == expected_const).all() + + result_rev = expr_with_vars_and_const * const_expr + assert isinstance(result_rev, LinearExpression) + assert not result_rev.is_constant + assert (result_rev.coeffs == expected_coeffs).all() + assert (result_rev.const == expected_const).all() + + +class TestJoinParameter: + @pytest.fixture + def m2(self) -> Model: + m = Model() + m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="a") + m.add_variables(coords=[pd.Index([1, 2, 3], name="i")], name="b") + m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="c") + return m + + @pytest.fixture + def a(self, m2: Model) -> Variable: + return m2.variables["a"] + + @pytest.fixture + def b(self, m2: Model) -> Variable: + return m2.variables["b"] + + @pytest.fixture + def c(self, m2: Model) -> Variable: + return m2.variables["c"] + + class TestAddition: + def test_add_join_none_preserves_default( + self, a: Variable, b: Variable + ) -> None: + result_default = a.to_linexpr() + b.to_linexpr() + result_none = a.to_linexpr().add(b.to_linexpr(), join=None) + assert_linequal(result_default, result_none) + + def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_add_expr_join_outer(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_add_expr_join_left(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="left") + assert list(result.data.indexes["i"]) == [0, 
1, 2] + + def test_add_expr_join_right(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().add(b.to_linexpr(), join="right") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_add_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_add_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().add(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: + expr = a.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [0, 1, 2]}) + result = expr.add(const, join="override") + assert list(result.data.indexes["i"]) == [0, 1, 2] + assert (result.const.values == const.values).all() + + def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: + expr_a = 1 * a + 5 + const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) + for join in ("override", "outer", "inner"): + result = expr_a.add(const, join=join) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [6, 7, 8]) + + def test_add_scalar_with_explicit_join(self, a: Variable) -> None: + expr = 1 * a + 5 + result = expr.add(10, join="override") + np.testing.assert_array_equal(result.const.values, [15, 15, 15]) + assert list(result.coords["i"].values) == [0, 1, 2] + + class TestSubtraction: + def test_sub_expr_join_inner(self, a: Variable, b: Variable) -> None: + result = a.to_linexpr().sub(b.to_linexpr(), join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_sub_constant_override(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 
7]}) + result = expr.sub(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [-5, -15, -25]) + + class TestMultiplication: + def test_mul_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_mul_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().mul(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + assert result.coeffs.sel(i=0).item() == 0 + assert result.coeffs.sel(i=1).item() == 2 + assert result.coeffs.sel(i=2).item() == 3 + + def test_mul_expr_with_join_raises(self, a: Variable, b: Variable) -> None: + with pytest.raises(TypeError, match="join parameter is not supported"): + a.to_linexpr().mul(b.to_linexpr(), join="inner") + + class TestDivision: + def test_div_constant_join_inner(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().div(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_div_constant_join_outer(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.to_linexpr().div(const, join="outer") + assert list(result.data.indexes["i"]) == [0, 1, 2, 3] + + def test_div_expr_with_join_raises(self, a: Variable, b: Variable) -> None: + with pytest.raises(TypeError): + a.to_linexpr().div(b.to_linexpr(), join="outer") + + class TestVariableOperations: + def test_variable_add_join(self, a: Variable, b: Variable) -> None: + result = a.add(b, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_sub_join(self, a: Variable, b: Variable) -> None: + result = a.sub(b, join="inner") + assert 
list(result.data.indexes["i"]) == [1, 2] + + def test_variable_mul_join(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_div_join(self, a: Variable) -> None: + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = a.div(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2] + + def test_variable_add_outer_values(self, a: Variable, b: Variable) -> None: + result = a.add(b, join="outer") + assert isinstance(result, LinearExpression) + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.nterm == 2 + + def test_variable_mul_override(self, a: Variable) -> None: + other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) + result = a.mul(other, join="override") + assert isinstance(result, LinearExpression) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) + + def test_variable_div_override(self, a: Variable) -> None: + other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) + result = a.div(other, join="override") + assert isinstance(result, LinearExpression) + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_almost_equal( + result.coeffs.squeeze().values, [0.5, 0.2, 0.1] + ) + + def test_same_shape_add_join_override(self, a: Variable, c: Variable) -> None: + result = a.to_linexpr().add(c.to_linexpr(), join="override") + assert list(result.data.indexes["i"]) == [0, 1, 2] + + class TestMerge: + def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="inner" + ) + assert list(result.data.indexes["i"]) == [1, 2] + + def test_merge_outer_join(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + 
[a.to_linexpr(), b.to_linexpr()], join="outer" + ) + assert set(result.coords["i"].values) == {0, 1, 2, 3} + + def test_merge_join_left(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="left" + ) + assert list(result.data.indexes["i"]) == [0, 1, 2] + + def test_merge_join_right(self, a: Variable, b: Variable) -> None: + result: LinearExpression = merge( + [a.to_linexpr(), b.to_linexpr()], join="right" + ) + assert list(result.data.indexes["i"]) == [1, 2, 3] + + class TestValueVerification: + def test_add_expr_outer_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.add(expr_b, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 15 + assert result.const.sel(i=3).item() == 10 + + def test_add_expr_inner_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.add(expr_b, join="inner") + assert list(result.coords["i"].values) == [1, 2] + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 15 + + def test_add_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = expr.add(const, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == 15 + assert result.const.sel(i=2).item() == 5 + assert result.const.sel(i=3).item() == 20 + + def test_add_constant_inner_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = expr.add(const, join="inner") + assert list(result.coords["i"].values) == [1] + assert result.const.sel(i=1).item() == 15 + + def 
test_add_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.add(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [15, 25, 35]) + + def test_sub_expr_outer_const_values(self, a: Variable, b: Variable) -> None: + expr_a = 1 * a + 5 + expr_b = 2 * b + 10 + result = expr_a.sub(expr_b, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 5 + assert result.const.sel(i=1).item() == -5 + assert result.const.sel(i=2).item() == -5 + assert result.const.sel(i=3).item() == -10 + + def test_mul_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.mul(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [10, 15, 20]) + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) + + def test_mul_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 5 + other = xr.DataArray([2, 3], dims=["i"], coords={"i": [1, 3]}) + result = expr.mul(other, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=0).item() == 0 + assert result.const.sel(i=1).item() == 10 + assert result.const.sel(i=2).item() == 0 + assert result.const.sel(i=3).item() == 0 + assert result.coeffs.squeeze().sel(i=1).item() == 2 + assert result.coeffs.squeeze().sel(i=0).item() == 0 + + def test_div_constant_override_positional(self, a: Variable) -> None: + expr = 1 * a + 10 + other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) + result = expr.div(other, join="override") + assert list(result.coords["i"].values) == [0, 1, 2] + np.testing.assert_array_equal(result.const.values, [5.0, 2.0, 
1.0]) + + def test_div_constant_outer_fill_values(self, a: Variable) -> None: + expr = 1 * a + 10 + other = xr.DataArray([2.0, 5.0], dims=["i"], coords={"i": [1, 3]}) + result = expr.div(other, join="outer") + assert set(result.coords["i"].values) == {0, 1, 2, 3} + assert result.const.sel(i=1).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(i=1).item() == pytest.approx(0.5) + assert result.const.sel(i=0).item() == pytest.approx(10.0) + assert result.coeffs.squeeze().sel(i=0).item() == pytest.approx(1.0) + + class TestQuadratic: + def test_quadratic_add_constant_join_inner( + self, a: Variable, b: Variable + ) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: + quad = a.to_linexpr() * a.to_linexpr() + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [0, 1] + + def test_quadratic_mul_constant_join_inner( + self, a: Variable, b: Variable + ) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] diff --git a/test/test_typing.py b/test/test_typing.py index 2375dc72..566583c2 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -1,8 +1,19 @@ +from collections.abc import Generator + +import pytest import xarray as xr import linopy +@pytest.fixture(autouse=True) +def _use_v1_convention() -> Generator[None, None, None]: + """Use v1 arithmetic convention for all tests in this module.""" + linopy.options["arithmetic_convention"] = "v1" + yield + linopy.options["arithmetic_convention"] = "legacy" + + def test_operations_with_data_arrays_are_typed_correctly() 
-> None: m = linopy.Model() diff --git a/test/test_typing_legacy.py b/test/test_typing_legacy.py new file mode 100644 index 00000000..99a27033 --- /dev/null +++ b/test/test_typing_legacy.py @@ -0,0 +1,25 @@ +import xarray as xr + +import linopy + + +def test_operations_with_data_arrays_are_typed_correctly() -> None: + m = linopy.Model() + + a: xr.DataArray = xr.DataArray([1, 2, 3]) + + v: linopy.Variable = m.add_variables(lower=0.0, name="v") + e: linopy.LinearExpression = v * 1.0 + q = v * v + + _ = a * v + _ = v * a + _ = v + a + + _ = a * e + _ = e * a + _ = e + a + + _ = a * q + _ = q * a + _ = q + a From c39d0d0ddaff237c1f47927c680221436587c0b3 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 19:39:51 +0100 Subject: [PATCH 48/66] Enforce exact user-dim alignment in merge for v1, add escape hatches MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - merge() in v1 mode now pre-validates that shared user-dimension coordinates match exactly, then uses outer join for xr.concat (helper dims like _term/_factor are excluded from the check) - Removed redundant pre-checks from LinearExpression.__add__ and QuadraticExpression.__add__ — merge handles enforcement now - Added scalar fast path in _apply_constant_op (mul/div skip alignment) - Wrapped AlignmentError import in try/except for xarray compat - Fixed missing space in __div__ error message - Added .fillna() as escape hatch option 5 in notebook - Updated merge docstring with convention behavior summary - Added explanatory comments (stacklevel, numpy_to_dataarray filtering) Co-Authored-By: Claude Opus 4.6 --- doc/index.rst | 1 - examples/arithmetic-convention.ipynb | 923 +++++++++++++++++++-------- examples/coordinate-alignment.ipynb | 488 -------------- linopy/common.py | 3 + linopy/expressions.py | 105 +-- test/test_linear_expression.py | 4 +- 6 files changed, 713 insertions(+), 811 deletions(-) delete mode 100644 
examples/coordinate-alignment.ipynb diff --git a/doc/index.rst b/doc/index.rst index 4e6d9922..70b8b439 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -112,7 +112,6 @@ This package is published under MIT license. creating-variables creating-expressions creating-constraints - coordinate-alignment sos-constraints piecewise-linear-constraints piecewise-linear-constraints-tutorial diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index 97e93167..506e09b0 100644 --- a/examples/arithmetic-convention.ipynb +++ b/examples/arithmetic-convention.ipynb @@ -2,46 +2,98 @@ "cells": [ { "cell_type": "markdown", - "id": "c68183ce878b22db", + "id": "intro", "metadata": {}, - "source": "# Arithmetic Convention\n\nlinopy enforces strict defaults for coordinate alignment so that mismatches never silently produce wrong results.\n\n**Rule \u2014 Exact label matching on shared dimensions**\n\nWhen two operands share a dimension, their coordinate labels on that dimension must match exactly (`join=\"exact\"`). A `ValueError` is raised on mismatch.\n\n**Broadcasting** \u2014 When dimensions are *not* shared, operands broadcast freely over the missing dimensions \u2014 for both expressions and constants. This preserves all standard algebraic laws (commutativity, associativity, distributivity).\n\nInspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." + "source": [ + "# Arithmetic Convention\n", + "\n", + "linopy is transitioning to a stricter arithmetic convention for coordinate alignment. This notebook covers:\n", + "\n", + "1. [How to opt in](#how-to-opt-in) to the new behavior\n", + "2. [v1 convention](#v1-convention-the-future-default) — strict coordinate matching (the future default)\n", + "3. [Legacy convention](#legacy-convention-current-default) — the current default behavior\n", + "4. [The `join` parameter](#the-join-parameter) — explicit control over alignment\n", + "5. 
[Migration guide](#migration-guide) — updating your code" + ] }, { "cell_type": "code", - "id": "4251ba8271bff255", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.502763Z", - "start_time": "2026-03-09T19:45:36.697630Z" - } - }, + "execution_count": null, + "id": "imports", + "metadata": {}, + "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", "\n", "import linopy" - ], + ] + }, + { + "cell_type": "markdown", + "id": "opt-in-header", + "metadata": {}, + "source": [ + "## How to opt in\n", + "\n", + "linopy uses a global setting to control arithmetic behavior. The default is `\"legacy\"` (backward-compatible). To enable the new strict convention:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "opt-in", + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "linopy.options[\"arithmetic_convention\"] = \"v1\"" + ] + }, + { + "cell_type": "markdown", + "id": "opt-in-explain", + "metadata": {}, + "source": [ + "Put this at the top of your script, before any arithmetic. Under `\"legacy\"`, all legacy codepaths emit a `LinopyDeprecationWarning` to help you find code that needs updating.\n", + "\n", + "To silence the warnings without migrating yet:\n", + "\n", + "```python\n", + "import warnings\n", + "warnings.filterwarnings('ignore', category=linopy.LinopyDeprecationWarning)\n", + "```\n", + "\n", + "**Rollout plan:**\n", + "- **Now**: `\"legacy\"` is the default — nothing breaks.\n", + "- **linopy v1**: `\"v1\"` becomes the default, `\"legacy\"` is removed." + ] }, { "cell_type": "markdown", - "id": "c9d84bb1c59f2690", + "id": "v1-header", "metadata": {}, "source": [ - "## Setup" + "---\n", + "\n", + "## v1 convention (the future default)\n", + "\n", + "Two rules:\n", + "\n", + "1. **Shared dimensions must match exactly.** When two operands share a dimension, their coordinate labels must be identical. A `ValueError` is raised on mismatch.\n", + "2. 
**Non-shared dimensions broadcast freely.** When dimensions are not shared, operands broadcast over the missing dimensions — for both expressions and constants.\n", + "\n", + "This ensures mismatches never silently produce wrong results, while preserving all standard algebraic laws.\n", + "\n", + "Inspired by [pyoframe](https://github.com/Bravos-Power/pyoframe)." ] }, { "cell_type": "code", - "id": "57506c7b4bf9f4bf", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.555658Z", - "start_time": "2026-03-09T19:45:37.505351Z" - } - }, + "execution_count": null, + "id": "v1-setup", + "metadata": {}, + "outputs": [], "source": [ "m = linopy.Model()\n", "\n", @@ -53,406 +105,725 @@ "y = m.add_variables(lower=0, coords=[time], name=\"y\")\n", "gen = m.add_variables(lower=0, coords=[time, techs], name=\"gen\")\n", "risk = m.add_variables(lower=0, coords=[techs, scenarios], name=\"risk\")" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", - "id": "61636799d26f4d99", + "id": "v1-works-header", "metadata": {}, "source": [ - "## What works by default" + "### What works" ] }, { "cell_type": "code", - "id": "1f7af87e662800c", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.567659Z", - "start_time": "2026-03-09T19:45:37.558480Z" - } - }, + "execution_count": null, + "id": "v1-same-coords", + "metadata": {}, + "outputs": [], "source": [ - "# Same coords \u2014 just works\n", + "# Same coords — just works\n", "x + y" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "code", - "id": "985ade4e21e26271", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.586804Z", - "start_time": "2026-03-09T19:45:37.581356Z" - } - }, + "execution_count": null, + "id": "v1-matching-constant", + "metadata": {}, + "outputs": [], "source": [ "# Constant with matching coords\n", "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", "x * factor" - ], - "outputs": 
[], - "execution_count": null + ] }, { "cell_type": "code", - "id": "8f6a99d864238dbb", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.600600Z", - "start_time": "2026-03-09T19:45:37.592617Z" - } - }, + "execution_count": null, + "id": "v1-broadcast-constant", + "metadata": {}, + "outputs": [], "source": [ - "# Constant with fewer dims \u2014 broadcasts freely\n", + "# Constant with fewer dims — broadcasts freely\n", "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", "gen * cost # cost broadcasts over time" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "code", - "id": "d417bfa628cb280a", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.618719Z", - "start_time": "2026-03-09T19:45:37.608423Z" - } - }, - "source": [ - "# Expression + Expression with non-shared dims \u2014 broadcasts freely\n", - "gen + risk # (time, tech) + (tech, scenario) \u2192 (time, tech, scenario)" - ], + "execution_count": null, + "id": "v1-broadcast-expr", + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# Expression + Expression with non-shared dims — broadcasts freely\n", + "gen + risk # (time, tech) + (tech, scenario) → (time, tech, scenario)" + ] }, { "cell_type": "code", - "id": "400b4084ef94eb35", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.626365Z", - "start_time": "2026-03-09T19:45:37.621393Z" - } - }, + "execution_count": null, + "id": "v1-scalar", + "metadata": {}, + "outputs": [], "source": [ - "# Scalar \u2014 always fine\n", + "# Scalar — always fine\n", "x + 5" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "code", - "id": "2e4640266401ba61", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.642965Z", - "start_time": "2026-03-09T19:45:37.630809Z" - } - }, + "execution_count": null, + "id": "v1-constraint-broadcast", + "metadata": {}, + "outputs": [], "source": [ - "# Constraints \u2014 
RHS with fewer dims broadcasts naturally\n", + "# Constraints — RHS with fewer dims broadcasts naturally\n", "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", "m.add_constraints(gen <= capacity, name=\"cap\") # capacity broadcasts over time" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", - "id": "c4e9c6dbcec7c0d9", + "id": "v1-raises-header", "metadata": {}, - "source": "## What raises an error" + "source": [ + "### What raises an error" + ] }, { "cell_type": "code", - "id": "fe1b95f337be4e9f", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.653963Z", - "start_time": "2026-03-09T19:45:37.648263Z" - } - }, + "execution_count": null, + "id": "v1-mismatch-expr", + "metadata": {}, + "outputs": [], "source": [ - "# Mismatched coordinates on shared dimension\n", "y_short = m.add_variables(\n", " lower=0, coords=[pd.RangeIndex(3, name=\"time\")], name=\"y_short\"\n", ")\n", "\n", "try:\n", - " x + y_short # time coords don't match\n", + " x + y_short # time coords don't match: [0..4] vs [0..2]\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ], - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "id": "5a0bb6e7d4b175c5", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.662586Z", - "start_time": "2026-03-09T19:45:37.658665Z" - } - }, - "source": "# Constant introduces new dimensions \u2014 broadcasts in arithmetic\nprofile = xr.DataArray(\n np.ones((3, 5)), dims=[\"tech\", \"time\"], coords={\"tech\": techs, \"time\": time}\n)\nx + profile # x[time] broadcasts over tech", - "outputs": [], - "execution_count": null - }, - { - "cell_type": "code", - "id": "e0f899f096773d96", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.681087Z", - "start_time": "2026-03-09T19:45:37.677125Z" - } - }, - "source": "# Multiplication with mismatched coordinates\npartial = xr.DataArray([10, 20, 30], dims=[\"time\"], 
coords={\"time\": [0, 1, 2]})\ntry:\n x * partial # time coords [0..4] vs [0,1,2]\nexcept ValueError as e:\n print(\"ValueError:\", e)", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "ValueError: cannot align objects with join='exact' where index/labels/sizes are not equal along these coordinates (dimensions): 'time' ('time',)\n", - "Use .add()/.sub()/.mul()/.div() with an explicit join= parameter:\n", - " .add(other, join=\"inner\") # intersection of coordinates\n", - " .add(other, join=\"outer\") # union of coordinates (with fill)\n", - " .add(other, join=\"left\") # keep left operand's coordinates\n", - " .add(other, join=\"override\") # positional alignment\n" - ] - } - ], - "execution_count": null - }, - { - "cell_type": "code", - "id": "aa03d3184a0e8b65", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.697975Z", - "start_time": "2026-03-09T19:45:37.694178Z" - } - }, - "source": [ - "# Constraint RHS with mismatched coordinates\n", - "partial_rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "v1-mismatch-constant", + "metadata": {}, + "outputs": [], + "source": [ + "partial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", "\n", "try:\n", - " x <= partial_rhs\n", + " x * partial # time coords [0..4] vs [0,1,2]\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ], + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "v1-mismatch-constraint", + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "try:\n", + " x <= partial # constraint RHS doesn't cover all coords\n", + "except ValueError as e:\n", + " print(\"ValueError:\", e)" + ] }, { "cell_type": "markdown", - "id": "64a6f983ce55547e", + "id": "v1-nan-header", "metadata": {}, "source": [ - "## Escape hatches\n", + "### NaN propagation\n", "\n", - "When coordinates don't 
match, linopy provides several ways to state your intent explicitly." + "Under v1, NaN values in constants **propagate** through arithmetic — they are not silently replaced with zeros. This makes missing data visible:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "v1-nan", + "metadata": {}, + "outputs": [], + "source": [ + "vals = xr.DataArray([1.0, np.nan, 3.0, 4.0, 5.0], dims=[\"time\"], coords={\"time\": time})\n", + "result = x + vals\n", + "print(\"const:\", result.const.values) # NaN at position 1" ] }, { "cell_type": "markdown", - "id": "709150bc01fc8c3", + "id": "v1-escape-header", + "metadata": {}, + "source": "### Escape hatches\n\nWhen coordinates don't match or your data contains NaN, you have several options:" + }, + { + "cell_type": "markdown", + "id": "v1-sel", "metadata": {}, "source": [ - "### 1. `.sel()` \u2014 Subset before operating\n", + "**Option 1: `.sel()` — subset before operating**\n", "\n", - "The cleanest way to restrict to matching coordinates. No need for an inner join \u2014 explicitly select what you want." + "The cleanest way. 
Explicitly select matching coordinates:" ] }, { "cell_type": "code", - "id": "b4f5bf23a8ee17d5", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.712616Z", - "start_time": "2026-03-09T19:45:37.704269Z" - } - }, + "execution_count": null, + "id": "v1-sel-example", + "metadata": {}, + "outputs": [], + "source": [ + "x.sel(time=[0, 1, 2]) + y_short" + ] + }, + { + "cell_type": "markdown", + "id": "v1-join", + "metadata": {}, + "source": [ + "**Option 2: Named methods with `join=`**\n", + "\n", + "All arithmetic operations have named-method equivalents (`.add()`, `.sub()`, `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`) that accept a `join` parameter:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "v1-join-example", + "metadata": {}, + "outputs": [], "source": [ - "x.sel(time=[0, 1, 2]) + y_short # select matching coords first" - ], + "x.add(y_short, join=\"inner\") # intersection: time [0, 1, 2]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "v1-join-outer", + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "x.mul(partial, join=\"left\") # keep x's coords, fill missing with 0" + ] }, { "cell_type": "markdown", - "id": "f12b0cb6d0e31651", + "id": "v1-assign-coords", "metadata": {}, - "source": "### 2. 
Named methods with `join=`\n\nAll arithmetic operations have named-method equivalents that accept a `join` parameter:\n\n| `join` | Coordinates kept | Fill |\n|--------|-----------------|------|\n| `\"exact\"` | Must match | `ValueError` if different |\n| `\"inner\"` | Intersection | \u2014 |\n| `\"outer\"` | Union | Zero (arithmetic) / NaN (constraints) |\n| `\"left\"` | Left operand's | Zero / NaN for missing right |\n| `\"right\"` | Right operand's | Zero for missing left |\n| `\"override\"` | Left operand's | Positional alignment |" + "source": [ + "**Option 3: `.assign_coords()` — positional alignment**\n", + "\n", + "When two operands have the same shape but different labels, relabel one to match the other:" + ] }, { "cell_type": "code", - "id": "78c967671819ef0c", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.746402Z", - "start_time": "2026-03-09T19:45:37.720673Z" - } - }, + "execution_count": null, + "id": "v1-assign-coords-example", + "metadata": {}, + "outputs": [], "source": [ - "m2 = linopy.Model()\n", + "z = m.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", "\n", - "i_a = pd.Index([0, 1, 2], name=\"i\")\n", - "i_b = pd.Index([1, 2, 3], name=\"i\")\n", + "# z has time=[5..9], x has time=[0..4] — same shape, different labels\n", + "x + z.assign_coords(time=x.coords[\"time\"])" + ] + }, + { + "cell_type": "markdown", + "id": "v1-align", + "metadata": {}, + "source": [ + "**Option 4: `linopy.align()` — multi-operand pre-alignment**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "v1-align-example", + "metadata": {}, + "outputs": [], + "source": [ + "x_aligned, y_short_aligned = linopy.align(x, y_short, join=\"outer\")\n", + "x_aligned + y_short_aligned" + ] + }, + { + "cell_type": "markdown", + "id": "a2wmuj3o3rl", + "metadata": {}, + "source": "**Option 5: `.fillna()` — handle NaN in constants**\n\nUnder v1, NaN propagates through arithmetic. 
If your data has NaN values that represent \"no effect\" (e.g., missing cost data that should be zero), fill them explicitly before operating:\n\n```python\n# Addition/subtraction: fill with 0 (additive identity)\nx + data_with_nans.fillna(0)\n\n# Multiplication: fill with 1 to preserve coefficients, or 0 to zero them out\nx * scaling_factors.fillna(1) # NaN means \"no scaling\"\nx * mask.fillna(0) # NaN means \"exclude\"\n\n# Division: fill with 1 (multiplicative identity)\nx / divisors.fillna(1)\n```" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "os5ovkgifp", + "metadata": {}, + "outputs": [], + "source": [ + "# NaN propagates by default\n", + "vals_with_nan = xr.DataArray(\n", + " [1.0, np.nan, 3.0, 4.0, 5.0], dims=[\"time\"], coords={\"time\": time}\n", + ")\n", + "print(\"With NaN: \", (x + vals_with_nan).const.values)\n", + "\n", + "# Fill explicitly to get legacy-like behavior\n", + "print(\"fillna(0): \", (x + vals_with_nan.fillna(0)).const.values)" + ] + }, + { + "cell_type": "markdown", + "id": "v1-algebraic", + "metadata": {}, + "source": [ + "### Algebraic properties\n", + "\n", + "All standard algebraic laws hold under v1. You can freely refactor expressions without worrying about dimension ordering.\n", + "\n", + "| Property | Example |\n", + "|---|---|\n", + "| **Commutativity of +** | `x + y == y + x` |\n", + "| **Commutativity of ×** | `x * c == c * x` |\n", + "| **Associativity of +** | `(x + y) + z == x + (y + z)` |\n", + "| **Scalar distributivity** | `s * (x + y) == s*x + s*y` |\n", + "| **Constant distributivity** | `c[B] * (x[A] + g[A,B]) == c[B]*x[A] + c[B]*g[A,B]` |\n", + "| **Additive identity** | `x + 0 == x` |\n", + "| **Multiplicative identity** | `x * 1 == x` |\n", + "| **Double negation** | `-(-x) == x` |\n", + "| **Zero** | `x * 0 == 0` |\n", + "\n", + "**Caveat:** These guarantees only hold for operations involving at least one linopy object. 
Operations between plain constants (`DataArray + DataArray`) use their library's own rules. To enforce strict matching for xarray operations too:\n", + "\n", + "```python\n", + "xr.set_options(arithmetic_join=\"exact\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "legacy-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Legacy convention (current default)\n", + "\n", + "The legacy convention is the current default (`linopy.options[\"arithmetic_convention\"] = \"legacy\"`). It uses heuristics to handle coordinate mismatches silently. This section describes its behavior for users who haven't migrated yet.\n", + "\n", + "Under legacy, all arithmetic operations emit a `LinopyDeprecationWarning`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "legacy-switch", + "metadata": {}, + "outputs": [], + "source": [ + "import warnings\n", "\n", - "a = m2.add_variables(coords=[i_a], name=\"a\")\n", - "b = m2.add_variables(coords=[i_b], name=\"b\")\n", + "linopy.options[\"arithmetic_convention\"] = \"legacy\"\n", + "warnings.filterwarnings(\"ignore\", category=linopy.LinopyDeprecationWarning)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "legacy-setup", + "metadata": {}, + "outputs": [], + "source": [ + "m2 = linopy.Model()\n", + "time = pd.RangeIndex(5, name=\"time\")\n", + "x2 = m2.add_variables(lower=0, coords=[time], name=\"x\")\n", + "y2_short = m2.add_variables(\n", + " lower=0, coords=[pd.RangeIndex(3, name=\"time\")], name=\"y_short\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "legacy-size-aware", + "metadata": {}, + "source": [ + "### Size-aware alignment\n", "\n", - "print(\"inner:\", list(a.add(b, join=\"inner\").coords[\"i\"].values)) # [1, 2]\n", - "print(\"outer:\", list(a.add(b, join=\"outer\").coords[\"i\"].values)) # [0, 1, 2, 3]\n", - "print(\"left: \", list(a.add(b, join=\"left\").coords[\"i\"].values)) # [0, 1, 2]\n", - "print(\"right:\", list(a.add(b, 
join=\"right\").coords[\"i\"].values)) # [1, 2, 3]" - ], + "When two operands share a dimension:\n", + "- **Same size**: positional alignment (labels ignored, left operand's labels kept)\n", + "- **Different size**: left-join (reindex to the left operand's coordinates, fill with zeros)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "legacy-subset", + "metadata": {}, + "outputs": [], + "source": [ + "# Different size — left join, fill missing with 0\n", + "x2 + y2_short # y_short drops out at time 3, 4" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "legacy-same-size", + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# Same size — positional alignment (labels ignored!)\n", + "z2 = m2.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", + "x2 + z2 # x has time=[0..4], z has time=[5..9], but same size → positional match" + ] }, { "cell_type": "markdown", - "id": "424610ceccde798a", - "metadata": {}, - "source": "### 3. `linopy.align()` \u2014 Explicit pre-alignment\n\nFor complex multi-operand alignment. 
Linopy types automatically use correct sentinel fill values (labels/vars=-1, coeffs=NaN) while `fill_value` applies to `const`:" - }, - { - "cell_type": "code", - "id": "23f414e973e33c34", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.765307Z", - "start_time": "2026-03-09T19:45:37.756074Z" - } - }, - "source": "a_aligned, b_aligned = linopy.align(a, b, join=\"outer\")\na_aligned + b_aligned", - "outputs": [ - { - "data": { - "text/plain": [ - "LinearExpression [i: 4]:\n", - "------------------------\n", - "[0]: +1 a[0]\n", - "[1]: +1 a[1] + 1 b[1]\n", - "[2]: +1 a[2] + 1 b[2]\n", - "[3]: +1 b[3]" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "execution_count": null + "id": "legacy-nan", + "metadata": {}, + "source": [ + "### NaN filling\n", + "\n", + "NaN values in constants are silently replaced with operation-specific neutral elements:\n", + "- Addition/subtraction: NaN → 0\n", + "- Multiplication: NaN → 0 (zeroes out the variable)\n", + "- Division: NaN → 1 (no scaling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "legacy-nan-fill", + "metadata": {}, + "outputs": [], + "source": [ + "vals = xr.DataArray([1.0, np.nan, 3.0, 4.0, 5.0], dims=[\"time\"], coords={\"time\": time})\n", + "result = x2 + vals\n", + "print(\"const:\", result.const.values) # NaN replaced with 0" + ] }, { "cell_type": "markdown", - "id": "e64caf260c82ea6d", + "id": "legacy-constraint-rhs", + "metadata": {}, + "source": [ + "### Constraint RHS\n", + "\n", + "In constraints, the RHS is reindexed to the expression's coordinates. 
Missing positions become NaN, which tells linopy to skip those constraints:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "legacy-constraint", "metadata": {}, - "source": "## Positional alignment\n\nSometimes two operands have the same shape but different coordinate labels \u2014 e.g., data from different sources, or time series with different start dates. The exact join will raise. There are several ways to handle this:\n\n### Option 1: `.assign_coords()` (recommended)\n\nExplicitly relabel one operand to match the other. This is the clearest \u2014 the reader sees exactly which mapping is intended." + "outputs": [], + "source": [ + "rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", + "con = x2 <= rhs # constraint only at time 0, 1, 2; NaN at time 3, 4\n", + "con" + ] }, { "cell_type": "code", - "id": "9a513a6be9e5925e", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.787197Z", - "start_time": "2026-03-09T19:45:37.776535Z" - } - }, - "source": "c = m2.add_variables(coords=[[\"x\", \"y\", \"z\"]], name=\"c\")\nd = m2.add_variables(coords=[[\"p\", \"q\", \"r\"]], name=\"d\")\n\n# Relabel d's coordinates to match c\nc + d.assign_coords(dim_0=c.coords[\"dim_0\"])", + "execution_count": null, + "id": "legacy-restore-v1", + "metadata": {}, "outputs": [], - "execution_count": null + "source": [ + "# Switch back to v1 for the rest of the notebook\n", + "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", + "warnings.resetwarnings()" + ] }, { "cell_type": "markdown", - "id": "262eaf85fa44e152", - "metadata": { - "ExecuteTime": { - "end_time": "2026-03-09T19:45:37.803806Z", - "start_time": "2026-03-09T19:45:37.795935Z" - } - }, - "source": "### Option 2: `join=\"override\"`\n\nUses the left operand's coordinates positionally. Shorter, but less explicit about the mapping. Requires same size on the shared dimension." 
+ "id": "join-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## The `join` parameter\n", + "\n", + "Both conventions support explicit `join=` on named methods. This overrides the default behavior and works identically in both modes.\n", + "\n", + "| `join` | Coordinates kept | Fill behavior |\n", + "|--------|-----------------|---------------|\n", + "| `\"exact\"` | Must match | `ValueError` if different |\n", + "| `\"inner\"` | Intersection | No fill needed |\n", + "| `\"outer\"` | Union | Fill with neutral element |\n", + "| `\"left\"` | Left operand's | Fill missing right |\n", + "| `\"right\"` | Right operand's | Fill missing left |\n", + "| `\"override\"` | Left operand's (positional) | Positional alignment |" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "join-setup", + "metadata": {}, + "outputs": [], + "source": [ + "m3 = linopy.Model()\n", + "\n", + "i_a = pd.Index([0, 1, 2], name=\"i\")\n", + "i_b = pd.Index([1, 2, 3], name=\"i\")\n", + "\n", + "a = m3.add_variables(coords=[i_a], name=\"a\")\n", + "b = m3.add_variables(coords=[i_b], name=\"b\")" + ] }, { "cell_type": "code", - "id": "8lk83w4yydw", - "source": "c.add(d, join=\"override\")", + "execution_count": null, + "id": "join-inner", "metadata": {}, + "outputs": [], + "source": [ + "# Inner join — intersection (i=1, 2)\n", + "a.add(b, join=\"inner\")" + ] + }, + { + "cell_type": "code", "execution_count": null, - "outputs": [] + "id": "join-outer", + "metadata": {}, + "outputs": [], + "source": [ + "# Outer join — union (i=0, 1, 2, 3)\n", + "a.add(b, join=\"outer\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "join-left", + "metadata": {}, + "outputs": [], + "source": [ + "# Left join — keep a's coords (i=0, 1, 2)\n", + "a.add(b, join=\"left\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "join-right", + "metadata": {}, + "outputs": [], + "source": [ + "# Right join — keep b's coords (i=1, 2, 3)\n", + 
"a.add(b, join=\"right\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "join-override", + "metadata": {}, + "outputs": [], + "source": [ + "# Override — positional (0↔1, 1↔2, 2↔3), uses a's labels\n", + "a.add(b, join=\"override\")" + ] }, { "cell_type": "markdown", - "id": "ysdlzpp192", - "source": "**Prefer `.assign_coords()`** \u2014 it makes the intent explicit and keeps coordinate metadata intact. Use `join=\"override\"` as a shorthand when the positional mapping is obvious.", - "metadata": {} + "id": "join-mul", + "metadata": {}, + "source": [ + "The same `join` parameter works on `.mul()`, `.div()`, `.le()`, `.ge()`, `.eq()`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "join-mul-example", + "metadata": {}, + "outputs": [], + "source": [ + "const = xr.DataArray([2, 3, 4], dims=[\"i\"], coords={\"i\": [1, 2, 3]})\n", + "\n", + "# Multiply, keeping only shared coords\n", + "a.mul(const, join=\"inner\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "join-constraint", + "metadata": {}, + "outputs": [], + "source": [ + "# Constraint with left join — only a's coords, NaN at missing RHS positions\n", + "rhs = xr.DataArray([10, 20], dims=[\"i\"], coords={\"i\": [0, 1]})\n", + "a.le(rhs, join=\"left\")" + ] }, { "cell_type": "markdown", - "id": "cd0ef5ca04e57be", + "id": "migration-header", "metadata": {}, "source": [ - "## Working with pandas\n", + "---\n", + "\n", + "## Migration guide\n", "\n", - "Under the strict convention, pandas objects must have **named indices** to avoid dimension name mismatches. 
A `pd.Series` without a named index becomes `dim_0` and will fail the exact join against a named variable dimension.\n", + "To migrate from legacy to v1:\n", + "\n", + "### Step 1: Enable v1 and run your code\n", "\n", "```python\n", - "# Bad \u2014 index name is None, becomes \"dim_0\"\n", - "cost = pd.Series([10, 20], index=[\"wind\", \"solar\"])\n", + "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", + "```\n", "\n", - "# Good \u2014 explicit dimension name\n", - "cost = pd.Series([10, 20], index=pd.Index([\"wind\", \"solar\"], name=\"tech\"))\n", + "Any code that relied on legacy alignment will now raise `ValueError` with a helpful message suggesting which `join=` to use.\n", + "\n", + "### Step 2: Fix each error\n", + "\n", + "Common patterns:\n", + "\n", + "| Legacy code (silent) | v1 equivalent (explicit) |\n", + "|---|---|\n", + "| `x + subset_constant` | `x.add(subset_constant, join=\"left\")` |\n", + "| `x + y` (same size, different labels) | `x + y.assign_coords(time=x.coords[\"time\"])` |\n", + "| `x <= partial_rhs` | `x.le(partial_rhs, join=\"left\")` |\n", + "| `expr + expr` (mismatched coords) | `expr.add(other, join=\"outer\")` or `.sel()` first |\n", + "\n", + "### Step 3: Handle NaN\n", + "\n", + "Under legacy, NaN in constants was silently replaced with 0. Under v1, NaN propagates. If your data contains NaN that should be treated as zero, use `.fillna(0)` explicitly:\n", + "\n", + "```python\n", + "# Legacy: NaN silently became 0\n", + "x + data_with_nans\n", + "\n", + "# v1: be explicit\n", + "x + data_with_nans.fillna(0)\n", "```\n", "\n", - "Consider using `force_dim_names=True` on the model to catch unnamed dimension issues at variable creation time." 
+ "### Step 4: Pandas index names\n", + "\n", + "Under v1, pandas objects must have **named indices** to align properly with linopy variables:\n", + "\n", + "```python\n", + "# Will fail — unnamed index becomes \"dim_0\"\n", + "cost = pd.Series([10, 20], index=[\"wind\", \"solar\"])\n", + "\n", + "# Works — explicit dimension name\n", + "cost = pd.Series([10, 20], index=pd.Index([\"wind\", \"solar\"], name=\"tech\"))\n", + "```" ] }, { "cell_type": "markdown", - "id": "f0c3e862b0430c11", + "id": "practical-header", "metadata": {}, - "source": "## Summary\n\n| Situation | Behavior | How to handle |\n|---|---|---|\n| Shared dims, matching coords | \u2713 Proceeds | `x + y` |\n| Non-shared dims, expr + expr | \u2713 Broadcasts | `gen[time,tech] + risk[tech,scenario]` |\n| Constant with subset dims | \u2713 Broadcasts | `cost[tech] * gen[time,tech]` |\n| Constant introduces new dims | \u2713 Broadcasts | `x[time] + profile[time,tech]` |\n| Shared dims, mismatching coords | \u2717 Raises | `.sel()` or `.add(y, join=\"outer\")` |\n| Pandas without named index | \u2717 Raises on dim mismatch | Name the index |" + "source": [ + "---\n", + "\n", + "## Practical example\n", + "\n", + "A generation dispatch model demonstrating both matching coords and explicit joins." + ] }, { - "cell_type": "markdown", - "id": "d56kb3o89nb", - "source": "## Algebraic Properties\n\nAll standard algebraic laws hold for linopy arithmetic. 
This means you can freely refactor expressions without worrying about dimension ordering.\n\nLet `x[A]`, `y[A]`, `z[A]` be linopy variables with matching dims, `g[A,B]` a variable with extra dims, `c[B]` a constant (DataArray), and `s` a scalar.\n\n| Property | Example |\n|---|---|\n| **Commutativity of +** | `x + y == y + x` |\n| **Commutativity of \u00d7** | `x * c == c * x` |\n| **Associativity of +** | `(x + y) + z == x + (y + z)` |\n| **Associativity with constant** | `(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])` |\n| **Scalar distributivity** | `s * (x + y) == s*x + s*y` |\n| **Constant distributivity** | `c[B] * (x[A] + g[A,B]) == c[B]*x[A] + c[B]*g[A,B]` |\n| **Additive identity** | `x + 0 == x` |\n| **Multiplicative identity** | `x * 1 == x` |\n| **Negation** | `x - y == x + (-y)` |\n| **Double negation** | `-(-x) == x` |\n| **Zero** | `x * 0 == 0` |\n\n### Limitation: constant preparation\n\nThese guarantees only hold for operations that involve at least one linopy object. Operations between plain constants (`DataArray + DataArray`, `Series + Series`) happen **outside** linopy and use their library's own alignment rules \u2014 see the \"Preparing constants\" section above. To maintain algebraic consistency end-to-end, convert constants to `xr.DataArray` with explicit coordinates early and consider setting `xr.set_options(arithmetic_join=\"exact\")`.", - "metadata": {} + "cell_type": "code", + "execution_count": null, + "id": "practical-setup", + "metadata": {}, + "outputs": [], + "source": [ + "m4 = linopy.Model()\n", + "\n", + "hours = pd.RangeIndex(24, name=\"hour\")\n", + "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", + "\n", + "gen = m4.add_variables(lower=0, coords=[hours, techs], name=\"gen\")" + ] }, { - "cell_type": "markdown", - "id": "e7u7uhbm1dl", - "source": "## Broadcasting in constraints\n\nBroadcasting is allowed everywhere, including constraints. 
This can lead to two situations worth being aware of:\n\n| Constraint type | Example | What happens | Feedback |\n|---|---|---|---|\n| `<=` / `>=` | `x[time] <= rhs[time, scenario]` | Creates one constraint per (time, scenario). Only the tightest bound is active \u2014 the rest are redundant. | No issue \u2014 solver ignores slack constraints. |\n| `==` | `x[time] == rhs[time, scenario]` | Creates one equality per (time, scenario). If `rhs` differs across `scenario`, the variable must simultaneously equal multiple values. | Solver reports **infeasible** \u2014 clear feedback. |\n\nlinopy does **not** raise an error in these cases because:\n- Redundant inequality constraints are harmless (just slightly wasteful).\n- Infeasible equality constraints are caught by the solver with a clear diagnostic.\n- Blocking these would break algebraic equivalences \u2014 e.g., `x <= rhs` must behave the same as `x - rhs <= 0`, which involves arithmetic broadcasting.", - "metadata": {} + "cell_type": "code", + "execution_count": null, + "id": "practical-capacity", + "metadata": {}, + "outputs": [], + "source": [ + "# Capacity limits — constant broadcasts over hours\n", + "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", + "m4.add_constraints(gen <= capacity, name=\"capacity_limit\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "practical-solar", + "metadata": {}, + "outputs": [], + "source": [ + "# Solar availability — full 24h profile, matching coords\n", + "solar_avail = np.zeros(24)\n", + "solar_avail[6:19] = 100 * np.sin(np.linspace(0, np.pi, 13))\n", + "solar_availability = xr.DataArray(solar_avail, dims=[\"hour\"], coords={\"hour\": hours})\n", + "\n", + "solar_gen = gen.sel(tech=\"solar\")\n", + "m4.add_constraints(solar_gen <= solar_availability, name=\"solar_avail\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "practical-peak", + "metadata": {}, + "outputs": [], + "source": [ + "# 
Peak demand — only applies to hours 8-20, use join=\"inner\"\n", + "peak_hours = pd.RangeIndex(8, 21, name=\"hour\")\n", + "peak_demand = xr.DataArray(\n", + " np.full(len(peak_hours), 120.0), dims=[\"hour\"], coords={\"hour\": peak_hours}\n", + ")\n", + "\n", + "total_gen = gen.sum(\"tech\")\n", + "m4.add_constraints(total_gen.ge(peak_demand, join=\"inner\"), name=\"peak_demand\")" + ] }, { "cell_type": "markdown", - "id": "bpoepi5bcn8", - "source": "## Preparing constants\n\nlinopy enforces exact matching for all operations involving linopy objects. However, operations between plain constants (DataArrays, pandas, numpy) **before** they enter linopy use their own alignment rules, which can silently produce wrong results:\n\n| Library | Default alignment | Risk |\n|---|---|---|\n| **xarray** | Inner join \u2014 drops mismatched coords | Silent data loss |\n| **pandas** | Outer join \u2014 fills with NaN | Silent NaN propagation |\n| **numpy** | Positional \u2014 no coord checks | Wrong results if shapes match by accident |\n\nTo protect xarray operations, set the global arithmetic join to `\"exact\"`:\n\n```python\nxr.set_options(arithmetic_join=\"exact\")\n```\n\nFor pandas and numpy, there is no equivalent setting \u2014 prepare constants carefully and convert to `xr.DataArray` with explicit coords early.", - "metadata": {} + "id": "summary", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Summary\n", + "\n", + "| | v1 (future default) | Legacy (current default) |\n", + "|---|---|---|\n", + "| **Mismatched coords** | `ValueError` | Silent left-join / override |\n", + "| **Same-size different labels** | `ValueError` | Positional alignment |\n", + "| **NaN in constants** | Propagates | Filled with 0 |\n", + "| **Explicit join** | `.add(x, join=...)` | `.add(x, join=...)` |\n", + "| **Setting** | `options[\"arithmetic_convention\"] = \"v1\"` | `options[\"arithmetic_convention\"] = \"legacy\"` |" + ] } ], "metadata": { "kernelspec": { - "display_name": 
"Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "name": "python", - "version": "3.11.11" + "version": "3.11.0" } }, "nbformat": 4, diff --git a/examples/coordinate-alignment.ipynb b/examples/coordinate-alignment.ipynb deleted file mode 100644 index 1547bd9d..00000000 --- a/examples/coordinate-alignment.ipynb +++ /dev/null @@ -1,488 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Coordinate Alignment\n", - "\n", - "Since linopy builds on xarray, coordinate alignment matters when combining variables or expressions that live on different coordinates. By default, linopy aligns operands automatically and fills missing entries with sensible defaults. This guide shows how alignment works and how to control it with the ``join`` parameter." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "import xarray as xr\n", - "\n", - "import linopy" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Default Alignment Behavior\n", - "\n", - "When two operands share a dimension but have different coordinates, linopy keeps the **larger** (superset) coordinate range and fills missing positions with zeros (for addition) or zero coefficients (for multiplication)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "m = linopy.Model()\n", - "\n", - "time = pd.RangeIndex(5, name=\"time\")\n", - "x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", - "\n", - "subset_time = pd.RangeIndex(3, name=\"time\")\n", - "y = m.add_variables(lower=0, coords=[subset_time], name=\"y\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Adding ``x`` (5 time steps) and ``y`` (3 time steps) gives an expression over all 5 time steps. 
Where ``y`` has no entry (time 3, 4), the coefficient is zero — i.e. ``y`` simply drops out of the sum at those positions." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "x + y" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The same applies when multiplying by a constant that covers only a subset of coordinates. Missing positions get a coefficient of zero:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "factor = xr.DataArray([2, 3, 4], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", - "x * factor" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Adding a constant subset also fills missing coordinates with zero:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "x + factor" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Constraints with Subset RHS\n", - "\n", - "For constraints, missing right-hand-side values are filled with ``NaN``, which tells linopy to **skip** the constraint at those positions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", - "con = x <= rhs\n", - "con" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The constraint only applies at time 0, 1, 2. At time 3 and 4 the RHS is ``NaN``, so no constraint is created." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "### Same-Shape Operands: Positional Alignment\n\nWhen two operands have the **same shape** on a shared dimension, linopy uses **positional alignment** by default — coordinate labels are ignored and the left operand's labels are kept. 
This is a performance optimization but can be surprising:" - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "offset_const = xr.DataArray(\n", - " [10, 20, 30, 40, 50], dims=[\"time\"], coords={\"time\": [5, 6, 7, 8, 9]}\n", - ")\n", - "x + offset_const" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "Even though ``offset_const`` has coordinates ``[5, 6, 7, 8, 9]`` and ``x`` has ``[0, 1, 2, 3, 4]``, the result uses ``x``'s labels. The values are aligned by **position**, not by label. The same applies when adding two variables or expressions of identical shape:" - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "z = m.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", - "x + z" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "``x`` (time 0–4) and ``z`` (time 5–9) share no coordinate labels, yet the result has 5 entries under ``x``'s coordinates — because they have the same shape, positions are matched directly.\n\nTo force **label-based** alignment, pass an explicit ``join``:" - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "x.add(z, join=\"outer\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "With ``join=\"outer\"``, the result spans all 10 time steps (union of 0–4 and 5–9), filling missing positions with zeros. This is the correct label-based alignment. The same-shape positional shortcut is equivalent to ``join=\"override\"`` — see below." - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The ``join`` Parameter\n", - "\n", - "For explicit control over alignment, use the ``.add()``, ``.sub()``, ``.mul()``, and ``.div()`` methods with a ``join`` parameter. 
The supported values follow xarray conventions:\n", - "\n", - "- ``\"inner\"`` — intersection of coordinates\n", - "- ``\"outer\"`` — union of coordinates (with fill)\n", - "- ``\"left\"`` — keep left operand's coordinates\n", - "- ``\"right\"`` — keep right operand's coordinates\n", - "- ``\"override\"`` — positional alignment, ignore coordinate labels\n", - "- ``\"exact\"`` — coordinates must match exactly (raises on mismatch)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "m2 = linopy.Model()\n", - "\n", - "i_a = pd.Index([0, 1, 2], name=\"i\")\n", - "i_b = pd.Index([1, 2, 3], name=\"i\")\n", - "\n", - "a = m2.add_variables(coords=[i_a], name=\"a\")\n", - "b = m2.add_variables(coords=[i_b], name=\"b\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Inner join** — only shared coordinates (i=1, 2):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.add(b, join=\"inner\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Outer join** — union of coordinates (i=0, 1, 2, 3):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.add(b, join=\"outer\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Left join** — keep left operand's coordinates (i=0, 1, 2):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.add(b, join=\"left\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Right join** — keep right operand's coordinates (i=1, 2, 3):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.add(b, join=\"right\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "**Override** — positional alignment, ignore 
coordinate labels. The result uses the left operand's coordinates. Here ``a`` has i=[0, 1, 2] and ``b`` has i=[1, 2, 3], so positions are matched as 0↔1, 1↔2, 2↔3:" - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.add(b, join=\"override\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Multiplication with ``join``\n", - "\n", - "The same ``join`` parameter works on ``.mul()`` and ``.div()``. When multiplying by a constant that covers a subset, ``join=\"inner\"`` restricts the result to shared coordinates only, while ``join=\"left\"`` fills missing values with zero:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "const = xr.DataArray([2, 3, 4], dims=[\"i\"], coords={\"i\": [1, 2, 3]})\n", - "\n", - "a.mul(const, join=\"inner\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.mul(const, join=\"left\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Alignment in Constraints\n", - "\n", - "The ``.le()``, ``.ge()``, and ``.eq()`` methods create constraints with explicit coordinate alignment. They accept the same ``join`` parameter:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rhs = xr.DataArray([10, 20], dims=[\"i\"], coords={\"i\": [0, 1]})\n", - "\n", - "a.le(rhs, join=\"inner\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With ``join=\"inner\"``, the constraint only exists at the intersection (i=0, 1). 
Compare with ``join=\"left\"``:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.le(rhs, join=\"left\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With ``join=\"left\"``, the result covers all of ``a``'s coordinates (i=0, 1, 2). At i=2, where the RHS has no value, the RHS becomes ``NaN`` and the constraint is masked out.\n", - "\n", - "The same methods work on expressions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expr = 2 * a + 1\n", - "expr.eq(rhs, join=\"inner\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "## Practical Example\n\nConsider a generation dispatch model where solar availability follows a daily profile and a minimum demand constraint only applies during peak hours." - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "m3 = linopy.Model()\n", - "\n", - "hours = pd.RangeIndex(24, name=\"hour\")\n", - "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", - "\n", - "gen = m3.add_variables(lower=0, coords=[hours, techs], name=\"gen\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Capacity limits apply to all hours and techs — standard broadcasting handles this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", - "m3.add_constraints(gen <= capacity, name=\"capacity_limit\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "For solar, we build a full 24-hour availability profile — zero at night, sine-shaped during daylight (hours 6–18). 
Since this covers all hours, standard alignment works directly and solar is properly constrained to zero at night:" - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "solar_avail = np.zeros(24)\n", - "solar_avail[6:19] = 100 * np.sin(np.linspace(0, np.pi, 13))\n", - "solar_availability = xr.DataArray(solar_avail, dims=[\"hour\"], coords={\"hour\": hours})\n", - "\n", - "solar_gen = gen.sel(tech=\"solar\")\n", - "m3.add_constraints(solar_gen <= solar_availability, name=\"solar_avail\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "Now suppose a minimum demand of 120 MW must be met, but only during peak hours (8–20). The demand array covers a subset of hours, so we use ``join=\"inner\"`` to restrict the constraint to just those hours:" - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "peak_hours = pd.RangeIndex(8, 21, name=\"hour\")\n", - "peak_demand = xr.DataArray(\n", - " np.full(len(peak_hours), 120.0), dims=[\"hour\"], coords={\"hour\": peak_hours}\n", - ")\n", - "\n", - "total_gen = gen.sum(\"tech\")\n", - "m3.add_constraints(total_gen.ge(peak_demand, join=\"inner\"), name=\"peak_demand\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": "The demand constraint only applies during peak hours (8–20). Outside that range, no minimum generation is required." 
- }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Summary\n", - "\n", - "| ``join`` | Coordinates | Fill behavior |\n", - "|----------|------------|---------------|\n", - "| ``None`` (default) | Auto-detect (keeps superset) | Zeros for arithmetic, NaN for constraint RHS |\n", - "| ``\"inner\"`` | Intersection only | No fill needed |\n", - "| ``\"outer\"`` | Union | Fill with operation identity (0 for add, 0 for mul) |\n", - "| ``\"left\"`` | Left operand's | Fill right with identity |\n", - "| ``\"right\"`` | Right operand's | Fill left with identity |\n", - "| ``\"override\"`` | Left operand's (positional) | Positional alignment, ignore labels |\n", - "| ``\"exact\"`` | Must match exactly | Raises error if different |" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/linopy/common.py b/linopy/common.py index 4b3f84d6..a1022189 100644 --- a/linopy/common.py +++ b/linopy/common.py @@ -205,6 +205,9 @@ def numpy_to_dataarray( if isinstance(coords, list): coords = dict(zip(dims, coords[: arr.ndim])) elif is_dict_like(coords): + # Filter coords to matching dims — this is expected when a + # lower-dimensional constant is broadcast against an expression + # whose full coords are passed through as_dataarray. 
coords = {k: v for k, v in coords.items() if k in dims} return DataArray(arr, coords=coords, dims=dims, **kwargs) diff --git a/linopy/expressions.py b/linopy/expressions.py index 64e2ecb7..15a759ae 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -32,7 +32,12 @@ from xarray.core.indexes import Indexes from xarray.core.types import JoinOptions from xarray.core.utils import Frozen -from xarray.structure.alignment import AlignmentError + +try: + from xarray.structure.alignment import AlignmentError +except ImportError: + # Fallback for older xarray versions where this isn't a separate class + AlignmentError = ValueError # type: ignore[assignment, misc] try: # resolve breaking change in xarray 2025.03.0 @@ -578,6 +583,7 @@ def _align_constant( join = options["arithmetic_convention"] if join == "legacy": + # stacklevel=4: user code -> __add__/__mul__ -> _add_constant/_apply_constant_op -> _align_constant warn( LEGACY_DEPRECATION_MESSAGE, LinopyDeprecationWarning, @@ -657,6 +663,11 @@ def _apply_constant_op( is_legacy = ( join is None and options["arithmetic_convention"] == "legacy" ) or join == "legacy" + # Fast path for scalars: no dimensions to align + if np.isscalar(other): + coeffs = self.coeffs.fillna(0) if is_legacy else self.coeffs + const = self.const.fillna(0) if is_legacy else self.const + return self.assign(coeffs=op(coeffs, other), const=op(const, other)) factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, factor, needs_data_reindex = self._align_constant( factor, fill_value=fill_value, join=join @@ -694,7 +705,7 @@ def __div__(self: GenericExpression, other: SideLike) -> GenericExpression: if isinstance(other, SUPPORTED_EXPRESSION_TYPES): raise TypeError( "unsupported operand type(s) for /: " - f"{type(self)} and {type(other)}" + f"{type(self)} and {type(other)}. " "Non-linear expressions are not yet supported." 
) return self._divide_by_constant(other) @@ -1719,18 +1730,6 @@ def __add__( return self._add_constant(other) else: other = as_expression(other, model=self.model, dims=self.coord_dims) - if options["arithmetic_convention"] == "v1": - # Enforce exact coordinate alignment before merge - try: - xr.align(self.const, other.const, join="exact") - except (ValueError, AlignmentError) as e: - raise ValueError( - f"{e}\n" - "Use .add()/.sub() with an explicit join= parameter:\n" - ' .add(other, join="inner") # intersection\n' - ' .add(other, join="outer") # union with fill\n' - ' .add(other, join="left") # keep left coordinates' - ) from None return merge([self, other], cls=self.__class__) except TypeError: return NotImplemented @@ -2269,18 +2268,6 @@ def __add__(self, other: SideLike) -> QuadraticExpression: if isinstance(other, LinearExpression): other = other.to_quadexpr() - if options["arithmetic_convention"] == "v1": - try: - xr.align(self.const, other.const, join="exact") - except (ValueError, AlignmentError) as e: - raise ValueError( - f"{e}\n" - "Use .add()/.sub() with an explicit join= parameter:\n" - ' .add(other, join="inner") # intersection\n' - ' .add(other, join="outer") # union with fill\n' - ' .add(other, join="left") # keep left coordinates' - ) from None - return merge([self, other], cls=self.__class__) except TypeError: return NotImplemented @@ -2480,13 +2467,25 @@ def merge( **kwargs: Any, ) -> GenericExpression: """ - Merge multiple expression together. + Merge multiple expressions together. + + Concatenates expressions along a given dimension (default: ``_term``). + Faster than summing expressions individually. + + Join behavior by convention (when ``join=None``): + + - **v1**: Enforces exact match on shared user-dimension coordinates. + Helper dims (``_term``, ``_factor``) and the concat dim are excluded + from this check. Raises ``ValueError`` on mismatch. 
The actual + ``xr.concat`` uses ``join="outer"`` since helper dims legitimately + differ between expressions. + - **legacy**: Uses ``join="override"`` (positional alignment) when all + shared user dims have matching sizes, ``join="outer"`` otherwise. + - **explicit** (e.g. ``join="inner"``): Passed through to ``xr.concat``. - This function is a bit faster than summing over multiple linear expressions. - In case a list of LinearExpression with exactly the same shape is passed - and the dimension to concatenate on is TERM_DIM, the concatenation uses - the coordinates of the first object as a basis which overrides the - coordinates of the consecutive objects. + Internal callers that bypass the convention: + + - ``.add(join=X)``: passes explicit join through. Parameters ---------- @@ -2495,21 +2494,20 @@ def merge( dim : str Dimension along which the expressions should be concatenated. cls : type - Explicitly set the type of the resulting expression (So that the type checker will know the return type) + Explicitly set the type of the resulting expression (So that the + type checker will know the return type) join : str, optional How to align coordinates. One of "outer", "inner", "left", "right", - "exact", "override". When None (default), auto-detects based on - expression shapes. + "exact", "override". When None (default), uses the current + arithmetic convention. **kwargs - Additional keyword arguments passed to xarray.concat. Defaults to - {coords: "minimal", compat: "override"} or, in the special case described - above, to {coords: "minimal", compat: "override", "join": "override"}. + Additional keyword arguments passed to xarray.concat. Returns ------- res : linopy.LinearExpression or linopy.QuadraticExpression """ - if not isinstance(exprs, list) and len(add_exprs): + if not isinstance(exprs, list) and len(add_exprs) > 0: warn( "Passing a tuple to the merge function is deprecated. 
Please pass a list of objects to be merged", DeprecationWarning, @@ -2571,13 +2569,32 @@ def merge( kwargs["join"] = "override" if override else "outer" elif effective_join == "v1": - # Merge uses outer join for xr.concat since helper dims - # (_term, _factor) commonly have different sizes and - # expressions may have different user dimensions. - # Coordinate enforcement for v1 is done at the operator - # level (__add__, __sub__, etc.) before calling merge. + # Enforce exact alignment on user dims only. Helper dims + # (_term, _factor) legitimately differ between expressions, + # so we can't pass join="exact" to xr.concat directly. + # Instead: pre-validate user dims, then concat with outer. + # Check only dimension-coordinates (not scalar coords left + # from .sel()), excluding helper dims and the concat dim. + skip_dims = set(HELPER_DIMS) | {dim} + user_coords = [ + {k: d.coords[k] for k in d.dims if k not in skip_dims} for d in data + ] + # Only check dims shared by all datasets (broadcasting is OK) + shared_dims = set.intersection(*(set(c.keys()) for c in user_coords)) + for d_name in shared_dims: + ref = user_coords[0][d_name] + for i, uc in enumerate(user_coords[1:], 1): + if not ref.equals(uc[d_name]): + raise ValueError( + f"Coordinate mismatch on dimension '{d_name}'.\n" + "Use .add()/.sub() with an explicit join= parameter:\n" + ' .add(other, join="inner") # intersection of coordinates\n' + ' .add(other, join="outer") # union of coordinates (with fill)\n' + ' .add(other, join="left") # keep left operand\'s coordinates' + ) kwargs["join"] = "outer" else: + # Explicit join passed through (e.g., from .add(join="inner")) kwargs["join"] = effective_join if dim == TERM_DIM: diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index a4e4abfa..574994ee 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -1960,9 +1960,9 @@ def test_add_join_none_raises_on_mismatch( self, a: Variable, b: Variable ) -> None: # 
a has i=[0,1,2], b has i=[1,2,3] — exact default raises - with pytest.raises(ValueError, match="exact"): + with pytest.raises(ValueError, match="Coordinate mismatch"): a.to_linexpr() + b.to_linexpr() - with pytest.raises(ValueError, match="exact"): + with pytest.raises(ValueError, match="Coordinate mismatch"): a.to_linexpr().add(b.to_linexpr(), join=None) def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: From 0ee78722e2481497a069fa152a9f762772558858 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 19:46:23 +0100 Subject: [PATCH 49/66] Fix mypy: wrap scalar in DataArray in _apply_constant_op fast path Co-Authored-By: Claude Opus 4.6 --- linopy/expressions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 15a759ae..954c562f 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -667,7 +667,8 @@ def _apply_constant_op( if np.isscalar(other): coeffs = self.coeffs.fillna(0) if is_legacy else self.coeffs const = self.const.fillna(0) if is_legacy else self.const - return self.assign(coeffs=op(coeffs, other), const=op(const, other)) + scalar = DataArray(other) + return self.assign(coeffs=op(coeffs, scalar), const=op(const, scalar)) factor = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, factor, needs_data_reindex = self._align_constant( factor, fill_value=fill_value, join=join From bed9f8ce03cfe92888788a3e77fa2032ade34530 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 21:38:17 +0100 Subject: [PATCH 50/66] Deduplicate convention-specific test files (#608) * Deduplicate convention-specific test files into single files Merge 4 pairs of v1/legacy test files into single files, eliminating ~2600 lines of duplicated test code. 
Convention-specific alignment tests are kept in separate classes (V1/Legacy) with autouse fixtures, while shared tests run under the module-level v1 convention. - test_typing_legacy.py -> merged into test_typing.py (parametrized) - test_common_legacy.py -> merged into test_common.py (legacy align test) - test_constraints_legacy.py -> merged into test_constraints.py (legacy alignment class) - test_linear_expression_legacy.py -> merged into test_linear_expression.py (legacy alignment + join classes) Co-Authored-By: Claude Opus 4.6 * Address PR review: consistency, dedup fixtures, missing test - Add legacy_convention fixture to conftest.py; use it consistently instead of manual try/finally blocks (#1) - Parametrize test_constant_with_extra_dims_broadcasts with convention fixture so it runs under both conventions (#2) - Add missing test_quadratic_add_expr_join_inner to TestJoinParameterLegacy (#3) - Extract shared fixtures into _CoordinateAlignmentFixtures and _ConstraintAlignmentFixtures mixin classes to eliminate fixture duplication between V1/Legacy alignment test classes (#4) Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- test/conftest.py | 22 + test/test_common.py | 44 + test/test_common_legacy.py | 734 --------- test/test_constraints.py | 125 +- test/test_constraints_legacy.py | 448 ----- test/test_linear_expression.py | 519 +++++- test/test_linear_expression_legacy.py | 2160 ------------------------- test/test_typing.py | 16 +- test/test_typing_legacy.py | 25 - 9 files changed, 706 insertions(+), 3387 deletions(-) delete mode 100644 test/test_common_legacy.py delete mode 100644 test/test_constraints_legacy.py delete mode 100644 test/test_linear_expression_legacy.py delete mode 100644 test/test_typing_legacy.py diff --git a/test/conftest.py b/test/conftest.py index 5e2170a3..100a8fbf 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -68,6 +68,28 @@ def v1_convention() -> Generator[None, None, None]: 
linopy.options["arithmetic_convention"] = "legacy" +@pytest.fixture +def legacy_convention() -> Generator[None, None, None]: + """Set arithmetic_convention to 'legacy' for the duration of a test.""" + import linopy + + old = linopy.options["arithmetic_convention"] + linopy.options["arithmetic_convention"] = "legacy" + yield + linopy.options["arithmetic_convention"] = old + + +@pytest.fixture(params=["v1", "legacy"]) +def convention(request: pytest.FixtureRequest) -> Generator[str, None, None]: + """Run the test under both arithmetic conventions.""" + import linopy + + old = linopy.options["arithmetic_convention"] + linopy.options["arithmetic_convention"] = request.param + yield request.param + linopy.options["arithmetic_convention"] = old + + @pytest.fixture def m() -> Model: from linopy import Model diff --git a/test/test_common.py b/test/test_common.py index 719ab093..69fd9b8d 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -713,6 +713,50 @@ def test_is_constant() -> None: assert is_constant(cv) +def test_align_legacy(x: Variable, u: Variable, legacy_convention: None) -> None: + """Legacy convention: default inner join, multiindex support.""" + alpha = xr.DataArray([1, 2], [[1, 2]]) + beta = xr.DataArray( + [1, 2, 3], + [ + ( + "dim_3", + pd.MultiIndex.from_tuples( + [(1, "b"), (2, "b"), (1, "c")], names=["level1", "level2"] + ), + ) + ], + ) + + # inner join (default) + x_obs, alpha_obs = align(x, alpha) + assert isinstance(x_obs, Variable) + assert x_obs.shape == alpha_obs.shape == (1,) + assert_varequal(x_obs, x.loc[[1]]) + + # left-join + x_obs, alpha_obs = align(x, alpha, join="left") + assert x_obs.shape == alpha_obs.shape == (2,) + assert isinstance(x_obs, Variable) + assert_varequal(x_obs, x) + assert_equal(alpha_obs, DataArray([np.nan, 1], [[0, 1]])) + + # multiindex + beta_obs, u_obs = align(beta, u) + assert u_obs.shape == beta_obs.shape == (2,) + assert isinstance(u_obs, Variable) + assert_varequal(u_obs, u.loc[[(1, "b"), (2, "b")]]) + 
assert_equal(beta_obs, beta.loc[[(1, "b"), (2, "b")]]) + + # with linear expression + expr = 20 * x + x_obs, expr_obs, alpha_obs = align(x, expr, alpha) + assert x_obs.shape == alpha_obs.shape == (1,) + assert expr_obs.shape == (1, 1) # _term dim + assert isinstance(expr_obs, LinearExpression) + assert_linequal(expr_obs, expr.loc[[1]]) + + def test_maybe_group_terms_polars_no_duplicates() -> None: """Fast path: distinct (labels, vars) pairs skip group_by.""" df = pl.DataFrame({"labels": [0, 0], "vars": [1, 2], "coeffs": [3.0, 4.0]}) diff --git a/test/test_common_legacy.py b/test/test_common_legacy.py deleted file mode 100644 index f1190024..00000000 --- a/test/test_common_legacy.py +++ /dev/null @@ -1,734 +0,0 @@ -#!/usr/bin/env python3 -""" -Created on Mon Jun 19 12:11:03 2023 - -@author: fabian -""" - -import numpy as np -import pandas as pd -import polars as pl -import pytest -import xarray as xr -from xarray import DataArray -from xarray.testing.assertions import assert_equal - -from linopy import LinearExpression, Model, Variable -from linopy.common import ( - align, - as_dataarray, - assign_multiindex_safe, - best_int, - get_dims_with_index_levels, - is_constant, - iterate_slices, - maybe_group_terms_polars, -) -from linopy.testing import assert_linequal, assert_varequal - - -def test_as_dataarray_with_series_dims_default() -> None: - target_dim = "dim_0" - target_index = [0, 1, 2] - s = pd.Series([1, 2, 3]) - da = as_dataarray(s) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - -def test_as_dataarray_with_series_dims_set() -> None: - target_dim = "dim1" - target_index = ["a", "b", "c"] - s = pd.Series([1, 2, 3], index=target_index) - dims = [target_dim] - da = as_dataarray(s, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - -def test_as_dataarray_with_series_dims_given() -> None: - 
target_dim = "dim1" - target_index = ["a", "b", "c"] - index = pd.Index(target_index, name=target_dim) - s = pd.Series([1, 2, 3], index=index) - dims: list[str] = [] - da = as_dataarray(s, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - -def test_as_dataarray_with_series_dims_priority() -> None: - """The dimension name from the pandas object should have priority.""" - target_dim = "dim1" - target_index = ["a", "b", "c"] - index = pd.Index(target_index, name=target_dim) - s = pd.Series([1, 2, 3], index=index) - dims = ["other"] - da = as_dataarray(s, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - -def test_as_dataarray_with_series_dims_subset() -> None: - target_dim = "dim_0" - target_index = ["a", "b", "c"] - s = pd.Series([1, 2, 3], index=target_index) - dims: list[str] = [] - da = as_dataarray(s, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - -def test_as_dataarray_with_series_dims_superset() -> None: - target_dim = "dim_a" - target_index = ["a", "b", "c"] - s = pd.Series([1, 2, 3], index=target_index) - dims = [target_dim, "other"] - da = as_dataarray(s, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - -def test_as_dataarray_with_series_aligned_coords() -> None: - """This should not give out a warning even though coords are given.""" - target_dim = "dim_0" - target_index = ["a", "b", "c"] - s = pd.Series([1, 2, 3], index=target_index) - da = as_dataarray(s, coords=[target_index]) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - da = as_dataarray(s, coords={target_dim: target_index}) - assert 
isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - -def test_as_dataarray_with_pl_series_dims_default() -> None: - target_dim = "dim_0" - target_index = [0, 1, 2] - s = pl.Series([1, 2, 3]) - da = as_dataarray(s) - assert isinstance(da, DataArray) - assert da.dims == (target_dim,) - assert list(da.coords[target_dim].values) == target_index - - -def test_as_dataarray_dataframe_dims_default() -> None: - target_dims = ("dim_0", "dim_1") - target_index = [0, 1] - target_columns = ["A", "B"] - df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) - da = as_dataarray(df) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - -def test_as_dataarray_dataframe_dims_set() -> None: - target_dims = ("dim1", "dim2") - target_index = ["a", "b"] - target_columns = ["A", "B"] - df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) - da = as_dataarray(df, dims=target_dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - -def test_as_dataarray_dataframe_dims_given() -> None: - target_dims = ("dim1", "dim2") - target_index = ["a", "b"] - target_columns = ["A", "B"] - index = pd.Index(target_index, name=target_dims[0]) - columns = pd.Index(target_columns, name=target_dims[1]) - df = pd.DataFrame([[1, 2], [3, 4]], index=index, columns=columns) - dims: list[str] = [] - da = as_dataarray(df, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - -def test_as_dataarray_dataframe_dims_priority() -> None: - """The 
dimension name from the pandas object should have priority.""" - target_dims = ("dim1", "dim2") - target_index = ["a", "b"] - target_columns = ["A", "B"] - index = pd.Index(target_index, name=target_dims[0]) - columns = pd.Index(target_columns, name=target_dims[1]) - df = pd.DataFrame([[1, 2], [3, 4]], index=index, columns=columns) - dims = ["other"] - da = as_dataarray(df, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - -def test_as_dataarray_dataframe_dims_subset() -> None: - target_dims = ("dim_0", "dim_1") - target_index = ["a", "b"] - target_columns = ["A", "B"] - df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) - dims: list[str] = [] - da = as_dataarray(df, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - -def test_as_dataarray_dataframe_dims_superset() -> None: - target_dims = ("dim_a", "dim_b") - target_index = ["a", "b"] - target_columns = ["A", "B"] - df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) - dims = [*target_dims, "other"] - da = as_dataarray(df, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - -def test_as_dataarray_dataframe_aligned_coords() -> None: - """This should not give out a warning even though coords are given.""" - target_dims = ("dim_0", "dim_1") - target_index = ["a", "b"] - target_columns = ["A", "B"] - df = pd.DataFrame([[1, 2], [3, 4]], index=target_index, columns=target_columns) - da = as_dataarray(df, coords=[target_index, target_columns]) - assert isinstance(da, DataArray) - assert 
da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - coords = dict(zip(target_dims, [target_index, target_columns])) - da = as_dataarray(df, coords=coords) - assert isinstance(da, DataArray) - assert da.dims == target_dims - assert list(da.coords[target_dims[0]].values) == target_index - assert list(da.coords[target_dims[1]].values) == target_columns - - -def test_as_dataarray_with_ndarray_no_coords_no_dims() -> None: - target_dims = ("dim_0", "dim_1") - target_coords = [[0, 1], [0, 1]] - arr = np.array([[1, 2], [3, 4]]) - da = as_dataarray(arr) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for i, dim in enumerate(target_dims): - assert list(da.coords[dim]) == target_coords[i] - - -def test_as_dataarray_with_ndarray_coords_list_no_dims() -> None: - target_dims = ("dim_0", "dim_1") - target_coords = [["a", "b"], ["A", "B"]] - arr = np.array([[1, 2], [3, 4]]) - da = as_dataarray(arr, coords=target_coords) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for i, dim in enumerate(target_dims): - assert list(da.coords[dim]) == target_coords[i] - - -def test_as_dataarray_with_ndarray_coords_indexes_no_dims() -> None: - target_dims = ("dim1", "dim2") - target_coords = [ - pd.Index(["a", "b"], name="dim1"), - pd.Index(["A", "B"], name="dim2"), - ] - arr = np.array([[1, 2], [3, 4]]) - da = as_dataarray(arr, coords=target_coords) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for i, dim in enumerate(target_dims): - assert list(da.coords[dim]) == list(target_coords[i]) - - -def test_as_dataarray_with_ndarray_coords_dict_set_no_dims() -> None: - """If no dims are given and coords are a dict, the keys of the dict should be used as dims.""" - target_dims = ("dim_0", "dim_2") - target_coords = {"dim_0": ["a", "b"], "dim_2": ["A", "B"]} - arr = np.array([[1, 2], [3, 4]]) - da = as_dataarray(arr, 
coords=target_coords) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for dim in target_dims: - assert list(da.coords[dim]) == target_coords[dim] - - -def test_as_dataarray_with_ndarray_coords_list_dims() -> None: - target_dims = ("dim1", "dim2") - target_coords = [["a", "b"], ["A", "B"]] - arr = np.array([[1, 2], [3, 4]]) - da = as_dataarray(arr, coords=target_coords, dims=target_dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for i, dim in enumerate(target_dims): - assert list(da.coords[dim]) == target_coords[i] - - -def test_as_dataarray_with_ndarray_coords_list_dims_superset() -> None: - target_dims = ("dim1", "dim2") - target_coords = [["a", "b"], ["A", "B"]] - arr = np.array([[1, 2], [3, 4]]) - dims = [*target_dims, "dim3"] - da = as_dataarray(arr, coords=target_coords, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for i, dim in enumerate(target_dims): - assert list(da.coords[dim]) == target_coords[i] - - -def test_as_dataarray_with_ndarray_coords_list_dims_subset() -> None: - target_dims = ("dim0", "dim_1") - target_coords = [["a", "b"], ["A", "B"]] - arr = np.array([[1, 2], [3, 4]]) - dims = ["dim0"] - da = as_dataarray(arr, coords=target_coords, dims=dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for i, dim in enumerate(target_dims): - assert list(da.coords[dim]) == target_coords[i] - - -def test_as_dataarray_with_ndarray_coords_indexes_dims_aligned() -> None: - target_dims = ("dim1", "dim2") - target_coords = [ - pd.Index(["a", "b"], name="dim1"), - pd.Index(["A", "B"], name="dim2"), - ] - arr = np.array([[1, 2], [3, 4]]) - da = as_dataarray(arr, coords=target_coords, dims=target_dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for i, dim in enumerate(target_dims): - assert list(da.coords[dim]) == list(target_coords[i]) - - -def test_as_dataarray_with_ndarray_coords_indexes_dims_not_aligned() -> None: - target_dims = 
("dim3", "dim4") - target_coords = [ - pd.Index(["a", "b"], name="dim1"), - pd.Index(["A", "B"], name="dim2"), - ] - arr = np.array([[1, 2], [3, 4]]) - with pytest.raises(ValueError): - as_dataarray(arr, coords=target_coords, dims=target_dims) - - -def test_as_dataarray_with_ndarray_coords_dict_dims_aligned() -> None: - target_dims = ("dim_0", "dim_1") - target_coords = {"dim_0": ["a", "b"], "dim_1": ["A", "B"]} - arr = np.array([[1, 2], [3, 4]]) - da = as_dataarray(arr, coords=target_coords, dims=target_dims) - assert isinstance(da, DataArray) - assert da.dims == target_dims - for dim in target_dims: - assert list(da.coords[dim]) == target_coords[dim] - - -def test_as_dataarray_with_ndarray_coords_dict_set_dims_not_aligned() -> None: - target_dims = ("dim_0", "dim_1") - target_coords = {"dim_0": ["a", "b"], "dim_2": ["A", "B"]} - arr = np.array([[1, 2], [3, 4]]) - da = as_dataarray(arr, coords=target_coords, dims=target_dims) - assert da.dims == target_dims - assert list(da.coords["dim_0"].values) == ["a", "b"] - assert "dim_2" not in da.coords - - -def test_as_dataarray_with_number() -> None: - num = 1 - da = as_dataarray(num, dims=["dim1"], coords=[["a"]]) - assert isinstance(da, DataArray) - assert da.dims == ("dim1",) - assert list(da.coords["dim1"].values) == ["a"] - - -def test_as_dataarray_with_np_number() -> None: - num = np.float64(1) - da = as_dataarray(num, dims=["dim1"], coords=[["a"]]) - assert isinstance(da, DataArray) - assert da.dims == ("dim1",) - assert list(da.coords["dim1"].values) == ["a"] - - -def test_as_dataarray_with_number_default_dims_coords() -> None: - num = 1 - da = as_dataarray(num) - assert isinstance(da, DataArray) - assert da.dims == () - assert da.coords == {} - - -def test_as_dataarray_with_number_and_coords() -> None: - num = 1 - da = as_dataarray(num, coords=[pd.RangeIndex(10, name="a")]) - assert isinstance(da, DataArray) - assert da.dims == ("a",) - assert list(da.coords["a"].values) == list(range(10)) - - -def 
test_as_dataarray_with_dataarray() -> None: - da_in = DataArray( - data=[[1, 2], [3, 4]], - dims=["dim1", "dim2"], - coords={"dim1": ["a", "b"], "dim2": ["A", "B"]}, - ) - da_out = as_dataarray(da_in, dims=["dim1", "dim2"], coords=[["a", "b"], ["A", "B"]]) - assert isinstance(da_out, DataArray) - assert da_out.dims == da_in.dims - assert list(da_out.coords["dim1"].values) == list(da_in.coords["dim1"].values) - assert list(da_out.coords["dim2"].values) == list(da_in.coords["dim2"].values) - - -def test_as_dataarray_with_dataarray_default_dims_coords() -> None: - da_in = DataArray( - data=[[1, 2], [3, 4]], - dims=["dim1", "dim2"], - coords={"dim1": ["a", "b"], "dim2": ["A", "B"]}, - ) - da_out = as_dataarray(da_in) - assert isinstance(da_out, DataArray) - assert da_out.dims == da_in.dims - assert list(da_out.coords["dim1"].values) == list(da_in.coords["dim1"].values) - assert list(da_out.coords["dim2"].values) == list(da_in.coords["dim2"].values) - - -def test_as_dataarray_with_unsupported_type() -> None: - with pytest.raises(TypeError): - as_dataarray(lambda x: 1, dims=["dim1"], coords=[["a"]]) - - -def test_best_int() -> None: - # Test for int8 - assert best_int(127) == np.int8 - # Test for int16 - assert best_int(128) == np.int16 - assert best_int(32767) == np.int16 - # Test for int32 - assert best_int(32768) == np.int32 - assert best_int(2147483647) == np.int32 - # Test for int64 - assert best_int(2147483648) == np.int64 - assert best_int(9223372036854775807) == np.int64 - - # Test for value too large - with pytest.raises( - ValueError, match=r"Value 9223372036854775808 is too large for int64." 
- ): - best_int(9223372036854775808) - - -def test_assign_multiindex_safe() -> None: - # Create a multi-indexed dataset - index = pd.MultiIndex.from_product([["A", "B"], [1, 2]], names=["letter", "number"]) - data = xr.DataArray([1, 2, 3, 4], dims=["index"], coords={"index": index}) - ds = xr.Dataset({"value": data}) - - # This would now warn about the index deletion of single index level - # ds["humidity"] = data - - # Case 1: Assigning a single DataArray - result = assign_multiindex_safe(ds, humidity=data) - assert "humidity" in result - assert "value" in result - assert result["humidity"].equals(data) - - # Case 2: Assigning a Dataset - result = assign_multiindex_safe(ds, **xr.Dataset({"humidity": data})) # type: ignore - assert "humidity" in result - assert "value" in result - assert result["humidity"].equals(data) - - # Case 3: Assigning multiple DataArrays - result = assign_multiindex_safe(ds, humidity=data, pressure=data) - assert "humidity" in result - assert "pressure" in result - assert "value" in result - assert result["humidity"].equals(data) - assert result["pressure"].equals(data) - - -def test_iterate_slices_basic() -> None: - ds = xr.Dataset( - {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 - coords={"x": np.arange(10), "y": np.arange(10)}, - ) - slices = list(iterate_slices(ds, slice_size=20)) - assert len(slices) == 5 - for s in slices: - assert isinstance(s, xr.Dataset) - assert set(s.dims) == set(ds.dims) - - -def test_iterate_slices_with_exclude_dims() -> None: - ds = xr.Dataset( - {"var": (("x", "y"), np.random.rand(10, 20))}, # noqa: NPY002 - coords={"x": np.arange(10), "y": np.arange(20)}, - ) - slices = list(iterate_slices(ds, slice_size=20, slice_dims=["x"])) - assert len(slices) == 10 - for s in slices: - assert isinstance(s, xr.Dataset) - assert set(s.dims) == set(ds.dims) - - -def test_iterate_slices_large_max_size() -> None: - ds = xr.Dataset( - {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 - coords={"x": 
np.arange(10), "y": np.arange(10)}, - ) - slices = list(iterate_slices(ds, slice_size=200)) - assert len(slices) == 1 - for s in slices: - assert isinstance(s, xr.Dataset) - assert set(s.dims) == set(ds.dims) - - -def test_iterate_slices_small_max_size() -> None: - ds = xr.Dataset( - {"var": (("x", "y"), np.random.rand(10, 20))}, # noqa: NPY002 - coords={"x": np.arange(10), "y": np.arange(20)}, - ) - slices = list(iterate_slices(ds, slice_size=8, slice_dims=["x"])) - assert ( - len(slices) == 10 - ) # goes to the smallest slice possible which is 1 for the x dimension - for s in slices: - assert isinstance(s, xr.Dataset) - assert set(s.dims) == set(ds.dims) - - -def test_iterate_slices_slice_size_none() -> None: - ds = xr.Dataset( - {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 - coords={"x": np.arange(10), "y": np.arange(10)}, - ) - slices = list(iterate_slices(ds, slice_size=None)) - assert len(slices) == 1 - for s in slices: - assert ds.equals(s) - - -def test_iterate_slices_includes_last_slice() -> None: - ds = xr.Dataset( - {"var": (("x"), np.random.rand(10))}, # noqa: NPY002 - coords={"x": np.arange(10)}, - ) - slices = list(iterate_slices(ds, slice_size=3, slice_dims=["x"])) - assert len(slices) == 4 # 10 slices for dimension 'x' with size 10 - total_elements = sum(s.sizes["x"] for s in slices) - assert total_elements == ds.sizes["x"] # Ensure all elements are included - for s in slices: - assert isinstance(s, xr.Dataset) - assert set(s.dims) == set(ds.dims) - - -def test_iterate_slices_empty_slice_dims() -> None: - ds = xr.Dataset( - {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 - coords={"x": np.arange(10), "y": np.arange(10)}, - ) - slices = list(iterate_slices(ds, slice_size=50, slice_dims=[])) - assert len(slices) == 1 - for s in slices: - assert ds.equals(s) - - -def test_iterate_slices_invalid_slice_dims() -> None: - ds = xr.Dataset( - {"var": (("x", "y"), np.random.rand(10, 10))}, # noqa: NPY002 - coords={"x": 
np.arange(10), "y": np.arange(10)}, - ) - with pytest.raises(ValueError): - list(iterate_slices(ds, slice_size=50, slice_dims=["z"])) - - -def test_iterate_slices_empty_dataset() -> None: - ds = xr.Dataset( - {"var": (("x", "y"), np.array([]).reshape(0, 0))}, coords={"x": [], "y": []} - ) - slices = list(iterate_slices(ds, slice_size=10, slice_dims=["x"])) - assert len(slices) == 1 - assert ds.equals(slices[0]) - - -def test_iterate_slices_single_element() -> None: - ds = xr.Dataset({"var": (("x", "y"), np.array([[1]]))}, coords={"x": [0], "y": [0]}) - slices = list(iterate_slices(ds, slice_size=1, slice_dims=["x"])) - assert len(slices) == 1 - assert ds.equals(slices[0]) - - -def test_get_dims_with_index_levels() -> None: - # Create test data - - # Case 1: Simple dataset with regular dimensions - ds1 = xr.Dataset( - {"temp": (("time", "lat"), np.random.rand(3, 2))}, # noqa: NPY002 - coords={"time": pd.date_range("2024-01-01", periods=3), "lat": [0, 1]}, - ) - - # Case 2: Dataset with a multi-index dimension - stations_index = pd.MultiIndex.from_product( - [["USA", "Canada"], ["NYC", "Toronto"]], names=["country", "city"] - ) - stations_coords = xr.Coordinates.from_pandas_multiindex(stations_index, "station") - ds2 = xr.Dataset( - {"temp": (("time", "station"), np.random.rand(3, 4))}, # noqa: NPY002 - coords={"time": pd.date_range("2024-01-01", periods=3), **stations_coords}, - ) - - # Case 3: Dataset with unnamed multi-index levels - unnamed_stations_index = pd.MultiIndex.from_product( - [["USA", "Canada"], ["NYC", "Toronto"]] - ) - unnamed_stations_coords = xr.Coordinates.from_pandas_multiindex( - unnamed_stations_index, "station" - ) - ds3 = xr.Dataset( - {"temp": (("time", "station"), np.random.rand(3, 4))}, # noqa: NPY002 - coords={ - "time": pd.date_range("2024-01-01", periods=3), - **unnamed_stations_coords, - }, - ) - - # Case 4: Dataset with multiple multi-indexed dimensions - locations_index = pd.MultiIndex.from_product( - [["North", "South"], ["A", 
"B"]], names=["region", "site"] - ) - locations_coords = xr.Coordinates.from_pandas_multiindex( - locations_index, "location" - ) - - ds4 = xr.Dataset( - {"temp": (("time", "station", "location"), np.random.rand(2, 4, 4))}, # noqa: NPY002 - coords={ - "time": pd.date_range("2024-01-01", periods=2), - **stations_coords, - **locations_coords, - }, - ) - - # Run tests - - # Test case 1: Regular dimensions - assert get_dims_with_index_levels(ds1) == ["time", "lat"] - - # Test case 2: Named multi-index - assert get_dims_with_index_levels(ds2) == ["time", "station (country, city)"] - - # Test case 3: Unnamed multi-index - assert get_dims_with_index_levels(ds3) == [ - "time", - "station (station_level_0, station_level_1)", - ] - - # Test case 4: Multiple multi-indices - expected = ["time", "station (country, city)", "location (region, site)"] - assert get_dims_with_index_levels(ds4) == expected - - # Test case 5: Empty dataset - ds5 = xr.Dataset() - assert get_dims_with_index_levels(ds5) == [] - - -def test_align(x: Variable, u: Variable) -> None: # noqa: F811 - alpha = xr.DataArray([1, 2], [[1, 2]]) - beta = xr.DataArray( - [1, 2, 3], - [ - ( - "dim_3", - pd.MultiIndex.from_tuples( - [(1, "b"), (2, "b"), (1, "c")], names=["level1", "level2"] - ), - ) - ], - ) - - # inner join - x_obs, alpha_obs = align(x, alpha) - assert isinstance(x_obs, Variable) - assert x_obs.shape == alpha_obs.shape == (1,) - assert_varequal(x_obs, x.loc[[1]]) - - # left-join - x_obs, alpha_obs = align(x, alpha, join="left") - assert x_obs.shape == alpha_obs.shape == (2,) - assert isinstance(x_obs, Variable) - assert_varequal(x_obs, x) - assert_equal(alpha_obs, DataArray([np.nan, 1], [[0, 1]])) - - # multiindex - beta_obs, u_obs = align(beta, u) - assert u_obs.shape == beta_obs.shape == (2,) - assert isinstance(u_obs, Variable) - assert_varequal(u_obs, u.loc[[(1, "b"), (2, "b")]]) - assert_equal(beta_obs, beta.loc[[(1, "b"), (2, "b")]]) - - # with linear expression - expr = 20 * x - x_obs, expr_obs, 
alpha_obs = align(x, expr, alpha) - assert x_obs.shape == alpha_obs.shape == (1,) - assert expr_obs.shape == (1, 1) # _term dim - assert isinstance(expr_obs, LinearExpression) - assert_linequal(expr_obs, expr.loc[[1]]) - - -def test_is_constant() -> None: - model = Model() - index = pd.Index(range(10), name="t") - a = model.add_variables(name="a", coords=[index]) - b = a.sel(t=1) - c = a * 2 - d = a * a - - non_constant = [a, b, c, d] - for nc in non_constant: - assert not is_constant(nc) - - constant_values = [ - 5, - 3.14, - np.int32(7), - np.float64(2.71), - pd.Series([1, 2, 3]), - np.array([4, 5, 6]), - xr.DataArray([k for k in range(10)], coords=[index]), - ] - for cv in constant_values: - assert is_constant(cv) - - -def test_maybe_group_terms_polars_no_duplicates() -> None: - """Fast path: distinct (labels, vars) pairs skip group_by.""" - df = pl.DataFrame({"labels": [0, 0], "vars": [1, 2], "coeffs": [3.0, 4.0]}) - result = maybe_group_terms_polars(df) - assert result.shape == (2, 3) - assert result.columns == ["labels", "vars", "coeffs"] - assert result["coeffs"].to_list() == [3.0, 4.0] - - -def test_maybe_group_terms_polars_with_duplicates() -> None: - """Slow path: duplicate (labels, vars) pairs trigger group_by.""" - df = pl.DataFrame({"labels": [0, 0], "vars": [1, 1], "coeffs": [3.0, 4.0]}) - result = maybe_group_terms_polars(df) - assert result.shape == (1, 3) - assert result["coeffs"].to_list() == [7.0] diff --git a/test/test_constraints.py b/test/test_constraints.py index e94f0152..e9e58aaa 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -210,6 +210,35 @@ def test_constraint_rhs_higher_dim_expression(rhs_factory: Any) -> None: assert c.shape == (5, 3) +@pytest.mark.parametrize( + "rhs_factory", + [ + pytest.param(lambda m: np.ones((5, 3)), id="numpy"), + pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), + ], +) +def test_constraint_rhs_higher_dim_constant_warns_legacy( + rhs_factory: Any, caplog: Any, 
legacy_convention: None +) -> None: + """Legacy convention warns on higher-dim constant RHS.""" + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + with caplog.at_level("WARNING", logger="linopy.expressions"): + m.add_constraints(x >= rhs_factory(m)) + assert "dimensions" in caplog.text + + +def test_constraint_rhs_higher_dim_dataarray_reindexes_legacy( + legacy_convention: None, +) -> None: + """Legacy convention: DataArray RHS with extra dims reindexes to expression coords.""" + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + rhs = xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]) + c = m.add_constraints(x >= rhs) + assert c.shape == (5, 3) + + def test_wrong_constraint_assignment_repeated() -> None: # repeated variable assignment is forbidden m: Model = Model() @@ -339,7 +368,9 @@ def test_sanitize_infinities() -> None: m.add_constraints(y <= -np.inf, name="con_wrong_neg_inf") -class TestConstraintCoordinateAlignment: +class _ConstraintAlignmentFixtures: + """Shared fixtures for constraint coordinate alignment tests.""" + @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: if request.param == "xarray": @@ -358,6 +389,8 @@ def superset(self, request: Any) -> xr.DataArray | pd.Series: np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") ) + +class TestConstraintCoordinateAlignmentV1(_ConstraintAlignmentFixtures): def test_var_le_subset_raises(self, v: Variable, subset: xr.DataArray) -> None: with pytest.raises(ValueError, match="exact"): v <= subset @@ -445,3 +478,93 @@ def test_subset_constraint_solve_integration(self) -> None: assert sol.sel(i=0).item() == pytest.approx(100.0) assert sol.sel(i=2).item() == pytest.approx(100.0) assert sol.sel(i=4).item() == pytest.approx(100.0) + + +class TestConstraintCoordinateAlignmentLegacy(_ConstraintAlignmentFixtures): + """Legacy convention: outer join with NaN fill behavior for 
constraints.""" + + @pytest.fixture(autouse=True) + def _use_legacy(self, legacy_convention: None) -> None: + pass + + def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + con = v <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert con.rhs.sel(dim_2=3).item() == 30.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_var_comparison_subset( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = v <= subset + elif sign == GREATER_EQUAL: + con = v >= subset + else: + con = v == subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + expr = v + 5 + con = expr <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) + assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_subset_comparison_var( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = subset <= v + elif sign == GREATER_EQUAL: + con = subset >= v + else: + con = subset == v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert np.isnan(con.rhs.sel(dim_2=0).item()) + assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) + def test_superset_comparison_var( + self, v: Variable, superset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = superset <= v + else: + con = superset >= v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(con.lhs.coeffs.values).any() + assert not 
np.isnan(con.rhs.values).any() + + def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: + rhs = xr.DataArray( + [[1.0, 2.0]], + dims=["extra", "dim_2"], + coords={"dim_2": [0, 1]}, + ) + c = v <= rhs + assert "extra" in c.dims + + def test_subset_constraint_solve_integration(self) -> None: + if not available_solvers: + pytest.skip("No solver available") + solver = "highs" if "highs" in available_solvers else available_solvers[0] + m = Model() + coords = pd.RangeIndex(5, name="i") + x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") + subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) + m.add_constraints(x <= subset_ub, name="subset_ub") + m.add_objective(x.sum(), sense="max") + m.solve(solver_name=solver) + sol = m.solution["x"] + assert sol.sel(i=1).item() == pytest.approx(10.0) + assert sol.sel(i=3).item() == pytest.approx(20.0) + assert sol.sel(i=0).item() == pytest.approx(100.0) + assert sol.sel(i=2).item() == pytest.approx(100.0) + assert sol.sel(i=4).item() == pytest.approx(100.0) diff --git a/test/test_constraints_legacy.py b/test/test_constraints_legacy.py deleted file mode 100644 index 9a467c8c..00000000 --- a/test/test_constraints_legacy.py +++ /dev/null @@ -1,448 +0,0 @@ -#!/usr/bin/env python3 -""" -Created on Wed Mar 10 11:23:13 2021. 
- -@author: fabulous -""" - -from typing import Any - -import dask -import dask.array.core -import numpy as np -import pandas as pd -import pytest -import xarray as xr - -from linopy import EQUAL, GREATER_EQUAL, LESS_EQUAL, Model, Variable, available_solvers -from linopy.testing import assert_conequal - -# Test model functions - - -def test_constraint_assignment() -> None: - m: Model = Model() - - lower: xr.DataArray = xr.DataArray( - np.zeros((10, 10)), coords=[range(10), range(10)] - ) - upper: xr.DataArray = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper, name="x") - y = m.add_variables(name="y") - - con0 = m.add_constraints(1 * x + 10 * y, EQUAL, 0) - - for attr in m.constraints.dataset_attrs: - assert "con0" in getattr(m.constraints, attr) - - assert m.constraints.labels.con0.shape == (10, 10) - assert m.constraints.labels.con0.dtype == int - assert m.constraints.coeffs.con0.dtype in (int, float) - assert m.constraints.vars.con0.dtype in (int, float) - assert m.constraints.rhs.con0.dtype in (int, float) - - assert_conequal(m.constraints.con0, con0) - - -def test_constraint_equality() -> None: - m: Model = Model() - - lower: xr.DataArray = xr.DataArray( - np.zeros((10, 10)), coords=[range(10), range(10)] - ) - upper: xr.DataArray = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper, name="x") - y = m.add_variables(name="y") - - con0 = m.add_constraints(1 * x + 10 * y, EQUAL, 0) - - assert_conequal(con0, 1 * x + 10 * y == 0, strict=False) - assert_conequal(1 * x + 10 * y == 0, 1 * x + 10 * y == 0, strict=False) - - with pytest.raises(AssertionError): - assert_conequal(con0, 1 * x + 10 * y <= 0, strict=False) - - with pytest.raises(AssertionError): - assert_conequal(con0, 1 * x + 10 * y >= 0, strict=False) - - with pytest.raises(AssertionError): - assert_conequal(10 * y + 2 * x == 0, 1 * x + 10 * y == 0, strict=False) - - -def test_constraints_getattr_formatted() -> 
None: - m: Model = Model() - x = m.add_variables(0, 10, name="x") - m.add_constraints(1 * x == 0, name="con-0") - assert_conequal(m.constraints.con_0, m.constraints["con-0"]) - - -def test_anonymous_constraint_assignment() -> None: - m: Model = Model() - - lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) - upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper, name="x") - y = m.add_variables(name="y") - con = 1 * x + 10 * y == 0 - m.add_constraints(con) - - for attr in m.constraints.dataset_attrs: - assert "con0" in getattr(m.constraints, attr) - - assert m.constraints.labels.con0.shape == (10, 10) - assert m.constraints.labels.con0.dtype == int - assert m.constraints.coeffs.con0.dtype in (int, float) - assert m.constraints.vars.con0.dtype in (int, float) - assert m.constraints.rhs.con0.dtype in (int, float) - - -def test_constraint_assignment_with_tuples() -> None: - m: Model = Model() - - lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) - upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper) - y = m.add_variables() - - m.add_constraints([(1, x), (10, y)], EQUAL, 0, name="c") - for attr in m.constraints.dataset_attrs: - assert "c" in getattr(m.constraints, attr) - assert m.constraints.labels.c.shape == (10, 10) - - -def test_constraint_assignment_chunked() -> None: - # setting bounds with one pd.DataFrame and one pd.Series - m: Model = Model(chunk=5) - lower = pd.DataFrame(np.zeros((10, 10))) - upper = pd.Series(np.ones(10)) - x = m.add_variables(lower, upper) - m.add_constraints(x, GREATER_EQUAL, 0, name="c") - assert m.constraints.coeffs.c.data.shape == ( - 10, - 10, - 1, - ) - assert isinstance(m.constraints.coeffs.c.data, dask.array.core.Array) - - -def test_constraint_assignment_with_reindex() -> None: - m: Model = Model() - - lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) - upper = 
xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper, name="x") - y = m.add_variables(name="y") - - m.add_constraints(1 * x + 10 * y, EQUAL, 0) - - shuffled_coords = [2, 1, 3, 4, 6, 5, 7, 9, 8, 0] - - con = x.loc[shuffled_coords] + y >= 10 - assert (con.coords["dim_0"].values == shuffled_coords).all() - - -@pytest.mark.parametrize( - "rhs_factory", - [ - pytest.param(lambda m, v: v, id="numpy"), - pytest.param(lambda m, v: xr.DataArray(v, dims=["dim_0"]), id="dataarray"), - pytest.param(lambda m, v: pd.Series(v, index=v), id="series"), - pytest.param( - lambda m, v: m.add_variables(coords=[v]), - id="variable", - ), - pytest.param( - lambda m, v: 2 * m.add_variables(coords=[v]) + 1, - id="linexpr", - ), - ], -) -def test_constraint_rhs_lower_dim(rhs_factory: Any) -> None: - m = Model() - naxis = np.arange(10, dtype=float) - maxis = np.arange(10).astype(str) - x = m.add_variables(coords=[naxis, maxis]) - y = m.add_variables(coords=[naxis, maxis]) - - c = m.add_constraints(x - y >= rhs_factory(m, naxis)) - assert c.shape == (10, 10) - - -@pytest.mark.parametrize( - "rhs_factory", - [ - pytest.param(lambda m: np.ones((5, 3)), id="numpy"), - pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), - ], -) -def test_constraint_rhs_higher_dim_constant_warns( - rhs_factory: Any, caplog: Any -) -> None: - m = Model() - x = m.add_variables(coords=[range(5)], name="x") - - with caplog.at_level("WARNING", logger="linopy.expressions"): - m.add_constraints(x >= rhs_factory(m)) - assert "dimensions" in caplog.text - - -def test_constraint_rhs_higher_dim_dataarray_reindexes() -> None: - """DataArray RHS with extra dims reindexes to expression coords (no raise).""" - m = Model() - x = m.add_variables(coords=[range(5)], name="x") - rhs = xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]) - - c = m.add_constraints(x >= rhs) - assert c.shape == (5, 3) - - -@pytest.mark.parametrize( - "rhs_factory", - [ - pytest.param( - 
lambda m: m.add_variables(coords=[range(5), range(3)]), - id="variable", - ), - pytest.param( - lambda m: 2 * m.add_variables(coords=[range(5), range(3)]) + 1, - id="linexpr", - ), - ], -) -def test_constraint_rhs_higher_dim_expression(rhs_factory: Any) -> None: - m = Model() - x = m.add_variables(coords=[range(5)], name="x") - - c = m.add_constraints(x >= rhs_factory(m)) - assert c.shape == (5, 3) - - -def test_wrong_constraint_assignment_repeated() -> None: - # repeated variable assignment is forbidden - m: Model = Model() - x = m.add_variables() - m.add_constraints(x, LESS_EQUAL, 0, name="con") - with pytest.raises(ValueError): - m.add_constraints(x, LESS_EQUAL, 0, name="con") - - -def test_masked_constraints() -> None: - m: Model = Model() - - lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) - upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper) - y = m.add_variables() - - mask = pd.Series([True] * 5 + [False] * 5) - m.add_constraints(1 * x + 10 * y, EQUAL, 0, mask=mask) - assert (m.constraints.labels.con0[0:5, :] != -1).all() - assert (m.constraints.labels.con0[5:10, :] == -1).all() - - -def test_masked_constraints_broadcast() -> None: - m: Model = Model() - - lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) - upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper) - y = m.add_variables() - - mask = pd.Series([True] * 5 + [False] * 5) - m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc1", mask=mask) - assert (m.constraints.labels.bc1[0:5, :] != -1).all() - assert (m.constraints.labels.bc1[5:10, :] == -1).all() - - mask2 = xr.DataArray([True] * 5 + [False] * 5, dims=["dim_1"]) - m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc2", mask=mask2) - assert (m.constraints.labels.bc2[:, 0:5] != -1).all() - assert (m.constraints.labels.bc2[:, 5:10] == -1).all() - - mask3 = xr.DataArray( - [True, True, False, False, 
False], - dims=["dim_0"], - coords={"dim_0": range(5)}, - ) - with pytest.warns(FutureWarning, match="Missing values will be filled"): - m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc3", mask=mask3) - assert (m.constraints.labels.bc3[0:2, :] != -1).all() - assert (m.constraints.labels.bc3[2:5, :] == -1).all() - assert (m.constraints.labels.bc3[5:10, :] == -1).all() - - # Mask with extra dimension not in data should raise - mask4 = xr.DataArray([True, False], dims=["extra_dim"]) - with pytest.raises(AssertionError, match="not a subset"): - m.add_constraints(1 * x + 10 * y, EQUAL, 0, name="bc4", mask=mask4) - - -def test_non_aligned_constraints() -> None: - m: Model = Model() - - lower = xr.DataArray(np.zeros(10), coords=[range(10)]) - x = m.add_variables(lower, name="x") - - lower = xr.DataArray(np.zeros(8), coords=[range(8)]) - y = m.add_variables(lower, name="y") - - m.add_constraints(x == 0.0) - m.add_constraints(y == 0.0) - - with pytest.warns(UserWarning): - m.constraints.labels - - for dtype in m.constraints.labels.dtypes.values(): - assert np.issubdtype(dtype, np.integer) - - for dtype in m.constraints.coeffs.dtypes.values(): - assert np.issubdtype(dtype, np.floating) - - for dtype in m.constraints.vars.dtypes.values(): - assert np.issubdtype(dtype, np.integer) - - for dtype in m.constraints.rhs.dtypes.values(): - assert np.issubdtype(dtype, np.floating) - - -def test_constraints_flat() -> None: - m: Model = Model() - - lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) - upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper) - y = m.add_variables() - - assert isinstance(m.constraints.flat, pd.DataFrame) - assert m.constraints.flat.empty - with pytest.raises(ValueError): - m.constraints.to_matrix() - - m.add_constraints(1 * x + 10 * y, EQUAL, 0) - m.add_constraints(1 * x + 10 * y, LESS_EQUAL, 0) - m.add_constraints(1 * x + 10 * y, GREATER_EQUAL, 0) - - assert 
isinstance(m.constraints.flat, pd.DataFrame) - assert not m.constraints.flat.empty - - -def test_sanitize_infinities() -> None: - m: Model = Model() - - lower = xr.DataArray(np.zeros((10, 10)), coords=[range(10), range(10)]) - upper = xr.DataArray(np.ones((10, 10)), coords=[range(10), range(10)]) - x = m.add_variables(lower, upper, name="x") - y = m.add_variables(name="y") - - # Test correct infinities - m.add_constraints(x <= np.inf, name="con_inf") - m.add_constraints(y >= -np.inf, name="con_neg_inf") - m.constraints.sanitize_infinities() - assert (m.constraints["con_inf"].labels == -1).all() - assert (m.constraints["con_neg_inf"].labels == -1).all() - - # Test incorrect infinities - with pytest.raises(ValueError): - m.add_constraints(x >= np.inf, name="con_wrong_inf") - with pytest.raises(ValueError): - m.add_constraints(y <= -np.inf, name="con_wrong_neg_inf") - - -class TestConstraintCoordinateAlignment: - @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) - def subset(self, request: Any) -> xr.DataArray | pd.Series: - if request.param == "xarray": - return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) - - @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) - def superset(self, request: Any) -> xr.DataArray | pd.Series: - if request.param == "xarray": - return xr.DataArray( - np.arange(25, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, - ) - return pd.Series( - np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") - ) - - def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - con = v <= subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert con.rhs.sel(dim_2=3).item() == 30.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) - def 
test_var_comparison_subset( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - if sign == LESS_EQUAL: - con = v <= subset - elif sign == GREATER_EQUAL: - con = v >= subset - else: - con = v == subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - expr = v + 5 - con = expr <= subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) - assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) - def test_subset_comparison_var( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - if sign == LESS_EQUAL: - con = subset <= v - elif sign == GREATER_EQUAL: - con = subset >= v - else: - con = subset == v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert np.isnan(con.rhs.sel(dim_2=0).item()) - assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) - - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) - def test_superset_comparison_var( - self, v: Variable, superset: xr.DataArray, sign: str - ) -> None: - if sign == LESS_EQUAL: - con = superset <= v - else: - con = superset >= v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(con.lhs.coeffs.values).any() - assert not np.isnan(con.rhs.values).any() - - def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: - rhs = xr.DataArray( - [[1.0, 2.0]], - dims=["extra", "dim_2"], - coords={"dim_2": [0, 1]}, - ) - c = v <= rhs - assert "extra" in c.dims - - def test_subset_constraint_solve_integration(self) -> None: - if not available_solvers: - pytest.skip("No solver available") - solver = "highs" if "highs" in available_solvers else available_solvers[0] - m = Model() - coords = pd.RangeIndex(5, 
name="i") - x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") - subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) - m.add_constraints(x <= subset_ub, name="subset_ub") - m.add_objective(x.sum(), sense="max") - m.solve(solver_name=solver) - sol = m.solution["x"] - assert sol.sel(i=1).item() == pytest.approx(10.0) - assert sol.sel(i=3).item() == pytest.approx(20.0) - assert sol.sel(i=0).item() == pytest.approx(100.0) - assert sol.sel(i=2).item() == pytest.approx(100.0) - assert sol.sel(i=4).item() == pytest.approx(100.0) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 574994ee..651cdb99 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -551,12 +551,8 @@ def test_linear_expression_multiplication_invalid( expr / x -class TestCoordinateAlignment: - @pytest.fixture - def matching(self) -> xr.DataArray: - return xr.DataArray( - np.arange(20, dtype=float), dims=["dim_2"], coords={"dim_2": range(20)} - ) +class _CoordinateAlignmentFixtures: + """Shared fixtures for coordinate alignment test classes.""" @pytest.fixture(params=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: @@ -593,6 +589,14 @@ def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: return xr.DataArray(vals, dims=["dim_2"], coords={"dim_2": range(20)}) return pd.Series(vals, index=pd.Index(range(20), name="dim_2")) + +class TestCoordinateAlignmentV1(_CoordinateAlignmentFixtures): + @pytest.fixture + def matching(self) -> xr.DataArray: + return xr.DataArray( + np.arange(20, dtype=float), dims=["dim_2"], coords={"dim_2": range(20)} + ) + class TestSubset: """ Under v1, subset operations raise ValueError (exact join). 
@@ -1108,6 +1112,463 @@ def test_da_truediv_var_raises(self, v: Variable) -> None: da / v # type: ignore[operator] +class TestCoordinateAlignmentLegacy(_CoordinateAlignmentFixtures): + """Legacy convention: outer join with NaN fill / zero fill behavior.""" + + @pytest.fixture(autouse=True) + def _use_legacy(self, legacy_convention: None) -> None: + pass + + class TestSubset: + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_fills_zeros( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target * subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_subset_fills_zeros( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + if operand == "var": + result = v + subset + expected = expected_fill + else: + result = (v + 5) + subset + expected = expected_fill + 5 + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_subset_fills_negated( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + operand: str, + ) -> None: + if operand == "var": + result = v - subset + expected = -expected_fill + else: + result = (v + 5) - subset + expected = 5 - expected_fill + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_subset_inverts_nonzero( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target 
= v if operand == "var" else 1 * v + result = target / subset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) + assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + + def test_subset_add_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset + v + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + def test_subset_sub_var_coefficients( + self, v: Variable, subset: xr.DataArray + ) -> None: + result = subset - v + np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) + + class TestSuperset: + def test_add_superset_pins_to_lhs_coords( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v + superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + + def test_add_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset + v, v + superset) + + def test_sub_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset - v, -v + superset) + + def test_mul_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: + assert_linequal(superset * v, v * superset) + + def test_mul_superset_pins_to_lhs_coords( + self, v: Variable, superset: xr.DataArray + ) -> None: + result = v * superset + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: + superset_nonzero = xr.DataArray( + np.arange(1, 26, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + result = v / superset_nonzero + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + class TestDisjoint: + def test_add_disjoint_fills_zeros(self, v: Variable) -> None: + 
disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v + disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, np.zeros(20)) + + def test_mul_disjoint_fills_zeros(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v * disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) + + def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + result = v / disjoint + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + + class TestCommutativity: + @pytest.mark.parametrize( + "make_lhs,make_rhs", + [ + (lambda v, s: s * v, lambda v, s: v * s), + (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), + (lambda v, s: s + v, lambda v, s: v + s), + (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), + ], + ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], + ) + def test_commutativity( + self, + v: Variable, + subset: xr.DataArray, + make_lhs: Any, + make_rhs: Any, + ) -> None: + assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) + + def test_sub_var_anticommutative( + self, v: Variable, subset: xr.DataArray + ) -> None: + assert_linequal(subset - v, -v + subset) + + def test_sub_expr_anticommutative( + self, v: Variable, subset: xr.DataArray + ) -> None: + expr = v + 5 + assert_linequal(subset - expr, -(expr - subset)) + + def test_add_commutativity_full_coords(self, v: Variable) -> None: + full = xr.DataArray( + np.arange(20, dtype=float), + dims=["dim_2"], + coords={"dim_2": 
range(20)}, + ) + assert_linequal(v + full, full + v) + + class TestQuadratic: + def test_quadexpr_add_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr + subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, expected_fill) + + def test_quadexpr_sub_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr - subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.const.values).any() + np.testing.assert_array_equal(result.const.values, -expected_fill) + + def test_quadexpr_mul_subset( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = qexpr * subset + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_mul_quadexpr( + self, + v: Variable, + subset: xr.DataArray, + expected_fill: np.ndarray, + ) -> None: + qexpr = v * v + result = subset * qexpr + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + + def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: + qexpr = v * v + assert_quadequal(subset + qexpr, qexpr + subset) + + class TestMissingValues: + """ + NaN values are filled with operation-specific neutral elements: + - Addition/subtraction: NaN -> 0 (additive identity) + - Multiplication: NaN -> 0 (zeroes out the variable) + - 
Division: NaN -> 1 (multiplicative identity, no scaling) + """ + + NAN_POSITIONS = [0, 5, 19] + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_nan_filled( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + base_const = 0.0 if operand == "var" else 5.0 + target = v if operand == "var" else v + 5 + result = target + nan_constant + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.const.values).any() + for i in self.NAN_POSITIONS: + assert result.const.values[i] == base_const + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_nan_filled( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + base_const = 0.0 if operand == "var" else 5.0 + target = v if operand == "var" else v + 5 + result = target - nan_constant + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.const.values).any() + for i in self.NAN_POSITIONS: + assert result.const.values[i] == base_const + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_nan_filled( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target * nan_constant + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.coeffs.squeeze().values).any() + for i in self.NAN_POSITIONS: + assert result.coeffs.squeeze().values[i] == 0.0 + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_nan_filled( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + operand: str, + ) -> None: + target = v if operand == "var" else 1 * v + result = target / nan_constant + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.coeffs.squeeze().values).any() + original_coeffs = (1 * v).coeffs.squeeze().values + for i in self.NAN_POSITIONS: + assert result.coeffs.squeeze().values[i] == original_coeffs[i] + + def test_add_commutativity( + self, 
+ v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + result_a = v + nan_constant + result_b = nan_constant + v + assert not np.isnan(result_a.const.values).any() + assert not np.isnan(result_b.const.values).any() + np.testing.assert_array_equal(result_a.const.values, result_b.const.values) + np.testing.assert_array_equal( + result_a.coeffs.values, result_b.coeffs.values + ) + + def test_mul_commutativity( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + result_a = v * nan_constant + result_b = nan_constant * v + assert not np.isnan(result_a.coeffs.values).any() + assert not np.isnan(result_b.coeffs.values).any() + np.testing.assert_array_equal( + result_a.coeffs.values, result_b.coeffs.values + ) + + def test_quadexpr_add_nan( + self, + v: Variable, + nan_constant: xr.DataArray | pd.Series, + ) -> None: + qexpr = v * v + result = qexpr + nan_constant + assert isinstance(result, QuadraticExpression) + assert result.sizes["dim_2"] == 20 + assert not np.isnan(result.const.values).any() + + class TestExpressionWithNaN: + """Test that NaN in expression's own const/coeffs doesn't propagate.""" + + def test_shifted_expr_add_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr + 5 + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == 5.0 + + def test_shifted_expr_mul_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr * 2 + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_add_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + expr = (1 * v).shift(dim_2=1) + result = expr + arr + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == 0.0 + + def test_shifted_expr_mul_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result 
= expr * arr + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_div_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr / 2 + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_shifted_expr_sub_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr - 3 + assert not np.isnan(result.const.values).any() + assert result.const.values[0] == -3.0 + + def test_shifted_expr_div_array(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr / arr + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: + nan_coeff = np.ones(v.sizes["dim_2"]) + nan_coeff[0] = np.nan + result = v.to_linexpr(nan_coeff) + assert not np.isnan(result.coeffs.squeeze().values).any() + assert result.coeffs.squeeze().values[0] == 0.0 + + class TestMultiDim: + def test_multidim_subset_mul(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w * subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.coeffs.values).any() + assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) + assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) + + def test_multidim_subset_add(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, 
name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + result = w + subset_2d + assert result.sizes["a"] == 4 + assert result.sizes["b"] == 5 + assert not np.isnan(result.const.values).any() + assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0) + assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) + assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) + + class TestXarrayCompat: + def test_da_eq_da_still_works(self) -> None: + da1 = xr.DataArray([1, 2, 3]) + da2 = xr.DataArray([1, 2, 3]) + result = da1 == da2 + assert result.values.all() + + def test_da_eq_scalar_still_works(self) -> None: + da = xr.DataArray([1, 2, 3]) + result = da == 2 + np.testing.assert_array_equal(result.values, [False, True, False]) + + def test_da_truediv_var_raises(self, v: Variable) -> None: + da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) + with pytest.raises(TypeError): + da / v # type: ignore[operator] + + def test_expression_inherited_properties(x: Variable, y: Variable) -> None: expr = 10 * x + y assert isinstance(expr.attrs, dict) @@ -2239,3 +2700,49 @@ def test_quadratic_mul_constant_join_inner( const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.mul(const, join="inner") assert list(result.data.indexes["i"]) == [1, 2] + + +class TestJoinParameterLegacy: + """Legacy convention: default outer join for mismatched coords.""" + + @pytest.fixture(autouse=True) + def _use_legacy(self, legacy_convention: None) -> None: + pass + + @pytest.fixture + def m2(self) -> Model: + m = Model() + m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="a") + m.add_variables(coords=[pd.Index([1, 2, 3], name="i")], name="b") + return m + + @pytest.fixture + def a(self, m2: Model) -> Variable: + return m2.variables["a"] + + 
@pytest.fixture + def b(self, m2: Model) -> Variable: + return m2.variables["b"] + + def test_add_join_none_preserves_default(self, a: Variable, b: Variable) -> None: + result_default = a.to_linexpr() + b.to_linexpr() + result_none = a.to_linexpr().add(b.to_linexpr(), join=None) + assert_linequal(result_default, result_none) + + def test_quadratic_add_constant_join_inner(self, a: Variable, b: Variable) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: + quad = a.to_linexpr() * a.to_linexpr() + const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [0, 1] + + def test_quadratic_mul_constant_join_inner(self, a: Variable, b: Variable) -> None: + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] diff --git a/test/test_linear_expression_legacy.py b/test/test_linear_expression_legacy.py deleted file mode 100644 index 1378f48d..00000000 --- a/test/test_linear_expression_legacy.py +++ /dev/null @@ -1,2160 +0,0 @@ -#!/usr/bin/env python3 -""" -Created on Wed Mar 17 17:06:36 2021. 
- -@author: fabian -""" - -from __future__ import annotations - -from typing import Any - -import numpy as np -import pandas as pd -import polars as pl -import pytest -import xarray as xr -from xarray.testing import assert_equal - -from linopy import LinearExpression, Model, QuadraticExpression, Variable, merge -from linopy.constants import HELPER_DIMS, TERM_DIM -from linopy.expressions import ScalarLinearExpression -from linopy.testing import assert_linequal, assert_quadequal -from linopy.variables import ScalarVariable - - -def test_empty_linexpr(m: Model) -> None: - LinearExpression(None, m) - - -def test_linexpr_with_wrong_data(m: Model) -> None: - with pytest.raises(ValueError): - LinearExpression(xr.Dataset({"a": [1]}), m) - - coeffs = xr.DataArray([1, 2], dims=["a"]) - vars = xr.DataArray([1, 2], dims=["a"]) - data = xr.Dataset({"coeffs": coeffs, "vars": vars}) - with pytest.raises(ValueError): - LinearExpression(data, m) - - # with model as None - coeffs = xr.DataArray(np.array([1, 2]), dims=[TERM_DIM]) - vars = xr.DataArray(np.array([1, 2]), dims=[TERM_DIM]) - data = xr.Dataset({"coeffs": coeffs, "vars": vars}) - with pytest.raises(ValueError): - LinearExpression(data, None) # type: ignore - - -def test_linexpr_with_helper_dims_as_coords(m: Model) -> None: - coords = [pd.Index([0], name="a"), pd.Index([1, 2], name=TERM_DIM)] - coeffs = xr.DataArray(np.array([[1, 2]]), coords=coords) - vars = xr.DataArray(np.array([[1, 2]]), coords=coords) - - data = xr.Dataset({"coeffs": coeffs, "vars": vars}) - assert set(HELPER_DIMS).intersection(set(data.coords)) - - expr = LinearExpression(data, m) - assert not set(HELPER_DIMS).intersection(set(expr.data.coords)) - - -def test_linexpr_with_data_without_coords(m: Model) -> None: - lhs = 1 * m["x"] - vars = xr.DataArray(lhs.vars.values, dims=["dim_0", TERM_DIM]) - coeffs = xr.DataArray(lhs.coeffs.values, dims=["dim_0", TERM_DIM]) - data = xr.Dataset({"vars": vars, "coeffs": coeffs}) - expr = LinearExpression(data, m) - 
assert_linequal(expr, lhs) - - -def test_linexpr_from_constant_dataarray(m: Model) -> None: - const = xr.DataArray([1, 2], dims=["dim_0"]) - expr = LinearExpression(const, m) - assert (expr.const == const).all() - assert expr.nterm == 0 - - -def test_linexpr_from_constant_pl_series(m: Model) -> None: - const = pl.Series([1, 2]) - expr = LinearExpression(const, m) - assert (expr.const == const.to_numpy()).all() - assert expr.nterm == 0 - - -def test_linexpr_from_constant_pandas_series(m: Model) -> None: - const = pd.Series([1, 2], index=pd.RangeIndex(2, name="dim_0")) - expr = LinearExpression(const, m) - assert (expr.const == const).all() - assert expr.nterm == 0 - - -def test_linexpr_from_constant_pandas_dataframe(m: Model) -> None: - const = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) - expr = LinearExpression(const, m) - assert (expr.const == const).all() - assert expr.nterm == 0 - - -def test_linexpr_from_constant_numpy_array(m: Model) -> None: - const = np.array([1, 2]) - expr = LinearExpression(const, m) - assert (expr.const == const).all() - assert expr.nterm == 0 - - -def test_linexpr_from_constant_scalar(m: Model) -> None: - const = 1 - expr = LinearExpression(const, m) - assert (expr.const == const).all() - assert expr.nterm == 0 - - -def test_repr(m: Model) -> None: - expr = m.linexpr((10, "x"), (1, "y")) - expr.__repr__() - - -def test_fill_value() -> None: - isinstance(LinearExpression._fill_value, dict) - - -def test_linexpr_with_scalars(m: Model) -> None: - expr = m.linexpr((10, "x"), (1, "y")) - target = xr.DataArray( - [[10, 1], [10, 1]], coords={"dim_0": [0, 1]}, dims=["dim_0", TERM_DIM] - ) - assert_equal(expr.coeffs, target) - - -def test_linexpr_with_variables_and_constants( - m: Model, x: Variable, y: Variable -) -> None: - expr = m.linexpr((10, x), (1, y), 2) - assert (expr.const == 2).all() - - -def test_linexpr_with_series(m: Model, v: Variable) -> None: - lhs = pd.Series(np.arange(20)), v - expr = m.linexpr(lhs) - isinstance(expr, 
LinearExpression) - - -def test_linexpr_with_dataframe(m: Model, z: Variable) -> None: - lhs = pd.DataFrame(z.labels), z - expr = m.linexpr(lhs) - isinstance(expr, LinearExpression) - - -def test_linexpr_duplicated_index(m: Model) -> None: - expr = m.linexpr((10, "x"), (-1, "x")) - assert (expr.data._term == [0, 1]).all() - - -def test_linear_expression_with_multiplication(x: Variable) -> None: - expr = 1 * x - assert isinstance(expr, LinearExpression) - assert expr.nterm == 1 - assert len(expr.vars.dim_0) == x.shape[0] - - expr = x * 1 - assert isinstance(expr, LinearExpression) - - expr2 = x.mul(1) - assert_linequal(expr, expr2) - - expr3 = expr.mul(1) - assert_linequal(expr, expr3) - - expr = x / 1 - assert isinstance(expr, LinearExpression) - - expr = x / 1.0 - assert isinstance(expr, LinearExpression) - - expr2 = x.div(1) - assert_linequal(expr, expr2) - - expr3 = expr.div(1) - assert_linequal(expr, expr3) - - expr = np.array([1, 2]) * x - assert isinstance(expr, LinearExpression) - - expr = np.array(1) * x - assert isinstance(expr, LinearExpression) - - expr = xr.DataArray(np.array([[1, 2], [2, 3]])) * x - assert isinstance(expr, LinearExpression) - - expr = pd.Series([1, 2], index=pd.RangeIndex(2, name="dim_0")) * x - assert isinstance(expr, LinearExpression) - - quad = x * x - assert isinstance(quad, QuadraticExpression) - - with pytest.raises(TypeError): - quad * quad - - expr = x * 1 - assert isinstance(expr, LinearExpression) - assert expr.__mul__(object()) is NotImplemented - assert expr.__rmul__(object()) is NotImplemented - - -def test_linear_expression_with_addition(m: Model, x: Variable, y: Variable) -> None: - expr = 10 * x + y - assert isinstance(expr, LinearExpression) - assert_linequal(expr, m.linexpr((10, "x"), (1, "y"))) - - expr = x + 8 * y - assert isinstance(expr, LinearExpression) - assert_linequal(expr, m.linexpr((1, "x"), (8, "y"))) - - expr = x + y - assert isinstance(expr, LinearExpression) - assert_linequal(expr, m.linexpr((1, "x"), 
(1, "y"))) - - expr2 = x.add(y) - assert_linequal(expr, expr2) - - expr3 = (x * 1).add(y) - assert_linequal(expr, expr3) - - expr3 = x + (x * x) - assert isinstance(expr3, QuadraticExpression) - - -def test_linear_expression_with_raddition(m: Model, x: Variable) -> None: - expr = x * 1.0 - expr_2: LinearExpression = 10.0 + expr - assert isinstance(expr, LinearExpression) - expr_3: LinearExpression = expr + 10.0 - assert_linequal(expr_2, expr_3) - - -def test_linear_expression_with_subtraction(m: Model, x: Variable, y: Variable) -> None: - expr = x - y - assert isinstance(expr, LinearExpression) - assert_linequal(expr, m.linexpr((1, "x"), (-1, "y"))) - - expr2 = x.sub(y) - assert_linequal(expr, expr2) - - expr3: LinearExpression = x * 1 - expr4 = expr3.sub(y) - assert_linequal(expr, expr4) - - expr = -x - 8 * y - assert isinstance(expr, LinearExpression) - assert_linequal(expr, m.linexpr((-1, "x"), (-8, "y"))) - - -def test_linear_expression_rsubtraction(x: Variable, y: Variable) -> None: - expr = x * 1.0 - expr_2: LinearExpression = 10.0 - expr - assert isinstance(expr_2, LinearExpression) - expr_3: LinearExpression = (expr - 10.0) * -1 - assert_linequal(expr_2, expr_3) - assert expr.__rsub__(object()) is NotImplemented - - -def test_linear_expression_with_constant(m: Model, x: Variable, y: Variable) -> None: - expr = x + 1 - assert isinstance(expr, LinearExpression) - assert (expr.const == 1).all() - - expr = -x - 8 * y - 10 - assert isinstance(expr, LinearExpression) - assert (expr.const == -10).all() - assert expr.nterm == 2 - - -def test_linear_expression_with_constant_multiplication( - m: Model, x: Variable, y: Variable -) -> None: - expr = x + 1 - - obs = expr * 10 - assert isinstance(obs, LinearExpression) - assert (obs.const == 10).all() - - obs = expr * pd.Series([1, 2, 3], index=pd.RangeIndex(3, name="new_dim")) - assert isinstance(obs, LinearExpression) - assert obs.shape == (2, 3, 1) - - -def test_linear_expression_multi_indexed(u: Variable) -> None: - 
expr = 3 * u + 1 * u - assert isinstance(expr, LinearExpression) - - -def test_linear_expression_with_errors(m: Model, x: Variable) -> None: - with pytest.raises(TypeError): - x / x - - with pytest.raises(TypeError): - x / (1 * x) - - with pytest.raises(TypeError): - m.linexpr((10, x.labels), (1, "y")) - - with pytest.raises(TypeError): - m.linexpr(a=2) # type: ignore - - -def test_linear_expression_from_rule(m: Model, x: Variable, y: Variable) -> None: - def bound(m: Model, i: int) -> ScalarLinearExpression: - return ( - (i - 1) * x.at[i - 1] + y.at[i] + 1 * x.at[i] - if i == 1 - else i * x.at[i] - y.at[i] - ) - - expr = LinearExpression.from_rule(m, bound, x.coords) - assert isinstance(expr, LinearExpression) - assert expr.nterm == 3 - repr(expr) # test repr - - -def test_linear_expression_from_rule_with_return_none( - m: Model, x: Variable, y: Variable -) -> None: - # with return type None - def bound(m: Model, i: int) -> ScalarLinearExpression | None: - if i == 1: - return (i - 1) * x.at[i - 1] + y.at[i] - return None - - expr = LinearExpression.from_rule(m, bound, x.coords) - assert isinstance(expr, LinearExpression) - assert (expr.vars[0] == -1).all() - assert (expr.vars[1] != -1).all() - assert expr.coeffs[0].isnull().all() - assert expr.coeffs[1].notnull().all() - repr(expr) # test repr - - -def test_linear_expression_addition(x: Variable, y: Variable, z: Variable) -> None: - expr = 10 * x + y - other = 2 * y + z - res = expr + other - - assert res.nterm == expr.nterm + other.nterm - assert (res.coords["dim_0"] == expr.coords["dim_0"]).all() - assert (res.coords["dim_1"] == other.coords["dim_1"]).all() - assert res.data.notnull().all().to_array().all() - - res2 = expr.add(other) - assert_linequal(res, res2) - - assert isinstance(x - expr, LinearExpression) - assert isinstance(x + expr, LinearExpression) - - -def test_linear_expression_addition_with_constant( - x: Variable, y: Variable, z: Variable -) -> None: - expr = 10 * x + y + 10 - assert (expr.const == 
10).all() - - expr = 10 * x + y + np.array([2, 3]) - assert list(expr.const) == [2, 3] - - expr = 10 * x + y + pd.Series([2, 3]) - assert list(expr.const) == [2, 3] - - -def test_linear_expression_subtraction(x: Variable, y: Variable, z: Variable) -> None: - expr = 10 * x + y - 10 - assert (expr.const == -10).all() - - expr = 10 * x + y - np.array([2, 3]) - assert list(expr.const) == [-2, -3] - - expr = 10 * x + y - pd.Series([2, 3]) - assert list(expr.const) == [-2, -3] - - -def test_linear_expression_substraction( - x: Variable, y: Variable, z: Variable, v: Variable -) -> None: - expr = 10 * x + y - other = 2 * y - z - res = expr - other - - assert res.nterm == expr.nterm + other.nterm - assert (res.coords["dim_0"] == expr.coords["dim_0"]).all() - assert (res.coords["dim_1"] == other.coords["dim_1"]).all() - assert res.data.notnull().all().to_array().all() - - -def test_linear_expression_sum( - x: Variable, y: Variable, z: Variable, v: Variable -) -> None: - expr = 10 * x + y + z - res = expr.sum("dim_0") - - assert res.size == expr.size - assert res.nterm == expr.nterm * len(expr.data.dim_0) - - res = expr.sum() - assert res.size == expr.size - assert res.nterm == expr.size - assert res.data.notnull().all().to_array().all() - - assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - - # test special case otherride coords - expr = v.loc[:9] + v.loc[10:] - assert expr.nterm == 2 - assert len(expr.coords["dim_2"]) == 10 - - -def test_linear_expression_sum_with_const( - x: Variable, y: Variable, z: Variable, v: Variable -) -> None: - expr = 10 * x + y + z + 10 - res = expr.sum("dim_0") - - assert res.size == expr.size - assert res.nterm == expr.nterm * len(expr.data.dim_0) - assert (res.const == 20).all() - - res = expr.sum() - assert res.size == expr.size - assert res.nterm == expr.size - assert res.data.notnull().all().to_array().all() - assert (res.const == 60).item() - - assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - - # test 
special case otherride coords - expr = v.loc[:9] + v.loc[10:] - assert expr.nterm == 2 - assert len(expr.coords["dim_2"]) == 10 - - -def test_linear_expression_sum_drop_zeros(z: Variable) -> None: - coeff = xr.zeros_like(z.labels) - coeff[1, 0] = 3 - coeff[0, 2] = 5 - expr = coeff * z - - res = expr.sum("dim_0", drop_zeros=True) - assert res.nterm == 1 - - res = expr.sum("dim_1", drop_zeros=True) - assert res.nterm == 1 - - coeff[1, 2] = 4 - expr.data["coeffs"] = coeff - res = expr.sum() - - res = expr.sum("dim_0", drop_zeros=True) - assert res.nterm == 2 - - res = expr.sum("dim_1", drop_zeros=True) - assert res.nterm == 2 - - -def test_linear_expression_sum_warn_using_dims(z: Variable) -> None: - with pytest.warns(DeprecationWarning): - (1 * z).sum(dims="dim_0") - - -def test_linear_expression_sum_warn_unknown_kwargs(z: Variable) -> None: - with pytest.raises(ValueError): - (1 * z).sum(unknown_kwarg="dim_0") - - -def test_linear_expression_power(x: Variable) -> None: - expr: LinearExpression = x * 1.0 - qd_expr = expr**2 - assert isinstance(qd_expr, QuadraticExpression) - - qd_expr2 = expr.pow(2) - assert_quadequal(qd_expr, qd_expr2) - - with pytest.raises(ValueError): - expr**3 - - -def test_linear_expression_multiplication( - x: Variable, y: Variable, z: Variable -) -> None: - expr = 10 * x + y + z - mexpr = expr * 10 - assert (mexpr.coeffs.sel(dim_1=0, dim_0=0, _term=0) == 100).item() - - mexpr = 10 * expr - assert (mexpr.coeffs.sel(dim_1=0, dim_0=0, _term=0) == 100).item() - - mexpr = expr / 100 - assert (mexpr.coeffs.sel(dim_1=0, dim_0=0, _term=0) == 1 / 10).item() - - mexpr = expr / 100.0 - assert (mexpr.coeffs.sel(dim_1=0, dim_0=0, _term=0) == 1 / 10).item() - - -def test_matmul_variable_and_const(x: Variable, y: Variable) -> None: - const = np.array([1, 2]) - expr = x @ const - assert expr.nterm == 2 - assert_linequal(expr, (x * const).sum()) - - assert_linequal(x @ const, (x * const).sum()) - - assert_linequal(x.dot(const), x @ const) - - -def 
test_matmul_expr_and_const(x: Variable, y: Variable) -> None: - expr = 10 * x + y - const = np.array([1, 2]) - res = expr @ const - target = (10 * x) @ const + y @ const - assert res.nterm == 4 - assert_linequal(res, target) - - assert_linequal(expr.dot(const), target) - - -def test_matmul_wrong_input(x: Variable, y: Variable, z: Variable) -> None: - expr = 10 * x + y + z - with pytest.raises(TypeError): - expr @ expr - - -def test_linear_expression_multiplication_invalid( - x: Variable, y: Variable, z: Variable -) -> None: - expr = 10 * x + y + z - - with pytest.raises(TypeError): - expr = 10 * x + y + z - expr * expr - - with pytest.raises(TypeError): - expr = 10 * x + y + z - expr / x - - -class TestCoordinateAlignment: - @pytest.fixture(params=["da", "series"]) - def subset(self, request: Any) -> xr.DataArray | pd.Series: - if request.param == "da": - return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) - - @pytest.fixture(params=["da", "series"]) - def superset(self, request: Any) -> xr.DataArray | pd.Series: - if request.param == "da": - return xr.DataArray( - np.arange(25, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, - ) - return pd.Series( - np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") - ) - - @pytest.fixture - def expected_fill(self) -> np.ndarray: - arr = np.zeros(20) - arr[1] = 10.0 - arr[3] = 30.0 - return arr - - @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) - def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: - vals = np.arange(20, dtype=float) - vals[0] = np.nan - vals[5] = np.nan - vals[19] = np.nan - if request.param == "xarray": - return xr.DataArray(vals, dims=["dim_2"], coords={"dim_2": range(20)}) - return pd.Series(vals, index=pd.Index(range(20), name="dim_2")) - - class TestSubset: - @pytest.mark.parametrize("operand", ["var", "expr"]) - def 
test_mul_subset_fills_zeros( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - operand: str, - ) -> None: - target = v if operand == "var" else 1 * v - result = target * subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_subset_fills_zeros( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - operand: str, - ) -> None: - if operand == "var": - result = v + subset - expected = expected_fill - else: - result = (v + 5) + subset - expected = expected_fill + 5 - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, expected) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_subset_fills_negated( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - operand: str, - ) -> None: - if operand == "var": - result = v - subset - expected = -expected_fill - else: - result = (v + 5) - subset - expected = 5 - expected_fill - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, expected) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_subset_inverts_nonzero( - self, v: Variable, subset: xr.DataArray, operand: str - ) -> None: - target = v if operand == "var" else 1 * v - result = target / subset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) - assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) - - def test_subset_add_var_coefficients( - self, v: Variable, subset: xr.DataArray - ) -> None: - result = subset + v - 
np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) - - def test_subset_sub_var_coefficients( - self, v: Variable, subset: xr.DataArray - ) -> None: - result = subset - v - np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) - - class TestSuperset: - def test_add_superset_pins_to_lhs_coords( - self, v: Variable, superset: xr.DataArray - ) -> None: - result = v + superset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - - def test_add_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset + v, v + superset) - - def test_sub_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset - v, -v + superset) - - def test_mul_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: - assert_linequal(superset * v, v * superset) - - def test_mul_superset_pins_to_lhs_coords( - self, v: Variable, superset: xr.DataArray - ) -> None: - result = v * superset - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - - def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: - superset_nonzero = xr.DataArray( - np.arange(1, 26, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, - ) - result = v / superset_nonzero - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - - class TestDisjoint: - def test_add_disjoint_fills_zeros(self, v: Variable) -> None: - disjoint = xr.DataArray( - [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v + disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, np.zeros(20)) - - def test_mul_disjoint_fills_zeros(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - 
result = v * disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) - - def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v / disjoint - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) - - class TestCommutativity: - @pytest.mark.parametrize( - "make_lhs,make_rhs", - [ - (lambda v, s: s * v, lambda v, s: v * s), - (lambda v, s: s * (1 * v), lambda v, s: (1 * v) * s), - (lambda v, s: s + v, lambda v, s: v + s), - (lambda v, s: s + (v + 5), lambda v, s: (v + 5) + s), - ], - ids=["subset*var", "subset*expr", "subset+var", "subset+expr"], - ) - def test_commutativity( - self, - v: Variable, - subset: xr.DataArray, - make_lhs: Any, - make_rhs: Any, - ) -> None: - assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) - - def test_sub_var_anticommutative( - self, v: Variable, subset: xr.DataArray - ) -> None: - assert_linequal(subset - v, -v + subset) - - def test_sub_expr_anticommutative( - self, v: Variable, subset: xr.DataArray - ) -> None: - expr = v + 5 - assert_linequal(subset - expr, -(expr - subset)) - - def test_add_commutativity_full_coords(self, v: Variable) -> None: - full = xr.DataArray( - np.arange(20, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(20)}, - ) - assert_linequal(v + full, full + v) - - class TestQuadratic: - def test_quadexpr_add_subset( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - ) -> None: - qexpr = v * v - result = qexpr + subset - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - 
np.testing.assert_array_equal(result.const.values, expected_fill) - - def test_quadexpr_sub_subset( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - ) -> None: - qexpr = v * v - result = qexpr - subset - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, -expected_fill) - - def test_quadexpr_mul_subset( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - ) -> None: - qexpr = v * v - result = qexpr * subset - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - def test_subset_mul_quadexpr( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - ) -> None: - qexpr = v * v - result = subset * qexpr - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: - qexpr = v * v - assert_quadequal(subset + qexpr, qexpr + subset) - - class TestMissingValues: - """ - Same shape as variable but with NaN entries in the constant. 
- - NaN values are filled with operation-specific neutral elements: - - Addition/subtraction: NaN -> 0 (additive identity) - - Multiplication: NaN -> 0 (zeroes out the variable) - - Division: NaN -> 1 (multiplicative identity, no scaling) - """ - - NAN_POSITIONS = [0, 5, 19] - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_nan_filled( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - operand: str, - ) -> None: - base_const = 0.0 if operand == "var" else 5.0 - target = v if operand == "var" else v + 5 - result = target + nan_constant - assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.const.values).any() - # At NaN positions, const should be unchanged (added 0) - for i in self.NAN_POSITIONS: - assert result.const.values[i] == base_const - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_nan_filled( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - operand: str, - ) -> None: - base_const = 0.0 if operand == "var" else 5.0 - target = v if operand == "var" else v + 5 - result = target - nan_constant - assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.const.values).any() - # At NaN positions, const should be unchanged (subtracted 0) - for i in self.NAN_POSITIONS: - assert result.const.values[i] == base_const - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_nan_filled( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - operand: str, - ) -> None: - target = v if operand == "var" else 1 * v - result = target * nan_constant - assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.coeffs.squeeze().values).any() - # At NaN positions, coeffs should be 0 (variable zeroed out) - for i in self.NAN_POSITIONS: - assert result.coeffs.squeeze().values[i] == 0.0 - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_nan_filled( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - operand: str, - ) -> 
None: - target = v if operand == "var" else 1 * v - result = target / nan_constant - assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.coeffs.squeeze().values).any() - # At NaN positions, coeffs should be unchanged (divided by 1) - original_coeffs = (1 * v).coeffs.squeeze().values - for i in self.NAN_POSITIONS: - assert result.coeffs.squeeze().values[i] == original_coeffs[i] - - def test_add_commutativity( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - ) -> None: - result_a = v + nan_constant - result_b = nan_constant + v - assert not np.isnan(result_a.const.values).any() - assert not np.isnan(result_b.const.values).any() - np.testing.assert_array_equal(result_a.const.values, result_b.const.values) - np.testing.assert_array_equal( - result_a.coeffs.values, result_b.coeffs.values - ) - - def test_mul_commutativity( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - ) -> None: - result_a = v * nan_constant - result_b = nan_constant * v - assert not np.isnan(result_a.coeffs.values).any() - assert not np.isnan(result_b.coeffs.values).any() - np.testing.assert_array_equal( - result_a.coeffs.values, result_b.coeffs.values - ) - - def test_quadexpr_add_nan( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - ) -> None: - qexpr = v * v - result = qexpr + nan_constant - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == 20 - assert not np.isnan(result.const.values).any() - - class TestExpressionWithNaN: - """Test that NaN in expression's own const/coeffs doesn't propagate.""" - - def test_shifted_expr_add_scalar(self, v: Variable) -> None: - expr = (1 * v).shift(dim_2=1) - result = expr + 5 - assert not np.isnan(result.const.values).any() - assert result.const.values[0] == 5.0 - - def test_shifted_expr_mul_scalar(self, v: Variable) -> None: - expr = (1 * v).shift(dim_2=1) - result = expr * 2 - assert not np.isnan(result.coeffs.squeeze().values).any() - assert 
# (chunk boundary) tail of a test truncated by the previous chunk:
#     ... result.coeffs.squeeze().values[0] == 0.0
# NOTE(review): the original enclosing class header lies before this chunk
# and its name is not visible; the shifted-expr methods below are grouped
# under a reconstruction class so they stay collectable by pytest.


class TestShiftedExprReconstructed:
    """Shifted expressions: missing coords must be filled (0 for coeffs/const)."""

    def test_shifted_expr_add_array(self, v: Variable) -> None:
        arr = np.arange(v.sizes["dim_2"], dtype=float)
        expr = (1 * v).shift(dim_2=1)
        result = expr + arr
        assert not np.isnan(result.const.values).any()
        assert result.const.values[0] == 0.0

    def test_shifted_expr_mul_array(self, v: Variable) -> None:
        arr = np.arange(v.sizes["dim_2"], dtype=float) + 1
        expr = (1 * v).shift(dim_2=1)
        result = expr * arr
        assert not np.isnan(result.coeffs.squeeze().values).any()
        assert result.coeffs.squeeze().values[0] == 0.0

    def test_shifted_expr_div_scalar(self, v: Variable) -> None:
        expr = (1 * v).shift(dim_2=1)
        result = expr / 2
        assert not np.isnan(result.coeffs.squeeze().values).any()
        assert result.coeffs.squeeze().values[0] == 0.0

    def test_shifted_expr_sub_scalar(self, v: Variable) -> None:
        expr = (1 * v).shift(dim_2=1)
        result = expr - 3
        assert not np.isnan(result.const.values).any()
        assert result.const.values[0] == -3.0

    def test_shifted_expr_div_array(self, v: Variable) -> None:
        arr = np.arange(v.sizes["dim_2"], dtype=float) + 1
        expr = (1 * v).shift(dim_2=1)
        result = expr / arr
        assert not np.isnan(result.coeffs.squeeze().values).any()
        assert result.coeffs.squeeze().values[0] == 0.0

    def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None:
        nan_coeff = np.ones(v.sizes["dim_2"])
        nan_coeff[0] = np.nan
        result = v.to_linexpr(nan_coeff)
        assert not np.isnan(result.coeffs.squeeze().values).any()
        assert result.coeffs.squeeze().values[0] == 0.0


class TestMultiDim:
    """Subset alignment with 2-D coordinate spaces."""

    def test_multidim_subset_mul(self, m: Model) -> None:
        coords_a = pd.RangeIndex(4, name="a")
        coords_b = pd.RangeIndex(5, name="b")
        w = m.add_variables(coords=[coords_a, coords_b], name="w")

        subset_2d = xr.DataArray(
            [[2.0, 3.0], [4.0, 5.0]],
            dims=["a", "b"],
            coords={"a": [1, 3], "b": [0, 4]},
        )
        result = w * subset_2d
        assert result.sizes["a"] == 4
        assert result.sizes["b"] == 5
        assert not np.isnan(result.coeffs.values).any()
        assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0)
        assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0)
        assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0)
        assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0)

    def test_multidim_subset_add(self, m: Model) -> None:
        coords_a = pd.RangeIndex(4, name="a")
        coords_b = pd.RangeIndex(5, name="b")
        w = m.add_variables(coords=[coords_a, coords_b], name="w")

        subset_2d = xr.DataArray(
            [[2.0, 3.0], [4.0, 5.0]],
            dims=["a", "b"],
            coords={"a": [1, 3], "b": [0, 4]},
        )
        result = w + subset_2d
        assert result.sizes["a"] == 4
        assert result.sizes["b"] == 5
        assert not np.isnan(result.const.values).any()
        assert result.const.sel(a=1, b=0).item() == pytest.approx(2.0)
        assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0)
        assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0)


class TestXarrayCompat:
    """Plain xarray semantics must be untouched by the monkey patches."""

    def test_da_eq_da_still_works(self) -> None:
        da1 = xr.DataArray([1, 2, 3])
        da2 = xr.DataArray([1, 2, 3])
        result = da1 == da2
        assert result.values.all()

    def test_da_eq_scalar_still_works(self) -> None:
        da = xr.DataArray([1, 2, 3])
        result = da == 2
        np.testing.assert_array_equal(result.values, [False, True, False])

    def test_da_truediv_var_raises(self, v: Variable) -> None:
        da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)})
        with pytest.raises(TypeError):
            da / v  # type: ignore[operator]


def test_expression_inherited_properties(x: Variable, y: Variable) -> None:
    expr = 10 * x + y
    assert isinstance(expr.attrs, dict)
    assert isinstance(expr.coords, xr.Coordinates)
    assert isinstance(expr.indexes, xr.core.indexes.Indexes)
    assert isinstance(expr.sizes, xr.core.utils.Frozen)


def test_linear_expression_getitem_single(x: Variable, y: Variable) -> None:
    expr = 10 * x + y + 3
    sel = expr[0]
    assert isinstance(sel, LinearExpression)
    assert sel.nterm == 2
    # one expression with two terms (constant is not counted)
    assert sel.size == 2


def test_linear_expression_getitem_slice(x: Variable, y: Variable) -> None:
    expr = 10 * x + y + 3
    sel = expr[:1]

    assert isinstance(sel, LinearExpression)
    assert sel.nterm == 2
    # one expression with two terms (constant is not counted)
    assert sel.size == 2


def test_linear_expression_getitem_list(x: Variable, y: Variable, z: Variable) -> None:
    expr = 10 * x + z + 10
    sel = expr[:, [0, 2]]
    assert isinstance(sel, LinearExpression)
    assert sel.nterm == 2
    # four expressions with two terms (constant is not counted)
    assert sel.size == 8


def test_linear_expression_loc(x: Variable, y: Variable) -> None:
    expr = x + y
    assert expr.loc[0].size < expr.loc[:5].size


def test_linear_expression_empty(v: Variable) -> None:
    expr = 7 * v
    assert not expr.empty
    assert expr.loc[[]].empty

    with pytest.warns(DeprecationWarning, match="use `.empty` property instead"):
        assert expr.loc[[]].empty()


def test_linear_expression_isnull(v: Variable) -> None:
    expr = np.arange(20) * v
    filter = (expr.coeffs >= 10).any(TERM_DIM)
    expr = expr.where(filter)
    assert expr.isnull().sum() == 10


def test_linear_expression_flat(v: Variable) -> None:
    coeff = np.arange(1, 21)  # use non-zero coefficients
    expr = coeff * v
    df = expr.flat
    assert isinstance(df, pd.DataFrame)
    assert (df.coeffs == coeff).all()


def test_iterate_slices(x: Variable, y: Variable) -> None:
    expr = x + 10 * y
    for s in expr.iterate_slices(slice_size=2):
        assert isinstance(s, LinearExpression)
        assert s.nterm == expr.nterm
        assert s.coord_dims == expr.coord_dims


def test_linear_expression_to_polars(v: Variable) -> None:
    coeff = np.arange(1, 21)  # use non-zero coefficients
    expr = coeff * v
    df = expr.to_polars()
    assert isinstance(df, pl.DataFrame)
    assert (df["coeffs"].to_numpy() == coeff).all()


def test_linear_expression_where(v: Variable) -> None:
    expr = np.arange(20) * v
    filter = (expr.coeffs >= 10).any(TERM_DIM)
    expr = expr.where(filter)
    assert isinstance(expr, LinearExpression)
    assert expr.nterm == 1

    expr = np.arange(20) * v
    expr = expr.where(filter, drop=True).sum()
    assert isinstance(expr, LinearExpression)
    assert expr.nterm == 10


def test_linear_expression_where_with_const(v: Variable) -> None:
    expr = np.arange(20) * v + 10
    filter = (expr.coeffs >= 10).any(TERM_DIM)
    expr = expr.where(filter)
    assert isinstance(expr, LinearExpression)
    assert expr.nterm == 1
    assert expr.const[:10].isnull().all()
    assert (expr.const[10:] == 10).all()

    expr = np.arange(20) * v + 10
    expr = expr.where(filter, drop=True).sum()
    assert isinstance(expr, LinearExpression)
    assert expr.nterm == 10
    assert expr.const == 100


def test_linear_expression_where_scalar_fill_value(v: Variable) -> None:
    expr = np.arange(20) * v + 10
    filter = (expr.coeffs >= 10).any(TERM_DIM)
    expr = expr.where(filter, 200)
    assert isinstance(expr, LinearExpression)
    assert expr.nterm == 1
    assert (expr.const[:10] == 200).all()
    assert (expr.const[10:] == 10).all()


def test_linear_expression_where_array_fill_value(v: Variable) -> None:
    expr = np.arange(20) * v + 10
    filter = (expr.coeffs >= 10).any(TERM_DIM)
    other = expr.coeffs
    expr = expr.where(filter, other)
    assert isinstance(expr, LinearExpression)
    assert expr.nterm == 1
    assert (expr.const[:10] == other[:10]).all()
    assert (expr.const[10:] == 10).all()


def test_linear_expression_where_expr_fill_value(v: Variable) -> None:
    expr = np.arange(20) * v + 10
    expr2 = np.arange(20) * v + 5
    filter = (expr.coeffs >= 10).any(TERM_DIM)
    res = expr.where(filter, expr2)
    assert isinstance(res, LinearExpression)
    assert res.nterm == 1
    assert (res.const[:10] == expr2.const[:10]).all()
    assert (res.const[10:] == 10).all()


def test_where_with_helper_dim_false(v: Variable) -> None:
    expr = np.arange(20) * v
    with pytest.raises(ValueError):
        filter = expr.coeffs >= 10
        expr.where(filter)


def test_linear_expression_shift(v: Variable) -> None:
    shifted = v.to_linexpr().shift(dim_2=2)
    assert shifted.nterm == 1
    assert shifted.coeffs.loc[:1].isnull().all()
    assert (shifted.vars.loc[:1] == -1).all()


def test_linear_expression_swap_dims(v: Variable) -> None:
    expr = v.to_linexpr()
    expr = expr.assign_coords({"second": ("dim_2", expr.indexes["dim_2"] + 100)})
    expr = expr.swap_dims({"dim_2": "second"})
    assert isinstance(expr, LinearExpression)
    assert expr.coord_dims == ("second",)


def test_linear_expression_set_index(v: Variable) -> None:
    expr = v.to_linexpr()
    expr = expr.assign_coords({"second": ("dim_2", expr.indexes["dim_2"] + 100)})
    expr = expr.set_index({"multi": ["dim_2", "second"]})
    assert isinstance(expr, LinearExpression)
    assert expr.coord_dims == ("multi",)
    assert isinstance(expr.indexes["multi"], pd.MultiIndex)


def test_linear_expression_fillna(v: Variable) -> None:
    expr = np.arange(20) * v + 10
    assert expr.const.sum() == 200

    filter = (expr.coeffs >= 10).any(TERM_DIM)
    filtered = expr.where(filter)
    assert isinstance(filtered, LinearExpression)
    assert filtered.const.sum() == 100

    filled = filtered.fillna(10)
    assert isinstance(filled, LinearExpression)
    assert filled.const.sum() == 200
    assert filled.coeffs.isnull().sum() == 10


def test_variable_expand_dims(v: Variable) -> None:
    result = v.to_linexpr().expand_dims("new_dim")
    assert isinstance(result, LinearExpression)
    assert result.coord_dims == ("dim_2", "new_dim")


def test_variable_stack(v: Variable) -> None:
    result = v.to_linexpr().expand_dims("new_dim").stack(new=("new_dim", "dim_2"))
    assert isinstance(result, LinearExpression)
    assert result.coord_dims == ("new",)


def test_linear_expression_unstack(v: Variable) -> None:
    result = v.to_linexpr().expand_dims("new_dim").stack(new=("new_dim", "dim_2"))
    result = result.unstack("new")
    assert isinstance(result, LinearExpression)
    assert result.coord_dims == ("new_dim", "dim_2")


def test_linear_expression_diff(v: Variable) -> None:
    diff = v.to_linexpr().diff("dim_2")
    assert diff.nterm == 2


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby(v: Variable, use_fallback: bool) -> None:
    expr = 1 * v
    dim = v.dims[0]
    groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords, name=dim)
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert dim in grouped.dims
    assert (grouped.data[dim] == [1, 2]).all()
    assert grouped.nterm == 10


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_on_same_name_as_target_dim(
    v: Variable, use_fallback: bool
) -> None:
    expr = 1 * v
    groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords)
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "group" in grouped.dims
    assert (grouped.data.group == [1, 2]).all()
    assert grouped.nterm == 10


@pytest.mark.parametrize("use_fallback", [True])
def test_linear_expression_groupby_ndim(z: Variable, use_fallback: bool) -> None:
    # TODO: implement fallback for n-dim groupby, see https://github.com/PyPSA/linopy/issues/299
    expr = 1 * z
    groups = xr.DataArray([[1, 1, 2], [1, 3, 3]], coords=z.coords)
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "group" in grouped.dims
    # there are three groups, 1, 2 and 3, the largest group has 3 elements
    assert (grouped.data.group == [1, 2, 3]).all()
    assert grouped.nterm == 3


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_name(v: Variable, use_fallback: bool) -> None:
    expr = 1 * v
    groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords, name="my_group")
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "my_group" in grouped.dims
    assert (grouped.data.my_group == [1, 2]).all()
    assert grouped.nterm == 10


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_series(v: Variable, use_fallback: bool) -> None:
    expr = 1 * v
    groups = pd.Series([1] * 10 + [2] * 10, index=v.indexes["dim_2"])
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "group" in grouped.dims
    assert (grouped.data.group == [1, 2]).all()
    assert grouped.nterm == 10


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_series_with_name(
    v: Variable, use_fallback: bool
) -> None:
    expr = 1 * v
    groups = pd.Series([1] * 10 + [2] * 10, index=v.indexes[v.dims[0]], name="my_group")
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "my_group" in grouped.dims
    assert (grouped.data.my_group == [1, 2]).all()
    assert grouped.nterm == 10


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_series_with_same_group_name(
    v: Variable, use_fallback: bool
) -> None:
    """
    Test that the group by works with a series whose name is the same as
    the dimension to group.
    """
    expr = 1 * v
    groups = pd.Series([1] * 10 + [2] * 10, index=v.indexes["dim_2"])
    groups.name = "dim_2"
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "dim_2" in grouped.dims
    assert (grouped.data.dim_2 == [1, 2]).all()
    assert grouped.nterm == 10


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_series_on_multiindex(
    u: Variable, use_fallback: bool
) -> None:
    expr = 1 * u
    len_grouped_dim = len(u.data["dim_3"])
    groups = pd.Series([1] * len_grouped_dim, index=u.indexes["dim_3"])
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "group" in grouped.dims
    assert (grouped.data.group == [1]).all()
    assert grouped.nterm == len_grouped_dim


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_dataframe(
    v: Variable, use_fallback: bool
) -> None:
    expr = 1 * v
    groups = pd.DataFrame(
        {"a": [1] * 10 + [2] * 10, "b": list(range(4)) * 5}, index=v.indexes["dim_2"]
    )
    if use_fallback:
        with pytest.raises(ValueError):
            expr.groupby(groups).sum(use_fallback=use_fallback)
        return

    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    index = pd.MultiIndex.from_frame(groups)
    assert "group" in grouped.dims
    assert set(grouped.data.group.values) == set(index.values)
    assert grouped.nterm == 3


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_dataframe_with_same_group_name(
    v: Variable, use_fallback: bool
) -> None:
    """
    Test that the group by works with a dataframe whose column name is the same as
    the dimension to group.
    """
    expr = 1 * v
    groups = pd.DataFrame(
        {"dim_2": [1] * 10 + [2] * 10, "b": list(range(4)) * 5},
        index=v.indexes["dim_2"],
    )
    if use_fallback:
        with pytest.raises(ValueError):
            expr.groupby(groups).sum(use_fallback=use_fallback)
        return

    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    index = pd.MultiIndex.from_frame(groups)
    assert "group" in grouped.dims
    assert set(grouped.data.group.values) == set(index.values)
    assert grouped.nterm == 3


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_dataframe_on_multiindex(
    u: Variable, use_fallback: bool
) -> None:
    expr = 1 * u
    len_grouped_dim = len(u.data["dim_3"])
    groups = pd.DataFrame({"a": [1] * len_grouped_dim}, index=u.indexes["dim_3"])

    if use_fallback:
        with pytest.raises(ValueError):
            expr.groupby(groups).sum(use_fallback=use_fallback)
        return
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "group" in grouped.dims
    assert isinstance(grouped.indexes["group"], pd.MultiIndex)
    assert grouped.nterm == len_grouped_dim


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_dataarray(
    v: Variable, use_fallback: bool
) -> None:
    expr = 1 * v
    df = pd.DataFrame(
        {"a": [1] * 10 + [2] * 10, "b": list(range(4)) * 5}, index=v.indexes["dim_2"]
    )
    groups = xr.DataArray(df)

    # this should not be the case, see https://github.com/PyPSA/linopy/issues/351
    if use_fallback:
        with pytest.raises((KeyError, IndexError)):
            expr.groupby(groups).sum(use_fallback=use_fallback)
        return

    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    index = pd.MultiIndex.from_frame(df)
    assert "group" in grouped.dims
    assert set(grouped.data.group.values) == set(index.values)
    assert grouped.nterm == 3


def test_linear_expression_groupby_with_dataframe_non_aligned(v: Variable) -> None:
    expr = 1 * v
    groups = pd.DataFrame(
        {"a": [1] * 10 + [2] * 10, "b": list(range(4)) * 5}, index=v.indexes["dim_2"]
    )
    target = expr.groupby(groups).sum()

    groups_non_aligned = groups[::-1]
    grouped = expr.groupby(groups_non_aligned).sum()
    assert_linequal(grouped, target)


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_with_const(v: Variable, use_fallback: bool) -> None:
    expr = 1 * v + 15
    groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords)
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "group" in grouped.dims
    assert (grouped.data.group == [1, 2]).all()
    assert grouped.nterm == 10
    assert (grouped.const == 150).all()


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_asymmetric(v: Variable, use_fallback: bool) -> None:
    expr = 1 * v
    # now asymetric groups which result in different nterms
    groups = xr.DataArray([1] * 12 + [2] * 8, coords=v.coords)
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "group" in grouped.dims
    # first group must be full with vars
    assert (grouped.data.sel(group=1) > 0).all()
    # the last 4 entries of the second group must be empty, i.e. -1
    assert (grouped.data.sel(group=2).isel(_term=slice(None, -4)).vars >= 0).all()
    assert (grouped.data.sel(group=2).isel(_term=slice(-4, None)).vars == -1).all()
    assert grouped.nterm == 12


@pytest.mark.parametrize("use_fallback", [True, False])
def test_linear_expression_groupby_asymmetric_with_const(
    v: Variable, use_fallback: bool
) -> None:
    expr = 1 * v + 15
    # now asymetric groups which result in different nterms
    groups = xr.DataArray([1] * 12 + [2] * 8, coords=v.coords)
    grouped = expr.groupby(groups).sum(use_fallback=use_fallback)
    assert "group" in grouped.dims
    # first group must be full with vars
    assert (grouped.data.sel(group=1) > 0).all()
    # the last 4 entries of the second group must be empty, i.e. -1
    assert (grouped.data.sel(group=2).isel(_term=slice(None, -4)).vars >= 0).all()
    assert (grouped.data.sel(group=2).isel(_term=slice(-4, None)).vars == -1).all()
    assert grouped.nterm == 12
    assert list(grouped.const) == [180, 120]


def test_linear_expression_groupby_roll(v: Variable) -> None:
    expr = 1 * v
    groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords)
    grouped = expr.groupby(groups).roll(dim_2=1)
    assert grouped.nterm == 1
    assert grouped.vars[0].item() == 19


def test_linear_expression_groupby_roll_with_const(v: Variable) -> None:
    expr = 1 * v + np.arange(20)
    groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords)
    grouped = expr.groupby(groups).roll(dim_2=1)
    assert grouped.nterm == 1
    assert grouped.vars[0].item() == 19
    assert grouped.const[0].item() == 9


def test_linear_expression_groupby_from_variable(v: Variable) -> None:
    groups = xr.DataArray([1] * 10 + [2] * 10, coords=v.coords)
    grouped = v.groupby(groups).sum()
    assert "group" in grouped.dims
    assert (grouped.data.group == [1, 2]).all()
    assert grouped.nterm == 10


def test_linear_expression_rolling(v: Variable) -> None:
    expr = 1 * v
    rolled = expr.rolling(dim_2=2).sum()
    assert rolled.nterm == 2

    rolled = expr.rolling(dim_2=3).sum()
    assert rolled.nterm == 3

    with pytest.raises(ValueError):
        expr.rolling().sum()


def test_linear_expression_rolling_with_const(v: Variable) -> None:
    expr = 1 * v + 15
    rolled = expr.rolling(dim_2=2).sum()
    assert rolled.nterm == 2
    assert rolled.const[0].item() == 15
    assert (rolled.const[1:] == 30).all()

    rolled = expr.rolling(dim_2=3).sum()
    assert rolled.nterm == 3
    assert rolled.const[0].item() == 15
    assert rolled.const[1].item() == 30
    assert (rolled.const[2:] == 45).all()


def test_linear_expression_rolling_from_variable(v: Variable) -> None:
    rolled = v.rolling(dim_2=2).sum()
    assert rolled.nterm == 2


def test_linear_expression_from_tuples(x: Variable, y: Variable) -> None:
    expr = LinearExpression.from_tuples((10, x), (1, y))
    assert isinstance(expr, LinearExpression)

    with pytest.warns(DeprecationWarning):
        expr2 = LinearExpression.from_tuples((10, x), (1,))
    assert isinstance(expr2, LinearExpression)
    assert (expr2.const == 1).all()

    expr3 = LinearExpression.from_tuples((10, x), 1)
    assert isinstance(expr3, LinearExpression)
    assert_linequal(expr2, expr3)

    expr4 = LinearExpression.from_tuples((10, x), (1, y), 1)
    assert isinstance(expr4, LinearExpression)
    assert (expr4.const == 1).all()

    expr5 = LinearExpression.from_tuples(1, model=x.model)
    assert isinstance(expr5, LinearExpression)


def test_linear_expression_from_tuples_bad_calls(
    m: Model, x: Variable, y: Variable
) -> None:
    with pytest.raises(ValueError):
        LinearExpression.from_tuples((10, x), (1, y), x)

    with pytest.raises(ValueError):
        LinearExpression.from_tuples((10, x, 3), (1, y), 1)

    sv = ScalarVariable(label=0, model=m)
    with pytest.raises(TypeError):
        LinearExpression.from_tuples((np.array([1, 1]), sv))

    with pytest.raises(TypeError):
        LinearExpression.from_tuples((x, x))

    with pytest.raises(ValueError):
        LinearExpression.from_tuples(10)


def test_linear_expression_from_constant_scalar(m: Model) -> None:
    expr = LinearExpression.from_constant(model=m, constant=10)
    assert expr.is_constant
    assert isinstance(expr, LinearExpression)
    assert (expr.const == 10).all()


def test_linear_expression_from_constant_1D(m: Model) -> None:
    arr = pd.Series(index=pd.Index([0, 1], name="t"), data=[10, 20])
    expr = LinearExpression.from_constant(model=m, constant=arr)
    assert isinstance(expr, LinearExpression)
    assert list(expr.coords.keys())[0] == "t"
    assert expr.nterm == 0
    assert (expr.const.values == [10, 20]).all()
    assert expr.is_constant


def test_constant_linear_expression_to_polars_2D(m: Model) -> None:
    index_a = pd.Index([0, 1], name="a")
    index_b = pd.Index([0, 1, 2], name="b")
    arr = np.array([[10, 20, 30], [40, 50, 60]])
    const = xr.DataArray(data=arr, coords=[index_a, index_b])

    le_variable = m.add_variables(name="var", coords=[index_a, index_b]) * 1 + const
    assert not le_variable.is_constant
    le_const = LinearExpression.from_constant(model=m, constant=const)
    assert le_const.is_constant

    var_pol = le_variable.to_polars()
    const_pol = le_const.to_polars()
    assert var_pol.shape == const_pol.shape
    assert var_pol.columns == const_pol.columns
    assert all(const_pol["const"] == var_pol["const"])
    assert all(const_pol["coeffs"].is_null())
    assert all(const_pol["vars"].is_null())


def test_linear_expression_sanitize(x: Variable, y: Variable, z: Variable) -> None:
    expr = 10 * x + y + z
    assert isinstance(expr.sanitize(), LinearExpression)


def test_merge(x: Variable, y: Variable, z: Variable) -> None:
    expr1 = (10 * x + y).sum("dim_0")
    expr2 = z.sum("dim_0")

    res = merge([expr1, expr2], cls=LinearExpression)
    assert res.nterm == 6

    res: LinearExpression = merge([expr1, expr2])  # type: ignore
    assert isinstance(res, LinearExpression)

    # now concat with same length of terms
    expr1 = z.sel(dim_0=0).sum("dim_1")
    expr2 = z.sel(dim_0=1).sum("dim_1")

    res = merge([expr1, expr2], dim="dim_1", cls=LinearExpression)
    assert res.nterm == 3

    # now with different length of terms
    expr1 = z.sel(dim_0=0, dim_1=slice(0, 1)).sum("dim_1")
    expr2 = z.sel(dim_0=1).sum("dim_1")

    res = merge([expr1, expr2], dim="dim_1", cls=LinearExpression)
    assert res.nterm == 3
    assert res.sel(dim_1=0).vars[2].item() == -1

    with pytest.warns(DeprecationWarning):
        merge(expr1, expr2)


def test_linear_expression_outer_sum(x: Variable, y: Variable) -> None:
    expr = x + y
    expr2: LinearExpression = sum([x, y])  # type: ignore
    assert_linequal(expr, expr2)

    expr = 1 * x + 2 * y
    expr2: LinearExpression = sum([1 * x, 2 * y])  # type: ignore
    assert_linequal(expr, expr2)

    assert isinstance(expr.sum(), LinearExpression)


def test_rename(x: Variable, y: Variable, z: Variable) -> None:
    expr = 10 * x + y + z
    renamed = expr.rename({"dim_0": "dim_5"})
    assert set(renamed.dims) == {"dim_1", "dim_5", TERM_DIM}
    assert renamed.nterm == 3

    renamed = expr.rename({"dim_0": "dim_1", "dim_1": "dim_2"})
    assert set(renamed.dims) == {"dim_1", "dim_2", TERM_DIM}
    assert renamed.nterm == 3


@pytest.mark.parametrize("multiple", [1.0, 0.5, 2.0, 0.0])
def test_cumsum(m: Model, multiple: float) -> None:
    # Test cumsum on variable x
    var = m.variables["x"]
    cumsum = (multiple * var).cumsum()
    # NOTE(review): bare comparison — result is discarded; missing `assert`? confirm
    cumsum.nterm == 2

    # Test cumsum on sum of variables
    expr = m.variables["x"] + m.variables["y"]
    cumsum = (multiple * expr).cumsum()
    # NOTE(review): bare comparison — result is discarded; missing `assert`? confirm
    cumsum.nterm == 2


def test_simplify_basic(x: Variable) -> None:
    """Test basic simplification with duplicate terms."""
    expr = 2 * x + 3 * x + 1 * x
    simplified = expr.simplify()
    assert simplified.nterm == 1, f"Expected 1 term, got {simplified.nterm}"

    x_len = len(x.coords["dim_0"])
    # Check that the coefficient is 6 (2 + 3 + 1)
    coeffs: np.ndarray = simplified.coeffs.values
    assert len(coeffs) == x_len, f"Expected {x_len} coefficients, got {len(coeffs)}"
    assert all(coeffs == 6.0), f"Expected coefficient 6.0, got {coeffs[0]}"


def test_simplify_multiple_dimensions() -> None:
    model = Model()
    a_index = pd.Index([0, 1, 2, 3], name="a")
    b_index = pd.Index([0, 1, 2], name="b")
    coords = [a_index, b_index]
    x = model.add_variables(name="x", coords=coords)

    expr = 2 * x + 3 * x + x
    # Simplify
    simplified = expr.simplify()
    assert simplified.nterm == 1, f"Expected 1 term, got {simplified.nterm}"
    assert simplified.ndim == 2, f"Expected 2 dimensions, got {simplified.ndim}"
    assert all(simplified.coeffs.values.reshape(-1) == 6), (
        f"Expected coefficients of 6, got {simplified.coeffs.values}"
    )


def test_simplify_with_different_variables(x: Variable, y: Variable) -> None:
    """Test that different variables are kept separate."""
    # Create expression: 2*x + 3*x + 4*y
    expr = 2 * x + 3 * x + 4 * y

    # Simplify
    simplified = expr.simplify()
    # Should have 2 terms (one for x with coeff 5, one for y with coeff 4)
    assert simplified.nterm == 2, f"Expected 2 terms, got {simplified.nterm}"

    coeffs: list[float] = simplified.coeffs.values.flatten().tolist()
    assert set(coeffs) == {5.0, 4.0}, (
        f"Expected coefficients {{5.0, 4.0}}, got {set(coeffs)}"
    )


def test_simplify_with_constant(x: Variable) -> None:
    """Test that constants are preserved."""
    expr = 2 * x + 3 * x + 10

    # Simplify
    simplified = expr.simplify()

    # Check constant is preserved
    assert all(simplified.const.values == 10.0), (
        f"Expected constant 10.0, got {simplified.const.values}"
    )

    # Check coefficients
    assert all(simplified.coeffs.values == 5.0), (
        f"Expected coefficient 5.0, got {simplified.coeffs.values}"
    )


def test_simplify_cancellation(x: Variable) -> None:
    """Test that terms cancel out correctly when coefficients sum to zero."""
    expr = x - x
    simplified = expr.simplify()

    assert simplified.nterm == 0, f"Expected 0 terms, got {simplified.nterm}"
    assert simplified.coeffs.values.size == 0
    assert simplified.vars.values.size == 0


def test_simplify_partial_cancellation(x: Variable, y: Variable) -> None:
    """Test partial cancellation where some terms cancel but others remain."""
    expr = 2 * x - 2 * x + 3 * y
    simplified = expr.simplify()

    assert simplified.nterm == 1, f"Expected 1 term, got {simplified.nterm}"
    assert all(simplified.coeffs.values == 3.0), (
        f"Expected coefficient 3.0, got {simplified.coeffs.values}"
    )


def test_constant_only_expression_mul_dataarray(m: Model) -> None:
    const_arr = xr.DataArray([2, 3], dims=["dim_0"])
    const_expr = LinearExpression(const_arr, m)
    assert const_expr.is_constant
    assert const_expr.nterm == 0

    data_arr = xr.DataArray([10, 20], dims=["dim_0"])
    expected_const = const_arr * data_arr

    result = const_expr * data_arr
    assert isinstance(result, LinearExpression)
    assert result.is_constant
    assert (result.const == expected_const).all()

    result_rev = data_arr * const_expr
    assert isinstance(result_rev, LinearExpression)
    assert result_rev.is_constant
    assert (result_rev.const == expected_const).all()


def test_constant_only_expression_mul_linexpr_with_vars(m: Model, x: Variable) -> None:
    const_arr = xr.DataArray([2, 3], dims=["dim_0"])
    const_expr = LinearExpression(const_arr, m)
    assert const_expr.is_constant
    assert const_expr.nterm == 0

    expr_with_vars = 1 * x + 5
    expected_coeffs = const_arr
    expected_const = const_arr * 5

    result = const_expr * expr_with_vars
    assert isinstance(result, LinearExpression)
    assert (result.coeffs == expected_coeffs).all()
    assert (result.const == expected_const).all()

    result_rev = expr_with_vars * const_expr
    assert isinstance(result_rev, LinearExpression)
    assert (result_rev.coeffs == expected_coeffs).all()
    assert (result_rev.const == expected_const).all()


def test_constant_only_expression_mul_constant_only(m: Model) -> None:
    const_arr = xr.DataArray([2, 3], dims=["dim_0"])
    const_arr2 = xr.DataArray([4, 5], dims=["dim_0"])
    const_expr = LinearExpression(const_arr, m)
    const_expr2 = LinearExpression(const_arr2, m)
    assert const_expr.is_constant
    assert const_expr2.is_constant

    expected_const = const_arr * const_arr2

    result = const_expr * const_expr2
    assert isinstance(result, LinearExpression)
    assert result.is_constant
    assert (result.const == expected_const).all()

    result_rev = const_expr2 * const_expr
    assert isinstance(result_rev, LinearExpression)
    assert result_rev.is_constant
    assert (result_rev.const == expected_const).all()


def test_constant_only_expression_mul_linexpr_with_vars_and_const(
    m: Model, x: Variable
) -> None:
    const_arr = xr.DataArray([2, 3], dims=["dim_0"])
    const_expr = LinearExpression(const_arr, m)
    # NOTE(review): body continues on the next chunk (L676) — truncated at chunk boundary
assert const_expr.is_constant - - expr_with_vars_and_const = 4 * x + 10 - expected_coeffs = const_arr * 4 - expected_const = const_arr * 10 - - result = const_expr * expr_with_vars_and_const - assert isinstance(result, LinearExpression) - assert not result.is_constant - assert (result.coeffs == expected_coeffs).all() - assert (result.const == expected_const).all() - - result_rev = expr_with_vars_and_const * const_expr - assert isinstance(result_rev, LinearExpression) - assert not result_rev.is_constant - assert (result_rev.coeffs == expected_coeffs).all() - assert (result_rev.const == expected_const).all() - - -class TestJoinParameter: - @pytest.fixture - def m2(self) -> Model: - m = Model() - m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="a") - m.add_variables(coords=[pd.Index([1, 2, 3], name="i")], name="b") - m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="c") - return m - - @pytest.fixture - def a(self, m2: Model) -> Variable: - return m2.variables["a"] - - @pytest.fixture - def b(self, m2: Model) -> Variable: - return m2.variables["b"] - - @pytest.fixture - def c(self, m2: Model) -> Variable: - return m2.variables["c"] - - class TestAddition: - def test_add_join_none_preserves_default( - self, a: Variable, b: Variable - ) -> None: - result_default = a.to_linexpr() + b.to_linexpr() - result_none = a.to_linexpr().add(b.to_linexpr(), join=None) - assert_linequal(result_default, result_none) - - def test_add_expr_join_inner(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_add_expr_join_outer(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_add_expr_join_left(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="left") - assert list(result.data.indexes["i"]) == [0, 
1, 2] - - def test_add_expr_join_right(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().add(b.to_linexpr(), join="right") - assert list(result.data.indexes["i"]) == [1, 2, 3] - - def test_add_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_add_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().add(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: - expr = a.to_linexpr() - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [0, 1, 2]}) - result = expr.add(const, join="override") - assert list(result.data.indexes["i"]) == [0, 1, 2] - assert (result.const.values == const.values).all() - - def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: - expr_a = 1 * a + 5 - const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) - for join in ("override", "outer", "inner"): - result = expr_a.add(const, join=join) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [6, 7, 8]) - - def test_add_scalar_with_explicit_join(self, a: Variable) -> None: - expr = 1 * a + 5 - result = expr.add(10, join="override") - np.testing.assert_array_equal(result.const.values, [15, 15, 15]) - assert list(result.coords["i"].values) == [0, 1, 2] - - class TestSubtraction: - def test_sub_expr_join_inner(self, a: Variable, b: Variable) -> None: - result = a.to_linexpr().sub(b.to_linexpr(), join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_sub_constant_override(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 
7]}) - result = expr.sub(other, join="override") - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [-5, -15, -25]) - - class TestMultiplication: - def test_mul_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_mul_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().mul(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - assert result.coeffs.sel(i=0).item() == 0 - assert result.coeffs.sel(i=1).item() == 2 - assert result.coeffs.sel(i=2).item() == 3 - - def test_mul_expr_with_join_raises(self, a: Variable, b: Variable) -> None: - with pytest.raises(TypeError, match="join parameter is not supported"): - a.to_linexpr().mul(b.to_linexpr(), join="inner") - - class TestDivision: - def test_div_constant_join_inner(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().div(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_div_constant_join_outer(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.to_linexpr().div(const, join="outer") - assert list(result.data.indexes["i"]) == [0, 1, 2, 3] - - def test_div_expr_with_join_raises(self, a: Variable, b: Variable) -> None: - with pytest.raises(TypeError): - a.to_linexpr().div(b.to_linexpr(), join="outer") - - class TestVariableOperations: - def test_variable_add_join(self, a: Variable, b: Variable) -> None: - result = a.add(b, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_sub_join(self, a: Variable, b: Variable) -> None: - result = a.sub(b, join="inner") - assert 
list(result.data.indexes["i"]) == [1, 2] - - def test_variable_mul_join(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_div_join(self, a: Variable) -> None: - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = a.div(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2] - - def test_variable_add_outer_values(self, a: Variable, b: Variable) -> None: - result = a.add(b, join="outer") - assert isinstance(result, LinearExpression) - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.nterm == 2 - - def test_variable_mul_override(self, a: Variable) -> None: - other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) - result = a.mul(other, join="override") - assert isinstance(result, LinearExpression) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - - def test_variable_div_override(self, a: Variable) -> None: - other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) - result = a.div(other, join="override") - assert isinstance(result, LinearExpression) - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_almost_equal( - result.coeffs.squeeze().values, [0.5, 0.2, 0.1] - ) - - def test_same_shape_add_join_override(self, a: Variable, c: Variable) -> None: - result = a.to_linexpr().add(c.to_linexpr(), join="override") - assert list(result.data.indexes["i"]) == [0, 1, 2] - - class TestMerge: - def test_merge_join_parameter(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge( - [a.to_linexpr(), b.to_linexpr()], join="inner" - ) - assert list(result.data.indexes["i"]) == [1, 2] - - def test_merge_outer_join(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge( - 
[a.to_linexpr(), b.to_linexpr()], join="outer" - ) - assert set(result.coords["i"].values) == {0, 1, 2, 3} - - def test_merge_join_left(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge( - [a.to_linexpr(), b.to_linexpr()], join="left" - ) - assert list(result.data.indexes["i"]) == [0, 1, 2] - - def test_merge_join_right(self, a: Variable, b: Variable) -> None: - result: LinearExpression = merge( - [a.to_linexpr(), b.to_linexpr()], join="right" - ) - assert list(result.data.indexes["i"]) == [1, 2, 3] - - class TestValueVerification: - def test_add_expr_outer_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 2 * b + 10 - result = expr_a.add(expr_b, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 15 - assert result.const.sel(i=3).item() == 10 - - def test_add_expr_inner_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 2 * b + 10 - result = expr_a.add(expr_b, join="inner") - assert list(result.coords["i"].values) == [1, 2] - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 15 - - def test_add_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) - result = expr.add(const, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == 15 - assert result.const.sel(i=2).item() == 5 - assert result.const.sel(i=3).item() == 20 - - def test_add_constant_inner_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) - result = expr.add(const, join="inner") - assert list(result.coords["i"].values) == [1] - assert result.const.sel(i=1).item() == 15 - - def 
test_add_constant_override_positional(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.add(other, join="override") - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [15, 25, 35]) - - def test_sub_expr_outer_const_values(self, a: Variable, b: Variable) -> None: - expr_a = 1 * a + 5 - expr_b = 2 * b + 10 - result = expr_a.sub(expr_b, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 5 - assert result.const.sel(i=1).item() == -5 - assert result.const.sel(i=2).item() == -5 - assert result.const.sel(i=3).item() == -10 - - def test_mul_constant_override_positional(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.mul(other, join="override") - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [10, 15, 20]) - np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 3, 4]) - - def test_mul_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 5 - other = xr.DataArray([2, 3], dims=["i"], coords={"i": [1, 3]}) - result = expr.mul(other, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=0).item() == 0 - assert result.const.sel(i=1).item() == 10 - assert result.const.sel(i=2).item() == 0 - assert result.const.sel(i=3).item() == 0 - assert result.coeffs.squeeze().sel(i=1).item() == 2 - assert result.coeffs.squeeze().sel(i=0).item() == 0 - - def test_div_constant_override_positional(self, a: Variable) -> None: - expr = 1 * a + 10 - other = xr.DataArray([2.0, 5.0, 10.0], dims=["i"], coords={"i": [5, 6, 7]}) - result = expr.div(other, join="override") - assert list(result.coords["i"].values) == [0, 1, 2] - np.testing.assert_array_equal(result.const.values, [5.0, 2.0, 
1.0]) - - def test_div_constant_outer_fill_values(self, a: Variable) -> None: - expr = 1 * a + 10 - other = xr.DataArray([2.0, 5.0], dims=["i"], coords={"i": [1, 3]}) - result = expr.div(other, join="outer") - assert set(result.coords["i"].values) == {0, 1, 2, 3} - assert result.const.sel(i=1).item() == pytest.approx(5.0) - assert result.coeffs.squeeze().sel(i=1).item() == pytest.approx(0.5) - assert result.const.sel(i=0).item() == pytest.approx(10.0) - assert result.coeffs.squeeze().sel(i=0).item() == pytest.approx(1.0) - - class TestQuadratic: - def test_quadratic_add_constant_join_inner( - self, a: Variable, b: Variable - ) -> None: - quad = a.to_linexpr() * b.to_linexpr() - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] - - def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: - quad = a.to_linexpr() * a.to_linexpr() - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) - result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [0, 1] - - def test_quadratic_mul_constant_join_inner( - self, a: Variable, b: Variable - ) -> None: - quad = a.to_linexpr() * b.to_linexpr() - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = quad.mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] diff --git a/test/test_typing.py b/test/test_typing.py index 566583c2..eef3e1ba 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -1,20 +1,9 @@ -from collections.abc import Generator - -import pytest import xarray as xr import linopy -@pytest.fixture(autouse=True) -def _use_v1_convention() -> Generator[None, None, None]: - """Use v1 arithmetic convention for all tests in this module.""" - linopy.options["arithmetic_convention"] = "v1" - yield - linopy.options["arithmetic_convention"] = "legacy" - - -def test_operations_with_data_arrays_are_typed_correctly() 
-> None: +def test_operations_with_data_arrays_are_typed_correctly(convention: str) -> None: m = linopy.Model() s: xr.DataArray = xr.DataArray(5.0) @@ -36,7 +25,8 @@ def test_operations_with_data_arrays_are_typed_correctly() -> None: _ = q + s -def test_constant_with_extra_dims_broadcasts() -> None: +def test_constant_with_extra_dims_broadcasts(convention: str) -> None: + """Broadcasting with extra dims works under both conventions.""" m = linopy.Model() a: xr.DataArray = xr.DataArray([1, 2, 3]) diff --git a/test/test_typing_legacy.py b/test/test_typing_legacy.py deleted file mode 100644 index 99a27033..00000000 --- a/test/test_typing_legacy.py +++ /dev/null @@ -1,25 +0,0 @@ -import xarray as xr - -import linopy - - -def test_operations_with_data_arrays_are_typed_correctly() -> None: - m = linopy.Model() - - a: xr.DataArray = xr.DataArray([1, 2, 3]) - - v: linopy.Variable = m.add_variables(lower=0.0, name="v") - e: linopy.LinearExpression = v * 1.0 - q = v * v - - _ = a * v - _ = v * a - _ = v + a - - _ = a * e - _ = e * a - _ = e + a - - _ = a * q - _ = q * a - _ = q + a From 95012c780bee7eb8ed1794aff8f1ce09787478fb Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 10 Mar 2026 23:01:22 +0100 Subject: [PATCH 51/66] Run all tests under both conventions, split where behavior differs (#609) * Restore master tests, add autouse convention fixture - Restore test files to match master exactly (legacy behavior) - Delete legacy duplicate test files - Add autouse parametrized convention fixture: every test runs under both 'legacy' and 'v1' conventions by default - Add legacy_convention/v1_convention opt-out fixtures for convention-specific tests Co-Authored-By: Claude Opus 4.6 * Mark legacy-only tests, add v1 counterparts for differing behavior Tests that differ between conventions are split: - Legacy-only: marked with legacy_convention fixture (skipped under v1) - V1-only: marked with v1_convention fixture (skipped under legacy) 
- All other tests: run under both conventions via autouse fixture Files changed: - test_common.py: split test_align into legacy/v1 versions - test_constraints.py: mark TestConstraintCoordinateAlignment as legacy-only, add TestConstraintCoordinateAlignmentV1, split higher-dim RHS tests - test_linear_expression.py: mark TestCoordinateAlignment as legacy-only, add TestCoordinateAlignmentV1, split sum/join tests - test_piecewise_constraints.py: mark legacy-only (implementation not yet v1-compatible) - test_sos_reformulation.py: mark legacy-only (implementation not yet v1-compatible) Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- test/conftest.py | 39 +- test/test_common.py | 111 ++-- test/test_constraints.py | 243 +++----- test/test_linear_expression.py | 965 ++++++++++------------------- test/test_piecewise_constraints.py | 5 + test/test_sos_reformulation.py | 5 + test/test_typing.py | 42 +- 7 files changed, 535 insertions(+), 875 deletions(-) diff --git a/test/conftest.py b/test/conftest.py index 100a8fbf..58142984 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -58,30 +58,9 @@ def pytest_collection_modifyitems( item.add_marker(pytest.mark.gpu) -@pytest.fixture -def v1_convention() -> Generator[None, None, None]: - """Set arithmetic_convention to 'v1' for the duration of a test.""" - import linopy - - linopy.options["arithmetic_convention"] = "v1" - yield - linopy.options["arithmetic_convention"] = "legacy" - - -@pytest.fixture -def legacy_convention() -> Generator[None, None, None]: - """Set arithmetic_convention to 'legacy' for the duration of a test.""" - import linopy - - old = linopy.options["arithmetic_convention"] - linopy.options["arithmetic_convention"] = "legacy" - yield - linopy.options["arithmetic_convention"] = old - - -@pytest.fixture(params=["v1", "legacy"]) +@pytest.fixture(autouse=True, params=["legacy", "v1"]) def convention(request: pytest.FixtureRequest) -> Generator[str, None, None]: - """Run the test under 
both arithmetic conventions.""" + """Run every test under both arithmetic conventions by default.""" import linopy old = linopy.options["arithmetic_convention"] @@ -90,6 +69,20 @@ def convention(request: pytest.FixtureRequest) -> Generator[str, None, None]: linopy.options["arithmetic_convention"] = old +@pytest.fixture +def legacy_convention(convention: str) -> None: + """Opt-out: skip this test when convention is not 'legacy'.""" + if convention != "legacy": + pytest.skip("legacy-only test") + + +@pytest.fixture +def v1_convention(convention: str) -> None: + """Opt-out: skip this test when convention is not 'v1'.""" + if convention != "v1": + pytest.skip("v1-only test") + + @pytest.fixture def m() -> Model: from linopy import Model diff --git a/test/test_common.py b/test/test_common.py index 69fd9b8d..71f4be0d 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -5,8 +5,6 @@ @author: fabian """ -from collections.abc import Generator - import numpy as np import pandas as pd import polars as pl @@ -15,7 +13,6 @@ from xarray import DataArray from xarray.testing.assertions import assert_equal -import linopy from linopy import LinearExpression, Model, Variable from linopy.common import ( align, @@ -30,17 +27,6 @@ from linopy.testing import assert_linequal, assert_varequal -@pytest.fixture(autouse=True) -def _use_v1_convention() -> Generator[None, None, None]: - """Use v1 arithmetic convention for all tests in this module.""" - linopy.options["arithmetic_convention"] = "v1" - yield - linopy.options["arithmetic_convention"] = "legacy" - - -# Fixtures m, u, x are provided by conftest.py - - def test_as_dataarray_with_series_dims_default() -> None: target_dim = "dim_0" target_index = [0, 1, 2] @@ -663,11 +649,23 @@ def test_get_dims_with_index_levels() -> None: assert get_dims_with_index_levels(ds5) == [] -def test_align(x: Variable) -> None: # noqa: F811 +def test_align(legacy_convention: None, x: Variable, u: Variable) -> None: # noqa: F811 + """Legacy: align() 
defaults to inner join for mismatched coords.""" alpha = xr.DataArray([1, 2], [[1, 2]]) + beta = xr.DataArray( + [1, 2, 3], + [ + ( + "dim_3", + pd.MultiIndex.from_tuples( + [(1, "b"), (2, "b"), (1, "c")], names=["level1", "level2"] + ), + ) + ], + ) - # inner join - x_obs, alpha_obs = align(x, alpha, join="inner") + # inner join (default under legacy) + x_obs, alpha_obs = align(x, alpha) assert isinstance(x_obs, Variable) assert x_obs.shape == alpha_obs.shape == (1,) assert_varequal(x_obs, x.loc[[1]]) @@ -679,42 +677,24 @@ def test_align(x: Variable) -> None: # noqa: F811 assert_varequal(x_obs, x) assert_equal(alpha_obs, DataArray([np.nan, 1], [[0, 1]])) + # multiindex + beta_obs, u_obs = align(beta, u) + assert u_obs.shape == beta_obs.shape == (2,) + assert isinstance(u_obs, Variable) + assert_varequal(u_obs, u.loc[[(1, "b"), (2, "b")]]) + assert_equal(beta_obs, beta.loc[[(1, "b"), (2, "b")]]) + # with linear expression expr = 20 * x - x_obs, expr_obs, alpha_obs = align(x, expr, alpha, join="inner") + x_obs, expr_obs, alpha_obs = align(x, expr, alpha) assert x_obs.shape == alpha_obs.shape == (1,) assert expr_obs.shape == (1, 1) # _term dim assert isinstance(expr_obs, LinearExpression) assert_linequal(expr_obs, expr.loc[[1]]) -def test_is_constant() -> None: - model = Model() - index = pd.Index(range(10), name="t") - a = model.add_variables(name="a", coords=[index]) - b = a.sel(t=1) - c = a * 2 - d = a * a - - non_constant = [a, b, c, d] - for nc in non_constant: - assert not is_constant(nc) - - constant_values = [ - 5, - 3.14, - np.int32(7), - np.float64(2.71), - pd.Series([1, 2, 3]), - np.array([4, 5, 6]), - xr.DataArray([k for k in range(10)], coords=[index]), - ] - for cv in constant_values: - assert is_constant(cv) - - -def test_align_legacy(x: Variable, u: Variable, legacy_convention: None) -> None: - """Legacy convention: default inner join, multiindex support.""" +def test_align_v1(v1_convention: None, x: Variable, u: Variable) -> None: # noqa: F811 + 
"""V1: align() defaults to exact join; explicit join= needed for mismatched coords.""" alpha = xr.DataArray([1, 2], [[1, 2]]) beta = xr.DataArray( [1, 2, 3], @@ -728,8 +708,12 @@ def test_align_legacy(x: Variable, u: Variable, legacy_convention: None) -> None ], ) - # inner join (default) - x_obs, alpha_obs = align(x, alpha) + # exact join raises on mismatched coords + with pytest.raises(Exception): + align(x, alpha) + + # explicit inner join + x_obs, alpha_obs = align(x, alpha, join="inner") assert isinstance(x_obs, Variable) assert x_obs.shape == alpha_obs.shape == (1,) assert_varequal(x_obs, x.loc[[1]]) @@ -741,22 +725,47 @@ def test_align_legacy(x: Variable, u: Variable, legacy_convention: None) -> None assert_varequal(x_obs, x) assert_equal(alpha_obs, DataArray([np.nan, 1], [[0, 1]])) - # multiindex - beta_obs, u_obs = align(beta, u) + # multiindex with explicit inner join + beta_obs, u_obs = align(beta, u, join="inner") assert u_obs.shape == beta_obs.shape == (2,) assert isinstance(u_obs, Variable) assert_varequal(u_obs, u.loc[[(1, "b"), (2, "b")]]) assert_equal(beta_obs, beta.loc[[(1, "b"), (2, "b")]]) - # with linear expression + # with linear expression, explicit inner join expr = 20 * x - x_obs, expr_obs, alpha_obs = align(x, expr, alpha) + x_obs, expr_obs, alpha_obs = align(x, expr, alpha, join="inner") assert x_obs.shape == alpha_obs.shape == (1,) assert expr_obs.shape == (1, 1) # _term dim assert isinstance(expr_obs, LinearExpression) assert_linequal(expr_obs, expr.loc[[1]]) +def test_is_constant() -> None: + model = Model() + index = pd.Index(range(10), name="t") + a = model.add_variables(name="a", coords=[index]) + b = a.sel(t=1) + c = a * 2 + d = a * a + + non_constant = [a, b, c, d] + for nc in non_constant: + assert not is_constant(nc) + + constant_values = [ + 5, + 3.14, + np.int32(7), + np.float64(2.71), + pd.Series([1, 2, 3]), + np.array([4, 5, 6]), + xr.DataArray([k for k in range(10)], coords=[index]), + ] + for cv in constant_values: + 
assert is_constant(cv) + + def test_maybe_group_terms_polars_no_duplicates() -> None: """Fast path: distinct (labels, vars) pairs skip group_by.""" df = pl.DataFrame({"labels": [0, 0], "vars": [1, 2], "coeffs": [3.0, 4.0]}) diff --git a/test/test_constraints.py b/test/test_constraints.py index e9e58aaa..fa01e8f1 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -5,7 +5,6 @@ @author: fabulous """ -from collections.abc import Generator from typing import Any import dask @@ -15,19 +14,9 @@ import pytest import xarray as xr -import linopy from linopy import EQUAL, GREATER_EQUAL, LESS_EQUAL, Model, Variable, available_solvers from linopy.testing import assert_conequal - -@pytest.fixture(autouse=True) -def _use_v1_convention() -> Generator[None, None, None]: - """Use v1 arithmetic convention for all tests in this module.""" - linopy.options["arithmetic_convention"] = "v1" - yield - linopy.options["arithmetic_convention"] = "legacy" - - # Test model functions @@ -179,16 +168,48 @@ def test_constraint_rhs_lower_dim(rhs_factory: Any) -> None: assert c.shape == (10, 10) -def test_constraint_rhs_higher_dim_constant_broadcasts() -> None: +@pytest.mark.parametrize( + "rhs_factory", + [ + pytest.param(lambda m: np.ones((5, 3)), id="numpy"), + pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), + ], +) +def test_constraint_rhs_higher_dim_constant_warns( + legacy_convention: None, rhs_factory: Any, caplog: Any +) -> None: + """Legacy: higher-dim constant RHS warns about dimensions.""" m = Model() x = m.add_variables(coords=[range(5)], name="x") - # DataArray RHS with extra dims broadcasts (creates redundant constraints) + with caplog.at_level("WARNING", logger="linopy.expressions"): + m.add_constraints(x >= rhs_factory(m)) + assert "dimensions" in caplog.text + + +def test_constraint_rhs_higher_dim_constant_broadcasts_v1( + v1_convention: None, +) -> None: + """V1: higher-dim constant RHS broadcasts (creates redundant constraints).""" + m = 
Model() + x = m.add_variables(coords=[range(5)], name="x") rhs = xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]) c = m.add_constraints(x >= rhs, name="broadcast_con") assert "extra" in c.dims +def test_constraint_rhs_higher_dim_dataarray_reindexes( + legacy_convention: None, +) -> None: + """Legacy: DataArray RHS with extra dims reindexes to expression coords.""" + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + rhs = xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]) + + c = m.add_constraints(x >= rhs) + assert c.shape == (5, 3) + + @pytest.mark.parametrize( "rhs_factory", [ @@ -210,35 +231,6 @@ def test_constraint_rhs_higher_dim_expression(rhs_factory: Any) -> None: assert c.shape == (5, 3) -@pytest.mark.parametrize( - "rhs_factory", - [ - pytest.param(lambda m: np.ones((5, 3)), id="numpy"), - pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), - ], -) -def test_constraint_rhs_higher_dim_constant_warns_legacy( - rhs_factory: Any, caplog: Any, legacy_convention: None -) -> None: - """Legacy convention warns on higher-dim constant RHS.""" - m = Model() - x = m.add_variables(coords=[range(5)], name="x") - with caplog.at_level("WARNING", logger="linopy.expressions"): - m.add_constraints(x >= rhs_factory(m)) - assert "dimensions" in caplog.text - - -def test_constraint_rhs_higher_dim_dataarray_reindexes_legacy( - legacy_convention: None, -) -> None: - """Legacy convention: DataArray RHS with extra dims reindexes to expression coords.""" - m = Model() - x = m.add_variables(coords=[range(5)], name="x") - rhs = xr.DataArray(np.ones((5, 3)), dims=["dim_0", "extra"]) - c = m.add_constraints(x >= rhs) - assert c.shape == (5, 3) - - def test_wrong_constraint_assignment_repeated() -> None: # repeated variable assignment is forbidden m: Model = Model() @@ -368,8 +360,12 @@ def test_sanitize_infinities() -> None: m.add_constraints(y <= -np.inf, name="con_wrong_neg_inf") -class _ConstraintAlignmentFixtures: - """Shared fixtures for 
constraint coordinate alignment tests.""" +class TestConstraintCoordinateAlignmentLegacy: + """Legacy: outer join with NaN fill for constraint coordinate mismatches.""" + + @pytest.fixture(autouse=True) + def _legacy_only(self, legacy_convention: None) -> None: + pass @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: @@ -389,104 +385,6 @@ def superset(self, request: Any) -> xr.DataArray | pd.Series: np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") ) - -class TestConstraintCoordinateAlignmentV1(_ConstraintAlignmentFixtures): - def test_var_le_subset_raises(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v <= subset - - def test_var_le_subset_join_left(self, v: Variable) -> None: - subset_da = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - con = v.to_linexpr().le(subset_da, join="left") - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert con.rhs.sel(dim_2=3).item() == 30.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) - def test_var_comparison_subset_raises( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - with pytest.raises(ValueError, match="exact"): - if sign == LESS_EQUAL: - v <= subset - elif sign == GREATER_EQUAL: - v >= subset - else: - v == subset - - def test_expr_le_subset_raises(self, v: Variable, subset: xr.DataArray) -> None: - expr = v + 5 - with pytest.raises(ValueError, match="exact"): - expr <= subset - - def test_expr_le_subset_join_left(self, v: Variable) -> None: - subset_da = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - expr = v + 5 - con = expr.le(subset_da, join="left") - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) - assert 
con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - def test_subset_comparison_var_raises( - self, v: Variable, subset: xr.DataArray - ) -> None: - with pytest.raises(ValueError, match="exact"): - subset <= v - - def test_superset_comparison_var_raises( - self, v: Variable, superset: xr.DataArray - ) -> None: - with pytest.raises(ValueError, match="exact"): - superset <= v - - def test_constraint_rhs_extra_dims_raises_on_mismatch(self, v: Variable) -> None: - rhs = xr.DataArray( - [[1.0, 2.0]], - dims=["extra", "dim_2"], - coords={"dim_2": [0, 1]}, - ) - # dim_2 coords [0,1] don't match v's [0..19] under exact join - with pytest.raises(ValueError, match="exact"): - v <= rhs - - def test_constraint_rhs_extra_dims_broadcasts_matching(self, v: Variable) -> None: - rhs = xr.DataArray( - np.ones((2, 20)), - dims=["extra", "dim_2"], - coords={"dim_2": range(20)}, - ) - c = v <= rhs - assert "extra" in c.dims - - def test_subset_constraint_solve_integration(self) -> None: - if not available_solvers: - pytest.skip("No solver available") - solver = "highs" if "highs" in available_solvers else available_solvers[0] - m = Model() - coords = pd.RangeIndex(5, name="i") - x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") - subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) - # exact default raises — use explicit join="left" (NaN = no constraint) - m.add_constraints(x.to_linexpr().le(subset_ub, join="left"), name="subset_ub") - m.add_objective(x.sum(), sense="max") - m.solve(solver_name=solver) - sol = m.solution["x"] - assert sol.sel(i=1).item() == pytest.approx(10.0) - assert sol.sel(i=3).item() == pytest.approx(20.0) - assert sol.sel(i=0).item() == pytest.approx(100.0) - assert sol.sel(i=2).item() == pytest.approx(100.0) - assert sol.sel(i=4).item() == pytest.approx(100.0) - - -class TestConstraintCoordinateAlignmentLegacy(_ConstraintAlignmentFixtures): - """Legacy convention: outer join 
with NaN fill behavior for constraints.""" - - @pytest.fixture(autouse=True) - def _use_legacy(self, legacy_convention: None) -> None: - pass - def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: con = v <= subset assert con.sizes["dim_2"] == v.sizes["dim_2"] @@ -568,3 +466,64 @@ def test_subset_constraint_solve_integration(self) -> None: assert sol.sel(i=0).item() == pytest.approx(100.0) assert sol.sel(i=2).item() == pytest.approx(100.0) assert sol.sel(i=4).item() == pytest.approx(100.0) + + +class TestConstraintCoordinateAlignmentV1: + """V1: exact join raises on coordinate mismatches; explicit join= is the escape hatch.""" + + @pytest.fixture(autouse=True) + def _v1_only(self, v1_convention: None) -> None: + pass + + def test_var_le_subset_raises(self, v: Variable) -> None: + subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + with pytest.raises(ValueError, match="exact"): + v <= subset + + def test_var_le_subset_join_left(self, v: Variable) -> None: + subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + con = v.to_linexpr().le(subset, join="left") + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert con.rhs.sel(dim_2=3).item() == 30.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + def test_superset_comparison_raises(self, v: Variable) -> None: + superset = xr.DataArray( + np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} + ) + with pytest.raises(ValueError, match="exact"): + superset <= v + + def test_constraint_rhs_extra_dims_matching_broadcasts(self, v: Variable) -> None: + rhs = xr.DataArray( + np.ones((2, 20)), dims=["extra", "dim_2"], coords={"dim_2": range(20)} + ) + c = v <= rhs + assert "extra" in c.dims + + def test_constraint_rhs_extra_dims_mismatched_raises(self, v: Variable) -> None: + rhs = xr.DataArray( + [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} + ) + with 
pytest.raises(ValueError, match="exact"): + v <= rhs + + def test_subset_constraint_solve_integration(self) -> None: + if not available_solvers: + pytest.skip("No solver available") + solver = "highs" if "highs" in available_solvers else available_solvers[0] + m = Model() + coords = pd.RangeIndex(5, name="i") + x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") + subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) + # exact default raises — use explicit join="left" (NaN = no constraint) + m.add_constraints(x.to_linexpr().le(subset_ub, join="left"), name="subset_ub") + m.add_objective(x.sum(), sense="max") + m.solve(solver_name=solver) + sol = m.solution["x"] + assert sol.sel(i=1).item() == pytest.approx(10.0) + assert sol.sel(i=3).item() == pytest.approx(20.0) + assert sol.sel(i=0).item() == pytest.approx(100.0) + assert sol.sel(i=2).item() == pytest.approx(100.0) + assert sol.sel(i=4).item() == pytest.approx(100.0) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 651cdb99..e1016c35 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -7,7 +7,6 @@ from __future__ import annotations -from collections.abc import Generator from typing import Any import numpy as np @@ -17,7 +16,6 @@ import xarray as xr from xarray.testing import assert_equal -import linopy from linopy import LinearExpression, Model, QuadraticExpression, Variable, merge from linopy.constants import HELPER_DIMS, TERM_DIM from linopy.expressions import ScalarLinearExpression @@ -25,14 +23,6 @@ from linopy.variables import ScalarVariable -@pytest.fixture(autouse=True) -def _use_v1_convention() -> Generator[None, None, None]: - """Use v1 arithmetic convention for all tests in this module.""" - linopy.options["arithmetic_convention"] = "v1" - yield - linopy.options["arithmetic_convention"] = "legacy" - - def test_empty_linexpr(m: Model) -> None: LinearExpression(None, m) @@ -398,7 +388,29 @@ def 
test_linear_expression_substraction( def test_linear_expression_sum( -    x: Variable, y: Variable, z: Variable, v: Variable +    legacy_convention: None, x: Variable, y: Variable, z: Variable, v: Variable +) -> None: +    expr = 10 * x + y + z +    res = expr.sum("dim_0") + +    assert res.size == expr.size +    assert res.nterm == expr.nterm * len(expr.data.dim_0) + +    res = expr.sum() +    assert res.size == expr.size +    assert res.nterm == expr.size +    assert res.data.notnull().all().to_array().all() + +    assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) + +    # test special case override coords (legacy outer join allows this) +    expr = v.loc[:9] + v.loc[10:] +    assert expr.nterm == 2 +    assert len(expr.coords["dim_2"]) == 10 + + +def test_linear_expression_sum_v1( +    v1_convention: None, x: Variable, y: Variable, z: Variable, v: Variable ) -> None: expr = 10 * x + y + z res = expr.sum("dim_0") @@ -413,7 +425,7 @@ def test_linear_expression_sum( assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) -    # test special case override coords using assign_coords +    # v1: mismatched coords require explicit assign_coords a = v.loc[:9] b = v.loc[10:].assign_coords(dim_2=a.coords["dim_2"]) expr = a + b @@ -422,7 +434,31 @@ def test_linear_expression_sum( def test_linear_expression_sum_with_const( -    x: Variable, y: Variable, z: Variable, v: Variable +    legacy_convention: None, x: Variable, y: Variable, z: Variable, v: Variable +) -> None: +    expr = 10 * x + y + z + 10 +    res = expr.sum("dim_0") + +    assert res.size == expr.size +    assert res.nterm == expr.nterm * len(expr.data.dim_0) +    assert (res.const == 20).all() + +    res = expr.sum() +    assert res.size == expr.size +    assert res.nterm == expr.size +    assert res.data.notnull().all().to_array().all() +    assert (res.const == 60).item() + +    assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) + +    # test special case override coords (legacy outer join allows this) +    expr = v.loc[:9] + v.loc[10:] +    assert expr.nterm == 2 + 
assert len(expr.coords["dim_2"]) == 10 + + +def test_linear_expression_sum_with_const_v1( + v1_convention: None, x: Variable, y: Variable, z: Variable, v: Variable ) -> None: expr = 10 * x + y + z + 10 res = expr.sum("dim_0") @@ -439,7 +475,7 @@ def test_linear_expression_sum_with_const( assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - # test special case override coords using assign_coords + # v1: mismatched coords require explicit assign_coords a = v.loc[:9] b = v.loc[10:].assign_coords(dim_2=a.coords["dim_2"]) expr = a + b @@ -542,583 +578,57 @@ def test_linear_expression_multiplication_invalid( ) -> None: expr = 10 * x + y + z - with pytest.raises(TypeError): - expr = 10 * x + y + z - expr * expr - - with pytest.raises(TypeError): - expr = 10 * x + y + z - expr / x - - -class _CoordinateAlignmentFixtures: - """Shared fixtures for coordinate alignment test classes.""" - - @pytest.fixture(params=["da", "series"]) - def subset(self, request: Any) -> xr.DataArray | pd.Series: - if request.param == "da": - return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) - - @pytest.fixture(params=["da", "series"]) - def superset(self, request: Any) -> xr.DataArray | pd.Series: - if request.param == "da": - return xr.DataArray( - np.arange(25, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, - ) - return pd.Series( - np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") - ) - - @pytest.fixture - def expected_fill(self) -> np.ndarray: - arr = np.zeros(20) - arr[1] = 10.0 - arr[3] = 30.0 - return arr - - @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) - def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: - vals = np.arange(20, dtype=float) - vals[0] = np.nan - vals[5] = np.nan - vals[19] = np.nan - if request.param == "xarray": - return xr.DataArray(vals, dims=["dim_2"], coords={"dim_2": range(20)}) - 
return pd.Series(vals, index=pd.Index(range(20), name="dim_2")) - - -class TestCoordinateAlignmentV1(_CoordinateAlignmentFixtures): - @pytest.fixture - def matching(self) -> xr.DataArray: - return xr.DataArray( - np.arange(20, dtype=float), dims=["dim_2"], coords={"dim_2": range(20)} - ) - - class TestSubset: - """ - Under v1, subset operations raise ValueError (exact join). - Use explicit join= to recover desired behavior. - """ - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_subset_raises( - self, - v: Variable, - subset: xr.DataArray, - operand: str, - ) -> None: - target = v if operand == "var" else 1 * v - with pytest.raises(ValueError, match="exact"): - target * subset - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_subset_join_left( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - operand: str, - ) -> None: - target = v if operand == "var" else 1 * v - result = target.mul(subset, join="left") - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_subset_raises( - self, - v: Variable, - subset: xr.DataArray, - operand: str, - ) -> None: - target = v if operand == "var" else v + 5 - with pytest.raises(ValueError, match="exact"): - target + subset - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_subset_join_left( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - operand: str, - ) -> None: - if operand == "var": - result = v.add(subset, join="left") - expected = expected_fill - else: - result = (v + 5).add(subset, join="left") - expected = expected_fill + 5 - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, expected) - - 
@pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_subset_raises( - self, - v: Variable, - subset: xr.DataArray, - operand: str, - ) -> None: - target = v if operand == "var" else v + 5 - with pytest.raises(ValueError, match="exact"): - target - subset - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_subset_join_left( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - operand: str, - ) -> None: - if operand == "var": - result = v.sub(subset, join="left") - expected = -expected_fill - else: - result = (v + 5).sub(subset, join="left") - expected = 5 - expected_fill - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, expected) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_subset_raises( - self, v: Variable, subset: xr.DataArray, operand: str - ) -> None: - target = v if operand == "var" else 1 * v - with pytest.raises(ValueError, match="exact"): - target / subset - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_subset_join_left( - self, v: Variable, subset: xr.DataArray, operand: str - ) -> None: - target = v if operand == "var" else 1 * v - result = target.div(subset, join="left") - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) - assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) - - def test_subset_add_var_raises(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - subset + v - - def test_subset_sub_var_raises(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - subset - v - - class TestSuperset: - """Under v1, superset operations raise ValueError (exact join).""" - - def test_add_superset_raises(self, v: 
Variable, superset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v + superset - - def test_add_superset_join_left( - self, v: Variable, superset: xr.DataArray - ) -> None: - result = v.add(superset, join="left") - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - - def test_mul_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v * superset - - def test_mul_superset_join_inner( - self, v: Variable, superset: xr.DataArray - ) -> None: - result = v.mul(superset, join="inner") - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - - def test_div_superset_raises(self, v: Variable) -> None: - superset_nonzero = xr.DataArray( - np.arange(1, 26, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, - ) - with pytest.raises(ValueError, match="exact"): - v / superset_nonzero - - def test_div_superset_join_inner(self, v: Variable) -> None: - superset_nonzero = xr.DataArray( - np.arange(1, 26, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, - ) - result = v.div(superset_nonzero, join="inner") - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - - class TestDisjoint: - """Under v1, disjoint operations raise ValueError (exact join).""" - - def test_add_disjoint_raises(self, v: Variable) -> None: - disjoint = xr.DataArray( - [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - with pytest.raises(ValueError, match="exact"): - v + disjoint - - def test_add_disjoint_join_outer(self, v: Variable) -> None: - disjoint = xr.DataArray( - [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v.add(disjoint, join="outer") - assert result.sizes["dim_2"] == 22 # union of [0..19] and [50, 60] - - def test_mul_disjoint_raises(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], 
dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - with pytest.raises(ValueError, match="exact"): - v * disjoint - - def test_mul_disjoint_join_left(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - result = v.mul(disjoint, join="left") - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) - - def test_div_disjoint_raises(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - with pytest.raises(ValueError, match="exact"): - v / disjoint - - class TestCommutativity: - """Commutativity tests with matching coordinates under v1.""" - - def test_add_commutativity_matching_coords( - self, v: Variable, matching: xr.DataArray - ) -> None: - assert_linequal(v + matching, matching + v) - - def test_mul_commutativity_matching_coords( - self, v: Variable, matching: xr.DataArray - ) -> None: - assert_linequal(v * matching, matching * v) - - def test_subset_raises_both_sides( - self, v: Variable, subset: xr.DataArray - ) -> None: - """Subset operations raise regardless of operand order.""" - with pytest.raises(ValueError, match="exact"): - v * subset - with pytest.raises(ValueError, match="exact"): - subset * v - - def test_commutativity_with_join( - self, v: Variable, subset: xr.DataArray - ) -> None: - """Commutativity holds with explicit join.""" - assert_linequal( - v.add(subset, join="inner"), - subset + v.reindex({"dim_2": [1, 3]}), - ) - - class TestQuadratic: - """Under v1, subset operations on quadratic expressions raise.""" - - def test_quadexpr_add_subset_raises( - self, v: Variable, subset: xr.DataArray - ) -> None: - qexpr = v * v - with pytest.raises(ValueError, match="exact"): - qexpr + subset - - def test_quadexpr_add_subset_join_left( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, 
- ) -> None: - qexpr = v * v - result = qexpr.add(subset, join="left") - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, expected_fill) - - def test_quadexpr_sub_subset_raises( - self, v: Variable, subset: xr.DataArray - ) -> None: - qexpr = v * v - with pytest.raises(ValueError, match="exact"): - qexpr - subset - - def test_quadexpr_sub_subset_join_left( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - ) -> None: - qexpr = v * v - result = qexpr.sub(subset, join="left") - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.const.values).any() - np.testing.assert_array_equal(result.const.values, -expected_fill) - - def test_quadexpr_mul_subset_raises( - self, v: Variable, subset: xr.DataArray - ) -> None: - qexpr = v * v - with pytest.raises(ValueError, match="exact"): - qexpr * subset - - def test_quadexpr_mul_subset_join_left( - self, - v: Variable, - subset: xr.DataArray, - expected_fill: np.ndarray, - ) -> None: - qexpr = v * v - result = qexpr.mul(subset, join="left") - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) - - def test_quadexpr_add_matching( - self, v: Variable, matching: xr.DataArray - ) -> None: - qexpr = v * v - assert_quadequal(matching + qexpr, qexpr + matching) - - class TestMissingValues: - """ - Same shape as variable but with NaN entries in the constant. - - Under v1 convention, NaN values propagate through arithmetic - (no implicit fillna). 
- """ - - NAN_POSITIONS = [0, 5, 19] - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_nan_propagates( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - operand: str, - ) -> None: - target = v if operand == "var" else v + 5 - result = target + nan_constant - assert result.sizes["dim_2"] == 20 - for i in self.NAN_POSITIONS: - assert np.isnan(result.const.values[i]) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_nan_propagates( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - operand: str, - ) -> None: - target = v if operand == "var" else v + 5 - result = target - nan_constant - assert result.sizes["dim_2"] == 20 - for i in self.NAN_POSITIONS: - assert np.isnan(result.const.values[i]) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_nan_propagates( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - operand: str, - ) -> None: - target = v if operand == "var" else 1 * v - result = target * nan_constant - assert result.sizes["dim_2"] == 20 - for i in self.NAN_POSITIONS: - assert np.isnan(result.coeffs.squeeze().values[i]) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_nan_propagates( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - operand: str, - ) -> None: - target = v if operand == "var" else 1 * v - result = target / nan_constant - assert result.sizes["dim_2"] == 20 - for i in self.NAN_POSITIONS: - assert np.isnan(result.coeffs.squeeze().values[i]) - - def test_add_commutativity( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - ) -> None: - result_a = v + nan_constant - result_b = nan_constant + v - np.testing.assert_array_equal(result_a.const.values, result_b.const.values) - np.testing.assert_array_equal( - result_a.coeffs.values, result_b.coeffs.values - ) - - def test_mul_commutativity( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - ) -> None: - result_a = v * 
nan_constant - result_b = nan_constant * v - np.testing.assert_array_equal( - result_a.coeffs.values, result_b.coeffs.values - ) - - def test_quadexpr_add_nan_propagates( - self, - v: Variable, - nan_constant: xr.DataArray | pd.Series, - ) -> None: - qexpr = v * v - result = qexpr + nan_constant - assert isinstance(result, QuadraticExpression) - assert result.sizes["dim_2"] == 20 - for i in self.NAN_POSITIONS: - assert np.isnan(result.const.values[i]) - - class TestExpressionWithNaN: - """ - Under v1, NaN in expression's own const/coeffs propagates through - arithmetic (no implicit fillna). - """ - - def test_shifted_expr_add_scalar(self, v: Variable) -> None: - expr = (1 * v).shift(dim_2=1) - result = expr + 5 - # Position 0 has NaN from shift, NaN + 5 = NaN under v1 - assert np.isnan(result.const.values[0]) - - def test_shifted_expr_mul_scalar(self, v: Variable) -> None: - expr = (1 * v).shift(dim_2=1) - result = expr * 2 - # Position 0 has NaN coeffs from shift, NaN * 2 = NaN under v1 - assert np.isnan(result.coeffs.squeeze().values[0]) - - def test_shifted_expr_add_array(self, v: Variable) -> None: - arr = np.arange(v.sizes["dim_2"], dtype=float) - expr = (1 * v).shift(dim_2=1) - result = expr + arr - # Position 0 has NaN const from shift, NaN + 0 = NaN under v1 - assert np.isnan(result.const.values[0]) - - def test_shifted_expr_mul_array(self, v: Variable) -> None: - arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 - expr = (1 * v).shift(dim_2=1) - result = expr * arr - # Position 0 has NaN coeffs from shift, NaN * 1 = NaN under v1 - assert np.isnan(result.coeffs.squeeze().values[0]) - - def test_shifted_expr_div_scalar(self, v: Variable) -> None: - expr = (1 * v).shift(dim_2=1) - result = expr / 2 - assert np.isnan(result.coeffs.squeeze().values[0]) - - def test_shifted_expr_sub_scalar(self, v: Variable) -> None: - expr = (1 * v).shift(dim_2=1) - result = expr - 3 - assert np.isnan(result.const.values[0]) - - def test_shifted_expr_div_array(self, v: 
Variable) -> None: - arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 - expr = (1 * v).shift(dim_2=1) - result = expr / arr - assert np.isnan(result.coeffs.squeeze().values[0]) - - def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: - """to_linexpr always fills NaN coefficients with 0 (not convention-aware).""" - nan_coeff = np.ones(v.sizes["dim_2"]) - nan_coeff[0] = np.nan - result = v.to_linexpr(nan_coeff) - assert result.coeffs.squeeze().values[0] == 0.0 - - class TestMultiDim: - """Under v1, multi-dim subset operations raise.""" - - def test_multidim_subset_mul_raises(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") - coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - with pytest.raises(ValueError, match="exact"): - w * subset_2d - - def test_multidim_subset_mul_join_left(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") - coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - result = w.mul(subset_2d, join="left") - assert result.sizes["a"] == 4 - assert result.sizes["b"] == 5 - assert not np.isnan(result.coeffs.values).any() - assert result.coeffs.squeeze().sel(a=1, b=0).item() == pytest.approx(2.0) - assert result.coeffs.squeeze().sel(a=3, b=4).item() == pytest.approx(5.0) - assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) - assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) - - def test_multidim_subset_add_raises(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") - coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - 
dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - with pytest.raises(ValueError, match="exact"): - w + subset_2d - - class TestXarrayCompat: - def test_da_eq_da_still_works(self) -> None: - da1 = xr.DataArray([1, 2, 3]) - da2 = xr.DataArray([1, 2, 3]) - result = da1 == da2 - assert result.values.all() - - def test_da_eq_scalar_still_works(self) -> None: - da = xr.DataArray([1, 2, 3]) - result = da == 2 - np.testing.assert_array_equal(result.values, [False, True, False]) + with pytest.raises(TypeError): + expr = 10 * x + y + z + expr * expr - def test_da_truediv_var_raises(self, v: Variable) -> None: - da = xr.DataArray(np.ones(20), dims=["dim_2"], coords={"dim_2": range(20)}) - with pytest.raises(TypeError): - da / v # type: ignore[operator] + with pytest.raises(TypeError): + expr = 10 * x + y + z + expr / x -class TestCoordinateAlignmentLegacy(_CoordinateAlignmentFixtures): - """Legacy convention: outer join with NaN fill / zero fill behavior.""" +class TestCoordinateAlignmentLegacy: + """Legacy: outer join with NaN fill / zero fill for coordinate mismatches.""" @pytest.fixture(autouse=True) - def _use_legacy(self, legacy_convention: None) -> None: + def _legacy_only(self, legacy_convention: None) -> None: pass + @pytest.fixture(params=["da", "series"]) + def subset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) + + @pytest.fixture(params=["da", "series"]) + def superset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray( + np.arange(25, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + return pd.Series( + np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") + ) + + @pytest.fixture + def expected_fill(self) -> np.ndarray: + arr = np.zeros(20) + arr[1] = 10.0 + arr[3] = 30.0 + return arr + + 
@pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) + def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + vals[5] = np.nan + vals[19] = np.nan + if request.param == "xarray": + return xr.DataArray(vals, dims=["dim_2"], coords={"dim_2": range(20)}) + return pd.Series(vals, index=pd.Index(range(20), name="dim_2")) + class TestSubset: @pytest.mark.parametrize("operand", ["var", "expr"]) def test_mul_subset_fills_zeros( @@ -1353,6 +863,8 @@ def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: class TestMissingValues: """ + Same shape as variable but with NaN entries in the constant. + NaN values are filled with operation-specific neutral elements: - Addition/subtraction: NaN -> 0 (additive identity) - Multiplication: NaN -> 0 (zeroes out the variable) @@ -1373,6 +885,7 @@ def test_add_nan_filled( result = target + nan_constant assert result.sizes["dim_2"] == 20 assert not np.isnan(result.const.values).any() + # At NaN positions, const should be unchanged (added 0) for i in self.NAN_POSITIONS: assert result.const.values[i] == base_const @@ -1388,6 +901,7 @@ def test_sub_nan_filled( result = target - nan_constant assert result.sizes["dim_2"] == 20 assert not np.isnan(result.const.values).any() + # At NaN positions, const should be unchanged (subtracted 0) for i in self.NAN_POSITIONS: assert result.const.values[i] == base_const @@ -1402,6 +916,7 @@ def test_mul_nan_filled( result = target * nan_constant assert result.sizes["dim_2"] == 20 assert not np.isnan(result.coeffs.squeeze().values).any() + # At NaN positions, coeffs should be 0 (variable zeroed out) for i in self.NAN_POSITIONS: assert result.coeffs.squeeze().values[i] == 0.0 @@ -1416,6 +931,7 @@ def test_div_nan_filled( result = target / nan_constant assert result.sizes["dim_2"] == 20 assert not np.isnan(result.coeffs.squeeze().values).any() + # At NaN positions, coeffs should be unchanged (divided 
by 1) original_coeffs = (1 * v).coeffs.squeeze().values for i in self.NAN_POSITIONS: assert result.coeffs.squeeze().values[i] == original_coeffs[i] @@ -2417,10 +1933,18 @@ def c(self, m2: Model) -> Variable: return m2.variables["c"] class TestAddition: - def test_add_join_none_raises_on_mismatch( - self, a: Variable, b: Variable + def test_add_join_none_preserves_default( + self, legacy_convention: None, a: Variable, b: Variable + ) -> None: + """Legacy: join=None uses outer join for mismatched coords.""" + result_default = a.to_linexpr() + b.to_linexpr() + result_none = a.to_linexpr().add(b.to_linexpr(), join=None) + assert_linequal(result_default, result_none) + + def test_add_join_none_raises_on_mismatch_v1( + self, v1_convention: None, a: Variable, b: Variable ) -> None: - # a has i=[0,1,2], b has i=[1,2,3] — exact default raises + """V1: join=None uses exact join, raises on mismatched coords.""" with pytest.raises(ValueError, match="Coordinate mismatch"): a.to_linexpr() + b.to_linexpr() with pytest.raises(ValueError, match="Coordinate mismatch"): @@ -2462,7 +1986,7 @@ def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: expr_a = 1 * a + 5 const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) - for join in ("override", "outer", "inner"): + for join in ["override", "outer", "inner"]: result = expr_a.add(const, join=join) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.const.values, [6, 7, 8]) @@ -2680,8 +2204,18 @@ def test_div_constant_outer_fill_values(self, a: Variable) -> None: class TestQuadratic: def test_quadratic_add_constant_join_inner( - self, a: Variable, c: Variable + self, legacy_convention: None, a: Variable, b: Variable + ) -> None: + """Legacy: a*b with mismatched coords uses outer join.""" + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": 
[1, 2, 3]}) + result = quad.add(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_quadratic_add_constant_join_inner_v1( + self, v1_convention: None, a: Variable, c: Variable ) -> None: + """V1: use a*c (same coords) to create quad, then join inner.""" quad = a.to_linexpr() * c.to_linexpr() const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.add(const, join="inner") @@ -2694,55 +2228,232 @@ def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: assert list(result.data.indexes["i"]) == [0, 1] def test_quadratic_mul_constant_join_inner( - self, a: Variable, c: Variable + self, legacy_convention: None, a: Variable, b: Variable + ) -> None: + """Legacy: a*b with mismatched coords uses outer join.""" + quad = a.to_linexpr() * b.to_linexpr() + const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) + result = quad.mul(const, join="inner") + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_quadratic_mul_constant_join_inner_v1( + self, v1_convention: None, a: Variable, c: Variable ) -> None: + """V1: use a*c (same coords) to create quad, then join inner.""" quad = a.to_linexpr() * c.to_linexpr() const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.mul(const, join="inner") assert list(result.data.indexes["i"]) == [1, 2] -class TestJoinParameterLegacy: - """Legacy convention: default outer join for mismatched coords.""" +class TestCoordinateAlignmentV1: + """V1: exact join raises on mismatched coords; explicit join= is the escape hatch.""" @pytest.fixture(autouse=True) - def _use_legacy(self, legacy_convention: None) -> None: + def _v1_only(self, v1_convention: None) -> None: pass - @pytest.fixture - def m2(self) -> Model: - m = Model() - m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="a") - m.add_variables(coords=[pd.Index([1, 2, 3], name="i")], name="b") - return m + @pytest.fixture(params=["da", "series"]) + def 
subset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) - @pytest.fixture - def a(self, m2: Model) -> Variable: - return m2.variables["a"] + @pytest.fixture(params=["da", "series"]) + def superset(self, request: Any) -> xr.DataArray | pd.Series: + if request.param == "da": + return xr.DataArray( + np.arange(25, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + return pd.Series( + np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") + ) - @pytest.fixture - def b(self, m2: Model) -> Variable: - return m2.variables["b"] + class TestSubset: + """Under v1, subset operations raise ValueError (exact join).""" + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else 1 * v + with pytest.raises(ValueError, match="exact"): + target * subset + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else v + 5 + with pytest.raises(ValueError, match="exact"): + target + subset + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else v + 5 + with pytest.raises(ValueError, match="exact"): + target - subset + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else 1 * v + with pytest.raises(ValueError, match="exact"): + target / subset + + def test_subset_add_var_raises(self, v: Variable, subset: xr.DataArray) -> None: + with 
pytest.raises(ValueError, match="exact"): + subset + v - def test_add_join_none_preserves_default(self, a: Variable, b: Variable) -> None: - result_default = a.to_linexpr() + b.to_linexpr() - result_none = a.to_linexpr().add(b.to_linexpr(), join=None) - assert_linequal(result_default, result_none) - - def test_quadratic_add_constant_join_inner(self, a: Variable, b: Variable) -> None: - quad = a.to_linexpr() * b.to_linexpr() - const = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 2, 3]}) - result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] - - def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: - quad = a.to_linexpr() * a.to_linexpr() - const = xr.DataArray([10, 20], dims=["i"], coords={"i": [0, 1]}) - result = quad.add(const, join="inner") - assert list(result.data.indexes["i"]) == [0, 1] - - def test_quadratic_mul_constant_join_inner(self, a: Variable, b: Variable) -> None: - quad = a.to_linexpr() * b.to_linexpr() - const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) - result = quad.mul(const, join="inner") - assert list(result.data.indexes["i"]) == [1, 2, 3] + def test_subset_sub_var_raises(self, v: Variable, subset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + subset - v + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_join_left( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + """Explicit join='left' fills zeros for missing coords.""" + target = v if operand == "var" else 1 * v + result = target.mul(subset, join="left") + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + + class TestSuperset: + """Under v1, superset operations raise ValueError (exact join).""" + + def test_add_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + v + superset + + def test_mul_superset_raises(self, v: 
Variable, superset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + v * superset + + class TestDisjoint: + """Under v1, disjoint coord operations raise ValueError.""" + + def test_add_disjoint_raises(self, v: Variable) -> None: + disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + with pytest.raises(ValueError, match="exact"): + v + disjoint + + def test_mul_disjoint_raises(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + with pytest.raises(ValueError, match="exact"): + v * disjoint + + class TestCommutativity: + """Under v1, only matching coords allow commutativity.""" + + def test_add_commutativity_matching_coords(self, v: Variable) -> None: + matching = xr.DataArray( + np.arange(20, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(20)}, + ) + assert_linequal(v + matching, matching + v) + + def test_subset_raises_both_sides( + self, v: Variable, subset: xr.DataArray + ) -> None: + with pytest.raises(ValueError, match="exact"): + v * subset + with pytest.raises(ValueError, match="exact"): + subset * v + + class TestQuadratic: + """Under v1, subset operations on quadratic expressions raise.""" + + def test_quadexpr_add_subset_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + qexpr + subset + + def test_quadexpr_mul_subset_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + qexpr * subset + + class TestMissingValues: + """Under v1, NaN values propagate (no implicit fillna).""" + + NAN_POSITIONS = [0, 5, 19] + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_nan_propagates(self, v: Variable, operand: str) -> None: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + vals[5] = np.nan + vals[19] = np.nan + nan_constant = xr.DataArray( + vals, 
dims=["dim_2"], coords={"dim_2": range(20)} + ) + target = v if operand == "var" else v + 5 + result = target + nan_constant + for i in self.NAN_POSITIONS: + assert np.isnan(result.const.values[i]) + + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_nan_propagates(self, v: Variable, operand: str) -> None: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + nan_constant = xr.DataArray( + vals, dims=["dim_2"], coords={"dim_2": range(20)} + ) + target = v if operand == "var" else 1 * v + result = target * nan_constant + assert np.isnan(result.coeffs.squeeze().values[0]) + + class TestExpressionWithNaN: + """Under v1, NaN in expression's own const/coeffs propagates.""" + + def test_shifted_expr_add_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr + 5 + assert np.isnan(result.const.values[0]) + + def test_shifted_expr_mul_scalar(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr * 2 + assert np.isnan(result.coeffs.squeeze().values[0]) + + class TestMultiDim: + """Under v1, multi-dim subset operations raise.""" + + def test_multidim_subset_mul_raises(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + with pytest.raises(ValueError, match="exact"): + w * subset_2d + + def test_multidim_subset_add_raises(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + with pytest.raises(ValueError, match="exact"): + w + subset_2d diff --git a/test/test_piecewise_constraints.py b/test/test_piecewise_constraints.py index ab8e1f09..b947965d 100644 --- 
a/test/test_piecewise_constraints.py +++ b/test/test_piecewise_constraints.py @@ -51,6 +51,11 @@ ] +@pytest.fixture(autouse=True) +def _legacy_only(legacy_convention: None) -> None: + """Piecewise implementation not yet adapted for v1 convention.""" + + # =========================================================================== # slopes_to_points # =========================================================================== diff --git a/test/test_sos_reformulation.py b/test/test_sos_reformulation.py index 24ba62b3..252ecc41 100644 --- a/test/test_sos_reformulation.py +++ b/test/test_sos_reformulation.py @@ -19,6 +19,11 @@ ) +@pytest.fixture(autouse=True) +def _legacy_only(legacy_convention: None) -> None: + """SOS reformulation not yet adapted for v1 convention.""" + + class TestValidateBounds: """Tests for bound validation in compute_big_m_values.""" diff --git a/test/test_typing.py b/test/test_typing.py index eef3e1ba..99a27033 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -3,30 +3,7 @@ import linopy -def test_operations_with_data_arrays_are_typed_correctly(convention: str) -> None: - m = linopy.Model() - - s: xr.DataArray = xr.DataArray(5.0) - - v: linopy.Variable = m.add_variables(lower=0.0, name="v") - e: linopy.LinearExpression = v * 1.0 - q = v * v - - _ = s * v - _ = v * s - _ = v + s - - _ = s * e - _ = e * s - _ = e + s - - _ = s * q - _ = q * s - _ = q + s - - -def test_constant_with_extra_dims_broadcasts(convention: str) -> None: - """Broadcasting with extra dims works under both conventions.""" +def test_operations_with_data_arrays_are_typed_correctly() -> None: m = linopy.Model() a: xr.DataArray = xr.DataArray([1, 2, 3]) @@ -35,13 +12,14 @@ def test_constant_with_extra_dims_broadcasts(convention: str) -> None: e: linopy.LinearExpression = v * 1.0 q = v * v - # Constants can introduce new dimensions (broadcasting) - result_v = a * v - assert "dim_0" in result_v.dims + _ = a * v + _ = v * a + _ = v + a - result_e = a * e - assert 
"dim_0" in result_e.dims + _ = a * e + _ = e * a + _ = e + a - # QuadraticExpression also allows constant broadcasting - result_q = a * q - assert isinstance(result_q, linopy.expressions.QuadraticExpression) + _ = a * q + _ = q * a + _ = q + a From c570e1b12f28bb754e091c127cb3063a28b1e5bc Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Mar 2026 09:41:44 +0100 Subject: [PATCH 52/66] Fix mypy, strengthen tests, add convention + reindex test coverage (#610) * Restore master tests, add autouse convention fixture - Restore test files to match master exactly (legacy behavior) - Delete legacy duplicate test files - Add autouse parametrized convention fixture: every test runs under both 'legacy' and 'v1' conventions by default - Add legacy_convention/v1_convention opt-out fixtures for convention-specific tests Co-Authored-By: Claude Opus 4.6 * Mark legacy-only tests, add v1 counterparts for differing behavior Tests that differ between conventions are split: - Legacy-only: marked with legacy_convention fixture (skipped under v1) - V1-only: marked with v1_convention fixture (skipped under legacy) - All other tests: run under both conventions via autouse fixture Files changed: - test_common.py: split test_align into legacy/v1 versions - test_constraints.py: mark TestConstraintCoordinateAlignment as legacy-only, add TestConstraintCoordinateAlignmentV1, split higher-dim RHS tests - test_linear_expression.py: mark TestCoordinateAlignment as legacy-only, add TestCoordinateAlignmentV1, split sum/join tests - test_piecewise_constraints.py: mark legacy-only (implementation not yet v1-compatible) - test_sos_reformulation.py: mark legacy-only (implementation not yet v1-compatible) Co-Authored-By: Claude Opus 4.6 * Fix mypy error, strengthen tests, and add convention test coverage - Fix mypy: use typed list[JoinOptions] for loop variable in test_linear_expression.py - Strengthen assert_linequal in test_algebraic_properties.py to verify 
coefficients and vars - Fix Variable.reindex_like() to handle DataArray inputs correctly - Add test_convention.py covering config validation, deprecation warnings, scalar fast path, NaN edge cases, convention switching, error messages, and Variable.reindex/reindex_like Co-Authored-By: Claude Opus 4.6 * Add reindex/reindex_like tests for Expression and Constraint, fix DataArray bug - Fix LinearExpression.reindex_like() to handle DataArray inputs (same bug as Variable) - Add TestExpressionReindex: subset, superset, fill_value, type preservation, reindex_like with Expression/Variable/DataArray/Dataset - Add TestConstraintReindex: subset, superset, reindex_like with Dataset/DataArray Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- linopy/expressions.py | 12 +- linopy/variables.py | 12 +- test/test_algebraic_properties.py | 23 +- test/test_convention.py | 468 ++++++++++++++++++++++++++++++ test/test_linear_expression.py | 4 +- 5 files changed, 506 insertions(+), 13 deletions(-) create mode 100644 test/test_convention.py diff --git a/linopy/expressions.py b/linopy/expressions.py index 954c562f..9e893d45 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -1635,12 +1635,14 @@ def reindex_like( Variable labels and coefficients always use sentinel values. 
""" fv = {**self._fill_value, "const": fill_value} + if isinstance(other, DataArray): + ref = other.to_dataset(name="__tmp__") + elif isinstance(other, Dataset): + ref = other + else: + ref = other.data return self.__class__( - self.data.reindex_like( - other if isinstance(other, Dataset) else other.data, - fill_value=fv, - **kwargs, - ), + self.data.reindex_like(ref, fill_value=fv, **kwargs), self.model, ) diff --git a/linopy/variables.py b/linopy/variables.py index 1e2ea6ae..fec08e50 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -1278,12 +1278,14 @@ def reindex_like( **kwargs: Any, ) -> Variable: """Reindex like another object, filling with sentinel values.""" + if isinstance(other, DataArray): + ref = other.to_dataset(name="__tmp__") + elif isinstance(other, Dataset): + ref = other + else: + ref = other.data return self.__class__( - self.data.reindex_like( - other if isinstance(other, Dataset) else other.data, - fill_value=self._fill_value, - **kwargs, - ), + self.data.reindex_like(ref, fill_value=self._fill_value, **kwargs), self.model, self.name, ) diff --git a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py index 04103b61..5d755baa 100644 --- a/test/test_algebraic_properties.py +++ b/test/test_algebraic_properties.py @@ -106,7 +106,11 @@ def c(tech: pd.Index) -> xr.DataArray: def assert_linequal(a: LinearExpression, b: LinearExpression) -> None: - """Assert two linear expressions are algebraically equivalent.""" + """ + Assert two linear expressions are algebraically equivalent. + + Checks dimensions, coordinates, coefficients, variable references, and constants. 
+ """ assert set(a.dims) == set(b.dims), f"dims differ: {a.dims} vs {b.dims}" for dim in a.dims: if isinstance(dim, str) and dim.startswith("_"): @@ -114,7 +118,22 @@ def assert_linequal(a: LinearExpression, b: LinearExpression) -> None: np.testing.assert_array_equal( sorted(a.coords[dim].values), sorted(b.coords[dim].values) ) - assert a.const.sum().item() == pytest.approx(b.const.sum().item()) + # Simplify both to canonical form for coefficient/variable comparison + a_s = a.simplify() + b_s = b.simplify() + assert a_s.nterm == b_s.nterm, f"nterm differs: {a_s.nterm} vs {b_s.nterm}" + np.testing.assert_array_almost_equal( + np.sort(a_s.coeffs.values, axis=None), + np.sort(b_s.coeffs.values, axis=None), + err_msg="coefficients differ", + ) + np.testing.assert_array_equal( + np.sort(a_s.vars.values, axis=None), + np.sort(b_s.vars.values, axis=None), + ) + np.testing.assert_array_almost_equal( + a.const.values, b.const.values, err_msg="constants differ" + ) # ============================================================ diff --git a/test/test_convention.py b/test/test_convention.py new file mode 100644 index 00000000..2e756cfc --- /dev/null +++ b/test/test_convention.py @@ -0,0 +1,468 @@ +""" +Tests for the arithmetic convention system. 
+ +Covers: +- Config validation (valid/invalid convention values, default) +- Deprecation warnings under legacy convention +- Scalar fast path consistency +- NaN edge cases (inf, -inf) +- Convention switching mid-session +- Variable.reindex() and Variable.reindex_like() +""" + +from __future__ import annotations + +import warnings + +import numpy as np +import pandas as pd +import pytest +import xarray as xr + +import linopy +from linopy import LinearExpression, Model, Variable +from linopy.config import ( + LinopyDeprecationWarning, + OptionSettings, + options, +) +from linopy.constraints import Constraint +from linopy.testing import assert_linequal + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture +def m() -> Model: + model = Model() + model.add_variables(coords=[pd.RangeIndex(5, name="i")], name="a") + model.add_variables(coords=[pd.RangeIndex(5, name="i")], name="b") + return model + + +@pytest.fixture +def a(m: Model) -> Variable: + return m.variables["a"] + + +@pytest.fixture +def b(m: Model) -> Variable: + return m.variables["b"] + + +# --------------------------------------------------------------------------- +# 3. 
Config validation +# --------------------------------------------------------------------------- + + +class TestConfigValidation: + def test_default_convention_is_legacy(self) -> None: + """Default arithmetic_convention should be 'legacy'.""" + fresh = OptionSettings( + display_max_rows=14, + display_max_terms=6, + arithmetic_convention="legacy", + ) + assert fresh["arithmetic_convention"] == "legacy" + + def test_set_valid_convention_v1(self) -> None: + old = options["arithmetic_convention"] + try: + options["arithmetic_convention"] = "v1" + assert options["arithmetic_convention"] == "v1" + finally: + options["arithmetic_convention"] = old + + def test_set_valid_convention_legacy(self) -> None: + old = options["arithmetic_convention"] + try: + options["arithmetic_convention"] = "legacy" + assert options["arithmetic_convention"] == "legacy" + finally: + options["arithmetic_convention"] = old + + def test_set_invalid_convention_raises(self) -> None: + with pytest.raises(ValueError, match="Invalid arithmetic_convention"): + options["arithmetic_convention"] = "invalid" + + def test_set_invalid_convention_exact_raises(self) -> None: + """'exact' is a join mode, not a valid convention name.""" + with pytest.raises(ValueError, match="Invalid arithmetic_convention"): + options["arithmetic_convention"] = "exact" + + def test_invalid_key_raises(self) -> None: + with pytest.raises(KeyError, match="not a valid setting"): + options["nonexistent_key"] = 42 + + def test_get_invalid_key_raises(self) -> None: + with pytest.raises(KeyError, match="not a valid setting"): + _ = options["nonexistent_key"] + + +# --------------------------------------------------------------------------- +# 5. 
Deprecation warnings +# --------------------------------------------------------------------------- + + +class TestDeprecationWarnings: + @pytest.fixture(autouse=True) + def _use_legacy(self, legacy_convention: None) -> None: + pass + + def test_add_constant_emits_deprecation_warning(self, a: Variable) -> None: + const = xr.DataArray([1, 2, 3, 4, 5], dims=["i"], coords={"i": range(5)}) + with pytest.warns(LinopyDeprecationWarning, match="legacy"): + _ = (1 * a) + const + + def test_mul_constant_emits_deprecation_warning(self, a: Variable) -> None: + const = xr.DataArray([1, 2, 3, 4, 5], dims=["i"], coords={"i": range(5)}) + with pytest.warns(LinopyDeprecationWarning, match="legacy"): + _ = (1 * a) * const + + def test_align_emits_deprecation_warning(self, a: Variable) -> None: + alpha = xr.DataArray([1, 2], [[1, 2]]) + with pytest.warns(LinopyDeprecationWarning, match="legacy"): + linopy.align(a, alpha) + + +# --------------------------------------------------------------------------- +# 6. 
Scalar fast path +# --------------------------------------------------------------------------- + + +class TestScalarFastPath: + """Scalar operations should produce same results as array operations.""" + + @pytest.fixture(autouse=True) + def _use_v1(self, v1_convention: None) -> None: + pass + + def test_add_scalar_matches_array(self, a: Variable) -> None: + scalar_result = (1 * a) + 5 + array_const = xr.DataArray(np.full(5, 5.0), dims=["i"], coords={"i": range(5)}) + array_result = (1 * a) + array_const + assert_linequal(scalar_result, array_result) + + def test_sub_scalar_matches_array(self, a: Variable) -> None: + scalar_result = (1 * a) - 3 + array_const = xr.DataArray(np.full(5, 3.0), dims=["i"], coords={"i": range(5)}) + array_result = (1 * a) - array_const + assert_linequal(scalar_result, array_result) + + def test_mul_scalar_matches_array(self, a: Variable) -> None: + scalar_result = (1 * a) * 2 + array_const = xr.DataArray(np.full(5, 2.0), dims=["i"], coords={"i": range(5)}) + array_result = (1 * a) * array_const + assert_linequal(scalar_result, array_result) + + def test_div_scalar_matches_array(self, a: Variable) -> None: + scalar_result = (1 * a) / 4 + array_const = xr.DataArray(np.full(5, 4.0), dims=["i"], coords={"i": range(5)}) + array_result = (1 * a) / array_const + assert_linequal(scalar_result, array_result) + + +# --------------------------------------------------------------------------- +# 7. 
NaN edge cases +# --------------------------------------------------------------------------- + + +class TestNaNEdgeCases: + @pytest.fixture(autouse=True) + def _use_v1(self, v1_convention: None) -> None: + pass + + def test_inf_add_propagates(self, a: Variable) -> None: + """Adding inf should propagate to const.""" + const = xr.DataArray( + [1.0, np.inf, 3.0, 4.0, 5.0], dims=["i"], coords={"i": range(5)} + ) + result = (1 * a) + const + assert np.isinf(result.const.values[1]) + + def test_neg_inf_add_propagates(self, a: Variable) -> None: + """Adding -inf should propagate to const.""" + const = xr.DataArray( + [1.0, -np.inf, 3.0, 4.0, 5.0], dims=["i"], coords={"i": range(5)} + ) + result = (1 * a) + const + assert np.isinf(result.const.values[1]) + assert result.const.values[1] < 0 + + def test_inf_mul_propagates(self, a: Variable) -> None: + """Multiplying by inf should propagate to coeffs.""" + const = xr.DataArray( + [1.0, np.inf, 3.0, 4.0, 5.0], dims=["i"], coords={"i": range(5)} + ) + result = (1 * a) * const + assert np.isinf(result.coeffs.squeeze().values[1]) + + def test_nan_mul_propagates_v1(self, a: Variable) -> None: + """Under v1, NaN in mul should propagate (no fillna).""" + const = xr.DataArray( + [1.0, np.nan, 3.0, 4.0, 5.0], dims=["i"], coords={"i": range(5)} + ) + result = (1 * a) * const + assert np.isnan(result.coeffs.squeeze().values[1]) + + +# --------------------------------------------------------------------------- +# 8. 
Convention switching mid-session +# --------------------------------------------------------------------------- + + +class TestConventionSwitching: + def test_switch_convention_mid_session(self, a: Variable, b: Variable) -> None: + """Switching convention mid-session should change behavior immediately.""" + const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) + + # Under legacy: mismatched-size const should work + linopy.options["arithmetic_convention"] = "legacy" + with warnings.catch_warnings(): + warnings.simplefilter("ignore", LinopyDeprecationWarning) + # This should succeed under legacy (left join / override) + _ = (1 * a) + const + + # Switch to v1: same operation with mismatched coords should raise + linopy.options["arithmetic_convention"] = "v1" + with pytest.raises(ValueError, match="exact"): + _ = (1 * a) + const + + def test_reset_restores_defaults(self) -> None: + """OptionSettings.reset() should restore factory defaults.""" + options["arithmetic_convention"] = "v1" + assert options["arithmetic_convention"] == "v1" + options.reset() + assert options["arithmetic_convention"] == "legacy" # factory default + + +# --------------------------------------------------------------------------- +# 9. TestJoinParameter deduplication (shared base class) +# --------------------------------------------------------------------------- +# The existing TestJoinParameter class already tests both conventions via +# legacy_convention/v1_convention fixtures. The deduplication is addressed by +# verifying that explicit join= works identically under both conventions. 
+ + +class TestJoinWorksUnderBothConventions: + """Explicit join= should produce same results regardless of convention.""" + + @pytest.fixture + def m2(self) -> Model: + m = Model() + m.add_variables(coords=[pd.Index([0, 1, 2], name="i")], name="a") + m.add_variables(coords=[pd.Index([1, 2, 3], name="i")], name="b") + return m + + def test_add_inner_same_under_both(self, m2: Model) -> None: + a = m2.variables["a"] + b = m2.variables["b"] + + linopy.options["arithmetic_convention"] = "legacy" + with warnings.catch_warnings(): + warnings.simplefilter("ignore", LinopyDeprecationWarning) + result_legacy = a.to_linexpr().add(b.to_linexpr(), join="inner") + + linopy.options["arithmetic_convention"] = "v1" + result_v1 = a.to_linexpr().add(b.to_linexpr(), join="inner") + + assert list(result_legacy.data.indexes["i"]) == list( + result_v1.data.indexes["i"] + ) + + def test_add_outer_same_under_both(self, m2: Model) -> None: + a = m2.variables["a"] + b = m2.variables["b"] + + linopy.options["arithmetic_convention"] = "legacy" + with warnings.catch_warnings(): + warnings.simplefilter("ignore", LinopyDeprecationWarning) + result_legacy = a.to_linexpr().add(b.to_linexpr(), join="outer") + + linopy.options["arithmetic_convention"] = "v1" + result_v1 = a.to_linexpr().add(b.to_linexpr(), join="outer") + + assert set(result_legacy.data.indexes["i"]) == set(result_v1.data.indexes["i"]) + + +# --------------------------------------------------------------------------- +# 10. 
Error message tests +# --------------------------------------------------------------------------- + + +class TestErrorMessages: + @pytest.fixture(autouse=True) + def _use_v1(self, v1_convention: None) -> None: + pass + + def test_exact_join_error_suggests_escape_hatches(self, a: Variable) -> None: + """Error message should suggest .add()/.mul() with join= parameter.""" + subset = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) + with pytest.raises(ValueError, match=r"\.add\(other, join="): + _ = (1 * a) + subset + + def test_exact_join_error_mentions_inner(self, a: Variable) -> None: + subset = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) + with pytest.raises(ValueError, match="inner"): + _ = (1 * a) + subset + + def test_exact_join_error_mentions_outer(self, a: Variable) -> None: + subset = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) + with pytest.raises(ValueError, match="outer"): + _ = (1 * a) + subset + + +# --------------------------------------------------------------------------- +# Variable.reindex() and Variable.reindex_like() +# --------------------------------------------------------------------------- + + +class TestVariableReindex: + @pytest.fixture + def var(self) -> Variable: + m = Model() + return m.add_variables(coords=[pd.Index([0, 1, 2, 3, 4], name="i")], name="v") + + def test_reindex_subset(self, var: Variable) -> None: + result = var.reindex(i=[1, 2, 3]) + assert isinstance(result, Variable) + assert list(result.data.indexes["i"]) == [1, 2, 3] + # Labels for the reindexed positions should be valid + assert (result.labels.sel(i=[1, 2, 3]).values >= 0).all() + + def test_reindex_superset(self, var: Variable) -> None: + result = var.reindex(i=[0, 1, 2, 3, 4, 5, 6]) + assert isinstance(result, Variable) + assert list(result.data.indexes["i"]) == [0, 1, 2, 3, 4, 5, 6] + # New positions should have sentinel label (-1) + assert result.labels.sel(i=5).item() == -1 + assert result.labels.sel(i=6).item() == 
-1 + # Original positions should be valid + assert (result.labels.sel(i=[0, 1, 2, 3, 4]).values >= 0).all() + + def test_reindex_preserves_type(self, var: Variable) -> None: + result = var.reindex(i=[0, 1]) + assert type(result) is type(var) + + def test_reindex_like_variable(self, var: Variable) -> None: + m = var.model + other = m.add_variables(coords=[pd.Index([2, 3, 4, 5], name="i")], name="other") + result = var.reindex_like(other) + assert isinstance(result, Variable) + assert list(result.data.indexes["i"]) == [2, 3, 4, 5] + # Position 5 should have sentinel + assert result.labels.sel(i=5).item() == -1 + # Positions 2,3,4 should be valid + assert (result.labels.sel(i=[2, 3, 4]).values >= 0).all() + + def test_reindex_like_dataarray(self, var: Variable) -> None: + other = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 3, 5]}) + result = var.reindex_like(other) + assert isinstance(result, Variable) + assert list(result.data.indexes["i"]) == [1, 3, 5] + assert result.labels.sel(i=5).item() == -1 + + def test_reindex_empty(self, var: Variable) -> None: + result = var.reindex(i=[]) + assert isinstance(result, Variable) + assert len(result.data.indexes["i"]) == 0 + + +class TestExpressionReindex: + @pytest.fixture + def expr(self) -> LinearExpression: + m = Model() + x = m.add_variables(coords=[pd.Index([0, 1, 2, 3, 4], name="i")], name="x") + return 2 * x + 10 + + def test_reindex_subset(self, expr: LinearExpression) -> None: + result = expr.reindex(i=[1, 2, 3]) + assert isinstance(result, LinearExpression) + assert list(result.data.indexes["i"]) == [1, 2, 3] + # Coefficients for existing positions should be preserved + np.testing.assert_array_equal(result.coeffs.squeeze().values, [2, 2, 2]) + np.testing.assert_array_equal(result.const.values, [10, 10, 10]) + + def test_reindex_superset(self, expr: LinearExpression) -> None: + result = expr.reindex(i=[0, 1, 2, 3, 4, 5, 6]) + assert isinstance(result, LinearExpression) + assert 
list(result.data.indexes["i"]) == [0, 1, 2, 3, 4, 5, 6] + # New positions should have sentinel var labels (-1) + assert result.vars.squeeze().sel(i=5).item() == -1 + assert result.vars.squeeze().sel(i=6).item() == -1 + # Original positions should be valid + assert (result.vars.squeeze().sel(i=[0, 1, 2, 3, 4]).values >= 0).all() + + def test_reindex_fill_value(self, expr: LinearExpression) -> None: + result = expr.reindex(i=[0, 1, 5], fill_value=0) + assert result.const.sel(i=5).item() == 0 + result_nan = expr.reindex(i=[0, 1, 5]) + assert np.isnan(result_nan.const.sel(i=5).item()) + + def test_reindex_preserves_type(self, expr: LinearExpression) -> None: + result = expr.reindex(i=[0, 1]) + assert type(result) is type(expr) + + def test_reindex_like_expression(self, expr: LinearExpression) -> None: + m = expr.model + y = m.add_variables(coords=[pd.Index([2, 3, 4, 5], name="i")], name="y") + other = 1 * y + result = expr.reindex_like(other) + assert isinstance(result, LinearExpression) + assert list(result.data.indexes["i"]) == [2, 3, 4, 5] + assert result.vars.squeeze().sel(i=5).item() == -1 + + def test_reindex_like_variable(self, expr: LinearExpression) -> None: + m = expr.model + y = m.add_variables(coords=[pd.Index([1, 3, 5], name="i")], name="y") + result = expr.reindex_like(y) + assert isinstance(result, LinearExpression) + assert list(result.data.indexes["i"]) == [1, 3, 5] + + def test_reindex_like_dataarray(self, expr: LinearExpression) -> None: + da = xr.DataArray([10, 20, 30], dims=["i"], coords={"i": [1, 3, 5]}) + result = expr.reindex_like(da) + assert isinstance(result, LinearExpression) + assert list(result.data.indexes["i"]) == [1, 3, 5] + assert result.vars.squeeze().sel(i=5).item() == -1 + + def test_reindex_like_dataset(self, expr: LinearExpression) -> None: + ds = xr.Dataset({"tmp": (("i",), [1, 2])}, coords={"i": [0, 1]}) + result = expr.reindex_like(ds) + assert isinstance(result, LinearExpression) + assert list(result.data.indexes["i"]) == [0, 
1] + + +class TestConstraintReindex: + @pytest.fixture + def con(self) -> Constraint: + m = Model() + x = m.add_variables(coords=[pd.Index([0, 1, 2, 3, 4], name="i")], name="x") + linopy.options["arithmetic_convention"] = "legacy" + with warnings.catch_warnings(): + warnings.simplefilter("ignore", LinopyDeprecationWarning) + c = x >= 0 + m.add_constraints(c, name="c") + return m.constraints["c"] + + def test_reindex_subset(self, con: Constraint) -> None: + result = con.reindex({"i": [1, 2, 3]}) + assert list(result.data.indexes["i"]) == [1, 2, 3] + + def test_reindex_superset(self, con: Constraint) -> None: + result = con.reindex({"i": [0, 1, 2, 3, 4, 5]}) + assert list(result.data.indexes["i"]) == [0, 1, 2, 3, 4, 5] + # New position should have sentinel label + assert result.data.vars.squeeze().sel(i=5).item() == -1 + + def test_reindex_like_dataset(self, con: Constraint) -> None: + ds = xr.Dataset({"tmp": (("i",), [1, 2])}, coords={"i": [0, 1]}) + result = con.reindex_like(ds) + assert list(result.data.indexes["i"]) == [0, 1] + + def test_reindex_like_dataarray(self, con: Constraint) -> None: + da = xr.DataArray([10, 20], dims=["i"], coords={"i": [1, 3]}) + result = con.reindex_like(da) + assert list(result.data.indexes["i"]) == [1, 3] diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index e1016c35..1e5a9f7b 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -14,6 +14,7 @@ import polars as pl import pytest import xarray as xr +from xarray.core.types import JoinOptions from xarray.testing import assert_equal from linopy import LinearExpression, Model, QuadraticExpression, Variable, merge @@ -1986,7 +1987,8 @@ def test_add_constant_join_override(self, a: Variable, c: Variable) -> None: def test_add_same_coords_all_joins(self, a: Variable, c: Variable) -> None: expr_a = 1 * a + 5 const = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) - for join in ["override", "outer", "inner"]: + joins: 
list[JoinOptions] = ["override", "outer", "inner"] + for join in joins: result = expr_a.add(const, join=join) assert list(result.coords["i"].values) == [0, 1, 2] np.testing.assert_array_equal(result.const.values, [6, 7, 8]) From 7df680526beddac2aded9c52675a86590bbc9e2a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Mar 2026 11:14:00 +0100 Subject: [PATCH 53/66] Suppress LinopyDeprecationWarning in tests, add v1 constraint counterparts (#611) * Restore master tests, add autouse convention fixture - Restore test files to match master exactly (legacy behavior) - Delete legacy duplicate test files - Add autouse parametrized convention fixture: every test runs under both 'legacy' and 'v1' conventions by default - Add legacy_convention/v1_convention opt-out fixtures for convention-specific tests Co-Authored-By: Claude Opus 4.6 * Mark legacy-only tests, add v1 counterparts for differing behavior Tests that differ between conventions are split: - Legacy-only: marked with legacy_convention fixture (skipped under v1) - V1-only: marked with v1_convention fixture (skipped under legacy) - All other tests: run under both conventions via autouse fixture Files changed: - test_common.py: split test_align into legacy/v1 versions - test_constraints.py: mark TestConstraintCoordinateAlignment as legacy-only, add TestConstraintCoordinateAlignmentV1, split higher-dim RHS tests - test_linear_expression.py: mark TestCoordinateAlignment as legacy-only, add TestCoordinateAlignmentV1, split sum/join tests - test_piecewise_constraints.py: mark legacy-only (implementation not yet v1-compatible) - test_sos_reformulation.py: mark legacy-only (implementation not yet v1-compatible) Co-Authored-By: Claude Opus 4.6 * Fix mypy error, strengthen tests, and add convention test coverage - Fix mypy: use typed list[JoinOptions] for loop variable in test_linear_expression.py - Strengthen assert_linequal in test_algebraic_properties.py to verify coefficients 
and vars - Fix Variable.reindex_like() to handle DataArray inputs correctly - Add test_convention.py covering config validation, deprecation warnings, scalar fast path, NaN edge cases, convention switching, error messages, and Variable.reindex/reindex_like Co-Authored-By: Claude Opus 4.6 * Add reindex/reindex_like tests for Expression and Constraint, fix DataArray bug - Fix LinearExpression.reindex_like() to handle DataArray inputs (same bug as Variable) - Add TestExpressionReindex: subset, superset, fill_value, type preservation, reindex_like with Expression/Variable/DataArray/Dataset - Add TestConstraintReindex: subset, superset, reindex_like with Dataset/DataArray Co-Authored-By: Claude Opus 4.6 * Suppress LinopyDeprecationWarning in tests, add v1 constraint counterparts - Convention fixture now filters LinopyDeprecationWarning under legacy, reducing test warnings from 9262 to 213. Dedicated tests in test_convention.py still verify warnings are emitted. - test_repr.py: suppress module-level deprecation warnings from collection-time model setup. - TestConstraintCoordinateAlignmentV1: add comprehensive v1 counterparts covering all comparison operators (<=, >=, ==), subset/superset/expr raises, explicit join= escape hatches, assign_coords pattern, and higher-dim DataArray broadcast vs mismatch behavior. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- test/conftest.py | 17 ++++++- test/test_constraints.py | 105 +++++++++++++++++++++++++++++++++++++++ test/test_repr.py | 5 ++ 3 files changed, 125 insertions(+), 2 deletions(-) diff --git a/test/conftest.py b/test/conftest.py index 58142984..39bfdf3f 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -3,6 +3,7 @@ from __future__ import annotations import os +import warnings from collections.abc import Generator from typing import TYPE_CHECKING @@ -60,12 +61,24 @@ def pytest_collection_modifyitems( @pytest.fixture(autouse=True, params=["legacy", "v1"]) def convention(request: pytest.FixtureRequest) -> Generator[str, None, None]: - """Run every test under both arithmetic conventions by default.""" + """ + Run every test under both arithmetic conventions by default. + + Under "legacy", LinopyDeprecationWarning is suppressed so that the test + output stays clean. Dedicated tests in test_convention.py verify that + these warnings are actually emitted. 
+ """ import linopy + from linopy.config import LinopyDeprecationWarning old = linopy.options["arithmetic_convention"] linopy.options["arithmetic_convention"] = request.param - yield request.param + if request.param == "legacy": + with warnings.catch_warnings(): + warnings.simplefilter("ignore", LinopyDeprecationWarning) + yield request.param + else: + yield request.param linopy.options["arithmetic_convention"] = old diff --git a/test/test_constraints.py b/test/test_constraints.py index fa01e8f1..c834ded9 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -509,6 +509,111 @@ def test_constraint_rhs_extra_dims_mismatched_raises(self, v: Variable) -> None: with pytest.raises(ValueError, match="exact"): v <= rhs + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_var_comparison_subset_raises(self, v: Variable, sign: str) -> None: + subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + with pytest.raises(ValueError, match="exact"): + if sign == LESS_EQUAL: + v <= subset + elif sign == GREATER_EQUAL: + v >= subset + else: + v == subset + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_var_comparison_subset_join_left(self, v: Variable, sign: str) -> None: + subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + expr = v.to_linexpr() + if sign == LESS_EQUAL: + con = expr.le(subset, join="left") + elif sign == GREATER_EQUAL: + con = expr.ge(subset, join="left") + else: + con = expr.eq(subset, join="left") + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == 10.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + def test_var_comparison_subset_assign_coords(self, v: Variable) -> None: + """V1 pattern: use assign_coords to align before comparing.""" + target_coords = v.coords["dim_2"][:2] + subset = xr.DataArray( + [10.0, 30.0], dims=["dim_2"], coords={"dim_2": target_coords} + ) + con = v.loc[:1] <= subset 
+ assert con.sizes["dim_2"] == 2 + assert con.rhs.sel(dim_2=0).item() == 10.0 + assert con.rhs.sel(dim_2=1).item() == 30.0 + + def test_expr_le_subset_raises(self, v: Variable) -> None: + subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + expr = v + 5 + with pytest.raises(ValueError, match="exact"): + expr <= subset + + def test_expr_le_subset_join_left(self, v: Variable) -> None: + subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + expr = v.to_linexpr() + 5 + con = expr.le(subset, join="left") + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) + assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_subset_comparison_var_raises(self, v: Variable, sign: str) -> None: + subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) + with pytest.raises(ValueError, match="exact"): + if sign == LESS_EQUAL: + subset <= v + elif sign == GREATER_EQUAL: + subset >= v + else: + subset == v + + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) + def test_superset_comparison_var_raises(self, v: Variable, sign: str) -> None: + superset = xr.DataArray( + np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} + ) + with pytest.raises(ValueError, match="exact"): + if sign == LESS_EQUAL: + superset <= v + else: + superset >= v + + def test_superset_comparison_var_join_inner(self, v: Variable) -> None: + superset = xr.DataArray( + np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} + ) + con = v.to_linexpr().le(superset, join="inner") + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(con.rhs.values).any() + + def test_constraint_rhs_higher_dim_dataarray_broadcasts(self) -> None: + """V1: DataArray RHS with extra dims broadcasts if shared dim coords 
match.""" + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + rhs = xr.DataArray( + np.ones((5, 3)), + dims=["dim_0", "extra"], + coords={"dim_0": range(5)}, + ) + c = m.add_constraints(x >= rhs) + assert c.shape == (5, 3) + + def test_constraint_rhs_higher_dim_dataarray_mismatched_raises(self) -> None: + """V1: DataArray RHS with mismatched shared dim coords raises.""" + m = Model() + x = m.add_variables(coords=[range(5)], name="x") + rhs = xr.DataArray( + np.ones((3, 3)), + dims=["dim_0", "extra"], + coords={"dim_0": [10, 11, 12]}, + ) + with pytest.raises(ValueError, match="exact"): + m.add_constraints(x >= rhs) + def test_subset_constraint_solve_integration(self) -> None: if not available_solvers: pytest.skip("No solver available") diff --git a/test/test_repr.py b/test/test_repr.py index 9a7af893..e2782d41 100644 --- a/test/test_repr.py +++ b/test/test_repr.py @@ -1,15 +1,20 @@ from __future__ import annotations +import warnings + import numpy as np import pandas as pd import pytest import xarray as xr from linopy import Model, options +from linopy.config import LinopyDeprecationWarning from linopy.constraints import Constraint from linopy.expressions import LinearExpression from linopy.variables import Variable +warnings.filterwarnings("ignore", category=LinopyDeprecationWarning) + m = Model() lower = pd.Series(0, range(10)) From 373c8b3e3dc225318562fc2affe61f6eba096948 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Mar 2026 11:40:28 +0100 Subject: [PATCH 54/66] Replace convention fixtures with pytest markers for clean test separation Introduce @pytest.mark.legacy_only and @pytest.mark.v1_only markers as the single, consistent mechanism for convention-specific tests. This replaces five different patterns (class-level autouse fixtures, function fixture params, module-level autouse fixtures, and inconsistent naming variants) with one visible, declarative approach. 
Changes: - conftest.py: register markers, skip logic in convention fixture - Remove all _legacy_only/_v1_only/_use_legacy/_use_v1 autouse fixtures - Remove legacy_convention/v1_convention fixture params from signatures - Module-level: pytestmark = pytest.mark.{legacy,v1}_only - Class-level: @pytest.mark.{legacy,v1}_only decorator - Function-level: @pytest.mark.{legacy,v1}_only decorator - Supports pytest -m "legacy_only" / -m "v1_only" for filtering Co-Authored-By: Claude Opus 4.6 --- test/conftest.py | 29 +++++++++++----------- test/test_algebraic_properties.py | 11 +------- test/test_common.py | 6 +++-- test/test_constraints.py | 23 ++++++----------- test/test_convention.py | 24 +++++------------- test/test_linear_expression.py | 40 ++++++++++++++++-------------- test/test_piecewise_constraints.py | 4 +-- test/test_sos_reformulation.py | 5 +--- 8 files changed, 58 insertions(+), 84 deletions(-) diff --git a/test/conftest.py b/test/conftest.py index 39bfdf3f..a2f61ca7 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -27,6 +27,12 @@ def pytest_addoption(parser: pytest.Parser) -> None: def pytest_configure(config: pytest.Config) -> None: """Configure pytest with custom markers and behavior.""" config.addinivalue_line("markers", "gpu: marks tests as requiring GPU hardware") + config.addinivalue_line( + "markers", "legacy_only: test runs only under the legacy arithmetic convention" + ) + config.addinivalue_line( + "markers", "v1_only: test runs only under the v1 arithmetic convention" + ) # Set environment variable so test modules can check if GPU tests are enabled # This is needed because parametrize happens at import time @@ -64,6 +70,9 @@ def convention(request: pytest.FixtureRequest) -> Generator[str, None, None]: """ Run every test under both arithmetic conventions by default. + Tests marked ``@pytest.mark.legacy_only`` or ``@pytest.mark.v1_only`` + are automatically skipped for the other convention. 
+ Under "legacy", LinopyDeprecationWarning is suppressed so that the test output stays clean. Dedicated tests in test_convention.py verify that these warnings are actually emitted. @@ -71,6 +80,12 @@ def convention(request: pytest.FixtureRequest) -> Generator[str, None, None]: import linopy from linopy.config import LinopyDeprecationWarning + item = request.node + if item.get_closest_marker("legacy_only") and request.param != "legacy": + pytest.skip("legacy-only test") + if item.get_closest_marker("v1_only") and request.param != "v1": + pytest.skip("v1-only test") + old = linopy.options["arithmetic_convention"] linopy.options["arithmetic_convention"] = request.param if request.param == "legacy": @@ -82,20 +97,6 @@ def convention(request: pytest.FixtureRequest) -> Generator[str, None, None]: linopy.options["arithmetic_convention"] = old -@pytest.fixture -def legacy_convention(convention: str) -> None: - """Opt-out: skip this test when convention is not 'legacy'.""" - if convention != "legacy": - pytest.skip("legacy-only test") - - -@pytest.fixture -def v1_convention(convention: str) -> None: - """Opt-out: skip this test when convention is not 'v1'.""" - if convention != "v1": - pytest.skip("v1-only test") - - @pytest.fixture def m() -> Model: from linopy import Model diff --git a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py index 5d755baa..09ee0849 100644 --- a/test/test_algebraic_properties.py +++ b/test/test_algebraic_properties.py @@ -39,25 +39,16 @@ from __future__ import annotations -from collections.abc import Generator - import numpy as np import pandas as pd import pytest import xarray as xr -import linopy from linopy import Model from linopy.expressions import LinearExpression from linopy.variables import Variable - -@pytest.fixture(autouse=True) -def _use_v1_convention() -> Generator[None, None, None]: - """Use v1 arithmetic convention for all tests in this module.""" - linopy.options["arithmetic_convention"] = "v1" - yield - 
linopy.options["arithmetic_convention"] = "legacy" +pytestmark = pytest.mark.v1_only @pytest.fixture diff --git a/test/test_common.py b/test/test_common.py index 71f4be0d..51116a9a 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -649,7 +649,8 @@ def test_get_dims_with_index_levels() -> None: assert get_dims_with_index_levels(ds5) == [] -def test_align(legacy_convention: None, x: Variable, u: Variable) -> None: # noqa: F811 +@pytest.mark.legacy_only +def test_align(x: Variable, u: Variable) -> None: # noqa: F811 """Legacy: align() defaults to inner join for mismatched coords.""" alpha = xr.DataArray([1, 2], [[1, 2]]) beta = xr.DataArray( @@ -693,7 +694,8 @@ def test_align(legacy_convention: None, x: Variable, u: Variable) -> None: # no assert_linequal(expr_obs, expr.loc[[1]]) -def test_align_v1(v1_convention: None, x: Variable, u: Variable) -> None: # noqa: F811 +@pytest.mark.v1_only +def test_align_v1(x: Variable, u: Variable) -> None: # noqa: F811 """V1: align() defaults to exact join; explicit join= needed for mismatched coords.""" alpha = xr.DataArray([1, 2], [[1, 2]]) beta = xr.DataArray( diff --git a/test/test_constraints.py b/test/test_constraints.py index c834ded9..537fb18c 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -175,8 +175,9 @@ def test_constraint_rhs_lower_dim(rhs_factory: Any) -> None: pytest.param(lambda m: pd.DataFrame(np.ones((5, 3))), id="dataframe"), ], ) +@pytest.mark.legacy_only def test_constraint_rhs_higher_dim_constant_warns( - legacy_convention: None, rhs_factory: Any, caplog: Any + rhs_factory: Any, caplog: Any ) -> None: """Legacy: higher-dim constant RHS warns about dimensions.""" m = Model() @@ -187,9 +188,8 @@ def test_constraint_rhs_higher_dim_constant_warns( assert "dimensions" in caplog.text -def test_constraint_rhs_higher_dim_constant_broadcasts_v1( - v1_convention: None, -) -> None: +@pytest.mark.v1_only +def test_constraint_rhs_higher_dim_constant_broadcasts_v1() -> None: """V1: 
higher-dim constant RHS broadcasts (creates redundant constraints).""" m = Model() x = m.add_variables(coords=[range(5)], name="x") @@ -198,9 +198,8 @@ def test_constraint_rhs_higher_dim_constant_broadcasts_v1( assert "extra" in c.dims -def test_constraint_rhs_higher_dim_dataarray_reindexes( - legacy_convention: None, -) -> None: +@pytest.mark.legacy_only +def test_constraint_rhs_higher_dim_dataarray_reindexes() -> None: """Legacy: DataArray RHS with extra dims reindexes to expression coords.""" m = Model() x = m.add_variables(coords=[range(5)], name="x") @@ -360,13 +359,10 @@ def test_sanitize_infinities() -> None: m.add_constraints(y <= -np.inf, name="con_wrong_neg_inf") +@pytest.mark.legacy_only class TestConstraintCoordinateAlignmentLegacy: """Legacy: outer join with NaN fill for constraint coordinate mismatches.""" - @pytest.fixture(autouse=True) - def _legacy_only(self, legacy_convention: None) -> None: - pass - @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: if request.param == "xarray": @@ -468,13 +464,10 @@ def test_subset_constraint_solve_integration(self) -> None: assert sol.sel(i=4).item() == pytest.approx(100.0) +@pytest.mark.v1_only class TestConstraintCoordinateAlignmentV1: """V1: exact join raises on coordinate mismatches; explicit join= is the escape hatch.""" - @pytest.fixture(autouse=True) - def _v1_only(self, v1_convention: None) -> None: - pass - def test_var_le_subset_raises(self, v: Variable) -> None: subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) with pytest.raises(ValueError, match="exact"): diff --git a/test/test_convention.py b/test/test_convention.py index 2e756cfc..95b3a1a1 100644 --- a/test/test_convention.py +++ b/test/test_convention.py @@ -106,11 +106,8 @@ def test_get_invalid_key_raises(self) -> None: # --------------------------------------------------------------------------- +@pytest.mark.legacy_only class 
TestDeprecationWarnings: - @pytest.fixture(autouse=True) - def _use_legacy(self, legacy_convention: None) -> None: - pass - def test_add_constant_emits_deprecation_warning(self, a: Variable) -> None: const = xr.DataArray([1, 2, 3, 4, 5], dims=["i"], coords={"i": range(5)}) with pytest.warns(LinopyDeprecationWarning, match="legacy"): @@ -132,13 +129,10 @@ def test_align_emits_deprecation_warning(self, a: Variable) -> None: # --------------------------------------------------------------------------- +@pytest.mark.v1_only class TestScalarFastPath: """Scalar operations should produce same results as array operations.""" - @pytest.fixture(autouse=True) - def _use_v1(self, v1_convention: None) -> None: - pass - def test_add_scalar_matches_array(self, a: Variable) -> None: scalar_result = (1 * a) + 5 array_const = xr.DataArray(np.full(5, 5.0), dims=["i"], coords={"i": range(5)}) @@ -169,11 +163,8 @@ def test_div_scalar_matches_array(self, a: Variable) -> None: # --------------------------------------------------------------------------- +@pytest.mark.v1_only class TestNaNEdgeCases: - @pytest.fixture(autouse=True) - def _use_v1(self, v1_convention: None) -> None: - pass - def test_inf_add_propagates(self, a: Variable) -> None: """Adding inf should propagate to const.""" const = xr.DataArray( @@ -242,8 +233,8 @@ def test_reset_restores_defaults(self) -> None: # 9. TestJoinParameter deduplication (shared base class) # --------------------------------------------------------------------------- # The existing TestJoinParameter class already tests both conventions via -# legacy_convention/v1_convention fixtures. The deduplication is addressed by -# verifying that explicit join= works identically under both conventions. +# @pytest.mark.legacy_only / @pytest.mark.v1_only markers. The deduplication +# is addressed by verifying that explicit join= works identically under both. 
class TestJoinWorksUnderBothConventions: @@ -292,11 +283,8 @@ def test_add_outer_same_under_both(self, m2: Model) -> None: # --------------------------------------------------------------------------- +@pytest.mark.v1_only class TestErrorMessages: - @pytest.fixture(autouse=True) - def _use_v1(self, v1_convention: None) -> None: - pass - def test_exact_join_error_suggests_escape_hatches(self, a: Variable) -> None: """Error message should suggest .add()/.mul() with join= parameter.""" subset = xr.DataArray([1, 2, 3], dims=["i"], coords={"i": [0, 1, 2]}) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 1e5a9f7b..66b58290 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -388,8 +388,9 @@ def test_linear_expression_substraction( assert res.data.notnull().all().to_array().all() +@pytest.mark.legacy_only def test_linear_expression_sum( - legacy_convention: None, x: Variable, y: Variable, z: Variable, v: Variable + x: Variable, y: Variable, z: Variable, v: Variable ) -> None: expr = 10 * x + y + z res = expr.sum("dim_0") @@ -410,8 +411,9 @@ def test_linear_expression_sum( assert len(expr.coords["dim_2"]) == 10 +@pytest.mark.v1_only def test_linear_expression_sum_v1( - v1_convention: None, x: Variable, y: Variable, z: Variable, v: Variable + x: Variable, y: Variable, z: Variable, v: Variable ) -> None: expr = 10 * x + y + z res = expr.sum("dim_0") @@ -434,8 +436,9 @@ def test_linear_expression_sum_v1( assert len(expr.coords["dim_2"]) == 10 +@pytest.mark.legacy_only def test_linear_expression_sum_with_const( - legacy_convention: None, x: Variable, y: Variable, z: Variable, v: Variable + x: Variable, y: Variable, z: Variable, v: Variable ) -> None: expr = 10 * x + y + z + 10 res = expr.sum("dim_0") @@ -458,8 +461,9 @@ def test_linear_expression_sum_with_const( assert len(expr.coords["dim_2"]) == 10 +@pytest.mark.v1_only def test_linear_expression_sum_with_const_v1( - v1_convention: None, x: Variable, y: 
Variable, z: Variable, v: Variable + x: Variable, y: Variable, z: Variable, v: Variable ) -> None: expr = 10 * x + y + z + 10 res = expr.sum("dim_0") @@ -588,13 +592,10 @@ def test_linear_expression_multiplication_invalid( expr / x +@pytest.mark.legacy_only class TestCoordinateAlignmentLegacy: """Legacy: outer join with NaN fill / zero fill for coordinate mismatches.""" - @pytest.fixture(autouse=True) - def _legacy_only(self, legacy_convention: None) -> None: - pass - @pytest.fixture(params=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: if request.param == "da": @@ -1934,16 +1935,18 @@ def c(self, m2: Model) -> Variable: return m2.variables["c"] class TestAddition: + @pytest.mark.legacy_only def test_add_join_none_preserves_default( - self, legacy_convention: None, a: Variable, b: Variable + self, a: Variable, b: Variable ) -> None: """Legacy: join=None uses outer join for mismatched coords.""" result_default = a.to_linexpr() + b.to_linexpr() result_none = a.to_linexpr().add(b.to_linexpr(), join=None) assert_linequal(result_default, result_none) + @pytest.mark.v1_only def test_add_join_none_raises_on_mismatch_v1( - self, v1_convention: None, a: Variable, b: Variable + self, a: Variable, b: Variable ) -> None: """V1: join=None uses exact join, raises on mismatched coords.""" with pytest.raises(ValueError, match="Coordinate mismatch"): @@ -2205,8 +2208,9 @@ def test_div_constant_outer_fill_values(self, a: Variable) -> None: assert result.coeffs.squeeze().sel(i=0).item() == pytest.approx(1.0) class TestQuadratic: + @pytest.mark.legacy_only def test_quadratic_add_constant_join_inner( - self, legacy_convention: None, a: Variable, b: Variable + self, a: Variable, b: Variable ) -> None: """Legacy: a*b with mismatched coords uses outer join.""" quad = a.to_linexpr() * b.to_linexpr() @@ -2214,8 +2218,9 @@ def test_quadratic_add_constant_join_inner( result = quad.add(const, join="inner") assert list(result.data.indexes["i"]) == [1, 2, 3] + 
@pytest.mark.v1_only def test_quadratic_add_constant_join_inner_v1( - self, v1_convention: None, a: Variable, c: Variable + self, a: Variable, c: Variable ) -> None: """V1: use a*c (same coords) to create quad, then join inner.""" quad = a.to_linexpr() * c.to_linexpr() @@ -2229,8 +2234,9 @@ def test_quadratic_add_expr_join_inner(self, a: Variable) -> None: result = quad.add(const, join="inner") assert list(result.data.indexes["i"]) == [0, 1] + @pytest.mark.legacy_only def test_quadratic_mul_constant_join_inner( - self, legacy_convention: None, a: Variable, b: Variable + self, a: Variable, b: Variable ) -> None: """Legacy: a*b with mismatched coords uses outer join.""" quad = a.to_linexpr() * b.to_linexpr() @@ -2238,8 +2244,9 @@ def test_quadratic_mul_constant_join_inner( result = quad.mul(const, join="inner") assert list(result.data.indexes["i"]) == [1, 2, 3] + @pytest.mark.v1_only def test_quadratic_mul_constant_join_inner_v1( - self, v1_convention: None, a: Variable, c: Variable + self, a: Variable, c: Variable ) -> None: """V1: use a*c (same coords) to create quad, then join inner.""" quad = a.to_linexpr() * c.to_linexpr() @@ -2248,13 +2255,10 @@ def test_quadratic_mul_constant_join_inner_v1( assert list(result.data.indexes["i"]) == [1, 2] +@pytest.mark.v1_only class TestCoordinateAlignmentV1: """V1: exact join raises on mismatched coords; explicit join= is the escape hatch.""" - @pytest.fixture(autouse=True) - def _v1_only(self, v1_convention: None) -> None: - pass - @pytest.fixture(params=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: if request.param == "da": diff --git a/test/test_piecewise_constraints.py b/test/test_piecewise_constraints.py index b947965d..7da36991 100644 --- a/test/test_piecewise_constraints.py +++ b/test/test_piecewise_constraints.py @@ -51,9 +51,7 @@ ] -@pytest.fixture(autouse=True) -def _legacy_only(legacy_convention: None) -> None: - """Piecewise implementation not yet adapted for v1 convention.""" 
+pytestmark = pytest.mark.legacy_only # =========================================================================== diff --git a/test/test_sos_reformulation.py b/test/test_sos_reformulation.py index 252ecc41..20e32d9a 100644 --- a/test/test_sos_reformulation.py +++ b/test/test_sos_reformulation.py @@ -18,10 +18,7 @@ undo_sos_reformulation, ) - -@pytest.fixture(autouse=True) -def _legacy_only(legacy_convention: None) -> None: - """SOS reformulation not yet adapted for v1 convention.""" +pytestmark = pytest.mark.legacy_only class TestValidateBounds: From 1450451f30eb21d546aecfa0315828187cc1c4f6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Mar 2026 13:31:30 +0100 Subject: [PATCH 55/66] Co-locate legacy and v1 tests with behavior-based naming for easier review Merge separate TestConstraintCoordinateAlignmentLegacy/V1 and TestCoordinateAlignmentLegacy/V1 classes into unified classes where legacy and v1 test methods sit side-by-side, grouped by scenario. Test names now describe behavior (e.g. test_mul_subset_fills_zeros for legacy, test_mul_subset_raises for v1) rather than using class-level Legacy/V1 suffixes, making it clear what each convention expects. 
Co-Authored-By: Claude Opus 4.6 --- test/test_constraints.py | 245 ++++++++++-------- test/test_linear_expression.py | 455 +++++++++++++++++---------------- 2 files changed, 368 insertions(+), 332 deletions(-) diff --git a/test/test_constraints.py b/test/test_constraints.py index 537fb18c..eebf2940 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -359,9 +359,8 @@ def test_sanitize_infinities() -> None: m.add_constraints(y <= -np.inf, name="con_wrong_neg_inf") -@pytest.mark.legacy_only -class TestConstraintCoordinateAlignmentLegacy: - """Legacy: outer join with NaN fill for constraint coordinate mismatches.""" +class TestConstraintCoordinateAlignment: + """Tests for constraint behavior when variable and RHS coordinates differ.""" @pytest.fixture(params=["xarray", "pandas_series"], ids=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: @@ -381,98 +380,23 @@ def superset(self, request: Any) -> xr.DataArray | pd.Series: np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") ) - def test_var_le_subset(self, v: Variable, subset: xr.DataArray) -> None: + # -- var <= subset -- + + @pytest.mark.legacy_only + def test_var_le_subset_fills_nan(self, v: Variable, subset: xr.DataArray) -> None: con = v <= subset assert con.sizes["dim_2"] == v.sizes["dim_2"] assert con.rhs.sel(dim_2=1).item() == 10.0 assert con.rhs.sel(dim_2=3).item() == 30.0 assert np.isnan(con.rhs.sel(dim_2=0).item()) - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) - def test_var_comparison_subset( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - if sign == LESS_EQUAL: - con = v <= subset - elif sign == GREATER_EQUAL: - con = v >= subset - else: - con = v == subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == 10.0 - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - def test_expr_le_subset(self, v: Variable, subset: xr.DataArray) -> None: - expr = v + 5 - con = expr 
<= subset - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) - assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) - assert np.isnan(con.rhs.sel(dim_2=0).item()) - - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) - def test_subset_comparison_var( - self, v: Variable, subset: xr.DataArray, sign: str - ) -> None: - if sign == LESS_EQUAL: - con = subset <= v - elif sign == GREATER_EQUAL: - con = subset >= v - else: - con = subset == v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert np.isnan(con.rhs.sel(dim_2=0).item()) - assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) - - @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) - def test_superset_comparison_var( - self, v: Variable, superset: xr.DataArray, sign: str - ) -> None: - if sign == LESS_EQUAL: - con = superset <= v - else: - con = superset >= v - assert con.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(con.lhs.coeffs.values).any() - assert not np.isnan(con.rhs.values).any() - - def test_constraint_rhs_extra_dims_broadcasts(self, v: Variable) -> None: - rhs = xr.DataArray( - [[1.0, 2.0]], - dims=["extra", "dim_2"], - coords={"dim_2": [0, 1]}, - ) - c = v <= rhs - assert "extra" in c.dims - - def test_subset_constraint_solve_integration(self) -> None: - if not available_solvers: - pytest.skip("No solver available") - solver = "highs" if "highs" in available_solvers else available_solvers[0] - m = Model() - coords = pd.RangeIndex(5, name="i") - x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") - subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) - m.add_constraints(x <= subset_ub, name="subset_ub") - m.add_objective(x.sum(), sense="max") - m.solve(solver_name=solver) - sol = m.solution["x"] - assert sol.sel(i=1).item() == pytest.approx(10.0) - assert sol.sel(i=3).item() == pytest.approx(20.0) - assert sol.sel(i=0).item() == pytest.approx(100.0) - assert 
sol.sel(i=2).item() == pytest.approx(100.0) - assert sol.sel(i=4).item() == pytest.approx(100.0) - - -@pytest.mark.v1_only -class TestConstraintCoordinateAlignmentV1: - """V1: exact join raises on coordinate mismatches; explicit join= is the escape hatch.""" - + @pytest.mark.v1_only def test_var_le_subset_raises(self, v: Variable) -> None: subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) with pytest.raises(ValueError, match="exact"): v <= subset + @pytest.mark.v1_only def test_var_le_subset_join_left(self, v: Variable) -> None: subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) con = v.to_linexpr().le(subset, join="left") @@ -481,27 +405,24 @@ def test_var_le_subset_join_left(self, v: Variable) -> None: assert con.rhs.sel(dim_2=3).item() == 30.0 assert np.isnan(con.rhs.sel(dim_2=0).item()) - def test_superset_comparison_raises(self, v: Variable) -> None: - superset = xr.DataArray( - np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} - ) - with pytest.raises(ValueError, match="exact"): - superset <= v + # -- var comparison (all signs) with subset -- - def test_constraint_rhs_extra_dims_matching_broadcasts(self, v: Variable) -> None: - rhs = xr.DataArray( - np.ones((2, 20)), dims=["extra", "dim_2"], coords={"dim_2": range(20)} - ) - c = v <= rhs - assert "extra" in c.dims - - def test_constraint_rhs_extra_dims_mismatched_raises(self, v: Variable) -> None: - rhs = xr.DataArray( - [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} - ) - with pytest.raises(ValueError, match="exact"): - v <= rhs + @pytest.mark.legacy_only + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_var_comparison_subset_fills_nan( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = v <= subset + elif sign == GREATER_EQUAL: + con = v >= subset + else: + con = v == subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert 
con.rhs.sel(dim_2=1).item() == 10.0 + assert np.isnan(con.rhs.sel(dim_2=0).item()) + @pytest.mark.v1_only @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) def test_var_comparison_subset_raises(self, v: Variable, sign: str) -> None: subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) @@ -513,6 +434,7 @@ def test_var_comparison_subset_raises(self, v: Variable, sign: str) -> None: else: v == subset + @pytest.mark.v1_only @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) def test_var_comparison_subset_join_left(self, v: Variable, sign: str) -> None: subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) @@ -527,6 +449,7 @@ def test_var_comparison_subset_join_left(self, v: Variable, sign: str) -> None: assert con.rhs.sel(dim_2=1).item() == 10.0 assert np.isnan(con.rhs.sel(dim_2=0).item()) + @pytest.mark.v1_only def test_var_comparison_subset_assign_coords(self, v: Variable) -> None: """V1 pattern: use assign_coords to align before comparing.""" target_coords = v.coords["dim_2"][:2] @@ -538,12 +461,25 @@ def test_var_comparison_subset_assign_coords(self, v: Variable) -> None: assert con.rhs.sel(dim_2=0).item() == 10.0 assert con.rhs.sel(dim_2=1).item() == 30.0 + # -- expr <= subset -- + + @pytest.mark.legacy_only + def test_expr_le_subset_fills_nan(self, v: Variable, subset: xr.DataArray) -> None: + expr = v + 5 + con = expr <= subset + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert con.rhs.sel(dim_2=1).item() == pytest.approx(5.0) + assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) + assert np.isnan(con.rhs.sel(dim_2=0).item()) + + @pytest.mark.v1_only def test_expr_le_subset_raises(self, v: Variable) -> None: subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) expr = v + 5 with pytest.raises(ValueError, match="exact"): expr <= subset + @pytest.mark.v1_only def test_expr_le_subset_join_left(self, v: Variable) -> None: subset = 
xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) expr = v.to_linexpr() + 5 @@ -553,6 +489,24 @@ def test_expr_le_subset_join_left(self, v: Variable) -> None: assert con.rhs.sel(dim_2=3).item() == pytest.approx(25.0) assert np.isnan(con.rhs.sel(dim_2=0).item()) + # -- subset comparison var (reverse) -- + + @pytest.mark.legacy_only + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) + def test_subset_comparison_var_fills_nan( + self, v: Variable, subset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = subset <= v + elif sign == GREATER_EQUAL: + con = subset >= v + else: + con = subset == v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert np.isnan(con.rhs.sel(dim_2=0).item()) + assert con.rhs.sel(dim_2=1).item() == pytest.approx(10.0) + + @pytest.mark.v1_only @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL, EQUAL]) def test_subset_comparison_var_raises(self, v: Variable, sign: str) -> None: subset = xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) @@ -564,6 +518,30 @@ def test_subset_comparison_var_raises(self, v: Variable, sign: str) -> None: else: subset == v + # -- superset comparison var -- + + @pytest.mark.legacy_only + @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) + def test_superset_comparison_no_nan( + self, v: Variable, superset: xr.DataArray, sign: str + ) -> None: + if sign == LESS_EQUAL: + con = superset <= v + else: + con = superset >= v + assert con.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(con.lhs.coeffs.values).any() + assert not np.isnan(con.rhs.values).any() + + @pytest.mark.v1_only + def test_superset_comparison_raises(self, v: Variable) -> None: + superset = xr.DataArray( + np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} + ) + with pytest.raises(ValueError, match="exact"): + superset <= v + + @pytest.mark.v1_only @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) def 
test_superset_comparison_var_raises(self, v: Variable, sign: str) -> None: superset = xr.DataArray( @@ -575,7 +553,8 @@ def test_superset_comparison_var_raises(self, v: Variable, sign: str) -> None: else: superset >= v - def test_superset_comparison_var_join_inner(self, v: Variable) -> None: + @pytest.mark.v1_only + def test_superset_comparison_join_inner(self, v: Variable) -> None: superset = xr.DataArray( np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} ) @@ -583,7 +562,36 @@ def test_superset_comparison_var_join_inner(self, v: Variable) -> None: assert con.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(con.rhs.values).any() - def test_constraint_rhs_higher_dim_dataarray_broadcasts(self) -> None: + # -- extra dims -- + + @pytest.mark.legacy_only + def test_rhs_extra_dims_broadcasts(self, v: Variable) -> None: + rhs = xr.DataArray( + [[1.0, 2.0]], + dims=["extra", "dim_2"], + coords={"dim_2": [0, 1]}, + ) + c = v <= rhs + assert "extra" in c.dims + + @pytest.mark.v1_only + def test_rhs_extra_dims_matching_broadcasts(self, v: Variable) -> None: + rhs = xr.DataArray( + np.ones((2, 20)), dims=["extra", "dim_2"], coords={"dim_2": range(20)} + ) + c = v <= rhs + assert "extra" in c.dims + + @pytest.mark.v1_only + def test_rhs_extra_dims_mismatched_raises(self, v: Variable) -> None: + rhs = xr.DataArray( + [[1.0, 2.0]], dims=["extra", "dim_2"], coords={"dim_2": [0, 1]} + ) + with pytest.raises(ValueError, match="exact"): + v <= rhs + + @pytest.mark.v1_only + def test_rhs_higher_dim_dataarray_matching_broadcasts(self) -> None: """V1: DataArray RHS with extra dims broadcasts if shared dim coords match.""" m = Model() x = m.add_variables(coords=[range(5)], name="x") @@ -595,7 +603,8 @@ def test_constraint_rhs_higher_dim_dataarray_broadcasts(self) -> None: c = m.add_constraints(x >= rhs) assert c.shape == (5, 3) - def test_constraint_rhs_higher_dim_dataarray_mismatched_raises(self) -> None: + @pytest.mark.v1_only + def 
test_rhs_higher_dim_dataarray_mismatched_raises(self) -> None: """V1: DataArray RHS with mismatched shared dim coords raises.""" m = Model() x = m.add_variables(coords=[range(5)], name="x") @@ -607,7 +616,29 @@ def test_constraint_rhs_higher_dim_dataarray_mismatched_raises(self) -> None: with pytest.raises(ValueError, match="exact"): m.add_constraints(x >= rhs) - def test_subset_constraint_solve_integration(self) -> None: + # -- solver integration -- + + @pytest.mark.legacy_only + def test_subset_constraint_solve_implicit(self) -> None: + if not available_solvers: + pytest.skip("No solver available") + solver = "highs" if "highs" in available_solvers else available_solvers[0] + m = Model() + coords = pd.RangeIndex(5, name="i") + x = m.add_variables(lower=0, upper=100, coords=[coords], name="x") + subset_ub = xr.DataArray([10.0, 20.0], dims=["i"], coords={"i": [1, 3]}) + m.add_constraints(x <= subset_ub, name="subset_ub") + m.add_objective(x.sum(), sense="max") + m.solve(solver_name=solver) + sol = m.solution["x"] + assert sol.sel(i=1).item() == pytest.approx(10.0) + assert sol.sel(i=3).item() == pytest.approx(20.0) + assert sol.sel(i=0).item() == pytest.approx(100.0) + assert sol.sel(i=2).item() == pytest.approx(100.0) + assert sol.sel(i=4).item() == pytest.approx(100.0) + + @pytest.mark.v1_only + def test_subset_constraint_solve_explicit_join(self) -> None: if not available_solvers: pytest.skip("No solver available") solver = "highs" if "highs" in available_solvers else available_solvers[0] diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 66b58290..3cb8337b 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -592,9 +592,8 @@ def test_linear_expression_multiplication_invalid( expr / x -@pytest.mark.legacy_only -class TestCoordinateAlignmentLegacy: - """Legacy: outer join with NaN fill / zero fill for coordinate mismatches.""" +class TestCoordinateAlignment: + """Coordinate alignment tests for both 
legacy (outer join) and v1 (exact join).""" @pytest.fixture(params=["da", "series"]) def subset(self, request: Any) -> xr.DataArray | pd.Series: @@ -632,6 +631,7 @@ def nan_constant(self, request: Any) -> xr.DataArray | pd.Series: return pd.Series(vals, index=pd.Index(range(20), name="dim_2")) class TestSubset: + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_mul_subset_fills_zeros( self, @@ -646,6 +646,16 @@ def test_mul_subset_fills_zeros( assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else 1 * v + with pytest.raises(ValueError, match="exact"): + target * subset + + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_add_subset_fills_zeros( self, @@ -664,6 +674,16 @@ def test_add_subset_fills_zeros( assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, expected) + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else v + 5 + with pytest.raises(ValueError, match="exact"): + target + subset + + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_sub_subset_fills_negated( self, @@ -682,6 +702,16 @@ def test_sub_subset_fills_negated( assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, expected) + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else v + 5 + with 
pytest.raises(ValueError, match="exact"): + target - subset + + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_div_subset_inverts_nonzero( self, v: Variable, subset: xr.DataArray, operand: str @@ -693,19 +723,52 @@ def test_div_subset_inverts_nonzero( assert result.coeffs.squeeze().sel(dim_2=1).item() == pytest.approx(0.1) assert result.coeffs.squeeze().sel(dim_2=0).item() == pytest.approx(1.0) + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_subset_raises( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + target = v if operand == "var" else 1 * v + with pytest.raises(ValueError, match="exact"): + target / subset + + @pytest.mark.legacy_only def test_subset_add_var_coefficients( self, v: Variable, subset: xr.DataArray ) -> None: result = subset + v np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + @pytest.mark.v1_only + def test_subset_add_var_raises(self, v: Variable, subset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + subset + v + + @pytest.mark.legacy_only def test_subset_sub_var_coefficients( self, v: Variable, subset: xr.DataArray ) -> None: result = subset - v np.testing.assert_array_equal(result.coeffs.squeeze().values, -np.ones(20)) + @pytest.mark.v1_only + def test_subset_sub_var_raises(self, v: Variable, subset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + subset - v + + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_subset_join_left( + self, v: Variable, subset: xr.DataArray, operand: str + ) -> None: + """Explicit join='left' fills zeros for missing coords.""" + target = v if operand == "var" else 1 * v + result = target.mul(subset, join="left") + assert result.sizes["dim_2"] == v.sizes["dim_2"] + assert not np.isnan(result.coeffs.values).any() + class TestSuperset: + @pytest.mark.legacy_only def 
test_add_superset_pins_to_lhs_coords( self, v: Variable, superset: xr.DataArray ) -> None: @@ -713,15 +776,24 @@ def test_add_superset_pins_to_lhs_coords( assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.const.values).any() + @pytest.mark.v1_only + def test_add_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + v + superset + + @pytest.mark.legacy_only def test_add_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: assert_linequal(superset + v, v + superset) + @pytest.mark.legacy_only def test_sub_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: assert_linequal(superset - v, -v + superset) + @pytest.mark.legacy_only def test_mul_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: assert_linequal(superset * v, v * superset) + @pytest.mark.legacy_only def test_mul_superset_pins_to_lhs_coords( self, v: Variable, superset: xr.DataArray ) -> None: @@ -729,6 +801,12 @@ def test_mul_superset_pins_to_lhs_coords( assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.coeffs.values).any() + @pytest.mark.v1_only + def test_mul_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: + with pytest.raises(ValueError, match="exact"): + v * superset + + @pytest.mark.legacy_only def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: superset_nonzero = xr.DataArray( np.arange(1, 26, dtype=float), @@ -740,6 +818,7 @@ def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: assert not np.isnan(result.coeffs.values).any() class TestDisjoint: + @pytest.mark.legacy_only def test_add_disjoint_fills_zeros(self, v: Variable) -> None: disjoint = xr.DataArray( [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} @@ -749,6 +828,15 @@ def test_add_disjoint_fills_zeros(self, v: Variable) -> None: assert not np.isnan(result.const.values).any() 
np.testing.assert_array_equal(result.const.values, np.zeros(20)) + @pytest.mark.v1_only + def test_add_disjoint_raises(self, v: Variable) -> None: + disjoint = xr.DataArray( + [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + with pytest.raises(ValueError, match="exact"): + v + disjoint + + @pytest.mark.legacy_only def test_mul_disjoint_fills_zeros(self, v: Variable) -> None: disjoint = xr.DataArray( [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} @@ -758,6 +846,15 @@ def test_mul_disjoint_fills_zeros(self, v: Variable) -> None: assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, np.zeros(20)) + @pytest.mark.v1_only + def test_mul_disjoint_raises(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + with pytest.raises(ValueError, match="exact"): + v * disjoint + + @pytest.mark.legacy_only def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: disjoint = xr.DataArray( [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} @@ -768,6 +865,7 @@ def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) class TestCommutativity: + @pytest.mark.legacy_only @pytest.mark.parametrize( "make_lhs,make_rhs", [ @@ -787,18 +885,20 @@ def test_commutativity( ) -> None: assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) + @pytest.mark.legacy_only def test_sub_var_anticommutative( self, v: Variable, subset: xr.DataArray ) -> None: assert_linequal(subset - v, -v + subset) + @pytest.mark.legacy_only def test_sub_expr_anticommutative( self, v: Variable, subset: xr.DataArray ) -> None: expr = v + 5 assert_linequal(subset - expr, -(expr - subset)) - def test_add_commutativity_full_coords(self, v: Variable) -> None: + def test_add_commutativity_matching_coords(self, v: Variable) -> None: full = xr.DataArray( np.arange(20, dtype=float), 
dims=["dim_2"], @@ -806,8 +906,18 @@ def test_add_commutativity_full_coords(self, v: Variable) -> None: ) assert_linequal(v + full, full + v) + @pytest.mark.v1_only + def test_subset_raises_both_sides( + self, v: Variable, subset: xr.DataArray + ) -> None: + with pytest.raises(ValueError, match="exact"): + v * subset + with pytest.raises(ValueError, match="exact"): + subset * v + class TestQuadratic: - def test_quadexpr_add_subset( + @pytest.mark.legacy_only + def test_quadexpr_add_subset_fills( self, v: Variable, subset: xr.DataArray, @@ -820,6 +930,15 @@ def test_quadexpr_add_subset( assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, expected_fill) + @pytest.mark.v1_only + def test_quadexpr_add_subset_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + qexpr + subset + + @pytest.mark.legacy_only def test_quadexpr_sub_subset( self, v: Variable, @@ -833,7 +952,8 @@ def test_quadexpr_sub_subset( assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, -expected_fill) - def test_quadexpr_mul_subset( + @pytest.mark.legacy_only + def test_quadexpr_mul_subset_fills( self, v: Variable, subset: xr.DataArray, @@ -846,6 +966,15 @@ def test_quadexpr_mul_subset( assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + @pytest.mark.v1_only + def test_quadexpr_mul_subset_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + qexpr * subset + + @pytest.mark.legacy_only def test_subset_mul_quadexpr( self, v: Variable, @@ -859,6 +988,7 @@ def test_subset_mul_quadexpr( assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + @pytest.mark.legacy_only def test_subset_add_quadexpr(self, v: Variable, subset: 
xr.DataArray) -> None: qexpr = v * v assert_quadequal(subset + qexpr, qexpr + subset) @@ -867,14 +997,13 @@ class TestMissingValues: """ Same shape as variable but with NaN entries in the constant. - NaN values are filled with operation-specific neutral elements: - - Addition/subtraction: NaN -> 0 (additive identity) - - Multiplication: NaN -> 0 (zeroes out the variable) - - Division: NaN -> 1 (multiplicative identity, no scaling) + Legacy: NaN values are filled with operation-specific neutral elements. + V1: NaN values propagate (no implicit fillna). """ NAN_POSITIONS = [0, 5, 19] + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_add_nan_filled( self, @@ -891,6 +1020,22 @@ def test_add_nan_filled( for i in self.NAN_POSITIONS: assert result.const.values[i] == base_const + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_add_nan_propagates(self, v: Variable, operand: str) -> None: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + vals[5] = np.nan + vals[19] = np.nan + nan_constant = xr.DataArray( + vals, dims=["dim_2"], coords={"dim_2": range(20)} + ) + target = v if operand == "var" else v + 5 + result = target + nan_constant + for i in self.NAN_POSITIONS: + assert np.isnan(result.const.values[i]) + + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_sub_nan_filled( self, @@ -907,6 +1052,7 @@ def test_sub_nan_filled( for i in self.NAN_POSITIONS: assert result.const.values[i] == base_const + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_mul_nan_filled( self, @@ -922,6 +1068,19 @@ def test_mul_nan_filled( for i in self.NAN_POSITIONS: assert result.coeffs.squeeze().values[i] == 0.0 + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_mul_nan_propagates(self, v: Variable, operand: str) -> None: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + nan_constant = 
xr.DataArray( + vals, dims=["dim_2"], coords={"dim_2": range(20)} + ) + target = v if operand == "var" else 1 * v + result = target * nan_constant + assert np.isnan(result.coeffs.squeeze().values[0]) + + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_div_nan_filled( self, @@ -938,6 +1097,7 @@ def test_div_nan_filled( for i in self.NAN_POSITIONS: assert result.coeffs.squeeze().values[i] == original_coeffs[i] + @pytest.mark.legacy_only def test_add_commutativity( self, v: Variable, @@ -952,6 +1112,7 @@ def test_add_commutativity( result_a.coeffs.values, result_b.coeffs.values ) + @pytest.mark.legacy_only def test_mul_commutativity( self, v: Variable, @@ -965,6 +1126,7 @@ def test_mul_commutativity( result_a.coeffs.values, result_b.coeffs.values ) + @pytest.mark.legacy_only def test_quadexpr_add_nan( self, v: Variable, @@ -977,20 +1139,40 @@ def test_quadexpr_add_nan( assert not np.isnan(result.const.values).any() class TestExpressionWithNaN: - """Test that NaN in expression's own const/coeffs doesn't propagate.""" + """ + Test NaN in expression's own const/coeffs. - def test_shifted_expr_add_scalar(self, v: Variable) -> None: + Legacy: NaN is filled with neutral elements. + V1: NaN propagates. 
+ """ + + @pytest.mark.legacy_only + def test_shifted_expr_add_scalar_filled(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) result = expr + 5 assert not np.isnan(result.const.values).any() assert result.const.values[0] == 5.0 - def test_shifted_expr_mul_scalar(self, v: Variable) -> None: + @pytest.mark.v1_only + def test_shifted_expr_add_scalar_propagates(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr + 5 + assert np.isnan(result.const.values[0]) + + @pytest.mark.legacy_only + def test_shifted_expr_mul_scalar_filled(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) result = expr * 2 assert not np.isnan(result.coeffs.squeeze().values).any() assert result.coeffs.squeeze().values[0] == 0.0 + @pytest.mark.v1_only + def test_shifted_expr_mul_scalar_propagates(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr * 2 + assert np.isnan(result.coeffs.squeeze().values[0]) + + @pytest.mark.legacy_only def test_shifted_expr_add_array(self, v: Variable) -> None: arr = np.arange(v.sizes["dim_2"], dtype=float) expr = (1 * v).shift(dim_2=1) @@ -998,6 +1180,7 @@ def test_shifted_expr_add_array(self, v: Variable) -> None: assert not np.isnan(result.const.values).any() assert result.const.values[0] == 0.0 + @pytest.mark.legacy_only def test_shifted_expr_mul_array(self, v: Variable) -> None: arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 expr = (1 * v).shift(dim_2=1) @@ -1005,18 +1188,21 @@ def test_shifted_expr_mul_array(self, v: Variable) -> None: assert not np.isnan(result.coeffs.squeeze().values).any() assert result.coeffs.squeeze().values[0] == 0.0 + @pytest.mark.legacy_only def test_shifted_expr_div_scalar(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) result = expr / 2 assert not np.isnan(result.coeffs.squeeze().values).any() assert result.coeffs.squeeze().values[0] == 0.0 + @pytest.mark.legacy_only def test_shifted_expr_sub_scalar(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) 
result = expr - 3 assert not np.isnan(result.const.values).any() assert result.const.values[0] == -3.0 + @pytest.mark.legacy_only def test_shifted_expr_div_array(self, v: Variable) -> None: arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 expr = (1 * v).shift(dim_2=1) @@ -1024,6 +1210,7 @@ def test_shifted_expr_div_array(self, v: Variable) -> None: assert not np.isnan(result.coeffs.squeeze().values).any() assert result.coeffs.squeeze().values[0] == 0.0 + @pytest.mark.legacy_only def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: nan_coeff = np.ones(v.sizes["dim_2"]) nan_coeff[0] = np.nan @@ -1032,7 +1219,8 @@ def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: assert result.coeffs.squeeze().values[0] == 0.0 class TestMultiDim: - def test_multidim_subset_mul(self, m: Model) -> None: + @pytest.mark.legacy_only + def test_multidim_subset_mul_fills(self, m: Model) -> None: coords_a = pd.RangeIndex(4, name="a") coords_b = pd.RangeIndex(5, name="b") w = m.add_variables(coords=[coords_a, coords_b], name="w") @@ -1051,7 +1239,21 @@ def test_multidim_subset_mul(self, m: Model) -> None: assert result.coeffs.squeeze().sel(a=0, b=0).item() == pytest.approx(0.0) assert result.coeffs.squeeze().sel(a=1, b=2).item() == pytest.approx(0.0) - def test_multidim_subset_add(self, m: Model) -> None: + @pytest.mark.v1_only + def test_multidim_subset_mul_raises(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + with pytest.raises(ValueError, match="exact"): + w * subset_2d + + @pytest.mark.legacy_only + def test_multidim_subset_add_fills(self, m: Model) -> None: coords_a = pd.RangeIndex(4, name="a") coords_b = pd.RangeIndex(5, name="b") w = m.add_variables(coords=[coords_a, coords_b], name="w") @@ -1069,6 +1271,19 @@ 
def test_multidim_subset_add(self, m: Model) -> None: assert result.const.sel(a=3, b=4).item() == pytest.approx(5.0) assert result.const.sel(a=0, b=0).item() == pytest.approx(0.0) + @pytest.mark.v1_only + def test_multidim_subset_add_raises(self, m: Model) -> None: + coords_a = pd.RangeIndex(4, name="a") + coords_b = pd.RangeIndex(5, name="b") + w = m.add_variables(coords=[coords_a, coords_b], name="w") + subset_2d = xr.DataArray( + [[2.0, 3.0], [4.0, 5.0]], + dims=["a", "b"], + coords={"a": [1, 3], "b": [0, 4]}, + ) + with pytest.raises(ValueError, match="exact"): + w + subset_2d + class TestXarrayCompat: def test_da_eq_da_still_works(self) -> None: da1 = xr.DataArray([1, 2, 3]) @@ -2253,213 +2468,3 @@ def test_quadratic_mul_constant_join_inner_v1( const = xr.DataArray([2, 3, 4], dims=["i"], coords={"i": [1, 2, 3]}) result = quad.mul(const, join="inner") assert list(result.data.indexes["i"]) == [1, 2] - - -@pytest.mark.v1_only -class TestCoordinateAlignmentV1: - """V1: exact join raises on mismatched coords; explicit join= is the escape hatch.""" - - @pytest.fixture(params=["da", "series"]) - def subset(self, request: Any) -> xr.DataArray | pd.Series: - if request.param == "da": - return xr.DataArray([10.0, 30.0], dims=["dim_2"], coords={"dim_2": [1, 3]}) - return pd.Series([10.0, 30.0], index=pd.Index([1, 3], name="dim_2")) - - @pytest.fixture(params=["da", "series"]) - def superset(self, request: Any) -> xr.DataArray | pd.Series: - if request.param == "da": - return xr.DataArray( - np.arange(25, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(25)}, - ) - return pd.Series( - np.arange(25, dtype=float), index=pd.Index(range(25), name="dim_2") - ) - - class TestSubset: - """Under v1, subset operations raise ValueError (exact join).""" - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_subset_raises( - self, v: Variable, subset: xr.DataArray, operand: str - ) -> None: - target = v if operand == "var" else 1 * v - with 
pytest.raises(ValueError, match="exact"): - target * subset - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_subset_raises( - self, v: Variable, subset: xr.DataArray, operand: str - ) -> None: - target = v if operand == "var" else v + 5 - with pytest.raises(ValueError, match="exact"): - target + subset - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_subset_raises( - self, v: Variable, subset: xr.DataArray, operand: str - ) -> None: - target = v if operand == "var" else v + 5 - with pytest.raises(ValueError, match="exact"): - target - subset - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_subset_raises( - self, v: Variable, subset: xr.DataArray, operand: str - ) -> None: - target = v if operand == "var" else 1 * v - with pytest.raises(ValueError, match="exact"): - target / subset - - def test_subset_add_var_raises(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - subset + v - - def test_subset_sub_var_raises(self, v: Variable, subset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - subset - v - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_subset_join_left( - self, v: Variable, subset: xr.DataArray, operand: str - ) -> None: - """Explicit join='left' fills zeros for missing coords.""" - target = v if operand == "var" else 1 * v - result = target.mul(subset, join="left") - assert result.sizes["dim_2"] == v.sizes["dim_2"] - assert not np.isnan(result.coeffs.values).any() - - class TestSuperset: - """Under v1, superset operations raise ValueError (exact join).""" - - def test_add_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v + superset - - def test_mul_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: - with pytest.raises(ValueError, match="exact"): - v * superset - - class TestDisjoint: - """Under v1, 
disjoint coord operations raise ValueError.""" - - def test_add_disjoint_raises(self, v: Variable) -> None: - disjoint = xr.DataArray( - [100.0, 200.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - with pytest.raises(ValueError, match="exact"): - v + disjoint - - def test_mul_disjoint_raises(self, v: Variable) -> None: - disjoint = xr.DataArray( - [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} - ) - with pytest.raises(ValueError, match="exact"): - v * disjoint - - class TestCommutativity: - """Under v1, only matching coords allow commutativity.""" - - def test_add_commutativity_matching_coords(self, v: Variable) -> None: - matching = xr.DataArray( - np.arange(20, dtype=float), - dims=["dim_2"], - coords={"dim_2": range(20)}, - ) - assert_linequal(v + matching, matching + v) - - def test_subset_raises_both_sides( - self, v: Variable, subset: xr.DataArray - ) -> None: - with pytest.raises(ValueError, match="exact"): - v * subset - with pytest.raises(ValueError, match="exact"): - subset * v - - class TestQuadratic: - """Under v1, subset operations on quadratic expressions raise.""" - - def test_quadexpr_add_subset_raises( - self, v: Variable, subset: xr.DataArray - ) -> None: - qexpr = v * v - with pytest.raises(ValueError, match="exact"): - qexpr + subset - - def test_quadexpr_mul_subset_raises( - self, v: Variable, subset: xr.DataArray - ) -> None: - qexpr = v * v - with pytest.raises(ValueError, match="exact"): - qexpr * subset - - class TestMissingValues: - """Under v1, NaN values propagate (no implicit fillna).""" - - NAN_POSITIONS = [0, 5, 19] - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_nan_propagates(self, v: Variable, operand: str) -> None: - vals = np.arange(20, dtype=float) - vals[0] = np.nan - vals[5] = np.nan - vals[19] = np.nan - nan_constant = xr.DataArray( - vals, dims=["dim_2"], coords={"dim_2": range(20)} - ) - target = v if operand == "var" else v + 5 - result = target + nan_constant - for i in 
self.NAN_POSITIONS: - assert np.isnan(result.const.values[i]) - - @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_nan_propagates(self, v: Variable, operand: str) -> None: - vals = np.arange(20, dtype=float) - vals[0] = np.nan - nan_constant = xr.DataArray( - vals, dims=["dim_2"], coords={"dim_2": range(20)} - ) - target = v if operand == "var" else 1 * v - result = target * nan_constant - assert np.isnan(result.coeffs.squeeze().values[0]) - - class TestExpressionWithNaN: - """Under v1, NaN in expression's own const/coeffs propagates.""" - - def test_shifted_expr_add_scalar(self, v: Variable) -> None: - expr = (1 * v).shift(dim_2=1) - result = expr + 5 - assert np.isnan(result.const.values[0]) - - def test_shifted_expr_mul_scalar(self, v: Variable) -> None: - expr = (1 * v).shift(dim_2=1) - result = expr * 2 - assert np.isnan(result.coeffs.squeeze().values[0]) - - class TestMultiDim: - """Under v1, multi-dim subset operations raise.""" - - def test_multidim_subset_mul_raises(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") - coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - with pytest.raises(ValueError, match="exact"): - w * subset_2d - - def test_multidim_subset_add_raises(self, m: Model) -> None: - coords_a = pd.RangeIndex(4, name="a") - coords_b = pd.RangeIndex(5, name="b") - w = m.add_variables(coords=[coords_a, coords_b], name="w") - subset_2d = xr.DataArray( - [[2.0, 3.0], [4.0, 5.0]], - dims=["a", "b"], - coords={"a": [1, 3], "b": [0, 4]}, - ) - with pytest.raises(ValueError, match="exact"): - w + subset_2d From 0c33599d822c0351128652cdb245e28837fd73a8 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Mar 2026 13:47:44 +0100 Subject: [PATCH 56/66] Add missing v1 counterparts for all legacy_only tests, fix test 
issues - Add 22 missing v1_only counterparts across test_linear_expression.py: - TestSuperset: v1 raises for add/sub/mul commutativity and div - TestDisjoint: v1 raises for div - TestCommutativity: parametrized v1 raises for all ops - TestQuadratic: v1 raises for sub, reverse mul, reverse add - TestMissingValues: v1 NaN propagates for sub, div, commutativity, quadexpr - TestExpressionWithNaN: v1 NaN propagates for add/mul/div array, sub/div scalar - Add v1 negative assertions in test_linear_expression_sum_v1 and test_linear_expression_sum_with_const_v1 (assert mismatched coords raise before showing assign_coords workaround) - Add TestNoDeprecationWarnings v1 counterpart in test_convention.py - Fix test_align_v1: use pytest.raises(ValueError) instead of bare Exception - Remove redundant test_superset_comparison_raises (covered by parametrized test_superset_comparison_var_raises) - Remove v1_only marker from TestScalarFastPath (convention-independent) - Un-mark test_variable_to_linexpr_nan_coefficient as legacy_only (to_linexpr fills NaN under both conventions) Co-Authored-By: Claude Opus 4.6 --- test/test_common.py | 2 +- test/test_constraints.py | 8 -- test/test_convention.py | 30 ++++- test/test_linear_expression.py | 197 ++++++++++++++++++++++++++++++++- 4 files changed, 224 insertions(+), 13 deletions(-) diff --git a/test/test_common.py b/test/test_common.py index 51116a9a..7fa70489 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -711,7 +711,7 @@ def test_align_v1(x: Variable, u: Variable) -> None: # noqa: F811 ) # exact join raises on mismatched coords - with pytest.raises(Exception): + with pytest.raises(ValueError, match="exact"): align(x, alpha) # explicit inner join diff --git a/test/test_constraints.py b/test/test_constraints.py index eebf2940..67cd592b 100644 --- a/test/test_constraints.py +++ b/test/test_constraints.py @@ -533,14 +533,6 @@ def test_superset_comparison_no_nan( assert not np.isnan(con.lhs.coeffs.values).any() assert not 
np.isnan(con.rhs.values).any() - @pytest.mark.v1_only - def test_superset_comparison_raises(self, v: Variable) -> None: - superset = xr.DataArray( - np.arange(25, dtype=float), dims=["dim_2"], coords={"dim_2": range(25)} - ) - with pytest.raises(ValueError, match="exact"): - superset <= v - @pytest.mark.v1_only @pytest.mark.parametrize("sign", [LESS_EQUAL, GREATER_EQUAL]) def test_superset_comparison_var_raises(self, v: Variable, sign: str) -> None: diff --git a/test/test_convention.py b/test/test_convention.py index 95b3a1a1..7a566e38 100644 --- a/test/test_convention.py +++ b/test/test_convention.py @@ -124,12 +124,40 @@ def test_align_emits_deprecation_warning(self, a: Variable) -> None: linopy.align(a, alpha) +@pytest.mark.v1_only +class TestNoDeprecationWarnings: + """V1: matching-coord operations should not emit deprecation warnings.""" + + def test_add_constant_no_deprecation_warning(self, a: Variable) -> None: + const = xr.DataArray([1, 2, 3, 4, 5], dims=["i"], coords={"i": range(5)}) + import warnings + + with warnings.catch_warnings(): + warnings.simplefilter("error", LinopyDeprecationWarning) + _ = (1 * a) + const + + def test_mul_constant_no_deprecation_warning(self, a: Variable) -> None: + const = xr.DataArray([1, 2, 3, 4, 5], dims=["i"], coords={"i": range(5)}) + import warnings + + with warnings.catch_warnings(): + warnings.simplefilter("error", LinopyDeprecationWarning) + _ = (1 * a) * const + + def test_align_no_deprecation_warning(self, a: Variable) -> None: + alpha = xr.DataArray([1, 2, 3, 4, 5], dims=["i"], coords={"i": range(5)}) + import warnings + + with warnings.catch_warnings(): + warnings.simplefilter("error", LinopyDeprecationWarning) + linopy.align(a, alpha) + + # --------------------------------------------------------------------------- # 6. 
Scalar fast path # --------------------------------------------------------------------------- -@pytest.mark.v1_only class TestScalarFastPath: """Scalar operations should produce same results as array operations.""" diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 3cb8337b..c833e17a 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -428,7 +428,11 @@ def test_linear_expression_sum_v1( assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - # v1: mismatched coords require explicit assign_coords + # v1: mismatched coords raise ValueError + with pytest.raises(ValueError, match="Coordinate mismatch"): + v.loc[:9] + v.loc[10:] + + # explicit assign_coords as workaround a = v.loc[:9] b = v.loc[10:].assign_coords(dim_2=a.coords["dim_2"]) expr = a + b @@ -480,7 +484,11 @@ def test_linear_expression_sum_with_const_v1( assert_linequal(expr.sum(["dim_0", TERM_DIM]), expr.sum("dim_0")) - # v1: mismatched coords require explicit assign_coords + # v1: mismatched coords raise ValueError + with pytest.raises(ValueError, match="Coordinate mismatch"): + v.loc[:9] + v.loc[10:] + + # explicit assign_coords as workaround a = v.loc[:9] b = v.loc[10:].assign_coords(dim_2=a.coords["dim_2"]) expr = a + b @@ -785,14 +793,41 @@ def test_add_superset_raises(self, v: Variable, superset: xr.DataArray) -> None: def test_add_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: assert_linequal(superset + v, v + superset) + @pytest.mark.v1_only + def test_add_var_commutative_raises( + self, v: Variable, superset: xr.DataArray + ) -> None: + with pytest.raises(ValueError, match="exact"): + superset + v + with pytest.raises(ValueError, match="exact"): + v + superset + @pytest.mark.legacy_only def test_sub_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: assert_linequal(superset - v, -v + superset) + @pytest.mark.v1_only + def test_sub_var_commutative_raises( + self, v: Variable, superset: 
xr.DataArray + ) -> None: + with pytest.raises(ValueError, match="exact"): + superset - v + with pytest.raises(ValueError, match="exact"): + v - superset + @pytest.mark.legacy_only def test_mul_var_commutative(self, v: Variable, superset: xr.DataArray) -> None: assert_linequal(superset * v, v * superset) + @pytest.mark.v1_only + def test_mul_var_commutative_raises( + self, v: Variable, superset: xr.DataArray + ) -> None: + with pytest.raises(ValueError, match="exact"): + superset * v + with pytest.raises(ValueError, match="exact"): + v * superset + @pytest.mark.legacy_only def test_mul_superset_pins_to_lhs_coords( self, v: Variable, superset: xr.DataArray @@ -817,6 +852,16 @@ def test_div_superset_pins_to_lhs_coords(self, v: Variable) -> None: assert result.sizes["dim_2"] == v.sizes["dim_2"] assert not np.isnan(result.coeffs.values).any() + @pytest.mark.v1_only + def test_div_superset_raises(self, v: Variable) -> None: + superset_nonzero = xr.DataArray( + np.arange(1, 26, dtype=float), + dims=["dim_2"], + coords={"dim_2": range(25)}, + ) + with pytest.raises(ValueError, match="exact"): + v / superset_nonzero + class TestDisjoint: @pytest.mark.legacy_only def test_add_disjoint_fills_zeros(self, v: Variable) -> None: @@ -864,6 +909,14 @@ def test_div_disjoint_preserves_coeffs(self, v: Variable) -> None: assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, np.ones(20)) + @pytest.mark.v1_only + def test_div_disjoint_raises(self, v: Variable) -> None: + disjoint = xr.DataArray( + [10.0, 20.0], dims=["dim_2"], coords={"dim_2": [50, 60]} + ) + with pytest.raises(ValueError, match="exact"): + v / disjoint + class TestCommutativity: @pytest.mark.legacy_only @pytest.mark.parametrize( @@ -885,6 +938,23 @@ def test_commutativity( ) -> None: assert_linequal(make_lhs(v, subset), make_rhs(v, subset)) + @pytest.mark.v1_only + @pytest.mark.parametrize( + "op", + [ + lambda v, s: s * v, + lambda v, s: s + v, + lambda v, s: s + 
(v + 5), + lambda v, s: s - v, + ], + ids=["subset*var", "subset+var", "subset+expr", "subset-var"], + ) + def test_commutativity_raises( + self, v: Variable, subset: xr.DataArray, op: Any + ) -> None: + with pytest.raises(ValueError, match="exact"): + op(v, subset) + @pytest.mark.legacy_only def test_sub_var_anticommutative( self, v: Variable, subset: xr.DataArray @@ -952,6 +1022,14 @@ def test_quadexpr_sub_subset( assert not np.isnan(result.const.values).any() np.testing.assert_array_equal(result.const.values, -expected_fill) + @pytest.mark.v1_only + def test_quadexpr_sub_subset_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + qexpr - subset + @pytest.mark.legacy_only def test_quadexpr_mul_subset_fills( self, @@ -988,11 +1066,27 @@ def test_subset_mul_quadexpr( assert not np.isnan(result.coeffs.values).any() np.testing.assert_array_equal(result.coeffs.squeeze().values, expected_fill) + @pytest.mark.v1_only + def test_subset_mul_quadexpr_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + subset * qexpr + @pytest.mark.legacy_only def test_subset_add_quadexpr(self, v: Variable, subset: xr.DataArray) -> None: qexpr = v * v assert_quadequal(subset + qexpr, qexpr + subset) + @pytest.mark.v1_only + def test_subset_add_quadexpr_raises( + self, v: Variable, subset: xr.DataArray + ) -> None: + qexpr = v * v + with pytest.raises(ValueError, match="exact"): + subset + qexpr + class TestMissingValues: """ Same shape as variable but with NaN entries in the constant. 
@@ -1052,6 +1146,20 @@ def test_sub_nan_filled( for i in self.NAN_POSITIONS: assert result.const.values[i] == base_const + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_sub_nan_propagates(self, v: Variable, operand: str) -> None: + vals = np.arange(20, dtype=float) + for i in self.NAN_POSITIONS: + vals[i] = np.nan + nan_constant = xr.DataArray( + vals, dims=["dim_2"], coords={"dim_2": range(20)} + ) + target = v if operand == "var" else v + 5 + result = target - nan_constant + for i in self.NAN_POSITIONS: + assert np.isnan(result.const.values[i]) + @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) def test_mul_nan_filled( @@ -1097,6 +1205,20 @@ def test_div_nan_filled( for i in self.NAN_POSITIONS: assert result.coeffs.squeeze().values[i] == original_coeffs[i] + @pytest.mark.v1_only + @pytest.mark.parametrize("operand", ["var", "expr"]) + def test_div_nan_propagates(self, v: Variable, operand: str) -> None: + vals = np.arange(20, dtype=float) + 1 + vals[0] = np.nan + vals[5] = np.nan + nan_constant = xr.DataArray( + vals, dims=["dim_2"], coords={"dim_2": range(20)} + ) + target = v if operand == "var" else 1 * v + result = target / nan_constant + assert np.isnan(result.coeffs.squeeze().values[0]) + assert np.isnan(result.coeffs.squeeze().values[5]) + @pytest.mark.legacy_only def test_add_commutativity( self, @@ -1112,6 +1234,18 @@ def test_add_commutativity( result_a.coeffs.values, result_b.coeffs.values ) + @pytest.mark.v1_only + def test_add_commutativity_nan_propagates(self, v: Variable) -> None: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + nan_constant = xr.DataArray( + vals, dims=["dim_2"], coords={"dim_2": range(20)} + ) + result_a = v + nan_constant + result_b = nan_constant + v + assert np.isnan(result_a.const.values[0]) + assert np.isnan(result_b.const.values[0]) + @pytest.mark.legacy_only def test_mul_commutativity( self, @@ -1126,6 +1260,18 @@ def test_mul_commutativity( 
result_a.coeffs.values, result_b.coeffs.values ) + @pytest.mark.v1_only + def test_mul_commutativity_nan_propagates(self, v: Variable) -> None: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + nan_constant = xr.DataArray( + vals, dims=["dim_2"], coords={"dim_2": range(20)} + ) + result_a = v * nan_constant + result_b = nan_constant * v + assert np.isnan(result_a.coeffs.squeeze().values[0]) + assert np.isnan(result_b.coeffs.squeeze().values[0]) + @pytest.mark.legacy_only def test_quadexpr_add_nan( self, @@ -1138,6 +1284,18 @@ def test_quadexpr_add_nan( assert result.sizes["dim_2"] == 20 assert not np.isnan(result.const.values).any() + @pytest.mark.v1_only + def test_quadexpr_add_nan_propagates(self, v: Variable) -> None: + vals = np.arange(20, dtype=float) + vals[0] = np.nan + nan_constant = xr.DataArray( + vals, dims=["dim_2"], coords={"dim_2": range(20)} + ) + qexpr = v * v + result = qexpr + nan_constant + assert isinstance(result, QuadraticExpression) + assert np.isnan(result.const.values[0]) + class TestExpressionWithNaN: """ Test NaN in expression's own const/coeffs. 
@@ -1180,6 +1338,13 @@ def test_shifted_expr_add_array(self, v: Variable) -> None: assert not np.isnan(result.const.values).any() assert result.const.values[0] == 0.0 + @pytest.mark.v1_only + def test_shifted_expr_add_array_propagates(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + expr = (1 * v).shift(dim_2=1) + result = expr + arr + assert np.isnan(result.const.values[0]) + @pytest.mark.legacy_only def test_shifted_expr_mul_array(self, v: Variable) -> None: arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 @@ -1188,6 +1353,13 @@ def test_shifted_expr_mul_array(self, v: Variable) -> None: assert not np.isnan(result.coeffs.squeeze().values).any() assert result.coeffs.squeeze().values[0] == 0.0 + @pytest.mark.v1_only + def test_shifted_expr_mul_array_propagates(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr * arr + assert np.isnan(result.coeffs.squeeze().values[0]) + @pytest.mark.legacy_only def test_shifted_expr_div_scalar(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) @@ -1195,6 +1367,12 @@ def test_shifted_expr_div_scalar(self, v: Variable) -> None: assert not np.isnan(result.coeffs.squeeze().values).any() assert result.coeffs.squeeze().values[0] == 0.0 + @pytest.mark.v1_only + def test_shifted_expr_div_scalar_propagates(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr / 2 + assert np.isnan(result.coeffs.squeeze().values[0]) + @pytest.mark.legacy_only def test_shifted_expr_sub_scalar(self, v: Variable) -> None: expr = (1 * v).shift(dim_2=1) @@ -1202,6 +1380,12 @@ def test_shifted_expr_sub_scalar(self, v: Variable) -> None: assert not np.isnan(result.const.values).any() assert result.const.values[0] == -3.0 + @pytest.mark.v1_only + def test_shifted_expr_sub_scalar_propagates(self, v: Variable) -> None: + expr = (1 * v).shift(dim_2=1) + result = expr - 3 + assert np.isnan(result.const.values[0]) + 
@pytest.mark.legacy_only def test_shifted_expr_div_array(self, v: Variable) -> None: arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 @@ -1210,8 +1394,15 @@ def test_shifted_expr_div_array(self, v: Variable) -> None: assert not np.isnan(result.coeffs.squeeze().values).any() assert result.coeffs.squeeze().values[0] == 0.0 - @pytest.mark.legacy_only + @pytest.mark.v1_only + def test_shifted_expr_div_array_propagates(self, v: Variable) -> None: + arr = np.arange(v.sizes["dim_2"], dtype=float) + 1 + expr = (1 * v).shift(dim_2=1) + result = expr / arr + assert np.isnan(result.coeffs.squeeze().values[0]) + def test_variable_to_linexpr_nan_coefficient(self, v: Variable) -> None: + """to_linexpr fills NaN with 0 under both conventions (internal conversion).""" nan_coeff = np.ones(v.sizes["dim_2"]) nan_coeff[0] = np.nan result = v.to_linexpr(nan_coeff) From 3abe10e3e7bbcf199cf35c3707e96dd2853dfcd2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 11 Mar 2026 14:02:02 +0100 Subject: [PATCH 57/66] Make piecewise and SOS tests run under both v1 and legacy conventions Use drop=True on scalar isel calls to prevent residual scalar coordinates from causing exact-join mismatches under the v1 arithmetic convention. Also align binary_hi coordinates with delta_lo in incremental PWL. 
Co-Authored-By: Claude Opus 4.6 --- linopy/piecewise.py | 7 +++++-- linopy/sos_reformulation.py | 8 +++++--- test/test_piecewise_constraints.py | 3 --- test/test_sos_reformulation.py | 2 -- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/linopy/piecewise.py b/linopy/piecewise.py index 63bc3f4a..7ba2fc23 100644 --- a/linopy/piecewise.py +++ b/linopy/piecewise.py @@ -858,10 +858,13 @@ def _add_pwl_incremental_core( fill_con = model.add_constraints(delta_hi <= delta_lo, name=fill_name) binary_hi = binary_var.isel({LP_SEG_DIM: slice(1, None)}, drop=True) + binary_hi = binary_hi.assign_coords( + {LP_SEG_DIM: delta_lo.coords[LP_SEG_DIM].values} + ) model.add_constraints(binary_hi <= delta_lo, name=inc_order_name) - x0 = x_points.isel({BREAKPOINT_DIM: 0}) - y0 = y_points.isel({BREAKPOINT_DIM: 0}) + x0 = x_points.isel({BREAKPOINT_DIM: 0}, drop=True) + y0 = y_points.isel({BREAKPOINT_DIM: 0}, drop=True) # When active is provided, multiply base terms by active x_base: DataArray | LinearExpression = x0 diff --git a/linopy/sos_reformulation.py b/linopy/sos_reformulation.py index 8ccb7613..8e3e0330 100644 --- a/linopy/sos_reformulation.py +++ b/linopy/sos_reformulation.py @@ -182,7 +182,8 @@ def reformulate_sos2( added_constraints = [first_name] model.add_constraints( - x_expr.isel({sos_dim: 0}) <= M.isel({sos_dim: 0}) * z_expr.isel({sos_dim: 0}), + x_expr.isel({sos_dim: 0}, drop=True) + <= M.isel({sos_dim: 0}, drop=True) * z_expr.isel({sos_dim: 0}, drop=True), name=first_name, ) @@ -208,8 +209,9 @@ def reformulate_sos2( added_constraints.append(mid_name) model.add_constraints( - x_expr.isel({sos_dim: n - 1}) - <= M.isel({sos_dim: n - 1}) * z_expr.isel({sos_dim: n - 2}), + x_expr.isel({sos_dim: n - 1}, drop=True) + <= M.isel({sos_dim: n - 1}, drop=True) + * z_expr.isel({sos_dim: n - 2}, drop=True), name=last_name, ) added_constraints.extend([last_name, card_name]) diff --git a/test/test_piecewise_constraints.py b/test/test_piecewise_constraints.py index 
7da36991..ab8e1f09 100644 --- a/test/test_piecewise_constraints.py +++ b/test/test_piecewise_constraints.py @@ -51,9 +51,6 @@ ] -pytestmark = pytest.mark.legacy_only - - # =========================================================================== # slopes_to_points # =========================================================================== diff --git a/test/test_sos_reformulation.py b/test/test_sos_reformulation.py index 20e32d9a..24ba62b3 100644 --- a/test/test_sos_reformulation.py +++ b/test/test_sos_reformulation.py @@ -18,8 +18,6 @@ undo_sos_reformulation, ) -pytestmark = pytest.mark.legacy_only - class TestValidateBounds: """Tests for bound validation in compute_big_m_values.""" From 7ae9b08f9ceb784bf5788c5b2beb8b2132e301ef Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 12 Mar 2026 08:22:18 +0100 Subject: [PATCH 58/66] Implement v1 NaN convention: absent terms, not absent coordinates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NaN in linopy v1 means "absent term" — it marks individual terms as missing without masking entire coordinates. User-supplied NaN at API boundaries (constants, factors, constraint RHS) raises ValueError; masking must be explicit via .sel() or mask=. 
Implementation: - FILL_VALUE["coeffs"] changed from NaN to 0 (structural "no term") - NaN validation added in _add_constant, _apply_constant_op, to_constraint - Piecewise internals use .fillna(0) on breakpoint data - Tests updated to expect ValueError for NaN operands under v1 Key design decisions: - NaN enters only via mask= or structural ops (shift, reindex, where) - Combining expressions: absent terms do not poison valid terms (xr.sum skipna=True preserves valid contributions) - A coordinate is fully absent only when ALL terms have vars=-1 AND const is NaN — this is what isnull() checks - lhs >= rhs ≡ lhs - rhs >= 0, so RHS follows the same rules as constants Documentation: - New missing-data.ipynb: convention principles, fillna patterns, masking with .sel() and mask=, migration guide from legacy - New nan-edge-cases.ipynb: investigation of shift, roll, where, reindex, isnull, arithmetic on shifted expressions, sanitize_missings - arithmetic-convention.ipynb: updated to reference missing-data notebook Co-Authored-By: Claude Opus 4.6 --- doc/missing-data.nblink | 3 + examples/arithmetic-convention.ipynb | 599 ++++++++++++--------- examples/missing-data.ipynb | 431 +++++++++++++++ examples/nan-edge-cases.ipynb | 773 +++++++++++++++++++++++++++ linopy/expressions.py | 35 +- linopy/piecewise.py | 4 +- test/test_convention.py | 8 +- test/test_linear_expression.py | 56 +- test/test_optimization.py | 7 +- 9 files changed, 1621 insertions(+), 295 deletions(-) create mode 100644 doc/missing-data.nblink create mode 100644 examples/missing-data.ipynb create mode 100644 examples/nan-edge-cases.ipynb diff --git a/doc/missing-data.nblink b/doc/missing-data.nblink new file mode 100644 index 00000000..64befb24 --- /dev/null +++ b/doc/missing-data.nblink @@ -0,0 +1,3 @@ +{ + "path": "../examples/missing-data.ipynb" +} diff --git a/examples/arithmetic-convention.ipynb b/examples/arithmetic-convention.ipynb index 506e09b0..42a2bc3a 100644 --- a/examples/arithmetic-convention.ipynb 
+++ b/examples/arithmetic-convention.ipynb @@ -4,31 +4,26 @@ "cell_type": "markdown", "id": "intro", "metadata": {}, - "source": [ - "# Arithmetic Convention\n", - "\n", - "linopy is transitioning to a stricter arithmetic convention for coordinate alignment. This notebook covers:\n", - "\n", - "1. [How to opt in](#how-to-opt-in) to the new behavior\n", - "2. [v1 convention](#v1-convention-the-future-default) — strict coordinate matching (the future default)\n", - "3. [Legacy convention](#legacy-convention-current-default) — the current default behavior\n", - "4. [The `join` parameter](#the-join-parameter) — explicit control over alignment\n", - "5. [Migration guide](#migration-guide) — updating your code" - ] + "source": "# Arithmetic Convention\n\nlinopy is transitioning to a stricter arithmetic convention for coordinate alignment. This notebook covers:\n\n1. [How to opt in](#how-to-opt-in) to the new behavior\n2. [v1 convention](#v1-convention-the-future-default) — strict coordinate matching (the future default)\n3. [Legacy convention](#legacy-convention-current-default) — the current default behavior\n4. [The `join` parameter](#the-join-parameter) — explicit control over alignment\n5. [Migration guide](#migration-guide) — updating your code\n\nFor NaN handling and masking, see [Missing Data](missing-data.ipynb)." 
}, { "cell_type": "code", - "execution_count": null, "id": "imports", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.056995Z", + "start_time": "2026-03-11T14:44:59.298634Z" + } + }, "source": [ "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", "\n", "import linopy" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -42,13 +37,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "opt-in", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.064201Z", + "start_time": "2026-03-11T14:45:00.062580Z" + } + }, "source": [ "linopy.options[\"arithmetic_convention\"] = \"v1\"" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -90,10 +90,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "v1-setup", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.110784Z", + "start_time": "2026-03-11T14:45:00.067922Z" + } + }, "source": [ "m = linopy.Model()\n", "\n", @@ -105,7 +108,9 @@ "y = m.add_variables(lower=0, coords=[time], name=\"y\")\n", "gen = m.add_variables(lower=0, coords=[time, techs], name=\"gen\")\n", "risk = m.add_variables(lower=0, coords=[techs, scenarios], name=\"risk\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -117,72 +122,102 @@ }, { "cell_type": "code", - "execution_count": null, "id": "v1-same-coords", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.125467Z", + "start_time": "2026-03-11T14:45:00.114440Z" + } + }, "source": [ "# Same coords — just works\n", "x + y" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "v1-matching-constant", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": 
"2026-03-11T14:45:00.137242Z", + "start_time": "2026-03-11T14:45:00.129823Z" + } + }, "source": [ "# Constant with matching coords\n", "factor = xr.DataArray([2, 3, 4, 5, 6], dims=[\"time\"], coords={\"time\": time})\n", "x * factor" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "v1-broadcast-constant", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.155873Z", + "start_time": "2026-03-11T14:45:00.145769Z" + } + }, "source": [ "# Constant with fewer dims — broadcasts freely\n", "cost = xr.DataArray([1.0, 0.5, 3.0], dims=[\"tech\"], coords={\"tech\": techs})\n", "gen * cost # cost broadcasts over time" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "v1-broadcast-expr", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.178325Z", + "start_time": "2026-03-11T14:45:00.166370Z" + } + }, "source": [ "# Expression + Expression with non-shared dims — broadcasts freely\n", "gen + risk # (time, tech) + (tech, scenario) → (time, tech, scenario)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "v1-scalar", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.190629Z", + "start_time": "2026-03-11T14:45:00.184831Z" + } + }, "source": [ "# Scalar — always fine\n", "x + 5" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "v1-constraint-broadcast", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.215193Z", + "start_time": "2026-03-11T14:45:00.202065Z" + } + }, "source": [ "# Constraints — RHS with fewer dims broadcasts naturally\n", "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", 
"m.add_constraints(gen <= capacity, name=\"cap\") # capacity broadcasts over time" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -194,10 +229,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "v1-mismatch-expr", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.228580Z", + "start_time": "2026-03-11T14:45:00.221751Z" + } + }, "source": [ "y_short = m.add_variables(\n", " lower=0, coords=[pd.RangeIndex(3, name=\"time\")], name=\"y_short\"\n", @@ -207,14 +245,19 @@ " x + y_short # time coords don't match: [0..4] vs [0..2]\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "v1-mismatch-constant", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.241638Z", + "start_time": "2026-03-11T14:45:00.237972Z" + } + }, "source": [ "partial = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", "\n", @@ -222,48 +265,60 @@ " x * partial # time coords [0..4] vs [0,1,2]\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "v1-mismatch-constraint", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.257815Z", + "start_time": "2026-03-11T14:45:00.253230Z" + } + }, "source": [ "try:\n", " x <= partial # constraint RHS doesn't cover all coords\n", "except ValueError as e:\n", " print(\"ValueError:\", e)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", "id": "v1-nan-header", "metadata": {}, - "source": [ - "### NaN propagation\n", - "\n", - "Under v1, NaN values in constants **propagate** through arithmetic — they are not silently replaced with zeros. 
This makes missing data visible:" - ] + "source": "### NaN in arithmetic\n\nUnder v1, NaN values in arithmetic operands **raise a `ValueError`** — they are not silently replaced. See [Missing Data](missing-data.ipynb) for details on handling NaN and masking." }, { "cell_type": "code", - "execution_count": null, "id": "v1-nan", - "metadata": {}, - "outputs": [], - "source": [ - "vals = xr.DataArray([1.0, np.nan, 3.0, 4.0, 5.0], dims=[\"time\"], coords={\"time\": time})\n", - "result = x + vals\n", - "print(\"const:\", result.const.values) # NaN at position 1" - ] + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.273485Z", + "start_time": "2026-03-11T14:45:00.268607Z" + } + }, + "source": "vals = xr.DataArray([1.0, np.nan, 3.0, 4.0, 5.0], dims=[\"time\"], coords={\"time\": time})\n\ntry:\n x + vals\nexcept ValueError as e:\n print(\"ValueError:\", e)", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ValueError: Constant contains NaN values. 
Use .fillna() to handle missing values before arithmetic operations.\n" + ] + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "v1-escape-header", "metadata": {}, - "source": "### Escape hatches\n\nWhen coordinates don't match or your data contains NaN, you have several options:" + "source": "### Escape hatches for coordinate mismatches\n\nWhen coordinates don't match, you have several options:" }, { "cell_type": "markdown", @@ -277,13 +332,18 @@ }, { "cell_type": "code", - "execution_count": null, "id": "v1-sel-example", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.293520Z", + "start_time": "2026-03-11T14:45:00.285174Z" + } + }, "source": [ "x.sel(time=[0, 1, 2]) + y_short" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -297,23 +357,33 @@ }, { "cell_type": "code", - "execution_count": null, "id": "v1-join-example", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.315957Z", + "start_time": "2026-03-11T14:45:00.307311Z" + } + }, "source": [ "x.add(y_short, join=\"inner\") # intersection: time [0, 1, 2]" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "v1-join-outer", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.325706Z", + "start_time": "2026-03-11T14:45:00.319864Z" + } + }, "source": [ "x.mul(partial, join=\"left\") # keep x's coords, fill missing with 0" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -327,16 +397,21 @@ }, { "cell_type": "code", - "execution_count": null, "id": "v1-assign-coords-example", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.353860Z", + "start_time": "2026-03-11T14:45:00.344281Z" + } + }, "source": [ "z = m.add_variables(lower=0, 
coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", "\n", "# z has time=[5..9], x has time=[0..4] — same shape, different labels\n", "x + z.assign_coords(time=x.coords[\"time\"])" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -348,37 +423,19 @@ }, { "cell_type": "code", - "execution_count": null, "id": "v1-align-example", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.371208Z", + "start_time": "2026-03-11T14:45:00.361638Z" + } + }, "source": [ "x_aligned, y_short_aligned = linopy.align(x, y_short, join=\"outer\")\n", "x_aligned + y_short_aligned" - ] - }, - { - "cell_type": "markdown", - "id": "a2wmuj3o3rl", - "metadata": {}, - "source": "**Option 5: `.fillna()` — handle NaN in constants**\n\nUnder v1, NaN propagates through arithmetic. If your data has NaN values that represent \"no effect\" (e.g., missing cost data that should be zero), fill them explicitly before operating:\n\n```python\n# Addition/subtraction: fill with 0 (additive identity)\nx + data_with_nans.fillna(0)\n\n# Multiplication: fill with 1 to preserve coefficients, or 0 to zero them out\nx * scaling_factors.fillna(1) # NaN means \"no scaling\"\nx * mask.fillna(0) # NaN means \"exclude\"\n\n# Division: fill with 1 (multiplicative identity)\nx / divisors.fillna(1)\n```" - }, - { - "cell_type": "code", - "execution_count": null, - "id": "os5ovkgifp", - "metadata": {}, + ], "outputs": [], - "source": [ - "# NaN propagates by default\n", - "vals_with_nan = xr.DataArray(\n", - " [1.0, np.nan, 3.0, 4.0, 5.0], dims=[\"time\"], coords={\"time\": time}\n", - ")\n", - "print(\"With NaN: \", (x + vals_with_nan).const.values)\n", - "\n", - "# Fill explicitly to get legacy-like behavior\n", - "print(\"fillna(0): \", (x + vals_with_nan.fillna(0)).const.values)" - ] + "execution_count": null }, { "cell_type": "markdown", @@ -424,23 +481,31 @@ }, { "cell_type": "code", - "execution_count": null, "id": 
"legacy-switch", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.376208Z", + "start_time": "2026-03-11T14:45:00.374668Z" + } + }, "source": [ "import warnings\n", "\n", "linopy.options[\"arithmetic_convention\"] = \"legacy\"\n", "warnings.filterwarnings(\"ignore\", category=linopy.LinopyDeprecationWarning)" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "legacy-setup", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.390854Z", + "start_time": "2026-03-11T14:45:00.386569Z" + } + }, "source": [ "m2 = linopy.Model()\n", "time = pd.RangeIndex(5, name=\"time\")\n", @@ -448,7 +513,9 @@ "y2_short = m2.add_variables(\n", " lower=0, coords=[pd.RangeIndex(3, name=\"time\")], name=\"y_short\"\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -464,26 +531,36 @@ }, { "cell_type": "code", - "execution_count": null, "id": "legacy-subset", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.400749Z", + "start_time": "2026-03-11T14:45:00.393413Z" + } + }, "source": [ "# Different size — left join, fill missing with 0\n", "x2 + y2_short # y_short drops out at time 3, 4" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "legacy-same-size", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.413292Z", + "start_time": "2026-03-11T14:45:00.404468Z" + } + }, "source": [ "# Same size — positional alignment (labels ignored!)\n", "z2 = m2.add_variables(lower=0, coords=[pd.RangeIndex(5, 10, name=\"time\")], name=\"z\")\n", "x2 + z2 # x has time=[0..4], z has time=[5..9], but same size → positional match" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -500,15 +577,20 @@ }, { 
"cell_type": "code", - "execution_count": null, "id": "legacy-nan-fill", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.421451Z", + "start_time": "2026-03-11T14:45:00.416080Z" + } + }, "source": [ "vals = xr.DataArray([1.0, np.nan, 3.0, 4.0, 5.0], dims=[\"time\"], coords={\"time\": time})\n", "result = x2 + vals\n", "print(\"const:\", result.const.values) # NaN replaced with 0" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -522,27 +604,37 @@ }, { "cell_type": "code", - "execution_count": null, "id": "legacy-constraint", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.435315Z", + "start_time": "2026-03-11T14:45:00.429597Z" + } + }, "source": [ "rhs = xr.DataArray([10, 20, 30], dims=[\"time\"], coords={\"time\": [0, 1, 2]})\n", "con = x2 <= rhs # constraint only at time 0, 1, 2; NaN at time 3, 4\n", "con" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "legacy-restore-v1", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.447130Z", + "start_time": "2026-03-11T14:45:00.445694Z" + } + }, "source": [ "# Switch back to v1 for the rest of the notebook\n", "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", "warnings.resetwarnings()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -567,10 +659,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "join-setup", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.457694Z", + "start_time": "2026-03-11T14:45:00.453096Z" + } + }, "source": [ "m3 = linopy.Model()\n", "\n", @@ -579,62 +674,89 @@ "\n", "a = m3.add_variables(coords=[i_a], name=\"a\")\n", "b = m3.add_variables(coords=[i_b], name=\"b\")" - ] + ], + "outputs": [], + "execution_count": null }, { 
"cell_type": "code", - "execution_count": null, "id": "join-inner", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.473280Z", + "start_time": "2026-03-11T14:45:00.464589Z" + } + }, "source": [ "# Inner join — intersection (i=1, 2)\n", "a.add(b, join=\"inner\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "join-outer", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.488676Z", + "start_time": "2026-03-11T14:45:00.478816Z" + } + }, "source": [ "# Outer join — union (i=0, 1, 2, 3)\n", "a.add(b, join=\"outer\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "join-left", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.500599Z", + "start_time": "2026-03-11T14:45:00.491930Z" + } + }, "source": [ "# Left join — keep a's coords (i=0, 1, 2)\n", "a.add(b, join=\"left\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "join-right", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.511586Z", + "start_time": "2026-03-11T14:45:00.503595Z" + } + }, "source": [ "# Right join — keep b's coords (i=1, 2, 3)\n", "a.add(b, join=\"right\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "join-override", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.522774Z", + "start_time": "2026-03-11T14:45:00.515038Z" + } + }, "source": [ "# Override — positional (0↔1, 1↔2, 2↔3), uses a's labels\n", "a.add(b, join=\"override\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -646,83 +768,44 @@ }, { "cell_type": "code", - 
"execution_count": null, "id": "join-mul-example", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.535578Z", + "start_time": "2026-03-11T14:45:00.528426Z" + } + }, "source": [ "const = xr.DataArray([2, 3, 4], dims=[\"i\"], coords={\"i\": [1, 2, 3]})\n", "\n", "# Multiply, keeping only shared coords\n", "a.mul(const, join=\"inner\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "join-constraint", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.543751Z", + "start_time": "2026-03-11T14:45:00.538431Z" + } + }, "source": [ "# Constraint with left join — only a's coords, NaN at missing RHS positions\n", "rhs = xr.DataArray([10, 20], dims=[\"i\"], coords={\"i\": [0, 1]})\n", "a.le(rhs, join=\"left\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", "id": "migration-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## Migration guide\n", - "\n", - "To migrate from legacy to v1:\n", - "\n", - "### Step 1: Enable v1 and run your code\n", - "\n", - "```python\n", - "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", - "```\n", - "\n", - "Any code that relied on legacy alignment will now raise `ValueError` with a helpful message suggesting which `join=` to use.\n", - "\n", - "### Step 2: Fix each error\n", - "\n", - "Common patterns:\n", - "\n", - "| Legacy code (silent) | v1 equivalent (explicit) |\n", - "|---|---|\n", - "| `x + subset_constant` | `x.add(subset_constant, join=\"left\")` |\n", - "| `x + y` (same size, different labels) | `x + y.assign_coords(time=x.coords[\"time\"])` |\n", - "| `x <= partial_rhs` | `x.le(partial_rhs, join=\"left\")` |\n", - "| `expr + expr` (mismatched coords) | `expr.add(other, join=\"outer\")` or `.sel()` first |\n", - "\n", - "### Step 3: Handle NaN\n", - "\n", - "Under legacy, NaN in constants was silently replaced 
with 0. Under v1, NaN propagates. If your data contains NaN that should be treated as zero, use `.fillna(0)` explicitly:\n", - "\n", - "```python\n", - "# Legacy: NaN silently became 0\n", - "x + data_with_nans\n", - "\n", - "# v1: be explicit\n", - "x + data_with_nans.fillna(0)\n", - "```\n", - "\n", - "### Step 4: Pandas index names\n", - "\n", - "Under v1, pandas objects must have **named indices** to align properly with linopy variables:\n", - "\n", - "```python\n", - "# Will fail — unnamed index becomes \"dim_0\"\n", - "cost = pd.Series([10, 20], index=[\"wind\", \"solar\"])\n", - "\n", - "# Works — explicit dimension name\n", - "cost = pd.Series([10, 20], index=pd.Index([\"wind\", \"solar\"], name=\"tech\"))\n", - "```" - ] + "source": "---\n\n## Migration guide\n\nTo migrate from legacy to v1:\n\n### Step 1: Enable v1 and run your code\n\n```python\nlinopy.options[\"arithmetic_convention\"] = \"v1\"\n```\n\nAny code that relied on legacy alignment will now raise `ValueError` with a helpful message suggesting which `join=` to use.\n\n### Step 2: Fix coordinate mismatches\n\nCommon patterns:\n\n| Legacy code (silent) | v1 equivalent (explicit) |\n|---|---|\n| `x + subset_constant` | `x.add(subset_constant, join=\"left\")` |\n| `x + y` (same size, different labels) | `x + y.assign_coords(time=x.coords[\"time\"])` |\n| `x <= partial_rhs` | `x.le(partial_rhs, join=\"left\")` |\n| `expr + expr` (mismatched coords) | `expr.add(other, join=\"outer\")` or `.sel()` first |\n\n### Step 3: Handle NaN\n\nUnder legacy, NaN in operands was silently replaced. Under v1, it raises `ValueError`. 
See [Missing Data](missing-data.ipynb) for the full migration guide.\n\n### Step 4: Pandas index names\n\nUnder v1, pandas objects must have **named indices** to align properly with linopy variables:\n\n```python\n# Will fail — unnamed index becomes \"dim_0\"\ncost = pd.Series([10, 20], index=[\"wind\", \"solar\"])\n\n# Works — explicit dimension name\ncost = pd.Series([10, 20], index=pd.Index([\"wind\", \"solar\"], name=\"tech\"))\n```" }, { "cell_type": "markdown", @@ -738,10 +821,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "practical-setup", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.643343Z", + "start_time": "2026-03-11T14:45:00.638601Z" + } + }, "source": [ "m4 = linopy.Model()\n", "\n", @@ -749,26 +835,36 @@ "techs = pd.Index([\"solar\", \"wind\", \"gas\"], name=\"tech\")\n", "\n", "gen = m4.add_variables(lower=0, coords=[hours, techs], name=\"gen\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "practical-capacity", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.661328Z", + "start_time": "2026-03-11T14:45:00.650715Z" + } + }, "source": [ "# Capacity limits — constant broadcasts over hours\n", "capacity = xr.DataArray([100, 80, 50], dims=[\"tech\"], coords={\"tech\": techs})\n", "m4.add_constraints(gen <= capacity, name=\"capacity_limit\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "practical-solar", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.680515Z", + "start_time": "2026-03-11T14:45:00.669453Z" + } + }, "source": [ "# Solar availability — full 24h profile, matching coords\n", "solar_avail = np.zeros(24)\n", @@ -777,14 +873,19 @@ "\n", "solar_gen = gen.sel(tech=\"solar\")\n", "m4.add_constraints(solar_gen <= 
solar_availability, name=\"solar_avail\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "practical-peak", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:45:00.695847Z", + "start_time": "2026-03-11T14:45:00.684022Z" + } + }, "source": [ "# Peak demand — only applies to hours 8-20, use join=\"inner\"\n", "peak_hours = pd.RangeIndex(8, 21, name=\"hour\")\n", @@ -794,25 +895,15 @@ "\n", "total_gen = gen.sum(\"tech\")\n", "m4.add_constraints(total_gen.ge(peak_demand, join=\"inner\"), name=\"peak_demand\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", "id": "summary", "metadata": {}, - "source": [ - "---\n", - "\n", - "## Summary\n", - "\n", - "| | v1 (future default) | Legacy (current default) |\n", - "|---|---|---|\n", - "| **Mismatched coords** | `ValueError` | Silent left-join / override |\n", - "| **Same-size different labels** | `ValueError` | Positional alignment |\n", - "| **NaN in constants** | Propagates | Filled with 0 |\n", - "| **Explicit join** | `.add(x, join=...)` | `.add(x, join=...)` |\n", - "| **Setting** | `options[\"arithmetic_convention\"] = \"v1\"` | `options[\"arithmetic_convention\"] = \"legacy\"` |" - ] + "source": "---\n\n## Summary\n\n| | v1 (future default) | Legacy (current default) |\n|---|---|---|\n| **Mismatched coords** | `ValueError` | Silent left-join / override |\n| **Same-size different labels** | `ValueError` | Positional alignment |\n| **NaN in operands** | `ValueError` ([details](missing-data.ipynb)) | Filled with neutral element |\n| **Explicit join** | `.add(x, join=...)` | `.add(x, join=...)` |\n| **Setting** | `options[\"arithmetic_convention\"] = \"v1\"` | `options[\"arithmetic_convention\"] = \"legacy\"` |" } ], "metadata": { diff --git a/examples/missing-data.ipynb b/examples/missing-data.ipynb new file mode 100644 index 00000000..cb3fa9f6 --- /dev/null +++ 
b/examples/missing-data.ipynb @@ -0,0 +1,431 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "intro", + "metadata": {}, + "source": [ + "# Missing Data and Masking\n", + "\n", + "This notebook explains linopy's NaN convention under v1 and how to handle missing data.\n", + "\n", + "1. [The NaN convention](#the-nan-convention) — design principles\n", + "2. [What raises](#what-raises) — NaN at API boundaries\n", + "3. [Handling NaN with `.fillna()`](#handling-nan-with-fillna) — choosing the right fill value\n", + "4. [Masking constraints](#masking-constraints) — `.sel()` and `mask=`\n", + "5. [Masking with NaN in coefficients](#masking-with-nan-in-coefficients) — multi-dimensional patterns\n", + "6. [Legacy NaN behavior](#legacy-nan-behavior-for-comparison) — how it worked before\n", + "\n", + "For coordinate alignment rules, see [Arithmetic Convention](arithmetic-convention.ipynb)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "imports", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:52:16.879309Z", + "start_time": "2026-03-11T14:52:16.087004Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:13.611630Z", + "iopub.status.busy": "2026-03-12T07:17:13.611383Z", + "iopub.status.idle": "2026-03-12T07:17:14.222456Z", + "shell.execute_reply": "2026-03-12T07:17:14.222237Z" + } + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "import xarray as xr\n", + "\n", + "import linopy\n", + "\n", + "linopy.options[\"arithmetic_convention\"] = \"v1\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "setup", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:52:16.939592Z", + "start_time": "2026-03-11T14:52:16.885073Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:14.223624Z", + "iopub.status.busy": "2026-03-12T07:17:14.223510Z", + "iopub.status.idle": "2026-03-12T07:17:14.252820Z", + "shell.execute_reply": 
"2026-03-12T07:17:14.252554Z" + } + }, + "outputs": [], + "source": [ + "m = linopy.Model()\n", + "time = pd.RangeIndex(5, name=\"time\")\n", + "x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", + "\n", + "# Data with NaN\n", + "data = xr.DataArray([1.0, np.nan, 3.0, 4.0, 5.0], dims=[\"time\"], coords={\"time\": time})" + ] + }, + { + "cell_type": "markdown", + "id": "rqgv2f7nwpb", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## The NaN convention\n", + "\n", + "In linopy v1, **NaN means \"absent term.\"** It is never a numeric value.\n", + "\n", + "### How NaN enters\n", + "\n", + "Only two sources produce NaN inside linopy data structures:\n", + "\n", + "1. **`mask=` argument** at construction (`add_variables`, `add_constraints`) — you explicitly declare which slots exist.\n", + "2. **Structural operations** that produce absent slots: `.shift()`, `.roll()`, reindexing to a larger index.\n", + "\n", + "### How NaN propagates\n", + "\n", + "An expression is a sum of terms. Each term has a coefficient, a variable reference, and the expression has a shared constant. NaN marks an **individual term** as absent — it does not mask the entire coordinate.\n", + "\n", + "When expressions are combined (e.g., `x*2 + y.shift(time=1)`), each term is kept independently. At time=0, `y.shift` contributes no term (NaN coeffs, vars=-1), but `x*2` is still valid. The result at time=0 is `2*x[0]` — not absent.\n", + "\n", + "A coordinate is only fully absent when **all** terms have vars=-1 **and** the constant is NaN. This is exactly what `isnull()` checks.\n", + "\n", + "### Where NaN lives\n", + "\n", + "NaN is burned directly into the float fields: `coeffs`, `const`, `rhs`, `lower`, `upper`. Integer fields (`labels`, `vars`) use **-1** as their equivalent sentinel. 
There is no separate boolean mask array.\n", + "\n", + "### What raises\n", + "\n", + "Any **user-supplied NaN at an API boundary** — in bounds, constants, factors, or RHS — raises `ValueError` immediately. Masking is always explicit via `mask=` or `.sel()`, never by passing NaN as a value.\n", + "\n", + "### Why this is consistent\n", + "\n", + "- **`lhs >= rhs` is `lhs - rhs >= 0`**, so RHS obeys the same rule as any constant — no special case.\n", + "- **No dual role for NaN**: it cannot mean both \"absent\" and \"a number I computed with.\" Internal NaN (from `shift`, `mask=`) is always structural. User NaN is always an error.\n", + "- **Absent terms, not absent coordinates**: combining a valid expression with a partially-absent one does not destroy the valid part. Only when *every* term at a coordinate is absent **and** the constant is NaN is the coordinate itself absent." + ] + }, + { + "cell_type": "markdown", + "id": "v1-rule-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## What raises\n", + "\n", + "**NaN in any arithmetic operand raises `ValueError`.** This includes:\n", + "- Constants added/subtracted: `expr + data_with_nan`\n", + "- Factors multiplied/divided: `expr * data_with_nan`\n", + "- Constraint RHS: `expr >= data_with_nan` (because `expr >= rhs` is `expr - rhs >= 0`)\n", + "\n", + "There is no implicit fill. The library does not guess whether NaN means \"zero,\" \"exclude,\" or \"identity.\" You decide."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "v1-rule-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:52:16.949756Z", + "start_time": "2026-03-11T14:52:16.942400Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:14.253991Z", + "iopub.status.busy": "2026-03-12T07:17:14.253892Z", + "iopub.status.idle": "2026-03-12T07:17:14.260195Z", + "shell.execute_reply": "2026-03-12T07:17:14.259998Z" + } + }, + "outputs": [], + "source": [ + "# All of these raise ValueError:\n", + "for op_name, op_fn in [\n", + " (\"add\", lambda: x + data),\n", + " (\"mul\", lambda: x * data),\n", + " (\"constraint\", lambda: x >= data),\n", + "]:\n", + " try:\n", + " op_fn()\n", + " except ValueError:\n", + " print(f\"{op_name}: ValueError raised (NaN in operand)\")" + ] + }, + { + "cell_type": "markdown", + "id": "fillna-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Handling NaN with `.fillna()`\n", + "\n", + "When your data contains NaN, fill it explicitly before combining with expressions. The fill value depends on what the NaN means in your context:\n", + "\n", + "| Operation | Fill value | Meaning |\n", + "|-----------|-----------|--------|\n", + "| `expr + data.fillna(0)` | 0 | NaN = \"no offset\" |\n", + "| `expr * data.fillna(0)` | 0 | NaN = \"exclude this term\" |\n", + "| `expr * data.fillna(1)` | 1 | NaN = \"no scaling\" |\n", + "| `expr / data.fillna(1)` | 1 | NaN = \"no scaling\" |\n", + "\n", + "The choice is yours — and that's the point. Under legacy, the library chose for you (0 for add/sub/mul, 1 for div). Under v1, you make the decision explicit."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fillna-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:52:16.968586Z", + "start_time": "2026-03-11T14:52:16.956299Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:14.261186Z", + "iopub.status.busy": "2026-03-12T07:17:14.261122Z", + "iopub.status.idle": "2026-03-12T07:17:14.270213Z", + "shell.execute_reply": "2026-03-12T07:17:14.269997Z" + } + }, + "outputs": [], + "source": [ + "# Fill NaN before operating — you choose the fill value\n", + "print(\"add fillna(0):\", (x + data.fillna(0)).const.values)\n", + "print(\"mul fillna(0):\", (x * data.fillna(0)).coeffs.squeeze().values)\n", + "print(\"mul fillna(1):\", (x * data.fillna(1)).coeffs.squeeze().values)" + ] + }, + { + "cell_type": "markdown", + "id": "masking-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Masking constraints\n", + "\n", + "A common pattern: your data has NaN at positions where no constraint should exist. 
For example, availability data that's only defined for certain hours, or cost data with missing entries.\n", + "\n", + "### Approach 1: `.sel()` (preferred)\n", + "\n", + "Select only valid positions — the constraint has fewer coordinates:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "masking-sel", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:52:16.983888Z", + "start_time": "2026-03-11T14:52:16.974378Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:14.271290Z", + "iopub.status.busy": "2026-03-12T07:17:14.271219Z", + "iopub.status.idle": "2026-03-12T07:17:14.279948Z", + "shell.execute_reply": "2026-03-12T07:17:14.279785Z" + } + }, + "outputs": [], + "source": [ + "# Availability data with NaN = \"no limit at this hour\"\n", + "availability = xr.DataArray(\n", + " [100.0, 80.0, np.nan, np.nan, 60.0], dims=[\"time\"], coords={\"time\": time}\n", + ")\n", + "\n", + "# Select only where data is valid — constraint has fewer coordinates\n", + "valid = availability.notnull()\n", + "m.add_constraints(x.sel(time=valid) <= availability.sel(time=valid), name=\"avail\")" + ] + }, + { + "cell_type": "markdown", + "id": "masking-mask-header", + "metadata": {}, + "source": [ + "No fillna, no mask parameter — the constraint simply doesn't exist at the NaN positions.\n", + "\n", + "### Approach 2: `mask=` parameter\n", + "\n", + "When `.sel()` is inconvenient (e.g., multi-dimensional data where NaN positions vary across dimensions), use `mask=`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "masking-mask-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:52:16.998421Z", + "start_time": "2026-03-11T14:52:16.990226Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:14.280842Z", + "iopub.status.busy": "2026-03-12T07:17:14.280784Z", + "iopub.status.idle": "2026-03-12T07:17:14.286824Z", + "shell.execute_reply": "2026-03-12T07:17:14.286655Z" + } + }, + "outputs": 
[], + "source": [ + "# Same result using mask= instead of .sel()\n", + "mask = availability.notnull()\n", + "m.add_constraints(x <= availability.fillna(0), name=\"avail_masked\", mask=mask)" + ] + }, + { + "cell_type": "markdown", + "id": "masking-vars-header", + "metadata": {}, + "source": [ + "The same approaches work for variables with NaN bounds:\n", + "\n", + "```python\n", + "# With .sel()\n", + "valid = upper_bounds.notnull()\n", + "m.add_variables(upper=upper_bounds.sel(i=valid), coords=[valid_coords], name=\"y\")\n", + "\n", + "# Or with mask=\n", + "mask = upper_bounds.notnull()\n", + "m.add_variables(upper=upper_bounds.fillna(0), mask=mask, name=\"y\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "coeff-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Masking with NaN in coefficients\n", + "\n", + "When NaN appears in coefficient data (e.g., efficiency factors where some combinations don't apply), the same two approaches work:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "coeff-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-11T14:52:17.017774Z", + "start_time": "2026-03-11T14:52:17.003374Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:14.287819Z", + "iopub.status.busy": "2026-03-12T07:17:14.287760Z", + "iopub.status.idle": "2026-03-12T07:17:14.300622Z", + "shell.execute_reply": "2026-03-12T07:17:14.300443Z" + } + }, + "outputs": [], + "source": [ + "# Efficiency data: solar has no efficiency at night (NaN)\n", + "techs = pd.Index([\"solar\", \"wind\"], name=\"tech\")\n", + "hours = pd.RangeIndex(4, name=\"hour\")\n", + "gen = m.add_variables(lower=0, coords=[hours, techs], name=\"gen\")\n", + "\n", + "efficiency = xr.DataArray(\n", + " [[np.nan, 0.35], [0.8, 0.35], [0.9, 0.35], [np.nan, 0.35]],\n", + " dims=[\"hour\", \"tech\"],\n", + " coords={\"hour\": hours, \"tech\": techs},\n", + ")\n", + "\n", + "# Approach 1: .sel() — select only valid hours per 
tech\n", + "valid_hours = efficiency.sel(tech=\"solar\").notnull()\n", + "solar_gen = gen.sel(tech=\"solar\", hour=valid_hours)\n", + "solar_eff = efficiency.sel(tech=\"solar\", hour=valid_hours)\n", + "print(\"sel approach — solar hours:\", solar_gen.coords[\"hour\"].values)\n", + "\n", + "# Approach 2: mask= — keep all coordinates, mask invalid ones\n", + "rhs = xr.DataArray([50.0] * 4, dims=[\"hour\"], coords={\"hour\": hours})\n", + "coeff_mask = efficiency.notnull().all(\"tech\")\n", + "expr = gen * efficiency.fillna(0)\n", + "m.add_constraints(expr >= rhs, name=\"min_output\", mask=coeff_mask)\n", + "print(\"mask approach — constraint mask:\", coeff_mask.values)" + ] + }, + { + "cell_type": "markdown", + "id": "legacy-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Legacy NaN behavior (for comparison)\n", + "\n", + "Under legacy, NaN was handled implicitly:\n", + "- **In arithmetic**: silently replaced with neutral elements (0 for add/sub/mul, 1 for div)\n", + "- **In constraint RHS**: NaN meant \"no constraint here\" — auto-masked internally\n", + "- **With `auto_mask=True`**: NaN in variable bounds meant \"no variable here\"\n", + "\n", + "This was convenient but could mask data quality issues. A NaN from a data pipeline bug would silently become 0, producing a valid but wrong model.\n", + "\n", + "### Migration\n", + "\n", + "| Legacy code (silent) | v1 equivalent (explicit) |\n", + "|---|---|\n", + "| `x + data_with_nans` | `x + data_with_nans.fillna(0)` |\n", + "| `x * data_with_nans` | `x * data_with_nans.fillna(0)` |\n", + "| `x / data_with_nans` | `x / data_with_nans.fillna(1)` |\n", + "| `m.add_constraints(expr >= nan_rhs)` | `m.add_constraints(expr.sel(...) 
>= rhs.sel(...))` |\n", + "| `Model(auto_mask=True)` | Explicit `mask=` or `.sel()` |" + ] + }, + { + "cell_type": "markdown", + "id": "summary", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Summary\n", + "\n", + "| Aspect | v1 | Legacy |\n", + "|---|---|---|\n", + "| **NaN means** | Absent term (not absent coordinate) | Numeric placeholder (filled silently) |\n", + "| **NaN sources** | `mask=`, structural ops only | Anywhere (user data, bounds, RHS) |\n", + "| **NaN in operands** | `ValueError` | Filled with neutral element (0 or 1) |\n", + "| **NaN in constraint RHS** | `ValueError` | Auto-masked |\n", + "| **Combining expressions** | Absent terms ignored, valid terms kept | NaN filled before combining |\n", + "| **Coordinate absent when** | All terms absent AND const is NaN | Never (NaN always filled) |\n", + "| **Masking** | Explicit: `.sel()` or `mask=` | Implicit via NaN / `auto_mask` |\n", + "| **Storage** | Float fields + `-1` sentinels | Same, but NaN has dual role |\n", + "| **Fill value choice** | User decides | Library decides |" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/nan-edge-cases.ipynb b/examples/nan-edge-cases.ipynb new file mode 100644 index 00000000..57e08c47 --- /dev/null +++ b/examples/nan-edge-cases.ipynb @@ -0,0 +1,773 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "intro", + "metadata": {}, + "source": [ + "# NaN Edge Cases: Legacy vs v1\n", + "\n", + "Development notebook investigating how NaN behaves across linopy operations under both conventions.\n", + "\n", + "1. 
[shift — the primary NaN source](#shift)\n", + "2. [roll — circular, no NaN](#roll)\n", + "3. [where — conditional masking](#where)\n", + "4. [reindex — expanding coordinates](#reindex)\n", + "5. [isnull / fillna — detection and recovery](#isnull--fillna)\n", + "6. [Arithmetic with shifted expressions](#arithmetic-with-shifted-expressions)\n", + "7. [Constraints from expressions with NaN](#constraints-from-expressions-with-nan)\n", + "8. [sanitize_missings — the solver boundary](#sanitize_missings)\n", + "9. [FILL_VALUE internals](#fill_value-internals)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "imports", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.382686Z", + "start_time": "2026-03-12T07:02:17.428044Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:07.720185Z", + "iopub.status.busy": "2026-03-12T07:17:07.720094Z", + "iopub.status.idle": "2026-03-12T07:17:08.462534Z", + "shell.execute_reply": "2026-03-12T07:17:08.462309Z" + } + }, + "outputs": [], + "source": [ + "import warnings\n", + "\n", + "import pandas as pd\n", + "import xarray as xr\n", + "\n", + "import linopy\n", + "from linopy import Model\n", + "from linopy.config import LinopyDeprecationWarning\n", + "from linopy.expressions import FILL_VALUE\n", + "\n", + "warnings.filterwarnings(\"ignore\", category=LinopyDeprecationWarning)\n", + "\n", + "print(\"FILL_VALUE:\", FILL_VALUE)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "setup", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.440164Z", + "start_time": "2026-03-12T07:02:18.394722Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.463669Z", + "iopub.status.busy": "2026-03-12T07:17:08.463532Z", + "iopub.status.idle": "2026-03-12T07:17:08.500789Z", + "shell.execute_reply": "2026-03-12T07:17:08.500587Z" + } + }, + "outputs": [], + "source": [ + "def make_model():\n", + " m = Model()\n", + " time = pd.RangeIndex(5, 
name=\"time\")\n", + " x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", + " return m, x\n", + "\n", + "\n", + "m, x = make_model()\n", + "print(\"x:\", x)" + ] + }, + { + "cell_type": "markdown", + "id": "shift-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## shift\n", + "\n", + "`.shift()` is the primary structural source of NaN. It shifts data along a dimension,\n", + "creating a gap that must be filled. The fill values come from `FILL_VALUE`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "shift-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.457999Z", + "start_time": "2026-03-12T07:02:18.450094Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.501837Z", + "iopub.status.busy": "2026-03-12T07:17:08.501734Z", + "iopub.status.idle": "2026-03-12T07:17:08.507679Z", + "shell.execute_reply": "2026-03-12T07:17:08.507514Z" + } + }, + "outputs": [], + "source": [ + "expr = 2 * x + 10\n", + "shifted = expr.shift(time=1)\n", + "\n", + "print(\"=== Original ===\")\n", + "print(\"coeffs:\", expr.coeffs.squeeze().values)\n", + "print(\"vars: \", expr.vars.squeeze().values)\n", + "print(\"const: \", expr.const.values)\n", + "\n", + "print(\"\\n=== Shifted (time=1) ===\")\n", + "print(\"coeffs:\", shifted.coeffs.squeeze().values)\n", + "print(\"vars: \", shifted.vars.squeeze().values)\n", + "print(\"const: \", shifted.const.values)\n", + "print(\"isnull:\", shifted.isnull().values)\n", + "\n", + "print(\"\\nKey: vars=-1 is the integer sentinel, const=NaN marks the slot as absent.\")\n", + "print(\"coeffs are filled with\", FILL_VALUE[\"coeffs\"], \"(not NaN).\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "shift-variable", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.471949Z", + "start_time": "2026-03-12T07:02:18.469156Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.508604Z", + "iopub.status.busy": 
"2026-03-12T07:17:08.508544Z", + "iopub.status.idle": "2026-03-12T07:17:08.510507Z", + "shell.execute_reply": "2026-03-12T07:17:08.510344Z" + } + }, + "outputs": [], + "source": [ + "# Variables also support shift — labels get -1 sentinel, bounds get NaN\n", + "x_shifted = x.shift(time=1)\n", + "print(\"shifted variable labels:\", x_shifted.labels.values)\n", + "print(\"shifted variable lower: \", x_shifted.lower.values)\n", + "print(\"shifted variable upper: \", x_shifted.upper.values)" + ] + }, + { + "cell_type": "markdown", + "id": "roll-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## roll\n", + "\n", + "`.roll()` is circular — values wrap around, no NaN is introduced." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "roll-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.487236Z", + "start_time": "2026-03-12T07:02:18.481358Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.511390Z", + "iopub.status.busy": "2026-03-12T07:17:08.511331Z", + "iopub.status.idle": "2026-03-12T07:17:08.515994Z", + "shell.execute_reply": "2026-03-12T07:17:08.515833Z" + } + }, + "outputs": [], + "source": [ + "expr = 2 * x + 10\n", + "rolled = expr.roll(time=1)\n", + "\n", + "print(\"=== Rolled (time=1) ===\")\n", + "print(\"coeffs:\", rolled.coeffs.squeeze().values)\n", + "print(\"vars: \", rolled.vars.squeeze().values)\n", + "print(\"const: \", rolled.const.values)\n", + "print(\"isnull:\", rolled.isnull().values)\n", + "print(\"\\nNo NaN — values wrap around.\")" + ] + }, + { + "cell_type": "markdown", + "id": "where-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## where\n", + "\n", + "`.where(cond)` masks slots where the condition is False.\n", + "Masked slots get `vars=-1, coeffs=0, const=NaN` — same as FILL_VALUE." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "where-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.502493Z", + "start_time": "2026-03-12T07:02:18.496728Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.516930Z", + "iopub.status.busy": "2026-03-12T07:17:08.516857Z", + "iopub.status.idle": "2026-03-12T07:17:08.522323Z", + "shell.execute_reply": "2026-03-12T07:17:08.522120Z" + } + }, + "outputs": [], + "source": [ + "expr = 2 * x + 10\n", + "mask = xr.DataArray([True, True, False, False, True], dims=[\"time\"])\n", + "masked = expr.where(mask)\n", + "\n", + "print(\"=== where(mask) ===\")\n", + "print(\"coeffs:\", masked.coeffs.squeeze().values)\n", + "print(\"vars: \", masked.vars.squeeze().values)\n", + "print(\"const: \", masked.const.values)\n", + "print(\"isnull:\", masked.isnull().values)\n", + "\n", + "print(\"\\nFalse positions → absent slot (vars=-1, const=NaN).\")\n", + "print(\"Same shape, fewer active slots.\")" + ] + }, + { + "cell_type": "markdown", + "id": "reindex-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## reindex\n", + "\n", + "`.reindex()` expands or shrinks coordinates. New coordinates get FILL_VALUE." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "reindex-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.515465Z", + "start_time": "2026-03-12T07:02:18.506075Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.523325Z", + "iopub.status.busy": "2026-03-12T07:17:08.523263Z", + "iopub.status.idle": "2026-03-12T07:17:08.530243Z", + "shell.execute_reply": "2026-03-12T07:17:08.530070Z" + } + }, + "outputs": [], + "source": [ + "expr = 2 * x + 10\n", + "\n", + "# Expand to a larger index\n", + "new_time = pd.RangeIndex(7, name=\"time\")\n", + "expanded = expr.reindex({\"time\": new_time})\n", + "\n", + "print(\"=== reindex to [0..6] ===\")\n", + "print(\"coeffs:\", expanded.coeffs.squeeze().values)\n", + "print(\"vars: \", expanded.vars.squeeze().values)\n", + "print(\"const: \", expanded.const.values)\n", + "print(\"isnull:\", expanded.isnull().values)\n", + "\n", + "# Shrink to a smaller index\n", + "shrunk = expr.reindex({\"time\": [1, 3]})\n", + "print(\"\\n=== reindex to [1, 3] ===\")\n", + "print(\"coeffs:\", shrunk.coeffs.squeeze().values)\n", + "print(\"const: \", shrunk.const.values)\n", + "print(\"\\nNew positions [5, 6] are absent. Shrinking drops slots.\")" + ] + }, + { + "cell_type": "markdown", + "id": "isnull-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## isnull / fillna\n", + "\n", + "`isnull()` detects absent slots. The check is:\n", + "```\n", + "(vars == -1).all(helper_dims) & const.isnull()\n", + "```\n", + "Both conditions must be true — a slot is only \"absent\" if there are no variables AND no constant." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "isnull-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.531165Z", + "start_time": "2026-03-12T07:02:18.524875Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.531196Z", + "iopub.status.busy": "2026-03-12T07:17:08.531119Z", + "iopub.status.idle": "2026-03-12T07:17:08.536287Z", + "shell.execute_reply": "2026-03-12T07:17:08.536119Z" + } + }, + "outputs": [], + "source": [ + "expr = 2 * x + 10\n", + "shifted = expr.shift(time=2)\n", + "\n", + "print(\"=== isnull on shifted expression ===\")\n", + "print(\"vars: \", shifted.vars.squeeze().values)\n", + "print(\"const: \", shifted.const.values)\n", + "print(\"isnull:\", shifted.isnull().values)\n", + "\n", + "# What about an expression with const=0 but vars=-1?\n", + "# This would be a \"zero expression\" not an absent one.\n", + "print(\"\\n=== Why const=NaN matters ===\")\n", + "print(\"If const were 0 instead of NaN, isnull() would be False\")\n", + "print(\"→ the slot would look like a valid 'zero expression'\")\n", + "print(\"→ NaN in const is what distinguishes 'absent' from 'zero'\")" + ] + }, + { + "cell_type": "markdown", + "id": "arithmetic-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Arithmetic with shifted expressions\n", + "\n", + "This is where legacy and v1 diverge. 
When you do arithmetic on an expression\n", + "that already has NaN (from shift/where/reindex), the NaN is **internal** — it's\n", + "not user-supplied data at an API boundary.\n", + "\n", + "- **Legacy**: fills expression NaN with neutral elements before operating\n", + "- **v1**: lets IEEE NaN propagate — absent stays absent" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "arithmetic-legacy", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.553302Z", + "start_time": "2026-03-12T07:02:18.541098Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.537166Z", + "iopub.status.busy": "2026-03-12T07:17:08.537114Z", + "iopub.status.idle": "2026-03-12T07:17:08.546152Z", + "shell.execute_reply": "2026-03-12T07:17:08.545979Z" + } + }, + "outputs": [], + "source": [ + "linopy.options[\"arithmetic_convention\"] = \"legacy\"\n", + "m, x = make_model()\n", + "\n", + "shifted = (2 * x + 10).shift(time=1)\n", + "print(\"=== LEGACY: shifted + 5 ===\")\n", + "result = shifted + 5\n", + "print(\"const: \", result.const.values)\n", + "print(\"coeffs:\", result.coeffs.squeeze().values)\n", + "print(\"isnull:\", result.isnull().values)\n", + "print(\"→ NaN const filled with 0, then +5 = 5. Slot looks alive!\")\n", + "\n", + "print(\"\\n=== LEGACY: shifted * 3 ===\")\n", + "result = shifted * 3\n", + "print(\"const: \", result.const.values)\n", + "print(\"coeffs:\", result.coeffs.squeeze().values)\n", + "print(\"isnull:\", result.isnull().values)\n", + "print(\"→ NaN filled with 0, then *3 = 0. 
Slot has zero coeff.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "arithmetic-v1", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.573084Z", + "start_time": "2026-03-12T07:02:18.562699Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.547012Z", + "iopub.status.busy": "2026-03-12T07:17:08.546958Z", + "iopub.status.idle": "2026-03-12T07:17:08.554977Z", + "shell.execute_reply": "2026-03-12T07:17:08.554795Z" + } + }, + "outputs": [], + "source": [ + "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", + "m, x = make_model()\n", + "\n", + "shifted = (2 * x + 10).shift(time=1)\n", + "print(\"=== V1: shifted + 5 ===\")\n", + "result = shifted + 5\n", + "print(\"const: \", result.const.values)\n", + "print(\"coeffs:\", result.coeffs.squeeze().values)\n", + "print(\"isnull:\", result.isnull().values)\n", + "print(\"→ NaN + 5 = NaN. Absent slot stays absent. IEEE propagation.\")\n", + "\n", + "print(\"\\n=== V1: shifted * 3 ===\")\n", + "result = shifted * 3\n", + "print(\"const: \", result.const.values)\n", + "print(\"coeffs:\", result.coeffs.squeeze().values)\n", + "print(\"isnull:\", result.isnull().values)\n", + "print(\"→ NaN * 3 = NaN. 
Coeffs 0*3 = 0 (not NaN — coeffs FILL is 0).\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eodco2pcrqn", + "metadata": { + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.555856Z", + "iopub.status.busy": "2026-03-12T07:17:08.555799Z", + "iopub.status.idle": "2026-03-12T07:17:08.565818Z", + "shell.execute_reply": "2026-03-12T07:17:08.565644Z" + } + }, + "outputs": [], + "source": [ + "# Combining expressions: absent term does NOT poison valid terms\n", + "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", + "m, x = make_model()\n", + "y = m.add_variables(lower=0, coords=[pd.RangeIndex(5, name=\"time\")], name=\"y\")\n", + "\n", + "result = x * 2 + (1 * y).shift(time=1)\n", + "print(\"=== V1: x*2 + y.shift(1) ===\")\n", + "print(\"const: \", result.const.values)\n", + "print(\"isnull:\", result.isnull().values)\n", + "print(\"vars:\")\n", + "print(result.vars.values)\n", + "print(\"coeffs:\")\n", + "print(result.coeffs.values)\n", + "print(\"\\n→ time=0: x[0] with coeff=2 is valid! y's absent term (vars=-1, coeffs=NaN)\")\n", + "print(\" does NOT mask the coordinate. const=0 (not NaN) because xr.sum skips NaN.\")" + ] + }, + { + "cell_type": "markdown", + "id": "key-difference", + "metadata": {}, + "source": [ + "### Key difference: scalar arithmetic on a single shifted expression\n", + "\n", + "| | Legacy | v1 |\n", + "|---|---|---|\n", + "| `shifted + 5` at absent slot | const=5 (alive!) | const=NaN (absent) |\n", + "| `shifted * 3` at absent slot | coeffs=0, const=0 | coeffs=0, const=NaN |\n", + "| `isnull()` after arithmetic | False (slot revived!) | True (slot stays absent) |\n", + "\n", + "Legacy can **revive** absent slots through scalar arithmetic. v1 cannot — once absent, always absent.\n", + "\n", + "### But: combining expressions does NOT poison\n", + "\n", + "When two expressions are merged (e.g., `x*2 + y.shift(1)`), each term is independent. 
An absent term from `y.shift` does **not** mask the valid `x` term at the same coordinate:\n", + "\n", + "```python\n", + "x*2 + y.shift(time=1) # at time=0: 2*x[0] (valid!) + absent term → 2*x[0]\n", + "```\n", + "\n", + "A coordinate is only fully absent when **all** terms have `vars=-1` and `const` is NaN. This is what `isnull()` checks." + ] + }, + { + "cell_type": "markdown", + "id": "constraint-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## Constraints from expressions with NaN\n", + "\n", + "What happens when an expression with absent slots (NaN) becomes a constraint?\n", + "The NaN in const propagates to the constraint RHS." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "constraint-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.598217Z", + "start_time": "2026-03-12T07:02:18.583043Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.566737Z", + "iopub.status.busy": "2026-03-12T07:17:08.566681Z", + "iopub.status.idle": "2026-03-12T07:17:08.579707Z", + "shell.execute_reply": "2026-03-12T07:17:08.579538Z" + } + }, + "outputs": [], + "source": [ + "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", + "m, x = make_model()\n", + "\n", + "shifted = (1 * x).shift(time=1)\n", + "print(\"=== Shifted expression ===\")\n", + "print(\"vars: \", shifted.vars.squeeze().values)\n", + "print(\"const: \", shifted.const.values)\n", + "\n", + "# Under v1, x[1:] - x[:-1] requires explicit join because coords differ\n", + "# (time=[1,2,3,4] vs time=[0,1,2,3]).\n", + "# Use join=\"override\" to align by position:\n", + "print(\"\\n=== x[1:] - x[:-1] via isel + override join ===\")\n", + "x_now = 1 * x.isel(time=slice(1, None))\n", + "x_prev = 1 * x.isel(time=slice(None, -1))\n", + "ramp = x_now.sub(x_prev, join=\"override\")\n", + "print(\"const: \", ramp.const.values)\n", + "print(\"isnull:\", ramp.isnull().values)\n", + "print(\"→ No NaN at all — isel avoids the gap entirely.\")\n", + 
"\n", + "# But what if we use shifted expression directly as a constraint?\n", + "print(\"\\n=== Constraint from shifted expression (has NaN) ===\")\n", + "con = m.add_constraints(shifted <= 5, name=\"shifted_raw\")\n", + "print(\"constraint rhs: \", con.rhs.values)\n", + "print(\"constraint labels:\", con.labels.values)\n", + "print(\"constraint vars: \", con.vars.squeeze().values)\n", + "print(\"\\nNaN in RHS at time=0. Label is still assigned.\")\n", + "print(\"This will be caught by sanitize_missings() or check_has_nulls() at solve time.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "constraint-fix", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.625188Z", + "start_time": "2026-03-12T07:02:18.607269Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.580657Z", + "iopub.status.busy": "2026-03-12T07:17:08.580602Z", + "iopub.status.idle": "2026-03-12T07:17:08.596413Z", + "shell.execute_reply": "2026-03-12T07:17:08.596253Z" + } + }, + "outputs": [], + "source": [ + "# The correct approach: avoid the gap entirely with isel + override\n", + "m2, x2 = make_model()\n", + "\n", + "x_now = 1 * x2.isel(time=slice(1, None))\n", + "x_prev = 1 * x2.isel(time=slice(None, -1))\n", + "ramp = x_now.sub(x_prev, join=\"override\")\n", + "con = m2.add_constraints(ramp <= 5, name=\"ramp_isel\")\n", + "print(\"=== isel + override approach (preferred) ===\")\n", + "print(\"rhs: \", con.rhs.values)\n", + "print(\"labels:\", con.labels.values)\n", + "print(\"No NaN — constraint only exists where both operands exist.\")\n", + "\n", + "# Approach 2: sel with a validity mask on shifted expression\n", + "m3, x3 = make_model()\n", + "shifted = (1 * x3).shift(time=1)\n", + "valid = ~shifted.isnull()\n", + "con = m3.add_constraints(shifted.sel(time=valid) <= 5, name=\"shifted_sel\")\n", + "print(\"\\n=== sel approach (filter after shift) ===\")\n", + "print(\"rhs: \", con.rhs.values)\n", + "print(\"labels:\", 
con.labels.values)\n", + "print(\"Absent slot at time=0 removed by .sel().\")" + ] + }, + { + "cell_type": "markdown", + "id": "sanitize-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## sanitize_missings\n", + "\n", + "Called at solve time (before writing to solver). Sets `labels=-1` where all vars are -1.\n", + "This catches constraints where the LHS has no variables — but does NOT catch NaN in RHS.\n", + "\n", + "```python\n", + "def sanitize_missings(self):\n", + " for name in self:\n", + " con = self[name]\n", + " contains_non_missing = (con.vars != -1).any(con.term_dim)\n", + " labels = self[name].labels.where(contains_non_missing, -1)\n", + "```\n", + "\n", + "After sanitize_missings, `check_has_nulls()` in `.flat` catches any remaining NaN in rhs/coeffs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "sanitize-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.644614Z", + "start_time": "2026-03-12T07:02:18.635530Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.597352Z", + "iopub.status.busy": "2026-03-12T07:17:08.597296Z", + "iopub.status.idle": "2026-03-12T07:17:08.605197Z", + "shell.execute_reply": "2026-03-12T07:17:08.605028Z" + } + }, + "outputs": [], + "source": [ + "# Demonstrate what sanitize_missings does\n", + "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", + "m, x = make_model()\n", + "\n", + "shifted = (1 * x).shift(time=1)\n", + "# shifted at time=0: vars=-1 (no variable), const=NaN\n", + "# This means: LHS has no variables at time=0\n", + "\n", + "con = m.add_constraints(shifted <= 5, name=\"test\")\n", + "print(\"Before sanitize_missings:\")\n", + "print(\" labels:\", con.labels.values)\n", + "print(\" vars: \", con.vars.squeeze().values)\n", + "print(\" rhs: \", con.rhs.values)\n", + "\n", + "m.constraints.sanitize_missings()\n", + "con = m.constraints[\"test\"]\n", + "print(\"\\nAfter sanitize_missings:\")\n", + "print(\" labels:\", 
con.labels.values)\n", + "print(\" vars: \", con.vars.squeeze().values)\n", + "print(\" rhs: \", con.rhs.values)\n", + "print(\"\\n→ Label at time=0 set to -1 (masked out).\")\n", + "print(\"→ RHS still has NaN but that slot is now masked by labels=-1.\")" + ] + }, + { + "cell_type": "markdown", + "id": "fillvalue-header", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## FILL_VALUE internals\n", + "\n", + "The sentinel values used when structural operations create absent slots:\n", + "\n", + "| Type | Field | FILL_VALUE | Why |\n", + "|---|---|---|---|\n", + "| LinearExpression | `vars` | -1 | Integer sentinel (no variable) |\n", + "| LinearExpression | `coeffs` | 0 | \"No term\" = zero coefficient |\n", + "| LinearExpression | `const` | NaN | Marks slot as absent (needed for `isnull()`) |\n", + "| Variable | `labels` | -1 | Integer sentinel (no variable) |\n", + "| Variable | `lower` | NaN | Absent bound |\n", + "| Variable | `upper` | NaN | Absent bound |\n", + "| Constraint | `labels` | -1 | Integer sentinel (no constraint) |\n", + "\n", + "### Why coeffs=0 but const=NaN?\n", + "\n", + "- **coeffs=0**: A missing term contributes nothing to the sum. `0 * var = 0`.\n", + "- **const=NaN**: Distinguishes \"absent slot\" from \"slot with zero constant.\"\n", + " Without NaN in const, `isnull()` couldn't tell the difference.\n", + "\n", + "### isnull() depends on const=NaN\n", + "\n", + "```python\n", + "def isnull(self):\n", + " return (self.vars == -1).all(helper_dims) & self.const.isnull()\n", + "```\n", + "\n", + "If const were 0 instead of NaN, a shifted expression would not be detected as null." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fillvalue-demo", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.663083Z", + "start_time": "2026-03-12T07:02:18.654471Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.606113Z", + "iopub.status.busy": "2026-03-12T07:17:08.606058Z", + "iopub.status.idle": "2026-03-12T07:17:08.612124Z", + "shell.execute_reply": "2026-03-12T07:17:08.611961Z" + } + }, + "outputs": [], + "source": [ + "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", + "m, x = make_model()\n", + "\n", + "expr = 2 * x + 10\n", + "shifted = expr.shift(time=1)\n", + "\n", + "print(\"=== FILL_VALUE in action ===\")\n", + "print(f\"vars FILL={FILL_VALUE['vars']}: \", shifted.vars.squeeze().values)\n", + "print(f\"coeffs FILL={FILL_VALUE['coeffs']}: \", shifted.coeffs.squeeze().values)\n", + "print(f\"const FILL={FILL_VALUE['const']}:\", shifted.const.values)\n", + "print()\n", + "print(\"isnull:\", shifted.isnull().values)\n", + "print(\"\\nSlot 0: vars=-1, coeffs=0, const=NaN → isnull=True\")\n", + "print(\"Slot 1: vars=0, coeffs=2, const=10 → isnull=False\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cleanup", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-12T07:02:18.669891Z", + "start_time": "2026-03-12T07:02:18.668107Z" + }, + "execution": { + "iopub.execute_input": "2026-03-12T07:17:08.613034Z", + "iopub.status.busy": "2026-03-12T07:17:08.612977Z", + "iopub.status.idle": "2026-03-12T07:17:08.614319Z", + "shell.execute_reply": "2026-03-12T07:17:08.614153Z" + } + }, + "outputs": [], + "source": [ + "linopy.options.reset()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
"pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/linopy/expressions.py b/linopy/expressions.py index 9e893d45..47ada288 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -104,7 +104,7 @@ from linopy.variables import ScalarVariable, Variable -FILL_VALUE = {"vars": -1, "coeffs": np.nan, "const": np.nan} +FILL_VALUE = {"vars": -1, "coeffs": 0, "const": np.nan} def _to_piecewise_constraint_descriptor( @@ -634,6 +634,11 @@ def _add_constant( join is None and options["arithmetic_convention"] == "legacy" ) or join == "legacy" if np.isscalar(other) and join is None: + if not is_legacy and np.isnan(other): + raise ValueError( + "Constant contains NaN values. Use .fillna() to handle " + "missing values before arithmetic operations." + ) const = self.const.fillna(0) + other if is_legacy else self.const + other return self.assign(const=const) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) @@ -643,6 +648,11 @@ def _add_constant( if is_legacy: da = da.fillna(0) self_const = self_const.fillna(0) + elif da.isnull().any(): + raise ValueError( + "Constant contains NaN values. Use .fillna() to handle " + "missing values before arithmetic operations." + ) if needs_data_reindex: fv = {**self._fill_value, "const": 0} return self.__class__( @@ -665,6 +675,11 @@ def _apply_constant_op( ) or join == "legacy" # Fast path for scalars: no dimensions to align if np.isscalar(other): + if not is_legacy and np.isnan(other): + raise ValueError( + "Factor contains NaN values. Use .fillna() to handle " + "missing values before arithmetic operations." 
+ ) coeffs = self.coeffs.fillna(0) if is_legacy else self.coeffs const = self.const.fillna(0) if is_legacy else self.const scalar = DataArray(other) @@ -676,6 +691,11 @@ def _apply_constant_op( if is_legacy: factor = factor.fillna(fill_value) self_const = self_const.fillna(0) + elif factor.isnull().any(): + raise ValueError( + "Factor contains NaN values. Use .fillna() to handle " + "missing values before arithmetic operations." + ) if needs_data_reindex: fv = {**self._fill_value, "const": 0} data = self.data.reindex_like(self_const, fill_value=fv) @@ -1219,7 +1239,18 @@ def to_constraint( if effective_join == "v1": effective_join = "exact" + if isinstance(rhs, SUPPORTED_CONSTANT_TYPES) and not isinstance(rhs, DataArray): + rhs = as_dataarray(rhs, coords=self.coords, dims=self.coord_dims) + if isinstance(rhs, DataArray): + is_legacy = ( + join is None and options["arithmetic_convention"] == "legacy" + ) or join == "legacy" + if not is_legacy and rhs.isnull().any(): + raise ValueError( + "Constraint RHS contains NaN values. Use .fillna() and " + "mask= to handle missing values explicitly." 
+ ) if effective_join == "override": aligned_rhs = rhs.assign_coords(coords=self.const.coords) expr_const = self.const @@ -2209,7 +2240,7 @@ class QuadraticExpression(BaseExpression): __array_priority__ = 10000 __pandas_priority__ = 10000 - _fill_value = {"vars": -1, "coeffs": np.nan, "const": np.nan} + _fill_value = {"vars": -1, "coeffs": 0, "const": np.nan} def __init__(self, data: Dataset | None, model: Model) -> None: super().__init__(data, model) diff --git a/linopy/piecewise.py b/linopy/piecewise.py index 7ba2fc23..489a8bdf 100644 --- a/linopy/piecewise.py +++ b/linopy/piecewise.py @@ -762,10 +762,10 @@ def _add_pwl_sos2_core( lambda_var.sum(dim=BREAKPOINT_DIM) == rhs, name=convex_name ) - x_weighted = (lambda_var * x_points).sum(dim=BREAKPOINT_DIM) + x_weighted = (lambda_var * x_points.fillna(0)).sum(dim=BREAKPOINT_DIM) model.add_constraints(x_expr == x_weighted, name=x_link_name) - y_weighted = (lambda_var * y_points).sum(dim=BREAKPOINT_DIM) + y_weighted = (lambda_var * y_points.fillna(0)).sum(dim=BREAKPOINT_DIM) model.add_constraints(target_expr == y_weighted, name=y_link_name) return convex_con diff --git a/test/test_convention.py b/test/test_convention.py index 7a566e38..e1099e81 100644 --- a/test/test_convention.py +++ b/test/test_convention.py @@ -218,13 +218,13 @@ def test_inf_mul_propagates(self, a: Variable) -> None: result = (1 * a) * const assert np.isinf(result.coeffs.squeeze().values[1]) - def test_nan_mul_propagates_v1(self, a: Variable) -> None: - """Under v1, NaN in mul should propagate (no fillna).""" + def test_nan_mul_raises_v1(self, a: Variable) -> None: + """Under v1, NaN in mul should raise ValueError.""" const = xr.DataArray( [1.0, np.nan, 3.0, 4.0, 5.0], dims=["i"], coords={"i": range(5)} ) - result = (1 * a) * const - assert np.isnan(result.coeffs.squeeze().values[1]) + with pytest.raises(ValueError, match="NaN"): + (1 * a) * const # --------------------------------------------------------------------------- diff --git 
a/test/test_linear_expression.py b/test/test_linear_expression.py index c833e17a..17a35a41 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -1116,7 +1116,7 @@ def test_add_nan_filled( @pytest.mark.v1_only @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_add_nan_propagates(self, v: Variable, operand: str) -> None: + def test_add_nan_raises(self, v: Variable, operand: str) -> None: vals = np.arange(20, dtype=float) vals[0] = np.nan vals[5] = np.nan @@ -1125,9 +1125,8 @@ def test_add_nan_propagates(self, v: Variable, operand: str) -> None: vals, dims=["dim_2"], coords={"dim_2": range(20)} ) target = v if operand == "var" else v + 5 - result = target + nan_constant - for i in self.NAN_POSITIONS: - assert np.isnan(result.const.values[i]) + with pytest.raises(ValueError, match="NaN"): + target + nan_constant @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) @@ -1148,7 +1147,7 @@ def test_sub_nan_filled( @pytest.mark.v1_only @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_sub_nan_propagates(self, v: Variable, operand: str) -> None: + def test_sub_nan_raises(self, v: Variable, operand: str) -> None: vals = np.arange(20, dtype=float) for i in self.NAN_POSITIONS: vals[i] = np.nan @@ -1156,9 +1155,8 @@ def test_sub_nan_propagates(self, v: Variable, operand: str) -> None: vals, dims=["dim_2"], coords={"dim_2": range(20)} ) target = v if operand == "var" else v + 5 - result = target - nan_constant - for i in self.NAN_POSITIONS: - assert np.isnan(result.const.values[i]) + with pytest.raises(ValueError, match="NaN"): + target - nan_constant @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) @@ -1178,15 +1176,15 @@ def test_mul_nan_filled( @pytest.mark.v1_only @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_mul_nan_propagates(self, v: Variable, operand: str) -> None: + def test_mul_nan_raises(self, v: Variable, operand: str) -> None: vals = 
np.arange(20, dtype=float) vals[0] = np.nan nan_constant = xr.DataArray( vals, dims=["dim_2"], coords={"dim_2": range(20)} ) target = v if operand == "var" else 1 * v - result = target * nan_constant - assert np.isnan(result.coeffs.squeeze().values[0]) + with pytest.raises(ValueError, match="NaN"): + target * nan_constant @pytest.mark.legacy_only @pytest.mark.parametrize("operand", ["var", "expr"]) @@ -1207,7 +1205,7 @@ def test_div_nan_filled( @pytest.mark.v1_only @pytest.mark.parametrize("operand", ["var", "expr"]) - def test_div_nan_propagates(self, v: Variable, operand: str) -> None: + def test_div_nan_raises(self, v: Variable, operand: str) -> None: vals = np.arange(20, dtype=float) + 1 vals[0] = np.nan vals[5] = np.nan @@ -1215,9 +1213,8 @@ def test_div_nan_propagates(self, v: Variable, operand: str) -> None: vals, dims=["dim_2"], coords={"dim_2": range(20)} ) target = v if operand == "var" else 1 * v - result = target / nan_constant - assert np.isnan(result.coeffs.squeeze().values[0]) - assert np.isnan(result.coeffs.squeeze().values[5]) + with pytest.raises(ValueError, match="NaN"): + target / nan_constant @pytest.mark.legacy_only def test_add_commutativity( @@ -1235,16 +1232,16 @@ def test_add_commutativity( ) @pytest.mark.v1_only - def test_add_commutativity_nan_propagates(self, v: Variable) -> None: + def test_add_commutativity_nan_raises(self, v: Variable) -> None: vals = np.arange(20, dtype=float) vals[0] = np.nan nan_constant = xr.DataArray( vals, dims=["dim_2"], coords={"dim_2": range(20)} ) - result_a = v + nan_constant - result_b = nan_constant + v - assert np.isnan(result_a.const.values[0]) - assert np.isnan(result_b.const.values[0]) + with pytest.raises(ValueError, match="NaN"): + v + nan_constant + with pytest.raises(ValueError, match="NaN"): + nan_constant + v @pytest.mark.legacy_only def test_mul_commutativity( @@ -1261,16 +1258,16 @@ def test_mul_commutativity( ) @pytest.mark.v1_only - def test_mul_commutativity_nan_propagates(self, v: 
Variable) -> None: + def test_mul_commutativity_nan_raises(self, v: Variable) -> None: vals = np.arange(20, dtype=float) vals[0] = np.nan nan_constant = xr.DataArray( vals, dims=["dim_2"], coords={"dim_2": range(20)} ) - result_a = v * nan_constant - result_b = nan_constant * v - assert np.isnan(result_a.coeffs.squeeze().values[0]) - assert np.isnan(result_b.coeffs.squeeze().values[0]) + with pytest.raises(ValueError, match="NaN"): + v * nan_constant + with pytest.raises(ValueError, match="NaN"): + nan_constant * v @pytest.mark.legacy_only def test_quadexpr_add_nan( @@ -1285,16 +1282,15 @@ def test_quadexpr_add_nan( assert not np.isnan(result.const.values).any() @pytest.mark.v1_only - def test_quadexpr_add_nan_propagates(self, v: Variable) -> None: + def test_quadexpr_add_nan_raises(self, v: Variable) -> None: vals = np.arange(20, dtype=float) vals[0] = np.nan nan_constant = xr.DataArray( vals, dims=["dim_2"], coords={"dim_2": range(20)} ) qexpr = v * v - result = qexpr + nan_constant - assert isinstance(result, QuadraticExpression) - assert np.isnan(result.const.values[0]) + with pytest.raises(ValueError, match="NaN"): + qexpr + nan_constant class TestExpressionWithNaN: """ @@ -1678,7 +1674,7 @@ def test_linear_expression_fillna(v: Variable) -> None: filled = filtered.fillna(10) assert isinstance(filled, LinearExpression) assert filled.const.sum() == 200 - assert filled.coeffs.isnull().sum() == 10 + assert (filled.coeffs.squeeze() == 0).sum() == 10 def test_variable_expand_dims(v: Variable) -> None: diff --git a/test/test_optimization.py b/test/test_optimization.py index 2bc1d0ed..4696f6c2 100644 --- a/test/test_optimization.py +++ b/test/test_optimization.py @@ -1108,14 +1108,15 @@ def auto_mask_variable_model() -> Model: @pytest.fixture def auto_mask_constraint_model() -> Model: - """Model with auto_mask=True and NaN in constraint RHS.""" - m = Model(auto_mask=True) + """Model with NaN in constraint RHS, masked explicitly.""" + m = Model() x = 
m.add_variables(lower=0, coords=[range(10)], name="x") y = m.add_variables(lower=0, coords=[range(10)], name="y") rhs = pd.Series([10.0] * 8 + [np.nan, np.nan], range(10)) - m.add_constraints(x + y, GREATER_EQUAL, rhs) # NaN rhs auto-masked + mask = rhs.notnull() + m.add_constraints(x + y, GREATER_EQUAL, rhs.fillna(0), mask=mask) m.add_constraints(x + y, GREATER_EQUAL, 5) m.add_objective(2 * x + y) From 65578ddb1e58234c7632a6393ab9dfce261a1f33 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 12 Mar 2026 08:24:17 +0100 Subject: [PATCH 59/66] Rename nan-edge-cases notebook as dev artifact (_prefix) Co-Authored-By: Claude Opus 4.6 --- examples/{nan-edge-cases.ipynb => _nan-edge-cases.ipynb} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{nan-edge-cases.ipynb => _nan-edge-cases.ipynb} (100%) diff --git a/examples/nan-edge-cases.ipynb b/examples/_nan-edge-cases.ipynb similarity index 100% rename from examples/nan-edge-cases.ipynb rename to examples/_nan-edge-cases.ipynb From 3798f7a067ac5b4b84360af94bf8de90140de20d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 12 Mar 2026 08:28:56 +0100 Subject: [PATCH 60/66] List all structural operations that produce NaN in convention docs shift, where, reindex, reindex_like, unstack produce absent terms. roll, sel, isel, drop_sel, expand_dims, broadcast_like do not. 
Co-Authored-By: Claude Opus 4.6 --- examples/missing-data.ipynb | 37 +------------------------------------ 1 file changed, 1 insertion(+), 36 deletions(-) diff --git a/examples/missing-data.ipynb b/examples/missing-data.ipynb index cb3fa9f6..e660783b 100644 --- a/examples/missing-data.ipynb +++ b/examples/missing-data.ipynb @@ -76,42 +76,7 @@ "cell_type": "markdown", "id": "rqgv2f7nwpb", "metadata": {}, - "source": [ - "---\n", - "\n", - "## The NaN convention\n", - "\n", - "In linopy v1, **NaN means \"absent term.\"** It is never a numeric value.\n", - "\n", - "### How NaN enters\n", - "\n", - "Only two sources produce NaN inside linopy data structures:\n", - "\n", - "1. **`mask=` argument** at construction (`add_variables`, `add_constraints`) — you explicitly declare which slots exist.\n", - "2. **Structural operations** that produce absent slots: `.shift()`, `.roll()`, reindexing to a larger index.\n", - "\n", - "### How NaN propagates\n", - "\n", - "An expression is a sum of terms. Each term has a coefficient, a variable reference, and the expression has a shared constant. NaN marks an **individual term** as absent — it does not mask the entire coordinate.\n", - "\n", - "When expressions are combined (e.g., `x*2 + y.shift(time=1)`), each term is kept independently. At time=0, `y.shift` contributes no term (NaN coeffs, vars=-1), but `x*2` is still valid. The result at time=0 is `2*x[0]` — not absent.\n", - "\n", - "A coordinate is only fully absent when **all** terms have vars=-1 **and** the constant is NaN. This is exactly what `isnull()` checks.\n", - "\n", - "### Where NaN lives\n", - "\n", - "NaN is burned directly into the float fields: `coeffs`, `const`, `rhs`, `lower`, `upper`. Integer fields (`labels`, `vars`) use **-1** as their equivalent sentinel. There is no separate boolean mask array.\n", - "\n", - "### What raises\n", - "\n", - "Any **user-supplied NaN at an API boundary** — in bounds, constants, factors, or RHS — raises `ValueError` immediately. 
Masking is always explicit via `mask=` or `.sel()`, never by passing NaN as a value.\n", - "\n", - "### Why this is consistent\n", - "\n", - "- **`lhs >= rhs` is `lhs - rhs >= 0`**, so RHS obeys the same rule as any constant — no special case.\n", - "- **No dual role for NaN**: it cannot mean both \"absent\" and \"a number I computed with.\" Internal NaN (from `shift`, `mask=`) is always structural. User NaN is always an error.\n", - "- **Absent terms, not absent coordinates**: combining a valid expression with a partially-absent one does not destroy the valid part. Only when *every* term at a coordinate is absent is the coordinate itself absent." - ] + "source": "---\n\n## The NaN convention\n\nIn linopy v1, **NaN means \"absent term.\"** It is never a numeric value.\n\n### How NaN enters\n\nOnly two sources produce NaN inside linopy data structures:\n\n1. **`mask=` argument** at construction (`add_variables`, `add_constraints`) — you explicitly declare which slots exist.\n2. **Structural operations** that produce absent slots: `.shift()`, `.where()`, `.reindex()`, `.reindex_like()`, `.unstack()` (with missing combinations).\n\nOperations that do **not** produce NaN: `.roll()` (circular), `.sel()` / `.isel()` (subset), `.drop_sel()` (drops), `.expand_dims()` / `.broadcast_like()` (broadcast existing data).\n\n### How NaN propagates\n\nAn expression is a sum of terms. Each term has a coefficient, a variable reference, and the expression has a shared constant. NaN marks an **individual term** as absent — it does not mask the entire coordinate.\n\nWhen expressions are combined (e.g., `x*2 + y.shift(time=1)`), each term is kept independently. At time=0, `y.shift` contributes no term (NaN coeffs, vars=-1), but `x*2` is still valid. The result at time=0 is `2*x[0]` — not absent.\n\nA coordinate is only fully absent when **all** terms have vars=-1 **and** the constant is NaN. 
This is exactly what `isnull()` checks.\n\n### Where NaN lives\n\nNaN is burned directly into the float fields: `coeffs`, `const`, `rhs`, `lower`, `upper`. Integer fields (`labels`, `vars`) use **-1** as their equivalent sentinel. There is no separate boolean mask array.\n\n### What raises\n\nAny **user-supplied NaN at an API boundary** — in bounds, constants, factors, or RHS — raises `ValueError` immediately. Masking is always explicit via `mask=` or `.sel()`, never by passing NaN as a value.\n\n### Why this is consistent\n\n- **`lhs >= rhs` is `lhs - rhs >= 0`**, so RHS obeys the same rule as any constant — no special case.\n- **No dual role for NaN**: it cannot mean both \"absent\" and \"a number I computed with.\" Internal NaN (from `shift`, `mask=`) is always structural. User NaN is always an error.\n- **Absent terms, not absent coordinates**: combining a valid expression with a partially-absent one does not destroy the valid part. Only when *every* term at a coordinate is absent is the coordinate itself absent." }, { "cell_type": "markdown", From 1dbecb244fb065d897e64aee8407e3ba4ccc4994 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 12 Mar 2026 09:15:11 +0100 Subject: [PATCH 61/66] Revert FILL_VALUE["coeffs"] back to NaN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Coeffs=0 was an implicit choice about the neutral element for multiplication. NaN is more honest — it means "absent", which is what FILL_VALUE is for. Both NaN and 0 coeffs get filtered by filter_nulls_polars at solve time, so behavior is unchanged. 
Co-Authored-By: Claude Opus 4.6 --- linopy/expressions.py | 4 ++-- test/test_linear_expression.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/linopy/expressions.py b/linopy/expressions.py index 47ada288..fbba3bb7 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -104,7 +104,7 @@ from linopy.variables import ScalarVariable, Variable -FILL_VALUE = {"vars": -1, "coeffs": 0, "const": np.nan} +FILL_VALUE = {"vars": -1, "coeffs": np.nan, "const": np.nan} def _to_piecewise_constraint_descriptor( @@ -2240,7 +2240,7 @@ class QuadraticExpression(BaseExpression): __array_priority__ = 10000 __pandas_priority__ = 10000 - _fill_value = {"vars": -1, "coeffs": 0, "const": np.nan} + _fill_value = {"vars": -1, "coeffs": np.nan, "const": np.nan} def __init__(self, data: Dataset | None, model: Model) -> None: super().__init__(data, model) diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 17a35a41..49a48382 100644 --- a/test/test_linear_expression.py +++ b/test/test_linear_expression.py @@ -1674,7 +1674,7 @@ def test_linear_expression_fillna(v: Variable) -> None: filled = filtered.fillna(10) assert isinstance(filled, LinearExpression) assert filled.const.sum() == 200 - assert (filled.coeffs.squeeze() == 0).sum() == 10 + assert filled.coeffs.isnull().sum() == 10 def test_variable_expand_dims(v: Variable) -> None: From 539ca00e0a59df10ad07bd333551173b77889d44 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 12 Mar 2026 09:26:25 +0100 Subject: [PATCH 62/66] Add expression-combining edge cases to dev notebook New cases: x + y.shift(1) + 5, x + (y+5).shift(1) + 5 (shifted const is lost), x.shift(1) + y.shift(1) (fully absent coordinate). Updated FILL_VALUE docs to reflect coeffs=NaN (not 0). 
Co-Authored-By: Claude Opus 4.6 --- examples/_nan-edge-cases.ipynb | 269 +++++++++++++++++++++------------ 1 file changed, 174 insertions(+), 95 deletions(-) diff --git a/examples/_nan-edge-cases.ipynb b/examples/_nan-edge-cases.ipynb index 57e08c47..9b480735 100644 --- a/examples/_nan-edge-cases.ipynb +++ b/examples/_nan-edge-cases.ipynb @@ -15,9 +15,10 @@ "4. [reindex — expanding coordinates](#reindex)\n", "5. [isnull / fillna — detection and recovery](#isnull--fillna)\n", "6. [Arithmetic with shifted expressions](#arithmetic-with-shifted-expressions)\n", - "7. [Constraints from expressions with NaN](#constraints-from-expressions-with-nan)\n", - "8. [sanitize_missings — the solver boundary](#sanitize_missings)\n", - "9. [FILL_VALUE internals](#fill_value-internals)" + "7. [Combining expressions with absent terms](#combining-expressions-with-absent-terms)\n", + "8. [Constraints from expressions with NaN](#constraints-from-expressions-with-nan)\n", + "9. [sanitize_missings — the solver boundary](#sanitize_missings)\n", + "10. 
[FILL_VALUE internals](#fill_value-internals)" ] }, { @@ -30,10 +31,10 @@ "start_time": "2026-03-12T07:02:17.428044Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:07.720185Z", - "iopub.status.busy": "2026-03-12T07:17:07.720094Z", - "iopub.status.idle": "2026-03-12T07:17:08.462534Z", - "shell.execute_reply": "2026-03-12T07:17:08.462309Z" + "iopub.execute_input": "2026-03-12T08:25:47.306077Z", + "iopub.status.busy": "2026-03-12T08:25:47.305688Z", + "iopub.status.idle": "2026-03-12T08:25:47.906314Z", + "shell.execute_reply": "2026-03-12T08:25:47.906090Z" } }, "outputs": [], @@ -63,10 +64,10 @@ "start_time": "2026-03-12T07:02:18.394722Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.463669Z", - "iopub.status.busy": "2026-03-12T07:17:08.463532Z", - "iopub.status.idle": "2026-03-12T07:17:08.500789Z", - "shell.execute_reply": "2026-03-12T07:17:08.500587Z" + "iopub.execute_input": "2026-03-12T08:25:47.907490Z", + "iopub.status.busy": "2026-03-12T08:25:47.907377Z", + "iopub.status.idle": "2026-03-12T08:25:47.938441Z", + "shell.execute_reply": "2026-03-12T08:25:47.938258Z" } }, "outputs": [], @@ -105,10 +106,10 @@ "start_time": "2026-03-12T07:02:18.450094Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.501837Z", - "iopub.status.busy": "2026-03-12T07:17:08.501734Z", - "iopub.status.idle": "2026-03-12T07:17:08.507679Z", - "shell.execute_reply": "2026-03-12T07:17:08.507514Z" + "iopub.execute_input": "2026-03-12T08:25:47.939568Z", + "iopub.status.busy": "2026-03-12T08:25:47.939444Z", + "iopub.status.idle": "2026-03-12T08:25:47.945428Z", + "shell.execute_reply": "2026-03-12T08:25:47.945260Z" } }, "outputs": [], @@ -127,8 +128,8 @@ "print(\"const: \", shifted.const.values)\n", "print(\"isnull:\", shifted.isnull().values)\n", "\n", - "print(\"\\nKey: vars=-1 is the integer sentinel, const=NaN marks the slot as absent.\")\n", - "print(\"coeffs are filled with\", FILL_VALUE[\"coeffs\"], \"(not NaN).\")" + "print(\"\\nKey: all float 
fields (coeffs, const) get NaN at the gap.\")\n", + "print(\"Integer field vars gets -1 sentinel.\")" ] }, { @@ -141,10 +142,10 @@ "start_time": "2026-03-12T07:02:18.469156Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.508604Z", - "iopub.status.busy": "2026-03-12T07:17:08.508544Z", - "iopub.status.idle": "2026-03-12T07:17:08.510507Z", - "shell.execute_reply": "2026-03-12T07:17:08.510344Z" + "iopub.execute_input": "2026-03-12T08:25:47.946355Z", + "iopub.status.busy": "2026-03-12T08:25:47.946298Z", + "iopub.status.idle": "2026-03-12T08:25:47.948146Z", + "shell.execute_reply": "2026-03-12T08:25:47.947974Z" } }, "outputs": [], @@ -178,10 +179,10 @@ "start_time": "2026-03-12T07:02:18.481358Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.511390Z", - "iopub.status.busy": "2026-03-12T07:17:08.511331Z", - "iopub.status.idle": "2026-03-12T07:17:08.515994Z", - "shell.execute_reply": "2026-03-12T07:17:08.515833Z" + "iopub.execute_input": "2026-03-12T08:25:47.949110Z", + "iopub.status.busy": "2026-03-12T08:25:47.949039Z", + "iopub.status.idle": "2026-03-12T08:25:47.954063Z", + "shell.execute_reply": "2026-03-12T08:25:47.953891Z" } }, "outputs": [], @@ -207,7 +208,7 @@ "## where\n", "\n", "`.where(cond)` masks slots where the condition is False.\n", - "Masked slots get `vars=-1, coeffs=0, const=NaN` — same as FILL_VALUE." + "Masked slots get `vars=-1, coeffs=NaN, const=NaN` — all float fields NaN, integer sentinel -1." 
] }, { @@ -220,10 +221,10 @@ "start_time": "2026-03-12T07:02:18.496728Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.516930Z", - "iopub.status.busy": "2026-03-12T07:17:08.516857Z", - "iopub.status.idle": "2026-03-12T07:17:08.522323Z", - "shell.execute_reply": "2026-03-12T07:17:08.522120Z" + "iopub.execute_input": "2026-03-12T08:25:47.955033Z", + "iopub.status.busy": "2026-03-12T08:25:47.954967Z", + "iopub.status.idle": "2026-03-12T08:25:47.960120Z", + "shell.execute_reply": "2026-03-12T08:25:47.959950Z" } }, "outputs": [], @@ -264,10 +265,10 @@ "start_time": "2026-03-12T07:02:18.506075Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.523325Z", - "iopub.status.busy": "2026-03-12T07:17:08.523263Z", - "iopub.status.idle": "2026-03-12T07:17:08.530243Z", - "shell.execute_reply": "2026-03-12T07:17:08.530070Z" + "iopub.execute_input": "2026-03-12T08:25:47.961042Z", + "iopub.status.busy": "2026-03-12T08:25:47.960980Z", + "iopub.status.idle": "2026-03-12T08:25:47.967846Z", + "shell.execute_reply": "2026-03-12T08:25:47.967693Z" } }, "outputs": [], @@ -318,10 +319,10 @@ "start_time": "2026-03-12T07:02:18.524875Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.531196Z", - "iopub.status.busy": "2026-03-12T07:17:08.531119Z", - "iopub.status.idle": "2026-03-12T07:17:08.536287Z", - "shell.execute_reply": "2026-03-12T07:17:08.536119Z" + "iopub.execute_input": "2026-03-12T08:25:47.968881Z", + "iopub.status.busy": "2026-03-12T08:25:47.968800Z", + "iopub.status.idle": "2026-03-12T08:25:47.974292Z", + "shell.execute_reply": "2026-03-12T08:25:47.974130Z" } }, "outputs": [], @@ -369,10 +370,10 @@ "start_time": "2026-03-12T07:02:18.541098Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.537166Z", - "iopub.status.busy": "2026-03-12T07:17:08.537114Z", - "iopub.status.idle": "2026-03-12T07:17:08.546152Z", - "shell.execute_reply": "2026-03-12T07:17:08.545979Z" + "iopub.execute_input": "2026-03-12T08:25:47.975240Z", + 
"iopub.status.busy": "2026-03-12T08:25:47.975181Z", + "iopub.status.idle": "2026-03-12T08:25:47.983757Z", + "shell.execute_reply": "2026-03-12T08:25:47.983582Z" } }, "outputs": [], @@ -406,10 +407,10 @@ "start_time": "2026-03-12T07:02:18.562699Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.547012Z", - "iopub.status.busy": "2026-03-12T07:17:08.546958Z", - "iopub.status.idle": "2026-03-12T07:17:08.554977Z", - "shell.execute_reply": "2026-03-12T07:17:08.554795Z" + "iopub.execute_input": "2026-03-12T08:25:47.984647Z", + "iopub.status.busy": "2026-03-12T08:25:47.984591Z", + "iopub.status.idle": "2026-03-12T08:25:47.992694Z", + "shell.execute_reply": "2026-03-12T08:25:47.992528Z" } }, "outputs": [], @@ -439,29 +440,113 @@ "id": "eodco2pcrqn", "metadata": { "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.555856Z", - "iopub.status.busy": "2026-03-12T07:17:08.555799Z", - "iopub.status.idle": "2026-03-12T07:17:08.565818Z", - "shell.execute_reply": "2026-03-12T07:17:08.565644Z" + "iopub.execute_input": "2026-03-12T08:25:47.993521Z", + "iopub.status.busy": "2026-03-12T08:25:47.993467Z", + "iopub.status.idle": "2026-03-12T08:25:48.002848Z", + "shell.execute_reply": "2026-03-12T08:25:48.002675Z" } }, "outputs": [], "source": [ - "# Combining expressions: absent term does NOT poison valid terms\n", + "# Case 1: x + y.shift(1) — absent term, no poisoning\n", "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", "m, x = make_model()\n", "y = m.add_variables(lower=0, coords=[pd.RangeIndex(5, name=\"time\")], name=\"y\")\n", "\n", - "result = x * 2 + (1 * y).shift(time=1)\n", - "print(\"=== V1: x*2 + y.shift(1) ===\")\n", + "result = x + (1 * y).shift(time=1)\n", + "print(\"=== x + y.shift(1) ===\")\n", "print(\"const: \", result.const.values)\n", "print(\"isnull:\", result.isnull().values)\n", - "print(\"vars:\")\n", - "print(result.vars.values)\n", - "print(\"coeffs:\")\n", - "print(result.coeffs.values)\n", - "print(\"\\n→ time=0: x[0] with 
coeff=2 is valid! y's absent term (vars=-1, coeffs=NaN)\")\n", - "print(\" does NOT mask the coordinate. const=0 (not NaN) because xr.sum skips NaN.\")" + "print(\"vars:\\n\", result.vars.values)\n", + "print(\"coeffs:\\n\", result.coeffs.values)\n", + "print(\"\\n→ time=0: x[0] is valid (coeff=1), y's term is absent (coeff=NaN, vars=-1).\")\n", + "print(\" const=0 because xr.sum(skipna=True) treats NaN as 0 in the sum.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "qfgxszizmcf", + "metadata": { + "execution": { + "iopub.execute_input": "2026-03-12T08:25:48.003734Z", + "iopub.status.busy": "2026-03-12T08:25:48.003675Z", + "iopub.status.idle": "2026-03-12T08:25:48.011267Z", + "shell.execute_reply": "2026-03-12T08:25:48.011094Z" + } + }, + "outputs": [], + "source": [ + "# Case 2: x + y.shift(1) + 5 — scalar adds to const, absent term unaffected\n", + "result2 = x + (1 * y).shift(time=1) + 5\n", + "print(\"=== x + y.shift(1) + 5 ===\")\n", + "print(\"const: \", result2.const.values)\n", + "print(\"isnull:\", result2.isnull().values)\n", + "print(\"\\n→ time=0: const=5 (from x's 0 + scalar 5). 
y's absent term doesn't interfere.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "mwjx9or4azm", + "metadata": { + "execution": { + "iopub.execute_input": "2026-03-12T08:25:48.012148Z", + "iopub.status.busy": "2026-03-12T08:25:48.012093Z", + "iopub.status.idle": "2026-03-12T08:25:48.020636Z", + "shell.execute_reply": "2026-03-12T08:25:48.020460Z" + } + }, + "outputs": [], + "source": [ + "# Case 3: x + (y+5).shift(1) — shifted constant is LOST at the gap\n", + "result3 = x + (1 * y + 5).shift(time=1) + 5\n", + "print(\"=== x + (y+5).shift(1) + 5 ===\")\n", + "print(\"const: \", result3.const.values)\n", + "print(\"isnull:\", result3.isnull().values)\n", + "print(\"\\n→ time=0: const=5, NOT 10.\")\n", + "print(\" The +5 inside (y+5) was part of the shifted expression.\")\n", + "print(\" shift makes the ENTIRE expression absent at time=0 — including its constant.\")\n", + "print(\" So the shifted 5 is lost. Only the outer +5 survives.\")\n", + "print(\" time=1..4: const=10 (shifted 5 + outer 5).\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "h9wto4skk5s", + "metadata": { + "execution": { + "iopub.execute_input": "2026-03-12T08:25:48.021484Z", + "iopub.status.busy": "2026-03-12T08:25:48.021426Z", + "iopub.status.idle": "2026-03-12T08:25:48.029464Z", + "shell.execute_reply": "2026-03-12T08:25:48.029305Z" + } + }, + "outputs": [], + "source": [ + "# Case 4: both expressions shifted — fully absent coordinate\n", + "result4 = (1 * x).shift(time=1) + (1 * y).shift(time=1)\n", + "print(\"=== x.shift(1) + y.shift(1) ===\")\n", + "print(\"const: \", result4.const.values)\n", + "print(\"isnull:\", result4.isnull().values)\n", + "print(\"\\n→ time=0: ALL terms absent AND const=NaN → isnull=True.\")\n", + "print(\" This is the only way a coordinate becomes fully absent from merging.\")" + ] + }, + { + "cell_type": "markdown", + "id": "j00yil0a95", + "metadata": {}, + "source": [ + "### Summary: combining expressions with 
absent terms\n", + "\n", + "| Expression | const at time=0 | isnull at time=0 | Why |\n", + "|---|---|---|---|\n", + "| `x + y.shift(1)` | 0 | False | y's term absent, x valid, const sum skips NaN |\n", + "| `x + y.shift(1) + 5` | 5 | False | Same, then +5 on const |\n", + "| `x + (y+5).shift(1) + 5` | 5 | False | Shifted const (5) is lost — only outer +5 survives |\n", + "| `x.shift(1) + y.shift(1)` | NaN | True | ALL terms absent → coordinate absent |" ] }, { @@ -474,20 +559,18 @@ "| | Legacy | v1 |\n", "|---|---|---|\n", "| `shifted + 5` at absent slot | const=5 (alive!) | const=NaN (absent) |\n", - "| `shifted * 3` at absent slot | coeffs=0, const=0 | coeffs=0, const=NaN |\n", + "| `shifted * 3` at absent slot | coeffs=0, const=0 | coeffs=NaN, const=NaN |\n", "| `isnull()` after arithmetic | False (slot revived!) | True (slot stays absent) |\n", "\n", "Legacy can **revive** absent slots through scalar arithmetic. v1 cannot — once absent, always absent.\n", "\n", - "### But: combining expressions does NOT poison\n", + "---\n", "\n", - "When two expressions are merged (e.g., `x*2 + y.shift(1)`), each term is independent. An absent term from `y.shift` does **not** mask the valid `x` term at the same coordinate:\n", + "## Combining expressions with absent terms\n", "\n", - "```python\n", - "x*2 + y.shift(time=1) # at time=0: 2*x[0] (valid!) + absent term → 2*x[0]\n", - "```\n", + "When two expressions are merged (e.g., `x + y.shift(1)`), each term is concatenated along the `_term` dimension. The constant is summed with `skipna=True` — so NaN from one operand does NOT poison the other.\n", "\n", - "A coordinate is only fully absent when **all** terms have `vars=-1` and `const` is NaN. This is what `isnull()` checks." 
+ "**Key rule: absent terms don't poison valid terms at the same coordinate.**" ] }, { @@ -513,10 +596,10 @@ "start_time": "2026-03-12T07:02:18.583043Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.566737Z", - "iopub.status.busy": "2026-03-12T07:17:08.566681Z", - "iopub.status.idle": "2026-03-12T07:17:08.579707Z", - "shell.execute_reply": "2026-03-12T07:17:08.579538Z" + "iopub.execute_input": "2026-03-12T08:25:48.030525Z", + "iopub.status.busy": "2026-03-12T08:25:48.030458Z", + "iopub.status.idle": "2026-03-12T08:25:48.043325Z", + "shell.execute_reply": "2026-03-12T08:25:48.043168Z" } }, "outputs": [], @@ -560,10 +643,10 @@ "start_time": "2026-03-12T07:02:18.607269Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.580657Z", - "iopub.status.busy": "2026-03-12T07:17:08.580602Z", - "iopub.status.idle": "2026-03-12T07:17:08.596413Z", - "shell.execute_reply": "2026-03-12T07:17:08.596253Z" + "iopub.execute_input": "2026-03-12T08:25:48.044184Z", + "iopub.status.busy": "2026-03-12T08:25:48.044131Z", + "iopub.status.idle": "2026-03-12T08:25:48.060912Z", + "shell.execute_reply": "2026-03-12T08:25:48.060763Z" } }, "outputs": [], @@ -624,10 +707,10 @@ "start_time": "2026-03-12T07:02:18.635530Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.597352Z", - "iopub.status.busy": "2026-03-12T07:17:08.597296Z", - "iopub.status.idle": "2026-03-12T07:17:08.605197Z", - "shell.execute_reply": "2026-03-12T07:17:08.605028Z" + "iopub.execute_input": "2026-03-12T08:25:48.061831Z", + "iopub.status.busy": "2026-03-12T08:25:48.061768Z", + "iopub.status.idle": "2026-03-12T08:25:48.069806Z", + "shell.execute_reply": "2026-03-12T08:25:48.069649Z" } }, "outputs": [], @@ -670,18 +753,14 @@ "| Type | Field | FILL_VALUE | Why |\n", "|---|---|---|---|\n", "| LinearExpression | `vars` | -1 | Integer sentinel (no variable) |\n", - "| LinearExpression | `coeffs` | 0 | \"No term\" = zero coefficient |\n", - "| LinearExpression | `const` | NaN | Marks slot 
as absent (needed for `isnull()`) |\n", + "| LinearExpression | `coeffs` | NaN | Absent — not a numeric value |\n", + "| LinearExpression | `const` | NaN | Absent — needed for `isnull()` detection |\n", "| Variable | `labels` | -1 | Integer sentinel (no variable) |\n", "| Variable | `lower` | NaN | Absent bound |\n", "| Variable | `upper` | NaN | Absent bound |\n", "| Constraint | `labels` | -1 | Integer sentinel (no constraint) |\n", "\n", - "### Why coeffs=0 but const=NaN?\n", - "\n", - "- **coeffs=0**: A missing term contributes nothing to the sum. `0 * var = 0`.\n", - "- **const=NaN**: Distinguishes \"absent slot\" from \"slot with zero constant.\"\n", - " Without NaN in const, `isnull()` couldn't tell the difference.\n", + "All float fields use NaN for absence. Integer fields use -1. No implicit choice about neutral elements.\n", "\n", "### isnull() depends on const=NaN\n", "\n", @@ -690,7 +769,7 @@ " return (self.vars == -1).all(helper_dims) & self.const.isnull()\n", "```\n", "\n", - "If const were 0 instead of NaN, a shifted expression would not be detected as null." + "Both conditions must be true: all variable references are -1 AND the constant is NaN. 
This distinguishes \"absent\" from \"valid expression with zero constant.\"" ] }, { @@ -703,10 +782,10 @@ "start_time": "2026-03-12T07:02:18.654471Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.606113Z", - "iopub.status.busy": "2026-03-12T07:17:08.606058Z", - "iopub.status.idle": "2026-03-12T07:17:08.612124Z", - "shell.execute_reply": "2026-03-12T07:17:08.611961Z" + "iopub.execute_input": "2026-03-12T08:25:48.070762Z", + "iopub.status.busy": "2026-03-12T08:25:48.070706Z", + "iopub.status.idle": "2026-03-12T08:25:48.077412Z", + "shell.execute_reply": "2026-03-12T08:25:48.077245Z" } }, "outputs": [], @@ -723,7 +802,7 @@ "print(f\"const FILL={FILL_VALUE['const']}:\", shifted.const.values)\n", "print()\n", "print(\"isnull:\", shifted.isnull().values)\n", - "print(\"\\nSlot 0: vars=-1, coeffs=0, const=NaN → isnull=True\")\n", + "print(\"\\nSlot 0: vars=-1, coeffs=NaN, const=NaN → isnull=True\")\n", "print(\"Slot 1: vars=0, coeffs=2, const=10 → isnull=False\")" ] }, @@ -737,10 +816,10 @@ "start_time": "2026-03-12T07:02:18.668107Z" }, "execution": { - "iopub.execute_input": "2026-03-12T07:17:08.613034Z", - "iopub.status.busy": "2026-03-12T07:17:08.612977Z", - "iopub.status.idle": "2026-03-12T07:17:08.614319Z", - "shell.execute_reply": "2026-03-12T07:17:08.614153Z" + "iopub.execute_input": "2026-03-12T08:25:48.078298Z", + "iopub.status.busy": "2026-03-12T08:25:48.078237Z", + "iopub.status.idle": "2026-03-12T08:25:48.079577Z", + "shell.execute_reply": "2026-03-12T08:25:48.079408Z" } }, "outputs": [], From 1ce92618af4812c6539b34e90cd86a2ca6c98314 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 14 Mar 2026 17:48:47 +0100 Subject: [PATCH 63/66] Streamline NaN edge cases notebook and add fillna discussion Rewrite notebook to be more concise: show expression objects directly instead of printing individual fields, use markdown for explanations. 
Add section addressing why fillna on expressions is unnecessary for outer-join NaN (structural markers, not numeric gaps). Fix summary table for x.shift(1)+y.shift(1) case (const=0, not NaN). Co-Authored-By: Claude Opus 4.6 (1M context) --- examples/_nan-edge-cases.ipynb | 1500 ++++++++++++++++++++------------ 1 file changed, 962 insertions(+), 538 deletions(-) diff --git a/examples/_nan-edge-cases.ipynb b/examples/_nan-edge-cases.ipynb index 9b480735..c811a365 100644 --- a/examples/_nan-edge-cases.ipynb +++ b/examples/_nan-edge-cases.ipynb @@ -4,439 +4,868 @@ "cell_type": "markdown", "id": "intro", "metadata": {}, - "source": [ - "# NaN Edge Cases: Legacy vs v1\n", - "\n", - "Development notebook investigating how NaN behaves across linopy operations under both conventions.\n", - "\n", - "1. [shift — the primary NaN source](#shift)\n", - "2. [roll — circular, no NaN](#roll)\n", - "3. [where — conditional masking](#where)\n", - "4. [reindex — expanding coordinates](#reindex)\n", - "5. [isnull / fillna — detection and recovery](#isnull--fillna)\n", - "6. [Arithmetic with shifted expressions](#arithmetic-with-shifted-expressions)\n", - "7. [Combining expressions with absent terms](#combining-expressions-with-absent-terms)\n", - "8. [Constraints from expressions with NaN](#constraints-from-expressions-with-nan)\n", - "9. [sanitize_missings — the solver boundary](#sanitize_missings)\n", - "10. [FILL_VALUE internals](#fill_value-internals)" - ] + "source": "# NaN Edge Cases: Legacy vs v1\n\nDevelopment notebook investigating how NaN behaves across linopy operations under both conventions.\n\n**Core principle (v1):** NaN means \"absent term\" — not a numeric value. It enters only through structural operations (`shift`, `where`, `reindex`, `mask=`) and propagates via IEEE semantics. Absent terms don't poison valid terms at the same coordinate.\n\n1. [Sources of NaN](#sources-of-nan)\n2. [isnull detection](#isnull-detection)\n3. 
[Arithmetic on shifted expressions](#arithmetic-on-shifted-expressions)\n4. [Combining expressions with absent terms](#combining-expressions-with-absent-terms)\n5. [Constraints from expressions with NaN](#constraints-from-expressions-with-nan)\n6. [Why fillna on expressions is unnecessary](#why-fillna-on-expressions-is-unnecessary)\n7. [FILL_VALUE internals](#fill_value-internals)" }, { "cell_type": "code", - "execution_count": null, "id": "imports", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.382686Z", - "start_time": "2026-03-12T07:02:17.428044Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.306077Z", "iopub.status.busy": "2026-03-12T08:25:47.305688Z", "iopub.status.idle": "2026-03-12T08:25:47.906314Z", "shell.execute_reply": "2026-03-12T08:25:47.906090Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:50.978615Z", + "start_time": "2026-03-14T16:44:48.272788Z" } }, - "outputs": [], - "source": [ - "import warnings\n", - "\n", - "import pandas as pd\n", - "import xarray as xr\n", - "\n", - "import linopy\n", - "from linopy import Model\n", - "from linopy.config import LinopyDeprecationWarning\n", - "from linopy.expressions import FILL_VALUE\n", - "\n", - "warnings.filterwarnings(\"ignore\", category=LinopyDeprecationWarning)\n", - "\n", - "print(\"FILL_VALUE:\", FILL_VALUE)" - ] + "source": "import warnings\n\nimport pandas as pd\nimport xarray as xr\n\nimport linopy\nfrom linopy import Model\nfrom linopy.config import LinopyDeprecationWarning\nfrom linopy.expressions import FILL_VALUE\n\nwarnings.filterwarnings(\"ignore\", category=LinopyDeprecationWarning)", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MindOpt 2.2.0 | 2e28db43, Aug 29 2025, 14:27:12 | arm64 - macOS 26.2\n", + "Start license validation (current time : 14-MAR-2026 17:44:50 UTC+0100).\n", + "[WARN ] No license file is found.\n", + "[ERROR] No valid license was found. 
Please visit https://opt.aliyun.com/doc/latest/en/html/installation/license.html to apply for and set up your license.\n", + "License validation terminated. Time : 0.000s\n", + "\n" + ] + } + ], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "setup", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.440164Z", - "start_time": "2026-03-12T07:02:18.394722Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.907490Z", "iopub.status.busy": "2026-03-12T08:25:47.907377Z", "iopub.status.idle": "2026-03-12T08:25:47.938441Z", "shell.execute_reply": "2026-03-12T08:25:47.938258Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.019790Z", + "start_time": "2026-03-14T16:44:50.995727Z" } }, + "source": "def make_model():\n m = Model()\n time = pd.RangeIndex(5, name=\"time\")\n x = m.add_variables(lower=0, coords=[time], name=\"x\")\n return m, x", "outputs": [], - "source": [ - "def make_model():\n", - " m = Model()\n", - " time = pd.RangeIndex(5, name=\"time\")\n", - " x = m.add_variables(lower=0, coords=[time], name=\"x\")\n", - " return m, x\n", - "\n", - "\n", - "m, x = make_model()\n", - "print(\"x:\", x)" - ] + "execution_count": null }, { "cell_type": "markdown", "id": "shift-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## shift\n", - "\n", - "`.shift()` is the primary structural source of NaN. It shifts data along a dimension,\n", - "creating a gap that must be filled. The fill values come from `FILL_VALUE`." - ] + "source": "---\n\n## Sources of NaN\n\n### shift\n\n`.shift()` is the primary structural source of NaN. It shifts data along a dimension, creating a gap filled with `FILL_VALUE` (`vars=-1`, `coeffs=NaN`, `const=NaN`)." 
}, { "cell_type": "code", - "execution_count": null, "id": "shift-demo", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.457999Z", - "start_time": "2026-03-12T07:02:18.450094Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.939568Z", "iopub.status.busy": "2026-03-12T08:25:47.939444Z", "iopub.status.idle": "2026-03-12T08:25:47.945428Z", "shell.execute_reply": "2026-03-12T08:25:47.945260Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.248456Z", + "start_time": "2026-03-14T16:44:51.045488Z" } }, - "outputs": [], - "source": [ - "expr = 2 * x + 10\n", - "shifted = expr.shift(time=1)\n", - "\n", - "print(\"=== Original ===\")\n", - "print(\"coeffs:\", expr.coeffs.squeeze().values)\n", - "print(\"vars: \", expr.vars.squeeze().values)\n", - "print(\"const: \", expr.const.values)\n", - "\n", - "print(\"\\n=== Shifted (time=1) ===\")\n", - "print(\"coeffs:\", shifted.coeffs.squeeze().values)\n", - "print(\"vars: \", shifted.vars.squeeze().values)\n", - "print(\"const: \", shifted.const.values)\n", - "print(\"isnull:\", shifted.isnull().values)\n", - "\n", - "print(\"\\nKey: all float fields (coeffs, const) get NaN at the gap.\")\n", - "print(\"Integer field vars gets -1 sentinel.\")" - ] + "source": "m, x = make_model()\nexpr = 2 * x + 10\nexpr.shift(time=1)", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 5]:\n", + "---------------------------\n", + "[0]: None\n", + "[1]: +2 x[0] + 10\n", + "[2]: +2 x[1] + 10\n", + "[3]: +2 x[2] + 10\n", + "[4]: +2 x[3] + 10" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "shift-variable", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.471949Z", - "start_time": "2026-03-12T07:02:18.469156Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.946355Z", "iopub.status.busy": 
"2026-03-12T08:25:47.946298Z", "iopub.status.idle": "2026-03-12T08:25:47.948146Z", "shell.execute_reply": "2026-03-12T08:25:47.947974Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.319384Z", + "start_time": "2026-03-14T16:44:51.288865Z" } }, - "outputs": [], - "source": [ - "# Variables also support shift — labels get -1 sentinel, bounds get NaN\n", - "x_shifted = x.shift(time=1)\n", - "print(\"shifted variable labels:\", x_shifted.labels.values)\n", - "print(\"shifted variable lower: \", x_shifted.lower.values)\n", - "print(\"shifted variable upper: \", x_shifted.upper.values)" - ] + "source": "# Variables also support shift — labels get -1 sentinel, bounds get NaN\nx.shift(time=1)", + "outputs": [ + { + "data": { + "text/plain": [ + "Variable (time: 5) - 1 masked entries\n", + "-------------------------------------\n", + "[0]: None\n", + "[1]: x[0] ∈ [0, inf]\n", + "[2]: x[1] ∈ [0, inf]\n", + "[3]: x[2] ∈ [0, inf]\n", + "[4]: x[3] ∈ [0, inf]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "roll-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## roll\n", - "\n", - "`.roll()` is circular — values wrap around, no NaN is introduced." - ] + "source": "### roll\n\n`.roll()` is circular — values wrap around, no NaN introduced." 
}, { "cell_type": "code", - "execution_count": null, "id": "roll-demo", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.487236Z", - "start_time": "2026-03-12T07:02:18.481358Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.949110Z", "iopub.status.busy": "2026-03-12T08:25:47.949039Z", "iopub.status.idle": "2026-03-12T08:25:47.954063Z", "shell.execute_reply": "2026-03-12T08:25:47.953891Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.398779Z", + "start_time": "2026-03-14T16:44:51.344805Z" } }, - "outputs": [], - "source": [ - "expr = 2 * x + 10\n", - "rolled = expr.roll(time=1)\n", - "\n", - "print(\"=== Rolled (time=1) ===\")\n", - "print(\"coeffs:\", rolled.coeffs.squeeze().values)\n", - "print(\"vars: \", rolled.vars.squeeze().values)\n", - "print(\"const: \", rolled.const.values)\n", - "print(\"isnull:\", rolled.isnull().values)\n", - "print(\"\\nNo NaN — values wrap around.\")" - ] + "source": "m, x = make_model()\n(2 * x + 10).roll(time=1)", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 5]:\n", + "---------------------------\n", + "[0]: +2 x[4] + 10\n", + "[1]: +2 x[0] + 10\n", + "[2]: +2 x[1] + 10\n", + "[3]: +2 x[2] + 10\n", + "[4]: +2 x[3] + 10" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "where-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## where\n", - "\n", - "`.where(cond)` masks slots where the condition is False.\n", - "Masked slots get `vars=-1, coeffs=NaN, const=NaN` — all float fields NaN, integer sentinel -1." - ] + "source": "### where\n\n`.where(cond)` masks slots where the condition is False → `vars=-1, coeffs=NaN, const=NaN`." 
}, { "cell_type": "code", - "execution_count": null, "id": "where-demo", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.502493Z", - "start_time": "2026-03-12T07:02:18.496728Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.955033Z", "iopub.status.busy": "2026-03-12T08:25:47.954967Z", "iopub.status.idle": "2026-03-12T08:25:47.960120Z", "shell.execute_reply": "2026-03-12T08:25:47.959950Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.456507Z", + "start_time": "2026-03-14T16:44:51.430059Z" } }, - "outputs": [], - "source": [ - "expr = 2 * x + 10\n", - "mask = xr.DataArray([True, True, False, False, True], dims=[\"time\"])\n", - "masked = expr.where(mask)\n", - "\n", - "print(\"=== where(mask) ===\")\n", - "print(\"coeffs:\", masked.coeffs.squeeze().values)\n", - "print(\"vars: \", masked.vars.squeeze().values)\n", - "print(\"const: \", masked.const.values)\n", - "print(\"isnull:\", masked.isnull().values)\n", - "\n", - "print(\"\\nFalse positions → absent slot (vars=-1, const=NaN).\")\n", - "print(\"Same shape, fewer active slots.\")" - ] + "source": "m, x = make_model()\nmask = xr.DataArray([True, True, False, False, True], dims=[\"time\"])\n(2 * x + 10).where(mask)", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 5]:\n", + "---------------------------\n", + "[0]: +2 x[0] + 10\n", + "[1]: +2 x[1] + 10\n", + "[2]: None\n", + "[3]: None\n", + "[4]: +2 x[4] + 10" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "reindex-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## reindex\n", - "\n", - "`.reindex()` expands or shrinks coordinates. New coordinates get FILL_VALUE." - ] + "source": "### reindex\n\n`.reindex()` expands or shrinks coordinates. New coordinates get `FILL_VALUE`." 
}, { "cell_type": "code", - "execution_count": null, "id": "reindex-demo", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.515465Z", - "start_time": "2026-03-12T07:02:18.506075Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.961042Z", "iopub.status.busy": "2026-03-12T08:25:47.960980Z", "iopub.status.idle": "2026-03-12T08:25:47.967846Z", "shell.execute_reply": "2026-03-12T08:25:47.967693Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.512019Z", + "start_time": "2026-03-14T16:44:51.482771Z" } }, - "outputs": [], - "source": [ - "expr = 2 * x + 10\n", - "\n", - "# Expand to a larger index\n", - "new_time = pd.RangeIndex(7, name=\"time\")\n", - "expanded = expr.reindex({\"time\": new_time})\n", - "\n", - "print(\"=== reindex to [0..6] ===\")\n", - "print(\"coeffs:\", expanded.coeffs.squeeze().values)\n", - "print(\"vars: \", expanded.vars.squeeze().values)\n", - "print(\"const: \", expanded.const.values)\n", - "print(\"isnull:\", expanded.isnull().values)\n", - "\n", - "# Shrink to a smaller index\n", - "shrunk = expr.reindex({\"time\": [1, 3]})\n", - "print(\"\\n=== reindex to [1, 3] ===\")\n", - "print(\"coeffs:\", shrunk.coeffs.squeeze().values)\n", - "print(\"const: \", shrunk.const.values)\n", - "print(\"\\nNew positions [5, 6] are absent. 
Shrinking drops slots.\")" - ] + "source": "m, x = make_model()\nexpr = 2 * x + 10\n\n# Expand to a larger index — new positions [5, 6] are absent\nexpr.reindex({\"time\": pd.RangeIndex(7, name=\"time\")})", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 7]:\n", + "---------------------------\n", + "[0]: +2 x[0] + 10\n", + "[1]: +2 x[1] + 10\n", + "[2]: +2 x[2] + 10\n", + "[3]: +2 x[3] + 10\n", + "[4]: +2 x[4] + 10\n", + "[5]: None\n", + "[6]: None" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "isnull-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## isnull / fillna\n", - "\n", - "`isnull()` detects absent slots. The check is:\n", - "```\n", - "(vars == -1).all(helper_dims) & const.isnull()\n", - "```\n", - "Both conditions must be true — a slot is only \"absent\" if there are no variables AND no constant." - ] + "source": "---\n\n## isnull detection\n\n`isnull()` checks: `(vars == -1).all(helper_dims) & const.isnull()`\n\nBoth conditions must be true — a slot is only \"absent\" if there are no variables AND no constant. This distinguishes \"absent\" from \"valid expression with zero constant\"." 
}, { "cell_type": "code", - "execution_count": null, "id": "isnull-demo", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.531165Z", - "start_time": "2026-03-12T07:02:18.524875Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.968881Z", "iopub.status.busy": "2026-03-12T08:25:47.968800Z", "iopub.status.idle": "2026-03-12T08:25:47.974292Z", "shell.execute_reply": "2026-03-12T08:25:47.974130Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.613696Z", + "start_time": "2026-03-14T16:44:51.561248Z" } }, - "outputs": [], - "source": [ - "expr = 2 * x + 10\n", - "shifted = expr.shift(time=2)\n", - "\n", - "print(\"=== isnull on shifted expression ===\")\n", - "print(\"vars: \", shifted.vars.squeeze().values)\n", - "print(\"const: \", shifted.const.values)\n", - "print(\"isnull:\", shifted.isnull().values)\n", - "\n", - "# What about an expression with const=0 but vars=-1?\n", - "# This would be a \"zero expression\" not an absent one.\n", - "print(\"\\n=== Why const=NaN matters ===\")\n", - "print(\"If const were 0 instead of NaN, isnull() would be False\")\n", - "print(\"→ the slot would look like a valid 'zero expression'\")\n", - "print(\"→ NaN in const is what distinguishes 'absent' from 'zero'\")" - ] + "source": "m, x = make_model()\nshifted = (2 * x + 10).shift(time=2)\nshifted.isnull()", + "outputs": [ + { + "data": { + "text/plain": [ + " Size: 5B\n", + "array([ True, True, False, False, False])\n", + "Coordinates:\n", + " * time (time) int64 40B 0 1 2 3 4" + ], + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
<xarray.DataArray (time: 5)> Size: 5B\n",
+       "array([ True,  True, False, False, False])\n",
+       "Coordinates:\n",
+       "  * time     (time) int64 40B 0 1 2 3 4
" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "arithmetic-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## Arithmetic with shifted expressions\n", - "\n", - "This is where legacy and v1 diverge. When you do arithmetic on an expression\n", - "that already has NaN (from shift/where/reindex), the NaN is **internal** — it's\n", - "not user-supplied data at an API boundary.\n", - "\n", - "- **Legacy**: fills expression NaN with neutral elements before operating\n", - "- **v1**: lets IEEE NaN propagate — absent stays absent" - ] + "source": "---\n\n## Arithmetic on shifted expressions\n\nWhen you do arithmetic on an expression that already has NaN (from `shift`/`where`/`reindex`), the NaN is **internal** — not user-supplied.\n\n- **Legacy**: fills expression NaN with neutral elements before operating → can **revive** absent slots\n- **v1**: IEEE NaN propagation → absent stays absent" }, { "cell_type": "code", - "execution_count": null, "id": "arithmetic-legacy", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.553302Z", - "start_time": "2026-03-12T07:02:18.541098Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.975240Z", "iopub.status.busy": "2026-03-12T08:25:47.975181Z", "iopub.status.idle": "2026-03-12T08:25:47.983757Z", "shell.execute_reply": "2026-03-12T08:25:47.983582Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.720121Z", + "start_time": "2026-03-14T16:44:51.654874Z" } }, - "outputs": [], - "source": [ - "linopy.options[\"arithmetic_convention\"] = \"legacy\"\n", - "m, x = make_model()\n", - "\n", - "shifted = (2 * x + 10).shift(time=1)\n", - "print(\"=== LEGACY: shifted + 5 ===\")\n", - "result = shifted + 5\n", - "print(\"const: \", result.const.values)\n", - "print(\"coeffs:\", result.coeffs.squeeze().values)\n", - "print(\"isnull:\", result.isnull().values)\n", - "print(\"→ NaN const 
filled with 0, then +5 = 5. Slot looks alive!\")\n", - "\n", - "print(\"\\n=== LEGACY: shifted * 3 ===\")\n", - "result = shifted * 3\n", - "print(\"const: \", result.const.values)\n", - "print(\"coeffs:\", result.coeffs.squeeze().values)\n", - "print(\"isnull:\", result.isnull().values)\n", - "print(\"→ NaN filled with 0, then *3 = 0. Slot has zero coeff.\")" - ] + "source": "linopy.options[\"arithmetic_convention\"] = \"legacy\"\nm, x = make_model()\nshifted = (2 * x + 10).shift(time=1)\n\n# Legacy: NaN const filled with 0, then +5 = 5. Slot looks alive!\nshifted + 5", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 5]:\n", + "---------------------------\n", + "[0]: +5\n", + "[1]: +2 x[0] + 15\n", + "[2]: +2 x[1] + 15\n", + "[3]: +2 x[2] + 15\n", + "[4]: +2 x[3] + 15" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "arithmetic-v1", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.573084Z", - "start_time": "2026-03-12T07:02:18.562699Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:47.984647Z", "iopub.status.busy": "2026-03-12T08:25:47.984591Z", "iopub.status.idle": "2026-03-12T08:25:47.992694Z", "shell.execute_reply": "2026-03-12T08:25:47.992528Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.796832Z", + "start_time": "2026-03-14T16:44:51.753190Z" } }, - "outputs": [], - "source": [ - "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", - "m, x = make_model()\n", - "\n", - "shifted = (2 * x + 10).shift(time=1)\n", - "print(\"=== V1: shifted + 5 ===\")\n", - "result = shifted + 5\n", - "print(\"const: \", result.const.values)\n", - "print(\"coeffs:\", result.coeffs.squeeze().values)\n", - "print(\"isnull:\", result.isnull().values)\n", - "print(\"→ NaN + 5 = NaN. Absent slot stays absent. 
IEEE propagation.\")\n", - "\n", - "print(\"\\n=== V1: shifted * 3 ===\")\n", - "result = shifted * 3\n", - "print(\"const: \", result.const.values)\n", - "print(\"coeffs:\", result.coeffs.squeeze().values)\n", - "print(\"isnull:\", result.isnull().values)\n", - "print(\"→ NaN * 3 = NaN. Coeffs 0*3 = 0 (not NaN — coeffs FILL is 0).\")" - ] + "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm, x = make_model()\nshifted = (2 * x + 10).shift(time=1)\n\n# v1: NaN + 5 = NaN. Absent slot stays absent.\nshifted + 5", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 5]:\n", + "---------------------------\n", + "[0]: None\n", + "[1]: +2 x[0] + 15\n", + "[2]: +2 x[1] + 15\n", + "[3]: +2 x[2] + 15\n", + "[4]: +2 x[3] + 15" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "id": "eodco2pcrqn", "metadata": { "execution": { @@ -444,28 +873,16 @@ "iopub.status.busy": "2026-03-12T08:25:47.993467Z", "iopub.status.idle": "2026-03-12T08:25:48.002848Z", "shell.execute_reply": "2026-03-12T08:25:48.002675Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:37:06.445775Z", + "start_time": "2026-03-14T16:37:06.380780Z" } }, - "outputs": [], - "source": [ - "# Case 1: x + y.shift(1) — absent term, no poisoning\n", - "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", - "m, x = make_model()\n", - "y = m.add_variables(lower=0, coords=[pd.RangeIndex(5, name=\"time\")], name=\"y\")\n", - "\n", - "result = x + (1 * y).shift(time=1)\n", - "print(\"=== x + y.shift(1) ===\")\n", - "print(\"const: \", result.const.values)\n", - "print(\"isnull:\", result.isnull().values)\n", - "print(\"vars:\\n\", result.vars.values)\n", - "print(\"coeffs:\\n\", result.coeffs.values)\n", - "print(\"\\n→ time=0: x[0] is valid (coeff=1), y's term is absent (coeff=NaN, vars=-1).\")\n", - "print(\" const=0 because 
xr.sum(skipna=True) treats NaN as 0 in the sum.\")" - ] + "source": "---\n\n## Combining expressions with absent terms\n\nWhen two expressions are merged (e.g., `x + y.shift(1)`), each term is concatenated along the `_term` dimension. The constant is summed with `skipna=True` — NaN from one operand does **not** poison the other.\n\n**Key rule: absent terms don't poison valid terms at the same coordinate.**" }, { "cell_type": "code", - "execution_count": null, "id": "qfgxszizmcf", "metadata": { "execution": { @@ -473,21 +890,35 @@ "iopub.status.busy": "2026-03-12T08:25:48.003675Z", "iopub.status.idle": "2026-03-12T08:25:48.011267Z", "shell.execute_reply": "2026-03-12T08:25:48.011094Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.874274Z", + "start_time": "2026-03-14T16:44:51.821114Z" } }, - "outputs": [], - "source": [ - "# Case 2: x + y.shift(1) + 5 — scalar adds to const, absent term unaffected\n", - "result2 = x + (1 * y).shift(time=1) + 5\n", - "print(\"=== x + y.shift(1) + 5 ===\")\n", - "print(\"const: \", result2.const.values)\n", - "print(\"isnull:\", result2.isnull().values)\n", - "print(\"\\n→ time=0: const=5 (from x's 0 + scalar 5). 
y's absent term doesn't interfere.\")" - ] + "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm, x = make_model()\ny = m.add_variables(lower=0, coords=[pd.RangeIndex(5, name=\"time\")], name=\"y\")\n\n# x is valid everywhere, y.shift(1) is absent at time=0\n# → time=0 still has x's term, only y's term is absent\nx + (1 * y).shift(time=1)", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 5]:\n", + "---------------------------\n", + "[0]: +1 x[0]\n", + "[1]: +1 x[1] + 1 y[0]\n", + "[2]: +1 x[2] + 1 y[1]\n", + "[3]: +1 x[3] + 1 y[2]\n", + "[4]: +1 x[4] + 1 y[3]" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "mwjx9or4azm", "metadata": { "execution": { @@ -495,25 +926,35 @@ "iopub.status.busy": "2026-03-12T08:25:48.012093Z", "iopub.status.idle": "2026-03-12T08:25:48.020636Z", "shell.execute_reply": "2026-03-12T08:25:48.020460Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:51.956251Z", + "start_time": "2026-03-14T16:44:51.908789Z" } }, - "outputs": [], - "source": [ - "# Case 3: x + (y+5).shift(1) — shifted constant is LOST at the gap\n", - "result3 = x + (1 * y + 5).shift(time=1) + 5\n", - "print(\"=== x + (y+5).shift(1) + 5 ===\")\n", - "print(\"const: \", result3.const.values)\n", - "print(\"isnull:\", result3.isnull().values)\n", - "print(\"\\n→ time=0: const=5, NOT 10.\")\n", - "print(\" The +5 inside (y+5) was part of the shifted expression.\")\n", - "print(\" shift makes the ENTIRE expression absent at time=0 — including its constant.\")\n", - "print(\" So the shifted 5 is lost. Only the outer +5 survives.\")\n", - "print(\" time=1..4: const=10 (shifted 5 + outer 5).\")" - ] + "source": "# Shifted constant is LOST at the gap:\n# (y+5).shift makes the ENTIRE expression absent at time=0 — including its constant.\n# Only the outer +5 survives. 
time=1..4 get const=10 (shifted 5 + outer 5).\nx + (1 * y + 5).shift(time=1) + 5", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 5]:\n", + "---------------------------\n", + "[0]: +1 x[0] + 5\n", + "[1]: +1 x[1] + 1 y[0] + 10\n", + "[2]: +1 x[2] + 1 y[1] + 10\n", + "[3]: +1 x[3] + 1 y[2] + 10\n", + "[4]: +1 x[4] + 1 y[3] + 10" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "h9wto4skk5s", "metadata": { "execution": { @@ -521,311 +962,294 @@ "iopub.status.busy": "2026-03-12T08:25:48.021426Z", "iopub.status.idle": "2026-03-12T08:25:48.029464Z", "shell.execute_reply": "2026-03-12T08:25:48.029305Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:52.067099Z", + "start_time": "2026-03-14T16:44:52.002644Z" } }, - "outputs": [], - "source": [ - "# Case 4: both expressions shifted — fully absent coordinate\n", - "result4 = (1 * x).shift(time=1) + (1 * y).shift(time=1)\n", - "print(\"=== x.shift(1) + y.shift(1) ===\")\n", - "print(\"const: \", result4.const.values)\n", - "print(\"isnull:\", result4.isnull().values)\n", - "print(\"\\n→ time=0: ALL terms absent AND const=NaN → isnull=True.\")\n", - "print(\" This is the only way a coordinate becomes fully absent from merging.\")" - ] + "source": "# Both expressions shifted — all variable terms absent at time=0, but const=0 (not NaN)\n# because merge sums constants with fill_value=0. 
So isnull is False — it's a zero expression, not absent.\nresult = (1 * x).shift(time=1) + (1 * y).shift(time=1)\nprint(\"isnull:\", result.isnull().values)\nresult", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "isnull: [False False False False False]\n" + ] + }, + { + "data": { + "text/plain": [ + "LinearExpression [time: 5]:\n", + "---------------------------\n", + "[0]: +0\n", + "[1]: +1 x[0] + 1 y[0]\n", + "[2]: +1 x[1] + 1 y[1]\n", + "[3]: +1 x[2] + 1 y[2]\n", + "[4]: +1 x[3] + 1 y[3]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "j00yil0a95", "metadata": {}, - "source": [ - "### Summary: combining expressions with absent terms\n", - "\n", - "| Expression | const at time=0 | isnull at time=0 | Why |\n", - "|---|---|---|---|\n", - "| `x + y.shift(1)` | 0 | False | y's term absent, x valid, const sum skips NaN |\n", - "| `x + y.shift(1) + 5` | 5 | False | Same, then +5 on const |\n", - "| `x + (y+5).shift(1) + 5` | 5 | False | Shifted const (5) is lost — only outer +5 survives |\n", - "| `x.shift(1) + y.shift(1)` | NaN | True | ALL terms absent → coordinate absent |" - ] + "source": "### Summary\n\n| Expression | const at time=0 | isnull at time=0 | Why |\n|---|---|---|---|\n| `x + y.shift(1)` | 0 | False | y's term absent, x valid, const sum skips NaN |\n| `x + y.shift(1) + 5` | 5 | False | Same, then +5 on const |\n| `x + (y+5).shift(1) + 5` | 5 | False | Shifted const (5) is lost — only outer +5 survives |\n| `x.shift(1) + y.shift(1)` | 0 | False | All terms absent, but const=0 (merge sums with fill_value=0) |\n\nNote: `merge()` sums constants with `fill_value=0`, so combining two fully-absent expressions yields a zero expression (const=0), not an absent one (const=NaN). This is a design choice — the slot has no variables but a valid constant of 0." 
}, { "cell_type": "markdown", "id": "key-difference", "metadata": {}, - "source": [ - "### Key difference: scalar arithmetic on a single shifted expression\n", - "\n", - "| | Legacy | v1 |\n", - "|---|---|---|\n", - "| `shifted + 5` at absent slot | const=5 (alive!) | const=NaN (absent) |\n", - "| `shifted * 3` at absent slot | coeffs=0, const=0 | coeffs=NaN, const=NaN |\n", - "| `isnull()` after arithmetic | False (slot revived!) | True (slot stays absent) |\n", - "\n", - "Legacy can **revive** absent slots through scalar arithmetic. v1 cannot — once absent, always absent.\n", - "\n", - "---\n", - "\n", - "## Combining expressions with absent terms\n", - "\n", - "When two expressions are merged (e.g., `x + y.shift(1)`), each term is concatenated along the `_term` dimension. The constant is summed with `skipna=True` — so NaN from one operand does NOT poison the other.\n", - "\n", - "**Key rule: absent terms don't poison valid terms at the same coordinate.**" - ] + "source": "### Legacy vs v1: scalar arithmetic on shifted expressions\n\n| | Legacy | v1 |\n|---|---|---|\n| `shifted + 5` at absent slot | const=5 (alive!) | const=NaN (absent) |\n| `shifted * 3` at absent slot | coeffs=0, const=0 | coeffs=NaN, const=NaN |\n| `isnull()` after arithmetic | False (slot revived!) | True (slot stays absent) |\n\nLegacy can **revive** absent slots through scalar arithmetic. v1 cannot — once absent, always absent." }, { "cell_type": "markdown", "id": "constraint-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## Constraints from expressions with NaN\n", - "\n", - "What happens when an expression with absent slots (NaN) becomes a constraint?\n", - "The NaN in const propagates to the constraint RHS." - ] + "source": "---\n\n## Constraints from expressions with NaN\n\nAbsent slots in expressions propagate to constraint RHS. The preferred approach is to avoid NaN entirely using `isel` + positional alignment, or to filter with `.sel()`." 
}, { "cell_type": "code", - "execution_count": null, "id": "constraint-demo", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.598217Z", - "start_time": "2026-03-12T07:02:18.583043Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:48.030525Z", "iopub.status.busy": "2026-03-12T08:25:48.030458Z", "iopub.status.idle": "2026-03-12T08:25:48.043325Z", "shell.execute_reply": "2026-03-12T08:25:48.043168Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:52.181582Z", + "start_time": "2026-03-14T16:44:52.090723Z" } }, - "outputs": [], - "source": [ - "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", - "m, x = make_model()\n", - "\n", - "shifted = (1 * x).shift(time=1)\n", - "print(\"=== Shifted expression ===\")\n", - "print(\"vars: \", shifted.vars.squeeze().values)\n", - "print(\"const: \", shifted.const.values)\n", - "\n", - "# Under v1, x[1:] - x[:-1] requires explicit join because coords differ\n", - "# (time=[1,2,3,4] vs time=[0,1,2,3]).\n", - "# Use join=\"override\" to align by position:\n", - "print(\"\\n=== x[1:] - x[:-1] via isel + override join ===\")\n", - "x_now = 1 * x.isel(time=slice(1, None))\n", - "x_prev = 1 * x.isel(time=slice(None, -1))\n", - "ramp = x_now.sub(x_prev, join=\"override\")\n", - "print(\"const: \", ramp.const.values)\n", - "print(\"isnull:\", ramp.isnull().values)\n", - "print(\"→ No NaN at all — isel avoids the gap entirely.\")\n", - "\n", - "# But what if we use shifted expression directly as a constraint?\n", - "print(\"\\n=== Constraint from shifted expression (has NaN) ===\")\n", - "con = m.add_constraints(shifted <= 5, name=\"shifted_raw\")\n", - "print(\"constraint rhs: \", con.rhs.values)\n", - "print(\"constraint labels:\", con.labels.values)\n", - "print(\"constraint vars: \", con.vars.squeeze().values)\n", - "print(\"\\nNaN in RHS at time=0. 
Label is still assigned.\")\n", - "print(\"This will be caught by sanitize_missings() or check_has_nulls() at solve time.\")" - ] + "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm, x = make_model()\n\n# Preferred: isel + override avoids NaN entirely\nx_now = 1 * x.isel(time=slice(1, None))\nx_prev = 1 * x.isel(time=slice(None, -1))\nramp = x_now.sub(x_prev, join=\"override\")\nramp", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 4]:\n", + "---------------------------\n", + "[1]: +1 x[1] - 1 x[0]\n", + "[2]: +1 x[2] - 1 x[1]\n", + "[3]: +1 x[3] - 1 x[2]\n", + "[4]: +1 x[4] - 1 x[3]" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "constraint-fix", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.625188Z", - "start_time": "2026-03-12T07:02:18.607269Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:48.044184Z", "iopub.status.busy": "2026-03-12T08:25:48.044131Z", "iopub.status.idle": "2026-03-12T08:25:48.060912Z", "shell.execute_reply": "2026-03-12T08:25:48.060763Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:52.239486Z", + "start_time": "2026-03-14T16:44:52.219351Z" } }, - "outputs": [], - "source": [ - "# The correct approach: avoid the gap entirely with isel + override\n", - "m2, x2 = make_model()\n", - "\n", - "x_now = 1 * x2.isel(time=slice(1, None))\n", - "x_prev = 1 * x2.isel(time=slice(None, -1))\n", - "ramp = x_now.sub(x_prev, join=\"override\")\n", - "con = m2.add_constraints(ramp <= 5, name=\"ramp_isel\")\n", - "print(\"=== isel + override approach (preferred) ===\")\n", - "print(\"rhs: \", con.rhs.values)\n", - "print(\"labels:\", con.labels.values)\n", - "print(\"No NaN — constraint only exists where both operands exist.\")\n", - "\n", - "# Approach 2: sel with a validity mask on shifted expression\n", - "m3, x3 = make_model()\n", - 
"shifted = (1 * x3).shift(time=1)\n", - "valid = ~shifted.isnull()\n", - "con = m3.add_constraints(shifted.sel(time=valid) <= 5, name=\"shifted_sel\")\n", - "print(\"\\n=== sel approach (filter after shift) ===\")\n", - "print(\"rhs: \", con.rhs.values)\n", - "print(\"labels:\", con.labels.values)\n", - "print(\"Absent slot at time=0 removed by .sel().\")" - ] + "source": "# Alternative: filter absent slots with .sel() after shift\nshifted = (1 * x).shift(time=1)\nvalid = ~shifted.isnull()\nshifted.sel(time=valid)", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [time: 4]:\n", + "---------------------------\n", + "[1]: +1 x[0]\n", + "[2]: +1 x[1]\n", + "[3]: +1 x[2]\n", + "[4]: +1 x[3]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "sanitize-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## sanitize_missings\n", - "\n", - "Called at solve time (before writing to solver). Sets `labels=-1` where all vars are -1.\n", - "This catches constraints where the LHS has no variables — but does NOT catch NaN in RHS.\n", - "\n", - "```python\n", - "def sanitize_missings(self):\n", - " for name in self:\n", - " con = self[name]\n", - " contains_non_missing = (con.vars != -1).any(con.term_dim)\n", - " labels = self[name].labels.where(contains_non_missing, -1)\n", - "```\n", - "\n", - "After sanitize_missings, `check_has_nulls()` in `.flat` catches any remaining NaN in rhs/coeffs." - ] + "source": "---\n\n## Why fillna on expressions is unnecessary\n\nA common concern: `fillna()` works on parameters (plain DataArrays), but what about Variables and Expressions? 
Can NaN \"leak\" into arithmetic?\n\n**No — by design.** NaN in coefficients is a structural marker meaning \"this term doesn't exist,\" not a numeric gap that needs filling.\n\n- **Parameters**: `fillna(0)` or `fillna(1)` makes sense — these are numeric values with a context-dependent neutral element.\n- **Variables**: A decision variable either exists at a coordinate or it doesn't. There's no meaningful numeric fill for an absent variable.\n- **Expressions**: Absent terms (`vars=-1, coeffs=NaN`) are filtered out at solve time. They don't contribute to the constraint matrix. No fill needed.\n\nWhen expressions are combined via outer join, absent terms on one side don't poison valid terms on the other — each term in the `_term` dimension is independent." }, { "cell_type": "code", - "execution_count": null, "id": "sanitize-demo", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.644614Z", - "start_time": "2026-03-12T07:02:18.635530Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:48.061831Z", "iopub.status.busy": "2026-03-12T08:25:48.061768Z", "iopub.status.idle": "2026-03-12T08:25:48.069806Z", "shell.execute_reply": "2026-03-12T08:25:48.069649Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:52.336546Z", + "start_time": "2026-03-14T16:44:52.267752Z" } }, - "outputs": [], - "source": [ - "# Demonstrate what sanitize_missings does\n", - "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", - "m, x = make_model()\n", - "\n", - "shifted = (1 * x).shift(time=1)\n", - "# shifted at time=0: vars=-1 (no variable), const=NaN\n", - "# This means: LHS has no variables at time=0\n", - "\n", - "con = m.add_constraints(shifted <= 5, name=\"test\")\n", - "print(\"Before sanitize_missings:\")\n", - "print(\" labels:\", con.labels.values)\n", - "print(\" vars: \", con.vars.squeeze().values)\n", - "print(\" rhs: \", con.rhs.values)\n", - "\n", - "m.constraints.sanitize_missings()\n", - "con = m.constraints[\"test\"]\n", - "print(\"\\nAfter 
sanitize_missings:\")\n", - "print(\" labels:\", con.labels.values)\n", - "print(\" vars: \", con.vars.squeeze().values)\n", - "print(\" rhs: \", con.rhs.values)\n", - "print(\"\\n→ Label at time=0 set to -1 (masked out).\")\n", - "print(\"→ RHS still has NaN but that slot is now masked by labels=-1.\")" - ] + "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm = Model()\ntech_a = [\"wind\", \"solar\"]\ntech_b = [\"solar\", \"gas\"]\n\ncap_a = m.add_variables(lower=0, coords=[tech_a], name=\"cap_a\")\ncap_b = m.add_variables(lower=0, coords=[tech_b], name=\"cap_b\")\n\ncost_a = xr.DataArray([10, 20], coords=[(\"dim_0\", tech_a)])\ncost_b = xr.DataArray([15, 25], coords=[(\"dim_0\", tech_b)])\n\n# Outer join: absent terms at wind (no cap_b) and gas (no cap_a)\ncombined = (cap_a * cost_a).add(cap_b * cost_b, join=\"outer\")\ncombined", + "outputs": [ + { + "data": { + "text/plain": [ + "LinearExpression [dim_0: 3]:\n", + "----------------------------\n", + "[gas]: +25 cap_b[gas]\n", + "[solar]: +20 cap_a[solar] + 15 cap_b[solar]\n", + "[wind]: +10 cap_a[wind]" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null + }, + { + "cell_type": "code", + "id": "epev84h04pn", + "source": "# Further arithmetic: NaN coeffs stay NaN (absent stays absent), valid terms scale correctly\n# No coordinate is fully absent — isnull is False everywhere\nprint(\"isnull:\", combined.isnull().values)\ncombined * 2", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-14T16:44:52.385488Z", + "start_time": "2026-03-14T16:44:52.364277Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "isnull: [False False False]\n" + ] + }, + { + "data": { + "text/plain": [ + "LinearExpression [dim_0: 3]:\n", + "----------------------------\n", + "[gas]: +50 cap_b[gas]\n", + "[solar]: +40 cap_a[solar] + 30 cap_b[solar]\n", + "[wind]: +20 cap_a[wind]" + ] + }, + "execution_count": 17, + 
"metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": null + }, + { + "cell_type": "code", + "id": "o542kxv546", + "source": "# This solves correctly — absent terms are ignored in the constraint matrix\nm.add_constraints(cap_a <= 100, name=\"max_a\")\nm.add_constraints(cap_b <= 100, name=\"max_b\")\nm.add_objective(combined.sum())\nstatus, _ = m.solve(\"highs\")\nprint(f\"Status: {status}, Objective: {m.objective.value}\")", + "metadata": { + "ExecuteTime": { + "end_time": "2026-03-14T16:44:52.541506Z", + "start_time": "2026-03-14T16:44:52.409653Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "LP linopy-problem-7inffcby has 4 rows; 4 cols; 4 nonzeros\n", + "Coefficient ranges:\n", + " Matrix [1e+00, 1e+00]\n", + " Cost [1e+01, 2e+01]\n", + " Bound [0e+00, 0e+00]\n", + " RHS [1e+02, 1e+02]\n", + "Presolving model\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "Presolve reductions: rows 0(-4); columns 0(-4); nonzeros 0(-4) - Reduced to empty\n", + "Performed postsolve\n", + "Solving the original LP from the solution after postsolve\n", + "\n", + "Model name : linopy-problem-7inffcby\n", + "Model status : Optimal\n", + "Objective value : 0.0000000000e+00\n", + "P-D objective error : 0.0000000000e+00\n", + "HiGHS run time : 0.00\n", + "Status: ok, Objective: 0.0\n" + ] + } + ], + "execution_count": null }, { "cell_type": "markdown", "id": "fillvalue-header", "metadata": {}, - "source": [ - "---\n", - "\n", - "## FILL_VALUE internals\n", - "\n", - "The sentinel values used when structural operations create absent slots:\n", - "\n", - "| Type | Field | FILL_VALUE | Why |\n", - "|---|---|---|---|\n", - "| LinearExpression | `vars` | -1 | Integer sentinel (no variable) |\n", - "| LinearExpression | `coeffs` | NaN | Absent — not a numeric value |\n", - "| LinearExpression | 
`const` | NaN | Absent — needed for `isnull()` detection |\n", - "| Variable | `labels` | -1 | Integer sentinel (no variable) |\n", - "| Variable | `lower` | NaN | Absent bound |\n", - "| Variable | `upper` | NaN | Absent bound |\n", - "| Constraint | `labels` | -1 | Integer sentinel (no constraint) |\n", - "\n", - "All float fields use NaN for absence. Integer fields use -1. No implicit choice about neutral elements.\n", - "\n", - "### isnull() depends on const=NaN\n", - "\n", - "```python\n", - "def isnull(self):\n", - " return (self.vars == -1).all(helper_dims) & self.const.isnull()\n", - "```\n", - "\n", - "Both conditions must be true: all variable references are -1 AND the constant is NaN. This distinguishes \"absent\" from \"valid expression with zero constant.\"" - ] + "source": "---\n\n## FILL_VALUE internals\n\n| Type | Field | FILL_VALUE | Why |\n|---|---|---|---|\n| LinearExpression | `vars` | -1 | Integer sentinel (no variable) |\n| LinearExpression | `coeffs` | NaN | Absent — not a numeric value |\n| LinearExpression | `const` | NaN | Absent — needed for `isnull()` detection |\n| Variable | `labels` | -1 | Integer sentinel (no variable) |\n| Variable | `lower` | NaN | Absent bound |\n| Variable | `upper` | NaN | Absent bound |\n\nAll float fields use NaN for absence. Integer fields use -1." 
}, { "cell_type": "code", - "execution_count": null, "id": "fillvalue-demo", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.663083Z", - "start_time": "2026-03-12T07:02:18.654471Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:48.070762Z", "iopub.status.busy": "2026-03-12T08:25:48.070706Z", "iopub.status.idle": "2026-03-12T08:25:48.077412Z", "shell.execute_reply": "2026-03-12T08:25:48.077245Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:52.569251Z", + "start_time": "2026-03-14T16:44:52.561763Z" } }, - "outputs": [], - "source": [ - "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", - "m, x = make_model()\n", - "\n", - "expr = 2 * x + 10\n", - "shifted = expr.shift(time=1)\n", - "\n", - "print(\"=== FILL_VALUE in action ===\")\n", - "print(f\"vars FILL={FILL_VALUE['vars']}: \", shifted.vars.squeeze().values)\n", - "print(f\"coeffs FILL={FILL_VALUE['coeffs']}: \", shifted.coeffs.squeeze().values)\n", - "print(f\"const FILL={FILL_VALUE['const']}:\", shifted.const.values)\n", - "print()\n", - "print(\"isnull:\", shifted.isnull().values)\n", - "print(\"\\nSlot 0: vars=-1, coeffs=NaN, const=NaN → isnull=True\")\n", - "print(\"Slot 1: vars=0, coeffs=2, const=10 → isnull=False\")" - ] + "source": "print(\"FILL_VALUE:\", FILL_VALUE)", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "FILL_VALUE: {'vars': -1, 'coeffs': nan, 'const': nan}\n" + ] + } + ], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "id": "cleanup", "metadata": { - "ExecuteTime": { - "end_time": "2026-03-12T07:02:18.669891Z", - "start_time": "2026-03-12T07:02:18.668107Z" - }, "execution": { "iopub.execute_input": "2026-03-12T08:25:48.078298Z", "iopub.status.busy": "2026-03-12T08:25:48.078237Z", "iopub.status.idle": "2026-03-12T08:25:48.079577Z", "shell.execute_reply": "2026-03-12T08:25:48.079408Z" + }, + "ExecuteTime": { + "end_time": "2026-03-14T16:44:52.602913Z", + "start_time": 
"2026-03-14T16:44:52.597148Z" } }, + "source": "linopy.options.reset()", "outputs": [], - "source": [ - "linopy.options.reset()" - ] + "execution_count": null } ], "metadata": { From eb442be8118120997407d418647061ac6e429c3d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 14 Mar 2026 18:18:51 +0100 Subject: [PATCH 64/66] Fix merge() losing NaN const when all terms are absent When merging expressions where all input constants are NaN at a coordinate (e.g. x.shift(1) + y.shift(1) at time=0), the const sum with fill_value=0 turned NaN+NaN into 0+0=0, making the slot appear as a valid zero expression instead of absent. Detect all-NaN constants before filling and restore NaN after sum. Also streamline _nan-edge-cases notebook with less verbose output, more markdown, and a new section on why fillna on expressions is unnecessary for outer-join NaN. Co-Authored-By: Claude Opus 4.6 (1M context) --- linopy/expressions.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/linopy/expressions.py b/linopy/expressions.py index fbba3bb7..b99cc1d7 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -2633,8 +2633,15 @@ def merge( if dim == TERM_DIM: ds = xr.concat([d[["coeffs", "vars"]] for d in data], dim, **kwargs) + # Concat without fill to detect where all constants were NaN + raw_consts = xr.concat([d["const"] for d in data], dim, **kwargs) + all_const_nan = raw_consts.isnull().all(TERM_DIM) + # Sum with fill_value=0 so valid NaN + valid 5 = 5 (not NaN) subkwargs = {**kwargs, "fill_value": 0} const = xr.concat([d["const"] for d in data], dim, **subkwargs).sum(TERM_DIM) + # Restore NaN where all input constants were NaN (all terms absent) + if all_const_nan.any(): + const = const.where(~all_const_nan) ds = assign_multiindex_safe(ds, const=const) elif dim == FACTOR_DIM: ds = xr.concat([d[["vars"]] for d in data], dim, **kwargs) From 0b296cea6e81ffcf792a9fe8dd2e26ca84c90ea8 Mon Sep 17 00:00:00 2001 From: FBumann 
<117816358+FBumann@users.noreply.github.com> Date: Sat, 14 Mar 2026 19:45:03 +0100 Subject: [PATCH 65/66] Fix NaN propagation consistency and add fill_value API (#620) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix NaN propagation consistency and add fill_value API Bug fix: Variable.to_linexpr() now sets const=NaN where labels=-1 in v1 mode, so Variable and Expression paths produce identical results for shifted/masked variables. New features: - Variable.fillna(numeric) returns LinearExpression with constant fill - .add()/.sub()/.mul()/.div() accept fill_value= parameter for explicit NaN filling before the operation Design: bare operators (+, *, etc.) propagate NaN — absent stays absent. Users choose how to revive via .fillna(value) or .add(value, fill_value=). No implicit neutral element. Tests: 21 new tests in test_algebraic_properties.py covering NaN propagation, fillna, fill_value, and path consistency. Co-Authored-By: Claude Opus 4.6 (1M context) * Use additive identity (0) for NaN const fill in addition Addition/subtraction now fill NaN const with 0 before operating, both in v1 and legacy modes. This preserves associativity: (x.shift(1) + 5) + y == x.shift(1) + (5 + y) Multiplication/division still propagate NaN — no implicit fill, since the neutral element depends on context (0 kills, 1 preserves). Use .fillna(value) or .mul(v, fill_value=) for explicit control. Revised test_algebraic_properties.py as a thorough specification: 51 tests covering commutativity, associativity, distributivity, identity, negation, zero, absent slot behavior (add revives, mul propagates, merge semantics), fillna, and fill_value param. 
Co-Authored-By: Claude Opus 4.6 (1M context) --------- Co-authored-by: Claude Opus 4.6 (1M context) --- examples/_nan-edge-cases.ipynb | 276 ++++++++++------------------ linopy/expressions.py | 40 +++-- linopy/variables.py | 34 +++- test/test_algebraic_properties.py | 288 +++++++++++++++++++++++++++++- test/test_linear_expression.py | 18 +- 5 files changed, 446 insertions(+), 210 deletions(-) diff --git a/examples/_nan-edge-cases.ipynb b/examples/_nan-edge-cases.ipynb index c811a365..8b037a40 100644 --- a/examples/_nan-edge-cases.ipynb +++ b/examples/_nan-edge-cases.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "id": "intro", "metadata": {}, - "source": "# NaN Edge Cases: Legacy vs v1\n\nDevelopment notebook investigating how NaN behaves across linopy operations under both conventions.\n\n**Core principle (v1):** NaN means \"absent term\" — not a numeric value. It enters only through structural operations (`shift`, `where`, `reindex`, `mask=`) and propagates via IEEE semantics. Absent terms don't poison valid terms at the same coordinate.\n\n1. [Sources of NaN](#sources-of-nan)\n2. [isnull detection](#isnull-detection)\n3. [Arithmetic on shifted expressions](#arithmetic-on-shifted-expressions)\n4. [Combining expressions with absent terms](#combining-expressions-with-absent-terms)\n5. [Constraints from expressions with NaN](#constraints-from-expressions-with-nan)\n6. [Why fillna on expressions is unnecessary](#why-fillna-on-expressions-is-unnecessary)\n7. [FILL_VALUE internals](#fill_value-internals)" + "source": "# NaN Edge Cases: Legacy vs v1\n\nDevelopment notebook investigating how NaN behaves across linopy operations under both conventions.\n\n**Core principle (v1):** NaN means \"absent term\" — not a numeric value. It enters only through structural operations (`shift`, `where`, `reindex`, `mask=`) and propagates via IEEE semantics. Absent terms don't poison valid terms at the same coordinate.\n\n1. [Sources of NaN](#sources-of-nan)\n2. 
[isnull detection](#isnull-detection)\n3. [Arithmetic on shifted expressions](#arithmetic-on-shifted-expressions)\n4. [Combining expressions with absent terms](#combining-expressions-with-absent-terms)\n5. [Constraints from expressions with NaN](#constraints-from-expressions-with-nan)\n6. [Reviving absent slots with fillna and fill_value](#reviving-absent-slots)\n7. [FILL_VALUE internals](#fill_value-internals)" }, { "cell_type": "code", @@ -17,8 +17,8 @@ "shell.execute_reply": "2026-03-12T08:25:47.906090Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:50.978615Z", - "start_time": "2026-03-14T16:44:48.272788Z" + "end_time": "2026-03-14T17:35:55.585725Z", + "start_time": "2026-03-14T17:35:54.358954Z" } }, "source": "import warnings\n\nimport pandas as pd\nimport xarray as xr\n\nimport linopy\nfrom linopy import Model\nfrom linopy.config import LinopyDeprecationWarning\nfrom linopy.expressions import FILL_VALUE\n\nwarnings.filterwarnings(\"ignore\", category=LinopyDeprecationWarning)", @@ -28,7 +28,7 @@ "output_type": "stream", "text": [ "MindOpt 2.2.0 | 2e28db43, Aug 29 2025, 14:27:12 | arm64 - macOS 26.2\n", - "Start license validation (current time : 14-MAR-2026 17:44:50 UTC+0100).\n", + "Start license validation (current time : 14-MAR-2026 18:35:55 UTC+0100).\n", "[WARN ] No license file is found.\n", "[ERROR] No valid license was found. Please visit https://opt.aliyun.com/doc/latest/en/html/installation/license.html to apply for and set up your license.\n", "License validation terminated. 
Time : 0.000s\n", @@ -49,8 +49,8 @@ "shell.execute_reply": "2026-03-12T08:25:47.938258Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.019790Z", - "start_time": "2026-03-14T16:44:50.995727Z" + "end_time": "2026-03-14T17:35:55.594006Z", + "start_time": "2026-03-14T17:35:55.590699Z" } }, "source": "def make_model():\n m = Model()\n time = pd.RangeIndex(5, name=\"time\")\n x = m.add_variables(lower=0, coords=[time], name=\"x\")\n return m, x", @@ -74,8 +74,8 @@ "shell.execute_reply": "2026-03-12T08:25:47.945260Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.248456Z", - "start_time": "2026-03-14T16:44:51.045488Z" + "end_time": "2026-03-14T17:35:55.689874Z", + "start_time": "2026-03-14T17:35:55.603203Z" } }, "source": "m, x = make_model()\nexpr = 2 * x + 10\nexpr.shift(time=1)", @@ -110,8 +110,8 @@ "shell.execute_reply": "2026-03-12T08:25:47.947974Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.319384Z", - "start_time": "2026-03-14T16:44:51.288865Z" + "end_time": "2026-03-14T17:35:55.700844Z", + "start_time": "2026-03-14T17:35:55.694910Z" } }, "source": "# Variables also support shift — labels get -1 sentinel, bounds get NaN\nx.shift(time=1)", @@ -152,8 +152,8 @@ "shell.execute_reply": "2026-03-12T08:25:47.953891Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.398779Z", - "start_time": "2026-03-14T16:44:51.344805Z" + "end_time": "2026-03-14T17:35:55.723168Z", + "start_time": "2026-03-14T17:35:55.709576Z" } }, "source": "m, x = make_model()\n(2 * x + 10).roll(time=1)", @@ -194,8 +194,8 @@ "shell.execute_reply": "2026-03-12T08:25:47.959950Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.456507Z", - "start_time": "2026-03-14T16:44:51.430059Z" + "end_time": "2026-03-14T17:35:55.750248Z", + "start_time": "2026-03-14T17:35:55.738201Z" } }, "source": "m, x = make_model()\nmask = xr.DataArray([True, True, False, False, True], dims=[\"time\"])\n(2 * x + 10).where(mask)", @@ -236,8 +236,8 @@ "shell.execute_reply": 
"2026-03-12T08:25:47.967693Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.512019Z", - "start_time": "2026-03-14T16:44:51.482771Z" + "end_time": "2026-03-14T17:35:55.782002Z", + "start_time": "2026-03-14T17:35:55.768465Z" } }, "source": "m, x = make_model()\nexpr = 2 * x + 10\n\n# Expand to a larger index — new positions [5, 6] are absent\nexpr.reindex({\"time\": pd.RangeIndex(7, name=\"time\")})", @@ -280,8 +280,8 @@ "shell.execute_reply": "2026-03-12T08:25:47.974130Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.613696Z", - "start_time": "2026-03-14T16:44:51.561248Z" + "end_time": "2026-03-14T17:35:55.805454Z", + "start_time": "2026-03-14T17:35:55.785946Z" } }, "source": "m, x = make_model()\nshifted = (2 * x + 10).shift(time=2)\nshifted.isnull()", @@ -776,7 +776,7 @@ "
<xarray.DataArray (time: 5)> Size: 5B\n",
        "array([ True,  True, False, False, False])\n",
        "Coordinates:\n",
-       "  * time     (time) int64 40B 0 1 2 3 4
" + " * time (time) int64 40B 0 1 2 3 4" ] }, "execution_count": 8, @@ -790,7 +790,7 @@ "cell_type": "markdown", "id": "arithmetic-header", "metadata": {}, - "source": "---\n\n## Arithmetic on shifted expressions\n\nWhen you do arithmetic on an expression that already has NaN (from `shift`/`where`/`reindex`), the NaN is **internal** — not user-supplied.\n\n- **Legacy**: fills expression NaN with neutral elements before operating → can **revive** absent slots\n- **v1**: IEEE NaN propagation → absent stays absent" + "source": "---\n\n## Arithmetic on shifted expressions\n\nWhen you do arithmetic on an expression with absent slots (from `shift`/`where`/`reindex`):\n\n- **Addition/subtraction**: fills const with 0 (additive identity) before adding. This preserves associativity: `(a + b) + c == a + (b + c)`.\n- **Multiplication/division**: NaN propagates. No implicit fill — the \"right\" neutral element depends on context (0 kills, 1 preserves).\n\nLegacy mode fills all NaN with neutral elements for both add and mul." }, { "cell_type": "code", @@ -803,8 +803,8 @@ "shell.execute_reply": "2026-03-12T08:25:47.983582Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.720121Z", - "start_time": "2026-03-14T16:44:51.654874Z" + "end_time": "2026-03-14T17:35:55.830568Z", + "start_time": "2026-03-14T17:35:55.813713Z" } }, "source": "linopy.options[\"arithmetic_convention\"] = \"legacy\"\nm, x = make_model()\nshifted = (2 * x + 10).shift(time=1)\n\n# Legacy: NaN const filled with 0, then +5 = 5. Slot looks alive!\nshifted + 5", @@ -839,31 +839,22 @@ "shell.execute_reply": "2026-03-12T08:25:47.992528Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.796832Z", - "start_time": "2026-03-14T16:44:51.753190Z" + "end_time": "2026-03-14T17:35:55.861842Z", + "start_time": "2026-03-14T17:35:55.839520Z" } }, - "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm, x = make_model()\nshifted = (2 * x + 10).shift(time=1)\n\n# v1: NaN + 5 = NaN. 
Absent slot stays absent.\nshifted + 5", - "outputs": [ - { - "data": { - "text/plain": [ - "LinearExpression [time: 5]:\n", - "---------------------------\n", - "[0]: None\n", - "[1]: +2 x[0] + 15\n", - "[2]: +2 x[1] + 15\n", - "[3]: +2 x[2] + 15\n", - "[4]: +2 x[3] + 15" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm, x = make_model()\nshifted = (2 * x + 10).shift(time=1)\n\n# v1: addition fills const with 0 (additive identity), then adds 5\nshifted + 5", + "outputs": [], "execution_count": null }, + { + "cell_type": "code", + "id": "j9aln31mkog", + "source": "# v1: multiplication propagates NaN — absent stays absent\nshifted * 3", + "metadata": {}, + "execution_count": null, + "outputs": [] + }, { "cell_type": "markdown", "id": "eodco2pcrqn", @@ -892,29 +883,20 @@ "shell.execute_reply": "2026-03-12T08:25:48.011094Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.874274Z", - "start_time": "2026-03-14T16:44:51.821114Z" + "end_time": "2026-03-14T17:35:55.903304Z", + "start_time": "2026-03-14T17:35:55.878230Z" } }, - "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm, x = make_model()\ny = m.add_variables(lower=0, coords=[pd.RangeIndex(5, name=\"time\")], name=\"y\")\n\n# x is valid everywhere, y.shift(1) is absent at time=0\n# → time=0 still has x's term, only y's term is absent\nx + (1 * y).shift(time=1)", - "outputs": [ - { - "data": { - "text/plain": [ - "LinearExpression [time: 5]:\n", - "---------------------------\n", - "[0]: +1 x[0]\n", - "[1]: +1 x[1] + 1 y[0]\n", - "[2]: +1 x[2] + 1 y[1]\n", - "[3]: +1 x[3] + 1 y[2]\n", - "[4]: +1 x[4] + 1 y[3]" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } + "source": [ + "linopy.options[\"arithmetic_convention\"] = \"v1\"\n", + "m, x = make_model()\n", + "y = m.add_variables(lower=0, coords=[pd.RangeIndex(5, name=\"time\")], name=\"y\")\n", + 
"\n", + "# x is valid everywhere, y.shift(1) is absent at time=0\n", + "# → time=0 still has x's term, only y's term is absent\n", + "x + (1 * y).shift(time=1)" ], + "outputs": [], "execution_count": null }, { @@ -928,8 +910,8 @@ "shell.execute_reply": "2026-03-12T08:25:48.020460Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:51.956251Z", - "start_time": "2026-03-14T16:44:51.908789Z" + "end_time": "2026-03-14T17:35:55.937575Z", + "start_time": "2026-03-14T17:35:55.913528Z" } }, "source": "# Shifted constant is LOST at the gap:\n# (y+5).shift makes the ENTIRE expression absent at time=0 — including its constant.\n# Only the outer +5 survives. time=1..4 get const=10 (shifted 5 + outer 5).\nx + (1 * y + 5).shift(time=1) + 5", @@ -964,49 +946,25 @@ "shell.execute_reply": "2026-03-12T08:25:48.029305Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:52.067099Z", - "start_time": "2026-03-14T16:44:52.002644Z" + "end_time": "2026-03-14T17:35:55.968359Z", + "start_time": "2026-03-14T17:35:55.943359Z" } }, - "source": "# Both expressions shifted — all variable terms absent at time=0, but const=0 (not NaN)\n# because merge sums constants with fill_value=0. 
So isnull is False — it's a zero expression, not absent.\nresult = (1 * x).shift(time=1) + (1 * y).shift(time=1)\nprint(\"isnull:\", result.isnull().values)\nresult", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "isnull: [False False False False False]\n" - ] - }, - { - "data": { - "text/plain": [ - "LinearExpression [time: 5]:\n", - "---------------------------\n", - "[0]: +0\n", - "[1]: +1 x[0] + 1 y[0]\n", - "[2]: +1 x[1] + 1 y[1]\n", - "[3]: +1 x[2] + 1 y[2]\n", - "[4]: +1 x[3] + 1 y[3]" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], + "source": "# Both expressions shifted — time=0 is fully absent (all terms absent AND const=NaN)\nresult = (1 * x).shift(time=1) + (1 * y).shift(time=1)\nprint(\"isnull:\", result.isnull().values)\nresult", + "outputs": [], "execution_count": null }, { "cell_type": "markdown", "id": "j00yil0a95", "metadata": {}, - "source": "### Summary\n\n| Expression | const at time=0 | isnull at time=0 | Why |\n|---|---|---|---|\n| `x + y.shift(1)` | 0 | False | y's term absent, x valid, const sum skips NaN |\n| `x + y.shift(1) + 5` | 5 | False | Same, then +5 on const |\n| `x + (y+5).shift(1) + 5` | 5 | False | Shifted const (5) is lost — only outer +5 survives |\n| `x.shift(1) + y.shift(1)` | 0 | False | All terms absent, but const=0 (merge sums with fill_value=0) |\n\nNote: `merge()` sums constants with `fill_value=0`, so combining two fully-absent expressions yields a zero expression (const=0), not an absent one (const=NaN). This is a design choice — the slot has no variables but a valid constant of 0." 
+ "source": "### Summary\n\n| Expression | const at time=0 | isnull at time=0 | Why |\n|---|---|---|---|\n| `x + y.shift(1)` | 0 | False | y's term absent, x valid, const sum skips NaN |\n| `x + y.shift(1) + 5` | 5 | False | Same, then +5 on const |\n| `x + (y+5).shift(1) + 5` | 5 | False | Shifted const (5) is lost — only outer +5 survives |\n| `x.shift(1) + y.shift(1)` | NaN | True | All terms absent AND all consts NaN → fully absent |" }, { "cell_type": "markdown", "id": "key-difference", "metadata": {}, - "source": "### Legacy vs v1: scalar arithmetic on shifted expressions\n\n| | Legacy | v1 |\n|---|---|---|\n| `shifted + 5` at absent slot | const=5 (alive!) | const=NaN (absent) |\n| `shifted * 3` at absent slot | coeffs=0, const=0 | coeffs=NaN, const=NaN |\n| `isnull()` after arithmetic | False (slot revived!) | True (slot stays absent) |\n\nLegacy can **revive** absent slots through scalar arithmetic. v1 cannot — once absent, always absent." + "source": "### Legacy vs v1: scalar arithmetic on shifted expressions\n\n| | Legacy | v1 |\n|---|---|---|\n| `shifted + 5` at absent slot | const=5 (alive) | const=5 (alive, additive identity fill) |\n| `shifted * 3` at absent slot | coeffs=0, const=0 (alive) | coeffs=NaN, const=NaN (absent) |\n| `shifted - 5` at absent slot | const=-5 (alive) | const=-5 (alive, additive identity fill) |\n| `shifted / 2` at absent slot | coeffs=0, const=0 (alive) | coeffs=NaN, const=NaN (absent) |\n\n**v1 rule:** addition/subtraction use 0 as additive identity to fill NaN const. Multiplication/division propagate NaN — use `.fillna(value)` or `.mul(v, fill_value=)` for explicit control." 
}, { "cell_type": "markdown", @@ -1025,8 +983,8 @@ "shell.execute_reply": "2026-03-12T08:25:48.043168Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:52.181582Z", - "start_time": "2026-03-14T16:44:52.090723Z" + "end_time": "2026-03-14T17:35:55.998673Z", + "start_time": "2026-03-14T17:35:55.979979Z" } }, "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm, x = make_model()\n\n# Preferred: isel + override avoids NaN entirely\nx_now = 1 * x.isel(time=slice(1, None))\nx_prev = 1 * x.isel(time=slice(None, -1))\nramp = x_now.sub(x_prev, join=\"override\")\nramp", @@ -1060,8 +1018,8 @@ "shell.execute_reply": "2026-03-12T08:25:48.060763Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:52.239486Z", - "start_time": "2026-03-14T16:44:52.219351Z" + "end_time": "2026-03-14T17:35:56.020722Z", + "start_time": "2026-03-14T17:35:56.009573Z" } }, "source": "# Alternative: filter absent slots with .sel() after shift\nshifted = (1 * x).shift(time=1)\nvalid = ~shifted.isnull()\nshifted.sel(time=valid)", @@ -1088,7 +1046,7 @@ "cell_type": "markdown", "id": "sanitize-header", "metadata": {}, - "source": "---\n\n## Why fillna on expressions is unnecessary\n\nA common concern: `fillna()` works on parameters (plain DataArrays), but what about Variables and Expressions? Can NaN \"leak\" into arithmetic?\n\n**No — by design.** NaN in coefficients is a structural marker meaning \"this term doesn't exist,\" not a numeric gap that needs filling.\n\n- **Parameters**: `fillna(0)` or `fillna(1)` makes sense — these are numeric values with a context-dependent neutral element.\n- **Variables**: A decision variable either exists at a coordinate or it doesn't. There's no meaningful numeric fill for an absent variable.\n- **Expressions**: Absent terms (`vars=-1, coeffs=NaN`) are filtered out at solve time. They don't contribute to the constraint matrix. 
No fill needed.\n\nWhen expressions are combined via outer join, absent terms on one side don't poison valid terms on the other — each term in the `_term` dimension is independent." + "source": "---\n\n## Reviving absent slots\n\nAddition/subtraction automatically fill const with 0 (additive identity) — this is not arbitrary, it preserves associativity.\n\nFor multiplication/division, NaN propagates. To revive absent slots before multiplying:\n\n- **`.fillna(value)`** — fill before arithmetic. Works on both Variables and Expressions. `Variable.fillna(numeric)` returns a `LinearExpression`.\n- **`.mul(value, fill_value=)`** — fill and multiply in one step." }, { "cell_type": "code", @@ -1101,104 +1059,62 @@ "shell.execute_reply": "2026-03-12T08:25:48.069649Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:52.336546Z", - "start_time": "2026-03-14T16:44:52.267752Z" + "end_time": "2026-03-14T17:35:56.056198Z", + "start_time": "2026-03-14T17:35:56.030610Z" } }, - "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm = Model()\ntech_a = [\"wind\", \"solar\"]\ntech_b = [\"solar\", \"gas\"]\n\ncap_a = m.add_variables(lower=0, coords=[tech_a], name=\"cap_a\")\ncap_b = m.add_variables(lower=0, coords=[tech_b], name=\"cap_b\")\n\ncost_a = xr.DataArray([10, 20], coords=[(\"dim_0\", tech_a)])\ncost_b = xr.DataArray([15, 25], coords=[(\"dim_0\", tech_b)])\n\n# Outer join: absent terms at wind (no cap_b) and gas (no cap_a)\ncombined = (cap_a * cost_a).add(cap_b * cost_b, join=\"outer\")\ncombined", - "outputs": [ - { - "data": { - "text/plain": [ - "LinearExpression [dim_0: 3]:\n", - "----------------------------\n", - "[gas]: +25 cap_b[gas]\n", - "[solar]: +20 cap_a[solar] + 15 cap_b[solar]\n", - "[wind]: +10 cap_a[wind]" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], + "source": "linopy.options[\"arithmetic_convention\"] = \"v1\"\nm, x = make_model()\nshifted = (1 * x).shift(time=1)\n\n# Multiplication propagates 
NaN — absent stays absent\nshifted * 3", + "outputs": [], "execution_count": null }, { "cell_type": "code", "id": "epev84h04pn", - "source": "# Further arithmetic: NaN coeffs stay NaN (absent stays absent), valid terms scale correctly\n# No coordinate is fully absent — isnull is False everywhere\nprint(\"isnull:\", combined.isnull().values)\ncombined * 2", + "source": "# fillna(0) revives, then multiplication works\nshifted.fillna(0) * 3", "metadata": { "ExecuteTime": { - "end_time": "2026-03-14T16:44:52.385488Z", - "start_time": "2026-03-14T16:44:52.364277Z" + "end_time": "2026-03-14T17:35:56.068769Z", + "start_time": "2026-03-14T17:35:56.060454Z" } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "isnull: [False False False]\n" - ] - }, - { - "data": { - "text/plain": [ - "LinearExpression [dim_0: 3]:\n", - "----------------------------\n", - "[gas]: +50 cap_b[gas]\n", - "[solar]: +40 cap_a[solar] + 30 cap_b[solar]\n", - "[wind]: +20 cap_a[wind]" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "execution_count": null }, { "cell_type": "code", "id": "o542kxv546", - "source": "# This solves correctly — absent terms are ignored in the constraint matrix\nm.add_constraints(cap_a <= 100, name=\"max_a\")\nm.add_constraints(cap_b <= 100, name=\"max_b\")\nm.add_objective(combined.sum())\nstatus, _ = m.solve(\"highs\")\nprint(f\"Status: {status}, Objective: {m.objective.value}\")", + "source": "# Shorthand: .mul(value, fill_value=) does both in one step\nshifted.mul(3, fill_value=0)", "metadata": { "ExecuteTime": { - "end_time": "2026-03-14T16:44:52.541506Z", - "start_time": "2026-03-14T16:44:52.409653Z" + "end_time": "2026-03-14T17:35:56.127017Z", + "start_time": "2026-03-14T17:35:56.076931Z" } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "LP 
linopy-problem-7inffcby has 4 rows; 4 cols; 4 nonzeros\n", - "Coefficient ranges:\n", - " Matrix [1e+00, 1e+00]\n", - " Cost [1e+01, 2e+01]\n", - " Bound [0e+00, 0e+00]\n", - " RHS [1e+02, 1e+02]\n", - "Presolving model\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "Presolve reductions: rows 0(-4); columns 0(-4); nonzeros 0(-4) - Reduced to empty\n", - "Performed postsolve\n", - "Solving the original LP from the solution after postsolve\n", - "\n", - "Model name : linopy-problem-7inffcby\n", - "Model status : Optimal\n", - "Objective value : 0.0000000000e+00\n", - "P-D objective error : 0.0000000000e+00\n", - "HiGHS run time : 0.00\n", - "Status: ok, Objective: 0.0\n" - ] - } - ], + "outputs": [], "execution_count": null }, + { + "cell_type": "code", + "id": "3vl823ewxsx", + "source": "# Variable.fillna(numeric) returns a LinearExpression\nx.shift(time=1).fillna(0)", + "metadata": {}, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "id": "bpifee3jcir", + "source": "### Outer join with fill_value\n\nWhen combining expressions with mismatched coordinates, absent terms on each side don't poison valid terms. 
The outer join preserves all coordinates.", + "metadata": {} + }, + { + "cell_type": "code", + "id": "4psrcv8pjn8", + "source": "m = Model()\ntech_a, tech_b = [\"wind\", \"solar\"], [\"solar\", \"gas\"]\ncap_a = m.add_variables(lower=0, coords=[tech_a], name=\"cap_a\")\ncap_b = m.add_variables(lower=0, coords=[tech_b], name=\"cap_b\")\ncost_a = xr.DataArray([10, 20], coords=[(\"dim_0\", tech_a)])\ncost_b = xr.DataArray([15, 25], coords=[(\"dim_0\", tech_b)])\n\n# Outer join: each tech keeps its valid terms, absent terms are ignored at solve time\ncombined = (cap_a * cost_a).add(cap_b * cost_b, join=\"outer\")\nprint(\"isnull:\", combined.isnull().values)\ncombined", + "metadata": {}, + "execution_count": null, + "outputs": [] + }, { "cell_type": "markdown", "id": "fillvalue-header", @@ -1216,8 +1132,8 @@ "shell.execute_reply": "2026-03-12T08:25:48.077245Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:52.569251Z", - "start_time": "2026-03-14T16:44:52.561763Z" + "end_time": "2026-03-14T17:35:56.142163Z", + "start_time": "2026-03-14T17:35:56.139464Z" } }, "source": "print(\"FILL_VALUE:\", FILL_VALUE)", @@ -1243,8 +1159,8 @@ "shell.execute_reply": "2026-03-12T08:25:48.079408Z" }, "ExecuteTime": { - "end_time": "2026-03-14T16:44:52.602913Z", - "start_time": "2026-03-14T16:44:52.597148Z" + "end_time": "2026-03-14T17:35:56.148607Z", + "start_time": "2026-03-14T17:35:56.146368Z" } }, "source": "linopy.options.reset()", diff --git a/linopy/expressions.py b/linopy/expressions.py index b99cc1d7..d09916e7 100644 --- a/linopy/expressions.py +++ b/linopy/expressions.py @@ -639,15 +639,17 @@ def _add_constant( "Constant contains NaN values. Use .fillna() to handle " "missing values before arithmetic operations." 
) - const = self.const.fillna(0) + other if is_legacy else self.const + other + const = self.const.fillna(0) + other return self.assign(const=const) da = as_dataarray(other, coords=self.coords, dims=self.coord_dims) self_const, da, needs_data_reindex = self._align_constant( da, fill_value=0, join=join ) + # Always fill self_const with 0 (additive identity) to stay + # consistent with merge() and preserve associativity. + self_const = self_const.fillna(0) if is_legacy: da = da.fillna(0) - self_const = self_const.fillna(0) elif da.isnull().any(): raise ValueError( "Constant contains NaN values. Use .fillna() to handle " @@ -786,6 +788,7 @@ def add( self: GenericExpression, other: SideLike, join: JoinOptions | None = None, + fill_value: float | None = None, ) -> GenericExpression | QuadraticExpression: """ Add an expression to others. @@ -798,22 +801,27 @@ def add( How to align coordinates. One of "outer", "inner", "left", "right", "exact", "override". When None (default), uses the current default behavior. + fill_value : float, optional + Fill NaN in the expression's constant before adding. Useful + for reviving absent slots with a defined value. 
""" + expr = self.fillna(fill_value) if fill_value is not None else self if join is None: - return self.__add__(other) + return expr.__add__(other) if isinstance(other, SUPPORTED_CONSTANT_TYPES): - return self._add_constant(other, join=join) + return expr._add_constant(other, join=join) other = as_expression(other, model=self.model, dims=self.coord_dims) if isinstance(other, LinearExpression) and isinstance( - self, QuadraticExpression + expr, QuadraticExpression ): other = other.to_quadexpr() - return merge([self, other], cls=self.__class__, join=join) # type: ignore[list-item] + return merge([expr, other], cls=self.__class__, join=join) # type: ignore[list-item] def sub( self: GenericExpression, other: SideLike, join: JoinOptions | None = None, + fill_value: float | None = None, ) -> GenericExpression | QuadraticExpression: """ Subtract others from expression. @@ -826,13 +834,16 @@ def sub( How to align coordinates. One of "outer", "inner", "left", "right", "exact", "override". When None (default), uses the current default behavior. + fill_value : float, optional + Fill NaN in the expression's constant before subtracting. """ - return self.add(-other, join=join) + return self.add(-other, join=join, fill_value=fill_value) def mul( self: GenericExpression, other: SideLike, join: JoinOptions | None = None, + fill_value: float | None = None, ) -> GenericExpression | QuadraticExpression: """ Multiply the expr by a factor. @@ -845,19 +856,23 @@ def mul( How to align coordinates. One of "outer", "inner", "left", "right", "exact", "override". When None (default), uses the current default behavior. + fill_value : float, optional + Fill NaN in the expression's constant before multiplying. 
""" + expr = self.fillna(fill_value) if fill_value is not None else self if join is None: - return self.__mul__(other) + return expr.__mul__(other) if isinstance(other, SUPPORTED_EXPRESSION_TYPES): raise TypeError( "join parameter is not supported for expression-expression multiplication" ) - return self._multiply_by_constant(other, join=join) + return expr._multiply_by_constant(other, join=join) def div( self: GenericExpression, other: VariableLike | ConstantLike, join: JoinOptions | None = None, + fill_value: float | None = None, ) -> GenericExpression | QuadraticExpression: """ Divide the expr by a factor. @@ -870,16 +885,19 @@ def div( How to align coordinates. One of "outer", "inner", "left", "right", "exact", "override". When None (default), uses the current default behavior. + fill_value : float, optional + Fill NaN in the expression's constant before dividing. """ + expr = self.fillna(fill_value) if fill_value is not None else self if join is None: - return self.__div__(other) + return expr.__div__(other) if isinstance(other, SUPPORTED_EXPRESSION_TYPES): raise TypeError( "unsupported operand type(s) for /: " f"{type(self)} and {type(other)}. " "Non-linear expressions are not yet supported." 
) - return self._divide_by_constant(other, join=join) + return expr._divide_by_constant(other, join=join) def le( self: GenericExpression, diff --git a/linopy/variables.py b/linopy/variables.py index fec08e50..14c2d832 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -322,6 +322,15 @@ def to_linexpr( ds = Dataset({"coeffs": coefficient, "vars": self.labels}).expand_dims( TERM_DIM, -1 ) + # In v1 mode, set const=NaN where the variable is absent so that + # absence propagates through arithmetic (consistent with expression path) + if options["arithmetic_convention"] == "v1": + absent = self.labels == -1 + if absent.any(): + const = DataArray( + np.where(absent, np.nan, 0.0), coords=self.labels.coords + ) + ds = ds.assign(const=const) return expressions.LinearExpression(ds, self.model) def __repr__(self) -> str: @@ -1144,19 +1153,30 @@ def where( def fillna( self, - fill_value: ScalarVariable | dict[str, str | float | int] | Variable | Dataset, - ) -> Variable: + fill_value: int + | float + | ScalarVariable + | dict[str, str | float | int] + | Variable + | Dataset, + ) -> Variable | expressions.LinearExpression: """ - Fill missing values with a variable. + Fill missing values. - This operation call ``xarray.DataArray.fillna`` but ensures preserving - the linopy.Variable type. + When ``fill_value`` is numeric, absent variable slots are replaced + with that constant and a :class:`LinearExpression` is returned + (since a constant is not a variable). When ``fill_value`` is a + Variable, the result stays a Variable. Parameters ---------- - fill_value : Variable/ScalarVariable - Variable to use for filling. + fill_value : numeric, Variable, or ScalarVariable + Value to use for filling. Numeric values produce a + LinearExpression; Variable values produce a Variable. 
""" + if isinstance(fill_value, int | float | np.integer | np.floating): + expr = self.to_linexpr() + return expr.fillna(fill_value) return self.where(~self.isnull(), fill_value) def ffill(self, dim: str, limit: None = None) -> Variable: diff --git a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py index 09ee0849..be5669bd 100644 --- a/test/test_algebraic_properties.py +++ b/test/test_algebraic_properties.py @@ -1,7 +1,9 @@ """ Algebraic properties of linopy arithmetic. -All standard algebraic laws should hold for linopy expressions. +All standard algebraic laws should hold for linopy expressions, +including in the presence of absent slots (NaN from shift/where/reindex). + This file serves as both specification and test suite. Notation: @@ -9,6 +11,7 @@ g[A,B] — linopy variable with dimensions A and B c[B] — constant (DataArray) with dimension B s — scalar (int/float) + xs — x.shift(time=1), variable with absent slot SPECIFICATION ============= @@ -19,11 +22,11 @@ 2. Associativity (a + b) + c == a + (b + c) for any linopy operands a, b, c - Including mixed: (x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B]) + Including with absent slots: (xs + s) + y == xs + (s + y) 3. Distributivity c * (a + b) == c*a + c*b for constant c, linopy operands a, b - s * (a + b) == s*a + s*b for scalar s + Including with absent slots: s * (xs + c) == s*xs + s*c 4. Identity a + 0 == a additive identity @@ -35,6 +38,26 @@ 6. Zero a * 0 == 0 multiplication by zero + +7. NaN / absent slot behavior + Addition uses additive identity (0) to fill NaN const: + xs + s revives absent slot with const=s + xs - s revives absent slot with const=-s + Multiplication propagates NaN: + xs * s keeps absent slot absent + xs / s keeps absent slot absent + Merge (expression + expression): + xs + y — absent x term doesn't poison valid y term + xs + ys — fully absent when ALL terms absent + Variable and expression paths are consistent. + +8. 
fillna + Variable.fillna(numeric) returns LinearExpression + Expression.fillna(value) fills const at absent slots + +9. Named methods with fill_value + .add(v, fill_value=f) fills const before adding + .mul(v, fill_value=f) fills const before multiplying """ from __future__ import annotations @@ -133,18 +156,26 @@ def assert_linequal(a: LinearExpression, b: LinearExpression) -> None: class TestCommutativity: - def test_add_expr_expr(self, x: Variable, y: Variable) -> None: + def test_add_var_var(self, x: Variable, y: Variable) -> None: """X + y == y + x""" assert_linequal(x + y, y + x) - def test_mul_expr_constant(self, g: Variable, c: xr.DataArray) -> None: + def test_mul_var_constant(self, g: Variable, c: xr.DataArray) -> None: """G * c == c * g""" assert_linequal(g * c, c * g) - def test_add_expr_constant(self, g: Variable, c: xr.DataArray) -> None: + def test_add_var_constant(self, g: Variable, c: xr.DataArray) -> None: """G + c == c + g""" assert_linequal(g + c, c + g) + def test_add_var_scalar(self, x: Variable) -> None: + """X + 5 == 5 + x""" + assert_linequal(x + 5, 5 + x) + + def test_mul_var_scalar(self, x: Variable) -> None: + """X * 3 == 3 * x""" + assert_linequal(x * 3, 3 * x) + # ============================================================ # 2. 
Associativity @@ -160,6 +191,30 @@ def test_add_with_constant(self, x: Variable, g: Variable, c: xr.DataArray) -> N """(x[A] + c[B]) + g[A,B] == x[A] + (c[B] + g[A,B])""" assert_linequal((x + c) + g, x + (c + g)) + def test_add_shifted_scalar_var(self, x: Variable, y: Variable) -> None: + """(x.shift(1) + 5) + y == x.shift(1) + (5 + y)""" + lhs = (x.shift(time=1) + 5) + y + rhs = x.shift(time=1) + (5 + y) + assert_linequal(lhs, rhs) + + def test_add_shifted_scalar_var_reordered(self, x: Variable, y: Variable) -> None: + """(x.shift(1) + y) + 5 == x.shift(1) + (y + 5)""" + lhs = (x.shift(time=1) + y) + 5 + rhs = x.shift(time=1) + (y + 5) + assert_linequal(lhs, rhs) + + def test_add_three_scalars_shifted(self, x: Variable) -> None: + """(x.shift(1) + 3) + 7 == x.shift(1) + 10""" + lhs = (x.shift(time=1) + 3) + 7 + rhs = x.shift(time=1) + 10 + assert_linequal(lhs, rhs) + + def test_sub_shifted_scalar_var(self, x: Variable, y: Variable) -> None: + """(x.shift(1) - 5) + y == x.shift(1) + (y - 5)""" + lhs = (x.shift(time=1) - 5) + y + rhs = x.shift(time=1) + (y - 5) + assert_linequal(lhs, rhs) + # ============================================================ # 3. Distributivity @@ -181,6 +236,18 @@ def test_constant_mixed_dims( """c[B] * (x[A] + g[A,B]) == c*x + c*g""" assert_linequal(c * (x + g), c * x + c * g) + def test_scalar_shifted_add_constant(self, x: Variable) -> None: + """3 * (x.shift(1) + 5) == 3*x.shift(1) + 15""" + lhs = 3 * (x.shift(time=1) + 5) + rhs = 3 * x.shift(time=1) + 15 + assert_linequal(lhs, rhs) + + def test_scalar_shifted_add_var(self, x: Variable, y: Variable) -> None: + """3 * (x.shift(1) + y) == 3*x.shift(1) + 3*y""" + lhs = 3 * (x.shift(time=1) + y) + rhs = 3 * x.shift(time=1) + 3 * y + assert_linequal(lhs, rhs) + # ============================================================ # 4. 
Identity @@ -201,6 +268,12 @@ def test_multiplicative(self, x: Variable) -> None: assert isinstance(result, LinearExpression) np.testing.assert_array_equal(result.coeffs.squeeze().values, [1, 1, 1]) + def test_additive_shifted(self, x: Variable) -> None: + """x.shift(1) + 0 revives absent slot as zero expression.""" + result = x.shift(time=1) + 0 + assert not result.isnull().values[0] + assert result.const.values[0] == 0 + # ============================================================ # 5. Negation @@ -235,3 +308,206 @@ def test_multiplication_by_zero(self, x: Variable) -> None: """X * 0 has zero coefficients""" result = x * 0 assert (result.coeffs == 0).all() + + +# ============================================================ +# 7. NaN / absent slot behavior +# ============================================================ + + +class TestAbsentSlotAddition: + """Addition fills const with 0 (additive identity) → revives absent slots.""" + + def test_add_scalar_revives(self, x: Variable) -> None: + result = x.shift(time=1) + 5 + assert not result.isnull().values[0] + assert result.const.values[0] == 5 + + def test_add_array_revives(self, x: Variable) -> None: + arr = xr.DataArray([10.0, 20.0, 30.0], dims=["time"]) + result = (1 * x).shift(time=1) + arr + assert not result.isnull().values[0] + assert result.const.values[0] == 10.0 + + def test_sub_scalar_revives(self, x: Variable) -> None: + result = x.shift(time=1) - 5 + assert not result.isnull().values[0] + assert result.const.values[0] == -5 + + def test_add_zero_revives(self, x: Variable) -> None: + """+ 0 revives to a zero expression (not absent).""" + result = x.shift(time=1) + 0 + assert not result.isnull().values[0] + assert result.const.values[0] == 0 + + def test_variable_and_expression_paths_consistent_add(self, x: Variable) -> None: + var_result = x.shift(time=1) + 5 + expr_result = (1 * x).shift(time=1) + 5 + np.testing.assert_array_equal( + var_result.isnull().values, expr_result.isnull().values + ) + 
np.testing.assert_array_equal(var_result.const.values, expr_result.const.values) + + +class TestAbsentSlotMultiplication: + """Multiplication propagates NaN → absent stays absent.""" + + def test_mul_scalar_propagates(self, x: Variable) -> None: + result = x.shift(time=1) * 3 + assert result.isnull().values[0] + assert not result.isnull().values[1] + + def test_mul_array_propagates(self, x: Variable) -> None: + arr = xr.DataArray([2.0, 2.0, 2.0], dims=["time"]) + result = (1 * x).shift(time=1) * arr + assert result.isnull().values[0] + + def test_div_scalar_propagates(self, x: Variable) -> None: + result = (1 * x).shift(time=1) / 2 + assert result.isnull().values[0] + + def test_variable_and_expression_paths_consistent_mul(self, x: Variable) -> None: + var_result = x.shift(time=1) * 3 + expr_result = (1 * x).shift(time=1) * 3 + np.testing.assert_array_equal( + var_result.isnull().values, expr_result.isnull().values + ) + + +class TestAbsentSlotMerge: + """Merging expressions: absent terms don't poison valid terms.""" + + def test_partial_absent(self, x: Variable, y: Variable) -> None: + """X + y.shift(1): x is valid everywhere → no absent slots.""" + result = x + (1 * y).shift(time=1) + assert not result.isnull().any() + + def test_all_absent(self, x: Variable, y: Variable) -> None: + """x.shift(1) + y.shift(1): all terms absent at time=0 → absent.""" + result = (1 * x).shift(time=1) + (1 * y).shift(time=1) + assert result.isnull().values[0] + assert not result.isnull().values[1] + + def test_shifted_const_lost(self, x: Variable, y: Variable) -> None: + """X + (y+5).shift(1): shifted constant is lost at the gap.""" + result = x + (1 * y + 5).shift(time=1) + # time=0: only x's const (0), shifted 5 is lost + assert result.const.values[0] == 0 + # time=1: both consts survive (0 + 5 = 5) + assert result.const.values[1] == 5 + + +class TestAbsentSlotMixed: + """Combined add/mul with absent slots.""" + + def test_add_then_mul(self, x: Variable) -> None: + """(x.shift(1) 
+ 5) * 3 → +15 at absent slot.""" + result = (x.shift(time=1) + 5) * 3 + assert not result.isnull().values[0] + assert result.const.values[0] == 15 + + def test_mul_then_add(self, x: Variable) -> None: + """x.shift(1) * 3 + 5 → +5 at absent slot.""" + result = x.shift(time=1) * 3 + 5 + assert not result.isnull().values[0] + assert result.const.values[0] == 5 + + def test_where_add_revives(self, x: Variable) -> None: + mask = xr.DataArray([True, False, True], dims=["time"]) + result = (1 * x).where(mask) + 10 + assert not result.isnull().any() + assert result.const.values[1] == 10 + + def test_where_mul_propagates(self, x: Variable) -> None: + mask = xr.DataArray([True, False, True], dims=["time"]) + result = (1 * x).where(mask) * 3 + assert not result.isnull().values[0] + assert result.isnull().values[1] + assert not result.isnull().values[2] + + +# ============================================================ +# 8. fillna +# ============================================================ + + +class TestFillNA: + """fillna revives absent slots with explicit values.""" + + def test_variable_fillna_numeric_returns_expression(self, x: Variable) -> None: + result = x.shift(time=1).fillna(0) + assert isinstance(result, LinearExpression) + + def test_variable_fillna_revives(self, x: Variable) -> None: + result = x.shift(time=1).fillna(0) + assert not result.isnull().any() + assert result.const.values[0] == 0 + + def test_variable_fillna_custom_value(self, x: Variable) -> None: + result = x.shift(time=1).fillna(42) + assert result.const.values[0] == 42 + assert result.const.values[1] == 0 # valid slots unaffected + + def test_expression_fillna_revives(self, x: Variable) -> None: + result = (1 * x).shift(time=1).fillna(0) + 5 + assert not result.isnull().any() + assert result.const.values[0] == 5 + + def test_variable_fillna_variable_returns_variable( + self, x: Variable, y: Variable + ) -> None: + result = x.shift(time=1).fillna(y) + assert isinstance(result, Variable) + + 
def test_fillna_then_add_equals_fillna_sum(self, x: Variable) -> None: + """fillna(0) + 5 == fillna(5) at absent slots.""" + a = (1 * x).shift(time=1).fillna(0) + 5 + b = (1 * x).shift(time=1).fillna(5) + assert a.const.values[0] == 5 + assert b.const.values[0] == 5 + + +# ============================================================ +# 9. Named methods with fill_value +# ============================================================ + + +class TestFillValueParam: + """Named methods (.add, .sub, .mul, .div) accept fill_value.""" + + def test_add_fill_value(self, x: Variable) -> None: + expr = (1 * x).shift(time=1) + result = expr.add(5, fill_value=0) + assert not result.isnull().any() + assert result.const.values[0] == 5 + + def test_sub_fill_value(self, x: Variable) -> None: + expr = (1 * x).shift(time=1) + result = expr.sub(5, fill_value=0) + assert not result.isnull().any() + assert result.const.values[0] == -5 + + def test_mul_fill_value(self, x: Variable) -> None: + expr = (1 * x).shift(time=1) + result = expr.mul(3, fill_value=0) + assert not result.isnull().any() + assert result.const.values[0] == 0 + + def test_div_fill_value(self, x: Variable) -> None: + expr = (1 * x).shift(time=1) + result = expr.div(2, fill_value=0) + assert not result.isnull().any() + assert result.const.values[0] == 0 + + def test_add_without_fill_value_still_revives(self, x: Variable) -> None: + """add() always fills const with 0 (additive identity).""" + expr = (1 * x).shift(time=1) + result = expr.add(5) + assert not result.isnull().values[0] + assert result.const.values[0] == 5 + + def test_fill_value_only_affects_absent(self, x: Variable) -> None: + expr = (1 * x).shift(time=1) + result = expr.add(5, fill_value=0) + assert result.const.values[1] == 5 # valid slot: 0 + 5 + assert result.coeffs.values[1, 0] == 1 # coeff unchanged diff --git a/test/test_linear_expression.py b/test/test_linear_expression.py index 49a48382..90163e4b 100644 --- a/test/test_linear_expression.py +++ 
b/test/test_linear_expression.py @@ -1308,10 +1308,12 @@ def test_shifted_expr_add_scalar_filled(self, v: Variable) -> None: assert result.const.values[0] == 5.0 @pytest.mark.v1_only - def test_shifted_expr_add_scalar_propagates(self, v: Variable) -> None: + def test_shifted_expr_add_scalar_revives(self, v: Variable) -> None: + """Addition fills const with 0 (additive identity) then adds.""" expr = (1 * v).shift(dim_2=1) result = expr + 5 - assert np.isnan(result.const.values[0]) + assert not np.isnan(result.const.values[0]) + assert result.const.values[0] == 5.0 @pytest.mark.legacy_only def test_shifted_expr_mul_scalar_filled(self, v: Variable) -> None: @@ -1335,11 +1337,13 @@ def test_shifted_expr_add_array(self, v: Variable) -> None: assert result.const.values[0] == 0.0 @pytest.mark.v1_only - def test_shifted_expr_add_array_propagates(self, v: Variable) -> None: + def test_shifted_expr_add_array_revives(self, v: Variable) -> None: + """Addition fills const with 0 (additive identity) then adds.""" arr = np.arange(v.sizes["dim_2"], dtype=float) expr = (1 * v).shift(dim_2=1) result = expr + arr - assert np.isnan(result.const.values[0]) + assert not np.isnan(result.const.values[0]) + assert result.const.values[0] == 0.0 @pytest.mark.legacy_only def test_shifted_expr_mul_array(self, v: Variable) -> None: @@ -1377,10 +1381,12 @@ def test_shifted_expr_sub_scalar(self, v: Variable) -> None: assert result.const.values[0] == -3.0 @pytest.mark.v1_only - def test_shifted_expr_sub_scalar_propagates(self, v: Variable) -> None: + def test_shifted_expr_sub_scalar_revives(self, v: Variable) -> None: + """Subtraction fills const with 0 (additive identity) then subtracts.""" expr = (1 * v).shift(dim_2=1) result = expr - 3 - assert np.isnan(result.const.values[0]) + assert not np.isnan(result.const.values[0]) + assert result.const.values[0] == -3.0 @pytest.mark.legacy_only def test_shifted_expr_div_array(self, v: Variable) -> None: From 83a15a6c9f00277208176a44f2265651121ab57e Mon 
Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 14 Mar 2026 20:22:11 +0100 Subject: [PATCH 66/66] Fix NaN propagation consistency, add fill_value API, extend algebraic tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NaN handling now preserves algebraic conventions (associativity, distributivity, commutativity): - Addition/subtraction fill const with 0 (additive identity), preserving (a + b) + c == a + (b + c) - Multiplication/division propagate NaN — user decides via .fillna(value) or .mul(v, fill_value=) - Variable.to_linexpr() sets coeffs=NaN and const=NaN at absent slots in v1 mode (consistent with expression path) - merge() preserves const=NaN when all input constants are NaN - Variable.fillna(numeric) returns LinearExpression - .add()/.sub()/.mul()/.div() accept fill_value= parameter 62 algebraic property tests covering all conventions including absent slots, division, subtraction, and multi-dimensional cases. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- linopy/variables.py | 16 ++++-- test/test_algebraic_properties.py | 90 +++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 4 deletions(-) diff --git a/linopy/variables.py b/linopy/variables.py index 14c2d832..2621c045 100644 --- a/linopy/variables.py +++ b/linopy/variables.py @@ -322,15 +322,23 @@ def to_linexpr( ds = Dataset({"coeffs": coefficient, "vars": self.labels}).expand_dims( TERM_DIM, -1 ) - # In v1 mode, set const=NaN where the variable is absent so that - # absence propagates through arithmetic (consistent with expression path) + # In v1 mode, set coeffs=NaN and const=NaN where the variable is + # absent so that absence propagates through arithmetic (consistent + # with expression path where shift/where/reindex fill with FILL_VALUE) if options["arithmetic_convention"] == "v1": absent = self.labels == -1 if absent.any(): - const = DataArray( + nan_fill = DataArray( np.where(absent, np.nan, 0.0), coords=self.labels.coords ) - ds = ds.assign(const=const) + coeff_fill = DataArray( + np.where(absent, np.nan, coefficient.values), + coords=self.labels.coords, + ) + ds = ds.assign( + const=nan_fill, + coeffs=coeff_fill.expand_dims(TERM_DIM, -1), + ) return expressions.LinearExpression(ds, self.model) def __repr__(self) -> str: diff --git a/test/test_algebraic_properties.py b/test/test_algebraic_properties.py index be5669bd..6e0ff175 100644 --- a/test/test_algebraic_properties.py +++ b/test/test_algebraic_properties.py @@ -511,3 +511,93 @@ def test_fill_value_only_affects_absent(self, x: Variable) -> None: result = expr.add(5, fill_value=0) assert result.const.values[1] == 5 # valid slot: 0 + 5 assert result.coeffs.values[1, 0] == 1 # coeff unchanged + + +# ============================================================ +# 10. 
Division with absent slots +# ============================================================ + + +class TestDivisionAbsentSlots: + """Division propagates NaN same as multiplication.""" + + def test_div_scalar_propagates(self, x: Variable) -> None: + result = (1 * x).shift(time=1) / 2 + assert result.isnull().values[0] + assert not result.isnull().values[1] + + def test_div_array_propagates(self, x: Variable) -> None: + arr = xr.DataArray([2.0, 2.0, 2.0], dims=["time"]) + result = (1 * x).shift(time=1) / arr + assert result.isnull().values[0] + + def test_div_consistent_paths(self, x: Variable) -> None: + """Variable and expression paths give same result for division.""" + var_result = x.shift(time=1) / 2 + expr_result = (1 * x).shift(time=1) / 2 + assert_linequal(var_result, expr_result) + + def test_div_equals_mul_reciprocal(self, x: Variable) -> None: + """Shifted / 2 == shifted * 0.5""" + shifted = (1 * x).shift(time=1) + assert_linequal(shifted / 2, shifted * 0.5) + + +# ============================================================ +# 11. Subtraction with absent slots +# ============================================================ + + +class TestSubtractionAbsentSlots: + """Subtraction with shifted coords preserves associativity.""" + + def test_sub_scalar_revives(self, x: Variable) -> None: + result = x.shift(time=1) - 5 + assert not result.isnull().values[0] + assert result.const.values[0] == -5 + + def test_sub_associativity_shifted(self, x: Variable, y: Variable) -> None: + """(x.shift(1) - 5) + y == x.shift(1) + (y - 5)""" + xs = x.shift(time=1) + assert_linequal((xs - 5) + y, xs + (y - 5)) + + def test_sub_equals_add_neg_shifted(self, x: Variable) -> None: + """x.shift(1) - 5 == x.shift(1) + (-5)""" + xs = x.shift(time=1) + assert_linequal(xs - 5, xs + (-5)) + + +# ============================================================ +# 12. 
Multi-dimensional absent slots +# ============================================================ + + +@pytest.fixture +def g2(m: Model, time: pd.RangeIndex, tech: pd.Index) -> Variable: + """Second variable with dims [time, tech].""" + return m.add_variables(lower=0, coords=[time, tech], name="g2") + + +class TestMultiDimensionalAbsentSlots: + """2D variables with shift: add revives, mul propagates.""" + + def test_2d_add_revives(self, g: Variable) -> None: + shifted = (1 * g).shift(time=1) + result = shifted + 5 + assert not result.isnull().isel(time=0).any() + assert (result.const.isel(time=0) == 5).all() + + def test_2d_mul_propagates(self, g: Variable) -> None: + shifted = (1 * g).shift(time=1) + result = shifted * 3 + assert result.isnull().isel(time=0).all() + + def test_2d_associativity(self, g: Variable, g2: Variable) -> None: + """(g.shift(1) + g2) + 5 == g.shift(1) + (g2 + 5) in 2D.""" + gs = g.shift(time=1) + assert_linequal((gs + g2) + 5, gs + (g2 + 5)) + + def test_2d_distributivity(self, g: Variable, g2: Variable) -> None: + """2 * (g.shift(1) + g2) == 2*g.shift(1) + 2*g2 in 2D.""" + gs = g.shift(time=1) + assert_linequal(2 * (gs + g2), 2 * gs + 2 * g2)