diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 87d30cf8..ecf9abe9 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -12,6 +12,7 @@ Upcoming Version * Add the `sphinx-copybutton` to the documentation * Add SOS1 and SOS2 reformulations for solvers not supporting them. * Enable quadratic problems with SCIP on windows. +* Add unified ``SolverMetrics`` dataclass accessible via ``Model.solver_metrics`` after solving. Provides ``solver_name``, ``solve_time``, ``objective_value``, ``dual_bound``, ``mip_gap``, and ``peak_memory`` in a solver-independent way. All solvers populate solver-specific fields where available. Version 0.6.5 diff --git a/examples/create-a-model.ipynb b/examples/create-a-model.ipynb index a158e0cf..7e4ac46a 100644 --- a/examples/create-a-model.ipynb +++ b/examples/create-a-model.ipynb @@ -252,9 +252,25 @@ "cell_type": "markdown", "id": "e296f641", "metadata": {}, + "source": "Well done! You solved your first linopy model!" + }, + { + "metadata": {}, + "cell_type": "markdown", "source": [ - "Well done! You solved your first linopy model!" - ] + "### Solver Metrics\n", + "\n", + "After solving, you can inspect performance metrics reported by the solver via `solver_metrics`. This includes solve time, objective value, and for MIP problems, the dual bound and MIP gap (available for most solvers)." 
+ ], + "id": "e4995d38f3fc7779" + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": "m.solver_metrics", + "id": "bef28e724dceba9" } ], "metadata": { diff --git a/linopy/__init__.py b/linopy/__init__.py index b1dc33b9..f40ee04a 100644 --- a/linopy/__init__.py +++ b/linopy/__init__.py @@ -14,7 +14,7 @@ import linopy.monkey_patch_xarray # noqa: F401 from linopy.common import align from linopy.config import options -from linopy.constants import EQUAL, GREATER_EQUAL, LESS_EQUAL +from linopy.constants import EQUAL, GREATER_EQUAL, LESS_EQUAL, SolverMetrics from linopy.constraints import Constraint, Constraints from linopy.expressions import LinearExpression, QuadraticExpression, merge from linopy.io import read_netcdf @@ -40,6 +40,7 @@ "OetcHandler", "QuadraticExpression", "RemoteHandler", + "SolverMetrics", "Variable", "Variables", "available_solvers", diff --git a/linopy/constants.py b/linopy/constants.py index 00bbd705..fd9bf4bb 100644 --- a/linopy/constants.py +++ b/linopy/constants.py @@ -3,6 +3,7 @@ Linopy module for defining constant values used within the package. """ +import dataclasses import logging from dataclasses import dataclass, field from enum import Enum @@ -235,6 +236,50 @@ class Solution: objective: float = field(default=np.nan) +@dataclass(frozen=True) +class SolverMetrics: + """ + Unified solver performance metrics. + + All fields default to ``None``. Solvers populate what they can; + unsupported fields remain ``None``. Access via + :attr:`Model.solver_metrics` after calling :meth:`Model.solve`. + + Attributes + ---------- + solver_name : str or None + Name of the solver used. + solve_time : float or None + Wall-clock time spent solving (seconds). + objective_value : float or None + Objective value of the best solution found. + dual_bound : float or None + Best bound on the objective from the MIP relaxation (also known as + "best bound"). Only populated for integer programs. 
+ mip_gap : float or None + Relative gap between the objective value and the dual bound. + Only populated for integer programs. + peak_memory : float or None + Peak memory usage during solving (MB). Only populated for solvers + that expose this information (e.g. Gurobi, Xpress). + """ + + solver_name: str | None = None + solve_time: float | None = None + objective_value: float | None = None + dual_bound: float | None = None + mip_gap: float | None = None + peak_memory: float | None = None + + def __repr__(self) -> str: + fields = [] + for f in dataclasses.fields(self): + val = getattr(self, f.name) + if val is not None: + fields.append(f"{f.name}={val!r}") + return f"SolverMetrics({', '.join(fields)})" + + @dataclass class Result: """ @@ -244,6 +289,7 @@ class Result: status: Status solution: Solution | None = None solver_model: Any = None + metrics: SolverMetrics | None = None def __repr__(self) -> str: solver_model_string = ( @@ -256,12 +302,16 @@ def __repr__(self) -> str: ) else: solution_string = "Solution: None\n" + metrics_string = "" + if self.metrics is not None: + metrics_string = f"Solver metrics: {self.metrics}\n" return ( f"Status: {self.status.status.value}\n" f"Termination condition: {self.status.termination_condition.value}\n" + solution_string + f"Solver model: {solver_model_string}\n" - f"Solver message: {self.status.legacy_status}" + + metrics_string + + f"Solver message: {self.status.legacy_status}" ) def info(self) -> None: diff --git a/linopy/model.py b/linopy/model.py index f1284aaa..da41811e 100644 --- a/linopy/model.py +++ b/linopy/model.py @@ -44,6 +44,7 @@ SOS_TYPE_ATTR, TERM_DIM, ModelStatus, + SolverMetrics, TerminationCondition, ) from linopy.constraints import AnonymousScalarConstraint, Constraint, Constraints @@ -112,6 +113,7 @@ class Model: solver_model: Any solver_name: str + _solver_metrics: SolverMetrics | None _variables: Variables _constraints: Constraints _objective: Objective @@ -154,6 +156,7 @@ class Model: 
"_force_dim_names", "_auto_mask", "_solver_dir", + "_solver_metrics", "solver_model", "solver_name", "matrices", @@ -215,6 +218,28 @@ def __init__( ) self.matrices: MatrixAccessor = MatrixAccessor(self) + self._solver_metrics: SolverMetrics | None = None + + @property + def solver_metrics(self) -> SolverMetrics | None: + """ + Solver performance metrics from the last solve, or ``None`` + if the model has not been solved yet. + + Returns a :class:`~linopy.constants.SolverMetrics` instance. + Fields the solver cannot provide remain ``None``. + + Reset to ``None`` by :meth:`reset_solution`. + + Examples + -------- + >>> m.solve(solver_name="highs") # doctest: +SKIP + >>> m.solver_metrics.solve_time # doctest: +SKIP + 0.003 + >>> m.solver_metrics.objective_value # doctest: +SKIP + 0.0 + """ + return self._solver_metrics @property def variables(self) -> Variables: @@ -1483,6 +1508,7 @@ def solve( self.termination_condition = result.status.termination_condition.value self.solver_model = result.solver_model self.solver_name = solver_name + self._solver_metrics = result.metrics if not result.status.is_ok: return ( @@ -1546,6 +1572,7 @@ def _mock_solve( self.termination_condition = TerminationCondition.optimal.value self.solver_model = None self.solver_name = solver_name + self._solver_metrics = SolverMetrics(solver_name="mock", objective_value=0.0) for name, var in self.variables.items(): var.solution = xr.DataArray(0.0, var.coords) @@ -1788,6 +1815,7 @@ def reset_solution(self) -> None: """ self.variables.reset_solution() self.constraints.reset_dual() + self._solver_metrics = None to_netcdf = to_netcdf diff --git a/linopy/solvers.py b/linopy/solvers.py index 474459fe..b59398ff 100644 --- a/linopy/solvers.py +++ b/linopy/solvers.py @@ -6,6 +6,7 @@ from __future__ import annotations import contextlib +import dataclasses import enum import io import logging @@ -14,6 +15,7 @@ import subprocess as sub import sys import threading +import time import warnings from abc import 
ABC, abstractmethod from collections import namedtuple @@ -29,6 +31,7 @@ from linopy.constants import ( Result, Solution, + SolverMetrics, SolverStatus, Status, TerminationCondition, @@ -224,6 +227,15 @@ class xpress_Namespaces: # type: ignore[no-redef] logger = logging.getLogger(__name__) +def _safe_get(func: Callable[[], Any]) -> Any: + """Call *func* and return its result, or None if it raises.""" + try: + return func() + except Exception: + logger.debug("Failed to extract solver metric", exc_info=True) + return None + + io_structure = dict( lp_file={ "gurobi", @@ -419,6 +431,21 @@ def solve_problem( msg = "No problem file or model specified." raise ValueError(msg) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + """ + Extract solver performance metrics. + + Base implementation populates solver_name and objective_value. + Subclasses should call super(), then set solver-specific fields + on the returned object. + """ + return SolverMetrics( + solver_name=self.solver_name.value, + objective_value=_safe_get( + lambda: solution.objective if not np.isnan(solution.objective) else None + ), + ) + @property def solver_name(self) -> SolverName: return SolverName[self.__class__.__name__] @@ -440,6 +467,14 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + metrics = super()._extract_metrics(solver_model, solution) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: solver_model.runtime), + mip_gap=_safe_get(lambda: solver_model.mip_gap), + ) + def solve_problem_from_model( self, model: Model, @@ -607,7 +642,9 @@ def get_solver_solution() -> Solution: runtime = float(m.group(1)) CbcModel = namedtuple("CbcModel", ["mip_gap", "runtime"]) - return Result(status, solution, CbcModel(mip_gap, runtime)) + solver_model = CbcModel(mip_gap, runtime) + metrics = self._extract_metrics(solver_model, solution) + return 
Result(status, solution, solver_model, metrics) class GLPK(Solver[None]): @@ -737,7 +774,10 @@ def solve_problem_from_file( if not os.path.exists(solution_fn): status = Status(SolverStatus.warning, TerminationCondition.unknown) - return Result(status, Solution()) + solution = Solution() + return Result( + status, solution, metrics=self._extract_metrics(None, solution) + ) f = open(solution_fn) @@ -777,7 +817,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution) + metrics = self._extract_metrics(None, solution) + return Result(status, solution, metrics=metrics) class Highs(Solver[None]): @@ -920,6 +961,28 @@ def solve_problem_from_file( sense=read_sense_from_problem_file(problem_fn), ) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + h = solver_model + metrics = super()._extract_metrics(solver_model, solution) + + def _highs_info(key: str) -> float: + status, val = h.getInfoValue(key) + if status != highspy.HighsStatus.kOk: # pragma: no cover + msg = f"Failed to get HiGHS info: {key}" + raise RuntimeError(msg) + return val + + is_mip = _safe_get(lambda: _highs_info("mip_node_count")) not in (None, -1) + + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: h.getRunTime()), + mip_gap=_safe_get(lambda: _highs_info("mip_gap")) if is_mip else None, + dual_bound=_safe_get(lambda: _highs_info("mip_dual_bound")) + if is_mip + else None, + ) + def _set_solver_params( self, highs_solver: highspy.Highs, @@ -1028,7 +1091,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, h) + metrics = self._extract_metrics(h, solution) + return Result(status, solution, h, metrics) class Gurobi(Solver["gurobipy.Env 
| dict[str, Any] | None"]): @@ -1162,6 +1226,22 @@ def solve_problem_from_file( sense=sense, ) + def _extract_metrics( + self, solver_model: Any, solution: Solution + ) -> SolverMetrics: # pragma: no cover + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + is_mip = _safe_get(lambda: m.IsMIP) == 1 + mem_gb = _safe_get(lambda: m.MaxMemUsed) + peak_memory = mem_gb * 1024 if mem_gb is not None else None + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.Runtime), + dual_bound=_safe_get(lambda: m.ObjBound) if is_mip else None, + mip_gap=_safe_get(lambda: m.MIPGap) if is_mip else None, + peak_memory=peak_memory, + ) + def _solve( self, m: gurobipy.Model, @@ -1263,7 +1343,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) class Cplex(Solver[None]): @@ -1286,6 +1367,23 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics( + self, solver_model: Any, solution: Solution + ) -> SolverMetrics: # pragma: no cover + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + is_mip = _safe_get(lambda: m.problem_type[m.get_problem_type()] != "LP") + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: self._solve_time), + dual_bound=_safe_get(lambda: m.solution.MIP.get_best_objective()) + if is_mip + else None, + mip_gap=_safe_get(lambda: m.solution.MIP.get_mip_relative_gap()) + if is_mip + else None, + ) + def solve_problem_from_model( self, model: Model, @@ -1375,8 +1473,10 @@ def solve_problem_from_file( is_lp = m.problem_type[m.get_problem_type()] == "LP" + _t0 = time.perf_counter() # pragma: no cover with contextlib.suppress(cplex.exceptions.errors.CplexSolverError): m.solve() + self._solve_time = 
time.perf_counter() - _t0 # pragma: no cover if solution_fn is not None: try: @@ -1419,7 +1519,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) class SCIP(Solver[None]): @@ -1438,6 +1539,17 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics(self, solver_model: Any, solution: Solution) -> SolverMetrics: + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + is_mip = getattr(self, "_is_mip", False) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.getSolvingTime()), + dual_bound=_safe_get(lambda: m.getDualbound()) if is_mip else None, + mip_gap=_safe_get(lambda: m.getGap()) if is_mip else None, + ) + def solve_problem_from_model( self, model: Model, @@ -1529,6 +1641,7 @@ def solve_problem_from_file( if warmstart_fn: logger.warning("Warmstart not implemented for SCIP") + self._is_mip = m.getNIntVars() + m.getNBinVars() > 0 m.optimize() if basis_fn: @@ -1572,7 +1685,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) class Xpress(Solver[None]): @@ -1594,6 +1708,28 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics( + self, solver_model: Any, solution: Solution + ) -> SolverMetrics: # pragma: no cover + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + is_mip = _safe_get(lambda: m.attributes.mipents) not in (None, 0) + + def _xpress_mip_gap() -> float | None: + obj = m.attributes.mipbestobjval + bound = 
m.attributes.bestbound + if obj == 0: + return 0.0 if bound == 0 else None + return abs(obj - bound) / abs(obj) + + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.attributes.time), + dual_bound=_safe_get(lambda: m.attributes.bestbound) if is_mip else None, + mip_gap=_safe_get(_xpress_mip_gap) if is_mip else None, + peak_memory=_safe_get(lambda: m.attributes.peakmemory / (1024 * 1024)), + ) + def solve_problem_from_model( self, model: Model, @@ -1742,7 +1878,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution, m) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, m, metrics) KnitroResult = namedtuple("KnitroResult", "knitro_context reported_runtime") @@ -1976,6 +2113,23 @@ def __init__( ) -> None: super().__init__(**solver_options) + def _extract_metrics( + self, solver_model: Any, solution: Solution + ) -> SolverMetrics: # pragma: no cover + m = solver_model + metrics = super()._extract_metrics(solver_model, solution) + is_mip = _safe_get(lambda: m.getnumintvar()) not in (None, 0) + return dataclasses.replace( + metrics, + solve_time=_safe_get(lambda: m.getdouinf(mosek.dinfitem.optimizer_time)), + dual_bound=_safe_get(lambda: m.getdouinf(mosek.dinfitem.mio_obj_bound)) + if is_mip + else None, + mip_gap=_safe_get(lambda: m.getdouinf(mosek.dinfitem.mio_obj_rel_gap)) + if is_mip + else None, + ) + def solve_problem_from_model( self, model: Model, @@ -2286,7 +2440,8 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) - return Result(status, solution) + metrics = self._extract_metrics(m, solution) + return Result(status, solution, metrics=metrics) class COPT(Solver[None]): @@ -2425,9 +2580,10 @@ def get_solver_solution() -> 
Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) + metrics = self._extract_metrics(m, solution) env_.close() - return Result(status, solution, m) + return Result(status, solution, m, metrics) class MindOpt(Solver[None]): @@ -2568,10 +2724,12 @@ def get_solver_solution() -> Solution: solution = self.safe_get_solution(status=status, func=get_solver_solution) solution = maybe_adjust_objective_sign(solution, io_api, sense) + metrics = self._extract_metrics(m, solution) + m.dispose() env_.dispose() - return Result(status, solution, m) + return Result(status, solution, m, metrics) class PIPS(Solver[None]): @@ -2820,7 +2978,8 @@ def get_solver_solution() -> Solution: solution = maybe_adjust_objective_sign(solution, io_api, sense) # see https://github.com/MIT-Lu-Lab/cuPDLPx/tree/main/python#solution-attributes - return Result(status, solution, cu_model) + metrics = self._extract_metrics(cu_model, solution) + return Result(status, solution, cu_model, metrics) def _set_solver_params(self, cu_model: cupdlpx.Model) -> None: """ diff --git a/test/test_solver_metrics.py b/test/test_solver_metrics.py new file mode 100644 index 00000000..c63118e0 --- /dev/null +++ b/test/test_solver_metrics.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python3 +""" +Tests for the SolverMetrics feature. 
+""" + +from __future__ import annotations + +import numpy as np +import pytest +import xarray as xr + +from linopy import Model, available_solvers +from linopy.constants import Result, Solution, SolverMetrics, Status +from linopy.solver_capabilities import SolverFeature, get_available_solvers_with_feature + +# --------------------------------------------------------------------------- +# SolverMetrics dataclass tests +# --------------------------------------------------------------------------- + + +def test_solver_metrics_defaults() -> None: + m = SolverMetrics() + assert m.solver_name is None + assert m.solve_time is None + assert m.objective_value is None + assert m.dual_bound is None + assert m.mip_gap is None + assert m.peak_memory is None + + +def test_solver_metrics_partial() -> None: + m = SolverMetrics(solver_name="highs", solve_time=1.5) + assert m.solver_name == "highs" + assert m.solve_time == 1.5 + assert m.objective_value is None + + +def test_solver_metrics_repr_only_non_none() -> None: + m = SolverMetrics(solver_name="gurobi", solve_time=2.3) + r = repr(m) + assert "solver_name='gurobi'" in r + assert "solve_time=2.3" in r + assert "objective_value" not in r + assert "dual_bound" not in r + + +def test_solver_metrics_repr_empty() -> None: + m = SolverMetrics() + assert repr(m) == "SolverMetrics()" + + +def test_solver_metrics_frozen() -> None: + m = SolverMetrics(solver_name="test") + with pytest.raises(AttributeError): + m.solver_name = "other" # type: ignore[misc] + + +# --------------------------------------------------------------------------- +# Result backward compatibility tests +# --------------------------------------------------------------------------- + + +def test_result_without_metrics() -> None: + """Result without metrics should still work (backward compatible).""" + status = Status.from_termination_condition("optimal") + result = Result(status=status, solution=Solution()) + assert result.metrics is None + # repr should not crash + 
repr(result) + + +def test_result_with_metrics() -> None: + status = Status.from_termination_condition("optimal") + metrics = SolverMetrics(solver_name="test", solve_time=1.0) + result = Result(status=status, solution=Solution(), metrics=metrics) + assert result.metrics is not None + assert result.metrics.solver_name == "test" + r = repr(result) + assert "Solver metrics:" in r + + +# --------------------------------------------------------------------------- +# Model integration tests +# --------------------------------------------------------------------------- + + +def test_model_metrics_none_before_solve() -> None: + m = Model() + assert m.solver_metrics is None + + +def test_model_metrics_populated_after_mock_solve() -> None: + m = Model() + x = m.add_variables( + lower=xr.DataArray(np.zeros(5), dims=["i"]), + upper=xr.DataArray(np.ones(5), dims=["i"]), + name="x", + ) + m.add_objective(x.sum()) + m.solve(mock_solve=True) + assert m.solver_metrics is not None + assert m.solver_metrics.solver_name == "mock" + assert m.solver_metrics.objective_value == 0.0 + + +def test_model_metrics_reset() -> None: + m = Model() + x = m.add_variables( + lower=xr.DataArray(np.zeros(5), dims=["i"]), + upper=xr.DataArray(np.ones(5), dims=["i"]), + name="x", + ) + m.add_objective(x.sum()) + m.solve(mock_solve=True) + assert m.solver_metrics is not None + m.reset_solution() + assert m.solver_metrics is None + + +# --------------------------------------------------------------------------- +# Solver-specific integration tests (parametrized over available solvers) +# --------------------------------------------------------------------------- + +# Solvers that have a tested _extract_metrics override providing solve_time etc. 
+_solvers_with_metrics = {"gurobi", "highs", "scip", "cplex", "xpress", "mosek"} + +direct_solvers = [ + s + for s in get_available_solvers_with_feature( + SolverFeature.DIRECT_API, available_solvers + ) + if s in _solvers_with_metrics +] +file_io_solvers = [ + s + for s in get_available_solvers_with_feature( + SolverFeature.READ_MODEL_FROM_FILE, available_solvers + ) + if s in _solvers_with_metrics +] +mip_solvers = [ + s + for s in get_available_solvers_with_feature( + SolverFeature.INTEGER_VARIABLES, available_solvers + ) + if s in _solvers_with_metrics +] + + +def _make_simple_lp() -> Model: + m = Model() + x = m.add_variables( + lower=xr.DataArray(np.zeros(3), dims=["i"]), + upper=xr.DataArray(np.ones(3), dims=["i"]), + name="x", + ) + m.add_constraints(x.sum() >= 1, name="con") + m.add_objective(x.sum()) + return m + + +def _make_simple_mip() -> Model: + m = Model() + x = m.add_variables(coords=[np.arange(3)], name="x", binary=True) + m.add_constraints(x.sum() >= 1, name="con") + m.add_objective(x.sum()) + return m + + +@pytest.mark.parametrize("solver", direct_solvers) +def test_solver_metrics_direct(solver: str) -> None: + m = _make_simple_lp() + m.solve(solver_name=solver, io_api="direct") + metrics = m.solver_metrics + assert metrics is not None + assert metrics.solver_name == solver + assert metrics.objective_value is not None + assert metrics.objective_value == pytest.approx(1.0) + assert metrics.solve_time is not None + assert metrics.solve_time >= 0 + + +@pytest.mark.parametrize("solver", file_io_solvers) +def test_solver_metrics_file_io(solver: str) -> None: + m = _make_simple_lp() + m.solve(solver_name=solver, io_api="lp") + metrics = m.solver_metrics + assert metrics is not None + assert metrics.solver_name == solver + assert metrics.objective_value is not None + assert metrics.objective_value == pytest.approx(1.0) + assert metrics.solve_time is not None + assert metrics.solve_time >= 0 + + +@pytest.mark.parametrize("solver", mip_solvers) +def 
test_solver_metrics_mip(solver: str) -> None: + """Solve a MIP and verify mip_gap and dual_bound are populated.""" + m = _make_simple_mip() + if solver in direct_solvers: + m.solve(solver_name=solver, io_api="direct") + else: + m.solve(solver_name=solver, io_api="lp") + metrics = m.solver_metrics + assert metrics is not None + assert metrics.solver_name == solver + assert metrics.objective_value == pytest.approx(1.0) + assert metrics.solve_time is not None + assert metrics.solve_time >= 0 + assert metrics.mip_gap is not None + assert metrics.mip_gap >= 0 + assert metrics.dual_bound is not None + assert isinstance(metrics.dual_bound, float) + + +# Solvers that populate peak_memory in _extract_metrics +_solvers_with_peak_memory = {"gurobi", "xpress"} + +peak_memory_solvers = [s for s in available_solvers if s in _solvers_with_peak_memory] + + +@pytest.mark.parametrize("solver", peak_memory_solvers) +def test_solver_metrics_peak_memory(solver: str) -> None: # pragma: no cover + """Verify peak_memory is populated for solvers that support it.""" + m = _make_simple_lp() + if solver in direct_solvers: + m.solve(solver_name=solver, io_api="direct") + else: + m.solve(solver_name=solver, io_api="lp") + metrics = m.solver_metrics + assert metrics is not None + assert metrics.peak_memory is not None + assert metrics.peak_memory > 0