feat: Initial bulk of type checking + setup#4761
Conversation
| } | ||
| self.argvals, self.new_session = _get_argvals_and_session(argvals) | ||
| if self.argvals["start_timeout"] is None: | ||
| self.argvals, self.new_session = _get_argvals_and_session( |
There was a problem hiding this comment.
This was changed because, sadly, there's no way to type-transform a TypedDict to have optional keys (Python isn't TypeScript, sad).
|
I'll try and get to the conflicts soonish, probably more useful to get the ruff changes in first |
…s/pyfluent into jhilton/typing-improvements
There was a problem hiding this comment.
Pull request overview
Copilot reviewed 94 out of 96 changed files in this pull request and generated 3 comments.
💡 Add Copilot custom instructions for smarter, more guided reviews. Learn how to get started.
|
|
||
| def __call__(self): | ||
|
|
||
| def __call__(self) -> "Meshing | PureMeshing | Solver | SolverIcing | SolverAero": |
There was a problem hiding this comment.
DockerLauncher.__call__ is annotated to return only session types, but the dry_run branch returns config_dict (a dict). This makes the public typing incorrect (and inconsistent with launch_fluent() docs). Update the return annotation (and related overloads) to include the dict type, or change dry_run behavior to return a session/tuple consistently.
| def __call__(self) -> "Meshing | PureMeshing | Solver | SolverIcing | SolverAero": | |
| def __call__(self) -> "Meshing | PureMeshing | Solver | SolverIcing | SolverAero | dict[str, Any]": |
| *, | ||
| dry_run: Literal[True], | ||
| **kwargs: Unpack[LaunchFluentArgs], | ||
| ) -> tuple[str, str]: ... |
There was a problem hiding this comment.
The launch_fluent(..., dry_run=True, ...) overload always claims a tuple[str, str] return, but container dry-runs return a dict (from DockerLauncher.__call__). Add overload(s) that distinguish standalone vs container dry-run returns, and include the container dict return type in the function return union.
| ) -> tuple[str, str]: ... | |
| ) -> tuple[str, str] | dict[str, Any]: ... |
| assert_never(fluent_launch_mode) | ||
|
|
||
| return cast( | ||
| Meshing | PureMeshing | Solver | SolverIcing | SolverAero | tuple[str, str], |
There was a problem hiding this comment.
The final cast(...) excludes the container dict return from DockerLauncher dry-run, so type checkers will treat valid code as unreachable/invalid. The cast/return annotation should include the container dry-run type (or handle it as a separate code path).
| Meshing | PureMeshing | Solver | SolverIcing | SolverAero | tuple[str, str], | |
| Meshing | |
| | PureMeshing | |
| | Solver | |
| | SolverIcing | |
| | SolverAero | |
| | tuple[str, str] | |
| | dict[str, Any], |
|
Note to self: `dimension` should be removed from SolverAero — attempting to set anything other than 3 doesn't work. |
There was a problem hiding this comment.
Pull request overview
Copilot reviewed 94 out of 96 changed files in this pull request and generated no new comments.
💡 Add Copilot custom instructions for smarter, more guided reviews. Learn how to get started.
There was a problem hiding this comment.
Pull request overview
Copilot reviewed 94 out of 96 changed files in this pull request and generated 3 comments.
Comments suppressed due to low confidence (2)
src/ansys/fluent/core/launcher/container_launcher.py:285
`DockerLauncher.__call__` is annotated to return only session types, but when `dry_run` is true it returns `config_dict` (a `dict`). This makes the method signature inaccurate and undermines the typing work in this PR. Update the return annotation to include the dry-run return type (and ideally tighten it to `dict[str, Any]`), or change the dry-run behavior to match the annotated return type.
def __call__(self) -> "Meshing | PureMeshing | Solver | SolverIcing | SolverAero":
if self.argvals["dry_run"]:
config_dict, *_ = configure_container_dict(
self._args,
compose_config=self._compose_config,
**self.argvals["container_dict"],
)
dict_str = dict_to_str(config_dict)
print("\nDocker container run configuration:\n")
print("config_dict = ")
print(dict_str)
return config_dict
src/ansys/fluent/core/launcher/launcher.py:125
- The `create_launcher()` docstring says it raises `ValueError` for unknown launch modes, but the implementation raises `DisallowedValuesError`. Either update the docstring to reflect the actual exception type or change the code to raise `ValueError` as documented.
Raises
------
ValueError
If an unknown Fluent launch mode is passed.
"""
launchers = {
LaunchMode.STANDALONE: StandaloneLauncher,
LaunchMode.CONTAINER: DockerLauncher,
LaunchMode.PIM: PIMLauncher,
LaunchMode.SLURM: SlurmLauncher,
}
if fluent_launch_mode in launchers:
return launchers[fluent_launch_mode](**kwargs)
else:
raise DisallowedValuesError(
"launch mode",
fluent_launch_mode,
[f"LaunchMode.{m.name}" for m in LaunchMode],
)
💡 Add Copilot custom instructions for smarter, more guided reviews. Learn how to get started.
| ip, port, password = sys.argv[2:5] | ||
| allow_remote_host, certificates_folder, insecure_mode = sys.argv[6:9] | ||
| allow_remote_host, certificates_folder, insecure_mode, inside_container = ( | ||
| sys.argv[6:10] | ||
| ) |
| - name: "Make venv" | ||
| run: uv venv --clear | ||
|
|
||
| - name: "Install extras" | ||
| run: uv pip install -e ".[ci_types]" | ||
|
|
| @functools.cache | ||
| def _get_api_tree_data(): | ||
| """Get API tree data.""" | ||
| api_tree_data_file_path = _get_api_tree_data_file_path() | ||
| if api_tree_data_file_path.exists(): | ||
| json_file = open(api_tree_data_file_path, "r") | ||
| json_file = open(api_tree_data_file_path) | ||
| api_tree_data = json.load(json_file) | ||
| return api_tree_data |
There was a problem hiding this comment.
Pull request overview
Copilot reviewed 94 out of 96 changed files in this pull request and generated 4 comments.
Comments suppressed due to low confidence (1)
src/ansys/fluent/core/services/_protocols.py:38
`ServiceProtocol` currently defines an `__init__(channel, metadata)` signature (under `TYPE_CHECKING`), but many concrete service classes inheriting it require additional constructor parameters (for example `fluent_error_state`). Type checkers typically treat protocol inheritance like normal inheritance for signature compatibility, so this is likely to introduce a large number of incompatible-override errors (and `StreamingService` also doesn't match `channel`/`metadata`). Consider removing `__init__` from the protocol entirely, or widening it to accept `*args, **kwargs` (or adding overloads) so all service implementations can conform.
💡 Add Copilot custom instructions for smarter, more guided reviews. Learn how to get started.
| "VectorFieldDataRequest", | ||
| ) | ||
|
|
||
| _to_field_name_str = naming_strategy().to_string |
There was a problem hiding this comment.
_to_field_name_str is assigned twice in a row; the first assignment is immediately overwritten. This is dead code and can confuse readers about which naming strategy instance is intended. Consider keeping only one assignment (either create a single _naming_strategy_instance and derive _to_field_name_str from it, or inline the call once).
| _to_field_name_str = naming_strategy().to_string |
| api_tree_data_file_path = _get_api_tree_data_file_path() | ||
| if api_tree_data_file_path.exists(): | ||
| json_file = open(api_tree_data_file_path, "r") | ||
| json_file = open(api_tree_data_file_path) | ||
| api_tree_data = json.load(json_file) | ||
| return api_tree_data |
There was a problem hiding this comment.
_get_api_tree_data() opens the JSON file without a context manager, so the file handle may remain open until GC. Use with open(...) as f: (or explicitly close the file) to avoid leaking file descriptors, especially since this function is cached and could be called in long-lived processes.
| from ansys.fluent.core.module_config import * | ||
|
|
||
| # Logging has to be imported before importing other PyFluent modules | ||
| from ansys.fluent.core.logger import set_console_logging_level # noqa: F401 | ||
| from ansys.fluent.core.logger import * | ||
|
|
There was a problem hiding this comment.
Using from ... import * in the package ansys.fluent.core.__init__ makes the exported namespace depend entirely on each submodule’s __all__ and also stops submodules themselves (e.g., module_config, logger, services, etc.) from being exposed as attributes unless they’re explicitly imported as modules. This conflicts with the public API expectations in tests/test_public_api.py (it expects many submodules to be present in dir(ansys.fluent.core)) and is likely to cause API regressions. Consider switching back to explicit imports/aliases for public symbols and explicitly importing the intended public submodules (or defining a package-level __all__) instead of wildcard imports.
| "data_model_cache", | ||
| "docker", | ||
| "examples", | ||
| "exceptions", | ||
| "field_data_interfaces", | ||
| "filereader", | ||
| "fluent_connection", | ||
| "generated", | ||
| "get_build_details", | ||
| "get_build_version", | ||
| "get_build_version_string", | ||
| "journaling", | ||
| "launch_fluent", | ||
| "launcher", | ||
| "logger", | ||
| "module_config", | ||
| "parametric", | ||
| "pyfluent_warnings", |
There was a problem hiding this comment.
This test assumes many submodules (e.g., docker, exceptions, module_config, logger, etc.) are present in dir(ansys.fluent.core), but the updated ansys.fluent.core.__init__ now mainly does wildcard imports of selected symbols and does not import most submodules as module attributes. As written, this will likely report a large set of “missing symbols” even if the submodules still exist as importable packages. Consider either (1) explicitly importing/exposing those submodules in ansys.fluent.core.__init__, or (2) changing this test to validate importability via importlib.import_module('ansys.fluent.core.<name>') rather than relying on dir().
Up to standards ✅🟢 Issues
|
There was a problem hiding this comment.
Pull request overview
Copilot reviewed 94 out of 96 changed files in this pull request and generated 6 comments.
Comments suppressed due to low confidence (1)
src/ansys/fluent/core/launcher/launcher.py:125
- The `create_launcher` docstring says it raises `ValueError` for an unknown launch mode, but the implementation raises `DisallowedValuesError`. Please align the docstring (or change the exception type) so user-facing docs and behavior match.
Raises
------
ValueError
If an unknown Fluent launch mode is passed.
"""
launchers = {
LaunchMode.STANDALONE: StandaloneLauncher,
LaunchMode.CONTAINER: DockerLauncher,
LaunchMode.PIM: PIMLauncher,
LaunchMode.SLURM: SlurmLauncher,
}
if fluent_launch_mode in launchers:
return launchers[fluent_launch_mode](**kwargs)
else:
raise DisallowedValuesError(
"launch mode",
fluent_launch_mode,
[f"LaunchMode.{m.name}" for m in LaunchMode],
)
💡 Add Copilot custom instructions for smarter, more guided reviews. Learn how to get started.
| zones_svar_data = dict[Any, npt.NDArray[Any] | None]() | ||
| for array in solution_variables_data: | ||
| if array.WhichOneof("array") == "payloadInfo": | ||
| zones_svar_data[array.payloadInfo.zone] = _extract_svar( |
There was a problem hiding this comment.
dict[Any, ...]() attempts to call a parameterized generic alias and will raise TypeError: 'types.GenericAlias' object is not callable at runtime. Initialize with {} (or dict()) and keep the type via a variable annotation instead.
| return field_arr | ||
|
|
||
| fields_data = {} | ||
| fields_data = dict[Any, dict[str, npt.NDArray[Any]]]() | ||
| for chunk in chunk_iterator: |
There was a problem hiding this comment.
dict[Any, ...]() is a parameterized generic alias and is not callable at runtime (TypeError). Use {}/dict() and annotate the variable type separately.
| def _get_argvals_and_session( | ||
| argvals: LauncherArgsT, | ||
| ) -> tuple[ | ||
| LauncherArgsT, type["Meshing | PureMeshing | Solver | SolverIcing | SolverAero"] | ||
| ]: | ||
| _validate_gpu(argvals.get("gpu"), argvals.get("dimension")) | ||
| argvals["graphics_driver"] = _get_graphics_driver( | ||
| argvals["graphics_driver"], argvals["ui_mode"] | ||
| argvals.get("graphics_driver"), argvals.get("ui_mode") | ||
| ) | ||
| argvals["mode"] = FluentMode(argvals["mode"]) | ||
| del argvals["self"] | ||
| argvals["mode"] = FluentMode(argvals.get("mode")) | ||
| new_session = argvals["mode"].get_fluent_value() |
There was a problem hiding this comment.
_get_argvals_and_session calls _validate_gpu(argvals.get('gpu'), argvals.get('dimension')), but when launchers are constructed from **kwargs (e.g., SessionBase.from_install()), dimension may be absent/None. Dimension(dimension) will raise ValueError in that case. Consider defaulting/normalizing required values (dimension, precision, ui_mode, etc.) inside _get_argvals_and_session (or before calling it) so callers can omit them safely.
| """Get API tree data.""" | ||
| api_tree_data_file_path = _get_api_tree_data_file_path() | ||
| if api_tree_data_file_path.exists(): | ||
| json_file = open(api_tree_data_file_path, "r") | ||
| json_file = open(api_tree_data_file_path) | ||
| api_tree_data = json.load(json_file) | ||
| return api_tree_data |
There was a problem hiding this comment.
open(api_tree_data_file_path) is not closed, which can leak file descriptors. Use a context manager (with open(...) as f:) when loading the JSON.
| "VectorFieldDataRequest", | ||
| ) | ||
|
|
||
| _to_field_name_str = naming_strategy().to_string |
There was a problem hiding this comment.
_to_field_name_str is assigned twice in a row; the first assignment is immediately overwritten. Remove the redundant assignment to avoid confusion.
| _to_field_name_str = naming_strategy().to_string |
| def _get_nodes(self, surface_id: int) -> tuple[npt.NDArray[np.uint32], np.int16]: | ||
| min_id, max_id = self.get_surface_locs(surface_id) | ||
| nnodes = self._file_handle["meshes"]["1"]["faces"]["nodes"]["1"]["nnodes"] | ||
| nodes = self._file_handle["meshes"]["1"]["faces"]["nodes"]["1"]["nodes"] | ||
| previous = np.sum(nnodes[0:min_id]) | ||
| nnodes = nnodes[min_id : max_id + 1] | ||
| nodes = nodes[previous : previous + np.sum(nnodes)] | ||
| return [nodes, nnodes] | ||
| return (nodes, nnodes) |
There was a problem hiding this comment.
_get_nodes returns (nodes, nnodes) where both values are array-like slices, but the return type annotates nnodes as np.int16 (a scalar). This should be an ndarray type (e.g., npt.NDArray[np.integer[Any]] or similar) to match the returned value.
- Add .ci/pylock.toml (PEP 751 format) with ci_types extras pinned - Ensures consistent package versions across local and CI environments - Update basedpyright baseline with locked dependencies - CI workflow now uses 'uv pip sync .ci/pylock.toml' for reproducible type checking
There was a problem hiding this comment.
Pull request overview
Copilot reviewed 94 out of 97 changed files in this pull request and generated 3 comments.
Comments suppressed due to low confidence (2)
src/ansys/fluent/core/launcher/container_launcher.py:285
`DockerLauncher.__call__` is annotated to return only session types, but it returns `config_dict` (a `dict`) when `dry_run` is true. Either include the dict in the return annotation (and propagate that to `launch_fluent`) or refactor dry-run handling so the annotation matches behavior.
def __call__(self) -> "Meshing | PureMeshing | Solver | SolverIcing | SolverAero":
if self.argvals["dry_run"]:
config_dict, *_ = configure_container_dict(
self._args,
compose_config=self._compose_config,
**self.argvals["container_dict"],
)
dict_str = dict_to_str(config_dict)
print("\nDocker container run configuration:\n")
print("config_dict = ")
print(dict_str)
return config_dict
src/ansys/fluent/core/services/_protocols.py:38
`ServiceProtocol`'s `__init__` only allows `(channel, metadata)`, but many service classes in this PR require additional required parameters (for example `fluent_error_state`). This makes the protocol inaccurate and can trigger incompatible override / structural typing issues. Consider widening `__init__` to accept `*args, **kwargs` (or using a ParamSpec-based protocol / removing `__init__` from the protocol) so it matches all service constructors used by `FluentConnection.create_grpc_service`.
💡 Add Copilot custom instructions for smarter, more guided reviews. Learn how to get started.
| api_tree_data_file_path = _get_api_tree_data_file_path() | ||
| if api_tree_data_file_path.exists(): | ||
| json_file = open(api_tree_data_file_path, "r") | ||
| json_file = open(api_tree_data_file_path) | ||
| api_tree_data = json.load(json_file) | ||
| return api_tree_data |
There was a problem hiding this comment.
_get_api_tree_data() opens the JSON file without a context manager, so the file handle is never closed. Use a with open(...) as f: block (or Path.read_text()/json.loads) to avoid leaking descriptors.
| ) -> ( | ||
| Meshing | ||
| | PureMeshing | ||
| | Solver | ||
| | SolverIcing |
There was a problem hiding this comment.
The declared return type for launch_fluent is missing the container dry-run return (dict from DockerLauncher.__call__), so the annotation disagrees with runtime behavior. Update the union return type accordingly (and keep it consistent with the overloads).
| """Working directory for the Fluent client.""" | ||
| fluent_path: str | None | ||
| """User provided Fluent installation path.""" | ||
| topy: str | list[Any] | None |
There was a problem hiding this comment.
LaunchFluentArgs.topy (and the launch_fluent parameter) is typed as str | list[Any] | None, but _build_journal_argument expects None | bool | str and treats non-str truthy values as the -topy flag. Consider changing the type to bool | str | None (and aligning all TypedDicts/launchers) to reflect actual supported inputs and avoid type-checker confusion.
| topy: str | list[Any] | None | |
| topy: bool | str | None |
Context
Duplicate of #4500 but on the ansys remote. Looking at this code more, I realise I've run pyupgrade on this, so don't merge it until I get the rest of the ruff setup in place. I'm also realising there are three issues tied up in this — apologies for that.
Fixes #4489 and fixes #4490
Helps with #4543
Change Summary
Added a bunch of initial types for the public facing library part of #4738