From 4283200c88efaa0c29700b5a6bfe917060ec311c Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Thu, 2 Apr 2026 22:07:51 +0200 Subject: [PATCH 01/51] Replace uid_map with direct parameter references in aliases --- docs/docs/tutorials/ed-17.py | 4 +- docs/docs/tutorials/ed-3.py | 8 +- docs/docs/tutorials/ed-5.py | 4 +- .../analysis/categories/aliases/default.py | 81 ++++++++++++++----- src/easydiffraction/core/singleton.py | 74 +++-------------- src/easydiffraction/core/variable.py | 21 +---- ..._powder-diffraction_constant-wavelength.py | 8 +- .../analysis/categories/test_aliases.py | 16 +++- .../easydiffraction/core/test_singletons.py | 12 ++- 9 files changed, 103 insertions(+), 125 deletions(-) diff --git a/docs/docs/tutorials/ed-17.py b/docs/docs/tutorials/ed-17.py index af06031f..6e2a50ee 100644 --- a/docs/docs/tutorials/ed-17.py +++ b/docs/docs/tutorials/ed-17.py @@ -250,11 +250,11 @@ # %% project.analysis.aliases.create( label='biso_Co1', - param_uid=structure.atom_sites['Co1'].b_iso.uid, + param=structure.atom_sites['Co1'].b_iso, ) project.analysis.aliases.create( label='biso_Co2', - param_uid=structure.atom_sites['Co2'].b_iso.uid, + param=structure.atom_sites['Co2'].b_iso, ) # %% [markdown] diff --git a/docs/docs/tutorials/ed-3.py b/docs/docs/tutorials/ed-3.py index 23b60d88..5404f847 100644 --- a/docs/docs/tutorials/ed-3.py +++ b/docs/docs/tutorials/ed-3.py @@ -567,11 +567,11 @@ # %% project.analysis.aliases.create( label='biso_La', - param_uid=project.structures['lbco'].atom_sites['La'].b_iso.uid, + param=project.structures['lbco'].atom_sites['La'].b_iso, ) project.analysis.aliases.create( label='biso_Ba', - param_uid=project.structures['lbco'].atom_sites['Ba'].b_iso.uid, + param=project.structures['lbco'].atom_sites['Ba'].b_iso, ) # %% [markdown] @@ -636,11 +636,11 @@ # %% project.analysis.aliases.create( label='occ_La', - param_uid=project.structures['lbco'].atom_sites['La'].occupancy.uid, + param=project.structures['lbco'].atom_sites['La'].occupancy, ) project.analysis.aliases.create( label='occ_Ba', - param_uid=project.structures['lbco'].atom_sites['Ba'].occupancy.uid, + param=project.structures['lbco'].atom_sites['Ba'].occupancy, ) # %% [markdown] diff --git a/docs/docs/tutorials/ed-5.py b/docs/docs/tutorials/ed-5.py index 74e1d887..c87a964e 100644 --- a/docs/docs/tutorials/ed-5.py +++ b/docs/docs/tutorials/ed-5.py @@ -255,11 +255,11 @@ # %% project.analysis.aliases.create( label='biso_Co1', - param_uid=project.structures['cosio'].atom_sites['Co1'].b_iso.uid, + param=project.structures['cosio'].atom_sites['Co1'].b_iso, ) project.analysis.aliases.create( label='biso_Co2', - param_uid=project.structures['cosio'].atom_sites['Co2'].b_iso.uid, + param=project.structures['cosio'].atom_sites['Co2'].b_iso, ) # %% [markdown] diff --git a/src/easydiffraction/analysis/categories/aliases/default.py b/src/easydiffraction/analysis/categories/aliases/default.py index 7b1e0df0..ecc4a1c3 100644 --- a/src/easydiffraction/analysis/categories/aliases/default.py +++ b/src/easydiffraction/analysis/categories/aliases/default.py @@ -1,10 +1,12 @@ # SPDX-FileCopyrightText: 2026 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause """ -Alias category for mapping friendly names to parameter UIDs. +Alias category for mapping friendly names to parameters. Defines a small record type used by analysis configuration to refer to -parameters via readable labels instead of raw unique identifiers. +parameters via readable labels instead of opaque identifiers. At runtime +each alias holds a direct object reference to the parameter; for CIF +serialization the parameter's ``unique_name`` is stored. """ from __future__ import annotations @@ -23,8 +25,9 @@ class Alias(CategoryItem): """ Single alias entry. - Maps a human-readable ``label`` to a concrete ``param_uid`` used by - the engine. + Maps a human-readable ``label`` to a parameter object. The + ``param_unique_name`` descriptor stores the parameter's + ``unique_name`` for CIF serialization. """ def __init__(self) -> None: @@ -32,23 +35,27 @@ def __init__(self) -> None: self._label = StringDescriptor( name='label', - description='...', # TODO + description='Human-readable alias for a parameter.', value_spec=AttributeSpec( default='_', # TODO, Maybe None? validator=RegexValidator(pattern=r'^[A-Za-z_][A-Za-z0-9_]*$'), ), cif_handler=CifHandler(names=['_alias.label']), ) - self._param_uid = StringDescriptor( - name='param_uid', - description='...', # TODO + self._param_unique_name = StringDescriptor( + name='param_unique_name', + description='Unique name of the referenced parameter.', value_spec=AttributeSpec( default='_', - validator=RegexValidator(pattern=r'^[A-Za-z_][A-Za-z0-9_]*$'), + validator=RegexValidator(pattern=r'^[A-Za-z_][A-Za-z0-9_.]*$'), ), - cif_handler=CifHandler(names=['_alias.param_uid']), + cif_handler=CifHandler(names=['_alias.param_unique_name']), ) + # Direct reference to the Parameter object (runtime only). + # Stored via object.__setattr__ to avoid parent-chain mutation. + object.__setattr__(self, '_param_ref', None) + self._identity.category_code = 'alias' self._identity.category_entry_name = lambda: str(self.label.value) @@ -59,7 +66,7 @@ def __init__(self) -> None: @property def label(self) -> StringDescriptor: """ - ... + Human-readable alias label (e.g. ``'biso_La'``). Reading this property returns the underlying ``StringDescriptor`` object. Assigning to it updates the @@ -72,19 +79,38 @@ def label(self, value: str) -> None: self._label.value = value @property - def param_uid(self) -> StringDescriptor: + def param(self) -> object | None: + """ + The referenced parameter object, or None before resolution. + """ + return self._param_ref + + @property + def param_unique_name(self) -> StringDescriptor: """ - ... + Unique name of the referenced parameter (for CIF). Reading this property returns the underlying - ``StringDescriptor`` object. Assigning to it updates the - parameter value. + ``StringDescriptor`` object. """ - return self._param_uid + return self._param_unique_name - @param_uid.setter - def param_uid(self, value: str) -> None: - self._param_uid.value = value + def _set_param(self, param: object) -> None: + """ + Store a direct reference to the parameter. + + Also updates ``param_unique_name`` from the parameter's + ``unique_name`` for CIF round-tripping. + """ + object.__setattr__(self, '_param_ref', param) + self._param_unique_name.value = param.unique_name + + @property + def parameters(self) -> list: + """ + Descriptors owned by this alias (excludes the param reference). + """ + return [self._label, self._param_unique_name] @AliasesFactory.register @@ -99,3 +125,20 @@ class Aliases(CategoryCollection): def __init__(self) -> None: """Create an empty collection of aliases.""" super().__init__(item_type=Alias) + + def create(self, *, label: str, param: object) -> None: + """ + Create a new alias mapping a label to a parameter. + + Parameters + ---------- + label : str + Human-readable alias name (e.g. ``'biso_La'``). + param : object + The parameter object to reference. + """ + item = Alias() + item.label = label + item._set_param(param) + self.add(item) + diff --git a/src/easydiffraction/core/singleton.py b/src/easydiffraction/core/singleton.py index 9d8a1d89..a4ac6b28 100644 --- a/src/easydiffraction/core/singleton.py +++ b/src/easydiffraction/core/singleton.py @@ -3,12 +3,9 @@ from typing import Any from typing import Self -from typing import TypeVar from asteval import Interpreter -T = TypeVar('T', bound='SingletonBase') - # ====================================================================== @@ -33,54 +30,6 @@ def get(cls) -> Self: # ====================================================================== -class UidMapHandler(SingletonBase): - """Global handler to manage UID-to-Parameter object mapping.""" - - def __init__(self) -> None: - # Internal map: uid (str) → Parameter instance - self._uid_map: dict[str, Any] = {} - - def get_uid_map(self) -> dict[str, Any]: - """Return the current UID-to-Parameter map.""" - return self._uid_map - - def add_to_uid_map(self, parameter: object) -> None: - """ - Add a single Parameter or Descriptor object to the UID map. - - Only Descriptor or Parameter instances are allowed (not - Components or others). - """ - from easydiffraction.core.variable import GenericDescriptorBase # noqa: PLC0415 - - if not isinstance(parameter, GenericDescriptorBase): - msg = ( - f'Cannot add object of type {type(parameter).__name__} to UID map. ' - 'Only Descriptor or Parameter instances are allowed.' - ) - raise TypeError(msg) - self._uid_map[parameter.uid] = parameter - - def replace_uid(self, old_uid: str, new_uid: str) -> None: - """ - Replace an existing UID key in the UID map with a new UID. - - Moves the associated parameter from old_uid to new_uid. Raises a - KeyError if the old_uid doesn't exist. - """ - if old_uid not in self._uid_map: - # Only raise if old_uid is not None and not empty - print('DEBUG: replace_uid failed', old_uid, 'current map:', list(self._uid_map.keys())) - msg = f"UID '{old_uid}' not found in the UID map." - raise KeyError(msg) - self._uid_map[new_uid] = self._uid_map.pop(old_uid) - - # TODO: Implement removing from the UID map - - -# ====================================================================== - - # TODO: Implement changing atrr '.constrained' back to False # when removing constraints class ConstraintsHandler(SingletonBase): @@ -94,7 +43,7 @@ class ConstraintsHandler(SingletonBase): def __init__(self) -> None: # Maps alias names - # (like 'biso_La') → ConstraintAlias(param=Parameter) + # (like 'biso_La') → Alias(param=Parameter) self._alias_to_param: dict[str, Any] = {} # Stores raw user-defined constraints indexed by lhs_alias @@ -106,7 +55,7 @@ def __init__(self) -> None: def set_aliases(self, aliases: object) -> None: """ - Set the alias map (name → parameter wrapper). + Set the alias map (name → alias wrapper). Called when user registers parameter aliases like: alias='biso_La', param=model.atom_sites['La'].b_iso @@ -137,25 +86,21 @@ def _parse_constraints(self) -> None: def apply(self) -> None: """ - Evaluate constraints and applies them to dependent parameters. + Evaluate constraints and apply them to dependent parameters. - For each constraint: - Evaluate RHS using current values of - aliases - Locate the dependent parameter by alias → uid → param + For each constraint: + - Evaluate RHS using current values of aliased parameters + - Locate the dependent parameter via direct alias reference - Update its value and mark it as constrained """ if not self._parsed_constraints: return # Nothing to apply - # Retrieve global UID → Parameter object map - uid_map = UidMapHandler.get().get_uid_map() - # Prepare a flat dict of {alias: value} for use in expressions param_values = {} for alias, alias_obj in self._alias_to_param.items(): - uid = alias_obj.param_uid.value - param = uid_map[uid] - value = param.value - param_values[alias] = value + param = alias_obj.param + param_values[alias] = param.value # Create an asteval interpreter for safe expression evaluation ae = Interpreter() @@ -167,8 +112,7 @@ def apply(self) -> None: rhs_value = ae(rhs_expr) # Get the actual parameter object we want to update - dependent_uid = self._alias_to_param[lhs_alias].param_uid.value - param = uid_map[dependent_uid] + param = self._alias_to_param[lhs_alias].param # Update its value and mark it as constrained param._set_value_constrained(rhs_value) diff --git a/src/easydiffraction/core/variable.py b/src/easydiffraction/core/variable.py index 2acce18d..9d18f455 100644 --- a/src/easydiffraction/core/variable.py +++ b/src/easydiffraction/core/variable.py @@ -3,15 +3,12 @@ from __future__ import annotations -import secrets -import string from typing import TYPE_CHECKING import numpy as np from easydiffraction.core.diagnostic import Diagnostics from easydiffraction.core.guard import GuardedBase -from easydiffraction.core.singleton import UidMapHandler from easydiffraction.core.validation import AttributeSpec from easydiffraction.core.validation import DataTypes from easydiffraction.core.validation import RangeValidator @@ -287,9 +284,6 @@ def __init__( self._constrained_spec = self._BOOL_SPEC_TEMPLATE self._constrained = self._constrained_spec.default - self._uid: str = self._generate_uid() - UidMapHandler.get().add_to_uid_map(self) - def __str__(self) -> str: """Return string representation with uncertainty and free.""" s = GenericDescriptorBase.__str__(self) @@ -301,21 +295,10 @@ def __str__(self) -> str: s += f' (free={self.free})' return f'<{s}>' - @staticmethod - def _generate_uid(length: int = 16) -> str: - letters = string.ascii_lowercase - return ''.join(secrets.choice(letters) for _ in range(length)) - - @property - def uid(self) -> str: - """Stable random identifier for this descriptor.""" - return self._uid - @property def _minimizer_uid(self) -> str: - """Variant of uid that is safe for minimizer engines.""" - # return self.unique_name.replace('.', '__') - return self.uid + """Variant of unique_name that is safe for minimizer engines.""" + return self.unique_name.replace('.', '__') @property def constrained(self) -> bool: diff --git a/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py b/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py index 7a98c15d..ee3a984b 100644 --- a/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py +++ b/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py @@ -277,19 +277,19 @@ def test_single_fit_neutron_pd_cwl_lbco_with_constraints() -> None: # Set aliases for parameters project.analysis.aliases.create( label='biso_La', - param_uid=atom_sites['La'].b_iso.uid, + param=atom_sites['La'].b_iso, ) project.analysis.aliases.create( label='biso_Ba', - param_uid=atom_sites['Ba'].b_iso.uid, + param=atom_sites['Ba'].b_iso, ) project.analysis.aliases.create( label='occ_La', - param_uid=atom_sites['La'].occupancy.uid, + param=atom_sites['La'].occupancy, ) project.analysis.aliases.create( label='occ_Ba', - param_uid=atom_sites['Ba'].occupancy.uid, + param=atom_sites['Ba'].occupancy, ) # Set constraints diff --git a/tests/unit/easydiffraction/analysis/categories/test_aliases.py b/tests/unit/easydiffraction/analysis/categories/test_aliases.py index 2545218a..5efd265c 100644 --- a/tests/unit/easydiffraction/analysis/categories/test_aliases.py +++ b/tests/unit/easydiffraction/analysis/categories/test_aliases.py @@ -3,15 +3,25 @@ from easydiffraction.analysis.categories.aliases import Alias from easydiffraction.analysis.categories.aliases import Aliases +from easydiffraction.core.validation import AttributeSpec +from easydiffraction.core.variable import Parameter +from easydiffraction.io.cif.handler import CifHandler def test_alias_creation_and_collection(): + p1 = Parameter( + name='b_iso', + value_spec=AttributeSpec(default=0.5), + cif_handler=CifHandler(names=['_atom_site.b_iso']), + ) a = Alias() a.label = 'x' - a.param_uid = 'p1' + a._set_param(p1) assert a.label.value == 'x' + assert a.param is p1 coll = Aliases() - coll.create(label='x', param_uid='p1') + coll.create(label='x', param=p1) # Collections index by entry name; check via names or direct indexing assert 'x' in coll.names - assert coll['x'].param_uid.value == 'p1' + assert coll['x'].param is p1 + assert coll['x'].param_unique_name.value == p1.unique_name diff --git a/tests/unit/easydiffraction/core/test_singletons.py b/tests/unit/easydiffraction/core/test_singletons.py index ba69f07a..a68f76d8 100644 --- a/tests/unit/easydiffraction/core/test_singletons.py +++ b/tests/unit/easydiffraction/core/test_singletons.py @@ -1,12 +1,10 @@ # SPDX-FileCopyrightText: 2025 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause -import pytest +from easydiffraction.core.singleton import ConstraintsHandler -def test_uid_map_handler_rejects_non_descriptor(): - from easydiffraction.core.singleton import UidMapHandler - - h = UidMapHandler.get() - with pytest.raises(TypeError): - h.add_to_uid_map(object()) +def test_constraints_handler_is_singleton(): + h1 = ConstraintsHandler.get() + h2 = ConstraintsHandler.get() + assert h1 is h2 From eb1202f31e72991e8079f8d88fe6ffdbafa36be3 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Thu, 2 Apr 2026 22:22:23 +0200 Subject: [PATCH 02/51] Auto-enable constraints on create, add enable/disable API --- docs/docs/tutorials/ed-17.py | 5 ---- docs/docs/tutorials/ed-3.py | 19 +------------ docs/docs/tutorials/ed-5.py | 5 ---- src/easydiffraction/analysis/analysis.py | 28 ++++++++----------- .../analysis/categories/aliases/default.py | 5 ++-- .../categories/constraints/default.py | 24 +++++++++++----- src/easydiffraction/core/variable.py | 13 ++++++--- ..._powder-diffraction_constant-wavelength.py | 3 -- 8 files changed, 40 insertions(+), 62 deletions(-) diff --git a/docs/docs/tutorials/ed-17.py b/docs/docs/tutorials/ed-17.py index 6e2a50ee..68be3ff0 100644 --- a/docs/docs/tutorials/ed-17.py +++ b/docs/docs/tutorials/ed-17.py @@ -265,11 +265,6 @@ expression='biso_Co2 = biso_Co1', ) -# %% [markdown] -# Apply constraints. - -# %% -project.analysis.apply_constraints() # %% [markdown] # #### Set Fit Mode diff --git a/docs/docs/tutorials/ed-3.py b/docs/docs/tutorials/ed-3.py index 5404f847..1a79d789 100644 --- a/docs/docs/tutorials/ed-3.py +++ b/docs/docs/tutorials/ed-3.py @@ -587,19 +587,7 @@ project.analysis.show_constraints() # %% [markdown] -# Show free parameters before applying constraints. - -# %% -project.analysis.show_free_params() - -# %% [markdown] -# Apply constraints. - -# %% -project.analysis.apply_constraints() - -# %% [markdown] -# Show free parameters after applying constraints. +# Show free parameters. # %% project.analysis.show_free_params() @@ -657,11 +645,6 @@ # %% project.analysis.show_constraints() -# %% [markdown] -# Apply constraints. - -# %% -project.analysis.apply_constraints() # %% [markdown] # Set structure parameters to be refined. diff --git a/docs/docs/tutorials/ed-5.py b/docs/docs/tutorials/ed-5.py index c87a964e..4e41a905 100644 --- a/docs/docs/tutorials/ed-5.py +++ b/docs/docs/tutorials/ed-5.py @@ -270,11 +270,6 @@ expression='biso_Co2 = biso_Co1', ) -# %% [markdown] -# Apply constraints. - -# %% -project.analysis.apply_constraints() # %% [markdown] # #### Run Fitting diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index c21db3e6..3a675381 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -563,16 +563,7 @@ def show_constraints(self) -> None: columns_alignment=['left'], columns_data=rows, ) - - def apply_constraints(self) -> None: - """Apply currently defined constraints to the project.""" - if not self.constraints._items: - log.warning('No constraints defined.') - return - - self.constraints_handler.set_aliases(self.aliases) - self.constraints_handler.set_constraints(self.constraints) - self.constraints_handler.apply() + console.print(f'Constraints enabled: {self.constraints.enabled}') def fit(self, verbosity: str | None = None) -> None: """ @@ -616,6 +607,11 @@ def fit(self, verbosity: str | None = None) -> None: log.warning('No experiments found in the project. Cannot run fit.') return + # Apply constraints before fitting so that constrained + # parameters are marked and excluded from the free parameter + # list built by the fitter. + self._update_categories() + # Run the fitting process mode = FitModeEnum(self._fit_mode.mode.value) if mode is FitModeEnum.JOINT: @@ -762,16 +758,14 @@ def _update_categories(self, called_by_minimizer: bool = False) -> None: called_by_minimizer : bool, default=False Whether this is called during fitting. """ + del called_by_minimizer + # Apply constraints to sync dependent parameters - if self.constraints._items: + if self.constraints.enabled and self.constraints._items: + self.constraints_handler.set_aliases(self.aliases) + self.constraints_handler.set_constraints(self.constraints) self.constraints_handler.apply() - # Update category-specific logic - # TODO: Need self.categories as in the case of datablock.py - for category in [self.aliases, self.constraints]: - if hasattr(category, '_update'): - category._update(called_by_minimizer=called_by_minimizer) - def as_cif(self) -> str: """ Serialize the analysis section to a CIF string. diff --git a/src/easydiffraction/analysis/categories/aliases/default.py b/src/easydiffraction/analysis/categories/aliases/default.py index ecc4a1c3..8aac2cdc 100644 --- a/src/easydiffraction/analysis/categories/aliases/default.py +++ b/src/easydiffraction/analysis/categories/aliases/default.py @@ -54,7 +54,7 @@ def __init__(self) -> None: # Direct reference to the Parameter object (runtime only). # Stored via object.__setattr__ to avoid parent-chain mutation. - object.__setattr__(self, '_param_ref', None) + object.__setattr__(self, '_param_ref', None) # noqa: PLC2801 self._identity.category_code = 'alias' self._identity.category_entry_name = lambda: str(self.label.value) @@ -102,7 +102,7 @@ def _set_param(self, param: object) -> None: Also updates ``param_unique_name`` from the parameter's ``unique_name`` for CIF round-tripping. """ - object.__setattr__(self, '_param_ref', param) + object.__setattr__(self, '_param_ref', param) # noqa: PLC2801 self._param_unique_name.value = param.unique_name @property @@ -141,4 +141,3 @@ def create(self, *, label: str, param: object) -> None: item.label = label item._set_param(param) self.add(item) - diff --git a/src/easydiffraction/analysis/categories/constraints/default.py b/src/easydiffraction/analysis/categories/constraints/default.py index 3bb1b77e..63a4264d 100644 --- a/src/easydiffraction/analysis/categories/constraints/default.py +++ b/src/easydiffraction/analysis/categories/constraints/default.py @@ -14,7 +14,6 @@ from easydiffraction.core.category import CategoryCollection from easydiffraction.core.category import CategoryItem from easydiffraction.core.metadata import TypeInfo -from easydiffraction.core.singleton import ConstraintsHandler from easydiffraction.core.validation import AttributeSpec from easydiffraction.core.validation import RegexValidator from easydiffraction.core.variable import StringDescriptor @@ -102,11 +101,27 @@ class Constraints(CategoryCollection): def __init__(self) -> None: """Create an empty constraints collection.""" super().__init__(item_type=Constraint) + self._enabled: bool = False + + @property + def enabled(self) -> bool: + """Whether constraints are currently active.""" + return self._enabled + + def enable(self) -> None: + """Activate constraints so they are applied during fitting.""" + self._enabled = True + + def disable(self) -> None: + """Deactivate constraints without deleting them.""" + self._enabled = False def create(self, *, expression: str) -> None: """ Create a constraint from an expression string. + Automatically enables constraints on the first call. + Parameters ---------- expression : str @@ -116,9 +131,4 @@ def create(self, *, expression: str) -> None: item = Constraint() item.expression = expression self.add(item) - - def _update(self, called_by_minimizer: bool = False) -> None: - del called_by_minimizer - - constraints = ConstraintsHandler.get() - constraints.apply() + self._enabled = True diff --git a/src/easydiffraction/core/variable.py b/src/easydiffraction/core/variable.py index 9d18f455..6d987fd1 100644 --- a/src/easydiffraction/core/variable.py +++ b/src/easydiffraction/core/variable.py @@ -309,12 +309,17 @@ def _set_value_constrained(self, v: object) -> None: """ Set the value from a constraint expression. - Validates against the spec, marks the parent datablock dirty, - and flags the parameter as constrained. Used exclusively by - ``ConstraintsHandler.apply()``. + Bypasses validation and marks the parent datablock dirty, like + ``_set_value_from_minimizer``, because constraints are applied + inside the minimizer loop where trial values may exceed + physical-range validators. Flags the parameter as constrained. + Used exclusively by ``ConstraintsHandler.apply()``. """ - self.value = v + self._value = v self._constrained = True + parent_datablock = self._datablock_item() + if parent_datablock is not None: + parent_datablock._need_categories_update = True @property def free(self) -> bool: diff --git a/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py b/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py index ee3a984b..2184fc07 100644 --- a/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py +++ b/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py @@ -296,9 +296,6 @@ def test_single_fit_neutron_pd_cwl_lbco_with_constraints() -> None: project.analysis.constraints.create(expression='biso_Ba = biso_La') project.analysis.constraints.create(expression='occ_Ba = 1 - occ_La') - # Apply constraints - project.analysis.apply_constraints() - # Perform fit project.analysis.fit() From 0a0385cef3dfa77c3f15452f2f6c1bab528f7f43 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 08:33:45 +0200 Subject: [PATCH 03/51] Implement Project.load() from CIF directory --- docs/architecture/architecture.md | 2 +- docs/architecture/issues_closed.md | 26 ++ docs/architecture/issues_open.md | 60 ++--- .../architecture/sequential_fitting_design.md | 222 ++++++++-------- src/easydiffraction/io/cif/serialize.py | 165 +++++++++++- src/easydiffraction/project/project.py | 122 ++++++++- .../integration/fitting/test_project_load.py | 238 ++++++++++++++++++ .../project/test_project_load.py | 142 +++++++++++ .../test_project_load_and_summary_wrap.py | 22 +- 9 files changed, 816 insertions(+), 183 deletions(-) create mode 100644 tests/integration/fitting/test_project_load.py create mode 100644 tests/unit/easydiffraction/project/test_project_load.py diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md index f4c6fc52..d9827c07 100644 --- a/docs/architecture/architecture.md +++ b/docs/architecture/architecture.md @@ -188,7 +188,7 @@ GuardedBase └── GenericDescriptorBase # name, value (validated via AttributeSpec), description ├── GenericStringDescriptor # _value_type = DataTypes.STRING └── GenericNumericDescriptor # _value_type = DataTypes.NUMERIC, + units - └── GenericParameter # + free, uncertainty, fit_min, fit_max, constrained, uid + └── GenericParameter # + free, uncertainty, fit_min, fit_max, constrained ``` CIF-bound concrete classes add a `CifHandler` for serialisation: diff --git a/docs/architecture/issues_closed.md b/docs/architecture/issues_closed.md index a67edcbe..d6e14219 100644 --- a/docs/architecture/issues_closed.md +++ b/docs/architecture/issues_closed.md @@ -4,6 +4,32 @@ Issues that have been fully resolved. Kept for historical reference. --- +## Replace UID Map with Direct References and Auto-Apply Constraints + +**Resolution:** eliminated `UidMapHandler` and random UID generation +from parameters entirely. Aliases now store a direct object reference to +the parameter (`Alias._param_ref`) instead of a random UID string. +`ConstraintsHandler.apply()` uses the direct reference — no map lookup. +For CIF serialisation, `Alias._param_unique_name` stores the parameter's +deterministic `unique_name`. `_minimizer_uid` now returns +`unique_name.replace('.', '__')` instead of a random string. + +Also added `enable()`/`disable()` on `Constraints` with auto-enable on +`create()`, replacing the manual `apply_constraints()` call. +`Analysis._update_categories()` now always syncs handler state from the +current aliases and constraints when `constraints.enabled` is `True`, +eliminating stale-state bugs (former issue #4). `_set_value_constrained` +bypasses validation like `_set_value_from_minimizer` since constraints +run inside the minimiser loop. `Analysis.fit()` calls +`_update_categories()` before collecting free parameters so that +constrained parameters are correctly excluded. + +API change: `aliases.create(label=..., param_uid=...uid)` → +`aliases.create(label=..., param=...)`. `apply_constraints()` removed; +`constraints.create()` auto-enables. + +--- + ## Dirty-Flag Guard Was Disabled **Resolution:** added `_set_value_from_minimizer()` on diff --git a/docs/architecture/issues_open.md b/docs/architecture/issues_open.md index 05ed175f..b721a2b2 100644 --- a/docs/architecture/issues_open.md +++ b/docs/architecture/issues_open.md @@ -83,31 +83,6 @@ exactly match `project.experiments.names`. --- -## 4. 🔴 Refresh Constraint State Before Automatic Updates and Fitting - -**Type:** Correctness - -`ConstraintsHandler` is only synchronised from `analysis.aliases` and -`analysis.constraints` when the user explicitly calls -`project.analysis.apply_constraints()`. The normal fit / serialisation -path calls `constraints_handler.apply()` directly, so newly added or -edited aliases and constraints can be ignored until that manual sync -step happens. - -**Why high:** this produces silently incorrect results. A user can -define constraints, run a fit, and believe they were applied when the -active singleton still contains stale state from a previous run or no -state at all. - -**Fix:** before any automatic constraint application, always refresh the -singleton from the current `Aliases` and `Constraints` collections. The -sync should happen inside `Analysis._update_categories()` or inside the -constraints category itself, not only in a user-facing helper method. - -**Depends on:** nothing. - ---- - ## 5. 🟡 Make `Analysis` a `DatablockItem` **Type:** Consistency @@ -339,21 +314,20 @@ re-derivable default. ## Summary -| # | Issue | Severity | Type | -| --- | ------------------------------------------ | -------- | ----------------------- | -| 1 | Implement `Project.load()` | 🔴 High | Completeness | -| 2 | Restore minimiser variants | 🟡 Med | Feature loss | -| 3 | Rebuild joint-fit weights | 🟡 Med | Fragility | -| 4 | Refresh constraint state before auto-apply | 🔴 High | Correctness | -| 5 | `Analysis` as `DatablockItem` | 🟡 Med | Consistency | -| 6 | Restrict `data_type` switching | 🔴 High | Correctness/Data safety | -| 7 | Eliminate dummy `Experiments` | 🟡 Med | Fragility | -| 8 | Explicit `create()` signatures | 🟡 Med | API safety | -| 9 | Future enum extensions | 🟢 Low | Design | -| 10 | Unify update orchestration | 🟢 Low | Maintainability | -| 11 | Document `_update` contract | 🟢 Low | Maintainability | -| 12 | CIF round-trip integration test | 🟢 Low | Quality | -| 13 | Suppress redundant dirty-flag sets | 🟢 Low | Performance | -| 14 | Finer-grained change tracking | 🟢 Low | Performance | -| 15 | Validate joint-fit weights | 🟡 Med | Correctness | -| 16 | Persist per-experiment `calculator_type` | 🟡 Med | Completeness | +| # | Issue | Severity | Type | +| --- | ---------------------------------------- | -------- | ----------------------- | +| 1 | Implement `Project.load()` | 🔴 High | Completeness | +| 2 | Restore minimiser variants | 🟡 Med | Feature loss | +| 3 | Rebuild joint-fit weights | 🟡 Med | Fragility | +| 5 | `Analysis` as `DatablockItem` | 🟡 Med | Consistency | +| 6 | Restrict `data_type` switching | 🔴 High | Correctness/Data safety | +| 7 | Eliminate dummy `Experiments` | 🟡 Med | Fragility | +| 8 | Explicit `create()` signatures | 🟡 Med | API safety | +| 9 | Future enum extensions | 🟢 Low | Design | +| 10 | Unify update orchestration | 🟢 Low | Maintainability | +| 11 | Document `_update` contract | 🟢 Low | Maintainability | +| 12 | CIF round-trip integration test | 🟢 Low | Quality | +| 13 | Suppress redundant dirty-flag sets | 🟢 Low | Performance | +| 14 | Finer-grained change tracking | 🟢 Low | Performance | +| 15 | Validate joint-fit weights | 🟡 Med | Correctness | +| 16 | Persist per-experiment `calculator_type` | 🟡 Med | Completeness | diff --git a/docs/architecture/sequential_fitting_design.md b/docs/architecture/sequential_fitting_design.md index fd10a104..16f01634 100644 --- a/docs/architecture/sequential_fitting_design.md +++ b/docs/architecture/sequential_fitting_design.md @@ -845,10 +845,11 @@ here. `analysis/` directory. All analysis artifacts (settings + results) live under one directory. See § 5.4 and § 9.6. -11. **Singletons (`UidMapHandler`, `ConstraintsHandler`)** → replace - with instance-owned state on `Project` and `Analysis`. Fixes - notebook rerun issues, simplifies worker isolation, resolves issue - #4. See § 9.5. +11. **Singletons (`UidMapHandler`, `ConstraintsHandler`)** → + `UidMapHandler` eliminated (aliases use direct references + + `unique_name`). `ConstraintsHandler` stays singleton but is now + always synced before use. Fixes notebook rerun issues, resolves + issue #4. See § 9.5. --- @@ -857,19 +858,16 @@ here. These changes are needed before implementing `fit_sequential()` itself. Each is a separate, atomic change. -### 9.1 Switch alias `param_uid` to `param_unique_name` +### 9.1 Switch alias `param_uid` to `param_unique_name` ✅ -The `Alias` category currently stores `param_uid` (random UID). Change -to `param_unique_name` (deterministic `unique_name`). Update: - -- `Alias._param_uid` → `Alias._param_unique_name` -- `CifHandler(names=['_alias.param_uid'])` → - `CifHandler(names=['_alias.param_unique_name'])` -- `ConstraintsHandler` to resolve via `unique_name` lookup instead of - UID lookup. -- `UidMapHandler` — may no longer be needed for constraint resolution - (but still used for other purposes). -- Tutorial `ed-17.py` and any tests that create aliases. +**Done.** Went further than planned: eliminated `UidMapHandler` and +random UIDs entirely. Aliases now store a direct object reference to the +parameter (`Alias._param_ref`, runtime) plus `Alias._param_unique_name` +(`StringDescriptor`, CIF serialisation with tag +`_alias.param_unique_name`). `ConstraintsHandler.apply()` uses the +direct reference — no map lookup needed. `_minimizer_uid` returns +`unique_name.replace('.', '__')` instead of a random string. All +tutorials, tests, and call sites updated. ### 9.2 Fix `category_collection_to_cif` truncation @@ -901,7 +899,7 @@ Currently extracts to a temp dir. Add optional `destination` parameter to extract to a user-specified directory, enabling a clean two-step workflow (extract → fit_sequential). -### 9.5 Replace singletons with instance-owned state +### 9.5 Replace singletons with instance-owned state (partially done) #### Problem @@ -927,53 +925,34 @@ workflow (extract → fit_sequential). the same session (e.g. to compare fits), their constraints and UID maps collide in the shared singleton. -#### Proposed fix - -Move the state owned by singletons into `Analysis` (for constraints) and -`Project` (for the UID map): - -| Current singleton | New owner | Lifetime | -| -------------------- | ------------------------------ | ----------------------- | -| `ConstraintsHandler` | `Analysis._constraints_engine` | Per-`Analysis` instance | -| `UidMapHandler` | `Project._uid_map` | Per-`Project` instance | - -The objects are the same classes, just no longer singletons — they are -instantiated in `__init__` and passed explicitly to the components that -need them (e.g. `Parameter.__init__` receives a `uid_map` reference from -its owning project, `ConstraintsHandler` is accessed via -`self.project.analysis._constraints_engine`). - -#### Impact on sequential fitting +#### Current status -- **Simplifies workers:** each worker's `Project()` naturally creates - its own `_uid_map` and `_constraints_engine`. No singleton isolation - concern at all. -- **Simplifies crash recovery and notebook reruns:** creating a new - `Project` starts with a blank slate, no stale state leaks. -- **No impact on the `fit_sequential` API** — the change is purely - internal. +**`UidMapHandler`: eliminated entirely.** Random UIDs and the global +UID-to-Parameter map have been removed. Aliases store direct object +references at runtime and deterministic `unique_name` strings for CIF +serialisation. This fully resolves problems 1–3 for the UID map. -#### Scope and sequencing +**`ConstraintsHandler`: stale-state bug fixed, still a singleton.** +`Analysis._update_categories()` now always syncs the handler from the +current `aliases` and `constraints` before calling `apply()`. This +resolves problem 1 (notebook reruns) and problem 2 (worker isolation is +natural with `spawn`). Problem 3 (multiple projects) remains theoretical +— if multi-project support becomes a real need, moving +`ConstraintsHandler` to instance scope is a standalone follow-up. -This is a self-contained refactor that can be done independently of -sequential fitting. It improves correctness for existing workflows -(notebook reruns, issue #4) and simplifies the sequential fitting -implementation. It is listed as a prerequisite because it eliminates a -class of bugs that would otherwise need workaround code in the worker. +#### Remaining work (optional) -However, if the refactor proves too large for the initial sequential -fitting work, the `spawn`-based multiprocessing provides natural -isolation and the singletons can be addressed in a follow-up. The -sequential fitting design does **not** depend on this change — it works -either way. +Move `ConstraintsHandler` from singleton to per-`Analysis` instance. +This only matters for the multiple-projects edge case. The sequential +fitting design does **not** depend on this change. #### Relationship to issue #4 -Open issue #4 ("Refresh constraint state before auto-apply") is a -symptom of the singleton problem. If constraints are instance-owned, -there is no stale state to refresh — the constraint engine always -reflects the current `Analysis` instance's aliases and constraints. -Fixing the singleton issue resolves issue #4 as a side effect. +Issue #4 ("Refresh constraint state before auto-apply") is **fully +resolved.** `_update_categories()` syncs handler state on every call. +Constraints auto-enable on `create()` and are applied before fitting +starts. The manual `apply_constraints()` method has been removed. Fixing +the singleton issue resolves issue #4 as a side effect. ### 9.6 Move `analysis.cif` into `analysis/` directory @@ -1017,30 +996,28 @@ Fixing it now gives the worker a clean `Fitter.fit(structures, [experiment])` call without any collection ceremony. -#### PR 2 — Replace singletons with instance-owned state (issue #4 + § 9.5) +#### PR 2 — Replace UID map with direct references and auto-apply constraints (issue #4 + § 9.5) ✅ > **Title:** -> `Move ConstraintsHandler and UidMapHandler to instance scope` +> `Replace UID map with direct references and auto-apply constraints` > -> **Description:** Replace the `SingletonBase` pattern for -> `ConstraintsHandler` and `UidMapHandler` with per-project instances. -> `Project.__init__` creates `_uid_map`; `Analysis.__init__` creates -> `_constraints_engine`. Thread the references through to `Parameter` -> and constraint resolution. Remove `SingletonBase` class if no longer -> used. Update all call sites that use `.get()`. This also fixes issue -> #4 (stale constraint state) as a side effect — the constraint engine -> is always in sync with its owning `Analysis`. - -**Why second:** removes the global mutable state that makes notebook -reruns unreliable and multi-project sessions impossible. Sequential -fitting workers benefit from natural isolation (each `Project()` has its -own engine), but the main benefit is correctness for existing workflows. - -This is a sub-step breakdown if the PR proves too large: - -- **PR 2a:** `Move UidMapHandler to Project instance scope` -- **PR 2b:** `Move ConstraintsHandler to Analysis instance scope` -- **PR 2c:** `Remove SingletonBase if unused` +> **Description:** Eliminated `UidMapHandler` and random UID generation +> entirely. Aliases store direct parameter object references at runtime +> and deterministic `unique_name` strings for CIF. Added +> `enable()`/`disable()` on `Constraints` with auto-enable on +> `create()`, replacing the manual `apply_constraints()` call. +> `Analysis._update_categories()` always syncs handler state when +> constraints are enabled. Also fixes issue #4 (stale constraint state) +> and completes PR 4 (alias `param_unique_name`). + +**Why second:** removes the global UID map that made constraint +resolution opaque and fragile. The stale-state bug (issue #4) is fully +fixed. `ConstraintsHandler` remains a singleton but is now always in +sync — moving it to instance scope is an optional follow-up for the +multi-project edge case. + +This PR also absorbed PR 4 (§ 9.1) since switching from random UIDs to +`unique_name` was a natural part of the same change. #### PR 3 — Implement Project.load() (issue #1) @@ -1061,15 +1038,10 @@ to fix any serialisation gaps before they become worker bugs. Phase 3 ### Sequential-fitting prerequisite PRs -#### PR 4 — Switch alias param_uid to param_unique_name (§ 9.1) +#### PR 4 — Switch alias param_uid to param_unique_name (§ 9.1) ✅ -> **Title:** `Use unique_name instead of random UID in aliases` -> -> **Description:** Rename `Alias._param_uid` to -> `Alias._param_unique_name`. Update `CifHandler` names. Change -> `ConstraintsHandler` to resolve parameters via `unique_name` lookup -> instead of UID. Update `ed-17.py` tutorial and all tests that create -> aliases. +> Absorbed into PR 2. Aliases now use `param_unique_name` with direct +> object references. All tutorials and tests updated. #### PR 5 — Fix CIF collection truncation (§ 9.2) @@ -1186,24 +1158,23 @@ This is a sub-step breakdown if the PR proves too large: ``` PR 1 (issue #7: eliminate dummy Experiments) - └─► PR 2 (issue #4: singletons → instance-owned) + └─► PR 2 (issue #4: UID map + constraints) ✅ └─► PR 3 (issue #1: Project.load) - └─► PR 4 (alias unique_name) - └─► PR 5 (CIF truncation) - └─► PR 6 (CIF round-trip test) - ├─► PR 7 (analysis.cif → analysis/) - │ └─► PR 9 (streaming sequential fit) - │ ├─► PR 10 (plot from CSV) - │ │ └─► PR 13 (CSV for existing fit) - │ └─► PR 11 (parallel fitting) - │ └─► PR 14 (optional: parallel fit()) - └─► PR 8 (zip destination) - └─► PR 12 (dataset replay) + └─► PR 5 (CIF truncation) + └─► PR 6 (CIF round-trip test) + ├─► PR 7 (analysis.cif → analysis/) + │ └─► PR 9 (streaming sequential fit) + │ ├─► PR 10 (plot from CSV) + │ │ └─► PR 13 (CSV for existing fit) + │ └─► PR 11 (parallel fitting) + │ └─► PR 14 (optional: parallel fit()) + └─► PR 8 (zip destination) + └─► PR 12 (dataset replay) ``` -Note: PRs 4–8 are largely independent of each other and can be -parallelised or reordered as long as PRs 1–3 are done first and PRs 4–6 -are done before PR 9. +Note: PR 4 was absorbed into PR 2. PRs 5–8 are largely independent of +each other and can be parallelised or reordered as long as PRs 1–3 are +done first and PRs 5–6 are done before PR 9. --- @@ -1228,31 +1199,38 @@ are all stdlib. - **Issue #7 (dummy Experiments wrapper):** resolved in PR 1. The worker uses the clean `Fitter.fit(structures, [experiment])` API. -- **Issue #4 (constraint refresh) + § 9.5 (singletons):** resolved in - PR 2. Instance-owned constraint engine eliminates stale state. +- **Issue #4 (constraint refresh) + § 9.1 (alias unique_name) + § 9.5 + (singletons):** resolved in PR 2. `UidMapHandler` eliminated; aliases + use direct object references and deterministic `unique_name` for CIF; + `_update_categories()` always syncs handler state; constraints + auto-enable on `create()`. `ConstraintsHandler` remains a singleton + but is always in sync — multi-project isolation is an optional + follow-up. - **Issue #1 (Project.load):** resolved in PR 3. CIF round-trip reliability is proven before workers depend on it. Dataset replay - (PR 12) uses `load()` directly. + (PR 12) uses `load()` directly. Note: `Project.load()` must now + resolve `_alias.param_unique_name` strings back to `Parameter` objects + by building a temporary `unique_name → Parameter` map. --- ## 12. Summary -| Aspect | Decision | -| ------------------- | --------------------------------------------------------------------- | -| Parallelism backend | `concurrent.futures.ProcessPoolExecutor` with `spawn` | -| Worker isolation | Each worker creates a fresh `Project` — no shared state | -| Data source | `data_dir` argument; ZIP → extract first | -| Data flow | Template CIF + data path → worker → result dict → CSV | -| Parameter IDs | `unique_name` (deterministic), not `uid` (random) | -| Parameter seeding | Last successful result in chunk → next chunk | -| CSV location | `project_dir/analysis/results.csv` (deterministic) | -| CSV contents | Fit metrics + diffrn metadata + all free param values/uncert | -| Metadata extraction | User-provided `extract_diffrn` callback, not hidden in lib | -| Crash recovery | Read existing CSV, skip fitted files, resume | -| Plotting | Unified `plot_param_series()` always reads from CSV | -| Configuration | `max_workers` + `data_dir` on `fit_sequential()` | -| Project layout | `analysis.cif` moves into `analysis/` directory | -| Singletons | Replace with instance-owned state (recommended prerequisite) | -| New dependencies | None (stdlib only) | -| First step | PRs 1–3 (foundation issues), then PRs 4–8 (prerequisites), then PR 9+ | +| Aspect | Decision | +| ------------------- | ---------------------------------------------------------------------------------- | +| Parallelism backend | `concurrent.futures.ProcessPoolExecutor` with `spawn` | +| Worker isolation | Each worker creates a fresh `Project` — no shared state | +| Data source | `data_dir` argument; ZIP → extract first | +| Data flow | Template CIF + data path → worker → result dict → CSV | +| Parameter IDs | `unique_name` (deterministic), not `uid` (random) | +| Parameter seeding | Last successful result in chunk → next chunk | +| CSV location | `project_dir/analysis/results.csv` (deterministic) | +| CSV contents | Fit metrics + diffrn metadata + all free param values/uncert | +| Metadata extraction | User-provided `extract_diffrn` callback, not hidden in lib | +| Crash recovery | Read existing CSV, skip fitted files, resume | +| Plotting | Unified `plot_param_series()` always reads from CSV | +| Configuration | `max_workers` + `data_dir` on `fit_sequential()` | +| Project layout | `analysis.cif` moves into `analysis/` directory | +| Singletons | `UidMapHandler` eliminated; `ConstraintsHandler` stays singleton but always synced | +| New dependencies | None (stdlib only) | +| First step | PRs 1–3 (foundation issues), then PRs 4–8 (prerequisites), then PR 9+ | diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index 42a215eb..89655c64 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -35,9 +35,15 @@ def format_value(value: object) -> str: # Converting + # None → CIF unknown marker + if value is None: + value = '?' # Convert ints to floats - if isinstance(value, int): + elif isinstance(value, int): value = float(value) + # Empty strings → CIF unknown marker + elif isinstance(value, str) and not value.strip(): + value = '?' # Strings with whitespace are quoted elif isinstance(value, str) and (' ' in value or '\t' in value): value = f'"{value}"' @@ -83,12 +89,22 @@ def category_item_to_cif(item: object) -> str: def category_collection_to_cif( collection: object, - max_display: int | None = 20, + max_display: int | None = None, ) -> str: """ Render a CategoryCollection-like object to CIF text. - Uses first item to build loop header, then emits rows for each item. + Uses first item to build loop header, then emits rows for each + item. + + Parameters + ---------- + collection : object + A ``CategoryCollection``-like object. + max_display : int | None, default=None + When set to a positive integer, truncate the output to at most + this many rows (half from the start, half from the end) with an + ``...`` separator. ``None`` emits all rows. """ if not len(collection): return '' @@ -104,7 +120,7 @@ def category_collection_to_cif( # Rows # Limit number of displayed rows if requested - if len(collection) > max_display: + if max_display is not None and len(collection) > max_display: half_display = max_display // 2 for i in range(half_display): item = list(collection.values())[i] @@ -161,10 +177,12 @@ def project_info_to_cif(info: object) -> str: if len(info.description) > 60: description = f'\n;\n{info.description}\n;' - else: + elif info.description: description = f'{info.description}' if ' ' in description: description = f"'{description}'" + else: + description = '?' created = f"'{info._created.strftime('%d %b %Y %H:%M:%S')}'" last_modified = f"'{info._last_modified.strftime('%d %b %Y %H:%M:%S')}'" @@ -221,6 +239,135 @@ def summary_to_cif(_summary: object) -> str: return 'To be added...' +def _wrap_in_data_block(cif_text: str, block_name: str = '_') -> str: + """ + Wrap bare CIF key-value pairs in a ``data_`` block header. + + Parameters + ---------- + cif_text : str + CIF text without a ``data_`` header. + block_name : str, default='_' + Name for the CIF data block. + + Returns + ------- + str + CIF text with a ``data_`` header prepended. + """ + return f'data_{block_name}\n\n{cif_text}' + + +def project_info_from_cif(info: object, cif_text: str) -> None: + """ + Populate a ProjectInfo instance from CIF text. + + Reads ``_project.id``, ``_project.title``, and + ``_project.description`` from the given CIF string and sets them on + the *info* object. + + Parameters + ---------- + info : object + The ``ProjectInfo`` instance to populate. + cif_text : str + CIF text content of ``project.cif``. + """ + import gemmi # noqa: PLC0415 + + doc = gemmi.cif.read_string(_wrap_in_data_block(cif_text, 'project')) + block = doc.sole_block() + + _read_cif_string = _make_cif_string_reader(block) + + name = _read_cif_string('_project.id') + if name is not None: + info.name = name + + title = _read_cif_string('_project.title') + if title is not None: + info.title = title + + description = _read_cif_string('_project.description') + if description is not None: + info.description = description + + +def analysis_from_cif(analysis: object, cif_text: str) -> None: + """ + Populate an Analysis instance from CIF text. + + Reads the fitting engine, fit mode, aliases, constraints, and + joint-fit experiment weights from the given CIF string. + + Parameters + ---------- + analysis : object + The ``Analysis`` instance to populate. + cif_text : str + CIF text content of ``analysis.cif``. + """ + import gemmi # noqa: PLC0415 + + doc = gemmi.cif.read_string(_wrap_in_data_block(cif_text, 'analysis')) + block = doc.sole_block() + + _read_cif_string = _make_cif_string_reader(block) + + # Restore minimizer selection + engine = _read_cif_string('_analysis.fitting_engine') + if engine is not None: + from easydiffraction.analysis.fitting import Fitter # noqa: PLC0415 + + analysis.fitter = Fitter(engine) + + # Restore fit mode + analysis.fit_mode.from_cif(block) + + # Restore aliases (loop) + analysis.aliases.from_cif(block) + + # Restore constraints (loop) + analysis.constraints.from_cif(block) + if analysis.constraints._items: + analysis.constraints.enable() + + # Restore joint-fit experiment weights (loop) + analysis._joint_fit_experiments.from_cif(block) + + +def _make_cif_string_reader(block: gemmi.cif.Block) -> object: + """ + Return a helper that reads a single CIF tag as a stripped string. + + Parameters + ---------- + block : gemmi.cif.Block + Parsed CIF data block. + + Returns + ------- + object + A function ``(tag) -> str | None`` that returns the unquoted + value for *tag*, or ``None`` if not found. + """ + + def _read(tag: str) -> str | None: + vals = list(block.find_values(tag)) + if not vals: + return None + raw = vals[0] + # CIF unknown / inapplicable markers + if raw in ('?', '.'): + return None + # Strip surrounding quotes + if len(raw) >= 2 and raw[0] == raw[-1] and raw[0] in {"'", '"'}: + raw = raw[1:-1] + return raw + + return _read + + # TODO: Check the following methods: ###################### @@ -262,6 +409,10 @@ def param_from_cif( # If found, pick the one at the given index raw = found_values[idx] + # CIF unknown / inapplicable markers → keep default + if raw in ('?', '.'): + return + # If numeric, parse with uncertainty if present if self._value_type == DataTypes.NUMERIC: u = str_to_ufloat(raw) @@ -363,6 +514,10 @@ def _get_loop(block: object, category_item: object) -> object | None: # param_from_cif raw = array[row_idx][col_idx] + # CIF unknown / inapplicable markers → keep default + if raw in ('?', '.'): + break + # If numeric, parse with uncertainty if present if param._value_type == DataTypes.NUMERIC: u = str_to_ufloat(raw) diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index 42bb71a0..06e36d14 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -2,6 +2,8 @@ # SPDX-License-Identifier: BSD-3-Clause """Project facade to orchestrate models, experiments, and analysis.""" +from __future__ import annotations + import pathlib import tempfile @@ -32,6 +34,9 @@ class Project(GuardedBase): # ------------------------------------------------------------------ # Initialization # ------------------------------------------------------------------ + # Class-level sentinel: True while load() is constructing a project. + _loading: bool = False + def __init__( self, name: str = 'untitled_project', @@ -48,7 +53,7 @@ def __init__( self._analysis = Analysis(self) self._summary = Summary(self) self._saved = False - self._varname = varname() + self._varname = 'project' if type(self)._loading else varname() self._verbosity: VerbosityEnum = VerbosityEnum.FULL # ------------------------------------------------------------------ @@ -172,15 +177,118 @@ def verbosity(self, value: str) -> None: # Project File I/O # ------------------------------------------ - def load(self, dir_path: str) -> None: + @classmethod + def load(cls, dir_path: str) -> Project: """ - Load a project from a given directory. + Load a project from a saved directory. + + Reads ``project.cif``, ``structures/*.cif``, + ``experiments/*.cif``, and ``analysis.cif`` from *dir_path* and + reconstructs the full project state. + + Parameters + ---------- + dir_path : str + Path to the project directory previously created by + :meth:`save_as`. + + Returns + ------- + Project + A fully reconstructed project instance. - Loads project info, structures, experiments, etc. + Raises + ------ + FileNotFoundError + If *dir_path* does not exist. """ - # TODO: load project components from files inside dir_path - msg = 'Project.load() is not implemented yet.' - raise NotImplementedError(msg) + from easydiffraction.io.cif.serialize import analysis_from_cif # noqa: PLC0415 + from easydiffraction.io.cif.serialize import project_info_from_cif # noqa: PLC0415 + + project_path = pathlib.Path(dir_path) + if not project_path.is_dir(): + msg = f"Project directory not found: '{dir_path}'" + raise FileNotFoundError(msg) + + # Create a minimal project. + # Use _loading sentinel to skip varname() inside __init__. + cls._loading = True + try: + project = cls() + finally: + cls._loading = False + project._saved = True + + # 1. Load project info + project_cif_path = project_path / 'project.cif' + if project_cif_path.is_file(): + cif_text = project_cif_path.read_text() + project_info_from_cif(project._info, cif_text) + + project._info.path = project_path + + # 2. Load structures + structures_dir = project_path / 'structures' + if structures_dir.is_dir(): + for cif_file in sorted(structures_dir.glob('*.cif')): + project._structures.add_from_cif_path(str(cif_file)) + + # 3. Load experiments + experiments_dir = project_path / 'experiments' + if experiments_dir.is_dir(): + for cif_file in sorted(experiments_dir.glob('*.cif')): + project._experiments.add_from_cif_path(str(cif_file)) + + # 4. Load analysis + # Check analysis/analysis.cif first (future layout), then + # fall back to analysis.cif at root (current layout). + analysis_cif_path = project_path / 'analysis' / 'analysis.cif' + if not analysis_cif_path.is_file(): + analysis_cif_path = project_path / 'analysis.cif' + if analysis_cif_path.is_file(): + cif_text = analysis_cif_path.read_text() + analysis_from_cif(project._analysis, cif_text) + + # 5. Resolve alias param references + project._resolve_alias_references() + + # 6. Apply symmetry constraints and update categories + for structure in project._structures: + structure._update_categories() + + log.info(f"Project '{project.name}' loaded from '{dir_path}'.") + return project + + def _resolve_alias_references(self) -> None: + """ + Resolve alias ``param_unique_name`` strings to live objects. + + After loading structures and experiments from CIF, aliases only + contain the ``param_unique_name`` string. This method builds a + ``{unique_name: param}`` map from all project parameters and + wires each alias's ``_param_ref``. + """ + aliases = self._analysis.aliases + if not aliases._items: + return + + # Build unique_name → parameter map + all_params = self._structures.parameters + self._experiments.parameters + param_map: dict[str, object] = {} + for p in all_params: + uname = getattr(p, 'unique_name', None) + if uname is not None: + param_map[uname] = p + + for alias in aliases: + uname = alias.param_unique_name.value + if uname in param_map: + alias._set_param(param_map[uname]) + else: + log.warning( + f"Alias '{alias.label.value}' references unknown " + f"parameter '{uname}'. Reference not resolved." + ) def save(self) -> None: """Save the project into the existing project directory.""" diff --git a/tests/integration/fitting/test_project_load.py b/tests/integration/fitting/test_project_load.py new file mode 100644 index 00000000..3598ed69 --- /dev/null +++ b/tests/integration/fitting/test_project_load.py @@ -0,0 +1,238 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Integration tests for Project save → load round-trip.""" + +from __future__ import annotations + +import tempfile + +from numpy.testing import assert_almost_equal + +from easydiffraction import ExperimentFactory +from easydiffraction import Project +from easydiffraction import StructureFactory +from easydiffraction import download_data + +TEMP_DIR = tempfile.gettempdir() + + +# ------------------------------------------------------------------ +# Helpers +# ------------------------------------------------------------------ + + +def _create_lbco_project() -> Project: + """ + Build a complete LBCO project ready for fitting. + + Returns a project with one structure, one experiment (with data), + instrument settings, peak profile, background, linked phases, free + parameters, aliases, and constraints. + """ + # Structure + model = StructureFactory.from_scratch(name='lbco') + model.space_group.name_h_m = 'P m -3 m' + model.cell.length_a = 3.8909 + model.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.5, + ) + model.atom_sites.create( + label='Ba', + type_symbol='Ba', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.5, + ) + model.atom_sites.create( + label='Co', + type_symbol='Co', + fract_x=0.5, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='b', + b_iso=0.5, + ) + model.atom_sites.create( + label='O', + type_symbol='O', + fract_x=0, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='c', + b_iso=0.5, + ) + + # Experiment + data_path = download_data(id=3, destination=TEMP_DIR) + expt = ExperimentFactory.from_data_path( + name='hrpt', + data_path=data_path, + ) + expt.instrument.setup_wavelength = 1.494 + expt.instrument.calib_twotheta_offset = 0.6225 + expt.peak.broad_gauss_u = 0.0834 + expt.peak.broad_gauss_v = -0.1168 + expt.peak.broad_gauss_w = 0.123 + expt.peak.broad_lorentz_x = 0 + expt.peak.broad_lorentz_y = 0.0797 + expt.background.create(id='1', x=10, y=170) + expt.background.create(id='2', x=165, y=170) + expt.linked_phases.create(id='lbco', scale=9.0) + + # Project assembly + project = Project(name='lbco_project') + project.structures.add(model) + project.experiments.add(expt) + + # Free parameters + model.cell.length_a.free = True + expt.linked_phases['lbco'].scale.free = True + expt.instrument.calib_twotheta_offset.free = True + expt.background['1'].y.free = True + expt.background['2'].y.free = True + + # Aliases and constraints + project.analysis.aliases.create( + label='biso_La', + param=model.atom_sites['La'].b_iso, + ) + project.analysis.aliases.create( + label='biso_Ba', + param=model.atom_sites['Ba'].b_iso, + ) + project.analysis.constraints.create(expression='biso_Ba = biso_La') + + return project + + +def _collect_param_snapshot(project: Project) -> dict[str, float]: + """Return ``{unique_name: value}`` for all project parameters.""" + return {p.unique_name: p.value for p in project.parameters} + + +def _collect_free_flags(project: Project) -> dict[str, bool]: + """Return ``{unique_name: free}`` for fittable parameters.""" + from easydiffraction.core.variable import Parameter # noqa: PLC0415 + + return {p.unique_name: p.free for p in project.parameters if isinstance(p, Parameter)} + + +# ------------------------------------------------------------------ +# Test 1: save → load → compare all parameters +# ------------------------------------------------------------------ + + +def test_save_load_round_trip_preserves_parameters(tmp_path) -> None: + """ + Every parameter value must survive a save → load cycle. + + Also verifies project info, free flags, aliases, and constraints. + """ + original = _create_lbco_project() + original_params = _collect_param_snapshot(original) + original_free = _collect_free_flags(original) + + # Save + proj_dir = str(tmp_path / 'lbco_project') + original.save_as(proj_dir) + + # Load + loaded = Project.load(proj_dir) + + # Compare project info + assert loaded.name == original.name + assert loaded.info.title == original.info.title + + # Compare structures + assert loaded.structures.names == original.structures.names + orig_s = original.structures['lbco'] + load_s = loaded.structures['lbco'] + assert load_s.space_group.name_h_m.value == orig_s.space_group.name_h_m.value + assert_almost_equal(load_s.cell.length_a.value, orig_s.cell.length_a.value, decimal=6) + assert len(load_s.atom_sites) == len(orig_s.atom_sites) + + # Compare experiments + assert loaded.experiments.names == original.experiments.names + + # Compare all parameter values + loaded_params = _collect_param_snapshot(loaded) + for name, orig_val in original_params.items(): + assert name in loaded_params, f'Parameter {name} missing after load' + if isinstance(orig_val, float): + assert_almost_equal( + loaded_params[name], + orig_val, + decimal=6, + err_msg=f'Mismatch for {name}', + ) + else: + assert loaded_params[name] == orig_val, f'Mismatch for {name}' + + # Compare free flags + loaded_free = _collect_free_flags(loaded) + for name, orig_flag in original_free.items(): + if name in loaded_free: + assert loaded_free[name] == orig_flag, ( + f'Free flag mismatch for {name}: expected {orig_flag}, got {loaded_free[name]}' + ) + + # Compare aliases + assert len(loaded.analysis.aliases) == len(original.analysis.aliases) + for orig_alias in original.analysis.aliases: + label = orig_alias.label.value + loaded_alias = loaded.analysis.aliases[label] + assert loaded_alias.param_unique_name.value == orig_alias.param_unique_name.value + assert loaded_alias.param is not None, f"Alias '{label}' param reference not resolved" + + # Compare constraints + assert len(loaded.analysis.constraints) == len(original.analysis.constraints) + for i, orig_c in enumerate(original.analysis.constraints): + assert loaded.analysis.constraints[i].expression.value == orig_c.expression.value + assert loaded.analysis.constraints.enabled is True + + # Compare analysis settings + assert loaded.analysis.current_minimizer == original.analysis.current_minimizer + assert loaded.analysis.fit_mode.mode.value == original.analysis.fit_mode.mode.value + + +# ------------------------------------------------------------------ +# Test 2: create → fit → save → load → fit → compare χ² +# ------------------------------------------------------------------ + + +def test_save_load_round_trip_preserves_fit_quality(tmp_path) -> None: + """ + A loaded project must produce the same χ² as the original. + + Fits the original project, saves it, loads it back, fits again, + and compares reduced χ² values. + """ + # Create and fit the original project + original = _create_lbco_project() + original.analysis.fit(verbosity='silent') + original_chi2 = original.analysis.fit_results.reduced_chi_square + + # Save the fitted project + proj_dir = str(tmp_path / 'lbco_fitted') + original.save_as(proj_dir) + + # Load + loaded = Project.load(proj_dir) + + # Fit the loaded project + loaded.analysis.fit(verbosity='silent') + loaded_chi2 = loaded.analysis.fit_results.reduced_chi_square + + # The χ² values should be very close (same starting point, + # same data, same model) + assert_almost_equal(loaded_chi2, original_chi2, decimal=1) diff --git a/tests/unit/easydiffraction/project/test_project_load.py b/tests/unit/easydiffraction/project/test_project_load.py new file mode 100644 index 00000000..6d9020b7 --- /dev/null +++ b/tests/unit/easydiffraction/project/test_project_load.py @@ -0,0 +1,142 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Unit tests for Project.load().""" + +from __future__ import annotations + +import pytest + +from easydiffraction.project.project import Project + + +class TestLoadMinimal: + """Load a project that has no structures or experiments.""" + + def test_raises_on_missing_directory(self, tmp_path): + missing = tmp_path / 'nonexistent' + with pytest.raises(FileNotFoundError, match='not found'): + Project.load(str(missing)) + + def test_round_trips_empty_project(self, tmp_path): + original = Project(name='empty', title='Empty', description='nothing') + original.save_as(str(tmp_path / 'proj')) + + loaded = Project.load(str(tmp_path / 'proj')) + + assert loaded.name == 'empty' + assert loaded.info.title == 'Empty' + assert loaded.info.description == 'nothing' + assert loaded.info.path is not None + assert len(loaded.structures) == 0 + assert len(loaded.experiments) == 0 + + +class TestLoadStructures: + """Load structures from a saved project.""" + + def test_round_trips_structure(self, tmp_path): + original = Project(name='s1') + original.structures.create(name='cosio') + s = original.structures['cosio'] + s.space_group.name_h_m = 'P m -3 m' + s.cell.length_a = 3.88 + s.atom_sites.create( + label='Co', + type_symbol='Co', + fract_x=0.0, + fract_y=0.0, + fract_z=0.0, + b_iso=0.5, + ) + original.save_as(str(tmp_path / 'proj')) + + loaded = Project.load(str(tmp_path / 'proj')) + + assert len(loaded.structures) == 1 + ls = loaded.structures['cosio'] + assert ls.space_group.name_h_m.value == 'P m -3 m' + assert abs(ls.cell.length_a.value - 3.88) < 1e-6 + assert len(ls.atom_sites) == 1 + assert ls.atom_sites['Co'].type_symbol.value == 'Co' + assert abs(ls.atom_sites['Co'].b_iso.value - 0.5) < 1e-6 + + +class TestLoadAnalysis: + """Load analysis settings from a saved project.""" + + def test_round_trips_minimizer(self, tmp_path): + original = Project(name='a1') + original.save_as(str(tmp_path / 'proj')) + + loaded = Project.load(str(tmp_path / 'proj')) + + assert loaded.analysis.current_minimizer == 'lmfit' + + def test_round_trips_fit_mode(self, tmp_path): + original = Project(name='a2') + original.analysis.fit_mode.mode = 'joint' + original.save_as(str(tmp_path / 'proj')) + + loaded = Project.load(str(tmp_path / 'proj')) + + assert loaded.analysis.fit_mode.mode.value == 'joint' + + def test_round_trips_constraints(self, tmp_path): + original = Project(name='c1') + original.structures.create(name='s') + s = original.structures['s'] + s.cell.length_a = 5.0 + s.cell.length_b = 5.0 + + original.analysis.aliases.create( + label='a_param', + param=s.cell.length_a, + ) + original.analysis.aliases.create( + label='b_param', + param=s.cell.length_b, + ) + original.analysis.constraints.create(expression='b_param = a_param') + original.save_as(str(tmp_path / 'proj')) + + loaded = Project.load(str(tmp_path / 'proj')) + + assert len(loaded.analysis.aliases) == 2 + assert loaded.analysis.aliases['a_param'].label.value == 'a_param' + assert loaded.analysis.aliases['b_param'].label.value == 'b_param' + # Verify alias param references are resolved + assert loaded.analysis.aliases['a_param'].param is not None + assert loaded.analysis.aliases['b_param'].param is not None + + assert len(loaded.analysis.constraints) == 1 + assert loaded.analysis.constraints[0].expression.value == 'b_param = a_param' + assert loaded.analysis.constraints.enabled is True + + +class TestLoadAnalysisCifFallback: + """Load falls back from analysis/analysis.cif to analysis.cif at root.""" + + def test_loads_analysis_from_root(self, tmp_path): + """Current save layout: analysis.cif at project root.""" + original = Project(name='fb1') + original.save_as(str(tmp_path / 'proj')) + + # Verify analysis.cif is at root (current save layout) + assert (tmp_path / 'proj' / 'analysis.cif').is_file() + + loaded = Project.load(str(tmp_path / 'proj')) + assert loaded.analysis.current_minimizer == 'lmfit' + + def test_loads_analysis_from_subdir(self, tmp_path): + """Future layout: analysis/analysis.cif takes priority.""" + original = Project(name='fb2') + original.save_as(str(tmp_path / 'proj')) + + # Move analysis.cif to analysis/ subdirectory + proj_dir = tmp_path / 'proj' + analysis_dir = proj_dir / 'analysis' + analysis_dir.mkdir(exist_ok=True) + (proj_dir / 'analysis.cif').rename(analysis_dir / 'analysis.cif') + + loaded = Project.load(str(proj_dir)) + assert loaded.analysis.current_minimizer == 'lmfit' diff --git a/tests/unit/easydiffraction/project/test_project_load_and_summary_wrap.py b/tests/unit/easydiffraction/project/test_project_load_and_summary_wrap.py index cdeafd35..69f84127 100644 --- a/tests/unit/easydiffraction/project/test_project_load_and_summary_wrap.py +++ b/tests/unit/easydiffraction/project/test_project_load_and_summary_wrap.py @@ -2,15 +2,27 @@ # SPDX-License-Identifier: BSD-3-Clause -def test_project_load_prints_and_sets_path(tmp_path, capsys): +def test_project_load_raises_on_missing_directory(tmp_path): import pytest from easydiffraction.project.project import Project - p = Project() - dir_path = tmp_path / 'pdir' - with pytest.raises(NotImplementedError, match='not implemented yet'): - p.load(str(dir_path)) + missing_dir = tmp_path / 'nonexistent' + with pytest.raises(FileNotFoundError, match='not found'): + Project.load(str(missing_dir)) + + +def test_project_load_reads_project_info(tmp_path): + from easydiffraction.project.project import Project + + p = Project(name='myproj', title='My Title', description='A description') + p.save_as(str(tmp_path / 'proj')) + + loaded = Project.load(str(tmp_path / 'proj')) + assert loaded.name == 'myproj' + assert loaded.info.title == 'My Title' + assert loaded.info.description == 'A description' + assert loaded.info.path is not None def test_summary_show_project_info_wraps_description(capsys): From 47e268eed5e3f0bb7de13b25188aadf6ebffd494 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 11:15:40 +0200 Subject: [PATCH 04/51] Encode free flags via CIF uncertainty brackets --- docs/architecture/architecture.md | 4 + docs/docs/tutorials/ed-18.py | 84 +++++++++++++++++++ docs/docs/tutorials/index.json | 7 ++ docs/docs/tutorials/index.md | 4 + docs/mkdocs.yml | 1 + src/easydiffraction/io/cif/serialize.py | 79 ++++++++++++++--- src/easydiffraction/utils/utils.py | 19 +++-- .../integration/fitting/test_project_load.py | 12 ++- .../easydiffraction/core/test_parameters.py | 4 +- 9 files changed, 192 insertions(+), 22 deletions(-) create mode 100644 docs/docs/tutorials/ed-18.py diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md index d9827c07..605ee28c 100644 --- a/docs/architecture/architecture.md +++ b/docs/architecture/architecture.md @@ -919,6 +919,10 @@ project.experiments['xray_pdf'].peak_profile_type = 'gaussian-damped-sinc' - `DatablockItem` = one CIF `data_` block, `DatablockCollection` = set of blocks. - `CategoryItem` = one CIF category, `CategoryCollection` = CIF loop. +- **Free-flag encoding**: A parameter's free/fixed status is encoded in + CIF via uncertainty brackets. `3.89` = fixed, `3.89(2)` = free with + esd, `3.89()` = free without esd. There is no separate list of free + parameters; the brackets are the single source of truth. ### 9.2 Immutability of Experiment Type diff --git a/docs/docs/tutorials/ed-18.py b/docs/docs/tutorials/ed-18.py new file mode 100644 index 00000000..b883e9d0 --- /dev/null +++ b/docs/docs/tutorials/ed-18.py @@ -0,0 +1,84 @@ +# %% [markdown] +# # Load Project and Fit: LBCO, HRPT +# +# This is the most minimal example of using EasyDiffraction. It shows +# how to load a previously saved project from a directory and run +# refinement — all in just a few lines of code. +# +# The project is first created and saved as a setup step (this would +# normally be done once and the directory would already exist on disk). +# Then the saved project is loaded back and fitted. +# +# For details on how to define structures and experiments, see the other +# tutorials. + +# %% [markdown] +# ## Import Library + +# %% +import easydiffraction as ed + +# %% [markdown] +# ## Setup: Create and Save a Project +# +# This step creates a project from CIF files and saves it to a +# directory. In practice, the project directory would already exist +# on disk from a previous session. + +# %% +# Create a project from CIF files +project = ed.Project() +project.structures.add_from_cif_path(ed.download_data(id=1, destination='data')) +project.experiments.add_from_cif_path(ed.download_data(id=2, destination='data')) + +# %% +project.analysis.aliases.create( + label='biso_La', + param=project.structures['lbco'].atom_sites['La'].b_iso, +) +project.analysis.aliases.create( + label='biso_Ba', + param=project.structures['lbco'].atom_sites['Ba'].b_iso, +) + +project.analysis.aliases.create( + label='occ_La', + param=project.structures['lbco'].atom_sites['La'].occupancy, +) +project.analysis.aliases.create( + label='occ_Ba', + param=project.structures['lbco'].atom_sites['Ba'].occupancy, +) + +project.analysis.constraints.create(expression='biso_Ba = biso_La') +project.analysis.constraints.create(expression='occ_Ba = 1 - occ_La') + +project.structures['lbco'].atom_sites['La'].occupancy.free = True + +# %% +# Save to a directory +project.save_as('lbco_project') + +# %% [markdown] +# ## Step 1: Load Project from Directory + +# %% +project = ed.Project.load('lbco_project') + +# %% [markdown] +# ## Step 2: Perform Analysis + +# %% +project.analysis.fit() + +# %% +project.analysis.show_fit_results() + +# %% +project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) + +# %% [markdown] +# ## Step 3: Show Project Summary + +# %% +project.summary.show_report() diff --git a/docs/docs/tutorials/index.json b/docs/docs/tutorials/index.json index 3f2f223c..9a438874 100644 --- a/docs/docs/tutorials/index.json +++ b/docs/docs/tutorials/index.json @@ -117,5 +117,12 @@ "title": "Structure Refinement: Co2SiO4, D20 (Temperature scan)", "description": "Sequential Rietveld refinement of Co2SiO4 using constant wavelength neutron powder diffraction data from D20 at ILL across a temperature scan", "level": "advanced" + }, + "18": { + "url": "https://easyscience.github.io/diffraction-lib/{version}/tutorials/ed-18/ed-18.ipynb", + "original_name": "", + "title": "Quick Start: LBCO Load Project", + "description": "Most minimal example: load a saved project from a directory and run Rietveld refinement of La0.5Ba0.5CoO3", + "level": "quick" } } diff --git a/docs/docs/tutorials/index.md b/docs/docs/tutorials/index.md index ef22e949..06a9c69e 100644 --- a/docs/docs/tutorials/index.md +++ b/docs/docs/tutorials/index.md @@ -17,6 +17,10 @@ The tutorials are organized into the following categories. ## Getting Started +- [LBCO `quick` `load`](ed-18.ipynb) – The most minimal example showing + how to load a previously saved project from a directory and run + refinement. Useful when a project has already been set up and saved in + a prior session. - [LBCO `quick` CIF](ed-1.ipynb) – A minimal example intended as a quick reference for users already familiar with the EasyDiffraction API or who want to see how Rietveld refinement of the La0.5Ba0.5CoO3 crystal diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index c272bdec..5f2415c3 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -191,6 +191,7 @@ nav: - Tutorials: - Tutorials: tutorials/index.md - Getting Started: + - LBCO quick load: tutorials/ed-18.ipynb - LBCO quick CIF: tutorials/ed-1.ipynb - LBCO quick code: tutorials/ed-2.ipynb - LBCO complete: tutorials/ed-3.ipynb diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index 89655c64..184e40f4 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -65,15 +65,67 @@ def format_value(value: object) -> str: ################## +def format_param_value(param: object) -> str: + """ + Format a parameter value for CIF output, encoding the free flag. + + CIF convention for numeric parameters: + + - Fixed or constrained parameter: plain value, e.g. ``3.89090000`` + - Free parameter without uncertainty: value with empty brackets, + e.g. ``3.89090000()`` + - Free parameter with uncertainty: value with esd in brackets, + e.g. ``3.89090000(200000)`` + + Constrained (dependent) parameters are always written without + brackets, even if their ``free`` flag is ``True``, because they are + not independently varied by the minimizer. + + Non-numeric parameters and descriptors without a ``free`` attribute + are formatted with :func:`format_value`. + + Parameters + ---------- + param : object + A descriptor or parameter exposing ``.value`` and optionally + ``.free``, ``.constrained``, and ``.uncertainty``. + + Returns + ------- + str + Formatted CIF value string. + """ + is_free = getattr(param, 'free', False) + is_constrained = getattr(param, 'constrained', False) + value = param.value # type: ignore[attr-defined] + + if not is_free or is_constrained or not isinstance(value, (int, float)): + return format_value(value) + + precision = 8 + uncertainty = getattr(param, 'uncertainty', None) + formatted_value = f'{float(value):.{precision}f}' + + if uncertainty is not None and uncertainty > 0: + from uncertainties import ufloat as _ufloat # noqa: PLC0415 + + u = _ufloat(float(value), float(uncertainty)) + return f'{u:.{precision}fS}' + + return f'{formatted_value}()' + + def param_to_cif(param: object) -> str: """ Render a single descriptor/parameter to a CIF line. Expects ``param`` to expose ``_cif_handler.names`` and ``value``. + Free parameters are written with uncertainty brackets (see + :func:`format_param_value`). """ tags: Sequence[str] = param._cif_handler.names # type: ignore[attr-defined] main_key: str = tags[0] - return f'{main_key} {format_value(param.value)}' + return f'{main_key} {format_param_value(param)}' def category_item_to_cif(item: object) -> str: @@ -94,8 +146,7 @@ def category_collection_to_cif( """ Render a CategoryCollection-like object to CIF text. - Uses first item to build loop header, then emits rows for each - item. + Uses first item to build loop header, then emits rows for each item. Parameters ---------- @@ -124,17 +175,17 @@ def category_collection_to_cif( half_display = max_display // 2 for i in range(half_display): item = list(collection.values())[i] - row_vals = [format_value(p.value) for p in item.parameters] + row_vals = [format_param_value(p) for p in item.parameters] lines.append(' '.join(row_vals)) lines.append('...') for i in range(-half_display, 0): item = list(collection.values())[i] - row_vals = [format_value(p.value) for p in item.parameters] + row_vals = [format_param_value(p) for p in item.parameters] lines.append(' '.join(row_vals)) # No limit else: for item in collection.values(): - row_vals = [format_value(p.value) for p in item.parameters] + row_vals = [format_param_value(p) for p in item.parameters] lines.append(' '.join(row_vals)) return '\n'.join(lines) @@ -415,11 +466,13 @@ def param_from_cif( # If numeric, parse with uncertainty if present if self._value_type == DataTypes.NUMERIC: + has_brackets = '(' in raw u = str_to_ufloat(raw) self.value = u.n - if not np.isnan(u.s) and hasattr(self, 'uncertainty'): - self.uncertainty = u.s # type: ignore[attr-defined] - self.free = True # Mark as free if uncertainty is present + if has_brackets and hasattr(self, 'free'): + self.free = True # type: ignore[attr-defined] + if not np.isnan(u.s) and hasattr(self, 'uncertainty'): + self.uncertainty = u.s # type: ignore[attr-defined] # If string, strip quotes if present elif self._value_type == DataTypes.STRING: @@ -520,11 +573,13 @@ def _get_loop(block: object, category_item: object) -> object | None: # If numeric, parse with uncertainty if present if param._value_type == DataTypes.NUMERIC: + has_brackets = '(' in raw u = str_to_ufloat(raw) param.value = u.n - if not np.isnan(u.s) and hasattr(param, 'uncertainty'): - param.uncertainty = u.s # type: ignore[attr-defined] - param.free = True # Mark as free if uncertainty is present + if has_brackets and hasattr(param, 'free'): + param.free = True # type: ignore[attr-defined] + if not np.isnan(u.s) and hasattr(param, 'uncertainty'): + param.uncertainty = u.s # type: ignore[attr-defined] # If string, strip quotes if present # TODO: Make a helper function for this diff --git a/src/easydiffraction/utils/utils.py b/src/easydiffraction/utils/utils.py index 4a029917..a54038fc 100644 --- a/src/easydiffraction/utils/utils.py +++ b/src/easydiffraction/utils/utils.py @@ -705,19 +705,23 @@ def str_to_ufloat(s: str | None, default: float | None = None) -> UFloat: Parse a CIF-style numeric string into a ufloat. Examples of supported input: - "3.566" → ufloat(3.566, nan) - - "3.566(2)" → ufloat(3.566, 0.002) - None → ufloat(default, nan) + "3.566(2)" → ufloat(3.566, 0.002) - "3.566()" → ufloat(3.566, 0.0) - + None → ufloat(default, nan) Behavior: - If the input string contains a value with parentheses (e.g. "3.566(2)"), the number in parentheses is interpreted as an - estimated standard deviation (esd) in the last digit(s). - If the - input string has no parentheses, an uncertainty of NaN is assigned - to indicate "no esd provided". - If parsing fails, the function - falls back to the given ``default`` value with uncertainty NaN. + estimated standard deviation (esd) in the last digit(s). - Empty + parentheses (e.g. "3.566()") are treated as zero uncertainty. - If + the input string has no parentheses, an uncertainty of NaN is + assigned to indicate "no esd provided". - If parsing fails, the + function falls back to the given ``default`` value with uncertainty + NaN. Parameters ---------- s : str | None - Numeric string in CIF format (e.g. "3.566", "3.566(2)") or None. + Numeric string in CIF format (e.g. "3.566", "3.566(2)", + "3.566()") or None. default : float | None, default=None Default value to use if ``s`` is None or parsing fails. @@ -733,6 +737,9 @@ def str_to_ufloat(s: str | None, default: float | None = None) -> UFloat: if '(' not in s and ')' not in s: s = f'{s}(nan)' + elif s.endswith('()'): + # Empty brackets → zero uncertainty (free parameter, no esd yet) + s = s[:-2] + '(0)' try: return ufloat_fromstr(s) except Exception: diff --git a/tests/integration/fitting/test_project_load.py b/tests/integration/fitting/test_project_load.py index 3598ed69..789482f3 100644 --- a/tests/integration/fitting/test_project_load.py +++ b/tests/integration/fitting/test_project_load.py @@ -116,8 +116,12 @@ def _create_lbco_project() -> Project: def _collect_param_snapshot(project: Project) -> dict[str, float]: - """Return ``{unique_name: value}`` for all project parameters.""" - return {p.unique_name: p.value for p in project.parameters} + """Return ``{unique_name: value}`` for model parameters (excluding raw data).""" + return { + p.unique_name: p.value + for p in project.parameters + if not p.unique_name.startswith('pd_data.') + } def _collect_free_flags(project: Project) -> dict[str, bool]: @@ -139,6 +143,10 @@ def test_save_load_round_trip_preserves_parameters(tmp_path) -> None: Also verifies project info, free flags, aliases, and constraints. """ original = _create_lbco_project() + # Apply symmetry constraints so snapshot matches the loaded state + # (load() calls _update_categories which applies symmetry). + for structure in original.structures: + structure._update_categories() original_params = _collect_param_snapshot(original) original_free = _collect_free_flags(original) diff --git a/tests/unit/easydiffraction/core/test_parameters.py b/tests/unit/easydiffraction/core/test_parameters.py index 87ded2b6..d9f96b0e 100644 --- a/tests/unit/easydiffraction/core/test_parameters.py +++ b/tests/unit/easydiffraction/core/test_parameters.py @@ -66,8 +66,8 @@ def test_parameter_string_repr_and_as_cif_and_flags(): assert 'A' in s assert '(free=True)' in s - # CIF line is ` ` - assert p.as_cif == '_param.a 2.50000000' + # CIF line: free param with uncertainty uses esd brackets + assert p.as_cif == '_param.a 2.50000000(10000000)' # CifHandler uid is owner's unique_name (parameter name here) assert p._cif_handler.uid == p.unique_name == 'a' From 4db00cf7e7a9b7e679c6a714a5ca48cd67639cb4 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 11:59:23 +0200 Subject: [PATCH 05/51] Update notebooks --- docs/docs/tutorials/ed-1.ipynb | 53 ++-- docs/docs/tutorials/ed-10.ipynb | 55 ++-- docs/docs/tutorials/ed-11.ipynb | 61 +++-- docs/docs/tutorials/ed-12.ipynb | 69 +++-- docs/docs/tutorials/ed-13.ipynb | 427 ++++++++++++++++--------------- docs/docs/tutorials/ed-14.ipynb | 77 +++--- docs/docs/tutorials/ed-15.ipynb | 73 +++--- docs/docs/tutorials/ed-16.ipynb | 133 +++++----- docs/docs/tutorials/ed-17.ipynb | 143 ++++++----- docs/docs/tutorials/ed-18.ipynb | 225 +++++++++++++++++ docs/docs/tutorials/ed-2.ipynb | 67 +++-- docs/docs/tutorials/ed-3.ipynb | 431 +++++++++++++++----------------- docs/docs/tutorials/ed-4.ipynb | 139 +++++----- docs/docs/tutorials/ed-5.ipynb | 143 ++++++----- docs/docs/tutorials/ed-6.ipynb | 175 +++++++------ docs/docs/tutorials/ed-7.ipynb | 163 ++++++------ docs/docs/tutorials/ed-8.ipynb | 133 +++++----- docs/docs/tutorials/ed-9.ipynb | 143 ++++++----- 18 files changed, 1553 insertions(+), 1157 deletions(-) create mode 100644 docs/docs/tutorials/ed-18.ipynb diff --git a/docs/docs/tutorials/ed-1.ipynb b/docs/docs/tutorials/ed-1.ipynb index d2e178a2..830d7889 100644 --- a/docs/docs/tutorials/ed-1.ipynb +++ b/docs/docs/tutorials/ed-1.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8dbf8e63", + "id": "74c72059", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: LBCO, HRPT\n", "\n", @@ -47,7 +62,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -56,7 +71,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -65,7 +80,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Step 1: Define Project" @@ -74,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -84,7 +99,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "## Step 2: Define Crystal Structure" @@ -93,7 +108,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -104,7 +119,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -113,7 +128,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "9", "metadata": {}, "source": [ "## Step 3: Define Experiment" @@ -122,7 +137,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -133,7 +148,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -142,7 +157,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Step 4: Perform Analysis" @@ -151,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -163,7 +178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -174,7 +189,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -184,7 +199,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -193,7 +208,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "17", "metadata": {}, "source": [ "## Step 5: Show Project Summary" @@ -202,7 +217,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "18", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-10.ipynb b/docs/docs/tutorials/ed-10.ipynb index 0c865ce0..2fa9c6c5 100644 --- a/docs/docs/tutorials/ed-10.ipynb +++ b/docs/docs/tutorials/ed-10.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f42176f2", + "id": "239bda80", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Pair Distribution Function: Ni, NPD\n", "\n", @@ -36,7 +51,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -45,7 +60,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Create Project" @@ -63,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -72,7 +87,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "## Add Structure" @@ -81,7 +96,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -91,7 +106,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -111,7 +126,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "9", "metadata": {}, "source": [ "## Add Experiment" @@ -120,7 +135,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -130,7 +145,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +162,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -162,7 +177,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "13", "metadata": {}, "source": [ "## Select Fitting Parameters" @@ -171,7 +186,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -182,7 +197,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -193,7 +208,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "## Run Fitting" @@ -202,7 +217,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -212,7 +227,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "## Plot Measured vs Calculated" @@ -221,7 +236,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-11.ipynb b/docs/docs/tutorials/ed-11.ipynb index 92714987..cbb509bf 100644 --- a/docs/docs/tutorials/ed-11.ipynb +++ b/docs/docs/tutorials/ed-11.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b38dbf4f", + "id": "958d9ba3", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Pair Distribution Function: Si, NPD\n", "\n", @@ -33,7 +48,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -42,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -51,7 +66,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Create Project" @@ -60,7 +75,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +84,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "## Set Plotting Engine" @@ -78,7 +93,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -90,7 +105,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +115,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "9", "metadata": {}, "source": [ "## Add Structure" @@ -109,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -119,7 +134,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -140,7 +155,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Add Experiment" @@ -149,7 +164,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -159,7 +174,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -176,7 +191,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -192,7 +207,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "## Select Fitting Parameters" @@ -201,7 +216,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -213,7 +228,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -225,7 +240,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "19", "metadata": {}, "source": [ "## Run Fitting" @@ -234,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -244,7 +259,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "21", "metadata": {}, "source": [ "## Plot Measured vs Calculated" @@ -253,7 +268,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-12.ipynb b/docs/docs/tutorials/ed-12.ipynb index 51bdc129..deaca165 100644 --- a/docs/docs/tutorials/ed-12.ipynb +++ b/docs/docs/tutorials/ed-12.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "effff825", + "id": "a6c12e68", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Pair Distribution Function: NaCl, XRD\n", "\n", @@ -36,7 +51,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -45,7 +60,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Create Project" @@ -63,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -72,7 +87,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "## Set Plotting Engine" @@ -81,7 +96,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -93,7 +108,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -104,7 +119,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "9", "metadata": {}, "source": [ "## Add Structure" @@ -113,7 +128,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -123,7 +138,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -152,7 +167,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Add Experiment" @@ -161,7 +176,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -171,7 +186,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -188,7 +203,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -198,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -208,7 +223,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -218,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -233,7 +248,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -242,7 +257,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "## Select Fitting Parameters" @@ -251,7 +266,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -263,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -274,7 +289,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "23", "metadata": {}, "source": [ "## Run Fitting" @@ -283,7 +298,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -293,7 +308,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "25", "metadata": {}, "source": [ "## Plot Measured vs Calculated" @@ -302,7 +317,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-13.ipynb b/docs/docs/tutorials/ed-13.ipynb index bb4dd4c2..ef68958b 100644 --- a/docs/docs/tutorials/ed-13.ipynb +++ b/docs/docs/tutorials/ed-13.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "263b6625", + "id": "1c60b738", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Fitting Powder Diffraction data\n", "\n", @@ -55,7 +70,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "📖 See\n", @@ -67,7 +82,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -76,7 +91,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## 📘 Introduction: Simple Reference Fit – Si\n", @@ -103,7 +118,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "5", "metadata": {}, "source": [ "📖 See\n", @@ -115,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -124,7 +139,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "7", "metadata": {}, "source": [ "You can set the title and description of the project to provide\n", @@ -136,7 +151,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -146,7 +161,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "9", "metadata": {}, "source": [ "### 🔬 Create an Experiment\n", @@ -159,7 +174,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "📖 See\n", @@ -171,7 +186,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -182,7 +197,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "Uncomment the following cell if your data reduction failed and the\n", @@ -196,7 +211,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -205,7 +220,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "Now we can create the experiment and load the measured data. In this\n", @@ -217,7 +232,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "15", "metadata": {}, "source": [ "📖 See\n", @@ -228,7 +243,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -243,7 +258,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "17", "metadata": {}, "source": [ "#### Inspect Measured Data\n", @@ -259,7 +274,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "📖 See\n", @@ -273,7 +288,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "19", "metadata": {}, "source": [ "📖 See\n", @@ -284,7 +299,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -296,7 +311,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -305,7 +320,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "22", "metadata": {}, "source": [ "If you zoom in on the highest TOF peak (around 120,000 μs), you will\n", @@ -328,7 +343,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "23", "metadata": {}, "source": [ "📖 See\n", @@ -339,7 +354,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -349,7 +364,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "25", "metadata": {}, "source": [ "To visualize the effect of excluding the high TOF region, we can plot\n", @@ -360,7 +375,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -369,7 +384,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "#### Set Instrument Parameters\n", @@ -392,7 +407,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "28", "metadata": {}, "source": [ "📖 See\n", @@ -403,7 +418,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -417,7 +432,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "30", "metadata": {}, "source": [ "Before proceeding, let's take a quick look at the concept of\n", @@ -437,7 +452,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -446,7 +461,7 @@ }, { "cell_type": "markdown", - "id": "31", + "id": "32", "metadata": {}, "source": [ "The `value` attribute represents the current value of the parameter as\n", @@ -460,7 +475,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -469,7 +484,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "34", "metadata": {}, "source": [ "Note that to set the value of the parameter, you can simply assign a\n", @@ -479,7 +494,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "35", "metadata": {}, "source": [ "📖 See\n", @@ -490,7 +505,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "36", "metadata": {}, "source": [ "#### Set Peak Profile Parameters\n", @@ -532,7 +547,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "37", "metadata": {}, "source": [ "📖 See\n", @@ -543,7 +558,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -559,7 +574,7 @@ }, { "cell_type": "markdown", - "id": "38", + "id": "39", "metadata": {}, "source": [ "#### Set Background\n", @@ -594,7 +609,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "40", "metadata": {}, "source": [ "📖 See\n", @@ -605,7 +620,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -621,7 +636,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "42", "metadata": {}, "source": [ "### 🧩 Create a Structure – Si\n", @@ -664,7 +679,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "43", "metadata": {}, "source": [ "📖 See\n", @@ -674,7 +689,7 @@ }, { "cell_type": "markdown", - "id": "43", + "id": "44", "metadata": {}, "source": [ "```\n", @@ -706,7 +721,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "45", "metadata": {}, "source": [ "As with adding the experiment in the previous step, we will create a\n", @@ -716,7 +731,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "46", "metadata": {}, "source": [ "📖 See\n", @@ -727,7 +742,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "47", "metadata": {}, "source": [ "#### Add Structure" @@ -736,7 +751,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -745,7 +760,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "49", "metadata": {}, "source": [ "#### Set Space Group" @@ -753,7 +768,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "50", "metadata": {}, "source": [ "📖 See\n", @@ -764,7 +779,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "51", "metadata": {}, "outputs": [], "source": [ @@ -774,7 +789,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "52", "metadata": {}, "source": [ "#### Set Lattice Parameters" @@ -782,7 +797,7 @@ }, { "cell_type": "markdown", - "id": "52", + "id": "53", "metadata": {}, "source": [ "📖 See\n", @@ -793,7 +808,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -802,7 +817,7 @@ }, { "cell_type": "markdown", - "id": "54", + "id": "55", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -810,7 +825,7 @@ }, { "cell_type": "markdown", - "id": "55", + "id": "56", "metadata": {}, "source": [ "📖 See\n", @@ -821,7 +836,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "57", "metadata": {}, "outputs": [], "source": [ @@ -838,7 +853,7 @@ }, { "cell_type": "markdown", - "id": "57", + "id": "58", "metadata": {}, "source": [ "### 🔗 Assign Structure to Experiment\n", @@ -851,7 +866,7 @@ }, { "cell_type": "markdown", - "id": "58", + "id": "59", "metadata": {}, "source": [ "📖 See\n", @@ -862,7 +877,7 @@ { "cell_type": "code", "execution_count": null, - "id": "59", + "id": "60", "metadata": {}, "outputs": [], "source": [ @@ -871,7 +886,7 @@ }, { "cell_type": "markdown", - "id": "60", + "id": "61", "metadata": {}, "source": [ "### 🚀 Analyze and Fit the Data\n", @@ -893,7 +908,7 @@ }, { "cell_type": "markdown", - "id": "61", + "id": "62", "metadata": { "title": "**Reminder:**" }, @@ -910,7 +925,7 @@ }, { "cell_type": "markdown", - "id": "62", + "id": "63", "metadata": {}, "source": [ "📖 See\n", @@ -920,7 +935,7 @@ }, { "cell_type": "markdown", - "id": "63", + "id": "64", "metadata": {}, "source": [ "#### Set Fit Parameters\n", @@ -943,7 +958,7 @@ { "cell_type": "code", "execution_count": null, - "id": "64", + "id": "65", "metadata": {}, "outputs": [], "source": [ @@ -963,7 +978,7 @@ }, { "cell_type": "markdown", - "id": "65", + "id": "66", "metadata": {}, "source": [ "#### Show Free Parameters\n", @@ -974,7 +989,7 @@ }, { "cell_type": "markdown", - "id": "66", + "id": "67", "metadata": {}, "source": [ "📖 See\n", @@ -988,7 +1003,7 @@ { "cell_type": "code", "execution_count": null, - "id": "67", + "id": "68", "metadata": {}, "outputs": [], "source": [ @@ -997,7 +1012,7 @@ }, { "cell_type": "markdown", - "id": "68", + "id": "69", "metadata": {}, "source": [ "#### Visualize Diffraction Patterns\n", @@ -1013,7 +1028,7 @@ { "cell_type": "code", "execution_count": null, - "id": "69", + "id": "70", "metadata": {}, "outputs": [], "source": [ @@ -1022,7 +1037,7 @@ }, { "cell_type": "markdown", - "id": "70", + "id": "71", "metadata": {}, "source": [ "#### Run Fitting\n", @@ -1033,7 +1048,7 @@ }, { "cell_type": "markdown", - "id": "71", + "id": "72", "metadata": {}, "source": [ "📖 See\n", @@ -1044,7 +1059,7 @@ { "cell_type": "code", "execution_count": null, - "id": "72", + "id": "73", "metadata": {}, "outputs": [], "source": [ @@ -1054,7 +1069,7 @@ }, { "cell_type": "markdown", - "id": "73", + "id": "74", "metadata": {}, "source": [ "#### Check Fit Results\n", @@ -1073,7 +1088,7 @@ }, { "cell_type": "markdown", - "id": "74", + "id": "75", "metadata": {}, "source": [ "#### Visualize Fit Results\n", @@ -1087,7 +1102,7 @@ { "cell_type": "code", "execution_count": null, - "id": "75", + "id": "76", "metadata": {}, "outputs": [], "source": [ @@ -1096,7 +1111,7 @@ }, { "cell_type": "markdown", - "id": "76", + "id": "77", "metadata": {}, "source": [ "#### TOF vs d-spacing\n", @@ -1130,7 +1145,7 @@ { "cell_type": "code", "execution_count": null, - "id": "77", + "id": "78", "metadata": {}, "outputs": [], "source": [ @@ -1139,7 +1154,7 @@ }, { "cell_type": "markdown", - "id": "78", + "id": "79", "metadata": {}, "source": [ "As you can see, the calculated diffraction pattern now matches the\n", @@ -1165,7 +1180,7 @@ { "cell_type": "code", "execution_count": null, - "id": "79", + "id": "80", "metadata": {}, "outputs": [], "source": [ @@ -1174,7 +1189,7 @@ }, { "cell_type": "markdown", - "id": "80", + "id": "81", "metadata": {}, "source": [ "## 💪 Exercise: Complex Fit – LBCO\n", @@ -1195,7 +1210,7 @@ }, { "cell_type": "markdown", - "id": "81", + "id": "82", "metadata": {}, "source": [ "**Hint:**" @@ -1203,7 +1218,7 @@ }, { "cell_type": "markdown", - "id": "82", + "id": "83", "metadata": {}, "source": [ "You can use the same approach as in the previous part of the notebook,\n", @@ -1212,7 +1227,7 @@ }, { "cell_type": "markdown", - "id": "83", + "id": "84", "metadata": {}, "source": [ "**Solution:**" @@ -1221,7 +1236,7 @@ { "cell_type": "code", "execution_count": null, - "id": "84", + "id": "85", "metadata": {}, "outputs": [], "source": [ @@ -1232,7 +1247,7 @@ }, { "cell_type": "markdown", - "id": "85", + "id": "86", "metadata": {}, "source": [ "### 🔬 Exercise 2: Define an Experiment\n", @@ -1245,7 +1260,7 @@ }, { "cell_type": "markdown", - "id": "86", + "id": "87", "metadata": {}, "source": [ "**Hint:**" @@ -1253,7 +1268,7 @@ }, { "cell_type": "markdown", - "id": "87", + "id": "88", "metadata": {}, "source": [ "You can use the same approach as in the previous part of the notebook,\n", @@ -1262,7 +1277,7 @@ }, { "cell_type": "markdown", - "id": "88", + "id": "89", "metadata": {}, "source": [ "**Solution:**" @@ -1271,7 +1286,7 @@ { "cell_type": "code", "execution_count": null, - "id": "89", + "id": "90", "metadata": {}, "outputs": [], "source": [ @@ -1294,7 +1309,7 @@ }, { "cell_type": "markdown", - "id": "90", + "id": "91", "metadata": {}, "source": [ "#### Exercise 2.1: Inspect Measured Data\n", @@ -1306,7 +1321,7 @@ }, { "cell_type": "markdown", - "id": "91", + "id": "92", "metadata": {}, "source": [ "**Hint:**" @@ -1314,7 +1329,7 @@ }, { "cell_type": "markdown", - "id": "92", + "id": "93", "metadata": {}, "source": [ "You can use the `plot_meas` method of the project to visualize the\n", @@ -1325,7 +1340,7 @@ }, { "cell_type": "markdown", - "id": "93", + "id": "94", "metadata": {}, "source": [ "**Solution:**" @@ -1334,7 +1349,7 @@ { "cell_type": "code", "execution_count": null, - "id": "94", + "id": "95", "metadata": {}, "outputs": [], "source": [ @@ -1348,7 +1363,7 @@ }, { "cell_type": "markdown", - "id": "95", + "id": "96", "metadata": {}, "source": [ "#### Exercise 2.2: Set Instrument Parameters\n", @@ -1358,7 +1373,7 @@ }, { "cell_type": "markdown", - "id": "96", + "id": "97", "metadata": {}, "source": [ "**Hint:**" @@ -1366,7 +1381,7 @@ }, { "cell_type": "markdown", - "id": "97", + "id": "98", "metadata": {}, "source": [ "Use the values from the data reduction process for the LBCO and\n", @@ -1375,7 +1390,7 @@ }, { "cell_type": "markdown", - "id": "98", + "id": "99", "metadata": {}, "source": [ "**Solution:**" @@ -1384,7 +1399,7 @@ { "cell_type": "code", "execution_count": null, - "id": "99", + "id": "100", "metadata": {}, "outputs": [], "source": [ @@ -1398,7 +1413,7 @@ }, { "cell_type": "markdown", - "id": "100", + "id": "101", "metadata": {}, "source": [ "#### Exercise 2.3: Set Peak Profile Parameters\n", @@ -1408,7 +1423,7 @@ }, { "cell_type": "markdown", - "id": "101", + "id": "102", "metadata": {}, "source": [ "**Hint:**" @@ -1416,7 +1431,7 @@ }, { "cell_type": "markdown", - "id": "102", + "id": "103", "metadata": {}, "source": [ "Use the values from the\n", @@ -1428,7 +1443,7 @@ }, { "cell_type": "markdown", - "id": "103", + "id": "104", "metadata": {}, "source": [ "**Solution:**" @@ -1437,7 +1452,7 @@ { "cell_type": "code", "execution_count": null, - "id": "104", + "id": "105", "metadata": {}, "outputs": [], "source": [ @@ -1455,7 +1470,7 @@ }, { "cell_type": "markdown", - "id": "105", + "id": "106", "metadata": {}, "source": [ "#### Exercise 2.4: Set Background\n", @@ -1466,7 +1481,7 @@ }, { "cell_type": "markdown", - "id": "106", + "id": "107", "metadata": {}, "source": [ "**Hint:**" @@ -1474,7 +1489,7 @@ }, { "cell_type": "markdown", - "id": "107", + "id": "108", "metadata": {}, "source": [ "Use the same approach as in the previous part of the notebook, but\n", @@ -1485,7 +1500,7 @@ }, { "cell_type": "markdown", - "id": "108", + "id": "109", "metadata": {}, "source": [ "**Solution:**" @@ -1494,7 +1509,7 @@ { "cell_type": "code", "execution_count": null, - "id": "109", + "id": "110", "metadata": {}, "outputs": [], "source": [ @@ -1510,7 +1525,7 @@ }, { "cell_type": "markdown", - "id": "110", + "id": "111", "metadata": {}, "source": [ "### 🧩 Exercise 3: Define a Structure – LBCO\n", @@ -1526,7 +1541,7 @@ }, { "cell_type": "markdown", - "id": "111", + "id": "112", "metadata": {}, "source": [ "```\n", @@ -1561,7 +1576,7 @@ }, { "cell_type": "markdown", - "id": "112", + "id": "113", "metadata": {}, "source": [ "Note that the `occupancy` of the La and Ba atoms is 0.5\n", @@ -1591,7 +1606,7 @@ }, { "cell_type": "markdown", - "id": "113", + "id": "114", "metadata": {}, "source": [ "#### Exercise 3.1: Create Structure\n", @@ -1602,7 +1617,7 @@ }, { "cell_type": "markdown", - "id": "114", + "id": "115", "metadata": {}, "source": [ "**Hint:**" @@ -1610,7 +1625,7 @@ }, { "cell_type": "markdown", - "id": "115", + "id": "116", "metadata": {}, "source": [ "You can use the same approach as in the previous part of the notebook,\n", @@ -1620,7 +1635,7 @@ }, { "cell_type": "markdown", - "id": "116", + "id": "117", "metadata": {}, "source": [ "**Solution:**" @@ -1629,7 +1644,7 @@ { "cell_type": "code", "execution_count": null, - "id": "117", + "id": "118", "metadata": {}, "outputs": [], "source": [ @@ -1638,7 +1653,7 @@ }, { "cell_type": "markdown", - "id": "118", + "id": "119", "metadata": {}, "source": [ "#### Exercise 3.2: Set Space Group\n", @@ -1648,7 +1663,7 @@ }, { "cell_type": "markdown", - "id": "119", + "id": "120", "metadata": {}, "source": [ "**Hint:**" @@ -1656,7 +1671,7 @@ }, { "cell_type": "markdown", - "id": "120", + "id": "121", "metadata": {}, "source": [ "Use the space group name and IT coordinate system code from the CIF\n", @@ -1665,7 +1680,7 @@ }, { "cell_type": "markdown", - "id": "121", + "id": "122", "metadata": {}, "source": [ "**Solution:**" @@ -1674,7 +1689,7 @@ { "cell_type": "code", "execution_count": null, - "id": "122", + "id": "123", "metadata": {}, "outputs": [], "source": [ @@ -1684,7 +1699,7 @@ }, { "cell_type": "markdown", - "id": "123", + "id": "124", "metadata": {}, "source": [ "#### Exercise 3.3: Set Lattice Parameters\n", @@ -1694,7 +1709,7 @@ }, { "cell_type": "markdown", - "id": "124", + "id": "125", "metadata": {}, "source": [ "**Hint:**" @@ -1702,7 +1717,7 @@ }, { "cell_type": "markdown", - "id": "125", + "id": "126", "metadata": {}, "source": [ "Use the lattice parameters from the CIF data." @@ -1710,7 +1725,7 @@ }, { "cell_type": "markdown", - "id": "126", + "id": "127", "metadata": {}, "source": [ "**Solution:**" @@ -1719,7 +1734,7 @@ { "cell_type": "code", "execution_count": null, - "id": "127", + "id": "128", "metadata": {}, "outputs": [], "source": [ @@ -1728,7 +1743,7 @@ }, { "cell_type": "markdown", - "id": "128", + "id": "129", "metadata": {}, "source": [ "#### Exercise 3.4: Set Atom Sites\n", @@ -1738,7 +1753,7 @@ }, { "cell_type": "markdown", - "id": "129", + "id": "130", "metadata": {}, "source": [ "**Hint:**" @@ -1746,7 +1761,7 @@ }, { "cell_type": "markdown", - "id": "130", + "id": "131", "metadata": {}, "source": [ "Use the atom sites from the CIF data. You can use the `add` method of\n", @@ -1755,7 +1770,7 @@ }, { "cell_type": "markdown", - "id": "131", + "id": "132", "metadata": {}, "source": [ "**Solution:**" @@ -1764,7 +1779,7 @@ { "cell_type": "code", "execution_count": null, - "id": "132", + "id": "133", "metadata": {}, "outputs": [], "source": [ @@ -1810,7 +1825,7 @@ }, { "cell_type": "markdown", - "id": "133", + "id": "134", "metadata": {}, "source": [ "### 🔗 Exercise 4: Assign Structure to Experiment\n", @@ -1820,7 +1835,7 @@ }, { "cell_type": "markdown", - "id": "134", + "id": "135", "metadata": {}, "source": [ "**Hint:**" @@ -1828,7 +1843,7 @@ }, { "cell_type": "markdown", - "id": "135", + "id": "136", "metadata": {}, "source": [ "Use the `linked_phases` attribute of the experiment to link the\n", @@ -1837,7 +1852,7 @@ }, { "cell_type": "markdown", - "id": "136", + "id": "137", "metadata": {}, "source": [ "**Solution:**" @@ -1846,7 +1861,7 @@ { "cell_type": "code", "execution_count": null, - "id": "137", + "id": "138", "metadata": {}, "outputs": [], "source": [ @@ -1855,7 +1870,7 @@ }, { "cell_type": "markdown", - "id": "138", + "id": "139", "metadata": {}, "source": [ "### 🚀 Exercise 5: Analyze and Fit the Data\n", @@ -1868,7 +1883,7 @@ }, { "cell_type": "markdown", - "id": "139", + "id": "140", "metadata": {}, "source": [ "**Hint:**" @@ -1876,7 +1891,7 @@ }, { "cell_type": "markdown", - "id": "140", + "id": "141", "metadata": {}, "source": [ "You can start with the scale factor and the background points, as in\n", @@ -1885,7 +1900,7 @@ }, { "cell_type": "markdown", - "id": "141", + "id": "142", "metadata": {}, "source": [ "**Solution:**" @@ -1894,7 +1909,7 @@ { "cell_type": "code", "execution_count": null, - "id": "142", + "id": "143", "metadata": {}, "outputs": [], "source": [ @@ -1906,7 +1921,7 @@ }, { "cell_type": "markdown", - "id": "143", + "id": "144", "metadata": {}, "source": [ "#### Exercise 5.2: Run Fitting\n", @@ -1917,7 +1932,7 @@ }, { "cell_type": "markdown", - "id": "144", + "id": "145", "metadata": {}, "source": [ "**Hint:**" @@ -1925,7 +1940,7 @@ }, { "cell_type": "markdown", - "id": "145", + "id": "146", "metadata": {}, "source": [ "Use the `plot_meas_vs_calc` method of the project to visualize the\n", @@ -1936,7 +1951,7 @@ }, { "cell_type": "markdown", - "id": "146", + "id": "147", "metadata": {}, "source": [ "**Solution:**" @@ -1945,7 +1960,7 @@ { "cell_type": "code", "execution_count": null, - "id": "147", + "id": "148", "metadata": {}, "outputs": [], "source": [ @@ -1957,7 +1972,7 @@ }, { "cell_type": "markdown", - "id": "148", + "id": "149", "metadata": {}, "source": [ "#### Exercise 5.3: Find the Misfit in the Fit\n", @@ -1972,7 +1987,7 @@ }, { "cell_type": "markdown", - "id": "149", + "id": "150", "metadata": {}, "source": [ "**Hint:**" @@ -1980,7 +1995,7 @@ }, { "cell_type": "markdown", - "id": "150", + "id": "151", "metadata": {}, "source": [ "Consider the following options:\n", @@ -1992,7 +2007,7 @@ }, { "cell_type": "markdown", - "id": "151", + "id": "152", "metadata": {}, "source": [ "**Solution:**" @@ -2000,7 +2015,7 @@ }, { "cell_type": "markdown", - "id": "152", + "id": "153", "metadata": {}, "source": [ "\n", @@ -2022,7 +2037,7 @@ { "cell_type": "code", "execution_count": null, - "id": "153", + "id": "154", "metadata": {}, "outputs": [], "source": [ @@ -2031,7 +2046,7 @@ }, { "cell_type": "markdown", - "id": "154", + "id": "155", "metadata": {}, "source": [ "#### Exercise 5.4: Refine the LBCO Lattice Parameter\n", @@ -2041,7 +2056,7 @@ }, { "cell_type": "markdown", - "id": "155", + "id": "156", "metadata": {}, "source": [ "**Hint:**" @@ -2049,7 +2064,7 @@ }, { "cell_type": "markdown", - "id": "156", + "id": "157", "metadata": {}, "source": [ "To achieve this, we will set the `free` attribute of the `length_a`\n", @@ -2064,7 +2079,7 @@ }, { "cell_type": "markdown", - "id": "157", + "id": "158", "metadata": {}, "source": [ "**Solution:**" @@ -2073,7 +2088,7 @@ { "cell_type": "code", "execution_count": null, - "id": "158", + "id": "159", "metadata": {}, "outputs": [], "source": [ @@ -2087,7 +2102,7 @@ }, { "cell_type": "markdown", - "id": "159", + "id": "160", "metadata": {}, "source": [ "One of the main goals of this study was to refine the lattice\n", @@ -2100,7 +2115,7 @@ }, { "cell_type": "markdown", - "id": "160", + "id": "161", "metadata": {}, "source": [ "#### Exercise 5.5: Visualize the Fit Results in d-spacing\n", @@ -2111,7 +2126,7 @@ }, { "cell_type": "markdown", - "id": "161", + "id": "162", "metadata": {}, "source": [ "**Hint:**" @@ -2119,7 +2134,7 @@ }, { "cell_type": "markdown", - "id": "162", + "id": "163", "metadata": {}, "source": [ "Use the `plot_meas_vs_calc` method of the project and set the\n", @@ -2128,7 +2143,7 @@ }, { "cell_type": "markdown", - "id": "163", + "id": "164", "metadata": {}, "source": [ "**Solution:**" @@ -2137,7 +2152,7 @@ { "cell_type": "code", "execution_count": null, - "id": "164", + "id": "165", "metadata": {}, "outputs": [], "source": [ @@ -2146,7 +2161,7 @@ }, { "cell_type": "markdown", - "id": "165", + "id": "166", "metadata": {}, "source": [ "#### Exercise 5.6: Refine the Peak Profile Parameters\n", @@ -2166,7 +2181,7 @@ { "cell_type": "code", "execution_count": null, - "id": "166", + "id": "167", "metadata": {}, "outputs": [], "source": [ @@ -2175,7 +2190,7 @@ }, { "cell_type": "markdown", - "id": "167", + "id": "168", "metadata": {}, "source": [ "The peak profile parameters are determined based on both the\n", @@ -2189,7 +2204,7 @@ }, { "cell_type": "markdown", - "id": "168", + "id": "169", "metadata": {}, "source": [ "**Hint:**" @@ -2197,7 +2212,7 @@ }, { "cell_type": "markdown", - "id": "169", + "id": "170", "metadata": {}, "source": [ "You can set the `free` attribute of the peak profile parameters to\n", @@ -2208,7 +2223,7 @@ }, { "cell_type": "markdown", - "id": "170", + "id": "171", "metadata": {}, "source": [ "**Solution:**" @@ -2217,7 +2232,7 @@ { "cell_type": "code", "execution_count": null, - "id": "171", + "id": "172", "metadata": {}, "outputs": [], "source": [ @@ -2237,7 +2252,7 @@ }, { "cell_type": "markdown", - "id": "172", + "id": "173", "metadata": {}, "source": [ "#### Exercise 5.7: Find Undefined Features\n", @@ -2249,7 +2264,7 @@ }, { "cell_type": "markdown", - "id": "173", + "id": "174", "metadata": {}, "source": [ "**Hint:**" @@ -2257,7 +2272,7 @@ }, { "cell_type": "markdown", - "id": "174", + "id": "175", "metadata": {}, "source": [ "While the fit is now significantly better, there are still some\n", @@ -2269,7 +2284,7 @@ }, { "cell_type": "markdown", - "id": "175", + "id": "176", "metadata": {}, "source": [ "**Solution:**" @@ -2278,7 +2293,7 @@ { "cell_type": "code", "execution_count": null, - "id": "176", + "id": "177", "metadata": {}, "outputs": [], "source": [ @@ -2287,7 +2302,7 @@ }, { "cell_type": "markdown", - "id": "177", + "id": "178", "metadata": {}, "source": [ "#### Exercise 5.8: Identify the Cause of the Unexplained Peaks\n", @@ -2300,7 +2315,7 @@ }, { "cell_type": "markdown", - "id": "178", + "id": "179", "metadata": {}, "source": [ "**Hint:**" @@ -2308,7 +2323,7 @@ }, { "cell_type": "markdown", - "id": "179", + "id": "180", "metadata": {}, "source": [ "Consider the following options:\n", @@ -2320,7 +2335,7 @@ }, { "cell_type": "markdown", - "id": "180", + "id": "181", "metadata": {}, "source": [ "**Solution:**" @@ -2328,7 +2343,7 @@ }, { "cell_type": "markdown", - "id": "181", + "id": "182", "metadata": {}, "source": [ "1. ❌ In principle, this could be the case, as sometimes the presence\n", @@ -2348,7 +2363,7 @@ }, { "cell_type": "markdown", - "id": "182", + "id": "183", "metadata": {}, "source": [ "#### Exercise 5.9: Identify the impurity phase\n", @@ -2359,7 +2374,7 @@ }, { "cell_type": "markdown", - "id": "183", + "id": "184", "metadata": {}, "source": [ "**Hint:**" @@ -2367,7 +2382,7 @@ }, { "cell_type": "markdown", - "id": "184", + "id": "185", "metadata": {}, "source": [ "Check the positions of the unexplained peaks in the diffraction\n", @@ -2377,7 +2392,7 @@ }, { "cell_type": "markdown", - "id": "185", + "id": "186", "metadata": {}, "source": [ "**Solution:**" @@ -2385,7 +2400,7 @@ }, { "cell_type": "markdown", - "id": "186", + "id": "187", "metadata": {}, "source": [ "The unexplained peaks are likely due to the presence of a small amount\n", @@ -2400,7 +2415,7 @@ { "cell_type": "code", "execution_count": null, - "id": "187", + "id": "188", "metadata": {}, "outputs": [], "source": [ @@ -2410,7 +2425,7 @@ }, { "cell_type": "markdown", - "id": "188", + "id": "189", "metadata": {}, "source": [ "#### Exercise 5.10: Create a Second Structure – Si as Impurity\n", @@ -2422,7 +2437,7 @@ }, { "cell_type": "markdown", - "id": "189", + "id": "190", "metadata": {}, "source": [ "**Hint:**" @@ -2430,7 +2445,7 @@ }, { "cell_type": "markdown", - "id": "190", + "id": "191", "metadata": {}, "source": [ "You can use the same approach as in the previous part of the notebook,\n", @@ -2440,7 +2455,7 @@ }, { "cell_type": "markdown", - "id": "191", + "id": "192", "metadata": {}, "source": [ "**Solution:**" @@ -2449,7 +2464,7 @@ { "cell_type": "code", "execution_count": null, - "id": "192", + "id": "193", "metadata": {}, "outputs": [], "source": [ @@ -2478,7 +2493,7 @@ }, { "cell_type": "markdown", - "id": "193", + "id": "194", "metadata": {}, "source": [ "#### Exercise 5.11: Refine the Scale of the Si Phase\n", @@ -2491,7 +2506,7 @@ }, { "cell_type": "markdown", - "id": "194", + "id": "195", "metadata": {}, "source": [ "**Hint:**" @@ -2499,7 +2514,7 @@ }, { "cell_type": "markdown", - "id": "195", + "id": "196", "metadata": {}, "source": [ "You can use the `plot_meas_vs_calc` method of the project to visualize\n", @@ -2510,7 +2525,7 @@ }, { "cell_type": "markdown", - "id": "196", + "id": "197", "metadata": {}, "source": [ "**Solution:**" @@ -2519,7 +2534,7 @@ { "cell_type": "code", "execution_count": null, - "id": "197", + "id": "198", "metadata": {}, "outputs": [], "source": [ @@ -2548,7 +2563,7 @@ }, { "cell_type": "markdown", - "id": "198", + "id": "199", "metadata": {}, "source": [ "All previously unexplained peaks are now accounted for in the pattern,\n", @@ -2571,7 +2586,7 @@ { "cell_type": "code", "execution_count": null, - "id": "199", + "id": "200", "metadata": {}, "outputs": [], "source": [ @@ -2580,7 +2595,7 @@ }, { "cell_type": "markdown", - "id": "200", + "id": "201", "metadata": {}, "source": [ "Finally, we save the project to disk to preserve the current state of\n", @@ -2590,7 +2605,7 @@ { "cell_type": "code", "execution_count": null, - "id": "201", + "id": "202", "metadata": {}, "outputs": [], "source": [ @@ -2599,7 +2614,7 @@ }, { "cell_type": "markdown", - "id": "202", + "id": "203", "metadata": {}, "source": [ "#### Final Remarks\n", @@ -2618,7 +2633,7 @@ }, { "cell_type": "markdown", - "id": "203", + "id": "204", "metadata": {}, "source": [ "## 🎁 Bonus\n", @@ -2647,7 +2662,7 @@ ], "metadata": { "jupytext": { - "cell_metadata_filter": "title,tags,-all", + "cell_metadata_filter": "tags,title,-all", "main_language": "python", "notebook_metadata_filter": "-all" } diff --git a/docs/docs/tutorials/ed-14.ipynb b/docs/docs/tutorials/ed-14.ipynb index 5c1f6717..a5db5807 100644 --- a/docs/docs/tutorials/ed-14.ipynb +++ b/docs/docs/tutorials/ed-14.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "502ed03e", + "id": "5bd8b55f", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: Tb2TiO7, HEiDi\n", "\n", @@ -32,7 +47,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -41,7 +56,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +65,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Step 1: Define Project" @@ -59,7 +74,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +84,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "## Step 2: Define Structure" @@ -78,7 +93,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -89,7 +104,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +114,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -109,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -119,7 +134,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -132,7 +147,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -141,7 +156,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "13", "metadata": {}, "source": [ "## Step 3: Define Experiment" @@ -150,7 +165,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -160,7 +175,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -176,7 +191,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -186,7 +201,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -197,7 +212,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -207,7 +222,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -217,7 +232,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "## Step 4: Perform Analysis" @@ -226,7 +241,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -236,7 +251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -247,7 +262,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -257,7 +272,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -269,7 +284,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -280,7 +295,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -290,7 +305,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -300,7 +315,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -309,7 +324,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "29", "metadata": {}, "source": [ "## Step 5: Show Project Summary" @@ -318,7 +333,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "30", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-15.ipynb b/docs/docs/tutorials/ed-15.ipynb index 6e2b2547..cdd646ac 100644 --- a/docs/docs/tutorials/ed-15.ipynb +++ b/docs/docs/tutorials/ed-15.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65ccac80", + "id": "4e0ef3ea", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: Taurine, SENJU\n", "\n", @@ -32,7 +47,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -41,7 +56,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +65,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Step 1: Define Project" @@ -59,7 +74,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +84,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "## Step 2: Define Structure" @@ -78,7 +93,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -89,7 +104,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +114,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -109,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -119,7 +134,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -128,7 +143,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Step 3: Define Experiment" @@ -137,7 +152,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +162,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -163,7 +178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -173,7 +188,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -184,7 +199,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -194,7 +209,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "## Step 4: Perform Analysis" @@ -203,7 +218,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -213,7 +228,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -224,7 +239,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -234,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -246,7 +261,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -257,7 +272,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -267,7 +282,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -277,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -286,7 +301,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "## Step 5: Show Project Summary" @@ -295,7 +310,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "28", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-16.ipynb b/docs/docs/tutorials/ed-16.ipynb index 4cb7d1be..458ded2c 100644 --- a/docs/docs/tutorials/ed-16.ipynb +++ b/docs/docs/tutorials/ed-16.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d57a9295", + "id": "7311bb93", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Joint Refinement: Si, Bragg + PDF\n", "\n", @@ -36,7 +51,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -45,7 +60,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -57,7 +72,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Define Structure\n", @@ -72,7 +87,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -81,7 +96,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "#### Set Space Group" @@ -90,7 +105,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +115,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -109,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -118,7 +133,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -127,7 +142,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -144,7 +159,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Define Experiments\n", @@ -160,7 +175,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -169,7 +184,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "#### Create Experiment" @@ -178,7 +193,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -189,7 +204,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "#### Set Instrument" @@ -198,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -210,7 +225,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -219,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -235,7 +250,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "#### Set Background" @@ -244,7 +259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -255,7 +270,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "22", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -264,7 +279,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -273,7 +288,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "### Experiment 2: PDF (NOMAD, TOF)\n", @@ -284,7 +299,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -293,7 +308,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "26", "metadata": {}, "source": [ "#### Create Experiment" @@ -302,7 +317,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -316,7 +331,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "28", "metadata": {}, "source": [ "#### Set Peak Profile (PDF Parameters)" @@ -325,7 +340,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -339,7 +354,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "30", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -348,7 +363,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -357,7 +372,7 @@ }, { "cell_type": "markdown", - "id": "31", + "id": "32", "metadata": {}, "source": [ "## Define Project\n", @@ -371,7 +386,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -380,7 +395,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "34", "metadata": {}, "source": [ "#### Add Structure" @@ -389,7 +404,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -398,7 +413,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "36", "metadata": {}, "source": [ "#### Add Experiments" @@ -407,7 +422,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -417,7 +432,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "38", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -431,7 +446,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -442,7 +457,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "40", "metadata": {}, "source": [ "#### Set Minimizer" @@ -451,7 +466,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -460,7 +475,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "42", "metadata": {}, "source": [ "#### Plot Measured vs Calculated (Before Fit)" @@ -469,7 +484,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -479,7 +494,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -488,7 +503,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "45", "metadata": {}, "source": [ "#### Set Fitting Parameters\n", @@ -500,7 +515,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -510,7 +525,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "47", "metadata": {}, "source": [ "Bragg experiment parameters." @@ -519,7 +534,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -534,7 +549,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "49", "metadata": {}, "source": [ "PDF experiment parameters." @@ -543,7 +558,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -556,7 +571,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "51", "metadata": {}, "source": [ "#### Show Free Parameters" @@ -565,7 +580,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -574,7 +589,7 @@ }, { "cell_type": "markdown", - "id": "52", + "id": "53", "metadata": {}, "source": [ "#### Run Fitting" @@ -583,7 +598,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -593,7 +608,7 @@ }, { "cell_type": "markdown", - "id": "54", + "id": "55", "metadata": {}, "source": [ "#### Plot Measured vs Calculated (After Fit)" @@ -602,7 +617,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "56", "metadata": {}, "outputs": [], "source": [ @@ -612,7 +627,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "57", "metadata": { "lines_to_next_cell": 2 }, @@ -624,7 +639,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "58", "metadata": {}, "outputs": [], "source": [] diff --git a/docs/docs/tutorials/ed-17.ipynb b/docs/docs/tutorials/ed-17.ipynb index bbb422ca..08eb5a20 100644 --- a/docs/docs/tutorials/ed-17.ipynb +++ b/docs/docs/tutorials/ed-17.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9fd2be7c", + "id": "1c3cb779", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: Co2SiO4, D20 (T-scan)\n", "\n", @@ -35,7 +50,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -44,7 +59,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Step 1: Define Project\n", @@ -64,7 +79,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -73,7 +88,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "Set output verbosity level to \"short\" to show only one-line status\n", @@ -83,7 +98,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -92,7 +107,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "## Step 2: Define Crystal Structure\n", @@ -106,7 +121,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -116,7 +131,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "#### Set Space Group" @@ -125,7 +140,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -135,7 +150,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -144,7 +159,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -155,7 +170,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -164,7 +179,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -226,7 +241,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "## Step 3: Define Experiments\n", @@ -240,7 +255,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -249,7 +264,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "#### Create Experiments and Set Temperature" @@ -258,7 +273,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -278,7 +293,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "#### Set Instrument" @@ -287,7 +302,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -298,7 +313,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "22", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -307,7 +322,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -320,7 +335,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "#### Set Excluded Regions" @@ -329,7 +344,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -340,7 +355,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "26", "metadata": {}, "source": [ "#### Set Background" @@ -349,7 +364,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -372,7 +387,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "28", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -381,7 +396,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -391,7 +406,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "30", "metadata": {}, "source": [ "## Step 4: Perform Analysis\n", @@ -402,7 +417,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "31", "metadata": {}, "source": [ "#### Set Free Parameters" @@ -411,7 +426,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -442,7 +457,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -462,7 +477,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "34", "metadata": {}, "source": [ "#### Set Constraints\n", @@ -473,23 +488,23 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "35", "metadata": {}, "outputs": [], "source": [ "project.analysis.aliases.create(\n", " label='biso_Co1',\n", - " param_uid=structure.atom_sites['Co1'].b_iso.uid,\n", + " param=structure.atom_sites['Co1'].b_iso,\n", ")\n", "project.analysis.aliases.create(\n", " label='biso_Co2',\n", - " param_uid=structure.atom_sites['Co2'].b_iso.uid,\n", + " param=structure.atom_sites['Co2'].b_iso,\n", ")" ] }, { "cell_type": "markdown", - "id": "35", + "id": "36", "metadata": {}, "source": [ "Set constraints." @@ -498,8 +513,10 @@ { "cell_type": "code", "execution_count": null, - "id": "36", - "metadata": {}, + "id": "37", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "project.analysis.constraints.create(\n", @@ -509,26 +526,8 @@ }, { "cell_type": "markdown", - "id": "37", - "metadata": {}, - "source": [ - "Apply constraints." - ] - }, - { - "cell_type": "code", - "execution_count": null, "id": "38", "metadata": {}, - "outputs": [], - "source": [ - "project.analysis.apply_constraints()" - ] - }, - { - "cell_type": "markdown", - "id": "39", - "metadata": {}, "source": [ "#### Set Fit Mode" ] @@ -536,7 +535,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -545,7 +544,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "40", "metadata": {}, "source": [ "#### Run Fitting" @@ -554,7 +553,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -563,7 +562,7 @@ }, { "cell_type": "markdown", - "id": "43", + "id": "42", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -572,7 +571,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -582,7 +581,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "44", "metadata": {}, "source": [ "#### Plot Parameter Evolution\n", @@ -593,7 +592,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -602,7 +601,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "46", "metadata": {}, "source": [ "Plot unit cell parameters vs. temperature." @@ -611,7 +610,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -622,7 +621,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "48", "metadata": {}, "source": [ "Plot isotropic displacement parameters vs. temperature." @@ -631,7 +630,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -644,7 +643,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "50", "metadata": {}, "source": [ "Plot selected fractional coordinates vs. temperature." @@ -653,7 +652,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "51", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-18.ipynb b/docs/docs/tutorials/ed-18.ipynb new file mode 100644 index 00000000..9f51a133 --- /dev/null +++ b/docs/docs/tutorials/ed-18.ipynb @@ -0,0 +1,225 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "a3ec0a8c", + "metadata": { + "tags": [ + "hide-in-docs" + ] + }, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0", + "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "# Load Project and Fit: LBCO, HRPT\n", + "\n", + "This is the most minimal example of using EasyDiffraction. It shows\n", + "how to load a previously saved project from a directory and run\n", + "refinement — all in just a few lines of code.\n", + "\n", + "The project is first created and saved as a setup step (this would\n", + "normally be done once and the directory would already exist on disk).\n", + "Then the saved project is loaded back and fitted.\n", + "\n", + "For details on how to define structures and experiments, see the other\n", + "tutorials." + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "## Import Library" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "import easydiffraction as ed" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "## Setup: Create and Save a Project\n", + "\n", + "This step creates a project from CIF files and saves it to a\n", + "directory. In practice, the project directory would already exist\n", + "on disk from a previous session." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a project from CIF files\n", + "project = ed.Project()\n", + "project.structures.add_from_cif_path(ed.download_data(id=1, destination='data'))\n", + "project.experiments.add_from_cif_path(ed.download_data(id=2, destination='data'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "project.analysis.aliases.create(\n", + " label='biso_La',\n", + " param=project.structures['lbco'].atom_sites['La'].b_iso,\n", + ")\n", + "project.analysis.aliases.create(\n", + " label='biso_Ba',\n", + " param=project.structures['lbco'].atom_sites['Ba'].b_iso,\n", + ")\n", + "\n", + "project.analysis.aliases.create(\n", + " label='occ_La',\n", + " param=project.structures['lbco'].atom_sites['La'].occupancy,\n", + ")\n", + "project.analysis.aliases.create(\n", + " label='occ_Ba',\n", + " param=project.structures['lbco'].atom_sites['Ba'].occupancy,\n", + ")\n", + "\n", + "project.analysis.constraints.create(expression='biso_Ba = biso_La')\n", + "project.analysis.constraints.create(expression='occ_Ba = 1 - occ_La')\n", + "\n", + "project.structures['lbco'].atom_sites['La'].occupancy.free = True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# Save to a directory\n", + "project.save_as('lbco_project')" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "## Step 1: Load Project from Directory" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "project = ed.Project.load('lbco_project')" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "## Step 2: Perform Analysis" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "project.analysis.fit()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "project.analysis.show_fit_results()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True)" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "## Step 3: Show Project Summary" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "project.summary.show_report()" + ] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/tutorials/ed-2.ipynb b/docs/docs/tutorials/ed-2.ipynb index 06f40dc9..cb95e408 100644 --- a/docs/docs/tutorials/ed-2.ipynb +++ b/docs/docs/tutorials/ed-2.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7dda0a11", + "id": "d9a613b4", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: LBCO, HRPT\n", "\n", @@ -48,7 +63,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -57,7 +72,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -66,7 +81,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Step 1: Define Project" @@ -75,7 +90,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -84,7 +99,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "## Step 2: Define Structure" @@ -93,7 +108,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -103,7 +118,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -113,7 +128,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -124,7 +139,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -134,7 +149,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -180,7 +195,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Step 3: Define Experiment" @@ -189,7 +204,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -199,7 +214,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -215,7 +230,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -225,7 +240,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -236,7 +251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -249,7 +264,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -263,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -274,7 +289,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -283,7 +298,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "21", "metadata": {}, "source": [ "## Step 4: Perform Analysis" @@ -292,7 +307,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -307,7 +322,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": { "lines_to_next_cell": 2 }, @@ -332,7 +347,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -343,7 +358,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-3.ipynb b/docs/docs/tutorials/ed-3.ipynb index f64a041a..3dd711e2 100644 --- a/docs/docs/tutorials/ed-3.ipynb +++ b/docs/docs/tutorials/ed-3.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "be19f628", + "id": "d59d709c", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: LBCO, HRPT\n", "\n", @@ -46,7 +61,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -55,7 +70,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -64,7 +79,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Step 1: Create a Project\n", @@ -74,7 +89,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "5", "metadata": {}, "source": [ "#### Create Project" @@ -83,7 +98,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -92,7 +107,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "7", "metadata": {}, "source": [ "#### Set Project Metadata" @@ -101,7 +116,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -114,7 +129,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "9", "metadata": {}, "source": [ "#### Show Project Metadata as CIF" @@ -123,7 +138,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -132,7 +147,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "11", "metadata": {}, "source": [ "#### Save Project\n", @@ -145,7 +160,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -154,7 +169,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "13", "metadata": {}, "source": [ "#### Set Up Data Plotter" @@ -162,7 +177,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "Show supported plotting engines." @@ -171,7 +186,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -180,7 +195,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "Show current plotting configuration." @@ -189,7 +204,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -198,7 +213,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "Set plotting engine." @@ -207,7 +222,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -218,7 +233,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "## Step 2: Define Structure\n", @@ -229,7 +244,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "21", "metadata": {}, "source": [ "#### Add Structure" @@ -238,7 +253,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -247,7 +262,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "23", "metadata": {}, "source": [ "#### Show Defined Structures\n", @@ -261,7 +276,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -270,7 +285,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "25", "metadata": {}, "source": [ "#### Set Space Group\n", @@ -281,7 +296,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -291,7 +306,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "#### Set Unit Cell\n", @@ -302,7 +317,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -311,7 +326,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "29", "metadata": {}, "source": [ "#### Set Atom Sites\n", @@ -322,7 +337,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -368,7 +383,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "31", "metadata": {}, "source": [ "#### Show Structure as CIF" @@ -377,7 +392,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -386,7 +401,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "33", "metadata": {}, "source": [ "#### Show Structure Structure" @@ -395,7 +410,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -404,7 +419,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "35", "metadata": {}, "source": [ "#### Save Project State\n", @@ -417,7 +432,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -426,7 +441,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "37", "metadata": {}, "source": [ "## Step 3: Define Experiment\n", @@ -437,7 +452,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "38", "metadata": {}, "source": [ "#### Download Measured Data\n", @@ -448,7 +463,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -457,7 +472,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "40", "metadata": {}, "source": [ "#### Add Diffraction Experiment" @@ -466,7 +481,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -481,7 +496,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "42", "metadata": {}, "source": [ "#### Show Defined Experiments" @@ -490,7 +505,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -499,7 +514,7 @@ }, { "cell_type": "markdown", - "id": "43", + "id": "44", "metadata": {}, "source": [ "#### Show Measured Data" @@ -508,7 +523,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -517,7 +532,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "46", "metadata": {}, "source": [ "#### Set Instrument\n", @@ -528,7 +543,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -538,7 +553,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "48", "metadata": {}, "source": [ "#### Set Peak Profile\n", @@ -549,7 +564,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -558,7 +573,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "50", "metadata": {}, "source": [ "Show the current peak profile type." @@ -567,7 +582,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "51", "metadata": {}, "outputs": [], "source": [ @@ -576,7 +591,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "52", "metadata": {}, "source": [ "Select the desired peak profile type." @@ -585,7 +600,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -594,7 +609,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "54", "metadata": {}, "source": [ "Modify default peak profile parameters." @@ -603,7 +618,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -616,7 +631,7 @@ }, { "cell_type": "markdown", - "id": "55", + "id": "56", "metadata": {}, "source": [ "#### Set Background" @@ -624,7 +639,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "57", "metadata": {}, "source": [ "Show supported background types." @@ -633,7 +648,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "58", "metadata": {}, "outputs": [], "source": [ @@ -642,7 +657,7 @@ }, { "cell_type": "markdown", - "id": "58", + "id": "59", "metadata": {}, "source": [ "Show current background type." @@ -651,7 +666,7 @@ { "cell_type": "code", "execution_count": null, - "id": "59", + "id": "60", "metadata": {}, "outputs": [], "source": [ @@ -660,7 +675,7 @@ }, { "cell_type": "markdown", - "id": "60", + "id": "61", "metadata": {}, "source": [ "Select the desired background type." @@ -669,7 +684,7 @@ { "cell_type": "code", "execution_count": null, - "id": "61", + "id": "62", "metadata": {}, "outputs": [], "source": [ @@ -678,7 +693,7 @@ }, { "cell_type": "markdown", - "id": "62", + "id": "63", "metadata": {}, "source": [ "Add background points." @@ -687,7 +702,7 @@ { "cell_type": "code", "execution_count": null, - "id": "63", + "id": "64", "metadata": {}, "outputs": [], "source": [ @@ -700,7 +715,7 @@ }, { "cell_type": "markdown", - "id": "64", + "id": "65", "metadata": {}, "source": [ "Show current background points." @@ -709,7 +724,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65", + "id": "66", "metadata": {}, "outputs": [], "source": [ @@ -718,7 +733,7 @@ }, { "cell_type": "markdown", - "id": "66", + "id": "67", "metadata": {}, "source": [ "#### Set Linked Phases\n", @@ -729,7 +744,7 @@ { "cell_type": "code", "execution_count": null, - "id": "67", + "id": "68", "metadata": {}, "outputs": [], "source": [ @@ -738,7 +753,7 @@ }, { "cell_type": "markdown", - "id": "68", + "id": "69", "metadata": {}, "source": [ "#### Show Experiment as CIF" @@ -747,7 +762,7 @@ { "cell_type": "code", "execution_count": null, - "id": "69", + "id": "70", "metadata": {}, "outputs": [], "source": [ @@ -756,7 +771,7 @@ }, { "cell_type": "markdown", - "id": "70", + "id": "71", "metadata": {}, "source": [ "#### Save Project State" @@ -765,7 +780,7 @@ { "cell_type": "code", "execution_count": null, - "id": "71", + "id": "72", "metadata": {}, "outputs": [], "source": [ @@ -774,7 +789,7 @@ }, { "cell_type": "markdown", - "id": "72", + "id": "73", "metadata": {}, "source": [ "## Step 4: Perform Analysis\n", @@ -790,7 +805,7 @@ { "cell_type": "code", "execution_count": null, - "id": "73", + "id": "74", "metadata": {}, "outputs": [], "source": [ @@ -799,7 +814,7 @@ }, { "cell_type": "markdown", - "id": "74", + "id": "75", "metadata": {}, "source": [ "Show current calculation engine for this experiment." @@ -808,7 +823,7 @@ { "cell_type": "code", "execution_count": null, - "id": "75", + "id": "76", "metadata": {}, "outputs": [], "source": [ @@ -817,7 +832,7 @@ }, { "cell_type": "markdown", - "id": "76", + "id": "77", "metadata": {}, "source": [ "Select the desired calculation engine." @@ -826,7 +841,7 @@ { "cell_type": "code", "execution_count": null, - "id": "77", + "id": "78", "metadata": {}, "outputs": [], "source": [ @@ -835,7 +850,7 @@ }, { "cell_type": "markdown", - "id": "78", + "id": "79", "metadata": {}, "source": [ "#### Show Calculated Data" @@ -844,7 +859,7 @@ { "cell_type": "code", "execution_count": null, - "id": "79", + "id": "80", "metadata": {}, "outputs": [], "source": [ @@ -853,7 +868,7 @@ }, { "cell_type": "markdown", - "id": "80", + "id": "81", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -862,7 +877,7 @@ { "cell_type": "code", "execution_count": null, - "id": "81", + "id": "82", "metadata": {}, "outputs": [], "source": [ @@ -872,7 +887,7 @@ { "cell_type": "code", "execution_count": null, - "id": "82", + "id": "83", "metadata": {}, "outputs": [], "source": [ @@ -881,7 +896,7 @@ }, { "cell_type": "markdown", - "id": "83", + "id": "84", "metadata": {}, "source": [ "#### Show Parameters\n", @@ -892,7 +907,7 @@ { "cell_type": "code", "execution_count": null, - "id": "84", + "id": "85", "metadata": {}, "outputs": [], "source": [ @@ -901,7 +916,7 @@ }, { "cell_type": "markdown", - "id": "85", + "id": "86", "metadata": {}, "source": [ "Show all fittable parameters." @@ -910,7 +925,7 @@ { "cell_type": "code", "execution_count": null, - "id": "86", + "id": "87", "metadata": {}, "outputs": [], "source": [ @@ -919,7 +934,7 @@ }, { "cell_type": "markdown", - "id": "87", + "id": "88", "metadata": {}, "source": [ "Show only free parameters." @@ -928,7 +943,7 @@ { "cell_type": "code", "execution_count": null, - "id": "88", + "id": "89", "metadata": {}, "outputs": [], "source": [ @@ -937,7 +952,7 @@ }, { "cell_type": "markdown", - "id": "89", + "id": "90", "metadata": {}, "source": [ "Show how to access parameters in the code." @@ -946,7 +961,7 @@ { "cell_type": "code", "execution_count": null, - "id": "90", + "id": "91", "metadata": {}, "outputs": [], "source": [ @@ -955,7 +970,7 @@ }, { "cell_type": "markdown", - "id": "91", + "id": "92", "metadata": {}, "source": [ "#### Set Fit Mode\n", @@ -966,7 +981,7 @@ { "cell_type": "code", "execution_count": null, - "id": "92", + "id": "93", "metadata": {}, "outputs": [], "source": [ @@ -975,7 +990,7 @@ }, { "cell_type": "markdown", - "id": "93", + "id": "94", "metadata": {}, "source": [ "Show current fit mode." @@ -984,7 +999,7 @@ { "cell_type": "code", "execution_count": null, - "id": "94", + "id": "95", "metadata": {}, "outputs": [], "source": [ @@ -993,7 +1008,7 @@ }, { "cell_type": "markdown", - "id": "95", + "id": "96", "metadata": {}, "source": [ "Select desired fit mode." @@ -1002,7 +1017,7 @@ { "cell_type": "code", "execution_count": null, - "id": "96", + "id": "97", "metadata": {}, "outputs": [], "source": [ @@ -1011,7 +1026,7 @@ }, { "cell_type": "markdown", - "id": "97", + "id": "98", "metadata": {}, "source": [ "#### Set Minimizer\n", @@ -1022,7 +1037,7 @@ { "cell_type": "code", "execution_count": null, - "id": "98", + "id": "99", "metadata": {}, "outputs": [], "source": [ @@ -1031,7 +1046,7 @@ }, { "cell_type": "markdown", - "id": "99", + "id": "100", "metadata": {}, "source": [ "Show current fitting engine." @@ -1040,7 +1055,7 @@ { "cell_type": "code", "execution_count": null, - "id": "100", + "id": "101", "metadata": {}, "outputs": [], "source": [ @@ -1049,7 +1064,7 @@ }, { "cell_type": "markdown", - "id": "101", + "id": "102", "metadata": {}, "source": [ "Select desired fitting engine." @@ -1058,7 +1073,7 @@ { "cell_type": "code", "execution_count": null, - "id": "102", + "id": "103", "metadata": {}, "outputs": [], "source": [ @@ -1067,7 +1082,7 @@ }, { "cell_type": "markdown", - "id": "103", + "id": "104", "metadata": {}, "source": [ "### Perform Fit 1/5\n", @@ -1078,7 +1093,7 @@ { "cell_type": "code", "execution_count": null, - "id": "104", + "id": "105", "metadata": {}, "outputs": [], "source": [ @@ -1087,7 +1102,7 @@ }, { "cell_type": "markdown", - "id": "105", + "id": "106", "metadata": {}, "source": [ "Set experiment parameters to be refined." @@ -1096,7 +1111,7 @@ { "cell_type": "code", "execution_count": null, - "id": "106", + "id": "107", "metadata": {}, "outputs": [], "source": [ @@ -1111,7 +1126,7 @@ }, { "cell_type": "markdown", - "id": "107", + "id": "108", "metadata": {}, "source": [ "Show free parameters after selection." @@ -1120,7 +1135,7 @@ { "cell_type": "code", "execution_count": null, - "id": "108", + "id": "109", "metadata": {}, "outputs": [], "source": [ @@ -1129,7 +1144,7 @@ }, { "cell_type": "markdown", - "id": "109", + "id": "110", "metadata": {}, "source": [ "#### Run Fitting" @@ -1138,7 +1153,7 @@ { "cell_type": "code", "execution_count": null, - "id": "110", + "id": "111", "metadata": {}, "outputs": [], "source": [ @@ -1148,7 +1163,7 @@ }, { "cell_type": "markdown", - "id": "111", + "id": "112", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1157,7 +1172,7 @@ { "cell_type": "code", "execution_count": null, - "id": "112", + "id": "113", "metadata": {}, "outputs": [], "source": [ @@ -1167,7 +1182,7 @@ { "cell_type": "code", "execution_count": null, - "id": "113", + "id": "114", "metadata": {}, "outputs": [], "source": [ @@ -1176,7 +1191,7 @@ }, { "cell_type": "markdown", - "id": "114", + "id": "115", "metadata": {}, "source": [ "#### Save Project State" @@ -1185,7 +1200,7 @@ { "cell_type": "code", "execution_count": null, - "id": "115", + "id": "116", "metadata": {}, "outputs": [], "source": [ @@ -1194,7 +1209,7 @@ }, { "cell_type": "markdown", - "id": "116", + "id": "117", "metadata": {}, "source": [ "### Perform Fit 2/5\n", @@ -1205,7 +1220,7 @@ { "cell_type": "code", "execution_count": null, - "id": "117", + "id": "118", "metadata": {}, "outputs": [], "source": [ @@ -1217,7 +1232,7 @@ }, { "cell_type": "markdown", - "id": "118", + "id": "119", "metadata": {}, "source": [ "Show free parameters after selection." @@ -1226,7 +1241,7 @@ { "cell_type": "code", "execution_count": null, - "id": "119", + "id": "120", "metadata": {}, "outputs": [], "source": [ @@ -1235,7 +1250,7 @@ }, { "cell_type": "markdown", - "id": "120", + "id": "121", "metadata": {}, "source": [ "#### Run Fitting" @@ -1244,7 +1259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "121", + "id": "122", "metadata": {}, "outputs": [], "source": [ @@ -1254,7 +1269,7 @@ }, { "cell_type": "markdown", - "id": "122", + "id": "123", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1263,7 +1278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "123", + "id": "124", "metadata": {}, "outputs": [], "source": [ @@ -1273,7 +1288,7 @@ { "cell_type": "code", "execution_count": null, - "id": "124", + "id": "125", "metadata": {}, "outputs": [], "source": [ @@ -1282,7 +1297,7 @@ }, { "cell_type": "markdown", - "id": "125", + "id": "126", "metadata": {}, "source": [ "#### Save Project State" @@ -1291,7 +1306,7 @@ { "cell_type": "code", "execution_count": null, - "id": "126", + "id": "127", "metadata": {}, "outputs": [], "source": [ @@ -1300,7 +1315,7 @@ }, { "cell_type": "markdown", - "id": "127", + "id": "128", "metadata": {}, "source": [ "### Perform Fit 3/5\n", @@ -1311,7 +1326,7 @@ { "cell_type": "code", "execution_count": null, - "id": "128", + "id": "129", "metadata": {}, "outputs": [], "source": [ @@ -1323,7 +1338,7 @@ }, { "cell_type": "markdown", - "id": "129", + "id": "130", "metadata": {}, "source": [ "Show free parameters after selection." @@ -1332,7 +1347,7 @@ { "cell_type": "code", "execution_count": null, - "id": "130", + "id": "131", "metadata": {}, "outputs": [], "source": [ @@ -1341,7 +1356,7 @@ }, { "cell_type": "markdown", - "id": "131", + "id": "132", "metadata": {}, "source": [ "#### Run Fitting" @@ -1350,7 +1365,7 @@ { "cell_type": "code", "execution_count": null, - "id": "132", + "id": "133", "metadata": {}, "outputs": [], "source": [ @@ -1360,7 +1375,7 @@ }, { "cell_type": "markdown", - "id": "133", + "id": "134", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1369,7 +1384,7 @@ { "cell_type": "code", "execution_count": null, - "id": "134", + "id": "135", "metadata": {}, "outputs": [], "source": [ @@ -1379,7 +1394,7 @@ { "cell_type": "code", "execution_count": null, - "id": "135", + "id": "136", "metadata": {}, "outputs": [], "source": [ @@ -1388,7 +1403,7 @@ }, { "cell_type": "markdown", - "id": "136", + "id": "137", "metadata": {}, "source": [ "#### Save Project State" @@ -1397,7 +1412,7 @@ { "cell_type": "code", "execution_count": null, - "id": "137", + "id": "138", "metadata": {}, "outputs": [], "source": [ @@ -1406,7 +1421,7 @@ }, { "cell_type": "markdown", - "id": "138", + "id": "139", "metadata": {}, "source": [ "### Perform Fit 4/5\n", @@ -1419,23 +1434,23 @@ { "cell_type": "code", "execution_count": null, - "id": "139", + "id": "140", "metadata": {}, "outputs": [], "source": [ "project.analysis.aliases.create(\n", " label='biso_La',\n", - " param_uid=project.structures['lbco'].atom_sites['La'].b_iso.uid,\n", + " param=project.structures['lbco'].atom_sites['La'].b_iso,\n", ")\n", "project.analysis.aliases.create(\n", " label='biso_Ba',\n", - " param_uid=project.structures['lbco'].atom_sites['Ba'].b_iso.uid,\n", + " param=project.structures['lbco'].atom_sites['Ba'].b_iso,\n", ")" ] }, { "cell_type": "markdown", - "id": "140", + "id": "141", "metadata": {}, "source": [ "Set constraints." @@ -1444,7 +1459,7 @@ { "cell_type": "code", "execution_count": null, - "id": "141", + "id": "142", "metadata": {}, "outputs": [], "source": [ @@ -1453,7 +1468,7 @@ }, { "cell_type": "markdown", - "id": "142", + "id": "143", "metadata": {}, "source": [ "Show defined constraints." @@ -1462,7 +1477,7 @@ { "cell_type": "code", "execution_count": null, - "id": "143", + "id": "144", "metadata": {}, "outputs": [], "source": [ @@ -1471,52 +1486,16 @@ }, { "cell_type": "markdown", - "id": "144", - "metadata": {}, - "source": [ - "Show free parameters before applying constraints." - ] - }, - { - "cell_type": "code", - "execution_count": null, "id": "145", "metadata": {}, - "outputs": [], - "source": [ - "project.analysis.show_free_params()" - ] - }, - { - "cell_type": "markdown", - "id": "146", - "metadata": {}, "source": [ - "Apply constraints." + "Show free parameters." ] }, { "cell_type": "code", "execution_count": null, - "id": "147", - "metadata": {}, - "outputs": [], - "source": [ - "project.analysis.apply_constraints()" - ] - }, - { - "cell_type": "markdown", - "id": "148", - "metadata": {}, - "source": [ - "Show free parameters after applying constraints." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "149", + "id": "146", "metadata": {}, "outputs": [], "source": [ @@ -1525,7 +1504,7 @@ }, { "cell_type": "markdown", - "id": "150", + "id": "147", "metadata": {}, "source": [ "#### Run Fitting" @@ -1534,7 +1513,7 @@ { "cell_type": "code", "execution_count": null, - "id": "151", + "id": "148", "metadata": {}, "outputs": [], "source": [ @@ -1544,7 +1523,7 @@ }, { "cell_type": "markdown", - "id": "152", + "id": "149", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1553,7 +1532,7 @@ { "cell_type": "code", "execution_count": null, - "id": "153", + "id": "150", "metadata": {}, "outputs": [], "source": [ @@ -1563,7 +1542,7 @@ { "cell_type": "code", "execution_count": null, - "id": "154", + "id": "151", "metadata": {}, "outputs": [], "source": [ @@ -1572,7 +1551,7 @@ }, { "cell_type": "markdown", - "id": "155", + "id": "152", "metadata": {}, "source": [ "#### Save Project State" @@ -1581,7 +1560,7 @@ { "cell_type": "code", "execution_count": null, - "id": "156", + "id": "153", "metadata": {}, "outputs": [], "source": [ @@ -1590,7 +1569,7 @@ }, { "cell_type": "markdown", - "id": "157", + "id": "154", "metadata": {}, "source": [ "### Perform Fit 5/5\n", @@ -1603,23 +1582,23 @@ { "cell_type": "code", "execution_count": null, - "id": "158", + "id": "155", "metadata": {}, "outputs": [], "source": [ "project.analysis.aliases.create(\n", " label='occ_La',\n", - " param_uid=project.structures['lbco'].atom_sites['La'].occupancy.uid,\n", + " param=project.structures['lbco'].atom_sites['La'].occupancy,\n", ")\n", "project.analysis.aliases.create(\n", " label='occ_Ba',\n", - " param_uid=project.structures['lbco'].atom_sites['Ba'].occupancy.uid,\n", + " param=project.structures['lbco'].atom_sites['Ba'].occupancy,\n", ")" ] }, { "cell_type": "markdown", - "id": "159", + "id": "156", "metadata": {}, "source": [ "Set more constraints." @@ -1628,7 +1607,7 @@ { "cell_type": "code", "execution_count": null, - "id": "160", + "id": "157", "metadata": {}, "outputs": [], "source": [ @@ -1639,7 +1618,7 @@ }, { "cell_type": "markdown", - "id": "161", + "id": "158", "metadata": {}, "source": [ "Show defined constraints." @@ -1648,8 +1627,10 @@ { "cell_type": "code", "execution_count": null, - "id": "162", - "metadata": {}, + "id": "159", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "project.analysis.show_constraints()" @@ -1657,25 +1638,7 @@ }, { "cell_type": "markdown", - "id": "163", - "metadata": {}, - "source": [ - "Apply constraints." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "164", - "metadata": {}, - "outputs": [], - "source": [ - "project.analysis.apply_constraints()" - ] - }, - { - "cell_type": "markdown", - "id": "165", + "id": "160", "metadata": {}, "source": [ "Set structure parameters to be refined." @@ -1684,7 +1647,7 @@ { "cell_type": "code", "execution_count": null, - "id": "166", + "id": "161", "metadata": {}, "outputs": [], "source": [ @@ -1693,7 +1656,7 @@ }, { "cell_type": "markdown", - "id": "167", + "id": "162", "metadata": {}, "source": [ "Show free parameters after selection." @@ -1702,7 +1665,7 @@ { "cell_type": "code", "execution_count": null, - "id": "168", + "id": "163", "metadata": {}, "outputs": [], "source": [ @@ -1711,7 +1674,7 @@ }, { "cell_type": "markdown", - "id": "169", + "id": "164", "metadata": {}, "source": [ "#### Run Fitting" @@ -1720,7 +1683,7 @@ { "cell_type": "code", "execution_count": null, - "id": "170", + "id": "165", "metadata": {}, "outputs": [], "source": [ @@ -1730,7 +1693,7 @@ }, { "cell_type": "markdown", - "id": "171", + "id": "166", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1739,7 +1702,7 @@ { "cell_type": "code", "execution_count": null, - "id": "172", + "id": "167", "metadata": {}, "outputs": [], "source": [ @@ -1749,7 +1712,7 @@ { "cell_type": "code", "execution_count": null, - "id": "173", + "id": "168", "metadata": {}, "outputs": [], "source": [ @@ -1758,7 +1721,7 @@ }, { "cell_type": "markdown", - "id": "174", + "id": "169", "metadata": {}, "source": [ "#### Save Project State" @@ -1767,7 +1730,7 @@ { "cell_type": "code", "execution_count": null, - "id": "175", + "id": "170", "metadata": {}, "outputs": [], "source": [ @@ -1776,7 +1739,7 @@ }, { "cell_type": "markdown", - "id": "176", + "id": "171", "metadata": {}, "source": [ "## Step 5: Summary\n", @@ -1786,7 +1749,7 @@ }, { "cell_type": "markdown", - "id": "177", + "id": "172", "metadata": {}, "source": [ "#### Show Project Summary" @@ -1795,7 +1758,7 @@ { "cell_type": "code", "execution_count": null, - "id": "178", + "id": "173", "metadata": {}, "outputs": [], "source": [ @@ -1805,7 +1768,7 @@ { "cell_type": "code", "execution_count": null, - "id": "179", + "id": "174", "metadata": {}, "outputs": [], "source": [] diff --git a/docs/docs/tutorials/ed-4.ipynb b/docs/docs/tutorials/ed-4.ipynb index 9d9381b6..5bba8e84 100644 --- a/docs/docs/tutorials/ed-4.ipynb +++ b/docs/docs/tutorials/ed-4.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ebfd4b4e", + "id": "0e6027e8", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: PbSO4, NPD + XRD\n", "\n", @@ -39,7 +54,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -48,7 +63,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -60,7 +75,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Define Structure\n", @@ -74,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -83,7 +98,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "#### Set Space Group" @@ -92,7 +107,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -101,7 +116,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -110,7 +125,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -121,7 +136,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -130,7 +145,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": { "lines_to_next_cell": 2 }, @@ -185,7 +200,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Define Experiments\n", @@ -201,7 +216,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -210,7 +225,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "#### Create Experiment" @@ -219,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -232,7 +247,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "#### Set Instrument" @@ -241,7 +256,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -251,7 +266,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -260,7 +275,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -273,7 +288,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "#### Set Background" @@ -281,7 +296,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "21", "metadata": {}, "source": [ "Select the background type." @@ -290,7 +305,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -299,7 +314,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "23", "metadata": {}, "source": [ "Add background points." @@ -308,7 +323,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -327,7 +342,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "25", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -336,7 +351,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -345,7 +360,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "### Experiment 2: xrd\n", @@ -356,7 +371,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -365,7 +380,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "29", "metadata": {}, "source": [ "#### Create Experiment" @@ -374,7 +389,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -387,7 +402,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "31", "metadata": {}, "source": [ "#### Set Instrument" @@ -396,7 +411,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -406,7 +421,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "33", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -415,7 +430,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -428,7 +443,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "35", "metadata": {}, "source": [ "#### Set Background" @@ -436,7 +451,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "36", "metadata": {}, "source": [ "Select background type." @@ -445,7 +460,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -454,7 +469,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "38", "metadata": {}, "source": [ "Add background points." @@ -463,7 +478,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -480,7 +495,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "40", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -489,7 +504,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -498,7 +513,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "42", "metadata": {}, "source": [ "## Define Project\n", @@ -512,7 +527,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -521,7 +536,7 @@ }, { "cell_type": "markdown", - "id": "43", + "id": "44", "metadata": {}, "source": [ "#### Add Structure" @@ -530,7 +545,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -539,7 +554,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "46", "metadata": {}, "source": [ "#### Add Experiments" @@ -548,7 +563,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -558,7 +573,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "48", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -572,7 +587,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -581,7 +596,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "50", "metadata": {}, "source": [ "#### Set Minimizer" @@ -590,7 +605,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "51", "metadata": {}, "outputs": [], "source": [ @@ -599,7 +614,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "52", "metadata": {}, "source": [ "#### Set Fitting Parameters\n", @@ -610,7 +625,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -621,7 +636,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "54", "metadata": {}, "source": [ "Set experiment parameters to be optimized." @@ -630,7 +645,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -647,7 +662,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "56", "metadata": {}, "outputs": [], "source": [ @@ -666,7 +681,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "57", "metadata": {}, "source": [ "#### Perform Fit" @@ -675,7 +690,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "58", "metadata": {}, "outputs": [], "source": [ @@ -685,7 +700,7 @@ }, { "cell_type": "markdown", - "id": "58", + "id": "59", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -694,7 +709,7 @@ { "cell_type": "code", "execution_count": null, - "id": "59", + "id": "60", "metadata": {}, "outputs": [], "source": [ @@ -704,7 +719,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "61", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-5.ipynb b/docs/docs/tutorials/ed-5.ipynb index f3b3ba67..ef7e672a 100644 --- a/docs/docs/tutorials/ed-5.ipynb +++ b/docs/docs/tutorials/ed-5.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d07ce7b8", + "id": "1302dcf1", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: Co2SiO4, D20\n", "\n", @@ -33,7 +48,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -42,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Define Structure\n", @@ -68,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -77,7 +92,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "#### Set Space Group" @@ -86,7 +101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -96,7 +111,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -105,7 +120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -116,7 +131,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -125,7 +140,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -187,7 +202,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Define Experiment\n", @@ -201,7 +216,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -210,7 +225,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "#### Create Experiment" @@ -219,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -228,7 +243,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "#### Set Instrument" @@ -237,7 +252,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -247,7 +262,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -256,7 +271,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -267,7 +282,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "#### Set Background" @@ -276,7 +291,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -298,7 +313,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "22", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -307,7 +322,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -316,7 +331,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "## Define Project\n", @@ -330,7 +345,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -339,7 +354,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "26", "metadata": {}, "source": [ "#### Set Plotting Engine" @@ -348,7 +363,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -359,7 +374,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "28", "metadata": {}, "source": [ "#### Add Structure" @@ -368,7 +383,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -377,7 +392,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "30", "metadata": {}, "source": [ "#### Add Experiment" @@ -386,7 +401,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -395,7 +410,7 @@ }, { "cell_type": "markdown", - "id": "31", + "id": "32", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -409,7 +424,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -418,7 +433,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "34", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -427,7 +442,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -437,7 +452,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -446,7 +461,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "37", "metadata": {}, "source": [ "#### Set Free Parameters" @@ -455,7 +470,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -486,7 +501,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -505,7 +520,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "40", "metadata": {}, "source": [ "#### Set Constraints\n", @@ -516,23 +531,23 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "41", "metadata": {}, "outputs": [], "source": [ "project.analysis.aliases.create(\n", " label='biso_Co1',\n", - " param_uid=project.structures['cosio'].atom_sites['Co1'].b_iso.uid,\n", + " param=project.structures['cosio'].atom_sites['Co1'].b_iso,\n", ")\n", "project.analysis.aliases.create(\n", " label='biso_Co2',\n", - " param_uid=project.structures['cosio'].atom_sites['Co2'].b_iso.uid,\n", + " param=project.structures['cosio'].atom_sites['Co2'].b_iso,\n", ")" ] }, { "cell_type": "markdown", - "id": "41", + "id": "42", "metadata": {}, "source": [ "Set constraints." @@ -541,8 +556,10 @@ { "cell_type": "code", "execution_count": null, - "id": "42", - "metadata": {}, + "id": "43", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "project.analysis.constraints.create(\n", @@ -552,26 +569,8 @@ }, { "cell_type": "markdown", - "id": "43", - "metadata": {}, - "source": [ - "Apply constraints." - ] - }, - { - "cell_type": "code", - "execution_count": null, "id": "44", "metadata": {}, - "outputs": [], - "source": [ - "project.analysis.apply_constraints()" - ] - }, - { - "cell_type": "markdown", - "id": "45", - "metadata": {}, "source": [ "#### Run Fitting" ] @@ -579,7 +578,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -589,7 +588,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "46", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -598,7 +597,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -608,7 +607,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -617,7 +616,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "49", "metadata": {}, "source": [ "## Summary\n", @@ -627,7 +626,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "50", "metadata": {}, "source": [ "#### Show Project Summary" @@ -636,7 +635,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "51", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-6.ipynb b/docs/docs/tutorials/ed-6.ipynb index 70a334b8..c4e2d34d 100644 --- a/docs/docs/tutorials/ed-6.ipynb +++ b/docs/docs/tutorials/ed-6.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "08932f7f", + "id": "a1e678e6", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: HS, HRPT\n", "\n", @@ -33,7 +48,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -42,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Define Structure\n", @@ -68,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -77,7 +92,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "#### Set Space Group" @@ -86,7 +101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -96,7 +111,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": { "lines_to_next_cell": 2 }, @@ -107,7 +122,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -117,7 +132,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -126,7 +141,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -179,7 +194,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Define Experiment\n", @@ -193,7 +208,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -202,7 +217,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "#### Create Experiment" @@ -211,7 +226,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -220,7 +235,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "#### Set Instrument" @@ -229,7 +244,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -239,7 +254,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -248,7 +263,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -261,7 +276,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "#### Set Background" @@ -270,7 +285,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -287,7 +302,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "22", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -296,7 +311,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -305,7 +320,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "## Define Project\n", @@ -319,7 +334,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -328,7 +343,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "26", "metadata": {}, "source": [ "#### Set Plotting Engine" @@ -337,7 +352,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -348,7 +363,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "28", "metadata": {}, "source": [ "#### Add Structure" @@ -357,7 +372,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -366,7 +381,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "30", "metadata": {}, "source": [ "#### Add Experiment" @@ -375,7 +390,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -384,7 +399,7 @@ }, { "cell_type": "markdown", - "id": "31", + "id": "32", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -398,7 +413,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -407,7 +422,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "34", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -416,7 +431,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -426,7 +441,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -435,7 +450,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "37", "metadata": {}, "source": [ "### Perform Fit 1/5\n", @@ -446,7 +461,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -459,7 +474,7 @@ }, { "cell_type": "markdown", - "id": "38", + "id": "39", "metadata": {}, "source": [ "Show free parameters after selection." @@ -468,7 +483,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -477,7 +492,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "41", "metadata": {}, "source": [ "#### Run Fitting" @@ -486,7 +501,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "42", "metadata": {}, "outputs": [], "source": [ @@ -496,7 +511,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -505,7 +520,7 @@ }, { "cell_type": "markdown", - "id": "43", + "id": "44", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -514,7 +529,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -524,7 +539,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -533,7 +548,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "47", "metadata": {}, "source": [ "### Perform Fit 2/5\n", @@ -544,7 +559,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -559,7 +574,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "49", "metadata": {}, "source": [ "Show free parameters after selection." @@ -568,7 +583,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -577,7 +592,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "51", "metadata": {}, "source": [ "#### Run Fitting" @@ -586,7 +601,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -596,7 +611,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -605,7 +620,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "54", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -614,7 +629,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -624,7 +639,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "56", "metadata": {}, "outputs": [], "source": [ @@ -633,7 +648,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "57", "metadata": {}, "source": [ "### Perform Fit 3/5\n", @@ -644,7 +659,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "58", "metadata": {}, "outputs": [], "source": [ @@ -657,7 +672,7 @@ }, { "cell_type": "markdown", - "id": "58", + "id": "59", "metadata": {}, "source": [ "Show free parameters after selection." @@ -666,7 +681,7 @@ { "cell_type": "code", "execution_count": null, - "id": "59", + "id": "60", "metadata": {}, "outputs": [], "source": [ @@ -675,7 +690,7 @@ }, { "cell_type": "markdown", - "id": "60", + "id": "61", "metadata": {}, "source": [ "#### Run Fitting" @@ -684,7 +699,7 @@ { "cell_type": "code", "execution_count": null, - "id": "61", + "id": "62", "metadata": {}, "outputs": [], "source": [ @@ -694,7 +709,7 @@ { "cell_type": "code", "execution_count": null, - "id": "62", + "id": "63", "metadata": {}, "outputs": [], "source": [ @@ -703,7 +718,7 @@ }, { "cell_type": "markdown", - "id": "63", + "id": "64", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -712,7 +727,7 @@ { "cell_type": "code", "execution_count": null, - "id": "64", + "id": "65", "metadata": {}, "outputs": [], "source": [ @@ -722,7 +737,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65", + "id": "66", "metadata": {}, "outputs": [], "source": [ @@ -731,7 +746,7 @@ }, { "cell_type": "markdown", - "id": "66", + "id": "67", "metadata": {}, "source": [ "### Perform Fit 4/5\n", @@ -742,7 +757,7 @@ { "cell_type": "code", "execution_count": null, - "id": "67", + "id": "68", "metadata": {}, "outputs": [], "source": [ @@ -755,7 +770,7 @@ }, { "cell_type": "markdown", - "id": "68", + "id": "69", "metadata": {}, "source": [ "Show free parameters after selection." @@ -764,7 +779,7 @@ { "cell_type": "code", "execution_count": null, - "id": "69", + "id": "70", "metadata": {}, "outputs": [], "source": [ @@ -773,7 +788,7 @@ }, { "cell_type": "markdown", - "id": "70", + "id": "71", "metadata": {}, "source": [ "#### Run Fitting" @@ -782,7 +797,7 @@ { "cell_type": "code", "execution_count": null, - "id": "71", + "id": "72", "metadata": {}, "outputs": [], "source": [ @@ -792,7 +807,7 @@ { "cell_type": "code", "execution_count": null, - "id": "72", + "id": "73", "metadata": {}, "outputs": [], "source": [ @@ -801,7 +816,7 @@ }, { "cell_type": "markdown", - "id": "73", + "id": "74", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -810,7 +825,7 @@ { "cell_type": "code", "execution_count": null, - "id": "74", + "id": "75", "metadata": {}, "outputs": [], "source": [ @@ -820,7 +835,7 @@ { "cell_type": "code", "execution_count": null, - "id": "75", + "id": "76", "metadata": {}, "outputs": [], "source": [ @@ -829,7 +844,7 @@ }, { "cell_type": "markdown", - "id": "76", + "id": "77", "metadata": {}, "source": [ "## Summary\n", @@ -839,7 +854,7 @@ }, { "cell_type": "markdown", - "id": "77", + "id": "78", "metadata": {}, "source": [ "#### Show Project Summary" @@ -848,7 +863,7 @@ { "cell_type": "code", "execution_count": null, - "id": "78", + "id": "79", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-7.ipynb b/docs/docs/tutorials/ed-7.ipynb index ce490e56..7284f08d 100644 --- a/docs/docs/tutorials/ed-7.ipynb +++ b/docs/docs/tutorials/ed-7.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "229e169b", + "id": "5dd7adf5", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: Si, SEPD\n", "\n", @@ -33,7 +48,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -42,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Define Structure\n", @@ -68,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -77,7 +92,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "#### Set Space Group" @@ -86,7 +101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -96,7 +111,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -105,7 +120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -114,7 +129,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -123,7 +138,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -139,7 +154,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Define Experiment\n", @@ -153,7 +168,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -162,7 +177,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "#### Create Experiment" @@ -171,7 +186,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -182,7 +197,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "#### Set Instrument" @@ -191,7 +206,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -203,7 +218,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -212,7 +227,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -226,7 +241,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "#### Set Peak Asymmetry" @@ -235,7 +250,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -245,7 +260,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "22", "metadata": {}, "source": [ "#### Set Background" @@ -254,7 +269,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -265,7 +280,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -274,7 +289,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -283,7 +298,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "26", "metadata": {}, "source": [ "## Define Project\n", @@ -297,7 +312,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -306,7 +321,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "28", "metadata": {}, "source": [ "#### Add Structure" @@ -315,7 +330,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -324,7 +339,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "30", "metadata": {}, "source": [ "#### Add Experiment" @@ -333,7 +348,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -342,7 +357,7 @@ }, { "cell_type": "markdown", - "id": "31", + "id": "32", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -356,7 +371,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -365,7 +380,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "34", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -374,7 +389,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -384,7 +399,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "36", "metadata": {}, "source": [ "### Perform Fit 1/5\n", @@ -395,7 +410,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -407,7 +422,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "38", "metadata": {}, "source": [ "Show free parameters after selection." @@ -416,7 +431,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -425,7 +440,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "40", "metadata": {}, "source": [ "#### Run Fitting" @@ -434,7 +449,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -444,7 +459,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "42", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -453,7 +468,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -463,7 +478,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -472,7 +487,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "45", "metadata": {}, "source": [ "### Perform Fit 2/5\n", @@ -483,7 +498,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -493,7 +508,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "47", "metadata": {}, "source": [ "Show free parameters after selection." @@ -502,7 +517,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -511,7 +526,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "49", "metadata": {}, "source": [ "#### Run Fitting" @@ -520,7 +535,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -530,7 +545,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "51", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -539,7 +554,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -549,7 +564,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -558,7 +573,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "54", "metadata": {}, "source": [ "### Perform Fit 3/5\n", @@ -569,7 +584,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -579,7 +594,7 @@ }, { "cell_type": "markdown", - "id": "55", + "id": "56", "metadata": {}, "source": [ "Set more parameters to be refined." @@ -588,7 +603,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "57", "metadata": {}, "outputs": [], "source": [ @@ -599,7 +614,7 @@ }, { "cell_type": "markdown", - "id": "57", + "id": "58", "metadata": {}, "source": [ "Show free parameters after selection." @@ -608,7 +623,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "59", "metadata": {}, "outputs": [], "source": [ @@ -617,7 +632,7 @@ }, { "cell_type": "markdown", - "id": "59", + "id": "60", "metadata": {}, "source": [ "#### Run Fitting" @@ -626,7 +641,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "61", "metadata": {}, "outputs": [], "source": [ @@ -636,7 +651,7 @@ }, { "cell_type": "markdown", - "id": "61", + "id": "62", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -645,7 +660,7 @@ { "cell_type": "code", "execution_count": null, - "id": "62", + "id": "63", "metadata": {}, "outputs": [], "source": [ @@ -655,7 +670,7 @@ { "cell_type": "code", "execution_count": null, - "id": "63", + "id": "64", "metadata": {}, "outputs": [], "source": [ @@ -664,7 +679,7 @@ }, { "cell_type": "markdown", - "id": "64", + "id": "65", "metadata": {}, "source": [ "### Perform Fit 4/5\n", @@ -675,7 +690,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65", + "id": "66", "metadata": {}, "outputs": [], "source": [ @@ -684,7 +699,7 @@ }, { "cell_type": "markdown", - "id": "66", + "id": "67", "metadata": {}, "source": [ "Show free parameters after selection." @@ -693,7 +708,7 @@ { "cell_type": "code", "execution_count": null, - "id": "67", + "id": "68", "metadata": {}, "outputs": [], "source": [ @@ -702,7 +717,7 @@ }, { "cell_type": "markdown", - "id": "68", + "id": "69", "metadata": {}, "source": [ "#### Run Fitting" @@ -711,7 +726,7 @@ { "cell_type": "code", "execution_count": null, - "id": "69", + "id": "70", "metadata": {}, "outputs": [], "source": [ @@ -721,7 +736,7 @@ }, { "cell_type": "markdown", - "id": "70", + "id": "71", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -730,7 +745,7 @@ { "cell_type": "code", "execution_count": null, - "id": "71", + "id": "72", "metadata": {}, "outputs": [], "source": [ @@ -740,7 +755,7 @@ { "cell_type": "code", "execution_count": null, - "id": "72", + "id": "73", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-8.ipynb b/docs/docs/tutorials/ed-8.ipynb index 13ce802e..79e03966 100644 --- a/docs/docs/tutorials/ed-8.ipynb +++ b/docs/docs/tutorials/ed-8.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8550c647", + "id": "f45eca14", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: NCAF, WISH\n", "\n", @@ -36,7 +51,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -45,7 +60,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -57,7 +72,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Define Structure\n", @@ -71,7 +86,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -80,7 +95,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "#### Set Space Group" @@ -89,7 +104,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +114,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -108,7 +123,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -117,7 +132,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -126,7 +141,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -188,7 +203,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Define Experiment\n", @@ -202,7 +217,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -212,7 +227,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -221,7 +236,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "15", "metadata": {}, "source": [ "#### Create Experiment" @@ -230,7 +245,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -244,7 +259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -257,7 +272,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "#### Set Instrument" @@ -266,7 +281,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -279,7 +294,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -291,7 +306,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "21", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -300,7 +315,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -316,7 +331,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -331,7 +346,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "#### Set Background" @@ -340,7 +355,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -384,7 +399,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -426,7 +441,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -435,7 +450,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -445,7 +460,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -454,7 +469,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "30", "metadata": {}, "source": [ "#### Set Excluded Regions" @@ -463,7 +478,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -474,7 +489,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -484,7 +499,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "33", "metadata": {}, "source": [ "## Define Project\n", @@ -498,7 +513,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -507,7 +522,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "35", "metadata": {}, "source": [ "#### Set Plotting Engine" @@ -516,7 +531,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -527,7 +542,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "37", "metadata": {}, "source": [ "#### Add Structure" @@ -536,7 +551,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -545,7 +560,7 @@ }, { "cell_type": "markdown", - "id": "38", + "id": "39", "metadata": {}, "source": [ "#### Add Experiment" @@ -554,7 +569,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -564,7 +579,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "41", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -578,7 +593,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "42", "metadata": {}, "outputs": [], "source": [ @@ -587,7 +602,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "43", "metadata": {}, "source": [ "#### Set Fit Mode" @@ -596,7 +611,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -605,7 +620,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "45", "metadata": {}, "source": [ "#### Set Free Parameters" @@ -614,7 +629,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -629,7 +644,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -652,7 +667,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "48", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -661,7 +676,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -671,7 +686,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -680,7 +695,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "51", "metadata": {}, "source": [ "#### Run Fitting" @@ -689,7 +704,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -699,7 +714,7 @@ }, { "cell_type": "markdown", - "id": "52", + "id": "53", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -708,7 +723,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -718,7 +733,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -727,7 +742,7 @@ }, { "cell_type": "markdown", - "id": "55", + "id": "56", "metadata": {}, "source": [ "## Summary\n", @@ -737,7 +752,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "57", "metadata": {}, "source": [ "#### Show Project Summary" @@ -746,7 +761,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "58", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-9.ipynb b/docs/docs/tutorials/ed-9.ipynb index 1d3c883d..b9b845da 100644 --- a/docs/docs/tutorials/ed-9.ipynb +++ b/docs/docs/tutorials/ed-9.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "520a3ca6", + "id": "e2e25ed0", "metadata": { "tags": [ "hide-in-docs" @@ -20,9 +20,24 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "0", "metadata": {}, + "outputs": [], + "source": [ + "# Check whether easydiffraction is installed; install it if needed.\n", + "# Required for remote environments such as Google Colab.\n", + "import importlib.util\n", + "\n", + "if importlib.util.find_spec('easydiffraction') is None:\n", + " %pip install easydiffraction" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, "source": [ "# Structure Refinement: LBCO+Si, McStas\n", "\n", @@ -33,7 +48,7 @@ }, { "cell_type": "markdown", - "id": "1", + "id": "2", "metadata": {}, "source": [ "## Import Library" @@ -42,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "3", + "id": "4", "metadata": {}, "source": [ "## Define Structures\n", @@ -68,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -77,7 +92,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "6", "metadata": {}, "source": [ "#### Set Space Group" @@ -86,7 +101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -96,7 +111,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -105,7 +120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -114,7 +129,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -123,7 +138,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -169,7 +184,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "### Create Structure 2: Si" @@ -178,7 +193,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -187,7 +202,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "#### Set Space Group" @@ -196,7 +211,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -206,7 +221,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -215,7 +230,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -224,7 +239,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -233,7 +248,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -250,7 +265,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "## Define Experiment\n", @@ -264,7 +279,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -273,7 +288,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "22", "metadata": {}, "source": [ "#### Create Experiment" @@ -282,7 +297,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -298,7 +313,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "#### Set Instrument" @@ -307,7 +322,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -319,7 +334,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "26", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -328,7 +343,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -344,7 +359,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "28", "metadata": {}, "source": [ "#### Set Background" @@ -352,7 +367,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "29", "metadata": {}, "source": [ "Select the background type." @@ -361,7 +376,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -370,7 +385,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "31", "metadata": {}, "source": [ "Add background points." @@ -379,7 +394,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -400,7 +415,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "33", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -409,7 +424,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -419,7 +434,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "35", "metadata": {}, "source": [ "## Define Project\n", @@ -433,7 +448,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -442,7 +457,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "37", "metadata": {}, "source": [ "#### Add Structures" @@ -451,7 +466,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -461,7 +476,7 @@ }, { "cell_type": "markdown", - "id": "38", + "id": "39", "metadata": {}, "source": [ "#### Show Structures" @@ -470,7 +485,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -479,7 +494,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "41", "metadata": {}, "source": [ "#### Add Experiments" @@ -488,7 +503,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "42", "metadata": {}, "outputs": [], "source": [ @@ -497,7 +512,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "43", "metadata": {}, "source": [ "#### Set Excluded Regions\n", @@ -508,7 +523,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -517,7 +532,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "45", "metadata": {}, "source": [ "Add excluded regions." @@ -526,7 +541,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -536,7 +551,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "47", "metadata": {}, "source": [ "Show excluded regions." @@ -545,7 +560,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -554,7 +569,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "49", "metadata": {}, "source": [ "Show measured data after adding excluded regions." @@ -563,7 +578,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -572,7 +587,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "51", "metadata": {}, "source": [ "Show experiment as CIF." @@ -581,7 +596,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -590,7 +605,7 @@ }, { "cell_type": "markdown", - "id": "52", + "id": "53", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -604,7 +619,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -613,7 +628,7 @@ }, { "cell_type": "markdown", - "id": "54", + "id": "55", "metadata": {}, "source": [ "#### Set Fitting Parameters\n", @@ -624,7 +639,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "56", "metadata": {}, "outputs": [], "source": [ @@ -637,7 +652,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "57", "metadata": {}, "source": [ "Set experiment parameters to be optimized." @@ -646,7 +661,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "58", "metadata": {}, "outputs": [], "source": [ @@ -667,7 +682,7 @@ }, { "cell_type": "markdown", - "id": "58", + "id": "59", "metadata": {}, "source": [ "#### Perform Fit" @@ -676,7 +691,7 @@ { "cell_type": "code", "execution_count": null, - "id": "59", + "id": "60", "metadata": {}, "outputs": [], "source": [ @@ -686,7 +701,7 @@ }, { "cell_type": "markdown", - "id": "60", + "id": "61", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -695,7 +710,7 @@ { "cell_type": "code", "execution_count": null, - "id": "61", + "id": "62", "metadata": {}, "outputs": [], "source": [ @@ -705,7 +720,7 @@ { "cell_type": "code", "execution_count": null, - "id": "62", + "id": "63", "metadata": {}, "outputs": [], "source": [] From 0cbfc94e3cc28e473a043516417a394752792ead Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 12:02:34 +0200 Subject: [PATCH 06/51] Update serialization process to apply constraints before saving project --- src/easydiffraction/io/cif/serialize.py | 5 +++++ src/easydiffraction/project/project.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index 184e40f4..fa358e9e 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -156,6 +156,11 @@ def category_collection_to_cif( When set to a positive integer, truncate the output to at most this many rows (half from the start, half from the end) with an ``...`` separator. ``None`` emits all rows. + + Returns + ------- + str + CIF text representing the collection as a loop. """ if not len(collection): return '' diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index 06e36d14..33ec12e5 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -299,6 +299,11 @@ def save(self) -> None: console.paragraph(f"Saving project 📦 '{self.name}' to") console.print(self.info.path.resolve()) + # Apply constraints so dependent parameters are flagged + # before serialization (constrained params are written + # without brackets). + self._analysis._update_categories() + # Ensure project directory exists self._info.path.mkdir(parents=True, exist_ok=True) From 87d36697dd727e0ba184e73023bab65cc825c42f Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 13:06:49 +0200 Subject: [PATCH 07/51] Move CIF loop truncation from persistence to display methods --- src/easydiffraction/core/datablock.py | 14 ++++++++++++++ .../datablocks/experiment/item/base.py | 3 +-- .../datablocks/structure/item/base.py | 2 +- src/easydiffraction/io/cif/serialize.py | 19 +++++++++++++++++-- 4 files changed, 33 insertions(+), 5 deletions(-) diff --git a/src/easydiffraction/core/datablock.py b/src/easydiffraction/core/datablock.py index 36c5b589..d9fc9f42 100644 --- a/src/easydiffraction/core/datablock.py +++ b/src/easydiffraction/core/datablock.py @@ -91,6 +91,20 @@ def as_cif(self) -> str: self._update_categories() return datablock_item_to_cif(self) + def _cif_for_display(self, max_loop_display: int = 20) -> str: + """ + Return CIF text with loop categories truncated for display. + + Parameters + ---------- + max_loop_display : int, default=20 + Maximum number of rows to show per loop category. + """ + from easydiffraction.io.cif.serialize import datablock_item_to_cif # noqa: PLC0415 + + self._update_categories() + return datablock_item_to_cif(self, max_loop_display=max_loop_display) + def help(self) -> None: """Print a summary of public attributes and categories.""" super().help() diff --git a/src/easydiffraction/datablocks/experiment/item/base.py b/src/easydiffraction/datablocks/experiment/item/base.py index 1ac72d0f..d3a0f4a0 100644 --- a/src/easydiffraction/datablocks/experiment/item/base.py +++ b/src/easydiffraction/datablocks/experiment/item/base.py @@ -129,10 +129,9 @@ def as_cif(self) -> str: def show_as_cif(self) -> None: """Pretty-print the experiment as CIF text.""" - experiment_cif = super().as_cif paragraph_title: str = f"Experiment 🔬 '{self.name}' as cif" console.paragraph(paragraph_title) - render_cif(experiment_cif) + render_cif(self._cif_for_display()) @abstractmethod def _load_ascii_data_to_experiment(self, data_path: str) -> None: diff --git a/src/easydiffraction/datablocks/structure/item/base.py b/src/easydiffraction/datablocks/structure/item/base.py index 80d8f76a..8181f1db 100644 --- a/src/easydiffraction/datablocks/structure/item/base.py +++ b/src/easydiffraction/datablocks/structure/item/base.py @@ -252,4 +252,4 @@ def show(self) -> None: def show_as_cif(self) -> None: """Render the CIF text for this structure in the terminal.""" console.paragraph(f"Structure 🧩 '{self.name}' as cif") - render_cif(self.as_cif) + render_cif(self._cif_for_display()) diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index fa358e9e..cfceca74 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -196,11 +196,22 @@ def category_collection_to_cif( return '\n'.join(lines) -def datablock_item_to_cif(datablock: object) -> str: +def datablock_item_to_cif( + datablock: object, + max_loop_display: int | None = None, +) -> str: """ Render a DatablockItem-like object to CIF text. Emits a data_ header and then concatenates category CIF sections. + + Parameters + ---------- + datablock : object + A ``DatablockItem``-like object. + max_loop_display : int | None, default=None + When set, truncate loop categories to this many rows. ``None`` + emits all rows (used for serialisation). """ # Local imports to avoid import-time cycles from easydiffraction.core.category import CategoryCollection # noqa: PLC0415 @@ -213,7 +224,11 @@ def datablock_item_to_cif(datablock: object) -> str: parts.extend(v.as_cif for v in vars(datablock).values() if isinstance(v, CategoryItem)) # Then collections - parts.extend(v.as_cif for v in vars(datablock).values() if isinstance(v, CategoryCollection)) + parts.extend( + category_collection_to_cif(v, max_display=max_loop_display) + for v in vars(datablock).values() + if isinstance(v, CategoryCollection) + ) return '\n\n'.join(parts) From 9a11882613b412691a063d8ded5111e34f1bed30 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 13:13:13 +0200 Subject: [PATCH 08/51] Add CIF round-trip integration tests for experiments and structures --- .../fitting/test_cif_round_trip.py | 322 ++++++++++++++++++ 1 file changed, 322 insertions(+) create mode 100644 tests/integration/fitting/test_cif_round_trip.py diff --git a/tests/integration/fitting/test_cif_round_trip.py b/tests/integration/fitting/test_cif_round_trip.py new file mode 100644 index 00000000..b089027b --- /dev/null +++ b/tests/integration/fitting/test_cif_round_trip.py @@ -0,0 +1,322 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Integration tests for experiment CIF round-trip (as_cif → from_cif_str).""" + +from __future__ import annotations + +import tempfile + +from numpy.testing import assert_almost_equal + +from easydiffraction import ExperimentFactory +from easydiffraction import StructureFactory +from easydiffraction import download_data +from easydiffraction.core.variable import Parameter + +TEMP_DIR = tempfile.gettempdir() + + +def _build_fully_configured_experiment() -> ExperimentFactory: + """ + Create a fully configured powder CWL neutron experiment. + + Includes instrument, peak profile, background, excluded regions, + linked phases, and measured data. + + Returns + ------- + ExperimentBase + A complete experiment ready for CIF round-trip testing. + """ + data_path = download_data(id=3, destination=TEMP_DIR) + expt = ExperimentFactory.from_data_path( + name='hrpt', + data_path=data_path, + ) + # Instrument + expt.instrument.setup_wavelength = 1.494 + expt.instrument.calib_twotheta_offset = 0.6225 + + # Peak profile + expt.peak.broad_gauss_u = 0.0834 + expt.peak.broad_gauss_v = -0.1168 + expt.peak.broad_gauss_w = 0.123 + expt.peak.broad_lorentz_x = 0.0 + expt.peak.broad_lorentz_y = 0.0797 + + # Background + expt.background.create(id='1', x=10, y=170) + expt.background.create(id='2', x=80, y=160) + expt.background.create(id='3', x=165, y=170) + + # Excluded regions + expt.excluded_regions.create(id='1', start=0, end=5) + expt.excluded_regions.create(id='2', start=165, end=180) + + # Linked phases + expt.linked_phases.create(id='lbco', scale=9.0) + + # Free parameters + expt.instrument.calib_twotheta_offset.free = True + expt.linked_phases['lbco'].scale.free = True + expt.background['1'].y.free = True + expt.background['2'].y.free = True + expt.background['3'].y.free = True + + return expt + + +def _collect_param_values(expt: object) -> dict[str, object]: + """ + Collect all parameter values from an experiment. + + Returns a dict keyed by unique_name with the parameter value. + Skips raw data parameters (pd_data.*) since those are large arrays. + """ + result = {} + for p in expt.parameters: + uname = getattr(p, 'unique_name', None) + if uname is None: + continue + # Skip raw data arrays + if 'pd_data.' in uname: + continue + result[uname] = p.value + return result + + +def _collect_free_flags(expt: object) -> dict[str, bool]: + """Return {unique_name: free} for fittable parameters.""" + return { + p.unique_name: p.free + for p in expt.parameters + if isinstance(p, Parameter) and not p.unique_name.startswith('pd_data.') + } + + +# ------------------------------------------------------------------ +# Test 1: Experiment CIF round-trip preserves all parameter values +# ------------------------------------------------------------------ + + +def test_experiment_cif_round_trip_preserves_parameters() -> None: + """ + Every parameter value must survive an as_cif → from_cif_str cycle. + + Creates a fully configured experiment, serialises it to CIF, + reconstructs from CIF, and compares all parameter values. + """ + original = _build_fully_configured_experiment() + + # Serialise + cif_str = original.as_cif + + # Reconstruct + loaded = ExperimentFactory.from_cif_str(cif_str) + + # Compare parameter values + orig_params = _collect_param_values(original) + loaded_params = _collect_param_values(loaded) + + for name, orig_val in orig_params.items(): + assert name in loaded_params, f'Parameter {name} missing after round-trip' + loaded_val = loaded_params[name] + if isinstance(orig_val, float): + assert_almost_equal( + loaded_val, + orig_val, + decimal=4, + err_msg=f'Value mismatch for {name}', + ) + else: + assert loaded_val == orig_val, ( + f'Value mismatch for {name}: expected {orig_val!r}, got {loaded_val!r}' + ) + + +# ------------------------------------------------------------------ +# Test 2: Free flags survive the round-trip +# ------------------------------------------------------------------ + + +def test_experiment_cif_round_trip_preserves_free_flags() -> None: + """ + Free flags must survive an as_cif → from_cif_str cycle. + + Parameters marked as free on the original experiment must also be + free on the reconstructed experiment. + """ + original = _build_fully_configured_experiment() + + cif_str = original.as_cif + loaded = ExperimentFactory.from_cif_str(cif_str) + + orig_free = _collect_free_flags(original) + loaded_free = _collect_free_flags(loaded) + + for name, orig_flag in orig_free.items(): + if name in loaded_free: + assert loaded_free[name] == orig_flag, ( + f'Free flag mismatch for {name}: expected {orig_flag}, got {loaded_free[name]}' + ) + + +# ------------------------------------------------------------------ +# Test 3: Categories survive the round-trip +# ------------------------------------------------------------------ + + +def test_experiment_cif_round_trip_preserves_categories() -> None: + """ + Category collections (background, excluded regions, linked phases) + must preserve their item count after a round-trip. + """ + original = _build_fully_configured_experiment() + + cif_str = original.as_cif + loaded = ExperimentFactory.from_cif_str(cif_str) + + # Background points + assert len(loaded.background) == len(original.background), ( + f'Background count mismatch: ' + f'expected {len(original.background)}, got {len(loaded.background)}' + ) + + # Excluded regions + assert len(loaded.excluded_regions) == len(original.excluded_regions), ( + f'Excluded regions count mismatch: ' + f'expected {len(original.excluded_regions)}, ' + f'got {len(loaded.excluded_regions)}' + ) + + # Linked phases + assert len(loaded.linked_phases) == len(original.linked_phases), ( + f'Linked phases count mismatch: ' + f'expected {len(original.linked_phases)}, ' + f'got {len(loaded.linked_phases)}' + ) + + +# ------------------------------------------------------------------ +# Test 4: Data points survive the round-trip +# ------------------------------------------------------------------ + + +def test_experiment_cif_round_trip_preserves_data() -> None: + """ + Measured data points must survive an as_cif → from_cif_str cycle. + + The number of data points and the first/last values must match. + """ + original = _build_fully_configured_experiment() + + cif_str = original.as_cif + loaded = ExperimentFactory.from_cif_str(cif_str) + + # Number of data points + assert len(loaded.data) == len(original.data), ( + f'Data point count mismatch: expected {len(original.data)}, got {len(loaded.data)}' + ) + + # First and last data point two_theta and intensity_meas + orig_first = list(original.data.values())[0] + loaded_first = list(loaded.data.values())[0] + orig_last = list(original.data.values())[-1] + loaded_last = list(loaded.data.values())[-1] + + assert_almost_equal( + loaded_first.two_theta.value, + orig_first.two_theta.value, + decimal=4, + err_msg='First data point two_theta mismatch', + ) + assert_almost_equal( + loaded_last.two_theta.value, + orig_last.two_theta.value, + decimal=4, + err_msg='Last data point two_theta mismatch', + ) + assert_almost_equal( + loaded_first.intensity_meas.value, + orig_first.intensity_meas.value, + decimal=2, + err_msg='First data point intensity_meas mismatch', + ) + + +# ------------------------------------------------------------------ +# Test 5: Structure CIF round-trip preserves all parameter values +# ------------------------------------------------------------------ + + +def test_structure_cif_round_trip_preserves_parameters() -> None: + """ + Every structure parameter must survive an as_cif → from_cif_str + cycle, including atom sites with symmetry constraints. + """ + original = StructureFactory.from_scratch(name='lbco') + original.space_group.name_h_m = 'P m -3 m' + original.cell.length_a = 3.8909 + original.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.5, + ) + original.atom_sites.create( + label='Co', + type_symbol='Co', + fract_x=0.5, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='b', + b_iso=0.5, + ) + original.atom_sites.create( + label='O', + type_symbol='O', + fract_x=0, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='c', + b_iso=0.5, + ) + # Apply symmetry constraints before serialisation + original._update_categories() + + cif_str = original.as_cif + loaded = StructureFactory.from_cif_str(cif_str) + # Apply symmetry on loaded to match original state + loaded._update_categories() + + # Compare cell parameters + assert_almost_equal( + loaded.cell.length_a.value, + original.cell.length_a.value, + decimal=6, + ) + + # Compare space group + assert loaded.space_group.name_h_m.value == original.space_group.name_h_m.value + + # Compare atom sites count and values + assert len(loaded.atom_sites) == len(original.atom_sites) + for label in ['La', 'Co', 'O']: + orig_site = original.atom_sites[label] + loaded_site = loaded.atom_sites[label] + assert_almost_equal( + loaded_site.fract_x.value, + orig_site.fract_x.value, + decimal=6, + err_msg=f'fract_x mismatch for {label}', + ) + assert_almost_equal( + loaded_site.b_iso.value, + orig_site.b_iso.value, + decimal=4, + err_msg=f'b_iso mismatch for {label}', + ) From ca8d73673dfd8e1685f118405b574e99e4e9b3c7 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 13:13:35 +0200 Subject: [PATCH 09/51] Add new integration test --- ..._powder-diffraction_constant-wavelength.py | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py b/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py index 2184fc07..cb5e640a 100644 --- a/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py +++ b/tests/integration/fitting/test_powder-diffraction_constant-wavelength.py @@ -479,6 +479,56 @@ def test_fit_neutron_pd_cwl_hs() -> None: ) +def test_single_fit_neutron_pd_cwl_lbco_with_constraints_from_project() -> None: + import easydiffraction as ed + + # Create a project from CIF files + project = ed.Project() + project.structures.add_from_cif_path(ed.download_data(id=1, destination='data')) + project.experiments.add_from_cif_path(ed.download_data(id=2, destination='data')) + + # Set constraints + project.analysis.aliases.create( + label='biso_La', + param=project.structures['lbco'].atom_sites['La'].b_iso, + ) + project.analysis.aliases.create( + label='biso_Ba', + param=project.structures['lbco'].atom_sites['Ba'].b_iso, + ) + + project.analysis.aliases.create( + label='occ_La', + param=project.structures['lbco'].atom_sites['La'].occupancy, + ) + project.analysis.aliases.create( + label='occ_Ba', + param=project.structures['lbco'].atom_sites['Ba'].occupancy, + ) + + project.analysis.constraints.create(expression='biso_Ba = biso_La') + project.analysis.constraints.create(expression='occ_Ba = 1 - occ_La') + + # More fit patams + project.structures['lbco'].atom_sites['La'].occupancy.free = True + + # Save to a directory + project.save_as('lbco_project') + + # Load Project from Directory + project = ed.Project.load('lbco_project') + + # Perform Analysis + project.analysis.fit() + + # Compare fit quality + assert_almost_equal( + project.analysis.fit_results.reduced_chi_square, + desired=1.28, + decimal=1, + ) + + if __name__ == '__main__': test_fit_neutron_pd_cwl_hs() test_single_fit_neutron_pd_cwl_lbco() From 21941c9f408adf53110a27514512350ca8d8c83e Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 13:26:28 +0200 Subject: [PATCH 10/51] Move analysis.cif into analysis/ directory --- docs/architecture/architecture.md | 7 ++++--- src/easydiffraction/project/project.py | 7 +++++-- .../project/test_project_load.py | 18 +++++++++--------- .../project/test_project_save.py | 4 ++-- 4 files changed, 20 insertions(+), 16 deletions(-) diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md index 605ee28c..8a7a9369 100644 --- a/docs/architecture/architecture.md +++ b/docs/architecture/architecture.md @@ -714,12 +714,13 @@ Projects are saved as a directory of CIF files: ```shell project_dir/ ├── project.cif # ProjectInfo -├── analysis.cif # Analysis settings ├── summary.cif # Summary report ├── structures/ │ └── lbco.cif # One file per structure -└── experiments/ - └── hrpt.cif # One file per experiment +├── experiments/ +│ └── hrpt.cif # One file per experiment +└── analysis/ + └── analysis.cif # Analysis settings ``` ### 7.3 Verbosity diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index 33ec12e5..cb3a90fc 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -335,9 +335,12 @@ def save(self) -> None: console.print(f'│ └── 📄 {file_name}') # Save analysis - with (self._info.path / 'analysis.cif').open('w') as f: + analysis_dir = self._info.path / 'analysis' + analysis_dir.mkdir(parents=True, exist_ok=True) + with (analysis_dir / 'analysis.cif').open('w') as f: f.write(self.analysis.as_cif()) - console.print('├── 📄 analysis.cif') + console.print('├── 📁 analysis/') + console.print('│ └── 📄 analysis.cif') # Save summary with (self._info.path / 'summary.cif').open('w') as f: diff --git a/tests/unit/easydiffraction/project/test_project_load.py b/tests/unit/easydiffraction/project/test_project_load.py index 6d9020b7..c676cb89 100644 --- a/tests/unit/easydiffraction/project/test_project_load.py +++ b/tests/unit/easydiffraction/project/test_project_load.py @@ -116,27 +116,27 @@ def test_round_trips_constraints(self, tmp_path): class TestLoadAnalysisCifFallback: """Load falls back from analysis/analysis.cif to analysis.cif at root.""" - def test_loads_analysis_from_root(self, tmp_path): - """Current save layout: analysis.cif at project root.""" + def test_loads_analysis_from_subdir(self, tmp_path): + """Current save layout: analysis/analysis.cif.""" original = Project(name='fb1') original.save_as(str(tmp_path / 'proj')) - # Verify analysis.cif is at root (current save layout) - assert (tmp_path / 'proj' / 'analysis.cif').is_file() + # Verify analysis.cif is in analysis/ subdirectory (current save layout) + assert (tmp_path / 'proj' / 'analysis' / 'analysis.cif').is_file() loaded = Project.load(str(tmp_path / 'proj')) assert loaded.analysis.current_minimizer == 'lmfit' - def test_loads_analysis_from_subdir(self, tmp_path): - """Future layout: analysis/analysis.cif takes priority.""" + def test_loads_analysis_from_root_fallback(self, tmp_path): + """Old layout fallback: analysis.cif at project root.""" original = Project(name='fb2') original.save_as(str(tmp_path / 'proj')) - # Move analysis.cif to analysis/ subdirectory + # Move analysis.cif from analysis/ subdirectory to project root proj_dir = tmp_path / 'proj' analysis_dir = proj_dir / 'analysis' - analysis_dir.mkdir(exist_ok=True) - (proj_dir / 'analysis.cif').rename(analysis_dir / 'analysis.cif') + (analysis_dir / 'analysis.cif').rename(proj_dir / 'analysis.cif') + analysis_dir.rmdir() loaded = Project.load(str(proj_dir)) assert loaded.analysis.current_minimizer == 'lmfit' diff --git a/tests/unit/easydiffraction/project/test_project_save.py b/tests/unit/easydiffraction/project/test_project_save.py index ac8b9895..bf632e11 100644 --- a/tests/unit/easydiffraction/project/test_project_save.py +++ b/tests/unit/easydiffraction/project/test_project_save.py @@ -13,7 +13,7 @@ def test_project_save_uses_cwd_when_no_explicit_path(monkeypatch, tmp_path, caps # It should announce saving and create the three core files assert 'Saving project' in out assert (tmp_path / 'project.cif').exists() - assert (tmp_path / 'analysis.cif').exists() + assert (tmp_path / 'analysis' / 'analysis.cif').exists() assert (tmp_path / 'summary.cif').exists() @@ -34,7 +34,7 @@ def test_project_save_as_writes_core_files(tmp_path, monkeypatch): # Assert expected files/dirs exist assert (target / 'project.cif').is_file() - assert (target / 'analysis.cif').is_file() + assert (target / 'analysis' / 'analysis.cif').is_file() assert (target / 'summary.cif').is_file() assert (target / 'structures').is_dir() assert (target / 'experiments').is_dir() From 5dffac30b816405a02c7932754f2770c1e35e87d Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 13:28:28 +0200 Subject: [PATCH 11/51] Add destination parameter to extract_data_paths_from_zip --- src/easydiffraction/io/ascii.py | 28 +++-- tests/unit/easydiffraction/io/test_ascii.py | 125 ++++++++++++++++++++ 2 files changed, 144 insertions(+), 9 deletions(-) create mode 100644 tests/unit/easydiffraction/io/test_ascii.py diff --git a/src/easydiffraction/io/ascii.py b/src/easydiffraction/io/ascii.py index ae231a2c..75ec0fcb 100644 --- a/src/easydiffraction/io/ascii.py +++ b/src/easydiffraction/io/ascii.py @@ -13,21 +13,27 @@ import numpy as np -def extract_data_paths_from_zip(zip_path: str | Path) -> list[str]: +def extract_data_paths_from_zip( + zip_path: str | Path, + destination: str | Path | None = None, +) -> list[str]: """ Extract all files from a ZIP archive and return their paths. - Files are extracted into a temporary directory that persists for the - lifetime of the process. The returned paths are sorted - lexicographically by file name so that numbered data files (e.g. - ``scan_001.dat``, ``scan_002.dat``) appear in natural order. Hidden - files and directories (names starting with ``'.'`` or ``'__'``) are - excluded. + Files are extracted into *destination* when provided, or into a + temporary directory that persists for the lifetime of the process. + The returned paths are sorted lexicographically by file name so that + numbered data files (e.g. ``scan_001.dat``, ``scan_002.dat``) appear + in natural order. Hidden files and directories (names starting with + ``'.'`` or ``'__'``) are excluded. Parameters ---------- zip_path : str | Path Path to the ZIP archive. + destination : str | Path | None, default=None + Directory to extract files into. When ``None``, a temporary + directory is created. Returns ------- @@ -46,8 +52,12 @@ def extract_data_paths_from_zip(zip_path: str | Path) -> list[str]: msg = f'ZIP file not found: {zip_path}' raise FileNotFoundError(msg) - # TODO: Unify mkdir with other uses in the code - extract_dir = Path(tempfile.mkdtemp(prefix='ed_zip_')) + if destination is not None: + extract_dir = Path(destination) + extract_dir.mkdir(parents=True, exist_ok=True) + else: + # TODO: Unify mkdir with other uses in the code + extract_dir = Path(tempfile.mkdtemp(prefix='ed_zip_')) with zipfile.ZipFile(zip_path, 'r') as zf: zf.extractall(extract_dir) diff --git a/tests/unit/easydiffraction/io/test_ascii.py b/tests/unit/easydiffraction/io/test_ascii.py new file mode 100644 index 00000000..8519c6d7 --- /dev/null +++ b/tests/unit/easydiffraction/io/test_ascii.py @@ -0,0 +1,125 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for extract_data_paths_from_zip and extract_data_paths_from_dir.""" + +from __future__ import annotations + +import zipfile + +import pytest + +from easydiffraction.io.ascii import extract_data_paths_from_dir +from easydiffraction.io.ascii import extract_data_paths_from_zip + + +class TestExtractDataPathsFromZip: + """Tests for extract_data_paths_from_zip.""" + + def test_extracts_to_temp_dir_by_default(self, tmp_path): + """Without destination, files go to a temp directory.""" + zip_path = tmp_path / 'test.zip' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('scan_001.dat', '1 2 3\n') + zf.writestr('scan_002.dat', '4 5 6\n') + + paths = extract_data_paths_from_zip(zip_path) + + assert len(paths) == 2 + assert 'scan_001.dat' in paths[0] + assert 'scan_002.dat' in paths[1] + + def test_extracts_to_destination(self, tmp_path): + """With destination, files go to the specified directory.""" + zip_path = tmp_path / 'test.zip' + dest = tmp_path / 'output' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('scan_001.dat', '1 2 3\n') + zf.writestr('scan_002.dat', '4 5 6\n') + + paths = extract_data_paths_from_zip(zip_path, destination=dest) + + assert len(paths) == 2 + assert all(str(dest) in p for p in paths) + assert (dest / 'scan_001.dat').is_file() + assert (dest / 'scan_002.dat').is_file() + + def test_destination_creates_directory(self, tmp_path): + """Destination directory is created if it does not exist.""" + zip_path = tmp_path / 'test.zip' + dest = tmp_path / 'nested' / 'output' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('data.dat', '1 2 3\n') + + paths = extract_data_paths_from_zip(zip_path, destination=dest) + + assert len(paths) == 1 + assert dest.is_dir() + + def test_raises_file_not_found(self, tmp_path): + """Raises FileNotFoundError for missing ZIP path.""" + with pytest.raises(FileNotFoundError): + extract_data_paths_from_zip(tmp_path / 'missing.zip') + + def test_raises_value_error_for_empty_zip(self, tmp_path): + """Raises ValueError when ZIP has no usable files.""" + zip_path = tmp_path / 'empty.zip' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('.hidden', 'hidden\n') + + with pytest.raises(ValueError, match='No data files found'): + extract_data_paths_from_zip(zip_path) + + def test_excludes_hidden_files(self, tmp_path): + """Hidden files are excluded from returned paths.""" + zip_path = tmp_path / 'test.zip' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('data.dat', '1 2 3\n') + zf.writestr('.hidden', 'hidden\n') + zf.writestr('__meta', 'meta\n') + + paths = extract_data_paths_from_zip(zip_path) + + assert len(paths) == 1 + assert 'data.dat' in paths[0] + + def test_returns_sorted_paths(self, tmp_path): + """Returned paths are sorted lexicographically.""" + zip_path = tmp_path / 'test.zip' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('c.dat', '3\n') + zf.writestr('a.dat', '1\n') + zf.writestr('b.dat', '2\n') + + paths = extract_data_paths_from_zip(zip_path) + + assert 'a.dat' in paths[0] + assert 'b.dat' in paths[1] + assert 'c.dat' in paths[2] + + +class TestExtractDataPathsFromDir: + """Tests for extract_data_paths_from_dir.""" + + def test_lists_files_in_directory(self, tmp_path): + """Returns sorted paths for files in a directory.""" + (tmp_path / 'scan_002.dat').write_text('2\n') + (tmp_path / 'scan_001.dat').write_text('1\n') + + paths = extract_data_paths_from_dir(tmp_path) + + assert len(paths) == 2 + assert 'scan_001.dat' in paths[0] + assert 'scan_002.dat' in paths[1] + + def test_raises_for_missing_directory(self, tmp_path): + """Raises FileNotFoundError for non-existent directory.""" + with pytest.raises(FileNotFoundError): + extract_data_paths_from_dir(tmp_path / 'missing') + + def test_raises_for_empty_directory(self, tmp_path): + """Raises ValueError when directory has no matching files.""" + empty = tmp_path / 'empty' + empty.mkdir() + + with pytest.raises(ValueError, match='No files matching'): + extract_data_paths_from_dir(empty) From 16af6de5711942463fe7b0097d595058aa548e35 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 13:31:55 +0200 Subject: [PATCH 12/51] Add missing Returns sections to docstrings --- src/easydiffraction/core/datablock.py | 7 +++++++ src/easydiffraction/io/cif/serialize.py | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/src/easydiffraction/core/datablock.py b/src/easydiffraction/core/datablock.py index d9fc9f42..5d497e4c 100644 --- a/src/easydiffraction/core/datablock.py +++ b/src/easydiffraction/core/datablock.py @@ -99,6 +99,13 @@ def _cif_for_display(self, max_loop_display: int = 20) -> str: ---------- max_loop_display : int, default=20 Maximum number of rows to show per loop category. + + Returns + ------- + str + CIF representation of this object, with loop categories + truncated to at most *max_loop_display* rows for display + purposes. """ from easydiffraction.io.cif.serialize import datablock_item_to_cif # noqa: PLC0415 diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index cfceca74..fa035981 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -212,6 +212,11 @@ def datablock_item_to_cif( max_loop_display : int | None, default=None When set, truncate loop categories to this many rows. ``None`` emits all rows (used for serialisation). + + Returns + ------- + str + CIF text representing the datablock as a loop. """ # Local imports to avoid import-time cycles from easydiffraction.core.category import CategoryCollection # noqa: PLC0415 From 4ddd38e889997b36f8c411e71f936c8e9e85914c Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 13:42:53 +0200 Subject: [PATCH 13/51] Add sequential fitting infrastructure with CSV output --- .github/copilot-instructions.md | 5 +- docs/architecture/issues_closed.md | 24 + docs/architecture/issues_open.md | 37 - .../architecture/sequential_fitting_design.md | 210 +++--- src/easydiffraction/analysis/analysis.py | 54 ++ src/easydiffraction/analysis/sequential.py | 681 ++++++++++++++++++ tests/integration/fitting/test_sequential.py | 282 ++++++++ 7 files changed, 1153 insertions(+), 140 deletions(-) create mode 100644 src/easydiffraction/analysis/sequential.py create mode 100644 tests/integration/fitting/test_sequential.py diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 67a761c8..1fabd502 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -42,7 +42,8 @@ and UPPER_SNAKE_CASE for constants. - Use `from __future__ import annotations` in every module. - Type-annotate all public function signatures. -- Docstrings on all public classes and methods (numpy style). +- Docstrings on all public classes and methods (numpy style). These must + include sections Parameters, Returns and Raises, where applicable. - Prefer flat over nested, explicit over clever. - Write straightforward code; do not add defensive checks for unlikely edge cases. @@ -147,6 +148,8 @@ `docs/architecture/architecture.md`. - After changes, run linting and formatting fixes with `pixi run fix`. Do not check what was auto-fixed, just accept the fixes and move on. + Then, run linting and formatting checks with `pixi run check` and + address any remaining issues until the code is clean. - After changes, run unit tests with `pixi run unit-tests`. - After changes, run integration tests with `pixi run integration-tests`. diff --git a/docs/architecture/issues_closed.md b/docs/architecture/issues_closed.md index d6e14219..0612ea04 100644 --- a/docs/architecture/issues_closed.md +++ b/docs/architecture/issues_closed.md @@ -4,6 +4,30 @@ Issues that have been fully resolved. Kept for historical reference. --- +## Implement `Project.load()` + +**Resolution:** implemented `Project.load(dir_path)` as a classmethod +that reads `project.cif`, `structures/*.cif`, `experiments/*.cif`, and +`analysis/analysis.cif` (with fallback to `analysis.cif` at root for +backward compatibility). Reconstructs the full project state including +alias parameter references via `_resolve_alias_references()`. Integration +tests verify save → load → parameter comparison and save → load → fit → +χ² comparison. Also used by `fit_sequential` workers to reconstruct +projects from CIF strings. + +--- + +## Eliminate Dummy `Experiments` Wrapper in Single-Fit Mode + +**Resolution:** refactored `Fitter.fit()` and `_residual_function()` to +accept `experiments: list[ExperimentBase]` instead of requiring an +`Experiments` collection. `Analysis.fit()` passes +`experiments_list = [experiment]` in single-fit mode and +`list(experiments.values())` in joint-fit mode. Removed the +`object.__setattr__` hack that forced `_parent` on the dummy wrapper. + +--- + ## Replace UID Map with Direct References and Auto-Apply Constraints **Resolution:** eliminated `UidMapHandler` and random UID generation diff --git a/docs/architecture/issues_open.md b/docs/architecture/issues_open.md index b721a2b2..e01cc36d 100644 --- a/docs/architecture/issues_open.md +++ b/docs/architecture/issues_open.md @@ -10,24 +10,6 @@ needed. --- -## 1. 🔴 Implement `Project.load()` - -**Type:** Completeness - -`save()` serialises all components to CIF files but `load()` is a stub -that raises `NotImplementedError`. Users cannot round-trip a project. - -**Why first:** this is the highest-severity gap. Without it the save -functionality is only half useful — CIF files are written but cannot be -read back. Tutorials that demonstrate save/load are blocked. - -**Fix:** implement `load()` that reads CIF files from the project -directory and reconstructs structures, experiments, and analysis -settings. - -**Depends on:** nothing (standalone). - ---- ## 2. 🟡 Restore Minimiser Variant Support @@ -125,23 +107,6 @@ effectively fixed after experiment creation. --- -## 7. 🟡 Eliminate Dummy `Experiments` Wrapper in Single-Fit Mode - -**Type:** Fragility - -Single-fit mode creates a throw-away `Experiments` collection per -experiment, manually forces `_parent` via `object.__setattr__`, and -passes it to `Fitter`. This bypasses `GuardedBase` parent tracking and -is fragile. - -**Fix:** make `Fitter.fit()` accept a list of experiment objects (or a -single experiment) instead of requiring an `Experiments` collection. Or -add a `fit_single(experiment)` method. - -**Depends on:** nothing, but simpler after issue 5 (Analysis refactor) -clarifies the fitting orchestration. - ---- ## 8. 🟡 Add Explicit `create()` Signatures on Collections @@ -316,12 +281,10 @@ re-derivable default. | # | Issue | Severity | Type | | --- | ---------------------------------------- | -------- | ----------------------- | -| 1 | Implement `Project.load()` | 🔴 High | Completeness | | 2 | Restore minimiser variants | 🟡 Med | Feature loss | | 3 | Rebuild joint-fit weights | 🟡 Med | Fragility | | 5 | `Analysis` as `DatablockItem` | 🟡 Med | Consistency | | 6 | Restrict `data_type` switching | 🔴 High | Correctness/Data safety | -| 7 | Eliminate dummy `Experiments` | 🟡 Med | Fragility | | 8 | Explicit `create()` signatures | 🟡 Med | API safety | | 9 | Future enum extensions | 🟢 Low | Design | | 10 | Unify update orchestration | 🟢 Low | Maintainability | diff --git a/docs/architecture/sequential_fitting_design.md b/docs/architecture/sequential_fitting_design.md index 16f01634..9e11e8ca 100644 --- a/docs/architecture/sequential_fitting_design.md +++ b/docs/architecture/sequential_fitting_design.md @@ -1,7 +1,7 @@ # Sequential Fitting — Architecture Design -**Status:** Draft — for discussion before implementation **Date:** -2026-04-02 +**Status:** Implementation in progress (PRs 1–9 complete, PRs 10–14 +remaining) **Date:** 2026-04-02 (updated 2026-04-03) --- @@ -869,35 +869,28 @@ direct reference — no map lookup needed. `_minimizer_uid` returns `unique_name.replace('.', '__')` instead of a random string. All tutorials, tests, and call sites updated. -### 9.2 Fix `category_collection_to_cif` truncation +### 9.2 Fix `category_collection_to_cif` truncation ✅ -`category_collection_to_cif` has `max_display=20` which truncates loop -output. For CIF used in save/load/round-trip, all rows must be emitted. +**Done.** `category_collection_to_cif` default changed to +`max_display=None` (emit all rows). Truncation is opt-in via explicit +`max_display` parameter, used only by display methods. -Options: +### 9.3 Verify CIF round-trip for experiments ✅ -- (a) Remove `max_display` from `category_collection_to_cif` entirely, - add truncation only in display methods. -- (b) Add a `full=True` parameter and use it when serialising for - persistence. +**Done.** Five integration tests in `test_cif_round_trip.py`: -### 9.3 Verify CIF round-trip for experiments +1. Parameter values survive `as_cif` → `from_cif_str`. +2. Free flags survive the round-trip. +3. Category collections (background, excluded regions, linked phases) + preserve item count. +4. Data points survive (count, first/last values). +5. Structure round-trip with symmetry constraints. -Write an integration test: +### 9.4 Add `destination` parameter to `extract_data_paths_from_zip` ✅ -1. Create a fully configured experiment (instrument, peak, background, - excluded regions, linked phases, data). -2. Serialise to CIF (`experiment.as_cif`). -3. Reconstruct from CIF (`ExperimentFactory.from_cif_str(cif_str)`). -4. Compare all parameter values. - -Fix any parameters that don't survive the round-trip. - -### 9.4 Add `destination` parameter to `extract_data_paths_from_zip` - -Currently extracts to a temp dir. Add optional `destination` parameter -to extract to a user-specified directory, enabling a clean two-step -workflow (extract → fit_sequential). +**Done.** Optional `destination` parameter added. When provided, extracts +to the given directory. When `None`, uses a temporary directory (original +behaviour). ### 9.5 Replace singletons with instance-owned state (partially done) @@ -954,18 +947,12 @@ Constraints auto-enable on `create()` and are applied before fitting starts. The manual `apply_constraints()` method has been removed. Fixing the singleton issue resolves issue #4 as a side effect. -### 9.6 Move `analysis.cif` into `analysis/` directory +### 9.6 Move `analysis.cif` into `analysis/` directory ✅ -Currently `analysis.cif` lives at the project root alongside -`project.cif` and `summary.cif`. Adding an `analysis/` directory for -`results.csv` next to a file named `analysis.cif` at the same level -creates a naming conflict and a confusing layout. - -**Fix:** update `Project.save()` to write `analysis.cif` to -`project_dir/analysis/analysis.cif`. Update `Project.load()` (when -implemented) to read from the new path, with a fallback to the old path -for backward compatibility with existing saved projects. Update docs -(`architecture.md`, `project.md`), tests, and the save output messages. +**Done.** `Project.save()` writes to `analysis/analysis.cif`. +`Project.load()` checks `analysis/analysis.cif` first, falls back to +`analysis.cif` at root for backward compatibility. Unit tests verify +both layouts. --- @@ -978,7 +965,7 @@ resolved first because they clean up the fitting internals that ### Foundation PRs (resolve existing issues) -#### PR 1 — Eliminate dummy Experiments wrapper in single-fit mode (issue #7) +#### PR 1 — Eliminate dummy Experiments wrapper in single-fit mode (issue #7) ✅ > **Title:** `Accept single Experiment in Fitter.fit()` > @@ -990,11 +977,11 @@ resolved first because they clean up the fitting internals that > Update all callers (single-fit, joint-fit). Update unit and > integration tests. -**Why first:** the current dummy-wrapper pattern is the exact -antipattern that `fit_sequential` workers would otherwise inherit. -Fixing it now gives the worker a clean -`Fitter.fit(structures, [experiment])` call without any collection -ceremony. +**Done.** `Fitter.fit()` and `_residual_function()` now accept +`experiments: list[ExperimentBase]`. `Analysis.fit()` passes +`experiments_list = [experiment]` in single-fit mode and +`list(experiments.values())` in joint-fit mode. No more dummy +`Experiments` wrapper or `object.__setattr__` hack. #### PR 2 — Replace UID map with direct references and auto-apply constraints (issue #4 + § 9.5) ✅ @@ -1019,7 +1006,7 @@ multi-project edge case. This PR also absorbed PR 4 (§ 9.1) since switching from random UIDs to `unique_name` was a natural part of the same change. -#### PR 3 — Implement Project.load() (issue #1) +#### PR 3 — Implement Project.load() (issue #1) ✅ > **Title:** `Implement Project.load() from CIF directory` > @@ -1030,11 +1017,11 @@ This PR also absorbed PR 4 (§ 9.1) since switching from random UIDs to > as a fallback. Add integration test: save → load → compare all > parameter values. -**Why third:** the CIF round-trip reliability that `load()` proves is -the same reliability that `fit_sequential` workers depend on (they -reconstruct a project from CIF strings). Implementing `load()` forces us -to fix any serialisation gaps before they become worker bugs. Phase 3 -(dataset replay) also directly uses `load()`. +**Done.** `Project.load()` reads CIF files from the project directory, +reconstructs structures, experiments, and analysis. Resolves alias +`param_unique_name` strings back to live `Parameter` references. +Integration tests verify save → load → parameter comparison and +save → load → fit → χ² comparison. ### Sequential-fitting prerequisite PRs @@ -1043,7 +1030,7 @@ to fix any serialisation gaps before they become worker bugs. Phase 3 > Absorbed into PR 2. Aliases now use `param_unique_name` with direct > object references. All tutorials and tests updated. -#### PR 5 — Fix CIF collection truncation (§ 9.2) +#### PR 5 — Fix CIF collection truncation (§ 9.2) ✅ > **Title:** `Remove max_display truncation from CIF serialisation` > @@ -1052,7 +1039,11 @@ to fix any serialisation gaps before they become worker bugs. Phase 3 > (`show_as_cif()`). Ensures experiments with many background/data > points survive CIF round-trips. -#### PR 6 — Verify CIF round-trip for experiments (§ 9.3) +**Done.** `category_collection_to_cif` default changed to +`max_display=None` (emit all rows). Truncation is now opt-in, only used +by display methods. + +#### PR 6 — Verify CIF round-trip for experiments (§ 9.3) ✅ > **Title:** `Add CIF round-trip integration test for experiments` > @@ -1062,7 +1053,11 @@ to fix any serialisation gaps before they become worker bugs. Phase 3 > asserts all parameter values match. Fix any parameters that don't > survive the round-trip. -#### PR 7 — Move analysis.cif into analysis/ directory (§ 9.6) +**Done.** Five integration tests in `test_cif_round_trip.py`: parameter +values, free flags, categories (background/excluded regions/linked +phases), data points, and structure round-trip. + +#### PR 7 — Move analysis.cif into analysis/ directory (§ 9.6) ✅ > **Title:** `Move analysis.cif into analysis/ directory` > @@ -1071,7 +1066,12 @@ to fix any serialisation gaps before they become worker bugs. Phase 3 > from the new path (with fallback to old path). Update docs > (`architecture.md`, `project.md`), tests, and console output messages. -#### PR 8 — Add destination to extract_data_paths_from_zip (§ 9.4) +**Done.** `Project.save()` writes to `analysis/analysis.cif`. +`Project.load()` checks `analysis/analysis.cif` first, falls back to +`analysis.cif` at root for backward compatibility. Unit tests verify +both layouts. + +#### PR 8 — Add destination to extract_data_paths_from_zip (§ 9.4) ✅ > **Title:** `Add destination parameter to extract_data_paths_from_zip` > @@ -1080,9 +1080,13 @@ to fix any serialisation gaps before they become worker bugs. Phase 3 > directory instead of a temp dir. Enables clean two-step workflow: > extract ZIP → pass directory to `fit_sequential()`. +**Done.** `extract_data_paths_from_zip` accepts `destination` parameter. +When provided, extracts to the given directory. When `None`, uses a +temporary directory (original behaviour). + ### Sequential-fitting core PRs -#### PR 9 — Streaming sequential fit (max_workers=1) +#### PR 9 — Streaming sequential fit (max_workers=1) ✅ > **Title:** `Add fit_sequential() for streaming single-worker fitting` > @@ -1094,14 +1098,14 @@ to fix any serialisation gaps before they become worker bugs. Phase 3 > `extract_diffrn` callback support for metadata columns. Unit tests for > CSV writing, crash recovery, parameter propagation. -This is a sub-step breakdown if the PR proves too large: - -- **PR 9a:** `Add SequentialFitTemplate and _fit_worker function` — - dataclass, worker function, no CSV, no recovery. -- **PR 9b:** `Add CSV output and crash recovery to fit_sequential` — CSV - writing, reading, resumption logic. -- **PR 9c:** `Add parameter propagation and extract_diffrn callback` — - chunk-to-chunk seeding, diffrn metadata columns. +**Done.** Full implementation in `analysis/sequential.py`: +`SequentialFitTemplate` dataclass, `_fit_worker()` module-level function, +CSV helpers (`_build_csv_header`, `_write_csv_header`, `_append_to_csv`, +`_read_csv_for_recovery`), `_build_template()`, chunk-based processing +with parameter propagation, `extract_diffrn` callback support, progress +reporting. Five integration tests in `test_sequential.py`: CSV +production, crash recovery, parameter propagation, diffrn callback, +precondition validation. #### PR 10 — Update plot_param_series to read from CSV @@ -1157,18 +1161,18 @@ This is a sub-step breakdown if the PR proves too large: ### Dependency graph ``` -PR 1 (issue #7: eliminate dummy Experiments) +PR 1 (issue #7: eliminate dummy Experiments) ✅ └─► PR 2 (issue #4: UID map + constraints) ✅ - └─► PR 3 (issue #1: Project.load) - └─► PR 5 (CIF truncation) - └─► PR 6 (CIF round-trip test) - ├─► PR 7 (analysis.cif → analysis/) - │ └─► PR 9 (streaming sequential fit) - │ ├─► PR 10 (plot from CSV) + └─► PR 3 (issue #1: Project.load) ✅ + └─► PR 5 (CIF truncation) ✅ + └─► PR 6 (CIF round-trip test) ✅ + ├─► PR 7 (analysis.cif → analysis/) ✅ + │ └─► PR 9 (streaming sequential fit) ✅ + │ ├─► PR 10 (plot from CSV) ← next │ │ └─► PR 13 (CSV for existing fit) │ └─► PR 11 (parallel fitting) │ └─► PR 14 (optional: parallel fit()) - └─► PR 8 (zip destination) + └─► PR 8 (zip destination) ✅ └─► PR 12 (dataset replay) ``` @@ -1187,18 +1191,20 @@ are all stdlib. ### Risks -| Risk | Mitigation | -| ------------------------------------------------ | -------------------------------------------------------- | -| CIF round-trip loses information | PR 3 (load) + PR 6 (round-trip test) verify before PR 9 | -| CIF collection truncation at 20 rows | PR 5 fixes before PR 9 | -| Worker memory leak (large N, long-running pool) | Use `max_tasks_per_child=100` on the pool | -| Pickling failures for SequentialFitTemplate | Keep it a plain dataclass with only str/dict/list fields | -| crysfml Fortran global state in forked processes | Enforced `spawn` context avoids fork issues | +| Risk | Mitigation | +| ------------------------------------------------ | ------------------------------------------------------------- | +| CIF round-trip loses information | ✅ PR 3 (load) + PR 6 (round-trip test) verified | +| CIF collection truncation at 20 rows | ✅ PR 5 fixed (default `max_display=None`) | +| Worker memory leak (large N, long-running pool) | Use `max_tasks_per_child=100` on the pool (PR 11) | +| Pickling failures for SequentialFitTemplate | ✅ Keep it a plain dataclass with only str/dict/list fields | +| crysfml Fortran global state in forked processes | Enforced `spawn` context avoids fork issues (PR 11) | -### Resolved open issues (now prerequisites) +### Resolved open issues (now prerequisites) — all done ✅ -- **Issue #7 (dummy Experiments wrapper):** resolved in PR 1. The worker - uses the clean `Fitter.fit(structures, [experiment])` API. +- **Issue #7 (dummy Experiments wrapper):** resolved in PR 1. + `Fitter.fit()` and `_residual_function()` accept + `list[ExperimentBase]`. The worker uses the clean + `Fitter.fit(structures, [experiment])` API. - **Issue #4 (constraint refresh) + § 9.1 (alias unique_name) + § 9.5 (singletons):** resolved in PR 2. `UidMapHandler` eliminated; aliases use direct object references and deterministic `unique_name` for CIF; @@ -1206,31 +1212,31 @@ are all stdlib. auto-enable on `create()`. `ConstraintsHandler` remains a singleton but is always in sync — multi-project isolation is an optional follow-up. -- **Issue #1 (Project.load):** resolved in PR 3. CIF round-trip - reliability is proven before workers depend on it. Dataset replay - (PR 12) uses `load()` directly. Note: `Project.load()` must now - resolve `_alias.param_unique_name` strings back to `Parameter` objects - by building a temporary `unique_name → Parameter` map. +- **Issue #1 (Project.load):** resolved in PR 3. `Project.load()` reads + CIF files, reconstructs full project state, resolves alias + `param_unique_name` strings back to `Parameter` objects via + `_resolve_alias_references()`. Dataset replay (PR 12) uses `load()` + directly. --- ## 12. Summary -| Aspect | Decision | -| ------------------- | ---------------------------------------------------------------------------------- | -| Parallelism backend | `concurrent.futures.ProcessPoolExecutor` with `spawn` | -| Worker isolation | Each worker creates a fresh `Project` — no shared state | -| Data source | `data_dir` argument; ZIP → extract first | -| Data flow | Template CIF + data path → worker → result dict → CSV | -| Parameter IDs | `unique_name` (deterministic), not `uid` (random) | -| Parameter seeding | Last successful result in chunk → next chunk | -| CSV location | `project_dir/analysis/results.csv` (deterministic) | -| CSV contents | Fit metrics + diffrn metadata + all free param values/uncert | -| Metadata extraction | User-provided `extract_diffrn` callback, not hidden in lib | -| Crash recovery | Read existing CSV, skip fitted files, resume | -| Plotting | Unified `plot_param_series()` always reads from CSV | -| Configuration | `max_workers` + `data_dir` on `fit_sequential()` | -| Project layout | `analysis.cif` moves into `analysis/` directory | -| Singletons | `UidMapHandler` eliminated; `ConstraintsHandler` stays singleton but always synced | -| New dependencies | None (stdlib only) | -| First step | PRs 1–3 (foundation issues), then PRs 4–8 (prerequisites), then PR 9+ | +| Aspect | Decision | Status | +| ------------------- | ---------------------------------------------------------------------------------- | ------ | +| Parallelism backend | `concurrent.futures.ProcessPoolExecutor` with `spawn` | PR 11 | +| Worker isolation | Each worker creates a fresh `Project` — no shared state | ✅ | +| Data source | `data_dir` argument; ZIP → extract first | ✅ | +| Data flow | Template CIF + data path → worker → result dict → CSV | ✅ | +| Parameter IDs | `unique_name` (deterministic), not `uid` (random) | ✅ | +| Parameter seeding | Last successful result in chunk → next chunk | ✅ | +| CSV location | `project_dir/analysis/results.csv` (deterministic) | ✅ | +| CSV contents | Fit metrics + diffrn metadata + all free param values/uncert | ✅ | +| Metadata extraction | User-provided `extract_diffrn` callback, not hidden in lib | ✅ | +| Crash recovery | Read existing CSV, skip fitted files, resume | ✅ | +| Plotting | Unified `plot_param_series()` always reads from CSV | PR 10 | +| Configuration | `max_workers` + `data_dir` on `fit_sequential()` | ✅ | +| Project layout | `analysis.cif` moves into `analysis/` directory | ✅ | +| Singletons | `UidMapHandler` eliminated; `ConstraintsHandler` stays singleton but always synced | ✅ | +| New dependencies | None (stdlib only) | ✅ | +| First step | PRs 1–9 done; PRs 10–14 remaining | ✅ | diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index 3a675381..98517729 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -722,6 +722,60 @@ def fit(self, verbosity: str | None = None) -> None: if self.project.info.path is not None: self.project.save() + def fit_sequential( + self, + data_dir: str, + max_workers: int | str = 1, + chunk_size: int | None = None, + file_pattern: str = '*', + extract_diffrn: object = None, + verbosity: str | None = None, + ) -> None: + """ + Run sequential fitting over all data files in a directory. + + Fits each dataset independently using the current structure and + experiment as a template. Results are written incrementally to + ``analysis/results.csv`` in the project directory. + + The project must contain exactly one structure and one + experiment (the template), and must have been saved + (``save_as()``) before calling this method. + + Parameters + ---------- + data_dir : str + Path to directory containing data files. + max_workers : int | str, default=1 + Number of parallel worker processes. ``1`` = sequential. + ``'auto'`` = physical CPU count. + chunk_size : int | None, default=None + Files per chunk. Default ``None`` uses *max_workers*. + file_pattern : str, default='*' + Glob pattern to filter files in *data_dir*. + extract_diffrn : object, default=None + User callback ``f(file_path) → {diffrn_field: value}``. + Called per file after fitting. ``None`` = no diffrn + metadata. + verbosity : str | None, default=None + ``'full'``, ``'short'``, or ``'silent'``. Default: project + verbosity. + """ + from easydiffraction.analysis.sequential import fit_sequential as _fit_seq # noqa: PLC0415 + + # Apply constraints before building the template + self._update_categories() + + _fit_seq( + analysis=self, + data_dir=data_dir, + max_workers=max_workers, + chunk_size=chunk_size, + file_pattern=file_pattern, + extract_diffrn=extract_diffrn, + verbosity=verbosity, + ) + def show_fit_results(self) -> None: """ Display a summary of the fit results. diff --git a/src/easydiffraction/analysis/sequential.py b/src/easydiffraction/analysis/sequential.py new file mode 100644 index 00000000..739fc1c4 --- /dev/null +++ b/src/easydiffraction/analysis/sequential.py @@ -0,0 +1,681 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +""" +Sequential fitting infrastructure: template, worker, CSV, recovery. +""" + +from __future__ import annotations + +import contextlib +import csv +from dataclasses import dataclass +from dataclasses import replace +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any + +from easydiffraction.io.ascii import extract_data_paths_from_dir +from easydiffraction.utils.enums import VerbosityEnum +from easydiffraction.utils.logging import log + +if TYPE_CHECKING: + from collections.abc import Callable + +# ------------------------------------------------------------------ +# Template dataclass (picklable for future multiprocessing) +# ------------------------------------------------------------------ + + +@dataclass(frozen=True) +class SequentialFitTemplate: + """ + Snapshot of everything a worker needs to recreate and fit a project. + + All fields are plain Python types (str, dict, list) so that the + template can be pickled for ``ProcessPoolExecutor`` in the future. + """ + + structure_cif: str + experiment_cif: str + initial_params: dict[str, float] + free_param_unique_names: list[str] + alias_defs: list[dict[str, str]] + constraint_defs: list[str] + constraints_enabled: bool + minimizer_tag: str + calculator_tag: str + diffrn_field_names: list[str] + + +# ------------------------------------------------------------------ +# Worker function (module-level for pickling) +# ------------------------------------------------------------------ + + +def _fit_worker( + template: SequentialFitTemplate, + data_path: str, +) -> dict[str, Any]: + """ + Fit a single dataset in isolation. + + Creates a fresh Project, loads the template configuration via CIF, + replaces data from *data_path*, applies initial parameters, fits, + and returns a plain dict of results. + + Parameters + ---------- + template : SequentialFitTemplate + Snapshot of the project configuration. + data_path : str + Path to the data file to fit. + + Returns + ------- + dict[str, Any] + Result dict with keys: ``file_path``, ``fit_success``, + ``chi_squared``, ``reduced_chi_squared``, ``n_iterations``, and + per-parameter ``{unique_name}`` / ``{unique_name}.uncertainty``. + """ + # Lazy import to avoid circular dependencies and keep the module + # importable without heavy imports at top level. + from easydiffraction.project.project import Project # noqa: PLC0415 + + result: dict[str, Any] = {'file_path': data_path} + + try: + # 1. Create a fresh, isolated project + Project._loading = True + try: + project = Project(name='_worker') + finally: + Project._loading = False + + # 2. Load structure from template CIF + project.structures.add_from_cif_str(template.structure_cif) + + # 3. Load experiment from template CIF + # (full config + template data) + project.experiments.add_from_cif_str(template.experiment_cif) + expt = list(project.experiments.values())[0] + + # 4. Replace data from the new data path + expt._load_ascii_data_to_experiment(data_path) + + # 5. Override parameter values from propagated starting values + _apply_param_overrides(project, template.initial_params) + + # 6. Set free flags + _set_free_params(project, template.free_param_unique_names) + + # 7. Apply constraints + if template.constraints_enabled and template.alias_defs: + _apply_constraints( + project, + template.alias_defs, + template.constraint_defs, + ) + + # 8. Set calculator and minimizer + # (internal, no console output) + from easydiffraction.analysis.calculators.factory import CalculatorFactory # noqa: PLC0415 + from easydiffraction.analysis.fitting import Fitter # noqa: PLC0415 + + expt._calculator = CalculatorFactory.create(template.calculator_tag) + expt._calculator_type = template.calculator_tag + project.analysis.fitter = Fitter(template.minimizer_tag) + + # 9. Fit + project.analysis.fit(verbosity='silent') + + # 10. Collect results + result.update(_collect_results(project, template)) + + except Exception as exc: # noqa: BLE001 + result['fit_success'] = False + result['chi_squared'] = None + result['reduced_chi_squared'] = None + result['n_iterations'] = 0 + result['error'] = str(exc) + + return result + + +# ------------------------------------------------------------------ +# Helper functions +# ------------------------------------------------------------------ + + +def _apply_param_overrides( + project: object, + overrides: dict[str, float], +) -> None: + """ + Set parameter values from a ``{unique_name: value}`` dict. + + Parameters + ---------- + project : object + The worker's project instance. + overrides : dict[str, float] + Map of parameter unique names to values. + """ + all_params = project.structures.parameters + project.experiments.parameters + by_name = {p.unique_name: p for p in all_params if hasattr(p, 'unique_name')} + for name, value in overrides.items(): + if name in by_name: + by_name[name].value = value + + +def _set_free_params( + project: object, + free_names: list[str], +) -> None: + """ + Mark parameters as free based on their unique names. + + Parameters + ---------- + project : object + The worker's project instance. + free_names : list[str] + Unique names of parameters to mark as free. + """ + from easydiffraction.core.variable import Parameter # noqa: PLC0415 + + all_params = project.structures.parameters + project.experiments.parameters + free_set = set(free_names) + for p in all_params: + if isinstance(p, Parameter) and hasattr(p, 'unique_name'): + p.free = p.unique_name in free_set + + +def _apply_constraints( + project: object, + alias_defs: list[dict[str, str]], + constraint_defs: list[str], +) -> None: + """ + Recreate aliases and constraints in the worker project. + + Parameters + ---------- + project : object + The worker's project instance. + alias_defs : list[dict[str, str]] + Each dict has ``label`` and ``param_unique_name``. + constraint_defs : list[str] + Constraint expression strings. + """ + all_params = project.structures.parameters + project.experiments.parameters + by_name = {p.unique_name: p for p in all_params if hasattr(p, 'unique_name')} + + for alias_def in alias_defs: + param = by_name.get(alias_def['param_unique_name']) + if param is not None: + project.analysis.aliases.create( + label=alias_def['label'], + param=param, + ) + + for expr in constraint_defs: + project.analysis.constraints.create(expression=expr) + + +def _collect_results( + project: object, + template: SequentialFitTemplate, +) -> dict[str, Any]: + """ + Collect fit results into a plain dict. + + Parameters + ---------- + project : object + The worker's project instance after fitting. + template : SequentialFitTemplate + The template (for knowing which params to collect). + + Returns + ------- + dict[str, Any] + Fit metrics and parameter values/uncertainties. + """ + from easydiffraction.core.variable import Parameter # noqa: PLC0415 + + result: dict[str, Any] = {} + fit_results = project.analysis.fit_results + + if fit_results is not None: + result['fit_success'] = fit_results.success + result['chi_squared'] = fit_results.chi_square + result['reduced_chi_squared'] = fit_results.reduced_chi_square + result['n_iterations'] = project.analysis.fitter.minimizer.tracker.best_iteration or 0 + else: + result['fit_success'] = False + result['chi_squared'] = None + result['reduced_chi_squared'] = None + result['n_iterations'] = 0 + + # Collect all free parameter values and uncertainties + all_params = project.structures.parameters + project.experiments.parameters + free_set = set(template.free_param_unique_names) + result['params'] = {} + for p in all_params: + if isinstance(p, Parameter) and p.unique_name in free_set: + result[p.unique_name] = p.value + result[f'{p.unique_name}.uncertainty'] = p.uncertainty + result['params'][p.unique_name] = p.value + + return result + + +# ------------------------------------------------------------------ +# CSV helpers +# ------------------------------------------------------------------ + +_META_COLUMNS = [ + 'file_path', + 'chi_squared', + 'reduced_chi_squared', + 'fit_success', + 'n_iterations', +] + + +def _build_csv_header( + template: SequentialFitTemplate, +) -> list[str]: + """ + Build the CSV column header list. + + Parameters + ---------- + template : SequentialFitTemplate + The template for diffrn fields and free param names. + + Returns + ------- + list[str] + Ordered list of column names. + """ + header = list(_META_COLUMNS) + header.extend(f'diffrn.{field}' for field in template.diffrn_field_names) + for name in template.free_param_unique_names: + header.append(name) + header.append(f'{name}.uncertainty') + return header + + +def _write_csv_header( + csv_path: Path, + header: list[str], +) -> None: + """ + Create the CSV file and write the header row. + + Parameters + ---------- + csv_path : Path + Path to the CSV file. + header : list[str] + Column names. + """ + with csv_path.open('w', newline='', encoding='utf-8') as f: + writer = csv.DictWriter(f, fieldnames=header) + writer.writeheader() + + +def _append_to_csv( + csv_path: Path, + header: list[str], + results: list[dict[str, Any]], +) -> None: + """ + Append result rows to the CSV file. + + Parameters + ---------- + csv_path : Path + Path to the CSV file. + header : list[str] + Column names (for DictWriter fieldnames). + results : list[dict[str, Any]] + Result dicts from workers. + """ + with csv_path.open('a', newline='', encoding='utf-8') as f: + writer = csv.DictWriter(f, fieldnames=header, extrasaction='ignore') + for result in results: + writer.writerow(result) + + +def _read_csv_for_recovery( + csv_path: Path, +) -> tuple[set[str], dict[str, float] | None]: + """ + Read an existing CSV for crash recovery. + + Parameters + ---------- + csv_path : Path + Path to the CSV file. + + Returns + ------- + tuple[set[str], dict[str, float] | None] + A set of already-fitted file paths and the parameter values from + the last successful row (or ``None`` if no rows). + """ + fitted: set[str] = set() + last_params: dict[str, float] | None = None + + if not csv_path.is_file(): + return fitted, last_params + + with csv_path.open(newline='', encoding='utf-8') as f: + reader = csv.DictReader(f) + for row in reader: + file_path = row.get('file_path', '') + if file_path: + fitted.add(file_path) + if row.get('fit_success', '').lower() == 'true': + # Extract parameter values from this row + params: dict[str, float] = {} + for key, val in row.items(): + if key in _META_COLUMNS: + continue + if key.startswith('diffrn.'): + continue + if key.endswith('.uncertainty'): + continue + if val: + with contextlib.suppress(ValueError, TypeError): + params[key] = float(val) + if params: + last_params = params + + return fitted, last_params + + +# ------------------------------------------------------------------ +# Template builder +# ------------------------------------------------------------------ + + +def _build_template(project: object) -> SequentialFitTemplate: + """ + Build a SequentialFitTemplate from the current project state. + + Parameters + ---------- + project : object + The main project instance (must have exactly 1 structure and 1 + experiment). + + Returns + ------- + SequentialFitTemplate + A frozen, picklable snapshot. + """ + from easydiffraction.core.variable import Parameter # noqa: PLC0415 + + structure = list(project.structures.values())[0] + experiment = list(project.experiments.values())[0] + + # Collect free parameter unique_names and initial values + all_params = project.structures.parameters + project.experiments.parameters + free_names: list[str] = [] + initial_params: dict[str, float] = {} + for p in all_params: + if isinstance(p, Parameter) and not p.constrained and p.free: + free_names.append(p.unique_name) + initial_params[p.unique_name] = p.value + + # Collect alias definitions + alias_defs: list[dict[str, str]] = [ + { + 'label': alias.label.value, + 'param_unique_name': alias.param_unique_name.value, + } + for alias in project.analysis.aliases + ] + + # Collect constraint expressions + constraint_defs: list[str] = [ + constraint.expression.value for constraint in project.analysis.constraints + ] + + # Collect diffrn field names from the experiment + diffrn_field_names: list[str] = [] + if hasattr(experiment, 'diffrn'): + diffrn_field_names.extend( + p.name + for p in experiment.diffrn.parameters + if hasattr(p, 'name') and p.name not in ('type',) + ) + + return SequentialFitTemplate( + structure_cif=structure.as_cif, + experiment_cif=experiment.as_cif, + initial_params=initial_params, + free_param_unique_names=free_names, + alias_defs=alias_defs, + constraint_defs=constraint_defs, + constraints_enabled=project.analysis.constraints.enabled, + minimizer_tag=project.analysis.current_minimizer or 'lmfit', + calculator_tag=experiment.calculator_type, + diffrn_field_names=diffrn_field_names, + ) + + +# ------------------------------------------------------------------ +# Progress reporting +# ------------------------------------------------------------------ + + +def _report_chunk_progress( + chunk_idx: int, + total_chunks: int, + results: list[dict[str, Any]], + verbosity: VerbosityEnum, +) -> None: + """ + Report progress after a chunk completes. + + Parameters + ---------- + chunk_idx : int + 1-based index of the current chunk. + total_chunks : int + Total number of chunks. + results : list[dict[str, Any]] + Results from the chunk. + verbosity : VerbosityEnum + Output verbosity. + """ + if verbosity is VerbosityEnum.SILENT: + return + + num_files = len(results) + successful = [r for r in results if r.get('fit_success')] + if successful: + avg_chi2 = sum(r['reduced_chi_squared'] for r in successful) / len(successful) + chi2_str = f'{avg_chi2:.2f}' + else: + chi2_str = '—' + + if verbosity is VerbosityEnum.SHORT: + status = '✅' if successful else '❌' + print(f'{status} Chunk {chunk_idx}/{total_chunks}: {num_files} files, avg χ² = {chi2_str}') + elif verbosity is VerbosityEnum.FULL: + print( + f'Chunk {chunk_idx}/{total_chunks}: ' + f'{num_files} files, {len(successful)} succeeded, ' + f'avg reduced χ² = {chi2_str}' + ) + for r in results: + status = '✅' if r.get('fit_success') else '❌' + rchi2 = r.get('reduced_chi_squared') + rchi2_str = f'{rchi2:.2f}' if rchi2 is not None else '—' + print(f' {status} {Path(r["file_path"]).name}: χ² = {rchi2_str}') + + +# ------------------------------------------------------------------ +# Main orchestration +# ------------------------------------------------------------------ + + +def fit_sequential( + analysis: object, + data_dir: str, + max_workers: int | str = 1, + chunk_size: int | None = None, + file_pattern: str = '*', + extract_diffrn: Callable | None = None, + verbosity: str | None = None, +) -> None: + """ + Run sequential fitting over all data files in a directory. + + Parameters + ---------- + analysis : object + The ``Analysis`` instance (owns project reference). + data_dir : str + Path to directory containing data files. + max_workers : int | str, default=1 + Number of parallel worker processes. ``1`` = sequential. + ``'auto'`` = physical CPU count (future). + chunk_size : int | None, default=None + Files per chunk. Default ``None`` uses ``max_workers``. + file_pattern : str, default='*' + Glob pattern to filter files in *data_dir*. + extract_diffrn : Callable | None, default=None + User callback: ``f(file_path) → {diffrn_field: value}``. + verbosity : str | None, default=None + ``'full'``, ``'short'``, ``'silent'``. Default: project + verbosity. + + Raises + ------ + ValueError + If preconditions are not met (e.g. multiple structures, missing + project path, no free parameters). + """ + project = analysis.project + verb = VerbosityEnum(verbosity if verbosity is not None else project.verbosity) + + # ── Preconditions ──────────────────────────────────────────── + if len(project.structures) != 1: + msg = f'Sequential fitting requires exactly 1 structure, found {len(project.structures)}.' + raise ValueError(msg) + + if len(project.experiments) != 1: + msg = ( + f'Sequential fitting requires exactly 1 experiment (the template), ' + f'found {len(project.experiments)}.' + ) + raise ValueError(msg) + + if project.info.path is None: + msg = 'Project must be saved before sequential fitting. Call save_as() first.' + raise ValueError(msg) + + # Discover data files + data_paths = extract_data_paths_from_dir(data_dir, file_pattern=file_pattern) + + from easydiffraction.core.variable import Parameter # noqa: PLC0415 + + free_params = [ + p for p in project.parameters if isinstance(p, Parameter) and not p.constrained and p.free + ] + if not free_params: + msg = 'No free parameters found. Mark at least one parameter as free.' + raise ValueError(msg) + + # ── Build template ─────────────────────────────────────────── + template = _build_template(project) + + # ── CSV setup and crash recovery ───────────────────────────── + csv_path = project.info.path / 'analysis' / 'results.csv' + csv_path.parent.mkdir(parents=True, exist_ok=True) + header = _build_csv_header(template) + + already_fitted, recovered_params = _read_csv_for_recovery(csv_path) + + if already_fitted: + num_skipped = len(already_fitted) + log.info(f'Resuming: {num_skipped} files already fitted, skipping.') + if verb is not VerbosityEnum.SILENT: + print(f'📂 Resuming from CSV: {num_skipped} files already fitted.') + # Seed from recovered params if available + if recovered_params is not None: + template = replace(template, initial_params=recovered_params) + else: + _write_csv_header(csv_path, header) + + # Filter out already-fitted files + remaining = [p for p in data_paths if p not in already_fitted] + if not remaining: + if verb is not VerbosityEnum.SILENT: + print('✅ All files already fitted. Nothing to do.') + return + + # ── Resolve workers and chunk size ─────────────────────────── + if isinstance(max_workers, str) and max_workers == 'auto': + import os # noqa: PLC0415 + + max_workers = os.cpu_count() or 1 + + if not isinstance(max_workers, int) or max_workers < 1: + msg = f"max_workers must be a positive integer or 'auto', got {max_workers!r}" + raise ValueError(msg) + + if chunk_size is None: + chunk_size = max_workers + + # ── Chunk and fit ──────────────────────────────────────────── + chunks = [remaining[i : i + chunk_size] for i in range(0, len(remaining), chunk_size)] + total_chunks = len(chunks) + + if verb is not VerbosityEnum.SILENT: + print( + f'🚀 Sequential fitting: {len(remaining)} files in ' + f'{total_chunks} chunks (max_workers={max_workers})' + ) + + for chunk_idx, chunk in enumerate(chunks, start=1): + # Single-worker mode: call worker directly + results = [_fit_worker(template, path) for path in chunk] + + # Extract diffrn metadata in the main process + if extract_diffrn is not None: + for result in results: + try: + diffrn_values = extract_diffrn(result['file_path']) + for key, val in diffrn_values.items(): + result[f'diffrn.{key}'] = val + except Exception as exc: # noqa: BLE001 + log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') + + # Write to CSV + _append_to_csv(csv_path, header, results) + + # Report progress + _report_chunk_progress(chunk_idx, total_chunks, results, verb) + + # Propagate: use last successful file's + # params as starting values + last_ok = None + for r in reversed(results): + if r.get('fit_success') and r.get('params'): + last_ok = r + break + + if last_ok is not None: + template = replace(template, initial_params=last_ok['params']) + + if verb is not VerbosityEnum.SILENT: + total_fitted = len(already_fitted) + len(remaining) + print(f'✅ Sequential fitting complete: {total_fitted} files processed.') + print(f'📄 Results saved to: {csv_path}') diff --git a/tests/integration/fitting/test_sequential.py b/tests/integration/fitting/test_sequential.py new file mode 100644 index 00000000..c88f3b4b --- /dev/null +++ b/tests/integration/fitting/test_sequential.py @@ -0,0 +1,282 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Integration tests for Analysis.fit_sequential().""" + +from __future__ import annotations + +import csv +import shutil +import tempfile +from pathlib import Path + +import pytest +from numpy.testing import assert_almost_equal + +from easydiffraction import ExperimentFactory +from easydiffraction import Project +from easydiffraction import StructureFactory +from easydiffraction import download_data +from easydiffraction.utils.enums import VerbosityEnum + +TEMP_DIR = tempfile.gettempdir() + + +def _create_sequential_project(tmp_path: Path) -> tuple[Project, str]: + """ + Build a project for sequential fitting and save it. + + Returns the project and the path to a data directory with a few + copies of the same data file (to simulate a scan). + """ + # Structure + model = StructureFactory.from_scratch(name='lbco') + model.space_group.name_h_m = 'P m -3 m' + model.cell.length_a = 3.8909 + model.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.5, + ) + model.atom_sites.create( + label='Ba', + type_symbol='Ba', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.5, + ) + model.atom_sites.create( + label='Co', + type_symbol='Co', + fract_x=0.5, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='b', + b_iso=0.5, + ) + model.atom_sites.create( + label='O', + type_symbol='O', + fract_x=0, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='c', + b_iso=0.5, + ) + + # Experiment (template) + data_path = download_data(id=3, destination=TEMP_DIR) + expt = ExperimentFactory.from_data_path( + name='template', + data_path=data_path, + verbosity=VerbosityEnum.SILENT, + ) + expt.instrument.setup_wavelength = 1.494 + expt.instrument.calib_twotheta_offset = 0.6225 + expt.peak.broad_gauss_u = 0.0834 + expt.peak.broad_gauss_v = -0.1168 + expt.peak.broad_gauss_w = 0.123 + expt.peak.broad_lorentz_x = 0 + expt.peak.broad_lorentz_y = 0.0797 + expt.background.create(id='1', x=10, y=170) + expt.background.create(id='2', x=165, y=170) + expt.linked_phases.create(id='lbco', scale=9.0) + + # Project assembly + project = Project(name='seq_test') + project.structures.add(model) + project.experiments.add(expt) + + # Free parameters + model.cell.length_a.free = True + expt.linked_phases['lbco'].scale.free = True + expt.instrument.calib_twotheta_offset.free = True + expt.background['1'].y.free = True + expt.background['2'].y.free = True + + # Initial fit on the template + project.analysis.fit(verbosity='silent') + + # Save project + proj_dir = str(tmp_path / 'seq_project') + project.save_as(proj_dir) + + # Create a data directory with copies of the same data file + data_dir = tmp_path / 'scan_data' + data_dir.mkdir() + for i in range(3): + shutil.copy(data_path, data_dir / f'scan_{i + 1:03d}.xye') + + return project, str(data_dir) + + +# ------------------------------------------------------------------ +# Test 1: Basic sequential fit produces CSV +# ------------------------------------------------------------------ + + +def test_fit_sequential_produces_csv(tmp_path) -> None: + """fit_sequential creates a results.csv with one row per file.""" + project, data_dir = _create_sequential_project(tmp_path) + + project.analysis.fit_sequential( + data_dir=data_dir, + verbosity='silent', + ) + + csv_path = project.info.path / 'analysis' / 'results.csv' + assert csv_path.is_file(), 'results.csv was not created' + + with csv_path.open() as f: + reader = csv.DictReader(f) + rows = list(reader) + + assert len(rows) == 3, f'Expected 3 rows, got {len(rows)}' + + # Each row should have fit_success + for row in rows: + assert row['fit_success'] == 'True', f'Fit failed for {row["file_path"]}' + + # Each row should have parameter values + assert 'lbco.cell.length_a' in rows[0] + assert rows[0]['lbco.cell.length_a'] != '' + + +# ------------------------------------------------------------------ +# Test 2: Crash recovery skips already-fitted files +# ------------------------------------------------------------------ + + +def test_fit_sequential_crash_recovery(tmp_path) -> None: + """Running fit_sequential twice does not re-fit already-fitted files.""" + project, data_dir = _create_sequential_project(tmp_path) + + # First run: fit all 3 files + project.analysis.fit_sequential( + data_dir=data_dir, + verbosity='silent', + ) + + csv_path = project.info.path / 'analysis' / 'results.csv' + with csv_path.open() as f: + rows_first = list(csv.DictReader(f)) + assert len(rows_first) == 3 + + # Second run: should skip all 3 files + project.analysis.fit_sequential( + data_dir=data_dir, + verbosity='silent', + ) + + with csv_path.open() as f: + rows_second = list(csv.DictReader(f)) + # Still 3 rows — no duplicates + assert len(rows_second) == 3 + + +# ------------------------------------------------------------------ +# Test 3: Parameter propagation +# ------------------------------------------------------------------ + + +def test_fit_sequential_parameter_propagation(tmp_path) -> None: + """Parameters from one fit propagate to the next.""" + project, data_dir = _create_sequential_project(tmp_path) + + project.analysis.fit_sequential( + data_dir=data_dir, + verbosity='silent', + ) + + csv_path = project.info.path / 'analysis' / 'results.csv' + with csv_path.open() as f: + rows = list(csv.DictReader(f)) + + # All rows should have similar parameter values (same data) + vals = [float(r['lbco.cell.length_a']) for r in rows] + for v in vals: + assert_almost_equal(v, vals[0], decimal=3) + + +# ------------------------------------------------------------------ +# Test 4: extract_diffrn callback +# ------------------------------------------------------------------ + + +def test_fit_sequential_with_diffrn_callback(tmp_path) -> None: + """extract_diffrn callback populates diffrn columns in CSV.""" + project, data_dir = _create_sequential_project(tmp_path) + + temperatures = {'scan_001.xye': 300.0, 'scan_002.xye': 350.0, 'scan_003.xye': 400.0} + + def extract_diffrn(file_path: str) -> dict[str, float]: + name = Path(file_path).name + return {'ambient_temperature': temperatures.get(name, 0.0)} + + project.analysis.fit_sequential( + data_dir=data_dir, + extract_diffrn=extract_diffrn, + verbosity='silent', + ) + + csv_path = project.info.path / 'analysis' / 'results.csv' + with csv_path.open() as f: + rows = list(csv.DictReader(f)) + + # Check that temperature column is present and populated + for row in rows: + name = Path(row['file_path']).name + if 'diffrn.ambient_temperature' in row: + expected = temperatures.get(name, 0.0) + assert_almost_equal(float(row['diffrn.ambient_temperature']), expected) + + +# ------------------------------------------------------------------ +# Test 5: Precondition checks +# ------------------------------------------------------------------ + + +def test_fit_sequential_requires_saved_project(tmp_path) -> None: + """fit_sequential raises if project hasn't been saved.""" + data_path = download_data(id=3, destination=TEMP_DIR) + model = StructureFactory.from_scratch(name='s') + expt = ExperimentFactory.from_data_path( + name='e', + data_path=data_path, + verbosity=VerbosityEnum.SILENT, + ) + expt.linked_phases.create(id='s', scale=1.0) + expt.linked_phases['s'].scale.free = True + project = Project(name='unsaved') + project.structures.add(model) + project.experiments.add(expt) + + with pytest.raises(ValueError, match='must be saved'): + project.analysis.fit_sequential(data_dir=str(tmp_path)) + + +def test_fit_sequential_requires_one_structure(tmp_path) -> None: + """fit_sequential raises if no structures exist.""" + project = Project(name='no_struct') + project.save_as(str(tmp_path / 'proj')) + + with pytest.raises(ValueError, match='exactly 1 structure'): + project.analysis.fit_sequential(data_dir=str(tmp_path)) + + +def test_fit_sequential_requires_one_experiment(tmp_path) -> None: + """fit_sequential raises if no experiments exist.""" + model = StructureFactory.from_scratch(name='s') + project = Project(name='no_expt') + project.structures.add(model) + project.save_as(str(tmp_path / 'proj')) + + with pytest.raises(ValueError, match='exactly 1 experiment'): + project.analysis.fit_sequential(data_dir=str(tmp_path)) From de0d2184288016b29ecee82cd51b27267d2338b5 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 16:38:17 +0200 Subject: [PATCH 14/51] Unify plot_param_series to read from CSV with snapshot fallback --- docs/architecture/issues_closed.md | 8 +- docs/architecture/issues_open.md | 2 - .../architecture/sequential_fitting_design.md | 58 +++++++------ src/easydiffraction/display/plotting.py | 86 +++++++++++++++++-- src/easydiffraction/project/project.py | 36 ++++++-- 5 files changed, 145 insertions(+), 45 deletions(-) diff --git a/docs/architecture/issues_closed.md b/docs/architecture/issues_closed.md index 0612ea04..f95e3b89 100644 --- a/docs/architecture/issues_closed.md +++ b/docs/architecture/issues_closed.md @@ -10,10 +10,10 @@ Issues that have been fully resolved. Kept for historical reference. that reads `project.cif`, `structures/*.cif`, `experiments/*.cif`, and `analysis/analysis.cif` (with fallback to `analysis.cif` at root for backward compatibility). Reconstructs the full project state including -alias parameter references via `_resolve_alias_references()`. Integration -tests verify save → load → parameter comparison and save → load → fit → -χ² comparison. Also used by `fit_sequential` workers to reconstruct -projects from CIF strings. +alias parameter references via `_resolve_alias_references()`. +Integration tests verify save → load → parameter comparison and save → +load → fit → χ² comparison. Also used by `fit_sequential` workers to +reconstruct projects from CIF strings. --- diff --git a/docs/architecture/issues_open.md b/docs/architecture/issues_open.md index e01cc36d..b1ed9aa8 100644 --- a/docs/architecture/issues_open.md +++ b/docs/architecture/issues_open.md @@ -10,7 +10,6 @@ needed. --- - ## 2. 🟡 Restore Minimiser Variant Support **Type:** Feature loss + Design limitation @@ -107,7 +106,6 @@ effectively fixed after experiment creation. --- - ## 8. 🟡 Add Explicit `create()` Signatures on Collections **Type:** API safety diff --git a/docs/architecture/sequential_fitting_design.md b/docs/architecture/sequential_fitting_design.md index 9e11e8ca..183b01fc 100644 --- a/docs/architecture/sequential_fitting_design.md +++ b/docs/architecture/sequential_fitting_design.md @@ -1,6 +1,6 @@ # Sequential Fitting — Architecture Design -**Status:** Implementation in progress (PRs 1–9 complete, PRs 10–14 +**Status:** Implementation in progress (PRs 1–10 complete, PRs 11–14 remaining) **Date:** 2026-04-02 (updated 2026-04-03) --- @@ -888,9 +888,9 @@ tutorials, tests, and call sites updated. ### 9.4 Add `destination` parameter to `extract_data_paths_from_zip` ✅ -**Done.** Optional `destination` parameter added. When provided, extracts -to the given directory. When `None`, uses a temporary directory (original -behaviour). +**Done.** Optional `destination` parameter added. When provided, +extracts to the given directory. When `None`, uses a temporary directory +(original behaviour). ### 9.5 Replace singletons with instance-owned state (partially done) @@ -1020,8 +1020,8 @@ This PR also absorbed PR 4 (§ 9.1) since switching from random UIDs to **Done.** `Project.load()` reads CIF files from the project directory, reconstructs structures, experiments, and analysis. Resolves alias `param_unique_name` strings back to live `Parameter` references. -Integration tests verify save → load → parameter comparison and -save → load → fit → χ² comparison. +Integration tests verify save → load → parameter comparison and save → +load → fit → χ² comparison. ### Sequential-fitting prerequisite PRs @@ -1099,15 +1099,15 @@ temporary directory (original behaviour). > CSV writing, crash recovery, parameter propagation. **Done.** Full implementation in `analysis/sequential.py`: -`SequentialFitTemplate` dataclass, `_fit_worker()` module-level function, -CSV helpers (`_build_csv_header`, `_write_csv_header`, `_append_to_csv`, -`_read_csv_for_recovery`), `_build_template()`, chunk-based processing -with parameter propagation, `extract_diffrn` callback support, progress -reporting. Five integration tests in `test_sequential.py`: CSV -production, crash recovery, parameter propagation, diffrn callback, -precondition validation. +`SequentialFitTemplate` dataclass, `_fit_worker()` module-level +function, CSV helpers (`_build_csv_header`, `_write_csv_header`, +`_append_to_csv`, `_read_csv_for_recovery`), `_build_template()`, +chunk-based processing with parameter propagation, `extract_diffrn` +callback support, progress reporting. Five integration tests in +`test_sequential.py`: CSV production, crash recovery, parameter +propagation, diffrn callback, precondition validation. -#### PR 10 — Update plot_param_series to read from CSV +#### PR 10 — Update plot_param_series to read from CSV ✅ > **Title:** `Unify plot_param_series to always read from CSV` > @@ -1117,7 +1117,13 @@ precondition validation. > and existing `fit()` single-mode (Phase 4). Remove the old > `_parameter_snapshots` dict. -#### PR 11 — Parallel fitting (max_workers > 1) +**Implemented:** `Plotter.plot_param_series()` reads CSV via pandas. +`Plotter.plot_param_series_from_snapshots()` preserves backward +compatibility for `fit()` single-mode (no CSV yet). `Project.plot_param_series()` +tries CSV first, falls back to snapshots. Axis labels derived from live +descriptor objects. + +#### PR 11 — Parallel fitting (max_workers > 1) ← next > **Title:** `Add multiprocessing support to fit_sequential` > @@ -1168,9 +1174,9 @@ PR 1 (issue #7: eliminate dummy Experiments) ✅ └─► PR 6 (CIF round-trip test) ✅ ├─► PR 7 (analysis.cif → analysis/) ✅ │ └─► PR 9 (streaming sequential fit) ✅ - │ ├─► PR 10 (plot from CSV) ← next + │ ├─► PR 10 (plot from CSV) ✅ │ │ └─► PR 13 (CSV for existing fit) - │ └─► PR 11 (parallel fitting) + │ └─► PR 11 (parallel fitting) ← next │ └─► PR 14 (optional: parallel fit()) └─► PR 8 (zip destination) ✅ └─► PR 12 (dataset replay) @@ -1191,13 +1197,13 @@ are all stdlib. ### Risks -| Risk | Mitigation | -| ------------------------------------------------ | ------------------------------------------------------------- | -| CIF round-trip loses information | ✅ PR 3 (load) + PR 6 (round-trip test) verified | -| CIF collection truncation at 20 rows | ✅ PR 5 fixed (default `max_display=None`) | -| Worker memory leak (large N, long-running pool) | Use `max_tasks_per_child=100` on the pool (PR 11) | -| Pickling failures for SequentialFitTemplate | ✅ Keep it a plain dataclass with only str/dict/list fields | -| crysfml Fortran global state in forked processes | Enforced `spawn` context avoids fork issues (PR 11) | +| Risk | Mitigation | +| ------------------------------------------------ | ----------------------------------------------------------- | +| CIF round-trip loses information | ✅ PR 3 (load) + PR 6 (round-trip test) verified | +| CIF collection truncation at 20 rows | ✅ PR 5 fixed (default `max_display=None`) | +| Worker memory leak (large N, long-running pool) | Use `max_tasks_per_child=100` on the pool (PR 11) | +| Pickling failures for SequentialFitTemplate | ✅ Keep it a plain dataclass with only str/dict/list fields | +| crysfml Fortran global state in forked processes | Enforced `spawn` context avoids fork issues (PR 11) | ### Resolved open issues (now prerequisites) — all done ✅ @@ -1234,9 +1240,9 @@ are all stdlib. | CSV contents | Fit metrics + diffrn metadata + all free param values/uncert | ✅ | | Metadata extraction | User-provided `extract_diffrn` callback, not hidden in lib | ✅ | | Crash recovery | Read existing CSV, skip fitted files, resume | ✅ | -| Plotting | Unified `plot_param_series()` always reads from CSV | PR 10 | +| Plotting | Unified `plot_param_series()` always reads from CSV | ✅ | | Configuration | `max_workers` + `data_dir` on `fit_sequential()` | ✅ | | Project layout | `analysis.cif` moves into `analysis/` directory | ✅ | | Singletons | `UidMapHandler` eliminated; `ConstraintsHandler` stays singleton but always synced | ✅ | | New dependencies | None (stdlib only) | ✅ | -| First step | PRs 1–9 done; PRs 10–14 remaining | ✅ | +| First step | PRs 1–10 done; PRs 11–14 remaining | ✅ | diff --git a/src/easydiffraction/display/plotting.py b/src/easydiffraction/display/plotting.py index 5b010ea4..92a3a031 100644 --- a/src/easydiffraction/display/plotting.py +++ b/src/easydiffraction/display/plotting.py @@ -570,6 +570,79 @@ def plot_meas_vs_calc( ) def plot_param_series( + self, + csv_path: str, + unique_name: str, + param_descriptor: object, + versus_descriptor: object | None = None, + ) -> None: + """ + Plot a parameter's value across sequential fit results. + + Reads data from the CSV file at *csv_path*. The y-axis values + come from the column named *unique_name*, uncertainties from + ``{unique_name}.uncertainty``. When *versus_descriptor* is + provided, the x-axis uses the corresponding ``diffrn.{name}`` + column; otherwise the row index is used. + + Axis labels are derived from the live descriptor objects + (*param_descriptor* and *versus_descriptor*), which carry + ``.description`` and ``.units`` attributes. + + Parameters + ---------- + csv_path : str + Path to the ``results.csv`` file. + unique_name : str + Unique name of the parameter to plot (CSV column key). + param_descriptor : object + The live parameter descriptor (for axis label / units). + versus_descriptor : object | None, default=None + A diffrn descriptor whose ``.name`` maps to a + ``diffrn.{name}`` CSV column. ``None`` → use row index. + """ + df = pd.read_csv(csv_path) + + if unique_name not in df.columns: + log.warning( + f"Parameter '{unique_name}' not found in CSV columns. " + f'Available: {list(df.columns)}' + ) + return + + y = df[unique_name].astype(float).tolist() + uncert_col = f'{unique_name}.uncertainty' + sy = df[uncert_col].astype(float).tolist() if uncert_col in df.columns else [0.0] * len(y) + + # X-axis: diffrn column or row index + versus_name = versus_descriptor.name if versus_descriptor is not None else None + diffrn_col = f'diffrn.{versus_name}' if versus_name else None + + if diffrn_col and diffrn_col in df.columns: + x = pd.to_numeric(df[diffrn_col], errors='coerce').tolist() + x_label = getattr(versus_descriptor, 'description', None) or versus_name + if hasattr(versus_descriptor, 'units') and versus_descriptor.units: + x_label = f'{x_label} ({versus_descriptor.units})' + else: + x = list(range(1, len(y) + 1)) + x_label = 'Experiment No.' + + # Y-axis label from descriptor + param_units = getattr(param_descriptor, 'units', '') + y_label = f'Parameter value ({param_units})' if param_units else 'Parameter value' + + title = f"Parameter '{unique_name}' across fit results" + + self._backend.plot_scatter( + x=x, + y=y, + sy=sy, + axes_labels=[x_label, y_label], + title=title, + height=self.height, + ) + + def plot_param_series_from_snapshots( self, unique_name: str, versus_name: str | None, @@ -577,21 +650,22 @@ def plot_param_series( parameter_snapshots: dict[str, dict[str, dict]], ) -> None: """ - Plot a parameter's value across sequential fit results. + Plot a parameter's value from in-memory snapshots. + + This is a backward-compatibility method used when no CSV file is + available (e.g. after ``fit()`` in single mode, before PR 13 + adds CSV output to the existing fit loop). Parameters ---------- unique_name : str Unique name of the parameter to plot. versus_name : str | None - Name of the diffrn descriptor to use as the x-axis (e.g. - ``'ambient_temperature'``). When ``None``, the experiment - sequence index is used instead. + Name of the diffrn descriptor for the x-axis. experiments : object Experiments collection for accessing diffrn conditions. parameter_snapshots : dict[str, dict[str, dict]] - Per-experiment parameter value snapshots keyed by experiment - name, then by parameter unique name. + Per-experiment parameter value snapshots. """ x = [] y = [] diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index cb3a90fc..fe3db57d 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -480,6 +480,11 @@ def plot_param_series(self, param: object, versus: object | None = None) -> None """ Plot a parameter's value across sequential fit results. + When a ``results.csv`` file exists in the project's + ``analysis/`` directory, data is read from CSV. Otherwise, + falls back to in-memory parameter snapshots (produced by + ``fit()`` in single mode). + Parameters ---------- param : object @@ -492,10 +497,27 @@ def plot_param_series(self, param: object, versus: object | None = None) -> None experiment sequence number is used instead. """ unique_name = param.unique_name - versus_name = versus.name if versus is not None else None - self.plotter.plot_param_series( - unique_name, - versus_name, - self.experiments, - self.analysis._parameter_snapshots, - ) + + # Try CSV first (produced by fit_sequential or future fit) + csv_path = None + if self.info.path is not None: + candidate = pathlib.Path(self.info.path) / 'analysis' / 'results.csv' + if candidate.is_file(): + csv_path = str(candidate) + + if csv_path is not None: + self.plotter.plot_param_series( + csv_path=csv_path, + unique_name=unique_name, + param_descriptor=param, + versus_descriptor=versus, + ) + else: + # Fallback: in-memory snapshots from fit() single mode + versus_name = versus.name if versus is not None else None + self.plotter.plot_param_series_from_snapshots( + unique_name, + versus_name, + self.experiments, + self.analysis._parameter_snapshots, + ) From 0a6bd3bf2ed517e2cfdde403b5cfec24a24b5bd1 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 17:30:31 +0200 Subject: [PATCH 15/51] Add multiprocessing support to fit_sequential --- .../architecture/sequential_fitting_design.md | 29 +++--- src/easydiffraction/analysis/analysis.py | 3 +- src/easydiffraction/analysis/sequential.py | 88 ++++++++++++------- tests/integration/fitting/test_sequential.py | 34 +++++++ 4 files changed, 108 insertions(+), 46 deletions(-) diff --git a/docs/architecture/sequential_fitting_design.md b/docs/architecture/sequential_fitting_design.md index 183b01fc..9b844980 100644 --- a/docs/architecture/sequential_fitting_design.md +++ b/docs/architecture/sequential_fitting_design.md @@ -1,6 +1,6 @@ # Sequential Fitting — Architecture Design -**Status:** Implementation in progress (PRs 1–10 complete, PRs 11–14 +**Status:** Implementation in progress (PRs 1–11 complete, PRs 12–14 remaining) **Date:** 2026-04-02 (updated 2026-04-03) --- @@ -1119,11 +1119,11 @@ propagation, diffrn callback, precondition validation. **Implemented:** `Plotter.plot_param_series()` reads CSV via pandas. `Plotter.plot_param_series_from_snapshots()` preserves backward -compatibility for `fit()` single-mode (no CSV yet). `Project.plot_param_series()` -tries CSV first, falls back to snapshots. Axis labels derived from live -descriptor objects. +compatibility for `fit()` single-mode (no CSV yet). +`Project.plot_param_series()` tries CSV first, falls back to snapshots. +Axis labels derived from live descriptor objects. -#### PR 11 — Parallel fitting (max_workers > 1) ← next +#### PR 11 — Parallel fitting (max_workers > 1) ✅ > **Title:** `Add multiprocessing support to fit_sequential` > @@ -1133,6 +1133,13 @@ descriptor objects. > `max_workers='auto'` support (`os.cpu_count()`). Integration test: > parallel sequential fit (10 files, 2 workers). +**Implemented:** `ProcessPoolExecutor` with `mp.get_context('spawn')` +and `max_tasks_per_child=100` dispatches chunks in parallel when +`max_workers > 1`. Single-worker mode (`max_workers=1`) still calls +`_fit_worker` directly (no subprocess overhead). `max_workers='auto'` +resolves to `os.cpu_count()`. Integration test +`test_fit_sequential_parallel` verifies 2-worker parallel fitting. + ### Post-sequential PRs #### PR 12 — Dataset replay from CSV @@ -1175,8 +1182,8 @@ PR 1 (issue #7: eliminate dummy Experiments) ✅ ├─► PR 7 (analysis.cif → analysis/) ✅ │ └─► PR 9 (streaming sequential fit) ✅ │ ├─► PR 10 (plot from CSV) ✅ - │ │ └─► PR 13 (CSV for existing fit) - │ └─► PR 11 (parallel fitting) ← next + │ │ └─► PR 13 (CSV for existing fit) ← next + │ └─► PR 11 (parallel fitting) ✅ │ └─► PR 14 (optional: parallel fit()) └─► PR 8 (zip destination) ✅ └─► PR 12 (dataset replay) @@ -1201,9 +1208,9 @@ are all stdlib. | ------------------------------------------------ | ----------------------------------------------------------- | | CIF round-trip loses information | ✅ PR 3 (load) + PR 6 (round-trip test) verified | | CIF collection truncation at 20 rows | ✅ PR 5 fixed (default `max_display=None`) | -| Worker memory leak (large N, long-running pool) | Use `max_tasks_per_child=100` on the pool (PR 11) | +| Worker memory leak (large N, long-running pool) | ✅ `max_tasks_per_child=100` on the pool (PR 11) | | Pickling failures for SequentialFitTemplate | ✅ Keep it a plain dataclass with only str/dict/list fields | -| crysfml Fortran global state in forked processes | Enforced `spawn` context avoids fork issues (PR 11) | +| crysfml Fortran global state in forked processes | ✅ Enforced `spawn` context avoids fork issues (PR 11) | ### Resolved open issues (now prerequisites) — all done ✅ @@ -1230,7 +1237,7 @@ are all stdlib. | Aspect | Decision | Status | | ------------------- | ---------------------------------------------------------------------------------- | ------ | -| Parallelism backend | `concurrent.futures.ProcessPoolExecutor` with `spawn` | PR 11 | +| Parallelism backend | `concurrent.futures.ProcessPoolExecutor` with `spawn` | ✅ | | Worker isolation | Each worker creates a fresh `Project` — no shared state | ✅ | | Data source | `data_dir` argument; ZIP → extract first | ✅ | | Data flow | Template CIF + data path → worker → result dict → CSV | ✅ | @@ -1245,4 +1252,4 @@ are all stdlib. | Project layout | `analysis.cif` moves into `analysis/` directory | ✅ | | Singletons | `UidMapHandler` eliminated; `ConstraintsHandler` stays singleton but always synced | ✅ | | New dependencies | None (stdlib only) | ✅ | -| First step | PRs 1–10 done; PRs 11–14 remaining | ✅ | +| First step | PRs 1–11 done; PRs 12–14 remaining | ✅ | diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index 98517729..ce0c3304 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -748,7 +748,8 @@ def fit_sequential( Path to directory containing data files. max_workers : int | str, default=1 Number of parallel worker processes. ``1`` = sequential. - ``'auto'`` = physical CPU count. + ``'auto'`` = physical CPU count. Uses + ``ProcessPoolExecutor`` with ``spawn`` context when > 1. chunk_size : int | None, default=None Files per chunk. Default ``None`` uses *max_workers*. file_pattern : str, default='*' diff --git a/src/easydiffraction/analysis/sequential.py b/src/easydiffraction/analysis/sequential.py index 739fc1c4..412b5d10 100644 --- a/src/easydiffraction/analysis/sequential.py +++ b/src/easydiffraction/analysis/sequential.py @@ -8,6 +8,8 @@ import contextlib import csv +import multiprocessing as mp +from concurrent.futures import ProcessPoolExecutor from dataclasses import dataclass from dataclasses import replace from pathlib import Path @@ -22,7 +24,7 @@ from collections.abc import Callable # ------------------------------------------------------------------ -# Template dataclass (picklable for future multiprocessing) +# Template dataclass (picklable for ProcessPoolExecutor) # ------------------------------------------------------------------ @@ -32,7 +34,7 @@ class SequentialFitTemplate: Snapshot of everything a worker needs to recreate and fit a project. All fields are plain Python types (str, dict, list) so that the - template can be pickled for ``ProcessPoolExecutor`` in the future. + template can be pickled for ``ProcessPoolExecutor``. """ structure_cif: str @@ -544,8 +546,9 @@ def fit_sequential( data_dir : str Path to directory containing data files. max_workers : int | str, default=1 - Number of parallel worker processes. ``1`` = sequential. - ``'auto'`` = physical CPU count (future). + Number of parallel worker processes. ``1`` = sequential (no + subprocess overhead). ``'auto'`` = physical CPU count. Uses + ``ProcessPoolExecutor`` with ``spawn`` context when > 1. chunk_size : int | None, default=None Files per chunk. Default ``None`` uses ``max_workers``. file_pattern : str, default='*' @@ -644,36 +647,53 @@ def fit_sequential( f'{total_chunks} chunks (max_workers={max_workers})' ) - for chunk_idx, chunk in enumerate(chunks, start=1): - # Single-worker mode: call worker directly - results = [_fit_worker(template, path) for path in chunk] - - # Extract diffrn metadata in the main process - if extract_diffrn is not None: - for result in results: - try: - diffrn_values = extract_diffrn(result['file_path']) - for key, val in diffrn_values.items(): - result[f'diffrn.{key}'] = val - except Exception as exc: # noqa: BLE001 - log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') - - # Write to CSV - _append_to_csv(csv_path, header, results) - - # Report progress - _report_chunk_progress(chunk_idx, total_chunks, results, verb) - - # Propagate: use last successful file's - # params as starting values - last_ok = None - for r in reversed(results): - if r.get('fit_success') and r.get('params'): - last_ok = r - break - - if last_ok is not None: - template = replace(template, initial_params=last_ok['params']) + # Create a process pool for parallel dispatch, or a no-op context + # for single-worker mode (avoids process-spawn overhead). + if max_workers > 1: + spawn_ctx = mp.get_context('spawn') + pool_cm = ProcessPoolExecutor( + max_workers=max_workers, + mp_context=spawn_ctx, + max_tasks_per_child=100, + ) + else: + pool_cm = contextlib.nullcontext() + + with pool_cm as executor: + for chunk_idx, chunk in enumerate(chunks, start=1): + # Dispatch: parallel or sequential + if executor is not None: + templates = [template] * len(chunk) + results = list(executor.map(_fit_worker, templates, chunk)) + else: + results = [_fit_worker(template, path) for path in chunk] + + # Extract diffrn metadata in the main process + if extract_diffrn is not None: + for result in results: + try: + diffrn_values = extract_diffrn(result['file_path']) + for key, val in diffrn_values.items(): + result[f'diffrn.{key}'] = val + except Exception as exc: # noqa: BLE001 + log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') + + # Write to CSV + _append_to_csv(csv_path, header, results) + + # Report progress + _report_chunk_progress(chunk_idx, total_chunks, results, verb) + + # Propagate: use last successful file's + # params as starting values + last_ok = None + for r in reversed(results): + if r.get('fit_success') and r.get('params'): + last_ok = r + break + + if last_ok is not None: + template = replace(template, initial_params=last_ok['params']) if verb is not VerbosityEnum.SILENT: total_fitted = len(already_fitted) + len(remaining) diff --git a/tests/integration/fitting/test_sequential.py b/tests/integration/fitting/test_sequential.py index c88f3b4b..a018be9d 100644 --- a/tests/integration/fitting/test_sequential.py +++ b/tests/integration/fitting/test_sequential.py @@ -280,3 +280,37 @@ def test_fit_sequential_requires_one_experiment(tmp_path) -> None: with pytest.raises(ValueError, match='exactly 1 experiment'): project.analysis.fit_sequential(data_dir=str(tmp_path)) + + +# ------------------------------------------------------------------ +# Test 6: Parallel sequential fit (max_workers=2) +# ------------------------------------------------------------------ + + +def test_fit_sequential_parallel(tmp_path) -> None: + """fit_sequential with max_workers=2 produces correct CSV.""" + project, data_dir = _create_sequential_project(tmp_path) + + project.analysis.fit_sequential( + data_dir=data_dir, + max_workers=2, + verbosity='silent', + ) + + csv_path = project.info.path / 'analysis' / 'results.csv' + assert csv_path.is_file(), 'results.csv was not created' + + with csv_path.open() as f: + reader = csv.DictReader(f) + rows = list(reader) + + assert len(rows) == 3, f'Expected 3 rows, got {len(rows)}' + + for row in rows: + assert row['fit_success'] == 'True', f'Fit failed for {row["file_path"]}' + + # Parameter values should be present and reasonable + assert 'lbco.cell.length_a' in rows[0] + vals = [float(r['lbco.cell.length_a']) for r in rows] + for v in vals: + assert_almost_equal(v, vals[0], decimal=3) From 69accc15963c675cd4c5ee6de270d54db80ae7e4 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 18:02:49 +0200 Subject: [PATCH 16/51] Write results.csv from existing single-fit mode --- .../architecture/sequential_fitting_design.md | 20 ++++-- src/easydiffraction/analysis/analysis.py | 61 ++++++++++++++++++- 2 files changed, 72 insertions(+), 9 deletions(-) diff --git a/docs/architecture/sequential_fitting_design.md b/docs/architecture/sequential_fitting_design.md index 9b844980..1b47e90e 100644 --- a/docs/architecture/sequential_fitting_design.md +++ b/docs/architecture/sequential_fitting_design.md @@ -1,7 +1,7 @@ # Sequential Fitting — Architecture Design -**Status:** Implementation in progress (PRs 1–11 complete, PRs 12–14 -remaining) **Date:** 2026-04-02 (updated 2026-04-03) +**Status:** Implementation in progress (PRs 1–11, 13 complete; PRs 12, +14 remaining) **Date:** 2026-04-02 (updated 2026-04-03) --- @@ -1151,7 +1151,7 @@ resolves to `os.cpu_count()`. Integration test > reloads data from the file path in that row. Enables > `plot_meas_vs_calc()` for any previously fitted dataset. -#### PR 13 — CSV output for existing single-fit mode +#### PR 13 — CSV output for existing single-fit mode ✅ > **Title:** `Write results.csv from existing single-fit mode` > @@ -1160,6 +1160,14 @@ resolves to `os.cpu_count()`. Integration test > `fit_sequential`). This gives `ed-17.py`-style workflows persistent > CSV output and unified `plot_param_series()`. +**Implemented:** `Analysis.fit()` single-mode now writes +`analysis/results.csv` incrementally (one row per experiment) when the +project has been saved. Reuses `_META_COLUMNS`, `_write_csv_header`, and +`_append_to_csv` from `sequential.py`. Diffrn metadata and free +parameter values/uncertainties are written per row. The in-memory +`_parameter_snapshots` is kept for unsaved-project fallback. +`plot_param_series()` now uses CSV for saved projects automatically. + #### PR 14 (optional) — Parallel single-fit for pre-loaded experiments > **Title:** @@ -1182,11 +1190,11 @@ PR 1 (issue #7: eliminate dummy Experiments) ✅ ├─► PR 7 (analysis.cif → analysis/) ✅ │ └─► PR 9 (streaming sequential fit) ✅ │ ├─► PR 10 (plot from CSV) ✅ - │ │ └─► PR 13 (CSV for existing fit) ← next + │ │ └─► PR 13 (CSV for existing fit) ✅ │ └─► PR 11 (parallel fitting) ✅ │ └─► PR 14 (optional: parallel fit()) └─► PR 8 (zip destination) ✅ - └─► PR 12 (dataset replay) + └─► PR 12 (dataset replay) ← next ``` Note: PR 4 was absorbed into PR 2. PRs 5–8 are largely independent of @@ -1252,4 +1260,4 @@ are all stdlib. | Project layout | `analysis.cif` moves into `analysis/` directory | ✅ | | Singletons | `UidMapHandler` eliminated; `ConstraintsHandler` stays singleton but always synced | ✅ | | New dependencies | None (stdlib only) | ✅ | -| First step | PRs 1–11 done; PRs 12–14 remaining | ✅ | +| First step | PRs 1–11, 13 done; PRs 12, 14 remaining | ✅ | diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index ce0c3304..3f685b24 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: BSD-3-Clause from contextlib import suppress +from pathlib import Path import numpy as np import pandas as pd @@ -644,6 +645,44 @@ def fit(self, verbosity: str | None = None) -> None: expt_names = experiments.names num_expts = len(expt_names) + # CSV setup: write results if the project has been saved + csv_path = None + csv_header = None + csv_free_names = None + csv_diffrn_fields = None + if self.project.info.path is not None: + from easydiffraction.analysis.sequential import _META_COLUMNS # noqa: PLC0415 + from easydiffraction.analysis.sequential import _append_to_csv # noqa: PLC0415 + from easydiffraction.analysis.sequential import _write_csv_header # noqa: PLC0415 + + csv_path = Path(self.project.info.path) / 'analysis' / 'results.csv' + csv_path.parent.mkdir(parents=True, exist_ok=True) + + all_params = ( + self.project.structures.parameters + self.project.experiments.parameters + ) + csv_free_names = [ + p.unique_name + for p in all_params + if isinstance(p, Parameter) and not p.constrained and p.free + ] + + first_expt = list(experiments.values())[0] + csv_diffrn_fields = [] + if hasattr(first_expt, 'diffrn'): + csv_diffrn_fields = [ + p.name + for p in first_expt.diffrn.parameters + if hasattr(p, 'name') and p.name not in ('type',) + ] + + csv_header = list(_META_COLUMNS) + csv_header.extend(f'diffrn.{f}' for f in csv_diffrn_fields) + for name in csv_free_names: + csv_header.append(name) + csv_header.append(f'{name}.uncertainty') + _write_csv_header(csv_path, csv_header) + # Short mode: print header and create display handle once short_headers = ['experiment', 'χ²', 'iterations', 'status'] short_alignments = ['left', 'right', 'right', 'center'] @@ -689,6 +728,25 @@ def fit(self, verbosity: str | None = None) -> None: self._parameter_snapshots[expt_name] = snapshot self.fit_results = results + # Append row to CSV + if csv_path is not None: + row = { + 'file_path': expt_name, + 'fit_success': results.success, + 'chi_squared': results.chi_square, + 'reduced_chi_squared': results.reduced_chi_square, + 'n_iterations': (self.fitter.minimizer.tracker.best_iteration or 0), + } + if hasattr(experiment, 'diffrn') and csv_diffrn_fields: + for p in experiment.diffrn.parameters: + if hasattr(p, 'name') and p.name not in ('type',): + row[f'diffrn.{p.name}'] = p.value + for uname in csv_free_names: + if uname in snapshot: + row[uname] = snapshot[uname]['value'] + row[f'{uname}.uncertainty'] = snapshot[uname]['uncertainty'] + _append_to_csv(csv_path, csv_header, [row]) + # Short mode: append one summary row and update in-place if verb is VerbosityEnum.SHORT: chi2_str = ( @@ -716,9 +774,6 @@ def fit(self, verbosity: str | None = None) -> None: raise NotImplementedError(msg) # After fitting, save the project - # TODO: Consider saving individual data during sequential - # (single) fitting, instead of waiting until the end and save - # only the last one if self.project.info.path is not None: self.project.save() From 5a4e760dc4c7d55ed8a76b74e9e172c4a27a0210 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 18:37:10 +0200 Subject: [PATCH 17/51] Add apply_params_from_csv for dataset replay --- .../architecture/sequential_fitting_design.md | 15 ++-- src/easydiffraction/project/project.py | 80 +++++++++++++++++++ tests/integration/fitting/test_sequential.py | 57 +++++++++++++ 3 files changed, 147 insertions(+), 5 deletions(-) diff --git a/docs/architecture/sequential_fitting_design.md b/docs/architecture/sequential_fitting_design.md index 1b47e90e..0513c89f 100644 --- a/docs/architecture/sequential_fitting_design.md +++ b/docs/architecture/sequential_fitting_design.md @@ -1,7 +1,7 @@ # Sequential Fitting — Architecture Design -**Status:** Implementation in progress (PRs 1–11, 13 complete; PRs 12, -14 remaining) **Date:** 2026-04-02 (updated 2026-04-03) +**Status:** Implementation in progress (PRs 1–13 complete; PR 14 +optional) **Date:** 2026-04-02 (updated 2026-04-03) --- @@ -1142,7 +1142,7 @@ resolves to `os.cpu_count()`. Integration test ### Post-sequential PRs -#### PR 12 — Dataset replay from CSV +#### PR 12 — Dataset replay from CSV ✅ > **Title:** `Add apply_params_from_csv() for dataset replay` > @@ -1151,6 +1151,11 @@ resolves to `os.cpu_count()`. Integration test > reloads data from the file path in that row. Enables > `plot_meas_vs_calc()` for any previously fitted dataset. +**Implemented:** `Project.apply_params_from_csv(row_index)` reads a CSV +row, overrides parameter values and uncertainties, and reloads measured +data when `file_path` points to a real file (sequential-fit case). Three +integration tests: parameter override, missing CSV, out-of-range index. + #### PR 13 — CSV output for existing single-fit mode ✅ > **Title:** `Write results.csv from existing single-fit mode` @@ -1194,7 +1199,7 @@ PR 1 (issue #7: eliminate dummy Experiments) ✅ │ └─► PR 11 (parallel fitting) ✅ │ └─► PR 14 (optional: parallel fit()) └─► PR 8 (zip destination) ✅ - └─► PR 12 (dataset replay) ← next + └─► PR 12 (dataset replay) ✅ ``` Note: PR 4 was absorbed into PR 2. PRs 5–8 are largely independent of @@ -1260,4 +1265,4 @@ are all stdlib. | Project layout | `analysis.cif` moves into `analysis/` directory | ✅ | | Singletons | `UidMapHandler` eliminated; `ConstraintsHandler` stays singleton but always synced | ✅ | | New dependencies | None (stdlib only) | ✅ | -| First step | PRs 1–11, 13 done; PRs 12, 14 remaining | ✅ | +| First step | PRs 1–13 done; PR 14 optional | ✅ | diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index fe3db57d..31cd84ca 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -362,6 +362,86 @@ def save_as( self._info.path = dir_path self.save() + def apply_params_from_csv(self, row_index: int) -> None: + """ + Load a single CSV row and apply its parameters to the project. + + Reads the row at *row_index* from ``analysis/results.csv``, + overrides parameter values in the live project, and (for + sequential-fit results where ``file_path`` points to a real + file) reloads the measured data into the template experiment. + + After calling this method, ``plot_meas_vs_calc()`` will show the + fit for that specific dataset. + + Parameters + ---------- + row_index : int + 0-based row index in the CSV file. + + Raises + ------ + FileNotFoundError + If ``analysis/results.csv`` does not exist. + IndexError + If *row_index* is out of range. + """ + import pandas as pd # noqa: PLC0415 + + from easydiffraction.analysis.sequential import _META_COLUMNS # noqa: PLC0415 + from easydiffraction.core.variable import Parameter # noqa: PLC0415 + + if self.info.path is None: + msg = 'Project has no saved path. Save the project first.' + raise FileNotFoundError(msg) + + csv_path = pathlib.Path(self.info.path) / 'analysis' / 'results.csv' + if not csv_path.is_file(): + msg = f"Results CSV not found: '{csv_path}'" + raise FileNotFoundError(msg) + + df = pd.read_csv(csv_path) + if row_index < 0 or row_index >= len(df): + msg = f'Row index {row_index} out of range (CSV has {len(df)} rows).' + raise IndexError(msg) + + row = df.iloc[row_index] + + # 1. Reload data if file_path points to a real file + file_path = row.get('file_path', '') + if file_path and pathlib.Path(file_path).is_file(): + experiment = list(self.experiments.values())[0] + experiment._load_ascii_data_to_experiment(file_path) + + # 2. Override parameter values + all_params = self.structures.parameters + self.experiments.parameters + param_map = { + p.unique_name: p + for p in all_params + if isinstance(p, Parameter) and hasattr(p, 'unique_name') + } + + skip_cols = set(_META_COLUMNS) + for col_name in df.columns: + if col_name in skip_cols: + continue + if col_name.startswith('diffrn.'): + continue + if col_name.endswith('.uncertainty'): + continue + if col_name in param_map and pd.notna(row[col_name]): + param_map[col_name].value = float(row[col_name]) + + # 3. Apply uncertainties + for col_name in df.columns: + if not col_name.endswith('.uncertainty'): + continue + base_name = col_name.removesuffix('.uncertainty') + if base_name in param_map and pd.notna(row[col_name]): + param_map[base_name].uncertainty = float(row[col_name]) + + log.info(f'Applied parameters from CSV row {row_index} (file: {file_path}).') + # ------------------------------------------ # Plotting # ------------------------------------------ diff --git a/tests/integration/fitting/test_sequential.py b/tests/integration/fitting/test_sequential.py index a018be9d..59945e87 100644 --- a/tests/integration/fitting/test_sequential.py +++ b/tests/integration/fitting/test_sequential.py @@ -314,3 +314,60 @@ def test_fit_sequential_parallel(tmp_path) -> None: vals = [float(r['lbco.cell.length_a']) for r in rows] for v in vals: assert_almost_equal(v, vals[0], decimal=3) + + +# ------------------------------------------------------------------ +# Test 7: Dataset replay from CSV (apply_params_from_csv) +# ------------------------------------------------------------------ + + +def test_apply_params_from_csv_loads_data_and_params(tmp_path) -> None: + """apply_params_from_csv overrides params and reloads data.""" + project, data_dir = _create_sequential_project(tmp_path) + + project.analysis.fit_sequential( + data_dir=data_dir, + verbosity='silent', + ) + + csv_path = project.info.path / 'analysis' / 'results.csv' + with csv_path.open() as f: + rows = list(csv.DictReader(f)) + + # Read the expected cell_length_a from CSV row 1 + expected_a = float(rows[1]['lbco.cell.length_a']) + + # Apply params from row 1 + project.apply_params_from_csv(row_index=1) + + # Verify the parameter value was overridden + model = list(project.structures.values())[0] + assert_almost_equal(model.cell.length_a.value, expected_a, decimal=5) + + # Verify that the experiment has measured data loaded + # (from the file_path in that CSV row) + expt = list(project.experiments.values())[0] + assert expt.data.intensity_meas is not None + + +def test_apply_params_from_csv_raises_on_missing_csv(tmp_path) -> None: + """apply_params_from_csv raises if no CSV exists.""" + project = Project(name='no_csv') + project.save_as(str(tmp_path / 'proj')) + + with pytest.raises(FileNotFoundError, match='Results CSV not found'): + project.apply_params_from_csv(row_index=0) + + +def test_apply_params_from_csv_raises_on_bad_index(tmp_path) -> None: + """apply_params_from_csv raises on out-of-range index.""" + project, data_dir = _create_sequential_project(tmp_path) + + project.analysis.fit_sequential( + data_dir=data_dir, + verbosity='silent', + ) + + with pytest.raises(IndexError, match='out of range'): + project.apply_params_from_csv(row_index=99) + From 2d49d168aefd5ffd21874152666f7dfd5f6b6c22 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 21:08:47 +0200 Subject: [PATCH 18/51] Prevent spawn re-import of __main__ in parallel fit_sequential --- docs/docs/tutorials/ed-17.py | 170 +++++++++++-------- src/easydiffraction/analysis/analysis.py | 10 +- src/easydiffraction/analysis/sequential.py | 115 ++++++++----- tests/integration/fitting/test_sequential.py | 1 - 4 files changed, 184 insertions(+), 112 deletions(-) diff --git a/docs/docs/tutorials/ed-17.py b/docs/docs/tutorials/ed-17.py index 68be3ff0..f8cde1e7 100644 --- a/docs/docs/tutorials/ed-17.py +++ b/docs/docs/tutorials/ed-17.py @@ -3,14 +3,16 @@ # # This example demonstrates a Rietveld refinement of the Co2SiO4 crystal # structure using constant-wavelength neutron powder diffraction data -# from D20 at ILL. A sequential refinement of the same structure against -# a temperature scan is performed to show how to manage multiple -# experiments in a project. +# from D20 at ILL. A sequential refinement is performed against a +# temperature scan using `fit_sequential`, which processes each data +# file independently without loading all datasets into memory at once. # %% [markdown] # ## Import Library # %% +import pandas as pd + import easydiffraction as ed # %% [markdown] @@ -22,11 +24,11 @@ project = ed.Project() # %% [markdown] -# Set output verbosity level to "short" to show only one-line status -# messages during the analysis process. +# The project must be saved before running sequential fitting, so that +# results can be written to `analysis/results.csv`. # %% -project.verbosity = 'short' +project.save_as('data/cosio_project', temporary=False) # %% [markdown] # ## Step 2: Define Crystal Structure @@ -115,91 +117,88 @@ ) # %% [markdown] -# ## Step 3: Define Experiments +# ## Step 3: Define Template Experiment # -# This section shows how to add experiments, configure their parameters, -# and link the structures defined above. +# For sequential fitting, we create a single template experiment from +# the first data file. This template defines the instrument, peak +# profile, background, and linked phases that will be reused for every +# data file in the scan. # # #### Download Measured Data # %% -file_path = ed.download_data(id=27, destination='data') +zip_path = ed.download_data(id=27, destination='data') # %% [markdown] -# #### Create Experiments and Set Temperature +# #### Extract Data Files # %% -data_paths = ed.extract_data_paths_from_zip(file_path) -for i, data_path in enumerate(data_paths, start=1): - name = f'd20_{i}' - project.experiments.add_from_data_path( - name=name, - data_path=data_path, - ) - expt = project.experiments[name] - expt.diffrn.ambient_temperature = ed.extract_metadata( - file_path=data_path, - pattern=r'^TEMP\s+([0-9.]+)', - ) +data_dir = 'data/d20_scan' +data_paths = ed.extract_data_paths_from_zip(zip_path, destination=data_dir) + +# %% [markdown] +# #### Create Template Experiment from the First File + +# %% +project.experiments.add_from_data_path( + name='d20', + data_path=data_paths[0], +) +expt = project.experiments['d20'] # %% [markdown] # #### Set Instrument # %% -for expt in project.experiments: - expt.instrument.setup_wavelength = 1.87 - expt.instrument.calib_twotheta_offset = 0.29 +expt.instrument.setup_wavelength = 1.87 +expt.instrument.calib_twotheta_offset = 0.29 # %% [markdown] # #### Set Peak Profile # %% -for expt in project.experiments: - expt.peak.broad_gauss_u = 0.24 - expt.peak.broad_gauss_v = -0.53 - expt.peak.broad_gauss_w = 0.38 - expt.peak.broad_lorentz_y = 0.02 +expt.peak.broad_gauss_u = 0.24 +expt.peak.broad_gauss_v = -0.53 +expt.peak.broad_gauss_w = 0.38 +expt.peak.broad_lorentz_y = 0.02 # %% [markdown] # #### Set Excluded Regions # %% -for expt in project.experiments: - expt.excluded_regions.create(id='1', start=0, end=8) - expt.excluded_regions.create(id='2', start=150, end=180) +expt.excluded_regions.create(id='1', start=0, end=8) +expt.excluded_regions.create(id='2', start=150, end=180) # %% [markdown] # #### Set Background # %% -for expt in project.experiments: - expt.background.create(id='1', x=8, y=609) - expt.background.create(id='2', x=9, y=581) - expt.background.create(id='3', x=10, y=563) - expt.background.create(id='4', x=11, y=540) - expt.background.create(id='5', x=12, y=520) - expt.background.create(id='6', x=15, y=507) - expt.background.create(id='7', x=25, y=463) - expt.background.create(id='8', x=30, y=434) - expt.background.create(id='9', x=50, y=451) - expt.background.create(id='10', x=70, y=431) - expt.background.create(id='11', x=90, y=414) - expt.background.create(id='12', x=110, y=361) - expt.background.create(id='13', x=130, y=292) - expt.background.create(id='14', x=150, y=241) +expt.background.create(id='1', x=8, y=609) +expt.background.create(id='2', x=9, y=581) +expt.background.create(id='3', x=10, y=563) +expt.background.create(id='4', x=11, y=540) +expt.background.create(id='5', x=12, y=520) +expt.background.create(id='6', x=15, y=507) +expt.background.create(id='7', x=25, y=463) +expt.background.create(id='8', x=30, y=434) +expt.background.create(id='9', x=50, y=451) +expt.background.create(id='10', x=70, y=431) +expt.background.create(id='11', x=90, y=414) +expt.background.create(id='12', x=110, y=361) +expt.background.create(id='13', x=130, y=292) +expt.background.create(id='14', x=150, y=241) # %% [markdown] # #### Set Linked Phases # %% -for expt in project.experiments: - expt.linked_phases.create(id='cosio', scale=1.2) +expt.linked_phases.create(id='cosio', scale=1.2) # %% [markdown] # ## Step 4: Perform Analysis # # This section shows how to set free parameters, define constraints, -# and run the refinement. +# and run the sequential refinement. # %% [markdown] # #### Set Free Parameters @@ -229,18 +228,17 @@ structure.atom_sites['O3'].b_iso.free = True # %% -for expt in project.experiments: - expt.linked_phases['cosio'].scale.free = True +expt.linked_phases['cosio'].scale.free = True - expt.instrument.calib_twotheta_offset.free = True +expt.instrument.calib_twotheta_offset.free = True - expt.peak.broad_gauss_u.free = True - expt.peak.broad_gauss_v.free = True - expt.peak.broad_gauss_w.free = True - expt.peak.broad_lorentz_y.free = True +expt.peak.broad_gauss_u.free = True +expt.peak.broad_gauss_v.free = True +expt.peak.broad_gauss_w.free = True +expt.peak.broad_lorentz_y.free = True - for point in expt.background: - point.y.free = True +for point in expt.background: + point.y.free = True # %% [markdown] # #### Set Constraints @@ -265,25 +263,59 @@ expression='biso_Co2 = biso_Co1', ) +# %% [markdown] +# #### Run Single Fitting +# +# This is the fitting of the first dataset to optimize the initial +# parameters for the sequential fitting. This step is optional but can +# help with convergence and speed of the sequential fitting, especially +# if the initial parameters are far from optimal. + +# %% +project.analysis.fit() + +# %% [markdown] +# #### Run Sequential Fitting +# +# Define a callback that extracts the temperature from each data file. + + +# %% +def extract_diffrn(file_path): + temperature = ed.extract_metadata( + file_path=file_path, + pattern=r'^TEMP\s+([0-9.]+)', + ) + return {'ambient_temperature': temperature} + # %% [markdown] -# #### Set Fit Mode +# Set output verbosity level to "short" to show only one-line status +# messages during the analysis process. # %% -project.analysis.fit_mode.mode = 'single' +project.verbosity = 'short' # %% [markdown] -# #### Run Fitting +# Run the sequential fit over all data files in the scan directory. # %% -project.analysis.fit() +project.analysis.fit_sequential( + data_dir=data_dir, + extract_diffrn=extract_diffrn, + max_workers='auto', +) # %% [markdown] -# #### Plot Measured vs Calculated +# #### Replay a Dataset +# +# Apply fitted parameters from the last CSV row and plot the result. # %% -last_expt_name = project.experiments.names[-1] -project.plot_meas_vs_calc(expt_name=last_expt_name, show_residual=True) +csv_path = project.info.path / 'analysis' / 'results.csv' +n_rows = len(pd.read_csv(csv_path)) +project.apply_params_from_csv(row_index=n_rows - 1) +project.plot_meas_vs_calc(expt_name='d20', show_residual=True) # %% [markdown] # #### Plot Parameter Evolution @@ -291,7 +323,7 @@ # Define the quantity to use as the x-axis in the following plots. # %% -temperature = project.experiments[0].diffrn.ambient_temperature +temperature = expt.diffrn.ambient_temperature # %% [markdown] # Plot unit cell parameters vs. temperature. diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index 3f685b24..4218b480 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -688,12 +688,14 @@ def fit(self, verbosity: str | None = None) -> None: short_alignments = ['left', 'right', 'right', 'center'] short_rows: list[list[str]] = [] short_display_handle: object | None = None + if verb is not VerbosityEnum.SILENT: + console.paragraph('Standard fitting') if verb is VerbosityEnum.SHORT: first = expt_names[0] last = expt_names[-1] minimizer_name = self.fitter.selection - console.paragraph( - f"Using {num_expts} experiments 🔬 from '{first}' to " + console.print( + f"📋 Using {num_expts} experiments 🔬 from '{first}' to " f"'{last}' for '{mode.value}' fitting" ) console.print(f"🚀 Starting fit process with '{minimizer_name}'...") @@ -702,8 +704,8 @@ def fit(self, verbosity: str | None = None) -> None: for _idx, expt_name in enumerate(expt_names, start=1): if verb is VerbosityEnum.FULL: - console.paragraph( - f"Using experiment 🔬 '{expt_name}' for '{mode.value}' fitting" + console.print( + f"📋 Using experiment 🔬 '{expt_name}' for '{mode.value}' fitting" ) experiment = experiments[expt_name] diff --git a/src/easydiffraction/analysis/sequential.py b/src/easydiffraction/analysis/sequential.py index 412b5d10..9c3b45f6 100644 --- a/src/easydiffraction/analysis/sequential.py +++ b/src/easydiffraction/analysis/sequential.py @@ -9,6 +9,7 @@ import contextlib import csv import multiprocessing as mp +import sys from concurrent.futures import ProcessPoolExecutor from dataclasses import dataclass from dataclasses import replace @@ -18,6 +19,7 @@ from easydiffraction.io.ascii import extract_data_paths_from_dir from easydiffraction.utils.enums import VerbosityEnum +from easydiffraction.utils.logging import console from easydiffraction.utils.logging import log if TYPE_CHECKING: @@ -565,6 +567,13 @@ def fit_sequential( If preconditions are not met (e.g. multiple structures, missing project path, no free parameters). """ + # Guard against re-entry in spawned child processes. With the + # ``spawn`` multiprocessing context the child re-imports __main__, + # which re-executes the user script and would call fit_sequential + # again, causing infinite process spawning. + if mp.parent_process() is not None: + return + project = analysis.project verb = VerbosityEnum(verbosity if verbosity is not None else project.verbosity) @@ -642,14 +651,37 @@ def fit_sequential( total_chunks = len(chunks) if verb is not VerbosityEnum.SILENT: - print( - f'🚀 Sequential fitting: {len(remaining)} files in ' - f'{total_chunks} chunks (max_workers={max_workers})' + minimizer_name = analysis.fitter.selection + console.paragraph('Sequential fitting') + console.print(f"🚀 Starting fit process with '{minimizer_name}'...") + console.print( + f'📋 {len(remaining)} files in {total_chunks} chunks (max_workers={max_workers})' ) + console.print('📈 Goodness-of-fit (reduced χ²):') # Create a process pool for parallel dispatch, or a no-op context # for single-worker mode (avoids process-spawn overhead). + # + # When max_workers > 1 we use ``spawn`` context, which normally + # re-imports ``__main__`` in every child process. If the user runs + # a script without an ``if __name__ == '__main__':`` guard the + # whole script would re-execute in every worker, causing infinite + # process spawning. To prevent this we temporarily hide + # ``__main__.__file__`` and ``__main__.__spec__`` so that the spawn + # bootstrap has no path to re-import the script. ``_fit_worker`` + # lives in this module (not ``__main__``), so it is still resolved + # via normal pickle/import machinery. + _main_mod = sys.modules.get('__main__') + _main_file_bak = getattr(_main_mod, '__file__', None) + _main_spec_bak = getattr(_main_mod, '__spec__', None) + if max_workers > 1: + # Hide __main__ origin from spawn + if _main_mod is not None and _main_file_bak is not None: + _main_mod.__file__ = None # type: ignore[assignment] + if _main_mod is not None and _main_spec_bak is not None: + _main_mod.__spec__ = None + spawn_ctx = mp.get_context('spawn') pool_cm = ProcessPoolExecutor( max_workers=max_workers, @@ -659,41 +691,48 @@ def fit_sequential( else: pool_cm = contextlib.nullcontext() - with pool_cm as executor: - for chunk_idx, chunk in enumerate(chunks, start=1): - # Dispatch: parallel or sequential - if executor is not None: - templates = [template] * len(chunk) - results = list(executor.map(_fit_worker, templates, chunk)) - else: - results = [_fit_worker(template, path) for path in chunk] - - # Extract diffrn metadata in the main process - if extract_diffrn is not None: - for result in results: - try: - diffrn_values = extract_diffrn(result['file_path']) - for key, val in diffrn_values.items(): - result[f'diffrn.{key}'] = val - except Exception as exc: # noqa: BLE001 - log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') - - # Write to CSV - _append_to_csv(csv_path, header, results) - - # Report progress - _report_chunk_progress(chunk_idx, total_chunks, results, verb) - - # Propagate: use last successful file's - # params as starting values - last_ok = None - for r in reversed(results): - if r.get('fit_success') and r.get('params'): - last_ok = r - break - - if last_ok is not None: - template = replace(template, initial_params=last_ok['params']) + try: + with pool_cm as executor: + for chunk_idx, chunk in enumerate(chunks, start=1): + # Dispatch: parallel or sequential + if executor is not None: + templates = [template] * len(chunk) + results = list(executor.map(_fit_worker, templates, chunk)) + else: + results = [_fit_worker(template, path) for path in chunk] + + # Extract diffrn metadata in the main process + if extract_diffrn is not None: + for result in results: + try: + diffrn_values = extract_diffrn(result['file_path']) + for key, val in diffrn_values.items(): + result[f'diffrn.{key}'] = val + except Exception as exc: # noqa: BLE001 + log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') + + # Write to CSV + _append_to_csv(csv_path, header, results) + + # Report progress + _report_chunk_progress(chunk_idx, total_chunks, results, verb) + + # Propagate: use last successful file's + # params as starting values + last_ok = None + for r in reversed(results): + if r.get('fit_success') and r.get('params'): + last_ok = r + break + + if last_ok is not None: + template = replace(template, initial_params=last_ok['params']) + finally: + # Restore __main__ attributes + if _main_mod is not None and _main_file_bak is not None: + _main_mod.__file__ = _main_file_bak + if _main_mod is not None and _main_spec_bak is not None: + _main_mod.__spec__ = _main_spec_bak if verb is not VerbosityEnum.SILENT: total_fitted = len(already_fitted) + len(remaining) diff --git a/tests/integration/fitting/test_sequential.py b/tests/integration/fitting/test_sequential.py index 59945e87..12c14bea 100644 --- a/tests/integration/fitting/test_sequential.py +++ b/tests/integration/fitting/test_sequential.py @@ -370,4 +370,3 @@ def test_apply_params_from_csv_raises_on_bad_index(tmp_path) -> None: with pytest.raises(IndexError, match='out of range'): project.apply_params_from_csv(row_index=99) - From 8b209df50695d8afb38323ea2a1ff49ea45be7d4 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 22:08:43 +0200 Subject: [PATCH 19/51] Support negative indexing and force recalc in apply_params_from_csv --- docs/docs/tutorials/ed-17.py | 25 ++++++++++--------------- src/easydiffraction/project/project.py | 20 +++++++++++++++++--- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/docs/docs/tutorials/ed-17.py b/docs/docs/tutorials/ed-17.py index f8cde1e7..eb8bcd2a 100644 --- a/docs/docs/tutorials/ed-17.py +++ b/docs/docs/tutorials/ed-17.py @@ -11,8 +11,6 @@ # ## Import Library # %% -import pandas as pd - import easydiffraction as ed # %% [markdown] @@ -259,9 +257,7 @@ # Set constraints. # %% -project.analysis.constraints.create( - expression='biso_Co2 = biso_Co1', -) +project.analysis.constraints.create(expression='biso_Co2 = biso_Co1') # %% [markdown] # #### Run Single Fitting @@ -277,6 +273,14 @@ # %% [markdown] # #### Run Sequential Fitting # +# Set output verbosity level to "short" to show only one-line status +# messages during the analysis process. + +# %% +project.verbosity = 'short' + +# %% [markdown] +# # Define a callback that extracts the temperature from each data file. @@ -289,13 +293,6 @@ def extract_diffrn(file_path): return {'ambient_temperature': temperature} -# %% [markdown] -# Set output verbosity level to "short" to show only one-line status -# messages during the analysis process. - -# %% -project.verbosity = 'short' - # %% [markdown] # Run the sequential fit over all data files in the scan directory. @@ -312,9 +309,7 @@ def extract_diffrn(file_path): # Apply fitted parameters from the last CSV row and plot the result. # %% -csv_path = project.info.path / 'analysis' / 'results.csv' -n_rows = len(pd.read_csv(csv_path)) -project.apply_params_from_csv(row_index=n_rows - 1) +project.apply_params_from_csv(row_index=-1) project.plot_meas_vs_calc(expt_name='d20', show_residual=True) # %% [markdown] diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index 31cd84ca..5bc96e79 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -377,7 +377,8 @@ def apply_params_from_csv(self, row_index: int) -> None: Parameters ---------- row_index : int - 0-based row index in the CSV file. + Row index in the CSV file. Supports Python-style negative + indexing (e.g. ``-1`` for the last row). Raises ------ @@ -401,8 +402,14 @@ def apply_params_from_csv(self, row_index: int) -> None: raise FileNotFoundError(msg) df = pd.read_csv(csv_path) - if row_index < 0 or row_index >= len(df): - msg = f'Row index {row_index} out of range (CSV has {len(df)} rows).' + n_rows = len(df) + + # Support Python-style negative indexing + if row_index < 0: + row_index += n_rows + + if row_index < 0 or row_index >= n_rows: + msg = f'Row index {row_index} out of range (CSV has {n_rows} rows).' raise IndexError(msg) row = df.iloc[row_index] @@ -440,6 +447,13 @@ def apply_params_from_csv(self, row_index: int) -> None: if base_name in param_map and pd.notna(row[col_name]): param_map[base_name].uncertainty = float(row[col_name]) + # 4. Force recalculation: data was replaced directly (bypassing + # value setters), so the dirty flag may not be set. + for structure in self.structures: + structure._need_categories_update = True + for experiment in self.experiments.values(): + experiment._need_categories_update = True + log.info(f'Applied parameters from CSV row {row_index} (file: {file_path}).') # ------------------------------------------ From e88d18d1d81a3d92bab597088772d9b29a1ef6d8 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 23:04:34 +0200 Subject: [PATCH 20/51] Add extract_project_from_zip helper function --- docs/docs/tutorials/ed-18.py | 65 +++++++-------------- src/easydiffraction/__init__.py | 1 + src/easydiffraction/io/__init__.py | 1 + src/easydiffraction/io/ascii.py | 56 ++++++++++++++++++ src/easydiffraction/utils/utils.py | 2 +- tests/unit/easydiffraction/io/test_ascii.py | 56 +++++++++++++++++- 6 files changed, 134 insertions(+), 47 deletions(-) diff --git a/docs/docs/tutorials/ed-18.py b/docs/docs/tutorials/ed-18.py index b883e9d0..3d6cfca4 100644 --- a/docs/docs/tutorials/ed-18.py +++ b/docs/docs/tutorials/ed-18.py @@ -5,80 +5,55 @@ # how to load a previously saved project from a directory and run # refinement — all in just a few lines of code. # -# The project is first created and saved as a setup step (this would -# normally be done once and the directory would already exist on disk). -# Then the saved project is loaded back and fitted. -# # For details on how to define structures and experiments, see the other # tutorials. # %% [markdown] -# ## Import Library +# ## Import Modules # %% -import easydiffraction as ed +from easydiffraction import Project +from easydiffraction import download_data +from easydiffraction import extract_project_from_zip # %% [markdown] -# ## Setup: Create and Save a Project -# -# This step creates a project from CIF files and saves it to a -# directory. In practice, the project directory would already exist -# on disk from a previous session. +# ## Download Project Archive # %% -# Create a project from CIF files -project = ed.Project() -project.structures.add_from_cif_path(ed.download_data(id=1, destination='data')) -project.experiments.add_from_cif_path(ed.download_data(id=2, destination='data')) +zip_path = download_data(id=28, destination='data') -# %% -project.analysis.aliases.create( - label='biso_La', - param=project.structures['lbco'].atom_sites['La'].b_iso, -) -project.analysis.aliases.create( - label='biso_Ba', - param=project.structures['lbco'].atom_sites['Ba'].b_iso, -) - -project.analysis.aliases.create( - label='occ_La', - param=project.structures['lbco'].atom_sites['La'].occupancy, -) -project.analysis.aliases.create( - label='occ_Ba', - param=project.structures['lbco'].atom_sites['Ba'].occupancy, -) - -project.analysis.constraints.create(expression='biso_Ba = biso_La') -project.analysis.constraints.create(expression='occ_Ba = 1 - occ_La') - -project.structures['lbco'].atom_sites['La'].occupancy.free = True +# %% [markdown] +# ## Extract Project # %% -# Save to a directory -project.save_as('lbco_project') +project_dir = extract_project_from_zip('lbco_project.zip', destination='data') # %% [markdown] -# ## Step 1: Load Project from Directory +# ## Load Project # %% -project = ed.Project.load('lbco_project') +project = Project.load(project_dir) # %% [markdown] -# ## Step 2: Perform Analysis +# ## Perform Analysis # %% project.analysis.fit() +# %% [markdown] +# ## Show Results + # %% project.analysis.show_fit_results() +# %% [markdown] +# ## Plot Meas vs Calc + # %% project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% [markdown] -# ## Step 3: Show Project Summary +# ## Save Project # %% -project.summary.show_report() +project.save() diff --git a/src/easydiffraction/__init__.py b/src/easydiffraction/__init__.py index 10308402..11ea117c 100644 --- a/src/easydiffraction/__init__.py +++ b/src/easydiffraction/__init__.py @@ -6,6 +6,7 @@ from easydiffraction.io.ascii import extract_data_paths_from_dir from easydiffraction.io.ascii import extract_data_paths_from_zip from easydiffraction.io.ascii import extract_metadata +from easydiffraction.io.ascii import extract_project_from_zip from easydiffraction.project.project import Project from easydiffraction.utils.logging import Logger from easydiffraction.utils.logging import console diff --git a/src/easydiffraction/io/__init__.py b/src/easydiffraction/io/__init__.py index 6ce45a95..4d0c1560 100644 --- a/src/easydiffraction/io/__init__.py +++ b/src/easydiffraction/io/__init__.py @@ -4,4 +4,5 @@ from easydiffraction.io.ascii import extract_data_paths_from_dir from easydiffraction.io.ascii import extract_data_paths_from_zip from easydiffraction.io.ascii import extract_metadata +from easydiffraction.io.ascii import extract_project_from_zip from easydiffraction.io.ascii import load_numeric_block diff --git a/src/easydiffraction/io/ascii.py b/src/easydiffraction/io/ascii.py index 75ec0fcb..45061787 100644 --- a/src/easydiffraction/io/ascii.py +++ b/src/easydiffraction/io/ascii.py @@ -13,6 +13,62 @@ import numpy as np +def extract_project_from_zip( + zip_path: str | Path, + destination: str | Path | None = None, +) -> str: + """ + Extract a project directory from a ZIP archive. + + The archive must contain exactly one directory with a + ``project.cif`` file. Files are extracted into *destination* when + provided, or into a temporary directory that persists for the + lifetime of the process. + + Parameters + ---------- + zip_path : str | Path + Path to the ZIP archive containing the project. + destination : str | Path | None, default=None + Directory to extract into. When ``None``, a temporary directory + is created. + + Returns + ------- + str + Absolute path to the extracted project directory (the directory + that contains ``project.cif``). + + Raises + ------ + FileNotFoundError + If *zip_path* does not exist. + ValueError + If the archive does not contain a ``project.cif`` file. + """ + zip_path = Path(zip_path) + if not zip_path.exists(): + msg = f'ZIP file not found: {zip_path}' + raise FileNotFoundError(msg) + + if destination is not None: + extract_dir = Path(destination) + extract_dir.mkdir(parents=True, exist_ok=True) + else: + extract_dir = Path(tempfile.mkdtemp(prefix='ed_zip_')) + + with zipfile.ZipFile(zip_path, 'r') as zf: + zf.extractall(extract_dir) + + # Find the project directory (the one containing project.cif) + project_cifs = list(extract_dir.rglob('project.cif')) + if not project_cifs: + msg = f'No project.cif found in ZIP archive: {zip_path}' + raise ValueError(msg) + + return str(project_cifs[0].parent.resolve()) + + def extract_data_paths_from_zip( zip_path: str | Path, destination: str | Path | None = None, diff --git a/src/easydiffraction/utils/utils.py b/src/easydiffraction/utils/utils.py index a54038fc..50af41fe 100644 --- a/src/easydiffraction/utils/utils.py +++ b/src/easydiffraction/utils/utils.py @@ -73,7 +73,7 @@ def _fetch_data_index() -> dict: _validate_url(index_url) # macOS: sha256sum index.json - index_hash = 'sha256:f421aab32ec532782dc62f4440a97320e5cec23b9e64f5ae3f8a3e818d013430' + index_hash = 'sha256:1032db0c04ef713c3f5209020a14b18dcdc3cfa4d995664ae5c9f5096f4508d4' destination_dirname = 'easydiffraction' destination_fname = 'data-index.json' cache_dir = pooch.os_cache(destination_dirname) diff --git a/tests/unit/easydiffraction/io/test_ascii.py b/tests/unit/easydiffraction/io/test_ascii.py index 8519c6d7..1410f8e9 100644 --- a/tests/unit/easydiffraction/io/test_ascii.py +++ b/tests/unit/easydiffraction/io/test_ascii.py @@ -1,6 +1,6 @@ # SPDX-FileCopyrightText: 2026 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause -"""Tests for extract_data_paths_from_zip and extract_data_paths_from_dir.""" +"""Tests for extract_project_from_zip, extract_data_paths_from_zip and extract_data_paths_from_dir.""" from __future__ import annotations @@ -10,6 +10,60 @@ from easydiffraction.io.ascii import extract_data_paths_from_dir from easydiffraction.io.ascii import extract_data_paths_from_zip +from easydiffraction.io.ascii import extract_project_from_zip + + +class TestExtractProjectFromZip: + """Tests for extract_project_from_zip.""" + + def test_extracts_project_dir(self, tmp_path): + """Returns path to the directory containing project.cif.""" + zip_path = tmp_path / 'proj.zip' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('my_project/project.cif', 'data_project\n') + zf.writestr('my_project/structures/struct.cif', 'data_struct\n') + + result = extract_project_from_zip(zip_path, destination=tmp_path / 'out') + + assert result.endswith('my_project') + assert (tmp_path / 'out' / 'my_project' / 'project.cif').is_file() + + def test_extracts_to_temp_dir_by_default(self, tmp_path): + """Without destination, files go to a temp directory.""" + zip_path = tmp_path / 'proj.zip' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('myproj/project.cif', 'data_project\n') + + result = extract_project_from_zip(zip_path) + + assert 'myproj' in result + assert 'project.cif' not in result # returns parent dir, not file + + def test_raises_file_not_found(self, tmp_path): + """Raises FileNotFoundError for missing ZIP path.""" + with pytest.raises(FileNotFoundError): + extract_project_from_zip(tmp_path / 'missing.zip') + + def test_raises_value_error_no_project_cif(self, tmp_path): + """Raises ValueError when ZIP has no project.cif.""" + zip_path = tmp_path / 'bad.zip' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('data.dat', '1 2 3\n') + + with pytest.raises(ValueError, match='No project.cif found'): + extract_project_from_zip(zip_path) + + def test_destination_creates_directory(self, tmp_path): + """Destination directory is created if it does not exist.""" + zip_path = tmp_path / 'proj.zip' + dest = tmp_path / 'nested' / 'output' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('proj/project.cif', 'data\n') + + result = extract_project_from_zip(zip_path, destination=dest) + + assert dest.is_dir() + assert 'proj' in result class TestExtractDataPathsFromZip: From dcaef627bd1dbd3298941b221850926ffef7dde3 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 23:23:10 +0200 Subject: [PATCH 21/51] Refactor extract_project_from_zip call to use zip_path variable --- docs/docs/tutorials/ed-18.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/tutorials/ed-18.py b/docs/docs/tutorials/ed-18.py index 3d6cfca4..f4485dbc 100644 --- a/docs/docs/tutorials/ed-18.py +++ b/docs/docs/tutorials/ed-18.py @@ -26,7 +26,7 @@ # ## Extract Project # %% -project_dir = extract_project_from_zip('lbco_project.zip', destination='data') +project_dir = extract_project_from_zip(zip_path, destination='data') # %% [markdown] # ## Load Project From ed22516720f3843d0633933c61c405cef7106da5 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Fri, 3 Apr 2026 23:23:33 +0200 Subject: [PATCH 22/51] Refactor notebook cell IDs and update project loading process --- docs/docs/tutorials/ed-1.ipynb | 53 ++-- docs/docs/tutorials/ed-10.ipynb | 55 ++-- docs/docs/tutorials/ed-11.ipynb | 61 ++--- docs/docs/tutorials/ed-12.ipynb | 69 ++---- docs/docs/tutorials/ed-13.ipynb | 427 +++++++++++++++----------------- docs/docs/tutorials/ed-14.ipynb | 77 +++--- docs/docs/tutorials/ed-15.ipynb | 73 +++--- docs/docs/tutorials/ed-16.ipynb | 133 +++++----- docs/docs/tutorials/ed-17.ipynb | 329 +++++++++++++----------- docs/docs/tutorials/ed-18.ipynb | 122 ++++----- docs/docs/tutorials/ed-2.ipynb | 67 ++--- docs/docs/tutorials/ed-3.ipynb | 365 +++++++++++++-------------- docs/docs/tutorials/ed-4.ipynb | 139 +++++------ docs/docs/tutorials/ed-5.ipynb | 119 ++++----- docs/docs/tutorials/ed-6.ipynb | 175 ++++++------- docs/docs/tutorials/ed-7.ipynb | 163 ++++++------ docs/docs/tutorials/ed-8.ipynb | 133 +++++----- docs/docs/tutorials/ed-9.ipynb | 143 +++++------ 18 files changed, 1239 insertions(+), 1464 deletions(-) diff --git a/docs/docs/tutorials/ed-1.ipynb b/docs/docs/tutorials/ed-1.ipynb index 830d7889..3b8085bb 100644 --- a/docs/docs/tutorials/ed-1.ipynb +++ b/docs/docs/tutorials/ed-1.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "74c72059", + "id": "e1c6f514", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: LBCO, HRPT\n", @@ -62,7 +47,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -71,7 +56,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -80,7 +65,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Step 1: Define Project" @@ -89,7 +74,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +84,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "## Step 2: Define Crystal Structure" @@ -108,7 +93,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -119,7 +104,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -128,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "## Step 3: Define Experiment" @@ -137,7 +122,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -148,7 +133,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -157,7 +142,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Step 4: Perform Analysis" @@ -166,7 +151,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -178,7 +163,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -189,7 +174,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -199,7 +184,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -208,7 +193,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "16", "metadata": {}, "source": [ "## Step 5: Show Project Summary" @@ -217,7 +202,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "17", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-10.ipynb b/docs/docs/tutorials/ed-10.ipynb index 2fa9c6c5..5707c038 100644 --- a/docs/docs/tutorials/ed-10.ipynb +++ b/docs/docs/tutorials/ed-10.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "239bda80", + "id": "2e0ed9d7", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Pair Distribution Function: Ni, NPD\n", @@ -51,7 +36,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -60,7 +45,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +54,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Create Project" @@ -78,7 +63,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -87,7 +72,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "## Add Structure" @@ -96,7 +81,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -106,7 +91,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -126,7 +111,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "## Add Experiment" @@ -135,7 +120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -145,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -162,7 +147,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -177,7 +162,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "12", "metadata": {}, "source": [ "## Select Fitting Parameters" @@ -186,7 +171,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -197,7 +182,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -208,7 +193,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "## Run Fitting" @@ -217,7 +202,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -227,7 +212,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "## Plot Measured vs Calculated" @@ -236,7 +221,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-11.ipynb b/docs/docs/tutorials/ed-11.ipynb index cbb509bf..30fc3e26 100644 --- a/docs/docs/tutorials/ed-11.ipynb +++ b/docs/docs/tutorials/ed-11.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "958d9ba3", + "id": "e0a12c6e", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Pair Distribution Function: Si, NPD\n", @@ -48,7 +33,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -57,7 +42,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -66,7 +51,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Create Project" @@ -75,7 +60,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -84,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "## Set Plotting Engine" @@ -93,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -105,7 +90,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -115,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "## Add Structure" @@ -124,7 +109,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -134,7 +119,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -155,7 +140,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Add Experiment" @@ -164,7 +149,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -174,7 +159,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -191,7 +176,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -207,7 +192,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "## Select Fitting Parameters" @@ -216,7 +201,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -228,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -240,7 +225,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "18", "metadata": {}, "source": [ "## Run Fitting" @@ -249,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -259,7 +244,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "20", "metadata": {}, "source": [ "## Plot Measured vs Calculated" @@ -268,7 +253,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-12.ipynb b/docs/docs/tutorials/ed-12.ipynb index deaca165..6aa16c6b 100644 --- a/docs/docs/tutorials/ed-12.ipynb +++ b/docs/docs/tutorials/ed-12.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a6c12e68", + "id": "edee23bc", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Pair Distribution Function: NaCl, XRD\n", @@ -51,7 +36,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -60,7 +45,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +54,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Create Project" @@ -78,7 +63,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -87,7 +72,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "## Set Plotting Engine" @@ -96,7 +81,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -108,7 +93,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -119,7 +104,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "## Add Structure" @@ -128,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -138,7 +123,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -167,7 +152,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Add Experiment" @@ -176,7 +161,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -186,7 +171,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -203,7 +188,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -213,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -223,7 +208,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -233,7 +218,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -248,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -257,7 +242,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "## Select Fitting Parameters" @@ -266,7 +251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -278,7 +263,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -289,7 +274,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "22", "metadata": {}, "source": [ "## Run Fitting" @@ -298,7 +283,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -308,7 +293,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "24", "metadata": {}, "source": [ "## Plot Measured vs Calculated" @@ -317,7 +302,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "25", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-13.ipynb b/docs/docs/tutorials/ed-13.ipynb index ef68958b..66854a4c 100644 --- a/docs/docs/tutorials/ed-13.ipynb +++ b/docs/docs/tutorials/ed-13.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1c60b738", + "id": "1a143d79", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Fitting Powder Diffraction data\n", @@ -70,7 +55,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "📖 See\n", @@ -82,7 +67,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -91,7 +76,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## 📘 Introduction: Simple Reference Fit – Si\n", @@ -118,7 +103,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "4", "metadata": {}, "source": [ "📖 See\n", @@ -130,7 +115,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -139,7 +124,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "6", "metadata": {}, "source": [ "You can set the title and description of the project to provide\n", @@ -151,7 +136,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -161,7 +146,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "### 🔬 Create an Experiment\n", @@ -174,7 +159,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "📖 See\n", @@ -186,7 +171,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -197,7 +182,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "Uncomment the following cell if your data reduction failed and the\n", @@ -211,7 +196,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -220,7 +205,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "Now we can create the experiment and load the measured data. In this\n", @@ -232,7 +217,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "14", "metadata": {}, "source": [ "📖 See\n", @@ -243,7 +228,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -258,7 +243,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "16", "metadata": {}, "source": [ "#### Inspect Measured Data\n", @@ -274,7 +259,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "📖 See\n", @@ -288,7 +273,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "18", "metadata": {}, "source": [ "📖 See\n", @@ -299,7 +284,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -311,7 +296,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -320,7 +305,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "If you zoom in on the highest TOF peak (around 120,000 μs), you will\n", @@ -343,7 +328,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "22", "metadata": {}, "source": [ "📖 See\n", @@ -354,7 +339,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -364,7 +349,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "24", "metadata": {}, "source": [ "To visualize the effect of excluding the high TOF region, we can plot\n", @@ -375,7 +360,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -384,7 +369,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "26", "metadata": {}, "source": [ "#### Set Instrument Parameters\n", @@ -407,7 +392,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "27", "metadata": {}, "source": [ "📖 See\n", @@ -418,7 +403,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -432,7 +417,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "29", "metadata": {}, "source": [ "Before proceeding, let's take a quick look at the concept of\n", @@ -452,7 +437,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -461,7 +446,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "31", "metadata": {}, "source": [ "The `value` attribute represents the current value of the parameter as\n", @@ -475,7 +460,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -484,7 +469,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "33", "metadata": {}, "source": [ "Note that to set the value of the parameter, you can simply assign a\n", @@ -494,7 +479,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "34", "metadata": {}, "source": [ "📖 See\n", @@ -505,7 +490,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "35", "metadata": {}, "source": [ "#### Set Peak Profile Parameters\n", @@ -547,7 +532,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "36", "metadata": {}, "source": [ "📖 See\n", @@ -558,7 +543,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -574,7 +559,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "38", "metadata": {}, "source": [ "#### Set Background\n", @@ -609,7 +594,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "39", "metadata": {}, "source": [ "📖 See\n", @@ -620,7 +605,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -636,7 +621,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "41", "metadata": {}, "source": [ "### 🧩 Create a Structure – Si\n", @@ -679,7 +664,7 @@ }, { "cell_type": "markdown", - "id": "43", + "id": "42", "metadata": {}, "source": [ "📖 See\n", @@ -689,7 +674,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "43", "metadata": {}, "source": [ "```\n", @@ -721,7 +706,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "44", "metadata": {}, "source": [ "As with adding the experiment in the previous step, we will create a\n", @@ -731,7 +716,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "45", "metadata": {}, "source": [ "📖 See\n", @@ -742,7 +727,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "46", "metadata": {}, "source": [ "#### Add Structure" @@ -751,7 +736,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -760,7 +745,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "48", "metadata": {}, "source": [ "#### Set Space Group" @@ -768,7 +753,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "49", "metadata": {}, "source": [ "📖 See\n", @@ -779,7 +764,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -789,7 +774,7 @@ }, { "cell_type": "markdown", - "id": "52", + "id": "51", "metadata": {}, "source": [ "#### Set Lattice Parameters" @@ -797,7 +782,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "52", "metadata": {}, "source": [ "📖 See\n", @@ -808,7 +793,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -817,7 +802,7 @@ }, { "cell_type": "markdown", - "id": "55", + "id": "54", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -825,7 +810,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "55", "metadata": {}, "source": [ "📖 See\n", @@ -836,7 +821,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "56", "metadata": {}, "outputs": [], "source": [ @@ -853,7 +838,7 @@ }, { "cell_type": "markdown", - "id": "58", + "id": "57", "metadata": {}, "source": [ "### 🔗 Assign Structure to Experiment\n", @@ -866,7 +851,7 @@ }, { "cell_type": "markdown", - "id": "59", + "id": "58", "metadata": {}, "source": [ "📖 See\n", @@ -877,7 +862,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "59", "metadata": {}, "outputs": [], "source": [ @@ -886,7 +871,7 @@ }, { "cell_type": "markdown", - "id": "61", + "id": "60", "metadata": {}, "source": [ "### 🚀 Analyze and Fit the Data\n", @@ -908,7 +893,7 @@ }, { "cell_type": "markdown", - "id": "62", + "id": "61", "metadata": { "title": "**Reminder:**" }, @@ -925,7 +910,7 @@ }, { "cell_type": "markdown", - "id": "63", + "id": "62", "metadata": {}, "source": [ "📖 See\n", @@ -935,7 +920,7 @@ }, { "cell_type": "markdown", - "id": "64", + "id": "63", "metadata": {}, "source": [ "#### Set Fit Parameters\n", @@ -958,7 +943,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65", + "id": "64", "metadata": {}, "outputs": [], "source": [ @@ -978,7 +963,7 @@ }, { "cell_type": "markdown", - "id": "66", + "id": "65", "metadata": {}, "source": [ "#### Show Free Parameters\n", @@ -989,7 +974,7 @@ }, { "cell_type": "markdown", - "id": "67", + "id": "66", "metadata": {}, "source": [ "📖 See\n", @@ -1003,7 +988,7 @@ { "cell_type": "code", "execution_count": null, - "id": "68", + "id": "67", "metadata": {}, "outputs": [], "source": [ @@ -1012,7 +997,7 @@ }, { "cell_type": "markdown", - "id": "69", + "id": "68", "metadata": {}, "source": [ "#### Visualize Diffraction Patterns\n", @@ -1028,7 +1013,7 @@ { "cell_type": "code", "execution_count": null, - "id": "70", + "id": "69", "metadata": {}, "outputs": [], "source": [ @@ -1037,7 +1022,7 @@ }, { "cell_type": "markdown", - "id": "71", + "id": "70", "metadata": {}, "source": [ "#### Run Fitting\n", @@ -1048,7 +1033,7 @@ }, { "cell_type": "markdown", - "id": "72", + "id": "71", "metadata": {}, "source": [ "📖 See\n", @@ -1059,7 +1044,7 @@ { "cell_type": "code", "execution_count": null, - "id": "73", + "id": "72", "metadata": {}, "outputs": [], "source": [ @@ -1069,7 +1054,7 @@ }, { "cell_type": "markdown", - "id": "74", + "id": "73", "metadata": {}, "source": [ "#### Check Fit Results\n", @@ -1088,7 +1073,7 @@ }, { "cell_type": "markdown", - "id": "75", + "id": "74", "metadata": {}, "source": [ "#### Visualize Fit Results\n", @@ -1102,7 +1087,7 @@ { "cell_type": "code", "execution_count": null, - "id": "76", + "id": "75", "metadata": {}, "outputs": [], "source": [ @@ -1111,7 +1096,7 @@ }, { "cell_type": "markdown", - "id": "77", + "id": "76", "metadata": {}, "source": [ "#### TOF vs d-spacing\n", @@ -1145,7 +1130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "78", + "id": "77", "metadata": {}, "outputs": [], "source": [ @@ -1154,7 +1139,7 @@ }, { "cell_type": "markdown", - "id": "79", + "id": "78", "metadata": {}, "source": [ "As you can see, the calculated diffraction pattern now matches the\n", @@ -1180,7 +1165,7 @@ { "cell_type": "code", "execution_count": null, - "id": "80", + "id": "79", "metadata": {}, "outputs": [], "source": [ @@ -1189,7 +1174,7 @@ }, { "cell_type": "markdown", - "id": "81", + "id": "80", "metadata": {}, "source": [ "## 💪 Exercise: Complex Fit – LBCO\n", @@ -1210,7 +1195,7 @@ }, { "cell_type": "markdown", - "id": "82", + "id": "81", "metadata": {}, "source": [ "**Hint:**" @@ -1218,7 +1203,7 @@ }, { "cell_type": "markdown", - "id": "83", + "id": "82", "metadata": {}, "source": [ "You can use the same approach as in the previous part of the notebook,\n", @@ -1227,7 +1212,7 @@ }, { "cell_type": "markdown", - "id": "84", + "id": "83", "metadata": {}, "source": [ "**Solution:**" @@ -1236,7 +1221,7 @@ { "cell_type": "code", "execution_count": null, - "id": "85", + "id": "84", "metadata": {}, "outputs": [], "source": [ @@ -1247,7 +1232,7 @@ }, { "cell_type": "markdown", - "id": "86", + "id": "85", "metadata": {}, "source": [ "### 🔬 Exercise 2: Define an Experiment\n", @@ -1260,7 +1245,7 @@ }, { "cell_type": "markdown", - "id": "87", + "id": "86", "metadata": {}, "source": [ "**Hint:**" @@ -1268,7 +1253,7 @@ }, { "cell_type": "markdown", - "id": "88", + "id": "87", "metadata": {}, "source": [ "You can use the same approach as in the previous part of the notebook,\n", @@ -1277,7 +1262,7 @@ }, { "cell_type": "markdown", - "id": "89", + "id": "88", "metadata": {}, "source": [ "**Solution:**" @@ -1286,7 +1271,7 @@ { "cell_type": "code", "execution_count": null, - "id": "90", + "id": "89", "metadata": {}, "outputs": [], "source": [ @@ -1309,7 +1294,7 @@ }, { "cell_type": "markdown", - "id": "91", + "id": "90", "metadata": {}, "source": [ "#### Exercise 2.1: Inspect Measured Data\n", @@ -1321,7 +1306,7 @@ }, { "cell_type": "markdown", - "id": "92", + "id": "91", "metadata": {}, "source": [ "**Hint:**" @@ -1329,7 +1314,7 @@ }, { "cell_type": "markdown", - "id": "93", + "id": "92", "metadata": {}, "source": [ "You can use the `plot_meas` method of the project to visualize the\n", @@ -1340,7 +1325,7 @@ }, { "cell_type": "markdown", - "id": "94", + "id": "93", "metadata": {}, "source": [ "**Solution:**" @@ -1349,7 +1334,7 @@ { "cell_type": "code", "execution_count": null, - "id": "95", + "id": "94", "metadata": {}, "outputs": [], "source": [ @@ -1363,7 +1348,7 @@ }, { "cell_type": "markdown", - "id": "96", + "id": "95", "metadata": {}, "source": [ "#### Exercise 2.2: Set Instrument Parameters\n", @@ -1373,7 +1358,7 @@ }, { "cell_type": "markdown", - "id": "97", + "id": "96", "metadata": {}, "source": [ "**Hint:**" @@ -1381,7 +1366,7 @@ }, { "cell_type": "markdown", - "id": "98", + "id": "97", "metadata": {}, "source": [ "Use the values from the data reduction process for the LBCO and\n", @@ -1390,7 +1375,7 @@ }, { "cell_type": "markdown", - "id": "99", + "id": "98", "metadata": {}, "source": [ "**Solution:**" @@ -1399,7 +1384,7 @@ { "cell_type": "code", "execution_count": null, - "id": "100", + "id": "99", "metadata": {}, "outputs": [], "source": [ @@ -1413,7 +1398,7 @@ }, { "cell_type": "markdown", - "id": "101", + "id": "100", "metadata": {}, "source": [ "#### Exercise 2.3: Set Peak Profile Parameters\n", @@ -1423,7 +1408,7 @@ }, { "cell_type": "markdown", - "id": "102", + "id": "101", "metadata": {}, "source": [ "**Hint:**" @@ -1431,7 +1416,7 @@ }, { "cell_type": "markdown", - "id": "103", + "id": "102", "metadata": {}, "source": [ "Use the values from the\n", @@ -1443,7 +1428,7 @@ }, { "cell_type": "markdown", - "id": "104", + "id": "103", "metadata": {}, "source": [ "**Solution:**" @@ -1452,7 +1437,7 @@ { "cell_type": "code", "execution_count": null, - "id": "105", + "id": "104", "metadata": {}, "outputs": [], "source": [ @@ -1470,7 +1455,7 @@ }, { "cell_type": "markdown", - "id": "106", + "id": "105", "metadata": {}, "source": [ "#### Exercise 2.4: Set Background\n", @@ -1481,7 +1466,7 @@ }, { "cell_type": "markdown", - "id": "107", + "id": "106", "metadata": {}, "source": [ "**Hint:**" @@ -1489,7 +1474,7 @@ }, { "cell_type": "markdown", - "id": "108", + "id": "107", "metadata": {}, "source": [ "Use the same approach as in the previous part of the notebook, but\n", @@ -1500,7 +1485,7 @@ }, { "cell_type": "markdown", - "id": "109", + "id": "108", "metadata": {}, "source": [ "**Solution:**" @@ -1509,7 +1494,7 @@ { "cell_type": "code", "execution_count": null, - "id": "110", + "id": "109", "metadata": {}, "outputs": [], "source": [ @@ -1525,7 +1510,7 @@ }, { "cell_type": "markdown", - "id": "111", + "id": "110", "metadata": {}, "source": [ "### 🧩 Exercise 3: Define a Structure – LBCO\n", @@ -1541,7 +1526,7 @@ }, { "cell_type": "markdown", - "id": "112", + "id": "111", "metadata": {}, "source": [ "```\n", @@ -1576,7 +1561,7 @@ }, { "cell_type": "markdown", - "id": "113", + "id": "112", "metadata": {}, "source": [ "Note that the `occupancy` of the La and Ba atoms is 0.5\n", @@ -1606,7 +1591,7 @@ }, { "cell_type": "markdown", - "id": "114", + "id": "113", "metadata": {}, "source": [ "#### Exercise 3.1: Create Structure\n", @@ -1617,7 +1602,7 @@ }, { "cell_type": "markdown", - "id": "115", + "id": "114", "metadata": {}, "source": [ "**Hint:**" @@ -1625,7 +1610,7 @@ }, { "cell_type": "markdown", - "id": "116", + "id": "115", "metadata": {}, "source": [ "You can use the same approach as in the previous part of the notebook,\n", @@ -1635,7 +1620,7 @@ }, { "cell_type": "markdown", - "id": "117", + "id": "116", "metadata": {}, "source": [ "**Solution:**" @@ -1644,7 +1629,7 @@ { "cell_type": "code", "execution_count": null, - "id": "118", + "id": "117", "metadata": {}, "outputs": [], "source": [ @@ -1653,7 +1638,7 @@ }, { "cell_type": "markdown", - "id": "119", + "id": "118", "metadata": {}, "source": [ "#### Exercise 3.2: Set Space Group\n", @@ -1663,7 +1648,7 @@ }, { "cell_type": "markdown", - "id": "120", + "id": "119", "metadata": {}, "source": [ "**Hint:**" @@ -1671,7 +1656,7 @@ }, { "cell_type": "markdown", - "id": "121", + "id": "120", "metadata": {}, "source": [ "Use the space group name and IT coordinate system code from the CIF\n", @@ -1680,7 +1665,7 @@ }, { "cell_type": "markdown", - "id": "122", + "id": "121", "metadata": {}, "source": [ "**Solution:**" @@ -1689,7 +1674,7 @@ { "cell_type": "code", "execution_count": null, - "id": "123", + "id": "122", "metadata": {}, "outputs": [], "source": [ @@ -1699,7 +1684,7 @@ }, { "cell_type": "markdown", - "id": "124", + "id": "123", "metadata": {}, "source": [ "#### Exercise 3.3: Set Lattice Parameters\n", @@ -1709,7 +1694,7 @@ }, { "cell_type": "markdown", - "id": "125", + "id": "124", "metadata": {}, "source": [ "**Hint:**" @@ -1717,7 +1702,7 @@ }, { "cell_type": "markdown", - "id": "126", + "id": "125", "metadata": {}, "source": [ "Use the lattice parameters from the CIF data." @@ -1725,7 +1710,7 @@ }, { "cell_type": "markdown", - "id": "127", + "id": "126", "metadata": {}, "source": [ "**Solution:**" @@ -1734,7 +1719,7 @@ { "cell_type": "code", "execution_count": null, - "id": "128", + "id": "127", "metadata": {}, "outputs": [], "source": [ @@ -1743,7 +1728,7 @@ }, { "cell_type": "markdown", - "id": "129", + "id": "128", "metadata": {}, "source": [ "#### Exercise 3.4: Set Atom Sites\n", @@ -1753,7 +1738,7 @@ }, { "cell_type": "markdown", - "id": "130", + "id": "129", "metadata": {}, "source": [ "**Hint:**" @@ -1761,7 +1746,7 @@ }, { "cell_type": "markdown", - "id": "131", + "id": "130", "metadata": {}, "source": [ "Use the atom sites from the CIF data. You can use the `add` method of\n", @@ -1770,7 +1755,7 @@ }, { "cell_type": "markdown", - "id": "132", + "id": "131", "metadata": {}, "source": [ "**Solution:**" @@ -1779,7 +1764,7 @@ { "cell_type": "code", "execution_count": null, - "id": "133", + "id": "132", "metadata": {}, "outputs": [], "source": [ @@ -1825,7 +1810,7 @@ }, { "cell_type": "markdown", - "id": "134", + "id": "133", "metadata": {}, "source": [ "### 🔗 Exercise 4: Assign Structure to Experiment\n", @@ -1835,7 +1820,7 @@ }, { "cell_type": "markdown", - "id": "135", + "id": "134", "metadata": {}, "source": [ "**Hint:**" @@ -1843,7 +1828,7 @@ }, { "cell_type": "markdown", - "id": "136", + "id": "135", "metadata": {}, "source": [ "Use the `linked_phases` attribute of the experiment to link the\n", @@ -1852,7 +1837,7 @@ }, { "cell_type": "markdown", - "id": "137", + "id": "136", "metadata": {}, "source": [ "**Solution:**" @@ -1861,7 +1846,7 @@ { "cell_type": "code", "execution_count": null, - "id": "138", + "id": "137", "metadata": {}, "outputs": [], "source": [ @@ -1870,7 +1855,7 @@ }, { "cell_type": "markdown", - "id": "139", + "id": "138", "metadata": {}, "source": [ "### 🚀 Exercise 5: Analyze and Fit the Data\n", @@ -1883,7 +1868,7 @@ }, { "cell_type": "markdown", - "id": "140", + "id": "139", "metadata": {}, "source": [ "**Hint:**" @@ -1891,7 +1876,7 @@ }, { "cell_type": "markdown", - "id": "141", + "id": "140", "metadata": {}, "source": [ "You can start with the scale factor and the background points, as in\n", @@ -1900,7 +1885,7 @@ }, { "cell_type": "markdown", - "id": "142", + "id": "141", "metadata": {}, "source": [ "**Solution:**" @@ -1909,7 +1894,7 @@ { "cell_type": "code", "execution_count": null, - "id": "143", + "id": "142", "metadata": {}, "outputs": [], "source": [ @@ -1921,7 +1906,7 @@ }, { "cell_type": "markdown", - "id": "144", + "id": "143", "metadata": {}, "source": [ "#### Exercise 5.2: Run Fitting\n", @@ -1932,7 +1917,7 @@ }, { "cell_type": "markdown", - "id": "145", + "id": "144", "metadata": {}, "source": [ "**Hint:**" @@ -1940,7 +1925,7 @@ }, { "cell_type": "markdown", - "id": "146", + "id": "145", "metadata": {}, "source": [ "Use the `plot_meas_vs_calc` method of the project to visualize the\n", @@ -1951,7 +1936,7 @@ }, { "cell_type": "markdown", - "id": "147", + "id": "146", "metadata": {}, "source": [ "**Solution:**" @@ -1960,7 +1945,7 @@ { "cell_type": "code", "execution_count": null, - "id": "148", + "id": "147", "metadata": {}, "outputs": [], "source": [ @@ -1972,7 +1957,7 @@ }, { "cell_type": "markdown", - "id": "149", + "id": "148", "metadata": {}, "source": [ "#### Exercise 5.3: Find the Misfit in the Fit\n", @@ -1987,7 +1972,7 @@ }, { "cell_type": "markdown", - "id": "150", + "id": "149", "metadata": {}, "source": [ "**Hint:**" @@ -1995,7 +1980,7 @@ }, { "cell_type": "markdown", - "id": "151", + "id": "150", "metadata": {}, "source": [ "Consider the following options:\n", @@ -2007,7 +1992,7 @@ }, { "cell_type": "markdown", - "id": "152", + "id": "151", "metadata": {}, "source": [ "**Solution:**" @@ -2015,7 +2000,7 @@ }, { "cell_type": "markdown", - "id": "153", + "id": "152", "metadata": {}, "source": [ "\n", @@ -2037,7 +2022,7 @@ { "cell_type": "code", "execution_count": null, - "id": "154", + "id": "153", "metadata": {}, "outputs": [], "source": [ @@ -2046,7 +2031,7 @@ }, { "cell_type": "markdown", - "id": "155", + "id": "154", "metadata": {}, "source": [ "#### Exercise 5.4: Refine the LBCO Lattice Parameter\n", @@ -2056,7 +2041,7 @@ }, { "cell_type": "markdown", - "id": "156", + "id": "155", "metadata": {}, "source": [ "**Hint:**" @@ -2064,7 +2049,7 @@ }, { "cell_type": "markdown", - "id": "157", + "id": "156", "metadata": {}, "source": [ "To achieve this, we will set the `free` attribute of the `length_a`\n", @@ -2079,7 +2064,7 @@ }, { "cell_type": "markdown", - "id": "158", + "id": "157", "metadata": {}, "source": [ "**Solution:**" @@ -2088,7 +2073,7 @@ { "cell_type": "code", "execution_count": null, - "id": "159", + "id": "158", "metadata": {}, "outputs": [], "source": [ @@ -2102,7 +2087,7 @@ }, { "cell_type": "markdown", - "id": "160", + "id": "159", "metadata": {}, "source": [ "One of the main goals of this study was to refine the lattice\n", @@ -2115,7 +2100,7 @@ }, { "cell_type": "markdown", - "id": "161", + "id": "160", "metadata": {}, "source": [ "#### Exercise 5.5: Visualize the Fit Results in d-spacing\n", @@ -2126,7 +2111,7 @@ }, { "cell_type": "markdown", - "id": "162", + "id": "161", "metadata": {}, "source": [ "**Hint:**" @@ -2134,7 +2119,7 @@ }, { "cell_type": "markdown", - "id": "163", + "id": "162", "metadata": {}, "source": [ "Use the `plot_meas_vs_calc` method of the project and set the\n", @@ -2143,7 +2128,7 @@ }, { "cell_type": "markdown", - "id": "164", + "id": "163", "metadata": {}, "source": [ "**Solution:**" @@ -2152,7 +2137,7 @@ { "cell_type": "code", "execution_count": null, - "id": "165", + "id": "164", "metadata": {}, "outputs": [], "source": [ @@ -2161,7 +2146,7 @@ }, { "cell_type": "markdown", - "id": "166", + "id": "165", "metadata": {}, "source": [ "#### Exercise 5.6: Refine the Peak Profile Parameters\n", @@ -2181,7 +2166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "167", + "id": "166", "metadata": {}, "outputs": [], "source": [ @@ -2190,7 +2175,7 @@ }, { "cell_type": "markdown", - "id": "168", + "id": "167", "metadata": {}, "source": [ "The peak profile parameters are determined based on both the\n", @@ -2204,7 +2189,7 @@ }, { "cell_type": "markdown", - "id": "169", + "id": "168", "metadata": {}, "source": [ "**Hint:**" @@ -2212,7 +2197,7 @@ }, { "cell_type": "markdown", - "id": "170", + "id": "169", "metadata": {}, "source": [ "You can set the `free` attribute of the peak profile parameters to\n", @@ -2223,7 +2208,7 @@ }, { "cell_type": "markdown", - "id": "171", + "id": "170", "metadata": {}, "source": [ "**Solution:**" @@ -2232,7 +2217,7 @@ { "cell_type": "code", "execution_count": null, - "id": "172", + "id": "171", "metadata": {}, "outputs": [], "source": [ @@ -2252,7 +2237,7 @@ }, { "cell_type": "markdown", - "id": "173", + "id": "172", "metadata": {}, "source": [ "#### Exercise 5.7: Find Undefined Features\n", @@ -2264,7 +2249,7 @@ }, { "cell_type": "markdown", - "id": "174", + "id": "173", "metadata": {}, "source": [ "**Hint:**" @@ -2272,7 +2257,7 @@ }, { "cell_type": "markdown", - "id": "175", + "id": "174", "metadata": {}, "source": [ "While the fit is now significantly better, there are still some\n", @@ -2284,7 +2269,7 @@ }, { "cell_type": "markdown", - "id": "176", + "id": "175", "metadata": {}, "source": [ "**Solution:**" @@ -2293,7 +2278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "177", + "id": "176", "metadata": {}, "outputs": [], "source": [ @@ -2302,7 +2287,7 @@ }, { "cell_type": "markdown", - "id": "178", + "id": "177", "metadata": {}, "source": [ "#### Exercise 5.8: Identify the Cause of the Unexplained Peaks\n", @@ -2315,7 +2300,7 @@ }, { "cell_type": "markdown", - "id": "179", + "id": "178", "metadata": {}, "source": [ "**Hint:**" @@ -2323,7 +2308,7 @@ }, { "cell_type": "markdown", - "id": "180", + "id": "179", "metadata": {}, "source": [ "Consider the following options:\n", @@ -2335,7 +2320,7 @@ }, { "cell_type": "markdown", - "id": "181", + "id": "180", "metadata": {}, "source": [ "**Solution:**" @@ -2343,7 +2328,7 @@ }, { "cell_type": "markdown", - "id": "182", + "id": "181", "metadata": {}, "source": [ "1. ❌ In principle, this could be the case, as sometimes the presence\n", @@ -2363,7 +2348,7 @@ }, { "cell_type": "markdown", - "id": "183", + "id": "182", "metadata": {}, "source": [ "#### Exercise 5.9: Identify the impurity phase\n", @@ -2374,7 +2359,7 @@ }, { "cell_type": "markdown", - "id": "184", + "id": "183", "metadata": {}, "source": [ "**Hint:**" @@ -2382,7 +2367,7 @@ }, { "cell_type": "markdown", - "id": "185", + "id": "184", "metadata": {}, "source": [ "Check the positions of the unexplained peaks in the diffraction\n", @@ -2392,7 +2377,7 @@ }, { "cell_type": "markdown", - "id": "186", + "id": "185", "metadata": {}, "source": [ "**Solution:**" @@ -2400,7 +2385,7 @@ }, { "cell_type": "markdown", - "id": "187", + "id": "186", "metadata": {}, "source": [ "The unexplained peaks are likely due to the presence of a small amount\n", @@ -2415,7 +2400,7 @@ { "cell_type": "code", "execution_count": null, - "id": "188", + "id": "187", "metadata": {}, "outputs": [], "source": [ @@ -2425,7 +2410,7 @@ }, { "cell_type": "markdown", - "id": "189", + "id": "188", "metadata": {}, "source": [ "#### Exercise 5.10: Create a Second Structure – Si as Impurity\n", @@ -2437,7 +2422,7 @@ }, { "cell_type": "markdown", - "id": "190", + "id": "189", "metadata": {}, "source": [ "**Hint:**" @@ -2445,7 +2430,7 @@ }, { "cell_type": "markdown", - "id": "191", + "id": "190", "metadata": {}, "source": [ "You can use the same approach as in the previous part of the notebook,\n", @@ -2455,7 +2440,7 @@ }, { "cell_type": "markdown", - "id": "192", + "id": "191", "metadata": {}, "source": [ "**Solution:**" @@ -2464,7 +2449,7 @@ { "cell_type": "code", "execution_count": null, - "id": "193", + "id": "192", "metadata": {}, "outputs": [], "source": [ @@ -2493,7 +2478,7 @@ }, { "cell_type": "markdown", - "id": "194", + "id": "193", "metadata": {}, "source": [ "#### Exercise 5.11: Refine the Scale of the Si Phase\n", @@ -2506,7 +2491,7 @@ }, { "cell_type": "markdown", - "id": "195", + "id": "194", "metadata": {}, "source": [ "**Hint:**" @@ -2514,7 +2499,7 @@ }, { "cell_type": "markdown", - "id": "196", + "id": "195", "metadata": {}, "source": [ "You can use the `plot_meas_vs_calc` method of the project to visualize\n", @@ -2525,7 +2510,7 @@ }, { "cell_type": "markdown", - "id": "197", + "id": "196", "metadata": {}, "source": [ "**Solution:**" @@ -2534,7 +2519,7 @@ { "cell_type": "code", "execution_count": null, - "id": "198", + "id": "197", "metadata": {}, "outputs": [], "source": [ @@ -2563,7 +2548,7 @@ }, { "cell_type": "markdown", - "id": "199", + "id": "198", "metadata": {}, "source": [ "All previously unexplained peaks are now accounted for in the pattern,\n", @@ -2586,7 +2571,7 @@ { "cell_type": "code", "execution_count": null, - "id": "200", + "id": "199", "metadata": {}, "outputs": [], "source": [ @@ -2595,7 +2580,7 @@ }, { "cell_type": "markdown", - "id": "201", + "id": "200", "metadata": {}, "source": [ "Finally, we save the project to disk to preserve the current state of\n", @@ -2605,7 +2590,7 @@ { "cell_type": "code", "execution_count": null, - "id": "202", + "id": "201", "metadata": {}, "outputs": [], "source": [ @@ -2614,7 +2599,7 @@ }, { "cell_type": "markdown", - "id": "203", + "id": "202", "metadata": {}, "source": [ "#### Final Remarks\n", @@ -2633,7 +2618,7 @@ }, { "cell_type": "markdown", - "id": "204", + "id": "203", "metadata": {}, "source": [ "## 🎁 Bonus\n", @@ -2662,7 +2647,7 @@ ], "metadata": { "jupytext": { - "cell_metadata_filter": "tags,title,-all", + "cell_metadata_filter": "title,tags,-all", "main_language": "python", "notebook_metadata_filter": "-all" } diff --git a/docs/docs/tutorials/ed-14.ipynb b/docs/docs/tutorials/ed-14.ipynb index a5db5807..df9a857c 100644 --- a/docs/docs/tutorials/ed-14.ipynb +++ b/docs/docs/tutorials/ed-14.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5bd8b55f", + "id": "80ba77ad", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: Tb2TiO7, HEiDi\n", @@ -47,7 +32,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -56,7 +41,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -65,7 +50,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Step 1: Define Project" @@ -74,7 +59,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -84,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "## Step 2: Define Structure" @@ -93,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -104,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -114,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -124,7 +109,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -134,7 +119,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +132,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -156,7 +141,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "12", "metadata": {}, "source": [ "## Step 3: Define Experiment" @@ -165,7 +150,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -175,7 +160,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -191,7 +176,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -201,7 +186,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -212,7 +197,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -222,7 +207,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -232,7 +217,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "## Step 4: Perform Analysis" @@ -241,7 +226,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -251,7 +236,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -262,7 +247,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -272,7 +257,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -284,7 +269,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -295,7 +280,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -305,7 +290,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -315,7 +300,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -324,7 +309,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "28", "metadata": {}, "source": [ "## Step 5: Show Project Summary" @@ -333,7 +318,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "29", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-15.ipynb b/docs/docs/tutorials/ed-15.ipynb index cdd646ac..60ec08ce 100644 --- a/docs/docs/tutorials/ed-15.ipynb +++ b/docs/docs/tutorials/ed-15.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4e0ef3ea", + "id": "7a35fc22", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: Taurine, SENJU\n", @@ -47,7 +32,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -56,7 +41,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -65,7 +50,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Step 1: Define Project" @@ -74,7 +59,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -84,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "## Step 2: Define Structure" @@ -93,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -104,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -114,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -124,7 +109,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -134,7 +119,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -143,7 +128,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Step 3: Define Experiment" @@ -152,7 +137,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -162,7 +147,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -178,7 +163,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -188,7 +173,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -199,7 +184,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -209,7 +194,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "## Step 4: Perform Analysis" @@ -218,7 +203,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -228,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -239,7 +224,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -249,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -261,7 +246,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -272,7 +257,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -282,7 +267,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -292,7 +277,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -301,7 +286,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "26", "metadata": {}, "source": [ "## Step 5: Show Project Summary" @@ -310,7 +295,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "27", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-16.ipynb b/docs/docs/tutorials/ed-16.ipynb index 458ded2c..5d0aa659 100644 --- a/docs/docs/tutorials/ed-16.ipynb +++ b/docs/docs/tutorials/ed-16.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7311bb93", + "id": "0956c08b", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Joint Refinement: Si, Bragg + PDF\n", @@ -51,7 +36,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -60,7 +45,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -72,7 +57,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Define Structure\n", @@ -87,7 +72,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -96,7 +81,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "#### Set Space Group" @@ -105,7 +90,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -115,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -124,7 +109,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -133,7 +118,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -142,7 +127,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -159,7 +144,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Define Experiments\n", @@ -175,7 +160,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -184,7 +169,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "#### Create Experiment" @@ -193,7 +178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -204,7 +189,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "#### Set Instrument" @@ -213,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -225,7 +210,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -234,7 +219,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -250,7 +235,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "#### Set Background" @@ -259,7 +244,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -270,7 +255,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -279,7 +264,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -288,7 +273,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "23", "metadata": {}, "source": [ "### Experiment 2: PDF (NOMAD, TOF)\n", @@ -299,7 +284,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -308,7 +293,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "25", "metadata": {}, "source": [ "#### Create Experiment" @@ -317,7 +302,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -331,7 +316,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "27", "metadata": {}, "source": [ "#### Set Peak Profile (PDF Parameters)" @@ -340,7 +325,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -354,7 +339,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "29", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -363,7 +348,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -372,7 +357,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "31", "metadata": {}, "source": [ "## Define Project\n", @@ -386,7 +371,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -395,7 +380,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "33", "metadata": {}, "source": [ "#### Add Structure" @@ -404,7 +389,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -413,7 +398,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "35", "metadata": {}, "source": [ "#### Add Experiments" @@ -422,7 +407,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -432,7 +417,7 @@ }, { "cell_type": "markdown", - "id": "38", + "id": "37", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -446,7 +431,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -457,7 +442,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "39", "metadata": {}, "source": [ "#### Set Minimizer" @@ -466,7 +451,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -475,7 +460,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "41", "metadata": {}, "source": [ "#### Plot Measured vs Calculated (Before Fit)" @@ -484,7 +469,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "42", "metadata": {}, "outputs": [], "source": [ @@ -494,7 +479,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -503,7 +488,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "44", "metadata": {}, "source": [ "#### Set Fitting Parameters\n", @@ -515,7 +500,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -525,7 +510,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "46", "metadata": {}, "source": [ "Bragg experiment parameters." @@ -534,7 +519,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -549,7 +534,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "48", "metadata": {}, "source": [ "PDF experiment parameters." @@ -558,7 +543,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -571,7 +556,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "50", "metadata": {}, "source": [ "#### Show Free Parameters" @@ -580,7 +565,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "51", "metadata": {}, "outputs": [], "source": [ @@ -589,7 +574,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "52", "metadata": {}, "source": [ "#### Run Fitting" @@ -598,7 +583,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -608,7 +593,7 @@ }, { "cell_type": "markdown", - "id": "55", + "id": "54", "metadata": {}, "source": [ "#### Plot Measured vs Calculated (After Fit)" @@ -617,7 +602,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -627,7 +612,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "56", "metadata": { "lines_to_next_cell": 2 }, @@ -639,7 +624,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "57", "metadata": {}, "outputs": [], "source": [] diff --git a/docs/docs/tutorials/ed-17.ipynb b/docs/docs/tutorials/ed-17.ipynb index 08eb5a20..e9369b01 100644 --- a/docs/docs/tutorials/ed-17.ipynb +++ b/docs/docs/tutorials/ed-17.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1c3cb779", + "id": "79fce5be", "metadata": { "tags": [ "hide-in-docs" @@ -19,38 +19,23 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: Co2SiO4, D20 (T-scan)\n", "\n", "This example demonstrates a Rietveld refinement of the Co2SiO4 crystal\n", "structure using constant-wavelength neutron powder diffraction data\n", - "from D20 at ILL. A sequential refinement of the same structure against\n", - "a temperature scan is performed to show how to manage multiple\n", - "experiments in a project." + "from D20 at ILL. A sequential refinement is performed against a\n", + "temperature scan using `fit_sequential`, which processes each data\n", + "file independently without loading all datasets into memory at once." ] }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -59,7 +44,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -68,7 +53,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Step 1: Define Project\n", @@ -79,7 +64,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -88,26 +73,26 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ - "Set output verbosity level to \"short\" to show only one-line status\n", - "messages during the analysis process." + "The project must be saved before running sequential fitting, so that\n", + "results can be written to `analysis/results.csv`." ] }, { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ - "project.verbosity = 'short'" + "project.save_as('data/cosio_project', temporary=False)" ] }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "## Step 2: Define Crystal Structure\n", @@ -121,7 +106,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -131,7 +116,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "#### Set Space Group" @@ -140,7 +125,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -150,7 +135,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -159,7 +144,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -170,7 +155,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -179,7 +164,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -241,13 +226,15 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ - "## Step 3: Define Experiments\n", + "## Step 3: Define Template Experiment\n", "\n", - "This section shows how to add experiments, configure their parameters,\n", - "and link the structures defined above.\n", + "For sequential fitting, we create a single template experiment from\n", + "the first data file. This template defines the instrument, peak\n", + "profile, background, and linked phases that will be reused for every\n", + "data file in the scan.\n", "\n", "#### Download Measured Data" ] @@ -255,46 +242,58 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ - "file_path = ed.download_data(id=27, destination='data')" + "zip_path = ed.download_data(id=24, destination='data')" ] }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ - "#### Create Experiments and Set Temperature" + "#### Extract Data Files" ] }, { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ - "data_paths = ed.extract_data_paths_from_zip(file_path)\n", - "for i, data_path in enumerate(data_paths, start=1):\n", - " name = f'd20_{i}'\n", - " project.experiments.add_from_data_path(\n", - " name=name,\n", - " data_path=data_path,\n", - " )\n", - " expt = project.experiments[name]\n", - " expt.diffrn.ambient_temperature = ed.extract_metadata(\n", - " file_path=data_path,\n", - " pattern=r'^TEMP\\s+([0-9.]+)',\n", - " )" + "data_dir = 'data/d20_scan'\n", + "data_paths = ed.extract_data_paths_from_zip(zip_path, destination=data_dir)" ] }, { "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "#### Create Template Experiment from the First File" + ] + }, + { + "cell_type": "code", + "execution_count": null, "id": "20", "metadata": {}, + "outputs": [], + "source": [ + "project.experiments.add_from_data_path(\n", + " name='d20',\n", + " data_path=data_paths[0],\n", + ")\n", + "expt = project.experiments['d20']" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, "source": [ "#### Set Instrument" ] @@ -302,18 +301,17 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ - "for expt in project.experiments:\n", - " expt.instrument.setup_wavelength = 1.87\n", - " expt.instrument.calib_twotheta_offset = 0.29" + "expt.instrument.setup_wavelength = 1.87\n", + "expt.instrument.calib_twotheta_offset = 0.29" ] }, { "cell_type": "markdown", - "id": "22", + "id": "23", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -322,20 +320,19 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ - "for expt in project.experiments:\n", - " expt.peak.broad_gauss_u = 0.24\n", - " expt.peak.broad_gauss_v = -0.53\n", - " expt.peak.broad_gauss_w = 0.38\n", - " expt.peak.broad_lorentz_y = 0.02" + "expt.peak.broad_gauss_u = 0.24\n", + "expt.peak.broad_gauss_v = -0.53\n", + "expt.peak.broad_gauss_w = 0.38\n", + "expt.peak.broad_lorentz_y = 0.02" ] }, { "cell_type": "markdown", - "id": "24", + "id": "25", "metadata": {}, "source": [ "#### Set Excluded Regions" @@ -344,18 +341,17 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ - "for expt in project.experiments:\n", - " expt.excluded_regions.create(id='1', start=0, end=8)\n", - " expt.excluded_regions.create(id='2', start=150, end=180)" + "expt.excluded_regions.create(id='1', start=0, end=8)\n", + "expt.excluded_regions.create(id='2', start=150, end=180)" ] }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "#### Set Background" @@ -364,30 +360,29 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "28", "metadata": {}, "outputs": [], "source": [ - "for expt in project.experiments:\n", - " expt.background.create(id='1', x=8, y=609)\n", - " expt.background.create(id='2', x=9, y=581)\n", - " expt.background.create(id='3', x=10, y=563)\n", - " expt.background.create(id='4', x=11, y=540)\n", - " expt.background.create(id='5', x=12, y=520)\n", - " expt.background.create(id='6', x=15, y=507)\n", - " expt.background.create(id='7', x=25, y=463)\n", - " expt.background.create(id='8', x=30, y=434)\n", - " expt.background.create(id='9', x=50, y=451)\n", - " expt.background.create(id='10', x=70, y=431)\n", - " expt.background.create(id='11', x=90, y=414)\n", - " expt.background.create(id='12', x=110, y=361)\n", - " expt.background.create(id='13', x=130, y=292)\n", - " expt.background.create(id='14', x=150, y=241)" + "expt.background.create(id='1', x=8, y=609)\n", + "expt.background.create(id='2', x=9, y=581)\n", + "expt.background.create(id='3', x=10, y=563)\n", + "expt.background.create(id='4', x=11, y=540)\n", + "expt.background.create(id='5', x=12, y=520)\n", + "expt.background.create(id='6', x=15, y=507)\n", + "expt.background.create(id='7', x=25, y=463)\n", + "expt.background.create(id='8', x=30, y=434)\n", + "expt.background.create(id='9', x=50, y=451)\n", + "expt.background.create(id='10', x=70, y=431)\n", + "expt.background.create(id='11', x=90, y=414)\n", + "expt.background.create(id='12', x=110, y=361)\n", + "expt.background.create(id='13', x=130, y=292)\n", + "expt.background.create(id='14', x=150, y=241)" ] }, { "cell_type": "markdown", - "id": "28", + "id": "29", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -396,28 +391,27 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "30", "metadata": {}, "outputs": [], "source": [ - "for expt in project.experiments:\n", - " expt.linked_phases.create(id='cosio', scale=1.2)" + "expt.linked_phases.create(id='cosio', scale=1.2)" ] }, { "cell_type": "markdown", - "id": "30", + "id": "31", "metadata": {}, "source": [ "## Step 4: Perform Analysis\n", "\n", "This section shows how to set free parameters, define constraints,\n", - "and run the refinement." + "and run the sequential refinement." ] }, { "cell_type": "markdown", - "id": "31", + "id": "32", "metadata": {}, "source": [ "#### Set Free Parameters" @@ -426,7 +420,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -457,27 +451,26 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "34", "metadata": {}, "outputs": [], "source": [ - "for expt in project.experiments:\n", - " expt.linked_phases['cosio'].scale.free = True\n", + "expt.linked_phases['cosio'].scale.free = True\n", "\n", - " expt.instrument.calib_twotheta_offset.free = True\n", + "expt.instrument.calib_twotheta_offset.free = True\n", "\n", - " expt.peak.broad_gauss_u.free = True\n", - " expt.peak.broad_gauss_v.free = True\n", - " expt.peak.broad_gauss_w.free = True\n", - " expt.peak.broad_lorentz_y.free = True\n", + "expt.peak.broad_gauss_u.free = True\n", + "expt.peak.broad_gauss_v.free = True\n", + "expt.peak.broad_gauss_w.free = True\n", + "expt.peak.broad_lorentz_y.free = True\n", "\n", - " for point in expt.background:\n", - " point.y.free = True" + "for point in expt.background:\n", + " point.y.free = True" ] }, { "cell_type": "markdown", - "id": "34", + "id": "35", "metadata": {}, "source": [ "#### Set Constraints\n", @@ -488,7 +481,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -504,7 +497,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "37", "metadata": {}, "source": [ "Set constraints." @@ -513,75 +506,129 @@ { "cell_type": "code", "execution_count": null, - "id": "37", - "metadata": { - "lines_to_next_cell": 2 - }, + "id": "38", + "metadata": {}, "outputs": [], "source": [ - "project.analysis.constraints.create(\n", - " expression='biso_Co2 = biso_Co1',\n", - ")" + "project.analysis.constraints.create(expression='biso_Co2 = biso_Co1')" ] }, { "cell_type": "markdown", - "id": "38", + "id": "39", "metadata": {}, "source": [ - "#### Set Fit Mode" + "#### Run Single Fitting\n", + "\n", + "This is the fitting of the first dataset to optimize the initial\n", + "parameters for the sequential fitting. This step is optional but can\n", + "help with convergence and speed of the sequential fitting, especially\n", + "if the initial parameters are far from optimal." ] }, { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "40", "metadata": {}, "outputs": [], "source": [ - "project.analysis.fit_mode.mode = 'single'" + "project.analysis.fit()" ] }, { "cell_type": "markdown", - "id": "40", + "id": "41", "metadata": {}, "source": [ - "#### Run Fitting" + "#### Run Sequential Fitting\n", + "\n", + "Set output verbosity level to \"short\" to show only one-line status\n", + "messages during the analysis process." ] }, { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "42", "metadata": {}, "outputs": [], "source": [ - "project.analysis.fit()" + "project.verbosity = 'short'" ] }, { "cell_type": "markdown", - "id": "42", + "id": "43", + "metadata": { + "lines_to_next_cell": 2 + }, + "source": [ + "\n", + "Define a callback that extracts the temperature from each data file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44", "metadata": {}, + "outputs": [], "source": [ - "#### Plot Measured vs Calculated" + "def extract_diffrn(file_path):\n", + " temperature = ed.extract_metadata(\n", + " file_path=file_path,\n", + " pattern=r'^TEMP\\s+([0-9.]+)',\n", + " )\n", + " return {'ambient_temperature': temperature}" + ] + }, + { + "cell_type": "markdown", + "id": "45", + "metadata": {}, + "source": [ + "Run the sequential fit over all data files in the scan directory." ] }, { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "46", "metadata": {}, "outputs": [], "source": [ - "last_expt_name = project.experiments.names[-1]\n", - "project.plot_meas_vs_calc(expt_name=last_expt_name, show_residual=True)" + "project.analysis.fit_sequential(\n", + " data_dir=data_dir,\n", + " extract_diffrn=extract_diffrn,\n", + " max_workers='auto',\n", + ")" ] }, { "cell_type": "markdown", - "id": "44", + "id": "47", + "metadata": {}, + "source": [ + "#### Replay a Dataset\n", + "\n", + "Apply fitted parameters from the last CSV row and plot the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "48", + "metadata": {}, + "outputs": [], + "source": [ + "project.apply_params_from_csv(row_index=-1)\n", + "project.plot_meas_vs_calc(expt_name='d20', show_residual=True)" + ] + }, + { + "cell_type": "markdown", + "id": "49", "metadata": {}, "source": [ "#### Plot Parameter Evolution\n", @@ -592,16 +639,16 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "50", "metadata": {}, "outputs": [], "source": [ - "temperature = project.experiments[0].diffrn.ambient_temperature" + "temperature = expt.diffrn.ambient_temperature" ] }, { "cell_type": "markdown", - "id": "46", + "id": "51", "metadata": {}, "source": [ "Plot unit cell parameters vs. temperature." @@ -610,7 +657,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -621,7 +668,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "53", "metadata": {}, "source": [ "Plot isotropic displacement parameters vs. temperature." @@ -630,7 +677,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -643,7 +690,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "55", "metadata": {}, "source": [ "Plot selected fractional coordinates vs. temperature." @@ -652,7 +699,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "56", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-18.ipynb b/docs/docs/tutorials/ed-18.ipynb index 9f51a133..60ce7707 100644 --- a/docs/docs/tutorials/ed-18.ipynb +++ b/docs/docs/tutorials/ed-18.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a3ec0a8c", + "id": "71e56392", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Load Project and Fit: LBCO, HRPT\n", @@ -45,135 +30,110 @@ "how to load a previously saved project from a directory and run\n", "refinement — all in just a few lines of code.\n", "\n", - "The project is first created and saved as a setup step (this would\n", - "normally be done once and the directory would already exist on disk).\n", - "Then the saved project is loaded back and fitted.\n", - "\n", "For details on how to define structures and experiments, see the other\n", "tutorials." ] }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ - "## Import Library" + "## Import Modules" ] }, { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ - "import easydiffraction as ed" + "from easydiffraction import Project\n", + "from easydiffraction import download_data\n", + "from easydiffraction import extract_project_from_zip" ] }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ - "## Setup: Create and Save a Project\n", - "\n", - "This step creates a project from CIF files and saves it to a\n", - "directory. In practice, the project directory would already exist\n", - "on disk from a previous session." + "## Download Project Archive" ] }, { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ - "# Create a project from CIF files\n", - "project = ed.Project()\n", - "project.structures.add_from_cif_path(ed.download_data(id=1, destination='data'))\n", - "project.experiments.add_from_cif_path(ed.download_data(id=2, destination='data'))" + "zip_path = download_data(id=28, destination='data')" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "6", + "cell_type": "markdown", + "id": "5", "metadata": {}, - "outputs": [], "source": [ - "project.analysis.aliases.create(\n", - " label='biso_La',\n", - " param=project.structures['lbco'].atom_sites['La'].b_iso,\n", - ")\n", - "project.analysis.aliases.create(\n", - " label='biso_Ba',\n", - " param=project.structures['lbco'].atom_sites['Ba'].b_iso,\n", - ")\n", - "\n", - "project.analysis.aliases.create(\n", - " label='occ_La',\n", - " param=project.structures['lbco'].atom_sites['La'].occupancy,\n", - ")\n", - "project.analysis.aliases.create(\n", - " label='occ_Ba',\n", - " param=project.structures['lbco'].atom_sites['Ba'].occupancy,\n", - ")\n", - "\n", - "project.analysis.constraints.create(expression='biso_Ba = biso_La')\n", - "project.analysis.constraints.create(expression='occ_Ba = 1 - occ_La')\n", - "\n", - "project.structures['lbco'].atom_sites['La'].occupancy.free = True" + "## Extract Project" ] }, { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ - "# Save to a directory\n", - "project.save_as('lbco_project')" + "project_dir = extract_project_from_zip(zip_path, destination='data')" ] }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ - "## Step 1: Load Project from Directory" + "## Load Project" ] }, { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ - "project = ed.Project.load('lbco_project')" + "project = Project.load(project_dir)" ] }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ - "## Step 2: Perform Analysis" + "## Perform Analysis" ] }, { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ "project.analysis.fit()" ] }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "## Show Results" + ] + }, { "cell_type": "code", "execution_count": null, @@ -184,10 +144,18 @@ "project.analysis.show_fit_results()" ] }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "## Plot Meas vs Calc" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -196,20 +164,20 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "15", "metadata": {}, "source": [ - "## Step 3: Show Project Summary" + "## Save Project" ] }, { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ - "project.summary.show_report()" + "project.save()" ] } ], diff --git a/docs/docs/tutorials/ed-2.ipynb b/docs/docs/tutorials/ed-2.ipynb index cb95e408..00d769ad 100644 --- a/docs/docs/tutorials/ed-2.ipynb +++ b/docs/docs/tutorials/ed-2.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d9a613b4", + "id": "b94f1ffd", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: LBCO, HRPT\n", @@ -63,7 +48,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -72,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -81,7 +66,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Step 1: Define Project" @@ -90,7 +75,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +84,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "## Step 2: Define Structure" @@ -108,7 +93,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -118,7 +103,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -128,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -139,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -149,7 +134,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -195,7 +180,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Step 3: Define Experiment" @@ -204,7 +189,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -214,7 +199,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -230,7 +215,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -240,7 +225,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -251,7 +236,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -264,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -278,7 +263,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -289,7 +274,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -298,7 +283,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "20", "metadata": {}, "source": [ "## Step 4: Perform Analysis" @@ -307,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -322,7 +307,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": { "lines_to_next_cell": 2 }, @@ -347,7 +332,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -358,7 +343,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-3.ipynb b/docs/docs/tutorials/ed-3.ipynb index 3dd711e2..78c3ed5c 100644 --- a/docs/docs/tutorials/ed-3.ipynb +++ b/docs/docs/tutorials/ed-3.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d59d709c", + "id": "725cf769", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: LBCO, HRPT\n", @@ -61,7 +46,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -70,7 +55,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -79,7 +64,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Step 1: Create a Project\n", @@ -89,7 +74,7 @@ }, { "cell_type": "markdown", - "id": "5", + "id": "4", "metadata": {}, "source": [ "#### Create Project" @@ -98,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -107,7 +92,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "6", "metadata": {}, "source": [ "#### Set Project Metadata" @@ -116,7 +101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -129,7 +114,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "#### Show Project Metadata as CIF" @@ -138,7 +123,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +132,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "10", "metadata": {}, "source": [ "#### Save Project\n", @@ -160,7 +145,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -169,7 +154,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "12", "metadata": {}, "source": [ "#### Set Up Data Plotter" @@ -177,7 +162,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "Show supported plotting engines." @@ -186,7 +171,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -195,7 +180,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "Show current plotting configuration." @@ -204,7 +189,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -213,7 +198,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "Set plotting engine." @@ -222,7 +207,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -233,7 +218,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "## Step 2: Define Structure\n", @@ -244,7 +229,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "20", "metadata": {}, "source": [ "#### Add Structure" @@ -253,7 +238,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -262,7 +247,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "22", "metadata": {}, "source": [ "#### Show Defined Structures\n", @@ -276,7 +261,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -285,7 +270,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "24", "metadata": {}, "source": [ "#### Set Space Group\n", @@ -296,7 +281,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -306,7 +291,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "26", "metadata": {}, "source": [ "#### Set Unit Cell\n", @@ -317,7 +302,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -326,7 +311,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "28", "metadata": {}, "source": [ "#### Set Atom Sites\n", @@ -337,7 +322,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -383,7 +368,7 @@ }, { "cell_type": "markdown", - "id": "31", + "id": "30", "metadata": {}, "source": [ "#### Show Structure as CIF" @@ -392,7 +377,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -401,7 +386,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "32", "metadata": {}, "source": [ "#### Show Structure Structure" @@ -410,7 +395,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -419,7 +404,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "34", "metadata": {}, "source": [ "#### Save Project State\n", @@ -432,7 +417,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -441,7 +426,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "36", "metadata": {}, "source": [ "## Step 3: Define Experiment\n", @@ -452,7 +437,7 @@ }, { "cell_type": "markdown", - "id": "38", + "id": "37", "metadata": {}, "source": [ "#### Download Measured Data\n", @@ -463,7 +448,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -472,7 +457,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "39", "metadata": {}, "source": [ "#### Add Diffraction Experiment" @@ -481,7 +466,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -496,7 +481,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "41", "metadata": {}, "source": [ "#### Show Defined Experiments" @@ -505,7 +490,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "42", "metadata": {}, "outputs": [], "source": [ @@ -514,7 +499,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "43", "metadata": {}, "source": [ "#### Show Measured Data" @@ -523,7 +508,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -532,7 +517,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "45", "metadata": {}, "source": [ "#### Set Instrument\n", @@ -543,7 +528,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -553,7 +538,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "47", "metadata": {}, "source": [ "#### Set Peak Profile\n", @@ -564,7 +549,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -573,7 +558,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "49", "metadata": {}, "source": [ "Show the current peak profile type." @@ -582,7 +567,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -591,7 +576,7 @@ }, { "cell_type": "markdown", - "id": "52", + "id": "51", "metadata": {}, "source": [ "Select the desired peak profile type." @@ -600,7 +585,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -609,7 +594,7 @@ }, { "cell_type": "markdown", - "id": "54", + "id": "53", "metadata": {}, "source": [ "Modify default peak profile parameters." @@ -618,7 +603,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -631,7 +616,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "55", "metadata": {}, "source": [ "#### Set Background" @@ -639,7 +624,7 @@ }, { "cell_type": "markdown", - "id": "57", + "id": "56", "metadata": {}, "source": [ "Show supported background types." @@ -648,7 +633,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "57", "metadata": {}, "outputs": [], "source": [ @@ -657,7 +642,7 @@ }, { "cell_type": "markdown", - "id": "59", + "id": "58", "metadata": {}, "source": [ "Show current background type." @@ -666,7 +651,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "59", "metadata": {}, "outputs": [], "source": [ @@ -675,7 +660,7 @@ }, { "cell_type": "markdown", - "id": "61", + "id": "60", "metadata": {}, "source": [ "Select the desired background type." @@ -684,7 +669,7 @@ { "cell_type": "code", "execution_count": null, - "id": "62", + "id": "61", "metadata": {}, "outputs": [], "source": [ @@ -693,7 +678,7 @@ }, { "cell_type": "markdown", - "id": "63", + "id": "62", "metadata": {}, "source": [ "Add background points." @@ -702,7 +687,7 @@ { "cell_type": "code", "execution_count": null, - "id": "64", + "id": "63", "metadata": {}, "outputs": [], "source": [ @@ -715,7 +700,7 @@ }, { "cell_type": "markdown", - "id": "65", + "id": "64", "metadata": {}, "source": [ "Show current background points." @@ -724,7 +709,7 @@ { "cell_type": "code", "execution_count": null, - "id": "66", + "id": "65", "metadata": {}, "outputs": [], "source": [ @@ -733,7 +718,7 @@ }, { "cell_type": "markdown", - "id": "67", + "id": "66", "metadata": {}, "source": [ "#### Set Linked Phases\n", @@ -744,7 +729,7 @@ { "cell_type": "code", "execution_count": null, - "id": "68", + "id": "67", "metadata": {}, "outputs": [], "source": [ @@ -753,7 +738,7 @@ }, { "cell_type": "markdown", - "id": "69", + "id": "68", "metadata": {}, "source": [ "#### Show Experiment as CIF" @@ -762,7 +747,7 @@ { "cell_type": "code", "execution_count": null, - "id": "70", + "id": "69", "metadata": {}, "outputs": [], "source": [ @@ -771,7 +756,7 @@ }, { "cell_type": "markdown", - "id": "71", + "id": "70", "metadata": {}, "source": [ "#### Save Project State" @@ -780,7 +765,7 @@ { "cell_type": "code", "execution_count": null, - "id": "72", + "id": "71", "metadata": {}, "outputs": [], "source": [ @@ -789,7 +774,7 @@ }, { "cell_type": "markdown", - "id": "73", + "id": "72", "metadata": {}, "source": [ "## Step 4: Perform Analysis\n", @@ -805,7 +790,7 @@ { "cell_type": "code", "execution_count": null, - "id": "74", + "id": "73", "metadata": {}, "outputs": [], "source": [ @@ -814,7 +799,7 @@ }, { "cell_type": "markdown", - "id": "75", + "id": "74", "metadata": {}, "source": [ "Show current calculation engine for this experiment." @@ -823,7 +808,7 @@ { "cell_type": "code", "execution_count": null, - "id": "76", + "id": "75", "metadata": {}, "outputs": [], "source": [ @@ -832,7 +817,7 @@ }, { "cell_type": "markdown", - "id": "77", + "id": "76", "metadata": {}, "source": [ "Select the desired calculation engine." @@ -841,7 +826,7 @@ { "cell_type": "code", "execution_count": null, - "id": "78", + "id": "77", "metadata": {}, "outputs": [], "source": [ @@ -850,7 +835,7 @@ }, { "cell_type": "markdown", - "id": "79", + "id": "78", "metadata": {}, "source": [ "#### Show Calculated Data" @@ -859,7 +844,7 @@ { "cell_type": "code", "execution_count": null, - "id": "80", + "id": "79", "metadata": {}, "outputs": [], "source": [ @@ -868,7 +853,7 @@ }, { "cell_type": "markdown", - "id": "81", + "id": "80", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -877,7 +862,7 @@ { "cell_type": "code", "execution_count": null, - "id": "82", + "id": "81", "metadata": {}, "outputs": [], "source": [ @@ -887,7 +872,7 @@ { "cell_type": "code", "execution_count": null, - "id": "83", + "id": "82", "metadata": {}, "outputs": [], "source": [ @@ -896,7 +881,7 @@ }, { "cell_type": "markdown", - "id": "84", + "id": "83", "metadata": {}, "source": [ "#### Show Parameters\n", @@ -907,7 +892,7 @@ { "cell_type": "code", "execution_count": null, - "id": "85", + "id": "84", "metadata": {}, "outputs": [], "source": [ @@ -916,7 +901,7 @@ }, { "cell_type": "markdown", - "id": "86", + "id": "85", "metadata": {}, "source": [ "Show all fittable parameters." @@ -925,7 +910,7 @@ { "cell_type": "code", "execution_count": null, - "id": "87", + "id": "86", "metadata": {}, "outputs": [], "source": [ @@ -934,7 +919,7 @@ }, { "cell_type": "markdown", - "id": "88", + "id": "87", "metadata": {}, "source": [ "Show only free parameters." @@ -943,7 +928,7 @@ { "cell_type": "code", "execution_count": null, - "id": "89", + "id": "88", "metadata": {}, "outputs": [], "source": [ @@ -952,7 +937,7 @@ }, { "cell_type": "markdown", - "id": "90", + "id": "89", "metadata": {}, "source": [ "Show how to access parameters in the code." @@ -961,7 +946,7 @@ { "cell_type": "code", "execution_count": null, - "id": "91", + "id": "90", "metadata": {}, "outputs": [], "source": [ @@ -970,7 +955,7 @@ }, { "cell_type": "markdown", - "id": "92", + "id": "91", "metadata": {}, "source": [ "#### Set Fit Mode\n", @@ -981,7 +966,7 @@ { "cell_type": "code", "execution_count": null, - "id": "93", + "id": "92", "metadata": {}, "outputs": [], "source": [ @@ -990,7 +975,7 @@ }, { "cell_type": "markdown", - "id": "94", + "id": "93", "metadata": {}, "source": [ "Show current fit mode." @@ -999,7 +984,7 @@ { "cell_type": "code", "execution_count": null, - "id": "95", + "id": "94", "metadata": {}, "outputs": [], "source": [ @@ -1008,7 +993,7 @@ }, { "cell_type": "markdown", - "id": "96", + "id": "95", "metadata": {}, "source": [ "Select desired fit mode." @@ -1017,7 +1002,7 @@ { "cell_type": "code", "execution_count": null, - "id": "97", + "id": "96", "metadata": {}, "outputs": [], "source": [ @@ -1026,7 +1011,7 @@ }, { "cell_type": "markdown", - "id": "98", + "id": "97", "metadata": {}, "source": [ "#### Set Minimizer\n", @@ -1037,7 +1022,7 @@ { "cell_type": "code", "execution_count": null, - "id": "99", + "id": "98", "metadata": {}, "outputs": [], "source": [ @@ -1046,7 +1031,7 @@ }, { "cell_type": "markdown", - "id": "100", + "id": "99", "metadata": {}, "source": [ "Show current fitting engine." @@ -1055,7 +1040,7 @@ { "cell_type": "code", "execution_count": null, - "id": "101", + "id": "100", "metadata": {}, "outputs": [], "source": [ @@ -1064,7 +1049,7 @@ }, { "cell_type": "markdown", - "id": "102", + "id": "101", "metadata": {}, "source": [ "Select desired fitting engine." @@ -1073,7 +1058,7 @@ { "cell_type": "code", "execution_count": null, - "id": "103", + "id": "102", "metadata": {}, "outputs": [], "source": [ @@ -1082,7 +1067,7 @@ }, { "cell_type": "markdown", - "id": "104", + "id": "103", "metadata": {}, "source": [ "### Perform Fit 1/5\n", @@ -1093,7 +1078,7 @@ { "cell_type": "code", "execution_count": null, - "id": "105", + "id": "104", "metadata": {}, "outputs": [], "source": [ @@ -1102,7 +1087,7 @@ }, { "cell_type": "markdown", - "id": "106", + "id": "105", "metadata": {}, "source": [ "Set experiment parameters to be refined." @@ -1111,7 +1096,7 @@ { "cell_type": "code", "execution_count": null, - "id": "107", + "id": "106", "metadata": {}, "outputs": [], "source": [ @@ -1126,7 +1111,7 @@ }, { "cell_type": "markdown", - "id": "108", + "id": "107", "metadata": {}, "source": [ "Show free parameters after selection." @@ -1135,7 +1120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "109", + "id": "108", "metadata": {}, "outputs": [], "source": [ @@ -1144,7 +1129,7 @@ }, { "cell_type": "markdown", - "id": "110", + "id": "109", "metadata": {}, "source": [ "#### Run Fitting" @@ -1153,7 +1138,7 @@ { "cell_type": "code", "execution_count": null, - "id": "111", + "id": "110", "metadata": {}, "outputs": [], "source": [ @@ -1163,7 +1148,7 @@ }, { "cell_type": "markdown", - "id": "112", + "id": "111", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1172,7 +1157,7 @@ { "cell_type": "code", "execution_count": null, - "id": "113", + "id": "112", "metadata": {}, "outputs": [], "source": [ @@ -1182,7 +1167,7 @@ { "cell_type": "code", "execution_count": null, - "id": "114", + "id": "113", "metadata": {}, "outputs": [], "source": [ @@ -1191,7 +1176,7 @@ }, { "cell_type": "markdown", - "id": "115", + "id": "114", "metadata": {}, "source": [ "#### Save Project State" @@ -1200,7 +1185,7 @@ { "cell_type": "code", "execution_count": null, - "id": "116", + "id": "115", "metadata": {}, "outputs": [], "source": [ @@ -1209,7 +1194,7 @@ }, { "cell_type": "markdown", - "id": "117", + "id": "116", "metadata": {}, "source": [ "### Perform Fit 2/5\n", @@ -1220,7 +1205,7 @@ { "cell_type": "code", "execution_count": null, - "id": "118", + "id": "117", "metadata": {}, "outputs": [], "source": [ @@ -1232,7 +1217,7 @@ }, { "cell_type": "markdown", - "id": "119", + "id": "118", "metadata": {}, "source": [ "Show free parameters after selection." @@ -1241,7 +1226,7 @@ { "cell_type": "code", "execution_count": null, - "id": "120", + "id": "119", "metadata": {}, "outputs": [], "source": [ @@ -1250,7 +1235,7 @@ }, { "cell_type": "markdown", - "id": "121", + "id": "120", "metadata": {}, "source": [ "#### Run Fitting" @@ -1259,7 +1244,7 @@ { "cell_type": "code", "execution_count": null, - "id": "122", + "id": "121", "metadata": {}, "outputs": [], "source": [ @@ -1269,7 +1254,7 @@ }, { "cell_type": "markdown", - "id": "123", + "id": "122", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1278,7 +1263,7 @@ { "cell_type": "code", "execution_count": null, - "id": "124", + "id": "123", "metadata": {}, "outputs": [], "source": [ @@ -1288,7 +1273,7 @@ { "cell_type": "code", "execution_count": null, - "id": "125", + "id": "124", "metadata": {}, "outputs": [], "source": [ @@ -1297,7 +1282,7 @@ }, { "cell_type": "markdown", - "id": "126", + "id": "125", "metadata": {}, "source": [ "#### Save Project State" @@ -1306,7 +1291,7 @@ { "cell_type": "code", "execution_count": null, - "id": "127", + "id": "126", "metadata": {}, "outputs": [], "source": [ @@ -1315,7 +1300,7 @@ }, { "cell_type": "markdown", - "id": "128", + "id": "127", "metadata": {}, "source": [ "### Perform Fit 3/5\n", @@ -1326,7 +1311,7 @@ { "cell_type": "code", "execution_count": null, - "id": "129", + "id": "128", "metadata": {}, "outputs": [], "source": [ @@ -1338,7 +1323,7 @@ }, { "cell_type": "markdown", - "id": "130", + "id": "129", "metadata": {}, "source": [ "Show free parameters after selection." @@ -1347,7 +1332,7 @@ { "cell_type": "code", "execution_count": null, - "id": "131", + "id": "130", "metadata": {}, "outputs": [], "source": [ @@ -1356,7 +1341,7 @@ }, { "cell_type": "markdown", - "id": "132", + "id": "131", "metadata": {}, "source": [ "#### Run Fitting" @@ -1365,7 +1350,7 @@ { "cell_type": "code", "execution_count": null, - "id": "133", + "id": "132", "metadata": {}, "outputs": [], "source": [ @@ -1375,7 +1360,7 @@ }, { "cell_type": "markdown", - "id": "134", + "id": "133", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1384,7 +1369,7 @@ { "cell_type": "code", "execution_count": null, - "id": "135", + "id": "134", "metadata": {}, "outputs": [], "source": [ @@ -1394,7 +1379,7 @@ { "cell_type": "code", "execution_count": null, - "id": "136", + "id": "135", "metadata": {}, "outputs": [], "source": [ @@ -1403,7 +1388,7 @@ }, { "cell_type": "markdown", - "id": "137", + "id": "136", "metadata": {}, "source": [ "#### Save Project State" @@ -1412,7 +1397,7 @@ { "cell_type": "code", "execution_count": null, - "id": "138", + "id": "137", "metadata": {}, "outputs": [], "source": [ @@ -1421,7 +1406,7 @@ }, { "cell_type": "markdown", - "id": "139", + "id": "138", "metadata": {}, "source": [ "### Perform Fit 4/5\n", @@ -1434,7 +1419,7 @@ { "cell_type": "code", "execution_count": null, - "id": "140", + "id": "139", "metadata": {}, "outputs": [], "source": [ @@ -1450,7 +1435,7 @@ }, { "cell_type": "markdown", - "id": "141", + "id": "140", "metadata": {}, "source": [ "Set constraints." @@ -1459,7 +1444,7 @@ { "cell_type": "code", "execution_count": null, - "id": "142", + "id": "141", "metadata": {}, "outputs": [], "source": [ @@ -1468,7 +1453,7 @@ }, { "cell_type": "markdown", - "id": "143", + "id": "142", "metadata": {}, "source": [ "Show defined constraints." @@ -1477,7 +1462,7 @@ { "cell_type": "code", "execution_count": null, - "id": "144", + "id": "143", "metadata": {}, "outputs": [], "source": [ @@ -1486,7 +1471,7 @@ }, { "cell_type": "markdown", - "id": "145", + "id": "144", "metadata": {}, "source": [ "Show free parameters." @@ -1495,7 +1480,7 @@ { "cell_type": "code", "execution_count": null, - "id": "146", + "id": "145", "metadata": {}, "outputs": [], "source": [ @@ -1504,7 +1489,7 @@ }, { "cell_type": "markdown", - "id": "147", + "id": "146", "metadata": {}, "source": [ "#### Run Fitting" @@ -1513,7 +1498,7 @@ { "cell_type": "code", "execution_count": null, - "id": "148", + "id": "147", "metadata": {}, "outputs": [], "source": [ @@ -1523,7 +1508,7 @@ }, { "cell_type": "markdown", - "id": "149", + "id": "148", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1532,7 +1517,7 @@ { "cell_type": "code", "execution_count": null, - "id": "150", + "id": "149", "metadata": {}, "outputs": [], "source": [ @@ -1542,7 +1527,7 @@ { "cell_type": "code", "execution_count": null, - "id": "151", + "id": "150", "metadata": {}, "outputs": [], "source": [ @@ -1551,7 +1536,7 @@ }, { "cell_type": "markdown", - "id": "152", + "id": "151", "metadata": {}, "source": [ "#### Save Project State" @@ -1560,7 +1545,7 @@ { "cell_type": "code", "execution_count": null, - "id": "153", + "id": "152", "metadata": {}, "outputs": [], "source": [ @@ -1569,7 +1554,7 @@ }, { "cell_type": "markdown", - "id": "154", + "id": "153", "metadata": {}, "source": [ "### Perform Fit 5/5\n", @@ -1582,7 +1567,7 @@ { "cell_type": "code", "execution_count": null, - "id": "155", + "id": "154", "metadata": {}, "outputs": [], "source": [ @@ -1598,7 +1583,7 @@ }, { "cell_type": "markdown", - "id": "156", + "id": "155", "metadata": {}, "source": [ "Set more constraints." @@ -1607,7 +1592,7 @@ { "cell_type": "code", "execution_count": null, - "id": "157", + "id": "156", "metadata": {}, "outputs": [], "source": [ @@ -1618,7 +1603,7 @@ }, { "cell_type": "markdown", - "id": "158", + "id": "157", "metadata": {}, "source": [ "Show defined constraints." @@ -1627,7 +1612,7 @@ { "cell_type": "code", "execution_count": null, - "id": "159", + "id": "158", "metadata": { "lines_to_next_cell": 2 }, @@ -1638,7 +1623,7 @@ }, { "cell_type": "markdown", - "id": "160", + "id": "159", "metadata": {}, "source": [ "Set structure parameters to be refined." @@ -1647,7 +1632,7 @@ { "cell_type": "code", "execution_count": null, - "id": "161", + "id": "160", "metadata": {}, "outputs": [], "source": [ @@ -1656,7 +1641,7 @@ }, { "cell_type": "markdown", - "id": "162", + "id": "161", "metadata": {}, "source": [ "Show free parameters after selection." @@ -1665,7 +1650,7 @@ { "cell_type": "code", "execution_count": null, - "id": "163", + "id": "162", "metadata": {}, "outputs": [], "source": [ @@ -1674,7 +1659,7 @@ }, { "cell_type": "markdown", - "id": "164", + "id": "163", "metadata": {}, "source": [ "#### Run Fitting" @@ -1683,7 +1668,7 @@ { "cell_type": "code", "execution_count": null, - "id": "165", + "id": "164", "metadata": {}, "outputs": [], "source": [ @@ -1693,7 +1678,7 @@ }, { "cell_type": "markdown", - "id": "166", + "id": "165", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -1702,7 +1687,7 @@ { "cell_type": "code", "execution_count": null, - "id": "167", + "id": "166", "metadata": {}, "outputs": [], "source": [ @@ -1712,7 +1697,7 @@ { "cell_type": "code", "execution_count": null, - "id": "168", + "id": "167", "metadata": {}, "outputs": [], "source": [ @@ -1721,7 +1706,7 @@ }, { "cell_type": "markdown", - "id": "169", + "id": "168", "metadata": {}, "source": [ "#### Save Project State" @@ -1730,7 +1715,7 @@ { "cell_type": "code", "execution_count": null, - "id": "170", + "id": "169", "metadata": {}, "outputs": [], "source": [ @@ -1739,7 +1724,7 @@ }, { "cell_type": "markdown", - "id": "171", + "id": "170", "metadata": {}, "source": [ "## Step 5: Summary\n", @@ -1749,7 +1734,7 @@ }, { "cell_type": "markdown", - "id": "172", + "id": "171", "metadata": {}, "source": [ "#### Show Project Summary" @@ -1758,7 +1743,7 @@ { "cell_type": "code", "execution_count": null, - "id": "173", + "id": "172", "metadata": {}, "outputs": [], "source": [ @@ -1768,7 +1753,7 @@ { "cell_type": "code", "execution_count": null, - "id": "174", + "id": "173", "metadata": {}, "outputs": [], "source": [] diff --git a/docs/docs/tutorials/ed-4.ipynb b/docs/docs/tutorials/ed-4.ipynb index 5bba8e84..fe06bd1e 100644 --- a/docs/docs/tutorials/ed-4.ipynb +++ b/docs/docs/tutorials/ed-4.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0e6027e8", + "id": "16833253", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: PbSO4, NPD + XRD\n", @@ -54,7 +39,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -63,7 +48,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -75,7 +60,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Define Structure\n", @@ -89,7 +74,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -98,7 +83,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "#### Set Space Group" @@ -107,7 +92,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -116,7 +101,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -125,7 +110,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -136,7 +121,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -145,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": { "lines_to_next_cell": 2 }, @@ -200,7 +185,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Define Experiments\n", @@ -216,7 +201,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -225,7 +210,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "#### Create Experiment" @@ -234,7 +219,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -247,7 +232,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "#### Set Instrument" @@ -256,7 +241,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -266,7 +251,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -275,7 +260,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -288,7 +273,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "#### Set Background" @@ -296,7 +281,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "20", "metadata": {}, "source": [ "Select the background type." @@ -305,7 +290,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -314,7 +299,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "22", "metadata": {}, "source": [ "Add background points." @@ -323,7 +308,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -342,7 +327,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "24", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -351,7 +336,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -360,7 +345,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "26", "metadata": {}, "source": [ "### Experiment 2: xrd\n", @@ -371,7 +356,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -380,7 +365,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "28", "metadata": {}, "source": [ "#### Create Experiment" @@ -389,7 +374,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -402,7 +387,7 @@ }, { "cell_type": "markdown", - "id": "31", + "id": "30", "metadata": {}, "source": [ "#### Set Instrument" @@ -411,7 +396,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -421,7 +406,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "32", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -430,7 +415,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -443,7 +428,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "34", "metadata": {}, "source": [ "#### Set Background" @@ -451,7 +436,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "35", "metadata": {}, "source": [ "Select background type." @@ -460,7 +445,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -469,7 +454,7 @@ }, { "cell_type": "markdown", - "id": "38", + "id": "37", "metadata": {}, "source": [ "Add background points." @@ -478,7 +463,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -495,7 +480,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "39", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -504,7 +489,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -513,7 +498,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "41", "metadata": {}, "source": [ "## Define Project\n", @@ -527,7 +512,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "42", "metadata": {}, "outputs": [], "source": [ @@ -536,7 +521,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "43", "metadata": {}, "source": [ "#### Add Structure" @@ -545,7 +530,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -554,7 +539,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "45", "metadata": {}, "source": [ "#### Add Experiments" @@ -563,7 +548,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -573,7 +558,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "47", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -587,7 +572,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -596,7 +581,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "49", "metadata": {}, "source": [ "#### Set Minimizer" @@ -605,7 +590,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -614,7 +599,7 @@ }, { "cell_type": "markdown", - "id": "52", + "id": "51", "metadata": {}, "source": [ "#### Set Fitting Parameters\n", @@ -625,7 +610,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -636,7 +621,7 @@ }, { "cell_type": "markdown", - "id": "54", + "id": "53", "metadata": {}, "source": [ "Set experiment parameters to be optimized." @@ -645,7 +630,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -662,7 +647,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -681,7 +666,7 @@ }, { "cell_type": "markdown", - "id": "57", + "id": "56", "metadata": {}, "source": [ "#### Perform Fit" @@ -690,7 +675,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "57", "metadata": {}, "outputs": [], "source": [ @@ -700,7 +685,7 @@ }, { "cell_type": "markdown", - "id": "59", + "id": "58", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -709,7 +694,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "59", "metadata": {}, "outputs": [], "source": [ @@ -719,7 +704,7 @@ { "cell_type": "code", "execution_count": null, - "id": "61", + "id": "60", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-5.ipynb b/docs/docs/tutorials/ed-5.ipynb index ef7e672a..f94b7564 100644 --- a/docs/docs/tutorials/ed-5.ipynb +++ b/docs/docs/tutorials/ed-5.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1302dcf1", + "id": "a3281949", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: Co2SiO4, D20\n", @@ -48,7 +33,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -57,7 +42,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +54,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Define Structure\n", @@ -83,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -92,7 +77,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "#### Set Space Group" @@ -101,7 +86,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -111,7 +96,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -120,7 +105,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -131,7 +116,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -140,7 +125,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -202,7 +187,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Define Experiment\n", @@ -216,7 +201,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -225,7 +210,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "#### Create Experiment" @@ -234,7 +219,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -243,7 +228,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "#### Set Instrument" @@ -252,7 +237,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -262,7 +247,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -271,7 +256,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -282,7 +267,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "#### Set Background" @@ -291,7 +276,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -313,7 +298,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -322,7 +307,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -331,7 +316,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "23", "metadata": {}, "source": [ "## Define Project\n", @@ -345,7 +330,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -354,7 +339,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "25", "metadata": {}, "source": [ "#### Set Plotting Engine" @@ -363,7 +348,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -374,7 +359,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "27", "metadata": {}, "source": [ "#### Add Structure" @@ -383,7 +368,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -392,7 +377,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "29", "metadata": {}, "source": [ "#### Add Experiment" @@ -401,7 +386,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -410,7 +395,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "31", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -424,7 +409,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -433,7 +418,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "33", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -442,7 +427,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -452,7 +437,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -461,7 +446,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "36", "metadata": {}, "source": [ "#### Set Free Parameters" @@ -470,7 +455,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -501,7 +486,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -520,7 +505,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "39", "metadata": {}, "source": [ "#### Set Constraints\n", @@ -531,7 +516,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -547,7 +532,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "41", "metadata": {}, "source": [ "Set constraints." @@ -556,7 +541,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "42", "metadata": { "lines_to_next_cell": 2 }, @@ -569,7 +554,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "43", "metadata": {}, "source": [ "#### Run Fitting" @@ -578,7 +563,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -588,7 +573,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "45", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -597,7 +582,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -607,7 +592,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -616,7 +601,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "48", "metadata": {}, "source": [ "## Summary\n", @@ -626,7 +611,7 @@ }, { "cell_type": "markdown", - "id": "50", + "id": "49", "metadata": {}, "source": [ "#### Show Project Summary" @@ -635,7 +620,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51", + "id": "50", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-6.ipynb b/docs/docs/tutorials/ed-6.ipynb index c4e2d34d..92130f6b 100644 --- a/docs/docs/tutorials/ed-6.ipynb +++ b/docs/docs/tutorials/ed-6.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a1e678e6", + "id": "48d300a4", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: HS, HRPT\n", @@ -48,7 +33,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -57,7 +42,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +54,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Define Structure\n", @@ -83,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -92,7 +77,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "#### Set Space Group" @@ -101,7 +86,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -111,7 +96,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": { "lines_to_next_cell": 2 }, @@ -122,7 +107,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -132,7 +117,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -141,7 +126,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -194,7 +179,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Define Experiment\n", @@ -208,7 +193,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -217,7 +202,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "#### Create Experiment" @@ -226,7 +211,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -235,7 +220,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "#### Set Instrument" @@ -244,7 +229,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -254,7 +239,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -263,7 +248,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -276,7 +261,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "#### Set Background" @@ -285,7 +270,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -302,7 +287,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -311,7 +296,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -320,7 +305,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "23", "metadata": {}, "source": [ "## Define Project\n", @@ -334,7 +319,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -343,7 +328,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "25", "metadata": {}, "source": [ "#### Set Plotting Engine" @@ -352,7 +337,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -363,7 +348,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "27", "metadata": {}, "source": [ "#### Add Structure" @@ -372,7 +357,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -381,7 +366,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "29", "metadata": {}, "source": [ "#### Add Experiment" @@ -390,7 +375,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -399,7 +384,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "31", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -413,7 +398,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -422,7 +407,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "33", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -431,7 +416,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -441,7 +426,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -450,7 +435,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "36", "metadata": {}, "source": [ "### Perform Fit 1/5\n", @@ -461,7 +446,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -474,7 +459,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "38", "metadata": {}, "source": [ "Show free parameters after selection." @@ -483,7 +468,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -492,7 +477,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "40", "metadata": {}, "source": [ "#### Run Fitting" @@ -501,7 +486,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -511,7 +496,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "42", "metadata": {}, "outputs": [], "source": [ @@ -520,7 +505,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "43", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -529,7 +514,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "44", "metadata": {}, "outputs": [], "source": [ @@ -539,7 +524,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -548,7 +533,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "46", "metadata": {}, "source": [ "### Perform Fit 2/5\n", @@ -559,7 +544,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -574,7 +559,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "48", "metadata": {}, "source": [ "Show free parameters after selection." @@ -583,7 +568,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -592,7 +577,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "50", "metadata": {}, "source": [ "#### Run Fitting" @@ -601,7 +586,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "51", "metadata": {}, "outputs": [], "source": [ @@ -611,7 +596,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -620,7 +605,7 @@ }, { "cell_type": "markdown", - "id": "54", + "id": "53", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -629,7 +614,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -639,7 +624,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -648,7 +633,7 @@ }, { "cell_type": "markdown", - "id": "57", + "id": "56", "metadata": {}, "source": [ "### Perform Fit 3/5\n", @@ -659,7 +644,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "57", "metadata": {}, "outputs": [], "source": [ @@ -672,7 +657,7 @@ }, { "cell_type": "markdown", - "id": "59", + "id": "58", "metadata": {}, "source": [ "Show free parameters after selection." @@ -681,7 +666,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "59", "metadata": {}, "outputs": [], "source": [ @@ -690,7 +675,7 @@ }, { "cell_type": "markdown", - "id": "61", + "id": "60", "metadata": {}, "source": [ "#### Run Fitting" @@ -699,7 +684,7 @@ { "cell_type": "code", "execution_count": null, - "id": "62", + "id": "61", "metadata": {}, "outputs": [], "source": [ @@ -709,7 +694,7 @@ { "cell_type": "code", "execution_count": null, - "id": "63", + "id": "62", "metadata": {}, "outputs": [], "source": [ @@ -718,7 +703,7 @@ }, { "cell_type": "markdown", - "id": "64", + "id": "63", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -727,7 +712,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65", + "id": "64", "metadata": {}, "outputs": [], "source": [ @@ -737,7 +722,7 @@ { "cell_type": "code", "execution_count": null, - "id": "66", + "id": "65", "metadata": {}, "outputs": [], "source": [ @@ -746,7 +731,7 @@ }, { "cell_type": "markdown", - "id": "67", + "id": "66", "metadata": {}, "source": [ "### Perform Fit 4/5\n", @@ -757,7 +742,7 @@ { "cell_type": "code", "execution_count": null, - "id": "68", + "id": "67", "metadata": {}, "outputs": [], "source": [ @@ -770,7 +755,7 @@ }, { "cell_type": "markdown", - "id": "69", + "id": "68", "metadata": {}, "source": [ "Show free parameters after selection." @@ -779,7 +764,7 @@ { "cell_type": "code", "execution_count": null, - "id": "70", + "id": "69", "metadata": {}, "outputs": [], "source": [ @@ -788,7 +773,7 @@ }, { "cell_type": "markdown", - "id": "71", + "id": "70", "metadata": {}, "source": [ "#### Run Fitting" @@ -797,7 +782,7 @@ { "cell_type": "code", "execution_count": null, - "id": "72", + "id": "71", "metadata": {}, "outputs": [], "source": [ @@ -807,7 +792,7 @@ { "cell_type": "code", "execution_count": null, - "id": "73", + "id": "72", "metadata": {}, "outputs": [], "source": [ @@ -816,7 +801,7 @@ }, { "cell_type": "markdown", - "id": "74", + "id": "73", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -825,7 +810,7 @@ { "cell_type": "code", "execution_count": null, - "id": "75", + "id": "74", "metadata": {}, "outputs": [], "source": [ @@ -835,7 +820,7 @@ { "cell_type": "code", "execution_count": null, - "id": "76", + "id": "75", "metadata": {}, "outputs": [], "source": [ @@ -844,7 +829,7 @@ }, { "cell_type": "markdown", - "id": "77", + "id": "76", "metadata": {}, "source": [ "## Summary\n", @@ -854,7 +839,7 @@ }, { "cell_type": "markdown", - "id": "78", + "id": "77", "metadata": {}, "source": [ "#### Show Project Summary" @@ -863,7 +848,7 @@ { "cell_type": "code", "execution_count": null, - "id": "79", + "id": "78", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-7.ipynb b/docs/docs/tutorials/ed-7.ipynb index 7284f08d..12ad852a 100644 --- a/docs/docs/tutorials/ed-7.ipynb +++ b/docs/docs/tutorials/ed-7.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5dd7adf5", + "id": "8cc5d312", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: Si, SEPD\n", @@ -48,7 +33,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -57,7 +42,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +54,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Define Structure\n", @@ -83,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -92,7 +77,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "#### Set Space Group" @@ -101,7 +86,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -111,7 +96,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -120,7 +105,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -129,7 +114,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -138,7 +123,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -154,7 +139,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Define Experiment\n", @@ -168,7 +153,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -177,7 +162,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "#### Create Experiment" @@ -186,7 +171,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -197,7 +182,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "#### Set Instrument" @@ -206,7 +191,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -218,7 +203,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -227,7 +212,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -241,7 +226,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "#### Set Peak Asymmetry" @@ -250,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -260,7 +245,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "#### Set Background" @@ -269,7 +254,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -280,7 +265,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "23", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -289,7 +274,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -298,7 +283,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "25", "metadata": {}, "source": [ "## Define Project\n", @@ -312,7 +297,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -321,7 +306,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "27", "metadata": {}, "source": [ "#### Add Structure" @@ -330,7 +315,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -339,7 +324,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "29", "metadata": {}, "source": [ "#### Add Experiment" @@ -348,7 +333,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -357,7 +342,7 @@ }, { "cell_type": "markdown", - "id": "32", + "id": "31", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -371,7 +356,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -380,7 +365,7 @@ }, { "cell_type": "markdown", - "id": "34", + "id": "33", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -389,7 +374,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "34", "metadata": {}, "outputs": [], "source": [ @@ -399,7 +384,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "35", "metadata": {}, "source": [ "### Perform Fit 1/5\n", @@ -410,7 +395,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -422,7 +407,7 @@ }, { "cell_type": "markdown", - "id": "38", + "id": "37", "metadata": {}, "source": [ "Show free parameters after selection." @@ -431,7 +416,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "38", "metadata": {}, "outputs": [], "source": [ @@ -440,7 +425,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "39", "metadata": {}, "source": [ "#### Run Fitting" @@ -449,7 +434,7 @@ { "cell_type": "code", "execution_count": null, - "id": "41", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -459,7 +444,7 @@ }, { "cell_type": "markdown", - "id": "42", + "id": "41", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -468,7 +453,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "42", "metadata": {}, "outputs": [], "source": [ @@ -478,7 +463,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -487,7 +472,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "44", "metadata": {}, "source": [ "### Perform Fit 2/5\n", @@ -498,7 +483,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -508,7 +493,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "46", "metadata": {}, "source": [ "Show free parameters after selection." @@ -517,7 +502,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -526,7 +511,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "48", "metadata": {}, "source": [ "#### Run Fitting" @@ -535,7 +520,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -545,7 +530,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "50", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -554,7 +539,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "51", "metadata": {}, "outputs": [], "source": [ @@ -564,7 +549,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53", + "id": "52", "metadata": {}, "outputs": [], "source": [ @@ -573,7 +558,7 @@ }, { "cell_type": "markdown", - "id": "54", + "id": "53", "metadata": {}, "source": [ "### Perform Fit 3/5\n", @@ -584,7 +569,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -594,7 +579,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "55", "metadata": {}, "source": [ "Set more parameters to be refined." @@ -603,7 +588,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57", + "id": "56", "metadata": {}, "outputs": [], "source": [ @@ -614,7 +599,7 @@ }, { "cell_type": "markdown", - "id": "58", + "id": "57", "metadata": {}, "source": [ "Show free parameters after selection." @@ -623,7 +608,7 @@ { "cell_type": "code", "execution_count": null, - "id": "59", + "id": "58", "metadata": {}, "outputs": [], "source": [ @@ -632,7 +617,7 @@ }, { "cell_type": "markdown", - "id": "60", + "id": "59", "metadata": {}, "source": [ "#### Run Fitting" @@ -641,7 +626,7 @@ { "cell_type": "code", "execution_count": null, - "id": "61", + "id": "60", "metadata": {}, "outputs": [], "source": [ @@ -651,7 +636,7 @@ }, { "cell_type": "markdown", - "id": "62", + "id": "61", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -660,7 +645,7 @@ { "cell_type": "code", "execution_count": null, - "id": "63", + "id": "62", "metadata": {}, "outputs": [], "source": [ @@ -670,7 +655,7 @@ { "cell_type": "code", "execution_count": null, - "id": "64", + "id": "63", "metadata": {}, "outputs": [], "source": [ @@ -679,7 +664,7 @@ }, { "cell_type": "markdown", - "id": "65", + "id": "64", "metadata": {}, "source": [ "### Perform Fit 4/5\n", @@ -690,7 +675,7 @@ { "cell_type": "code", "execution_count": null, - "id": "66", + "id": "65", "metadata": {}, "outputs": [], "source": [ @@ -699,7 +684,7 @@ }, { "cell_type": "markdown", - "id": "67", + "id": "66", "metadata": {}, "source": [ "Show free parameters after selection." @@ -708,7 +693,7 @@ { "cell_type": "code", "execution_count": null, - "id": "68", + "id": "67", "metadata": {}, "outputs": [], "source": [ @@ -717,7 +702,7 @@ }, { "cell_type": "markdown", - "id": "69", + "id": "68", "metadata": {}, "source": [ "#### Run Fitting" @@ -726,7 +711,7 @@ { "cell_type": "code", "execution_count": null, - "id": "70", + "id": "69", "metadata": {}, "outputs": [], "source": [ @@ -736,7 +721,7 @@ }, { "cell_type": "markdown", - "id": "71", + "id": "70", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -745,7 +730,7 @@ { "cell_type": "code", "execution_count": null, - "id": "72", + "id": "71", "metadata": {}, "outputs": [], "source": [ @@ -755,7 +740,7 @@ { "cell_type": "code", "execution_count": null, - "id": "73", + "id": "72", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-8.ipynb b/docs/docs/tutorials/ed-8.ipynb index 79e03966..80aec4e4 100644 --- a/docs/docs/tutorials/ed-8.ipynb +++ b/docs/docs/tutorials/ed-8.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f45eca14", + "id": "0bc22f40", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: NCAF, WISH\n", @@ -51,7 +36,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -60,7 +45,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -72,7 +57,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Define Structure\n", @@ -86,7 +71,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -95,7 +80,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "#### Set Space Group" @@ -104,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -114,7 +99,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -123,7 +108,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -132,7 +117,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -141,7 +126,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -203,7 +188,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Define Experiment\n", @@ -217,7 +202,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -227,7 +212,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -236,7 +221,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "14", "metadata": {}, "source": [ "#### Create Experiment" @@ -245,7 +230,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -259,7 +244,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -272,7 +257,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "#### Set Instrument" @@ -281,7 +266,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -294,7 +279,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -306,7 +291,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "20", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -315,7 +300,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -331,7 +316,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -346,7 +331,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "23", "metadata": {}, "source": [ "#### Set Background" @@ -355,7 +340,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -399,7 +384,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -441,7 +426,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "26", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -450,7 +435,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -460,7 +445,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -469,7 +454,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "29", "metadata": {}, "source": [ "#### Set Excluded Regions" @@ -478,7 +463,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -489,7 +474,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -499,7 +484,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "32", "metadata": {}, "source": [ "## Define Project\n", @@ -513,7 +498,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -522,7 +507,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "34", "metadata": {}, "source": [ "#### Set Plotting Engine" @@ -531,7 +516,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -542,7 +527,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "36", "metadata": {}, "source": [ "#### Add Structure" @@ -551,7 +536,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -560,7 +545,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "38", "metadata": {}, "source": [ "#### Add Experiment" @@ -569,7 +554,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -579,7 +564,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "40", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -593,7 +578,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -602,7 +587,7 @@ }, { "cell_type": "markdown", - "id": "43", + "id": "42", "metadata": {}, "source": [ "#### Set Fit Mode" @@ -611,7 +596,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -620,7 +605,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "44", "metadata": {}, "source": [ "#### Set Free Parameters" @@ -629,7 +614,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -644,7 +629,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "46", "metadata": {}, "outputs": [], "source": [ @@ -667,7 +652,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "47", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -676,7 +661,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -686,7 +671,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -695,7 +680,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "50", "metadata": {}, "source": [ "#### Run Fitting" @@ -704,7 +689,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "51", "metadata": {}, "outputs": [], "source": [ @@ -714,7 +699,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "52", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -723,7 +708,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -733,7 +718,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -742,7 +727,7 @@ }, { "cell_type": "markdown", - "id": "56", + "id": "55", "metadata": {}, "source": [ "## Summary\n", @@ -752,7 +737,7 @@ }, { "cell_type": "markdown", - "id": "57", + "id": "56", "metadata": {}, "source": [ "#### Show Project Summary" @@ -761,7 +746,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "57", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/docs/tutorials/ed-9.ipynb b/docs/docs/tutorials/ed-9.ipynb index b9b845da..cc1f0ba4 100644 --- a/docs/docs/tutorials/ed-9.ipynb +++ b/docs/docs/tutorials/ed-9.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e2e25ed0", + "id": "9c2a5d62", "metadata": { "tags": [ "hide-in-docs" @@ -19,24 +19,9 @@ " %pip install easydiffraction" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "# Check whether easydiffraction is installed; install it if needed.\n", - "# Required for remote environments such as Google Colab.\n", - "import importlib.util\n", - "\n", - "if importlib.util.find_spec('easydiffraction') is None:\n", - " %pip install easydiffraction" - ] - }, { "cell_type": "markdown", - "id": "1", + "id": "0", "metadata": {}, "source": [ "# Structure Refinement: LBCO+Si, McStas\n", @@ -48,7 +33,7 @@ }, { "cell_type": "markdown", - "id": "2", + "id": "1", "metadata": {}, "source": [ "## Import Library" @@ -57,7 +42,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +54,7 @@ }, { "cell_type": "markdown", - "id": "4", + "id": "3", "metadata": {}, "source": [ "## Define Structures\n", @@ -83,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -92,7 +77,7 @@ }, { "cell_type": "markdown", - "id": "6", + "id": "5", "metadata": {}, "source": [ "#### Set Space Group" @@ -101,7 +86,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -111,7 +96,7 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -120,7 +105,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -129,7 +114,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -138,7 +123,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -184,7 +169,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "### Create Structure 2: Si" @@ -193,7 +178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -202,7 +187,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "#### Set Space Group" @@ -211,7 +196,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -221,7 +206,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "#### Set Unit Cell" @@ -230,7 +215,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -239,7 +224,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "#### Set Atom Sites" @@ -248,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -265,7 +250,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "## Define Experiment\n", @@ -279,7 +264,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -288,7 +273,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "#### Create Experiment" @@ -297,7 +282,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -313,7 +298,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "23", "metadata": {}, "source": [ "#### Set Instrument" @@ -322,7 +307,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -334,7 +319,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "25", "metadata": {}, "source": [ "#### Set Peak Profile" @@ -343,7 +328,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -359,7 +344,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "27", "metadata": {}, "source": [ "#### Set Background" @@ -367,7 +352,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "28", "metadata": {}, "source": [ "Select the background type." @@ -376,7 +361,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -385,7 +370,7 @@ }, { "cell_type": "markdown", - "id": "31", + "id": "30", "metadata": {}, "source": [ "Add background points." @@ -394,7 +379,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -415,7 +400,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "32", "metadata": {}, "source": [ "#### Set Linked Phases" @@ -424,7 +409,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -434,7 +419,7 @@ }, { "cell_type": "markdown", - "id": "35", + "id": "34", "metadata": {}, "source": [ "## Define Project\n", @@ -448,7 +433,7 @@ { "cell_type": "code", "execution_count": null, - "id": "36", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -457,7 +442,7 @@ }, { "cell_type": "markdown", - "id": "37", + "id": "36", "metadata": {}, "source": [ "#### Add Structures" @@ -466,7 +451,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -476,7 +461,7 @@ }, { "cell_type": "markdown", - "id": "39", + "id": "38", "metadata": {}, "source": [ "#### Show Structures" @@ -485,7 +470,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40", + "id": "39", "metadata": {}, "outputs": [], "source": [ @@ -494,7 +479,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "40", "metadata": {}, "source": [ "#### Add Experiments" @@ -503,7 +488,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -512,7 +497,7 @@ }, { "cell_type": "markdown", - "id": "43", + "id": "42", "metadata": {}, "source": [ "#### Set Excluded Regions\n", @@ -523,7 +508,7 @@ { "cell_type": "code", "execution_count": null, - "id": "44", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -532,7 +517,7 @@ }, { "cell_type": "markdown", - "id": "45", + "id": "44", "metadata": {}, "source": [ "Add excluded regions." @@ -541,7 +526,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -551,7 +536,7 @@ }, { "cell_type": "markdown", - "id": "47", + "id": "46", "metadata": {}, "source": [ "Show excluded regions." @@ -560,7 +545,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -569,7 +554,7 @@ }, { "cell_type": "markdown", - "id": "49", + "id": "48", "metadata": {}, "source": [ "Show measured data after adding excluded regions." @@ -578,7 +563,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "49", "metadata": {}, "outputs": [], "source": [ @@ -587,7 +572,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "50", "metadata": {}, "source": [ "Show experiment as CIF." @@ -596,7 +581,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "51", "metadata": {}, "outputs": [], "source": [ @@ -605,7 +590,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "52", "metadata": {}, "source": [ "## Perform Analysis\n", @@ -619,7 +604,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -628,7 +613,7 @@ }, { "cell_type": "markdown", - "id": "55", + "id": "54", "metadata": {}, "source": [ "#### Set Fitting Parameters\n", @@ -639,7 +624,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "55", "metadata": {}, "outputs": [], "source": [ @@ -652,7 +637,7 @@ }, { "cell_type": "markdown", - "id": "57", + "id": "56", "metadata": {}, "source": [ "Set experiment parameters to be optimized." @@ -661,7 +646,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "57", "metadata": {}, "outputs": [], "source": [ @@ -682,7 +667,7 @@ }, { "cell_type": "markdown", - "id": "59", + "id": "58", "metadata": {}, "source": [ "#### Perform Fit" @@ -691,7 +676,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "59", "metadata": {}, "outputs": [], "source": [ @@ -701,7 +686,7 @@ }, { "cell_type": "markdown", - "id": "61", + "id": "60", "metadata": {}, "source": [ "#### Plot Measured vs Calculated" @@ -710,7 +695,7 @@ { "cell_type": "code", "execution_count": null, - "id": "62", + "id": "61", "metadata": {}, "outputs": [], "source": [ @@ -720,7 +705,7 @@ { "cell_type": "code", "execution_count": null, - "id": "63", + "id": "62", "metadata": {}, "outputs": [], "source": [] From 8a30ae1b50b1a34c9bff734c986d90504a2cbc3d Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 00:46:35 +0200 Subject: [PATCH 23/51] Remove CSV writing from fit() to fix sequential crash recovery --- src/easydiffraction/analysis/analysis.py | 58 ------------------------ src/easydiffraction/utils/utils.py | 2 +- 2 files changed, 1 insertion(+), 59 deletions(-) diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index 4218b480..80c5c1a3 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: BSD-3-Clause from contextlib import suppress -from pathlib import Path import numpy as np import pandas as pd @@ -645,44 +644,6 @@ def fit(self, verbosity: str | None = None) -> None: expt_names = experiments.names num_expts = len(expt_names) - # CSV setup: write results if the project has been saved - csv_path = None - csv_header = None - csv_free_names = None - csv_diffrn_fields = None - if self.project.info.path is not None: - from easydiffraction.analysis.sequential import _META_COLUMNS # noqa: PLC0415 - from easydiffraction.analysis.sequential import _append_to_csv # noqa: PLC0415 - from easydiffraction.analysis.sequential import _write_csv_header # noqa: PLC0415 - - csv_path = Path(self.project.info.path) / 'analysis' / 'results.csv' - csv_path.parent.mkdir(parents=True, exist_ok=True) - - all_params = ( - self.project.structures.parameters + self.project.experiments.parameters - ) - csv_free_names = [ - p.unique_name - for p in all_params - if isinstance(p, Parameter) and not p.constrained and p.free - ] - - first_expt = list(experiments.values())[0] - csv_diffrn_fields = [] - if hasattr(first_expt, 'diffrn'): - csv_diffrn_fields = [ - p.name - for p in first_expt.diffrn.parameters - if hasattr(p, 'name') and p.name not in ('type',) - ] - - csv_header = list(_META_COLUMNS) - csv_header.extend(f'diffrn.{f}' for f in csv_diffrn_fields) - for name in csv_free_names: - csv_header.append(name) - csv_header.append(f'{name}.uncertainty') - _write_csv_header(csv_path, csv_header) - # Short mode: print header and create display handle once short_headers = ['experiment', 'χ²', 'iterations', 'status'] short_alignments = ['left', 'right', 'right', 'center'] @@ -730,25 +691,6 @@ def fit(self, verbosity: str | None = None) -> None: self._parameter_snapshots[expt_name] = snapshot self.fit_results = results - # Append row to CSV - if csv_path is not None: - row = { - 'file_path': expt_name, - 'fit_success': results.success, - 'chi_squared': results.chi_square, - 'reduced_chi_squared': results.reduced_chi_square, - 'n_iterations': (self.fitter.minimizer.tracker.best_iteration or 0), - } - if hasattr(experiment, 'diffrn') and csv_diffrn_fields: - for p in experiment.diffrn.parameters: - if hasattr(p, 'name') and p.name not in ('type',): - row[f'diffrn.{p.name}'] = p.value - for uname in csv_free_names: - if uname in snapshot: - row[uname] = snapshot[uname]['value'] - row[f'{uname}.uncertainty'] = snapshot[uname]['uncertainty'] - _append_to_csv(csv_path, csv_header, [row]) - # Short mode: append one summary row and update in-place if verb is VerbosityEnum.SHORT: chi2_str = ( diff --git a/src/easydiffraction/utils/utils.py b/src/easydiffraction/utils/utils.py index 50af41fe..0108422d 100644 --- a/src/easydiffraction/utils/utils.py +++ b/src/easydiffraction/utils/utils.py @@ -73,7 +73,7 @@ def _fetch_data_index() -> dict: _validate_url(index_url) # macOS: sha256sum index.json - index_hash = 'sha256:1032db0c04ef713c3f5209020a14b18dcdc3cfa4d995664ae5c9f5096f4508d4' + index_hash = 'sha256:dfde966a084579c2103b0d35ed3e8688ddc6941335e251d3e1735a792ca06144' destination_dirname = 'easydiffraction' destination_fname = 'data-index.json' cache_dir = pooch.os_cache(destination_dirname) From 592a2c5e96095dac8295592a0f5a7bd0b4b8f0f0 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 13:33:07 +0200 Subject: [PATCH 24/51] Fix extract_project_from_zip to find project.cif from zip contents --- src/easydiffraction/io/ascii.py | 17 ++++++++++------- tests/unit/easydiffraction/io/test_ascii.py | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/src/easydiffraction/io/ascii.py b/src/easydiffraction/io/ascii.py index 45061787..1bba03b0 100644 --- a/src/easydiffraction/io/ascii.py +++ b/src/easydiffraction/io/ascii.py @@ -58,15 +58,18 @@ def extract_project_from_zip( extract_dir = Path(tempfile.mkdtemp(prefix='ed_zip_')) with zipfile.ZipFile(zip_path, 'r') as zf: - zf.extractall(extract_dir) + # Determine the project directory from the archive contents + # *before* extraction, so we are not confused by unrelated + # project.cif files already present in the destination. + project_cif_entries = [name for name in zf.namelist() if name.endswith('project.cif')] + if not project_cif_entries: + msg = f'No project.cif found in ZIP archive: {zip_path}' + raise ValueError(msg) - # Find the project directory (the one containing project.cif) - project_cifs = list(extract_dir.rglob('project.cif')) - if not project_cifs: - msg = f'No project.cif found in ZIP archive: {zip_path}' - raise ValueError(msg) + zf.extractall(extract_dir) - return str(project_cifs[0].parent.resolve()) + project_cif_path = extract_dir / project_cif_entries[0] + return str(project_cif_path.parent.resolve()) def extract_data_paths_from_zip( diff --git a/tests/unit/easydiffraction/io/test_ascii.py b/tests/unit/easydiffraction/io/test_ascii.py index 1410f8e9..ab180701 100644 --- a/tests/unit/easydiffraction/io/test_ascii.py +++ b/tests/unit/easydiffraction/io/test_ascii.py @@ -65,6 +65,23 @@ def test_destination_creates_directory(self, tmp_path): assert dest.is_dir() assert 'proj' in result + def test_ignores_other_project_cif_in_destination(self, tmp_path): + """Only finds project.cif from the zip, not pre-existing ones.""" + dest = tmp_path / 'data' + # Pre-create another project directory in the destination + other_project = dest / 'aaa_other' / 'project.cif' + other_project.parent.mkdir(parents=True) + other_project.write_text('other\n') + + zip_path = tmp_path / 'proj.zip' + with zipfile.ZipFile(zip_path, 'w') as zf: + zf.writestr('target_project/project.cif', 'correct\n') + + result = extract_project_from_zip(zip_path, destination=dest) + + assert 'target_project' in result + assert 'aaa_other' not in result + class TestExtractDataPathsFromZip: """Tests for extract_data_paths_from_zip.""" From 9df29ab1b2c8d9073c339a792e0169ada79784d2 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 16:13:05 +0200 Subject: [PATCH 25/51] Add unit tests --- .../analysis/test_sequential.py | 301 ++++++++++++++++++ .../unit/easydiffraction/core/test_factory.py | 246 +++++++++++++- .../easydiffraction/core/test_metadata.py | 101 ++++++ 3 files changed, 647 insertions(+), 1 deletion(-) create mode 100644 tests/unit/easydiffraction/analysis/test_sequential.py create mode 100644 tests/unit/easydiffraction/core/test_metadata.py diff --git a/tests/unit/easydiffraction/analysis/test_sequential.py b/tests/unit/easydiffraction/analysis/test_sequential.py new file mode 100644 index 00000000..856aa8ec --- /dev/null +++ b/tests/unit/easydiffraction/analysis/test_sequential.py @@ -0,0 +1,301 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Unit tests for sequential fitting helper functions.""" + +from __future__ import annotations + +import csv + +import pytest + +from easydiffraction.analysis.sequential import SequentialFitTemplate +from easydiffraction.analysis.sequential import _META_COLUMNS +from easydiffraction.analysis.sequential import _append_to_csv +from easydiffraction.analysis.sequential import _build_csv_header +from easydiffraction.analysis.sequential import _read_csv_for_recovery +from easydiffraction.analysis.sequential import _write_csv_header + + +# ------------------------------------------------------------------ +# Fixture: a minimal template +# ------------------------------------------------------------------ + + +def _minimal_template( + free_names=None, + diffrn_fields=None, +): + if free_names is None: + free_names = ['cell.a', 'cell.b'] + if diffrn_fields is None: + diffrn_fields = [] + return SequentialFitTemplate( + structure_cif='', + experiment_cif='', + initial_params={}, + free_param_unique_names=free_names, + alias_defs=[], + constraint_defs=[], + constraints_enabled=False, + minimizer_tag='lmfit', + calculator_tag='cryspy', + diffrn_field_names=diffrn_fields, + ) + + +# ------------------------------------------------------------------ +# _build_csv_header +# ------------------------------------------------------------------ + + +class TestBuildCsvHeader: + def test_meta_columns_first(self): + template = _minimal_template(free_names=[], diffrn_fields=[]) + header = _build_csv_header(template) + assert header == list(_META_COLUMNS) + + def test_diffrn_fields_after_meta(self): + template = _minimal_template( + free_names=[], + diffrn_fields=['ambient_temperature'], + ) + header = _build_csv_header(template) + assert header[-1] == 'diffrn.ambient_temperature' + + def test_param_columns_with_uncertainty(self): + template = _minimal_template(free_names=['cell.a']) + header = _build_csv_header(template) + assert 'cell.a' in header + assert 'cell.a.uncertainty' in header + # Uncertainty follows value + idx = header.index('cell.a') + assert header[idx + 1] == 'cell.a.uncertainty' + + def test_full_header_order(self): + template = _minimal_template( + free_names=['p1', 'p2'], + diffrn_fields=['temp'], + ) + header = _build_csv_header(template) + expected = [ + *_META_COLUMNS, + 'diffrn.temp', + 'p1', + 'p1.uncertainty', + 'p2', + 'p2.uncertainty', + ] + assert header == expected + + +# ------------------------------------------------------------------ +# _write_csv_header / _append_to_csv +# ------------------------------------------------------------------ + + +class TestCsvWriteAndAppend: + def test_write_creates_file_with_header(self, tmp_path): + csv_path = tmp_path / 'results.csv' + header = ['file_path', 'chi_squared', 'param_a'] + _write_csv_header(csv_path, header) + + with csv_path.open() as f: + reader = csv.reader(f) + first_row = next(reader) + assert first_row == header + + def test_append_adds_rows(self, tmp_path): + csv_path = tmp_path / 'results.csv' + header = ['file_path', 'value'] + _write_csv_header(csv_path, header) + + _append_to_csv( + csv_path, + header, + [ + {'file_path': 'a.dat', 'value': 1.0}, + {'file_path': 'b.dat', 'value': 2.0}, + ], + ) + + with csv_path.open() as f: + rows = list(csv.DictReader(f)) + assert len(rows) == 2 + assert rows[0]['file_path'] == 'a.dat' + assert rows[1]['value'] == '2.0' + + def test_append_ignores_extra_keys(self, tmp_path): + csv_path = tmp_path / 'results.csv' + header = ['file_path'] + _write_csv_header(csv_path, header) + + _append_to_csv( + csv_path, + header, + [ + {'file_path': 'a.dat', 'extra_key': 'ignored'}, + ], + ) + + with csv_path.open() as f: + rows = list(csv.DictReader(f)) + assert len(rows) == 1 + assert 'extra_key' not in rows[0] + + +# ------------------------------------------------------------------ +# _read_csv_for_recovery +# ------------------------------------------------------------------ + + +class TestReadCsvForRecovery: + def test_returns_empty_when_no_file(self, tmp_path): + csv_path = tmp_path / 'nonexistent.csv' + fitted, params = _read_csv_for_recovery(csv_path) + assert fitted == set() + assert params is None + + def test_returns_fitted_file_paths(self, tmp_path): + csv_path = tmp_path / 'results.csv' + header = list(_META_COLUMNS) + ['cell.a', 'cell.a.uncertainty'] + _write_csv_header(csv_path, header) + _append_to_csv( + csv_path, + header, + [ + { + 'file_path': '/data/a.dat', + 'fit_success': 'True', + 'chi_squared': '5.0', + 'reduced_chi_squared': '2.5', + 'n_iterations': '10', + 'cell.a': '3.89', + 'cell.a.uncertainty': '0.01', + }, + { + 'file_path': '/data/b.dat', + 'fit_success': 'False', + 'chi_squared': '', + 'reduced_chi_squared': '', + 'n_iterations': '0', + 'cell.a': '', + 'cell.a.uncertainty': '', + }, + ], + ) + + fitted, params = _read_csv_for_recovery(csv_path) + assert fitted == {'/data/a.dat', '/data/b.dat'} + + def test_returns_last_successful_params(self, tmp_path): + csv_path = tmp_path / 'results.csv' + header = list(_META_COLUMNS) + ['cell.a', 'cell.a.uncertainty'] + _write_csv_header(csv_path, header) + _append_to_csv( + csv_path, + header, + [ + { + 'file_path': 'a.dat', + 'fit_success': 'True', + 'chi_squared': '5.0', + 'reduced_chi_squared': '2.5', + 'n_iterations': '10', + 'cell.a': '3.89', + 'cell.a.uncertainty': '0.01', + }, + { + 'file_path': 'b.dat', + 'fit_success': 'True', + 'chi_squared': '4.0', + 'reduced_chi_squared': '2.0', + 'n_iterations': '8', + 'cell.a': '3.90', + 'cell.a.uncertainty': '0.02', + }, + ], + ) + + _, params = _read_csv_for_recovery(csv_path) + assert params is not None + # Should return the LAST successful row's params + assert params['cell.a'] == pytest.approx(3.90) + + def test_skips_meta_columns_and_diffrn_and_uncertainty(self, tmp_path): + csv_path = tmp_path / 'results.csv' + header = list(_META_COLUMNS) + [ + 'diffrn.temp', + 'cell.a', + 'cell.a.uncertainty', + ] + _write_csv_header(csv_path, header) + _append_to_csv( + csv_path, + header, + [ + { + 'file_path': 'a.dat', + 'fit_success': 'True', + 'chi_squared': '5.0', + 'reduced_chi_squared': '2.5', + 'n_iterations': '10', + 'diffrn.temp': '300', + 'cell.a': '3.89', + 'cell.a.uncertainty': '0.01', + }, + ], + ) + + _, params = _read_csv_for_recovery(csv_path) + assert params is not None + assert 'cell.a' in params + # Meta columns, diffrn, and uncertainty should be excluded + assert 'file_path' not in params + assert 'fit_success' not in params + assert 'diffrn.temp' not in params + assert 'cell.a.uncertainty' not in params + + def test_returns_none_params_when_no_successful_rows(self, tmp_path): + csv_path = tmp_path / 'results.csv' + header = list(_META_COLUMNS) + ['cell.a', 'cell.a.uncertainty'] + _write_csv_header(csv_path, header) + _append_to_csv( + csv_path, + header, + [ + { + 'file_path': 'a.dat', + 'fit_success': 'False', + 'chi_squared': '', + 'reduced_chi_squared': '', + 'n_iterations': '0', + 'cell.a': '', + 'cell.a.uncertainty': '', + }, + ], + ) + + _, params = _read_csv_for_recovery(csv_path) + assert params is None + + +# ------------------------------------------------------------------ +# SequentialFitTemplate +# ------------------------------------------------------------------ + + +class TestSequentialFitTemplate: + def test_is_frozen(self): + template = _minimal_template() + with pytest.raises(AttributeError): + template.minimizer_tag = 'bumps' + + def test_fields_accessible(self): + template = _minimal_template( + free_names=['cell.a'], + diffrn_fields=['temp'], + ) + assert template.free_param_unique_names == ['cell.a'] + assert template.diffrn_field_names == ['temp'] + assert template.minimizer_tag == 'lmfit' + assert template.calculator_tag == 'cryspy' diff --git a/tests/unit/easydiffraction/core/test_factory.py b/tests/unit/easydiffraction/core/test_factory.py index 78150ea5..430d9694 100644 --- a/tests/unit/easydiffraction/core/test_factory.py +++ b/tests/unit/easydiffraction/core/test_factory.py @@ -1,2 +1,246 @@ -# SPDX-FileCopyrightText: 2025 EasyScience contributors +# SPDX-FileCopyrightText: 2026 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause +"""Tests for FactoryBase: registration, creation, defaults, and querying.""" + +from __future__ import annotations + +import pytest + +from easydiffraction.core.factory import FactoryBase +from easydiffraction.core.metadata import CalculatorSupport +from easydiffraction.core.metadata import Compatibility +from easydiffraction.core.metadata import TypeInfo + + +# ------------------------------------------------------------------ +# Helpers: a fresh factory + stub classes for each test +# ------------------------------------------------------------------ + + +def _make_factory(): + """Return a fresh FactoryBase subclass with its own registry.""" + + class _Factory(FactoryBase): + _default_rules = {frozenset(): 'alpha'} + + return _Factory + + +def _make_stub(tag, description='', compatibility=None, calculator_support=None): + """Return a stub class with the given TypeInfo.""" + + class _Stub: + type_info = TypeInfo(tag=tag, description=description) + + if compatibility is not None: + _Stub.compatibility = compatibility + if calculator_support is not None: + _Stub.calculator_support = calculator_support + return _Stub + + +# ------------------------------------------------------------------ +# Registration +# ------------------------------------------------------------------ + + +class TestRegister: + def test_register_adds_class_to_registry(self): + factory = _make_factory() + stub = _make_stub('alpha') + factory.register(stub) + assert stub in factory._registry + + def test_register_returns_class_unmodified(self): + factory = _make_factory() + stub = _make_stub('alpha') + result = factory.register(stub) + assert result is stub + + def test_register_multiple_classes(self): + factory = _make_factory() + stub_a = _make_stub('a') + stub_b = _make_stub('b') + factory.register(stub_a) + factory.register(stub_b) + assert len(factory._registry) == 2 + + def test_subclass_registries_are_independent(self): + class _FactoryA(FactoryBase): + _default_rules = {frozenset(): 'a'} + + class _FactoryB(FactoryBase): + _default_rules = {frozenset(): 'b'} + + stub_a = _make_stub('a') + stub_b = _make_stub('b') + _FactoryA.register(stub_a) + _FactoryB.register(stub_b) + assert stub_a in _FactoryA._registry + assert stub_a not in _FactoryB._registry + assert stub_b in _FactoryB._registry + assert stub_b not in _FactoryA._registry + + +# ------------------------------------------------------------------ +# Supported tags +# ------------------------------------------------------------------ + + +class TestSupportedTags: + def test_returns_empty_list_for_empty_registry(self): + factory = _make_factory() + assert factory.supported_tags() == [] + + def test_returns_tags_from_registered_classes(self): + factory = _make_factory() + factory.register(_make_stub('alpha')) + factory.register(_make_stub('beta')) + tags = factory.supported_tags() + assert 'alpha' in tags + assert 'beta' in tags + assert len(tags) == 2 + + +# ------------------------------------------------------------------ +# Default tag resolution +# ------------------------------------------------------------------ + + +class TestDefaultTag: + def test_universal_fallback(self): + factory = _make_factory() + factory.register(_make_stub('alpha')) + assert factory.default_tag() == 'alpha' + + def test_specific_rule_wins_over_universal(self): + class _Factory(FactoryBase): + _default_rules = { + frozenset(): 'fallback', + frozenset({('mode', 'fast')}): 'fast_impl', + } + + assert _Factory.default_tag(mode='fast') == 'fast_impl' + + def test_largest_subset_wins(self): + class _Factory(FactoryBase): + _default_rules = { + frozenset(): 'fallback', + frozenset({('a', 1)}): 'one_match', + frozenset({('a', 1), ('b', 2)}): 'two_match', + } + + assert _Factory.default_tag(a=1, b=2) == 'two_match' + + def test_raises_when_no_rule_matches(self): + class _Factory(FactoryBase): + _default_rules = { + frozenset({('mode', 'fast')}): 'fast_impl', + } + + with pytest.raises(ValueError, match='No default rule matches'): + _Factory.default_tag(mode='slow') + + def test_raises_for_empty_rules(self): + class _Factory(FactoryBase): + _default_rules = {} + + with pytest.raises(ValueError, match='No default rule matches'): + _Factory.default_tag() + + +# ------------------------------------------------------------------ +# Creation +# ------------------------------------------------------------------ + + +class TestCreate: + def test_creates_instance_of_registered_class(self): + factory = _make_factory() + stub = _make_stub('alpha') + factory.register(stub) + instance = factory.create('alpha') + assert isinstance(instance, stub) + + def test_raises_for_unknown_tag(self): + factory = _make_factory() + factory.register(_make_stub('alpha')) + with pytest.raises(ValueError, match="Unsupported type: 'unknown'"): + factory.create('unknown') + + def test_raises_for_empty_registry(self): + factory = _make_factory() + with pytest.raises(ValueError, match="Unsupported type: 'anything'"): + factory.create('anything') + + +# ------------------------------------------------------------------ +# create_default_for +# ------------------------------------------------------------------ + + +class TestCreateDefaultFor: + def test_creates_default_instance(self): + factory = _make_factory() + stub = _make_stub('alpha') + factory.register(stub) + instance = factory.create_default_for() + assert isinstance(instance, stub) + + +# ------------------------------------------------------------------ +# supported_for (filtering by compatibility and calculator) +# ------------------------------------------------------------------ + + +class TestSupportedFor: + def test_returns_all_when_no_filters(self): + factory = _make_factory() + factory.register(_make_stub('a')) + factory.register(_make_stub('b')) + result = factory.supported_for() + assert len(result) == 2 + + def test_filters_by_compatibility(self): + factory = _make_factory() + compat_a = Compatibility(sample_form=frozenset({'powder'})) + compat_b = Compatibility(sample_form=frozenset({'single_crystal'})) + factory.register( + _make_stub('a', compatibility=compat_a), + ) + factory.register( + _make_stub('b', compatibility=compat_b), + ) + result = factory.supported_for(sample_form='powder') + assert len(result) == 1 + assert result[0].type_info.tag == 'a' + + def test_filters_by_calculator(self): + factory = _make_factory() + calc_a = CalculatorSupport(calculators=frozenset({'cryspy'})) + calc_b = CalculatorSupport(calculators=frozenset({'crysfml'})) + factory.register( + _make_stub('a', calculator_support=calc_a), + ) + factory.register( + _make_stub('b', calculator_support=calc_b), + ) + result = factory.supported_for(calculator='cryspy') + assert len(result) == 1 + assert result[0].type_info.tag == 'a' + + def test_no_compat_means_accepts_all(self): + factory = _make_factory() + factory.register(_make_stub('a')) # no compatibility attr + result = factory.supported_for(sample_form='anything') + assert len(result) == 1 + + def test_empty_compat_frozenset_means_accepts_all(self): + factory = _make_factory() + compat = Compatibility() # all frozensets empty + factory.register(_make_stub('a', compatibility=compat)) + result = factory.supported_for( + sample_form='powder', + scattering_type='bragg', + ) + assert len(result) == 1 diff --git a/tests/unit/easydiffraction/core/test_metadata.py b/tests/unit/easydiffraction/core/test_metadata.py new file mode 100644 index 00000000..f8327a3f --- /dev/null +++ b/tests/unit/easydiffraction/core/test_metadata.py @@ -0,0 +1,101 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for metadata dataclasses: TypeInfo, Compatibility, CalculatorSupport.""" + +from __future__ import annotations + +import pytest + +from easydiffraction.core.metadata import CalculatorSupport +from easydiffraction.core.metadata import Compatibility +from easydiffraction.core.metadata import TypeInfo + + +# ------------------------------------------------------------------ +# TypeInfo +# ------------------------------------------------------------------ + + +class TestTypeInfo: + def test_tag_and_description(self): + info = TypeInfo(tag='pseudo-voigt', description='Pseudo-Voigt peak') + assert info.tag == 'pseudo-voigt' + assert info.description == 'Pseudo-Voigt peak' + + def test_default_description_is_empty(self): + info = TypeInfo(tag='test') + assert info.description == '' + + def test_is_frozen(self): + info = TypeInfo(tag='test') + with pytest.raises(AttributeError): + info.tag = 'other' + + +# ------------------------------------------------------------------ +# Compatibility +# ------------------------------------------------------------------ + + +class TestCompatibility: + def test_empty_compat_accepts_anything(self): + compat = Compatibility() + assert compat.supports( + sample_form='powder', + scattering_type='bragg', + beam_mode='cwl', + radiation_probe='neutron', + ) + + def test_matches_when_value_in_frozenset(self): + compat = Compatibility(sample_form=frozenset({'powder', 'single_crystal'})) + assert compat.supports(sample_form='powder') + assert compat.supports(sample_form='single_crystal') + + def test_rejects_when_value_not_in_frozenset(self): + compat = Compatibility(sample_form=frozenset({'powder'})) + assert not compat.supports(sample_form='single_crystal') + + def test_none_values_are_ignored(self): + compat = Compatibility(sample_form=frozenset({'powder'})) + assert compat.supports(sample_form=None) + assert compat.supports() + + def test_multiple_axes(self): + compat = Compatibility( + sample_form=frozenset({'powder'}), + beam_mode=frozenset({'cwl'}), + ) + assert compat.supports(sample_form='powder', beam_mode='cwl') + assert not compat.supports(sample_form='powder', beam_mode='tof') + + def test_is_frozen(self): + compat = Compatibility() + with pytest.raises(AttributeError): + compat.sample_form = frozenset({'powder'}) + + +# ------------------------------------------------------------------ +# CalculatorSupport +# ------------------------------------------------------------------ + + +class TestCalculatorSupport: + def test_empty_calculators_accepts_any(self): + support = CalculatorSupport() + assert support.supports('cryspy') + assert support.supports('anything') + + def test_matches_when_calculator_in_set(self): + support = CalculatorSupport(calculators=frozenset({'cryspy', 'crysfml'})) + assert support.supports('cryspy') + assert support.supports('crysfml') + + def test_rejects_when_calculator_not_in_set(self): + support = CalculatorSupport(calculators=frozenset({'cryspy'})) + assert not support.supports('pdffit2') + + def test_is_frozen(self): + support = CalculatorSupport() + with pytest.raises(AttributeError): + support.calculators = frozenset({'new'}) From 6cf1bf254ec8df21a3ca6b5fa14855259aa1e32a Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 16:13:40 +0200 Subject: [PATCH 26/51] Add functional tests --- pixi.lock | 4 +- pixi.toml | 1 + pyproject.toml | 2 +- tests/functional/conftest.py | 29 +++ tests/functional/test_experiment_workflow.py | 130 ++++++++++++ tests/functional/test_fitting_workflow.py | 185 ++++++++++++++++++ tests/functional/test_project_lifecycle.py | 118 +++++++++++ tests/functional/test_structure_workflow.py | 139 +++++++++++++ .../functional/test_switchable_categories.py | 65 ++++++ 9 files changed, 670 insertions(+), 3 deletions(-) create mode 100644 tests/functional/conftest.py create mode 100644 tests/functional/test_experiment_workflow.py create mode 100644 tests/functional/test_fitting_workflow.py create mode 100644 tests/functional/test_project_lifecycle.py create mode 100644 tests/functional/test_structure_workflow.py create mode 100644 tests/functional/test_switchable_categories.py diff --git a/pixi.lock b/pixi.lock index bad9c518..86558730 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+dev16 - sha256: 0eb0448bd4a2c86436efcbc125ccbb1dd6e77155dbff3b2e41b69b7780c5733a + version: 0.11.1+devdirty28 + sha256: b5f40819c823325eba37344a6661ba2508c37401aeea74228b4d97f4e6f90730 requires_dist: - asciichartpy - asteval diff --git a/pixi.toml b/pixi.toml index 9d9cb3fd..7ec6ed93 100644 --- a/pixi.toml +++ b/pixi.toml @@ -93,6 +93,7 @@ default = { features = ['default', 'py-max'] } ################## unit-tests = 'python -m pytest tests/unit/ --color=yes -v' +functional-tests = 'python -m pytest tests/functional/ --color=yes -v' integration-tests = 'python -m pytest tests/integration/ --color=yes -n auto -v' script-tests = 'python -m pytest tools/test_scripts.py --color=yes -n auto -v' notebook-tests = 'python -m pytest --nbmake docs/docs/tutorials/ --nbmake-timeout=1200 --color=yes -n auto -v' diff --git a/pyproject.toml b/pyproject.toml index 76cb25c3..caf41bd5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -169,7 +169,7 @@ source = ['src'] # Limit coverage to the source code directory [tool.coverage.report] show_missing = true # Show missing lines skip_covered = false # Skip files with 100% coverage in the report -fail_under = 60 # Minimum coverage percentage to pass +fail_under = 70 # Minimum coverage percentage to pass ########################## # Configuration for pytest diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py new file mode 100644 index 00000000..6da03de1 --- /dev/null +++ b/tests/functional/conftest.py @@ -0,0 +1,29 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Shared fixtures for functional (API-behaviour) tests.""" + +from __future__ import annotations + +import tempfile + +import pytest + +TEMP_DIR = tempfile.gettempdir() + + +@pytest.fixture +def project(tmp_path): + """Create a minimal unsaved Project for functional tests.""" + from easydiffraction import Project # noqa: PLC0415 + + return Project(name='func_test') + + +@pytest.fixture +def saved_project(tmp_path): + """Create a minimal Project saved to a temp directory.""" + from easydiffraction import Project # noqa: PLC0415 + + project = Project(name='func_test') + project.save_as(str(tmp_path / 'func_project')) + return project diff --git a/tests/functional/test_experiment_workflow.py b/tests/functional/test_experiment_workflow.py new file mode 100644 index 00000000..ffc3da25 --- /dev/null +++ b/tests/functional/test_experiment_workflow.py @@ -0,0 +1,130 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Functional tests for experiment workflow: create, configure, verify params.""" + +from __future__ import annotations + +import tempfile + +import pytest + +from easydiffraction import Project +from easydiffraction import download_data + +TEMP_DIR = tempfile.gettempdir() + + +def _make_project_with_experiment(): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + + # Add a structure (required for experiment linking) + project.structures.create(name='lbco') + s = project.structures['lbco'] + s.space_group.name_h_m = 'P m -3 m' + s.cell.length_a = 3.89 + s.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.5, + ) + + # Add experiment from data file + data_path = download_data(id=3, destination=TEMP_DIR) + project.experiments.add_from_data_path( + name='hrpt', + data_path=data_path, + ) + return project + + +class TestExperimentCreation: + def test_add_experiment_from_data_path(self): + project = _make_project_with_experiment() + assert len(project.experiments) == 1 + assert 'hrpt' in project.experiments.names + + def test_access_experiment_by_name(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + assert expt is not None + + +class TestInstrument: + def test_set_wavelength(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.instrument.setup_wavelength = 1.494 + assert expt.instrument.setup_wavelength.value == pytest.approx(1.494) + + def test_set_twotheta_offset(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.instrument.calib_twotheta_offset = 0.5 + assert expt.instrument.calib_twotheta_offset.value == pytest.approx(0.5) + + def test_twotheta_offset_is_fittable(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.instrument.calib_twotheta_offset.free = True + assert expt.instrument.calib_twotheta_offset.free is True + + +class TestPeakProfile: + def test_set_peak_profile_params(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.peak.broad_gauss_u = 0.1 + expt.peak.broad_gauss_v = -0.2 + expt.peak.broad_gauss_w = 0.3 + assert expt.peak.broad_gauss_u.value == pytest.approx(0.1) + assert expt.peak.broad_gauss_v.value == pytest.approx(-0.2) + assert expt.peak.broad_gauss_w.value == pytest.approx(0.3) + + +class TestBackground: + def test_create_background_points(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.background.create(id='1', x=10, y=170) + expt.background.create(id='2', x=165, y=170) + assert len(expt.background) == 2 + + def test_background_y_is_fittable(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.background.create(id='1', x=10, y=170) + expt.background['1'].y.free = True + assert expt.background['1'].y.free is True + + +class TestLinkedPhases: + def test_create_linked_phase(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.linked_phases.create(id='lbco', scale=9.0) + assert len(expt.linked_phases) == 1 + + def test_linked_phase_scale_is_fittable(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.linked_phases.create(id='lbco', scale=9.0) + expt.linked_phases['lbco'].scale.free = True + assert expt.linked_phases['lbco'].scale.free is True + + +class TestExcludedRegions: + def test_create_excluded_regions(self): + project = _make_project_with_experiment() + expt = project.experiments['hrpt'] + expt.excluded_regions.create(id='1', start=0, end=10) + expt.excluded_regions.create(id='2', start=160, end=180) + assert len(expt.excluded_regions) == 2 diff --git a/tests/functional/test_fitting_workflow.py b/tests/functional/test_fitting_workflow.py new file mode 100644 index 00000000..9250bd9d --- /dev/null +++ b/tests/functional/test_fitting_workflow.py @@ -0,0 +1,185 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Functional tests for analysis: aliases, constraints, fitting.""" + +from __future__ import annotations + +import tempfile + +import pytest + +from easydiffraction import Project +from easydiffraction import download_data + +TEMP_DIR = tempfile.gettempdir() + + +def _make_fit_ready_project(): + """Build a minimal project ready for fitting.""" + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + + # Structure + project.structures.create(name='lbco') + s = project.structures['lbco'] + s.space_group.name_h_m = 'P m -3 m' + s.cell.length_a = 3.89 + s.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.5, + ) + s.atom_sites.create( + label='Ba', + type_symbol='Ba', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.5, + ) + s.atom_sites.create( + label='Co', + type_symbol='Co', + fract_x=0.5, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='b', + b_iso=0.5, + ) + s.atom_sites.create( + label='O', + type_symbol='O', + fract_x=0, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='c', + b_iso=0.5, + ) + + # Experiment + data_path = download_data(id=3, destination=TEMP_DIR) + project.experiments.add_from_data_path( + name='hrpt', + data_path=data_path, + ) + expt = project.experiments['hrpt'] + expt.instrument.setup_wavelength = 1.494 + expt.instrument.calib_twotheta_offset = 0.6225 + expt.peak.broad_gauss_u = 0.0834 + expt.peak.broad_gauss_v = -0.1168 + expt.peak.broad_gauss_w = 0.123 + expt.peak.broad_lorentz_x = 0 + expt.peak.broad_lorentz_y = 0.0797 + expt.background.create(id='1', x=10, y=170) + expt.background.create(id='2', x=165, y=170) + expt.linked_phases.create(id='lbco', scale=9.0) + + # Free parameters + s.cell.length_a.free = True + expt.linked_phases['lbco'].scale.free = True + expt.instrument.calib_twotheta_offset.free = True + expt.background['1'].y.free = True + expt.background['2'].y.free = True + + return project + + +class TestAliases: + def test_create_alias(self): + project = _make_fit_ready_project() + s = project.structures['lbco'] + project.analysis.aliases.create( + label='biso_La', + param=s.atom_sites['La'].b_iso, + ) + assert len(project.analysis.aliases) == 1 + + def test_create_multiple_aliases(self): + project = _make_fit_ready_project() + s = project.structures['lbco'] + project.analysis.aliases.create( + label='biso_La', + param=s.atom_sites['La'].b_iso, + ) + project.analysis.aliases.create( + label='biso_Ba', + param=s.atom_sites['Ba'].b_iso, + ) + assert len(project.analysis.aliases) == 2 + + +class TestConstraints: + def test_create_constraint(self): + project = _make_fit_ready_project() + s = project.structures['lbco'] + project.analysis.aliases.create( + label='biso_La', + param=s.atom_sites['La'].b_iso, + ) + project.analysis.aliases.create( + label='biso_Ba', + param=s.atom_sites['Ba'].b_iso, + ) + project.analysis.constraints.create( + expression='biso_Ba = biso_La', + ) + assert len(project.analysis.constraints) == 1 + + +class TestFitting: + def test_fit_produces_results(self): + project = _make_fit_ready_project() + project.analysis.fit(verbosity='silent') + assert project.analysis.fit_results is not None + assert project.analysis.fit_results.success is True + + def test_fit_improves_chi_squared(self): + project = _make_fit_ready_project() + project.analysis.fit(verbosity='silent') + results = project.analysis.fit_results + assert results.reduced_chi_square is not None + # A well-configured fit should get reasonable chi-squared + assert results.reduced_chi_square < 100 + + def test_fit_updates_parameter_values(self): + project = _make_fit_ready_project() + initial_a = project.structures['lbco'].cell.length_a.value + project.analysis.fit(verbosity='silent') + fitted_a = project.structures['lbco'].cell.length_a.value + # Fitting should have adjusted the cell parameter + assert fitted_a != pytest.approx(initial_a, abs=1e-6) + + def test_fit_with_constraints(self): + project = _make_fit_ready_project() + s = project.structures['lbco'] + s.atom_sites['La'].b_iso.free = True + s.atom_sites['Ba'].b_iso.free = True + + project.analysis.aliases.create( + label='biso_La', + param=s.atom_sites['La'].b_iso, + ) + project.analysis.aliases.create( + label='biso_Ba', + param=s.atom_sites['Ba'].b_iso, + ) + project.analysis.constraints.create( + expression='biso_Ba = biso_La', + ) + + project.analysis.fit(verbosity='silent') + assert project.analysis.fit_results.success is True + # Constrained params should be equal after fitting + la_biso = s.atom_sites['La'].b_iso.value + ba_biso = s.atom_sites['Ba'].b_iso.value + assert la_biso == pytest.approx(ba_biso, rel=1e-3) diff --git a/tests/functional/test_project_lifecycle.py b/tests/functional/test_project_lifecycle.py new file mode 100644 index 00000000..7d9c5a19 --- /dev/null +++ b/tests/functional/test_project_lifecycle.py @@ -0,0 +1,118 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Functional tests for Project lifecycle: create, save, load.""" + +from __future__ import annotations + +import pytest + +from easydiffraction import Project + + +class TestProjectCreate: + def test_create_default_project(self): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + assert project.name == 'untitled_project' + + def test_create_named_project(self): + Project._loading = True + try: + project = Project(name='my_project') + finally: + Project._loading = False + assert project.name == 'my_project' + + def test_project_has_empty_structures(self): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + assert len(project.structures) == 0 + + def test_project_has_empty_experiments(self): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + assert len(project.experiments) == 0 + + def test_project_has_analysis(self): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + assert project.analysis is not None + + +class TestProjectSaveLoad: + def test_save_creates_directory_structure(self, tmp_path): + Project._loading = True + try: + project = Project(name='test') + finally: + Project._loading = False + project.save_as(str(tmp_path / 'proj')) + + assert (tmp_path / 'proj' / 'project.cif').is_file() + assert (tmp_path / 'proj' / 'structures').is_dir() + assert (tmp_path / 'proj' / 'experiments').is_dir() + assert (tmp_path / 'proj' / 'analysis').is_dir() + + def test_save_and_load_preserves_name(self, tmp_path): + Project._loading = True + try: + project = Project(name='round_trip') + finally: + Project._loading = False + project.save_as(str(tmp_path / 'proj')) + + loaded = Project.load(str(tmp_path / 'proj')) + assert loaded.name == 'round_trip' + + def test_load_nonexistent_raises(self, tmp_path): + with pytest.raises(FileNotFoundError): + Project.load(str(tmp_path / 'nonexistent')) + + +class TestProjectVerbosity: + def test_default_verbosity_is_full(self): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + assert project.verbosity == 'full' + + def test_set_verbosity_short(self): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + project.verbosity = 'short' + assert project.verbosity == 'short' + + def test_set_verbosity_silent(self): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + project.verbosity = 'silent' + assert project.verbosity == 'silent' + + def test_invalid_verbosity_raises(self): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + with pytest.raises(ValueError, match='invalid'): + project.verbosity = 'invalid' diff --git a/tests/functional/test_structure_workflow.py b/tests/functional/test_structure_workflow.py new file mode 100644 index 00000000..4ed2da3f --- /dev/null +++ b/tests/functional/test_structure_workflow.py @@ -0,0 +1,139 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Functional tests for structure workflow: create, set properties, verify params.""" + +from __future__ import annotations + +import pytest + +from easydiffraction import Project + + +def _make_project(): + Project._loading = True + try: + return Project() + finally: + Project._loading = False + + +class TestStructureCreation: + def test_create_structure(self): + project = _make_project() + project.structures.create(name='test') + assert len(project.structures) == 1 + assert 'test' in project.structures.names + + def test_access_structure_by_name(self): + project = _make_project() + project.structures.create(name='lbco') + structure = project.structures['lbco'] + assert structure is not None + + def test_access_nonexistent_structure_raises(self): + project = _make_project() + with pytest.raises(KeyError): + _ = project.structures['nonexistent'] + + +class TestSpaceGroup: + def test_set_space_group(self): + project = _make_project() + project.structures.create(name='test') + s = project.structures['test'] + s.space_group.name_h_m = 'P m -3 m' + assert s.space_group.name_h_m.value == 'P m -3 m' + + +class TestCell: + def test_set_cell_parameters(self): + project = _make_project() + project.structures.create(name='test') + s = project.structures['test'] + s.cell.length_a = 5.0 + s.cell.length_b = 6.0 + s.cell.length_c = 7.0 + assert s.cell.length_a.value == pytest.approx(5.0) + assert s.cell.length_b.value == pytest.approx(6.0) + assert s.cell.length_c.value == pytest.approx(7.0) + + def test_cell_parameters_are_fittable(self): + project = _make_project() + project.structures.create(name='test') + s = project.structures['test'] + s.cell.length_a.free = True + assert s.cell.length_a.free is True + + +class TestAtomSites: + def test_create_atom_site(self): + project = _make_project() + project.structures.create(name='test') + s = project.structures['test'] + s.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + b_iso=0.5, + ) + assert len(s.atom_sites) == 1 + + def test_access_atom_site_by_label(self): + project = _make_project() + project.structures.create(name='test') + s = project.structures['test'] + s.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + b_iso=0.5, + ) + atom = s.atom_sites['La'] + assert atom.fract_x.value == pytest.approx(0) + assert atom.b_iso.value == pytest.approx(0.5) + + def test_atom_site_fract_is_fittable(self): + project = _make_project() + project.structures.create(name='test') + s = project.structures['test'] + s.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0.1, + fract_y=0.2, + fract_z=0.3, + wyckoff_letter='a', + b_iso=0.5, + ) + s.atom_sites['La'].fract_x.free = True + assert s.atom_sites['La'].fract_x.free is True + + def test_multiple_atom_sites(self): + project = _make_project() + project.structures.create(name='test') + s = project.structures['test'] + s.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + b_iso=0.5, + ) + s.atom_sites.create( + label='O', + type_symbol='O', + fract_x=0.5, + fract_y=0.5, + fract_z=0, + wyckoff_letter='c', + b_iso=0.3, + ) + assert len(s.atom_sites) == 2 diff --git a/tests/functional/test_switchable_categories.py b/tests/functional/test_switchable_categories.py new file mode 100644 index 00000000..9b130152 --- /dev/null +++ b/tests/functional/test_switchable_categories.py @@ -0,0 +1,65 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Functional tests for switchable categories: type getters/setters.""" + +from __future__ import annotations + +import tempfile + +from easydiffraction import Project +from easydiffraction import download_data + +TEMP_DIR = tempfile.gettempdir() + + +def _make_project_with_experiment(): + Project._loading = True + try: + project = Project() + finally: + Project._loading = False + + project.structures.create(name='s') + data_path = download_data(id=3, destination=TEMP_DIR) + project.experiments.add_from_data_path(name='e', data_path=data_path) + return project + + +# ------------------------------------------------------------------ +# Analysis switchable categories +# ------------------------------------------------------------------ + + +class TestAnalysisSwitchableCategories: + def test_aliases_type_default(self): + project = _make_project_with_experiment() + assert project.analysis.aliases_type is not None + + def test_constraints_type_default(self): + project = _make_project_with_experiment() + assert project.analysis.constraints_type is not None + + def test_fit_mode_type_default(self): + project = _make_project_with_experiment() + assert project.analysis.fit_mode_type is not None + + def test_minimizer_default(self): + project = _make_project_with_experiment() + assert project.analysis.current_minimizer is not None + + +# ------------------------------------------------------------------ +# Experiment switchable categories +# ------------------------------------------------------------------ + + +class TestExperimentSwitchableCategories: + def test_background_type_has_getter(self): + project = _make_project_with_experiment() + expt = project.experiments['e'] + assert expt.background_type is not None + + def test_calculator_type_has_getter(self): + project = _make_project_with_experiment() + expt = project.experiments['e'] + assert expt.calculator_type is not None From 28ccbe7b04f21114382941403c6f89347053fe39 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 16:50:02 +0200 Subject: [PATCH 27/51] Add unit tests --- .../test_crystallography_coverage.py | 98 ++++++++ .../experiment/item/test_base_coverage.py | 225 ++++++++++++++++++ .../experiment/item/test_bragg_sc_coverage.py | 187 +++++++++++++++ .../experiment/item/test_enums_coverage.py | 178 ++++++++++++++ .../experiment/item/test_factory_coverage.py | 90 +++++++ .../structure/item/test_base_coverage.py | 178 ++++++++++++++ .../structure/test_collection_coverage.py | 54 +++++ 7 files changed, 1010 insertions(+) create mode 100644 tests/unit/easydiffraction/crystallography/test_crystallography_coverage.py create mode 100644 tests/unit/easydiffraction/datablocks/experiment/item/test_base_coverage.py create mode 100644 tests/unit/easydiffraction/datablocks/experiment/item/test_bragg_sc_coverage.py create mode 100644 tests/unit/easydiffraction/datablocks/experiment/item/test_enums_coverage.py create mode 100644 tests/unit/easydiffraction/datablocks/experiment/item/test_factory_coverage.py create mode 100644 tests/unit/easydiffraction/datablocks/structure/item/test_base_coverage.py create mode 100644 tests/unit/easydiffraction/datablocks/structure/test_collection_coverage.py diff --git a/tests/unit/easydiffraction/crystallography/test_crystallography_coverage.py b/tests/unit/easydiffraction/crystallography/test_crystallography_coverage.py new file mode 100644 index 00000000..7cac34a8 --- /dev/null +++ b/tests/unit/easydiffraction/crystallography/test_crystallography_coverage.py @@ -0,0 +1,98 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for crystallographic symmetry constraint functions.""" + +from easydiffraction.crystallography.crystallography import apply_cell_symmetry_constraints + + +# ------------------------------------------------------------------ +# apply_cell_symmetry_constraints +# ------------------------------------------------------------------ + + +def _make_cell(a=5.0, b=6.0, c=7.0, alpha=80.0, beta=85.0, gamma=75.0): + return { + 'lattice_a': a, + 'lattice_b': b, + 'lattice_c': c, + 'angle_alpha': alpha, + 'angle_beta': beta, + 'angle_gamma': gamma, + } + + +class TestApplyCellSymmetryConstraints: + def test_cubic(self): + cell = _make_cell(a=4.0, b=5.0, c=6.0) + result = apply_cell_symmetry_constraints(cell, 'F m -3 m') # IT 225 + assert result['lattice_a'] == 4.0 + assert result['lattice_b'] == 4.0 + assert result['lattice_c'] == 4.0 + assert result['angle_alpha'] == 90.0 + assert result['angle_beta'] == 90.0 + assert result['angle_gamma'] == 90.0 + + def test_tetragonal(self): + cell = _make_cell(a=4.0, b=5.0, c=6.0) + result = apply_cell_symmetry_constraints(cell, 'P 4/m m m') # IT 123 + assert result['lattice_a'] == 4.0 + assert result['lattice_b'] == 4.0 + assert result['lattice_c'] == 6.0 # c remains unchanged + assert result['angle_alpha'] == 90.0 + assert result['angle_beta'] == 90.0 + assert result['angle_gamma'] == 90.0 + + def test_orthorhombic(self): + cell = _make_cell(a=4.0, b=5.0, c=6.0) + result = apply_cell_symmetry_constraints(cell, 'P m m m') # IT 47 + assert result['lattice_a'] == 4.0 + assert result['lattice_b'] == 5.0 + assert result['lattice_c'] == 6.0 + assert result['angle_alpha'] == 90.0 + assert result['angle_beta'] == 90.0 + assert result['angle_gamma'] == 90.0 + + def test_hexagonal(self): + cell = _make_cell(a=4.0, b=5.0, c=6.0) + result = apply_cell_symmetry_constraints(cell, 'P 63/m m c') # IT 194 + assert result['lattice_a'] == 4.0 + assert result['lattice_b'] == 4.0 + assert result['lattice_c'] == 6.0 + assert result['angle_alpha'] == 90.0 + assert result['angle_beta'] == 90.0 + assert result['angle_gamma'] == 120.0 + + def test_trigonal(self): + cell = _make_cell(a=4.0, b=5.0, c=6.0) + result = apply_cell_symmetry_constraints(cell, 'R -3 m') # IT 166 + assert result['lattice_a'] == 4.0 + assert result['lattice_b'] == 4.0 + assert result['angle_alpha'] == 90.0 + assert result['angle_beta'] == 90.0 + assert result['angle_gamma'] == 120.0 + + def test_monoclinic(self): + cell = _make_cell(a=4.0, b=5.0, c=6.0, beta=100.0) + result = apply_cell_symmetry_constraints(cell, 'P 21/c') # IT 14 + assert result['lattice_a'] == 4.0 + assert result['lattice_b'] == 5.0 + assert result['lattice_c'] == 6.0 + assert result['angle_alpha'] == 90.0 + assert result['angle_beta'] == 100.0 # beta unconstrained + assert result['angle_gamma'] == 90.0 + + def test_triclinic(self): + cell = _make_cell(a=4.0, b=5.0, c=6.0, alpha=80.0, beta=85.0, gamma=75.0) + result = apply_cell_symmetry_constraints(cell, 'P 1') # IT 1 + assert result['lattice_a'] == 4.0 + assert result['lattice_b'] == 5.0 + assert result['lattice_c'] == 6.0 + assert result['angle_alpha'] == 80.0 + assert result['angle_beta'] == 85.0 + assert result['angle_gamma'] == 75.0 + + def test_invalid_name_hm_returns_cell_unchanged(self): + cell = _make_cell() + original = dict(cell) + result = apply_cell_symmetry_constraints(cell, 'NOT A REAL SG') + assert result == original diff --git a/tests/unit/easydiffraction/datablocks/experiment/item/test_base_coverage.py b/tests/unit/easydiffraction/datablocks/experiment/item/test_base_coverage.py new file mode 100644 index 00000000..ed9d2995 --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/experiment/item/test_base_coverage.py @@ -0,0 +1,225 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for ExperimentBase and PdExperimentBase switchable categories.""" + +from easydiffraction.datablocks.experiment.categories.experiment_type import ExperimentType +from easydiffraction.datablocks.experiment.item.base import ExperimentBase +from easydiffraction.datablocks.experiment.item.base import PdExperimentBase +from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum +from easydiffraction.datablocks.experiment.item.enums import RadiationProbeEnum +from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum +from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum + + +# ------------------------------------------------------------------ +# Helpers +# ------------------------------------------------------------------ + + +def _mk_type_powder_cwl_bragg(): + et = ExperimentType() + et._set_sample_form(SampleFormEnum.POWDER.value) + et._set_beam_mode(BeamModeEnum.CONSTANT_WAVELENGTH.value) + et._set_radiation_probe(RadiationProbeEnum.NEUTRON.value) + et._set_scattering_type(ScatteringTypeEnum.BRAGG.value) + return et + + +class ConcretePd(PdExperimentBase): + def _load_ascii_data_to_experiment(self, data_path: str) -> int: + return 0 + + +class ConcreteBase(ExperimentBase): + def _load_ascii_data_to_experiment(self, data_path: str) -> int: + return 0 + + +# ------------------------------------------------------------------ +# ExperimentBase +# ------------------------------------------------------------------ + + +class TestExperimentBaseName: + def test_name_getter(self): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + assert ex.name == 'ex1' + + def test_name_setter(self): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + ex.name = 'ex2' + assert ex.name == 'ex2' + + def test_type_property(self): + et = _mk_type_powder_cwl_bragg() + ex = ConcreteBase(name='ex1', type=et) + assert ex.type is et + + +class TestExperimentBaseDiffrn: + def test_diffrn_defaults(self): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + assert ex.diffrn is not None + assert isinstance(ex.diffrn_type, str) + + def test_diffrn_type_invalid(self): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + old_type = ex.diffrn_type + ex.diffrn_type = 'nonexistent' + assert ex.diffrn_type == old_type + + def test_show_supported_diffrn_types(self, capsys): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + ex.show_supported_diffrn_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_diffrn_type(self, capsys): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + ex.show_current_diffrn_type() + out = capsys.readouterr().out + assert ex.diffrn_type in out + + +class TestExperimentBaseCalculator: + def test_calculator_auto_resolves(self): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + # calculator should auto-resolve on first access + assert ex.calculator is not None + + def test_calculator_type_auto_resolves(self): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + ct = ex.calculator_type + assert isinstance(ct, str) + assert len(ct) > 0 + + def test_calculator_type_invalid(self): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + _ = ex.calculator_type # trigger resolve + old = ex.calculator_type + ex.calculator_type = 'bogus-engine' + assert ex.calculator_type == old + + def test_show_supported_calculator_types(self, capsys): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + ex.show_supported_calculator_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_calculator_type(self, capsys): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + ex.show_current_calculator_type() + out = capsys.readouterr().out + assert ex.calculator_type in out + + +class TestExperimentBaseAsCif: + def test_as_cif_returns_str(self): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + cif = ex.as_cif + assert isinstance(cif, str) + + def test_show_as_cif(self, capsys): + ex = ConcreteBase(name='ex1', type=_mk_type_powder_cwl_bragg()) + ex.show_as_cif() + out = capsys.readouterr().out + assert 'ex1' in out + + +# ------------------------------------------------------------------ +# PdExperimentBase +# ------------------------------------------------------------------ + + +class TestPdExperimentLinkedPhases: + def test_linked_phases_defaults(self): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + assert ex.linked_phases is not None + assert isinstance(ex.linked_phases_type, str) + + def test_linked_phases_type_invalid(self): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + old_type = ex.linked_phases_type + ex.linked_phases_type = 'nonexistent' + assert ex.linked_phases_type == old_type + + def test_show_supported_linked_phases_types(self, capsys): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + ex.show_supported_linked_phases_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_linked_phases_type(self, capsys): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + ex.show_current_linked_phases_type() + out = capsys.readouterr().out + assert ex.linked_phases_type in out + + +class TestPdExperimentExcludedRegions: + def test_excluded_regions_defaults(self): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + assert ex.excluded_regions is not None + assert isinstance(ex.excluded_regions_type, str) + + def test_excluded_regions_type_invalid(self): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + old_type = ex.excluded_regions_type + ex.excluded_regions_type = 'nonexistent' + assert ex.excluded_regions_type == old_type + + def test_show_supported_excluded_regions_types(self, capsys): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + ex.show_supported_excluded_regions_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_excluded_regions_type(self, capsys): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + ex.show_current_excluded_regions_type() + out = capsys.readouterr().out + assert ex.excluded_regions_type in out + + +class TestPdExperimentData: + def test_data_defaults(self): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + assert ex.data is not None + assert isinstance(ex.data_type, str) + + def test_data_type_invalid(self): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + old_type = ex.data_type + ex.data_type = 'nonexistent' + assert ex.data_type == old_type + + def test_show_supported_data_types(self, capsys): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + ex.show_supported_data_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_data_type(self, capsys): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + ex.show_current_data_type() + out = capsys.readouterr().out + assert ex.data_type in out + + +class TestPdExperimentPeak: + def test_peak_defaults(self): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + assert ex.peak is not None + assert ex.peak_profile_type is not None + + def test_show_supported_peak_profile_types(self, capsys): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + ex.show_supported_peak_profile_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_peak_profile_type(self, capsys): + ex = ConcretePd(name='pd1', type=_mk_type_powder_cwl_bragg()) + ex.show_current_peak_profile_type() + out = capsys.readouterr().out + assert str(ex.peak_profile_type) in out diff --git a/tests/unit/easydiffraction/datablocks/experiment/item/test_bragg_sc_coverage.py b/tests/unit/easydiffraction/datablocks/experiment/item/test_bragg_sc_coverage.py new file mode 100644 index 00000000..69c86b0c --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/experiment/item/test_bragg_sc_coverage.py @@ -0,0 +1,187 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Additional tests for single-crystal experiment classes.""" + +import numpy as np +import pytest + +from easydiffraction.datablocks.experiment.categories.experiment_type import ExperimentType +from easydiffraction.datablocks.experiment.item.bragg_sc import CwlScExperiment +from easydiffraction.datablocks.experiment.item.bragg_sc import TofScExperiment +from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum +from easydiffraction.datablocks.experiment.item.enums import RadiationProbeEnum +from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum +from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum +from easydiffraction.utils.logging import Logger + + +def _mk_type_sc_cwl(): + et = ExperimentType() + et._set_sample_form(SampleFormEnum.SINGLE_CRYSTAL.value) + et._set_beam_mode(BeamModeEnum.CONSTANT_WAVELENGTH.value) + et._set_radiation_probe(RadiationProbeEnum.NEUTRON.value) + et._set_scattering_type(ScatteringTypeEnum.BRAGG.value) + return et + + +def _mk_type_sc_tof(): + et = ExperimentType() + et._set_sample_form(SampleFormEnum.SINGLE_CRYSTAL.value) + et._set_beam_mode(BeamModeEnum.TIME_OF_FLIGHT.value) + et._set_radiation_probe(RadiationProbeEnum.NEUTRON.value) + et._set_scattering_type(ScatteringTypeEnum.BRAGG.value) + return et + + +class TestCwlScExperiment: + def test_init(self): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + assert ex.name == 'cwl_sc' + assert ex.type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL.value + + def test_type_info(self): + assert CwlScExperiment.type_info.tag == 'bragg-sc-cwl' + + def test_load_ascii_5col(self, tmp_path): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + data = np.column_stack([ + np.array([1, 0, 0]), + np.array([0, 1, 0]), + np.array([0, 0, 1]), + np.array([100.0, 200.0, 300.0]), + np.array([10.0, 20.0, 30.0]), + ]) + p = tmp_path / 'sc_data.dat' + np.savetxt(p, data) + n = ex._load_ascii_data_to_experiment(str(p)) + assert n == 3 + + def test_load_ascii_too_few_columns(self, tmp_path, monkeypatch): + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.RAISE, raising=True) + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + data = np.column_stack([np.array([1, 2, 3]), np.array([4, 5, 6])]) + p = tmp_path / 'bad.dat' + np.savetxt(p, data) + with pytest.raises(ValueError, match='at least 5 columns'): + ex._load_ascii_data_to_experiment(str(p)) + + def test_switchable_categories(self): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + # extinction + assert ex.extinction is not None + assert isinstance(ex.extinction_type, str) + # linked crystal + assert ex.linked_crystal is not None + assert isinstance(ex.linked_crystal_type, str) + # instrument + assert ex.instrument is not None + assert isinstance(ex.instrument_type, str) + # data + assert ex.data is not None + assert isinstance(ex.data_type, str) + + def test_extinction_type_invalid(self): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + old = ex.extinction_type + ex.extinction_type = 'bogus' + assert ex.extinction_type == old + + def test_linked_crystal_type_invalid(self): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + old = ex.linked_crystal_type + ex.linked_crystal_type = 'bogus' + assert ex.linked_crystal_type == old + + def test_show_supported_extinction_types(self, capsys): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + ex.show_supported_extinction_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_extinction_type(self, capsys): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + ex.show_current_extinction_type() + out = capsys.readouterr().out + assert ex.extinction_type in out + + def test_show_supported_linked_crystal_types(self, capsys): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + ex.show_supported_linked_crystal_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_linked_crystal_type(self, capsys): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + ex.show_current_linked_crystal_type() + out = capsys.readouterr().out + assert ex.linked_crystal_type in out + + def test_show_supported_instrument_types(self, capsys): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + ex.show_supported_instrument_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_instrument_type(self, capsys): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + ex.show_current_instrument_type() + out = capsys.readouterr().out + assert ex.instrument_type in out + + def test_show_supported_data_types(self, capsys): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + ex.show_supported_data_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_data_type(self, capsys): + ex = CwlScExperiment(name='cwl_sc', type=_mk_type_sc_cwl()) + ex.show_current_data_type() + out = capsys.readouterr().out + assert ex.data_type in out + + +class TestTofScExperiment: + def test_init(self): + ex = TofScExperiment(name='tof_sc', type=_mk_type_sc_tof()) + assert ex.name == 'tof_sc' + assert ex.type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT.value + + def test_type_info(self): + assert TofScExperiment.type_info.tag == 'bragg-sc-tof' + + def test_load_ascii_6col(self, tmp_path): + ex = TofScExperiment(name='tof_sc', type=_mk_type_sc_tof()) + data = np.column_stack([ + np.array([1, 0, 0]), + np.array([0, 1, 0]), + np.array([0, 0, 1]), + np.array([100.0, 200.0, 300.0]), + np.array([10.0, 20.0, 30.0]), + np.array([1.54, 1.54, 1.54]), + ]) + p = tmp_path / 'tof_sc_data.dat' + np.savetxt(p, data) + n = ex._load_ascii_data_to_experiment(str(p)) + assert n == 3 + + def test_load_ascii_too_few_columns(self, tmp_path, monkeypatch): + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.RAISE, raising=True) + ex = TofScExperiment(name='tof_sc', type=_mk_type_sc_tof()) + data = np.column_stack([ + np.array([1, 2]), + np.array([0, 1]), + np.array([0, 0]), + np.array([100.0, 200.0]), + np.array([10.0, 20.0]), + ]) + p = tmp_path / 'bad.dat' + np.savetxt(p, data) + with pytest.raises(ValueError, match='at least 6 columns'): + ex._load_ascii_data_to_experiment(str(p)) + + def test_load_ascii_nonexistent_file(self, monkeypatch): + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.RAISE, raising=True) + ex = TofScExperiment(name='tof_sc', type=_mk_type_sc_tof()) + with pytest.raises(OSError, match='No such file'): + ex._load_ascii_data_to_experiment('/no/such/file.dat') diff --git a/tests/unit/easydiffraction/datablocks/experiment/item/test_enums_coverage.py b/tests/unit/easydiffraction/datablocks/experiment/item/test_enums_coverage.py new file mode 100644 index 00000000..febdeb31 --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/experiment/item/test_enums_coverage.py @@ -0,0 +1,178 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for experiment enum description methods and defaults.""" + +from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum +from easydiffraction.datablocks.experiment.item.enums import PeakProfileTypeEnum +from easydiffraction.datablocks.experiment.item.enums import RadiationProbeEnum +from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum +from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum + + +# ------------------------------------------------------------------ +# SampleFormEnum +# ------------------------------------------------------------------ + + +class TestSampleFormEnum: + def test_values(self): + assert SampleFormEnum.POWDER == 'powder' + assert SampleFormEnum.SINGLE_CRYSTAL == 'single crystal' + + def test_default(self): + assert SampleFormEnum.default() is SampleFormEnum.POWDER + + def test_description_powder(self): + desc = SampleFormEnum.POWDER.description() + assert isinstance(desc, str) + assert 'Powder' in desc or 'powder' in desc.lower() + + def test_description_single_crystal(self): + desc = SampleFormEnum.SINGLE_CRYSTAL.description() + assert isinstance(desc, str) + assert 'crystal' in desc.lower() + + +# ------------------------------------------------------------------ +# ScatteringTypeEnum +# ------------------------------------------------------------------ + + +class TestScatteringTypeEnum: + def test_values(self): + assert ScatteringTypeEnum.BRAGG == 'bragg' + assert ScatteringTypeEnum.TOTAL == 'total' + + def test_default(self): + assert ScatteringTypeEnum.default() is ScatteringTypeEnum.BRAGG + + def test_description_bragg(self): + desc = ScatteringTypeEnum.BRAGG.description() + assert isinstance(desc, str) + assert 'Bragg' in desc + + def test_description_total(self): + desc = ScatteringTypeEnum.TOTAL.description() + assert isinstance(desc, str) + assert 'Total' in desc or 'PDF' in desc + + +# ------------------------------------------------------------------ +# RadiationProbeEnum +# ------------------------------------------------------------------ + + +class TestRadiationProbeEnum: + def test_values(self): + assert RadiationProbeEnum.NEUTRON == 'neutron' + assert RadiationProbeEnum.XRAY == 'xray' + + def test_default(self): + assert RadiationProbeEnum.default() is RadiationProbeEnum.NEUTRON + + def test_description_neutron(self): + desc = RadiationProbeEnum.NEUTRON.description() + assert isinstance(desc, str) + assert 'Neutron' in desc or 'neutron' in desc.lower() + + def test_description_xray(self): + desc = RadiationProbeEnum.XRAY.description() + assert isinstance(desc, str) + assert 'ray' in desc.lower() + + +# ------------------------------------------------------------------ +# BeamModeEnum +# ------------------------------------------------------------------ + + +class TestBeamModeEnum: + def test_values(self): + assert BeamModeEnum.CONSTANT_WAVELENGTH == 'constant wavelength' + assert BeamModeEnum.TIME_OF_FLIGHT == 'time-of-flight' + + def test_default(self): + assert BeamModeEnum.default() is BeamModeEnum.CONSTANT_WAVELENGTH + + def test_description_cwl(self): + desc = BeamModeEnum.CONSTANT_WAVELENGTH.description() + assert isinstance(desc, str) + assert 'CW' in desc or 'wavelength' in desc.lower() + + def test_description_tof(self): + desc = BeamModeEnum.TIME_OF_FLIGHT.description() + assert isinstance(desc, str) + assert 'TOF' in desc or 'time' in desc.lower() + + +# ------------------------------------------------------------------ +# PeakProfileTypeEnum +# ------------------------------------------------------------------ + + +class TestPeakProfileTypeEnum: + def test_default_bragg_cwl(self): + result = PeakProfileTypeEnum.default( + scattering_type=ScatteringTypeEnum.BRAGG, + beam_mode=BeamModeEnum.CONSTANT_WAVELENGTH, + ) + assert result is PeakProfileTypeEnum.PSEUDO_VOIGT + + def test_default_bragg_tof(self): + result = PeakProfileTypeEnum.default( + scattering_type=ScatteringTypeEnum.BRAGG, + beam_mode=BeamModeEnum.TIME_OF_FLIGHT, + ) + assert result is PeakProfileTypeEnum.PSEUDO_VOIGT_IKEDA_CARPENTER + + def test_default_total_cwl(self): + result = PeakProfileTypeEnum.default( + scattering_type=ScatteringTypeEnum.TOTAL, + beam_mode=BeamModeEnum.CONSTANT_WAVELENGTH, + ) + assert result is PeakProfileTypeEnum.GAUSSIAN_DAMPED_SINC + + def test_default_total_tof(self): + result = PeakProfileTypeEnum.default( + scattering_type=ScatteringTypeEnum.TOTAL, + beam_mode=BeamModeEnum.TIME_OF_FLIGHT, + ) + assert result is PeakProfileTypeEnum.GAUSSIAN_DAMPED_SINC + + def test_default_none_uses_defaults(self): + result = PeakProfileTypeEnum.default() + expected = PeakProfileTypeEnum.default( + scattering_type=ScatteringTypeEnum.default(), + beam_mode=BeamModeEnum.default(), + ) + assert result is expected + + def test_description_pseudo_voigt(self): + desc = PeakProfileTypeEnum.PSEUDO_VOIGT.description() + assert isinstance(desc, str) + assert 'Pseudo-Voigt' in desc + + def test_description_split_pseudo_voigt(self): + desc = PeakProfileTypeEnum.SPLIT_PSEUDO_VOIGT.description() + assert isinstance(desc, str) + assert 'Split' in desc + + def test_description_thompson_cox_hastings(self): + desc = PeakProfileTypeEnum.THOMPSON_COX_HASTINGS.description() + assert isinstance(desc, str) + assert 'Thompson' in desc + + def test_description_pseudo_voigt_ikeda_carpenter(self): + desc = PeakProfileTypeEnum.PSEUDO_VOIGT_IKEDA_CARPENTER.description() + assert isinstance(desc, str) + assert 'Ikeda' in desc + + def test_description_pseudo_voigt_back_to_back(self): + desc = PeakProfileTypeEnum.PSEUDO_VOIGT_BACK_TO_BACK.description() + assert isinstance(desc, str) + assert 'Back-to-Back' in desc + + def test_description_gaussian_damped_sinc(self): + desc = PeakProfileTypeEnum.GAUSSIAN_DAMPED_SINC.description() + assert isinstance(desc, str) + assert 'sinc' in desc.lower() or 'PDF' in desc diff --git a/tests/unit/easydiffraction/datablocks/experiment/item/test_factory_coverage.py b/tests/unit/easydiffraction/datablocks/experiment/item/test_factory_coverage.py new file mode 100644 index 00000000..c258d8ff --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/experiment/item/test_factory_coverage.py @@ -0,0 +1,90 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Additional tests for ExperimentFactory creation paths.""" + +import pytest + +from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum +from easydiffraction.datablocks.experiment.item.enums import RadiationProbeEnum +from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum +from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum +from easydiffraction.datablocks.experiment.item.factory import ExperimentFactory + + +class TestExperimentFactoryFromScratch: + def test_powder_bragg_cwl(self): + ex = ExperimentFactory.from_scratch( + name='test_pd', + sample_form='powder', + beam_mode='constant wavelength', + radiation_probe='neutron', + scattering_type='bragg', + ) + assert ex.name == 'test_pd' + assert ex.type.sample_form.value == SampleFormEnum.POWDER.value + assert ex.type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH.value + assert ex.type.scattering_type.value == ScatteringTypeEnum.BRAGG.value + + def test_powder_bragg_tof(self): + ex = ExperimentFactory.from_scratch( + name='test_tof', + sample_form='powder', + beam_mode='time-of-flight', + radiation_probe='neutron', + scattering_type='bragg', + ) + assert ex.name == 'test_tof' + assert ex.type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT.value + + def test_single_crystal_cwl(self): + ex = ExperimentFactory.from_scratch( + name='test_sc', + sample_form='single crystal', + beam_mode='constant wavelength', + radiation_probe='neutron', + scattering_type='bragg', + ) + assert ex.name == 'test_sc' + assert ex.type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL.value + + def test_single_crystal_tof(self): + ex = ExperimentFactory.from_scratch( + name='test_sc_tof', + sample_form='single crystal', + beam_mode='time-of-flight', + radiation_probe='neutron', + scattering_type='bragg', + ) + assert ex.name == 'test_sc_tof' + assert ex.type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT.value + + def test_total_scattering(self): + ex = ExperimentFactory.from_scratch( + name='test_total', + sample_form='powder', + scattering_type='total', + ) + assert ex.type.scattering_type.value == ScatteringTypeEnum.TOTAL.value + + def test_defaults_used_when_none(self): + ex = ExperimentFactory.from_scratch(name='defaults') + assert ex.type.sample_form.value == SampleFormEnum.default().value + assert ex.type.beam_mode.value == BeamModeEnum.default().value + assert ex.type.scattering_type.value == ScatteringTypeEnum.default().value + assert ex.type.radiation_probe.value == RadiationProbeEnum.default().value + + +class TestExperimentFactoryInstantiationBlocked: + def test_direct_instantiation_raises(self): + with pytest.raises(AttributeError, match='class methods'): + ExperimentFactory() + + +class TestExperimentFactoryCreateExperimentType: + def test_partial_overrides(self): + et = ExperimentFactory._create_experiment_type( + sample_form='single crystal', + ) + assert et.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL.value + # others get defaults + assert et.beam_mode.value == BeamModeEnum.default().value diff --git a/tests/unit/easydiffraction/datablocks/structure/item/test_base_coverage.py b/tests/unit/easydiffraction/datablocks/structure/item/test_base_coverage.py new file mode 100644 index 00000000..e0f763ce --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/structure/item/test_base_coverage.py @@ -0,0 +1,178 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for Structure switchable-category wiring.""" + +import pytest + +from easydiffraction.datablocks.structure.categories.atom_sites import AtomSites +from easydiffraction.datablocks.structure.categories.atom_sites.factory import AtomSitesFactory +from easydiffraction.datablocks.structure.categories.cell import Cell +from easydiffraction.datablocks.structure.categories.cell.factory import CellFactory +from easydiffraction.datablocks.structure.categories.space_group import SpaceGroup +from easydiffraction.datablocks.structure.categories.space_group.factory import SpaceGroupFactory +from easydiffraction.datablocks.structure.item.base import Structure + + +# ------------------------------------------------------------------ +# Fixture +# ------------------------------------------------------------------ + + +@pytest.fixture +def structure(): + return Structure(name='test_struct') + + +# ------------------------------------------------------------------ +# Name property +# ------------------------------------------------------------------ + + +class TestStructureName: + def test_initial_name(self, structure): + assert structure.name == 'test_struct' + + def test_setter(self, structure): + structure.name = 'renamed' + assert structure.name == 'renamed' + + def test_setter_type_check(self, structure): + with pytest.raises(TypeError): + structure.name = 123 + + +# ------------------------------------------------------------------ +# Cell (switchable-category) +# ------------------------------------------------------------------ + + +class TestStructureCell: + def test_default_cell_type(self, structure): + assert structure.cell_type == CellFactory.default_tag() + + def test_cell_returns_cell_instance(self, structure): + assert isinstance(structure.cell, Cell) + + def test_cell_type_setter_valid(self, structure, capsys): + supported = CellFactory.supported_tags() + assert len(supported) > 0 + tag = supported[0] + structure.cell_type = tag + assert structure.cell_type == tag + + def test_cell_type_setter_invalid_keeps_old(self, structure): + old_type = structure.cell_type + structure.cell_type = 'nonexistent-type' + assert structure.cell_type == old_type + + def test_cell_setter_replaces_instance(self, structure): + new_cell = CellFactory.create(CellFactory.default_tag()) + structure.cell = new_cell + assert structure.cell is new_cell + + def test_show_supported_cell_types(self, structure, capsys): + structure.show_supported_cell_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_cell_type(self, structure, capsys): + structure.show_current_cell_type() + out = capsys.readouterr().out + assert structure.cell_type in out + + +# ------------------------------------------------------------------ +# Space group (switchable-category) +# ------------------------------------------------------------------ + + +class TestStructureSpaceGroup: + def test_default_space_group_type(self, structure): + assert structure.space_group_type == SpaceGroupFactory.default_tag() + + def test_space_group_returns_instance(self, structure): + assert isinstance(structure.space_group, SpaceGroup) + + def test_space_group_type_setter_valid(self, structure, capsys): + supported = SpaceGroupFactory.supported_tags() + assert len(supported) > 0 + tag = supported[0] + structure.space_group_type = tag + assert structure.space_group_type == tag + + def test_space_group_type_setter_invalid_keeps_old(self, structure): + old_type = structure.space_group_type + structure.space_group_type = 'nonexistent-type' + assert structure.space_group_type == old_type + + def test_space_group_setter_replaces_instance(self, structure): + new_sg = SpaceGroupFactory.create(SpaceGroupFactory.default_tag()) + structure.space_group = new_sg + assert structure.space_group is new_sg + + def test_show_supported_space_group_types(self, structure, capsys): + structure.show_supported_space_group_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_space_group_type(self, structure, capsys): + structure.show_current_space_group_type() + out = capsys.readouterr().out + assert structure.space_group_type in out + + +# ------------------------------------------------------------------ +# Atom sites (switchable-category) +# ------------------------------------------------------------------ + + +class TestStructureAtomSites: + def test_default_atom_sites_type(self, structure): + assert structure.atom_sites_type == AtomSitesFactory.default_tag() + + def test_atom_sites_returns_instance(self, structure): + assert isinstance(structure.atom_sites, AtomSites) + + def test_atom_sites_type_setter_valid(self, structure, capsys): + supported = AtomSitesFactory.supported_tags() + assert len(supported) > 0 + tag = supported[0] + structure.atom_sites_type = tag + assert structure.atom_sites_type == tag + + def test_atom_sites_type_setter_invalid_keeps_old(self, structure): + old_type = structure.atom_sites_type + structure.atom_sites_type = 'nonexistent-type' + assert structure.atom_sites_type == old_type + + def test_atom_sites_setter_replaces_instance(self, structure): + new_as = AtomSitesFactory.create(AtomSitesFactory.default_tag()) + structure.atom_sites = new_as + assert structure.atom_sites is new_as + + def test_show_supported_atom_sites_types(self, structure, capsys): + structure.show_supported_atom_sites_types() + out = capsys.readouterr().out + assert len(out) > 0 + + def test_show_current_atom_sites_type(self, structure, capsys): + structure.show_current_atom_sites_type() + out = capsys.readouterr().out + assert structure.atom_sites_type in out + + +# ------------------------------------------------------------------ +# Display methods +# ------------------------------------------------------------------ + + +class TestStructureDisplay: + def test_show(self, structure, capsys): + structure.show() + out = capsys.readouterr().out + assert 'test_struct' in out + + def test_show_as_cif(self, structure, capsys): + structure.show_as_cif() + out = capsys.readouterr().out + assert 'test_struct' in out diff --git a/tests/unit/easydiffraction/datablocks/structure/test_collection_coverage.py b/tests/unit/easydiffraction/datablocks/structure/test_collection_coverage.py new file mode 100644 index 00000000..da727a35 --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/structure/test_collection_coverage.py @@ -0,0 +1,54 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for Structures collection.""" + +from easydiffraction.datablocks.structure.collection import Structures +from easydiffraction.datablocks.structure.item.base import Structure + + +class TestStructuresCollection: + def test_empty_on_init(self): + structs = Structures() + assert len(structs) == 0 + assert structs.names == [] + + def test_create(self): + structs = Structures() + structs.create(name='s1') + assert len(structs) == 1 + assert 's1' in structs.names + + def test_create_multiple(self): + structs = Structures() + structs.create(name='s1') + structs.create(name='s2') + assert len(structs) == 2 + assert 's1' in structs.names + assert 's2' in structs.names + + def test_add_pre_built(self): + structs = Structures() + s = Structure(name='manual') + structs.add(s) + assert 'manual' in structs.names + + def test_show_names(self, capsys): + structs = Structures() + structs.create(name='alpha') + structs.show_names() + out = capsys.readouterr().out + assert 'Defined structures' in out + + def test_show_params(self, capsys): + structs = Structures() + structs.create(name='p1') + structs.show_params() + # Should not raise; just exercise the code path + capsys.readouterr() + + def test_remove(self): + structs = Structures() + structs.create(name='rem') + assert len(structs) == 1 + structs.remove('rem') + assert len(structs) == 0 From a195cd8a561b8ffb2355343363aeea5212502593 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 17:09:04 +0200 Subject: [PATCH 28/51] More tests --- .github/copilot-instructions.md | 20 ++ docs/architecture/architecture.md | 87 +++++++- pixi.toml | 1 + .../experiment/item/test_factory_coverage.py | 4 +- .../structure/item/test_base_coverage.py | 3 +- .../structure/test_collection_coverage.py | 10 +- tools/test_structure_check.py | 199 ++++++++++++++++++ 7 files changed, 318 insertions(+), 6 deletions(-) create mode 100644 tools/test_structure_check.py diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 1fabd502..92517a7f 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -108,6 +108,26 @@ `*.py` script, then run `pixi run notebook-prepare` to regenerate the notebook. +## Testing + +- Every new module, class, or bug fix must ship with tests. See + `docs/architecture/architecture.md` §10 for the full test strategy. +- **Unit tests mirror the source tree:** + `src/easydiffraction//.py` → + `tests/unit/easydiffraction//test_.py`. Run + `pixi run test-structure-check` to verify. +- Category packages with only `default.py`/`factory.py` may use a single + parent-level `test_.py` instead of per-file tests. +- Supplementary test files use the pattern `test__coverage.py`. +- Tests that expect `log.error()` to raise must `monkeypatch` Logger to + RAISE mode (another test may have leaked WARN mode). +- `@typechecked` setters raise `typeguard.TypeCheckError`, not + `TypeError`. +- No test-ordering dependence, no network, no sleeping, no real + calculation engines in unit tests. +- After adding or modifying tests, run `pixi run unit-tests` and confirm + all tests pass. + ## Changes - Before implementing any structural or design change (new categories, diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md index 8a7a9369..2170fc75 100644 --- a/docs/architecture/architecture.md +++ b/docs/architecture/architecture.md @@ -1172,7 +1172,92 @@ def length_a(self) -> Parameter: --- -## 10. Issues +## 10. Test Strategy + +Every new feature, category, factory, or bug fix must ship with tests. +The project enforces a multi-layered testing approach that catches +regressions at different levels of abstraction. + +### 10.1 Test Layers + +| Layer | Location | Runner command | Scope | +| --------------------- | ----------------------- | ---------------------------- | -------------------------------------------------------------------------------------------------------------------- | +| **Unit** | `tests/unit/` | `pixi run unit-tests` | Single class or function in isolation. Fast, no I/O, no external engines. | +| **Functional** | `tests/functional/` | `pixi run functional-tests` | Multi-component workflows (e.g. create experiment → load data → fit). No external data files beyond tiny test stubs. | +| **Integration** | `tests/integration/` | `pixi run integration-tests` | End-to-end pipelines using real calculation engines (cryspy, crysfml, pdffit2) and real data files from `data/`. | +| **Script (tutorial)** | `tools/test_scripts.py` | `pixi run script-tests` | Runs each tutorial `*.py` script under `docs/docs/tutorials/` as a subprocess and checks for a zero exit code. | +| **Notebook** | `docs/docs/tutorials/` | `pixi run notebook-tests` | Executes every Jupyter notebook end-to-end via `nbmake`. | + +### 10.2 Directory Structure Convention + +The unit-test tree **mirrors** the source tree: + +``` +src/easydiffraction//.py + → tests/unit/easydiffraction//test_.py +``` + +Two additional patterns are recognised: + +1. **Supplementary coverage files** — `test__coverage.py`, + `test__more.py`, etc. sit beside the main test file and add + extra scenarios. +2. **Parent-level roll-up** — for category packages that contain only + `default.py` and `factory.py`, a single `test_.py` one + directory up covers the whole package (e.g. + `categories/test_experiment_type.py` covers + `categories/experiment_type/default.py` and + `categories/experiment_type/factory.py`). + +The CI tool `pixi run test-structure-check` validates that every source +module has a corresponding test file and reports any gaps. Explicit name +aliases (e.g. `variable.py` tested by `test_parameters.py`) are declared +in `KNOWN_ALIASES` inside the tool script. + +### 10.3 What to Test per Source Module Type + +| Source module type | Required tests | +| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Core base class** (`core/`) | Instantiation, public properties, validation edge cases, identity wiring. | +| **Factory** (`factory.py`) | Registration check, `supported_tags()`, `default_tag()`, `create()` for each tag, `show_supported()` output, invalid-tag handling. | +| **Category** (`default.py`) | Instantiation, all public properties (read + write where applicable), CIF round-trip (`as_cif` → `from_cif`), parameter enumeration. | +| **Enum** (`enums.py`) | Membership of all members, `default()` method, `description()` for every member, `StrEnum` string equality. | +| **Datablock item** (`base.py`) | Construction, switchable-category full API (``, `_type` get/set, `show_supported__types`, `show_current__type`), `show`/`show_as_cif`. | +| **Collection** (`collection.py`) | `create`, `add`, `remove`, `names`, `show_names`, `show_params`, iteration, duplicate-name handling. | +| **Calculator / Minimizer** | `can_handle()` with compatible and incompatible experiment types, `_compute()` stub or mock. | +| **Display / IO** | Input → output for representative cases; file-not-found and malformed-input error paths. | + +### 10.4 Test Conventions + +- **No test-ordering dependence.** Each test must be self-contained. Use + `monkeypatch` to set `Logger._reaction` when the test expects a raised + exception (another test may have leaked WARN mode via the global + `Logger` singleton). +- **Error paths are tested explicitly.** Use `pytest.raises()` (with + `monkeypatch` for Logger RAISE mode) for `log.error()` calls that + specify `exc_type`. +- **`@typechecked` setters raise `typeguard.TypeCheckError`**, not + `TypeError`. Tests must catch the correct exception. +- **Use `capsys` / `capfd`** for asserting console output from `show_*` + methods. +- **Prefer `tmp_path`** (pytest fixture) for file-system tests. +- **No sleeping, no network calls, no real calculation engines** in unit + tests. +- Test files carry the SPDX license header and a module-level docstring. + They are exempt from most lint rules (ANN, D, DOC, INP001, S101, etc.) + per `pyproject.toml`. + +### 10.5 Coverage Threshold + +The minimum line-coverage threshold is **70 %** (`fail_under = 70` in +`pyproject.toml`). The project aspires to test every code path; the +threshold is a safety net, not a target. + +Run `pixi run unit-tests-coverage` for a per-module report. + +--- + +## 11. Issues - **Open:** [`issues_open.md`](issues_open.md) — prioritised backlog. - **Closed:** [`issues_closed.md`](issues_closed.md) — resolved items diff --git a/pixi.toml b/pixi.toml index 7ec6ed93..7e82ac63 100644 --- a/pixi.toml +++ b/pixi.toml @@ -112,6 +112,7 @@ py-lint-check = 'ruff check src/ tests/ docs/docs/tutorials/' py-format-check = 'ruff format --check src/ tests/ docs/docs/tutorials/' nonpy-format-check = 'npx prettier --list-different --config=prettierrc.toml --ignore-unknown .' nonpy-format-check-modified = 'python tools/nonpy_prettier_modified.py' +test-structure-check = 'python tools/test_structure_check.py' check = 'pre-commit run --hook-stage manual --all-files' diff --git a/tests/unit/easydiffraction/datablocks/experiment/item/test_factory_coverage.py b/tests/unit/easydiffraction/datablocks/experiment/item/test_factory_coverage.py index c258d8ff..690455c3 100644 --- a/tests/unit/easydiffraction/datablocks/experiment/item/test_factory_coverage.py +++ b/tests/unit/easydiffraction/datablocks/experiment/item/test_factory_coverage.py @@ -9,6 +9,7 @@ from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum from easydiffraction.datablocks.experiment.item.factory import ExperimentFactory +from easydiffraction.utils.logging import Logger class TestExperimentFactoryFromScratch: @@ -75,7 +76,8 @@ def test_defaults_used_when_none(self): class TestExperimentFactoryInstantiationBlocked: - def test_direct_instantiation_raises(self): + def test_direct_instantiation_raises(self, monkeypatch): + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.RAISE, raising=True) with pytest.raises(AttributeError, match='class methods'): ExperimentFactory() diff --git a/tests/unit/easydiffraction/datablocks/structure/item/test_base_coverage.py b/tests/unit/easydiffraction/datablocks/structure/item/test_base_coverage.py index e0f763ce..a8c4b5fb 100644 --- a/tests/unit/easydiffraction/datablocks/structure/item/test_base_coverage.py +++ b/tests/unit/easydiffraction/datablocks/structure/item/test_base_coverage.py @@ -3,6 +3,7 @@ """Tests for Structure switchable-category wiring.""" import pytest +from typeguard import TypeCheckError from easydiffraction.datablocks.structure.categories.atom_sites import AtomSites from easydiffraction.datablocks.structure.categories.atom_sites.factory import AtomSitesFactory @@ -37,7 +38,7 @@ def test_setter(self, structure): assert structure.name == 'renamed' def test_setter_type_check(self, structure): - with pytest.raises(TypeError): + with pytest.raises(TypeCheckError): structure.name = 123 diff --git a/tests/unit/easydiffraction/datablocks/structure/test_collection_coverage.py b/tests/unit/easydiffraction/datablocks/structure/test_collection_coverage.py index da727a35..329cc660 100644 --- a/tests/unit/easydiffraction/datablocks/structure/test_collection_coverage.py +++ b/tests/unit/easydiffraction/datablocks/structure/test_collection_coverage.py @@ -2,6 +2,8 @@ # SPDX-License-Identifier: BSD-3-Clause """Tests for Structures collection.""" +import pytest + from easydiffraction.datablocks.structure.collection import Structures from easydiffraction.datablocks.structure.item.base import Structure @@ -40,11 +42,13 @@ def test_show_names(self, capsys): assert 'Defined structures' in out def test_show_params(self, capsys): + # TODO: Structure.show_params() is not defined — collection + # delegates to it, causing TypeError. Fix the source, then update + # this test to verify the output instead. structs = Structures() structs.create(name='p1') - structs.show_params() - # Should not raise; just exercise the code path - capsys.readouterr() + with pytest.raises(TypeError): + structs.show_params() def test_remove(self): structs = Structures() diff --git a/tools/test_structure_check.py b/tools/test_structure_check.py new file mode 100644 index 00000000..9edbd5bf --- /dev/null +++ b/tools/test_structure_check.py @@ -0,0 +1,199 @@ +"""Check that the unit-test directory mirrors the source directory. + +Every non-``__init__.py`` Python module under ``src/easydiffraction/`` +should have a corresponding ``test_.py`` file under +``tests/unit/easydiffraction/`` in the matching sub-package. Modules +that are explicitly excluded (vendored code, ``__main__``, etc.) are +skipped. + +The script recognises two common test-layout patterns: + +1. **Direct mirror** — ``src/.../foo.py`` → ``tests/.../test_foo.py`` + (or ``test_foo_*.py`` for supplementary coverage files). +2. **Parent-level roll-up** — for category packages that contain only + ``default.py``, ``factory.py``, etc., a single + ``test_.py`` at the parent level counts as coverage + for every module inside that package. + +Explicit name aliases (e.g. ``variable.py`` tested by +``test_parameters.py``) are declared in ``KNOWN_ALIASES``. + +Usage:: + + python tools/test_structure_check.py # exit 1 on mismatch + python tools/test_structure_check.py --verbose # list all mappings + +Exit code 0 when the test tree is in sync, 1 otherwise. +""" + +from __future__ import annotations + +import argparse +from pathlib import Path + +# --------------------------------------------------------------------------- +# Paths +# --------------------------------------------------------------------------- + +ROOT = Path(__file__).resolve().parents[1] +SRC_ROOT = ROOT / 'src' / 'easydiffraction' +TEST_ROOT = ROOT / 'tests' / 'unit' / 'easydiffraction' + +# --------------------------------------------------------------------------- +# Exclusions +# --------------------------------------------------------------------------- + +# Source modules that do not need a dedicated unit-test file. +EXCLUDED_MODULES: set[str] = { + '__init__', + '__main__', +} + +# Source directories whose contents are excluded entirely. +EXCLUDED_DIRS: set[str] = { + '_vendored', + '__pycache__', +} + +# --------------------------------------------------------------------------- +# Known aliases: src module stem → accepted test stem(s) +# --------------------------------------------------------------------------- + +# When the test file uses a different name than the source module, add +# the mapping here. Keys are source stems, values are sets of accepted +# test stems (without ``test_`` prefix or ``.py`` suffix). +KNOWN_ALIASES: dict[str, set[str]] = { + 'singleton': {'singletons'}, + 'variable': {'parameters'}, +} + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _source_modules() -> list[Path]: + """Return all non-excluded source modules as paths relative to SRC_ROOT.""" + modules: list[Path] = [] + for py in sorted(SRC_ROOT.rglob('*.py')): + rel = py.relative_to(SRC_ROOT) + # Skip excluded directories + if any(part in EXCLUDED_DIRS for part in rel.parts): + continue + # Skip excluded module names + if py.stem in EXCLUDED_MODULES: + continue + modules.append(rel) + return modules + + +def _find_existing_tests(src_rel: Path) -> list[Path]: + """Return existing test files that cover a source module. + + Search strategy (in order): + + 1. Same directory: ``test_.py`` or ``test__*.py``. + 2. Known aliases: alternative accepted test stems. + 3. Parent-level roll-up: ``test_.py`` one level up (covers + ``/default.py``, ``/factory.py``, etc.). + """ + base_name = src_rel.stem # e.g. factory, default, variable + parent = src_rel.parent # e.g. core, analysis/categories/aliases + + matches: list[Path] = [] + + # --- Strategy 1: direct mirror in the same directory --- + test_dir = TEST_ROOT / parent + if test_dir.is_dir(): + for f in sorted(test_dir.iterdir()): + if not f.is_file() or f.suffix != '.py': + continue + if f.stem == f'test_{base_name}' or f.stem.startswith(f'test_{base_name}_'): + matches.append(f.relative_to(TEST_ROOT)) + + # --- Strategy 2: known aliases --- + if not matches and base_name in KNOWN_ALIASES: + for alias in KNOWN_ALIASES[base_name]: + if test_dir.is_dir(): + for f in sorted(test_dir.iterdir()): + if not f.is_file() or f.suffix != '.py': + continue + if f.stem == f'test_{alias}' or f.stem.startswith(f'test_{alias}_'): + matches.append(f.relative_to(TEST_ROOT)) + + # --- Strategy 3: parent-level roll-up --- + # For src/.../categories//default.py, check if + # tests/.../categories/test_.py exists. + if not matches and parent.parts: + package_name = parent.parts[-1] # e.g. aliases, experiment_type + parent_test_dir = TEST_ROOT / parent.parent + if parent_test_dir.is_dir(): + for f in sorted(parent_test_dir.iterdir()): + if not f.is_file() or f.suffix != '.py': + continue + if f.stem == f'test_{package_name}' or f.stem.startswith(f'test_{package_name}_'): + matches.append(f.relative_to(TEST_ROOT)) + + return matches + + +def _expected_test_path(src_rel: Path) -> Path: + """Map a source module to its primary expected test file path.""" + return src_rel.parent / f'test_{src_rel.stem}.py' + + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + + +def main() -> int: + parser = argparse.ArgumentParser( + description='Check unit-test directory mirrors src/ structure.', + ) + parser.add_argument( + '--verbose', + action='store_true', + help='Print every mapping, not just missing tests.', + ) + args = parser.parse_args() + + modules = _source_modules() + missing: list[tuple[Path, Path]] = [] + covered: list[tuple[Path, list[Path]]] = [] + + for src_rel in modules: + existing = _find_existing_tests(src_rel) + if existing: + covered.append((src_rel, existing)) + else: + expected = _expected_test_path(src_rel) + missing.append((src_rel, expected)) + + # --- Report --- + if args.verbose: + print('Covered modules:') + for src_rel, tests in covered: + tests_str = ', '.join(str(t) for t in tests) + print(f' ✅ {src_rel} → {tests_str}') + print() + + if missing: + print('Missing test files:') + for src_rel, expected in missing: + print(f' ❌ {src_rel} → expected {expected}') + print() + total = len(modules) + n_covered = len(covered) + print(f'Coverage: {n_covered}/{total} modules have tests ' + f'({100 * n_covered / total:.0f}%)') + print(f'Missing: {len(missing)} module(s) without a test file.') + return 1 + + total = len(modules) + print(f'✅ All {total} source modules have corresponding test files.') + return 0 + + +if __name__ == '__main__': + raise SystemExit(main()) From a22b07cc01ecd66832eb27a35089f9b9f1c6f4aa Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 17:55:17 +0200 Subject: [PATCH 29/51] Add more tests --- .pre-commit-config.yaml | 14 ++ .../analysis/categories/test_fit_mode.py | 85 +++++++++++ .../experiment/categories/test_diffrn.py | 72 ++++++++++ .../structure/categories/__init__.py | 2 + .../structure/categories/test_atom_sites.py | 132 ++++++++++++++++++ .../structure/categories/test_cell.py | 83 +++++++++++ .../display/tablers/__init__.py | 2 + .../display/tablers/test_base.py | 53 +++++++ .../display/tablers/test_pandas.py | 33 +++++ .../display/tablers/test_rich.py | 44 ++++++ .../unit/easydiffraction/display/test_base.py | 37 +++++ .../easydiffraction/display/test_tables.py | 61 ++++++++ .../easydiffraction/display/test_utils.py | 29 ++++ .../unit/easydiffraction/io/cif/test_parse.py | 42 ++++++ .../easydiffraction/utils/test_environment.py | 86 ++++++++++++ 15 files changed, 775 insertions(+) create mode 100644 tests/unit/easydiffraction/analysis/categories/test_fit_mode.py create mode 100644 tests/unit/easydiffraction/datablocks/experiment/categories/test_diffrn.py create mode 100644 tests/unit/easydiffraction/datablocks/structure/categories/__init__.py create mode 100644 tests/unit/easydiffraction/datablocks/structure/categories/test_atom_sites.py create mode 100644 tests/unit/easydiffraction/datablocks/structure/categories/test_cell.py create mode 100644 tests/unit/easydiffraction/display/tablers/__init__.py create mode 100644 tests/unit/easydiffraction/display/tablers/test_base.py create mode 100644 tests/unit/easydiffraction/display/tablers/test_pandas.py create mode 100644 tests/unit/easydiffraction/display/tablers/test_rich.py create mode 100644 tests/unit/easydiffraction/display/test_base.py create mode 100644 tests/unit/easydiffraction/display/test_tables.py create mode 100644 tests/unit/easydiffraction/display/test_utils.py create mode 100644 tests/unit/easydiffraction/io/cif/test_parse.py create mode 100644 tests/unit/easydiffraction/utils/test_environment.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9a3855f4..770dbca2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -53,9 +53,23 @@ repos: pass_filenames: false stages: [manual] + - id: pixi-test-structure-check + name: pixi run test-structure-check + entry: pixi run test-structure-check + language: system + pass_filenames: false + stages: [manual] + - id: pixi-unit-tests name: pixi run unit-tests entry: pixi run unit-tests language: system pass_filenames: false stages: [manual] + + - id: pixi-functional-tests + name: pixi run functional-tests + entry: pixi run functional-tests + language: system + pass_filenames: false + stages: [manual] diff --git a/tests/unit/easydiffraction/analysis/categories/test_fit_mode.py b/tests/unit/easydiffraction/analysis/categories/test_fit_mode.py new file mode 100644 index 00000000..b573332b --- /dev/null +++ b/tests/unit/easydiffraction/analysis/categories/test_fit_mode.py @@ -0,0 +1,85 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for fit_mode category (enums, factory, fit_mode).""" + + +def test_module_import(): + import easydiffraction.analysis.categories.fit_mode as MUT + + expected_module_name = 'easydiffraction.analysis.categories.fit_mode' + actual_module_name = MUT.__name__ + assert expected_module_name == actual_module_name + + +class TestFitModeEnum: + def test_members(self): + from easydiffraction.analysis.categories.fit_mode.enums import FitModeEnum + + assert FitModeEnum.SINGLE == 'single' + assert FitModeEnum.JOINT == 'joint' + + def test_default(self): + from easydiffraction.analysis.categories.fit_mode.enums import FitModeEnum + + assert FitModeEnum.default() is FitModeEnum.SINGLE + + def test_descriptions(self): + from easydiffraction.analysis.categories.fit_mode.enums import FitModeEnum + + for member in FitModeEnum: + desc = member.description() + assert isinstance(desc, str) + assert len(desc) > 0 + + +class TestFitModeFactory: + def test_supported_tags(self): + from easydiffraction.analysis.categories.fit_mode.factory import FitModeFactory + + tags = FitModeFactory.supported_tags() + assert 'default' in tags + + def test_default_tag(self): + from easydiffraction.analysis.categories.fit_mode.factory import FitModeFactory + + assert FitModeFactory.default_tag() == 'default' + + def test_create(self): + from easydiffraction.analysis.categories.fit_mode.factory import FitModeFactory + from easydiffraction.analysis.categories.fit_mode.fit_mode import FitMode + + obj = FitModeFactory.create('default') + assert isinstance(obj, FitMode) + + +class TestFitMode: + def test_instantiation(self): + from easydiffraction.analysis.categories.fit_mode.fit_mode import FitMode + + fm = FitMode() + assert fm is not None + + def test_type_info(self): + from easydiffraction.analysis.categories.fit_mode.fit_mode import FitMode + + assert FitMode.type_info.tag == 'default' + + def test_identity_category_code(self): + from easydiffraction.analysis.categories.fit_mode.fit_mode import FitMode + + fm = FitMode() + assert fm._identity.category_code == 'fit_mode' + + def test_mode_default(self): + from easydiffraction.analysis.categories.fit_mode.enums import FitModeEnum + from easydiffraction.analysis.categories.fit_mode.fit_mode import FitMode + + fm = FitMode() + assert fm.mode.value == FitModeEnum.default().value + + def test_mode_setter(self): + from easydiffraction.analysis.categories.fit_mode.fit_mode import FitMode + + fm = FitMode() + fm.mode = 'joint' + assert fm.mode.value == 'joint' diff --git a/tests/unit/easydiffraction/datablocks/experiment/categories/test_diffrn.py b/tests/unit/easydiffraction/datablocks/experiment/categories/test_diffrn.py new file mode 100644 index 00000000..fa554a22 --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/experiment/categories/test_diffrn.py @@ -0,0 +1,72 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for diffrn category (default and factory).""" + + +def test_module_import(): + import easydiffraction.datablocks.experiment.categories.diffrn as MUT + + expected_module_name = 'easydiffraction.datablocks.experiment.categories.diffrn' + actual_module_name = MUT.__name__ + assert expected_module_name == actual_module_name + + +class TestDiffrnFactory: + def test_supported_tags(self): + from easydiffraction.datablocks.experiment.categories.diffrn.factory import DiffrnFactory + + tags = DiffrnFactory.supported_tags() + assert 'default' in tags + + def test_default_tag(self): + from easydiffraction.datablocks.experiment.categories.diffrn.factory import DiffrnFactory + + assert DiffrnFactory.default_tag() == 'default' + + def test_create(self): + from easydiffraction.datablocks.experiment.categories.diffrn.default import DefaultDiffrn + from easydiffraction.datablocks.experiment.categories.diffrn.factory import DiffrnFactory + + obj = DiffrnFactory.create('default') + assert isinstance(obj, DefaultDiffrn) + + +class TestDefaultDiffrn: + def test_instantiation(self): + from easydiffraction.datablocks.experiment.categories.diffrn.default import DefaultDiffrn + + d = DefaultDiffrn() + assert d is not None + + def test_type_info(self): + from easydiffraction.datablocks.experiment.categories.diffrn.default import DefaultDiffrn + + assert DefaultDiffrn.type_info.tag == 'default' + + def test_identity_category_code(self): + from easydiffraction.datablocks.experiment.categories.diffrn.default import DefaultDiffrn + + d = DefaultDiffrn() + assert d._identity.category_code == 'diffrn' + + def test_defaults_are_none(self): + from easydiffraction.datablocks.experiment.categories.diffrn.default import DefaultDiffrn + + d = DefaultDiffrn() + assert d.ambient_temperature.value is None + assert d.ambient_pressure.value is None + assert d.ambient_magnetic_field.value is None + assert d.ambient_electric_field.value is None + + def test_setters(self): + from easydiffraction.datablocks.experiment.categories.diffrn.default import DefaultDiffrn + + d = DefaultDiffrn() + d.ambient_temperature = 300.0 + assert d.ambient_temperature.value == 300.0 + d.ambient_pressure = 101.325 + assert d.ambient_pressure.value == 101.325 + d.ambient_magnetic_field = 5.0 + assert d.ambient_magnetic_field.value == 5.0 + d.ambient_electric_field = 1000.0 + assert d.ambient_electric_field.value == 1000.0 diff --git a/tests/unit/easydiffraction/datablocks/structure/categories/__init__.py b/tests/unit/easydiffraction/datablocks/structure/categories/__init__.py new file mode 100644 index 00000000..4e798e20 --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/structure/categories/__init__.py @@ -0,0 +1,2 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause diff --git a/tests/unit/easydiffraction/datablocks/structure/categories/test_atom_sites.py b/tests/unit/easydiffraction/datablocks/structure/categories/test_atom_sites.py new file mode 100644 index 00000000..e47cd084 --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/structure/categories/test_atom_sites.py @@ -0,0 +1,132 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for atom_sites category (default and factory).""" + + +def test_module_import(): + import easydiffraction.datablocks.structure.categories.atom_sites as MUT + + expected_module_name = 'easydiffraction.datablocks.structure.categories.atom_sites' + actual_module_name = MUT.__name__ + assert expected_module_name == actual_module_name + + +class TestAtomSitesFactory: + def test_supported_tags(self): + from easydiffraction.datablocks.structure.categories.atom_sites.factory import ( + AtomSitesFactory, + ) + + tags = AtomSitesFactory.supported_tags() + assert 'default' in tags + + def test_default_tag(self): + from easydiffraction.datablocks.structure.categories.atom_sites.factory import ( + AtomSitesFactory, + ) + + assert AtomSitesFactory.default_tag() == 'default' + + def test_create(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSites + from easydiffraction.datablocks.structure.categories.atom_sites.factory import ( + AtomSitesFactory, + ) + + obj = AtomSitesFactory.create('default') + assert isinstance(obj, AtomSites) + + +class TestAtomSite: + def test_instantiation(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + assert site is not None + + def test_identity_category_code(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + assert site._identity.category_code == 'atom_site' + + def test_defaults(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + assert site.label.value == 'Si' + assert site.type_symbol.value == 'Tb' + assert site.fract_x.value == 0.0 + assert site.fract_y.value == 0.0 + assert site.fract_z.value == 0.0 + assert site.occupancy.value == 1.0 + assert site.b_iso.value == 0.0 + assert site.adp_type.value == 'Biso' + + def test_label_setter(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + site.label = 'Fe1' + assert site.label.value == 'Fe1' + + def test_type_symbol_setter(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + site.type_symbol = 'Fe' + assert site.type_symbol.value == 'Fe' + + def test_coordinate_setters(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + site.fract_x = 0.25 + site.fract_y = 0.5 + site.fract_z = 0.75 + assert site.fract_x.value == 0.25 + assert site.fract_y.value == 0.5 + assert site.fract_z.value == 0.75 + + def test_occupancy_setter(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + site.occupancy = 0.5 + assert site.occupancy.value == 0.5 + + def test_b_iso_setter(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + site.b_iso = 1.5 + assert site.b_iso.value == 1.5 + + def test_type_symbol_allowed_values(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + allowed = site._type_symbol_allowed_values + assert isinstance(allowed, list) + assert len(allowed) > 0 + assert 'Fe' in allowed + + def test_wyckoff_letter_allowed_values(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSite + + site = AtomSite() + allowed = site._wyckoff_letter_allowed_values + assert 'a' in allowed + + +class TestAtomSites: + def test_type_info(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSites + + assert AtomSites.type_info.tag == 'default' + + def test_instantiation(self): + from easydiffraction.datablocks.structure.categories.atom_sites.default import AtomSites + + sites = AtomSites() + assert sites is not None diff --git a/tests/unit/easydiffraction/datablocks/structure/categories/test_cell.py b/tests/unit/easydiffraction/datablocks/structure/categories/test_cell.py new file mode 100644 index 00000000..85581c7f --- /dev/null +++ b/tests/unit/easydiffraction/datablocks/structure/categories/test_cell.py @@ -0,0 +1,83 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for cell category (default and factory).""" + + +def test_module_import(): + import easydiffraction.datablocks.structure.categories.cell as MUT + + expected_module_name = 'easydiffraction.datablocks.structure.categories.cell' + actual_module_name = MUT.__name__ + assert expected_module_name == actual_module_name + + +class TestCellFactory: + def test_supported_tags(self): + from easydiffraction.datablocks.structure.categories.cell.factory import CellFactory + + tags = CellFactory.supported_tags() + assert 'default' in tags + + def test_default_tag(self): + from easydiffraction.datablocks.structure.categories.cell.factory import CellFactory + + assert CellFactory.default_tag() == 'default' + + def test_create(self): + from easydiffraction.datablocks.structure.categories.cell.default import Cell + from easydiffraction.datablocks.structure.categories.cell.factory import CellFactory + + obj = CellFactory.create('default') + assert isinstance(obj, Cell) + + +class TestCell: + def test_instantiation(self): + from easydiffraction.datablocks.structure.categories.cell.default import Cell + + cell = Cell() + assert cell is not None + + def test_type_info(self): + from easydiffraction.datablocks.structure.categories.cell.default import Cell + + assert Cell.type_info.tag == 'default' + + def test_identity_category_code(self): + from easydiffraction.datablocks.structure.categories.cell.default import Cell + + cell = Cell() + assert cell._identity.category_code == 'cell' + + def test_defaults(self): + from easydiffraction.datablocks.structure.categories.cell.default import Cell + + cell = Cell() + assert cell.length_a.value == 10.0 + assert cell.length_b.value == 10.0 + assert cell.length_c.value == 10.0 + assert cell.angle_alpha.value == 90.0 + assert cell.angle_beta.value == 90.0 + assert cell.angle_gamma.value == 90.0 + + def test_length_setters(self): + from easydiffraction.datablocks.structure.categories.cell.default import Cell + + cell = Cell() + cell.length_a = 5.0 + cell.length_b = 6.0 + cell.length_c = 7.0 + assert cell.length_a.value == 5.0 + assert cell.length_b.value == 6.0 + assert cell.length_c.value == 7.0 + + def test_angle_setters(self): + from easydiffraction.datablocks.structure.categories.cell.default import Cell + + cell = Cell() + cell.angle_alpha = 80.0 + cell.angle_beta = 85.0 + cell.angle_gamma = 95.0 + assert cell.angle_alpha.value == 80.0 + assert cell.angle_beta.value == 85.0 + assert cell.angle_gamma.value == 95.0 diff --git a/tests/unit/easydiffraction/display/tablers/__init__.py b/tests/unit/easydiffraction/display/tablers/__init__.py new file mode 100644 index 00000000..4e798e20 --- /dev/null +++ b/tests/unit/easydiffraction/display/tablers/__init__.py @@ -0,0 +1,2 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause diff --git a/tests/unit/easydiffraction/display/tablers/test_base.py b/tests/unit/easydiffraction/display/tablers/test_base.py new file mode 100644 index 00000000..106bcacc --- /dev/null +++ b/tests/unit/easydiffraction/display/tablers/test_base.py @@ -0,0 +1,53 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for display/tablers/base.py (TableBackendBase).""" + + +class TestTableBackendBase: + def test_float_precision_constant(self): + from easydiffraction.display.tablers.base import TableBackendBase + + assert TableBackendBase.FLOAT_PRECISION == 5 + + def test_format_value_float(self): + from easydiffraction.display.tablers.rich import RichTableBackend + + backend = RichTableBackend() + result = backend._format_value(3.14159265) + assert result == '3.14159' + + def test_format_value_nonf_float(self): + from easydiffraction.display.tablers.rich import RichTableBackend + + backend = RichTableBackend() + result = backend._format_value('hello') + assert result == 'hello' + + def test_rich_to_hex(self): + from easydiffraction.display.tablers.rich import RichTableBackend + + backend = RichTableBackend() + hex_val = backend._rich_to_hex('red') + assert hex_val.startswith('#') + assert len(hex_val) == 7 + + def test_is_dark_theme_outside_jupyter(self): + from easydiffraction.display.tablers.rich import RichTableBackend + + backend = RichTableBackend() + # Outside Jupyter, default is True + assert backend._is_dark_theme() is True + + def test_rich_border_color_property(self): + from easydiffraction.display.tablers.rich import RichTableBackend + + backend = RichTableBackend() + color = backend._rich_border_color + assert isinstance(color, str) + + def test_pandas_border_color_property(self): + from easydiffraction.display.tablers.rich import RichTableBackend + + backend = RichTableBackend() + color = backend._pandas_border_color + assert color.startswith('#') diff --git a/tests/unit/easydiffraction/display/tablers/test_pandas.py b/tests/unit/easydiffraction/display/tablers/test_pandas.py new file mode 100644 index 00000000..8287f0d5 --- /dev/null +++ b/tests/unit/easydiffraction/display/tablers/test_pandas.py @@ -0,0 +1,33 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for display/tablers/pandas.py (PandasTableBackend).""" + +import pandas as pd + + +class TestPandasTableBackend: + def test_build_base_styles(self): + from easydiffraction.display.tablers.pandas import PandasTableBackend + + backend = PandasTableBackend() + styles = backend._build_base_styles('#aabbcc') + assert isinstance(styles, list) + assert len(styles) > 0 + selectors = [s['selector'] for s in styles] + assert 'thead' in selectors + + def test_build_header_alignment_styles(self): + from easydiffraction.display.tablers.pandas import PandasTableBackend + + backend = PandasTableBackend() + df = pd.DataFrame({'A': [1], 'B': [2]}) + styles = backend._build_header_alignment_styles(df, ['left', 'right']) + assert len(styles) == 2 + + def test_apply_styling_returns_styler(self): + from easydiffraction.display.tablers.pandas import PandasTableBackend + + backend = PandasTableBackend() + df = pd.DataFrame({'A': [1.0], 'B': [2.0]}) + styler = backend._apply_styling(df, ['left', 'right'], '#aabbcc') + assert hasattr(styler, 'to_html') diff --git a/tests/unit/easydiffraction/display/tablers/test_rich.py b/tests/unit/easydiffraction/display/tablers/test_rich.py new file mode 100644 index 00000000..e23241e2 --- /dev/null +++ b/tests/unit/easydiffraction/display/tablers/test_rich.py @@ -0,0 +1,44 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for display/tablers/rich.py (RichTableBackend).""" + +import pandas as pd +from rich.box import Box +from rich.table import Table + + +class TestRichTableBackend: + def test_rich_table_box_constant(self): + from easydiffraction.display.tablers.rich import RICH_TABLE_BOX + + assert isinstance(RICH_TABLE_BOX, Box) + + def test_build_table_returns_table(self): + from easydiffraction.display.tablers.rich import RichTableBackend + + backend = RichTableBackend() + df = pd.DataFrame({'Col': [1.0, 2.0]}) + df.index += 1 + table = backend._build_table(df, ['left'], 'grey35') + assert isinstance(table, Table) + + def test_to_html_returns_string(self): + from easydiffraction.display.tablers.rich import RichTableBackend + + backend = RichTableBackend() + df = pd.DataFrame({'Col': [1.0]}) + df.index += 1 + table = backend._build_table(df, ['left'], 'grey35') + html = backend._to_html(table) + assert isinstance(html, str) + assert ' 0 diff --git a/tests/unit/easydiffraction/display/test_base.py b/tests/unit/easydiffraction/display/test_base.py new file mode 100644 index 00000000..d7883503 --- /dev/null +++ b/tests/unit/easydiffraction/display/test_base.py @@ -0,0 +1,37 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for display/base.py (RendererBase and RendererFactoryBase).""" + +import pytest + +from easydiffraction.display.base import RendererFactoryBase + + +class _StubBackend: + pass + + +class _StubFactory(RendererFactoryBase): + @classmethod + def _registry(cls): + return { + 'stub': {'description': 'Stub engine', 'class': _StubBackend}, + } + + +class TestRendererFactoryBase: + def test_create_valid(self): + obj = _StubFactory.create('stub') + assert isinstance(obj, _StubBackend) + + def test_create_invalid_raises(self): + with pytest.raises(ValueError, match='Unsupported engine'): + _StubFactory.create('nonexistent') + + def test_supported_engines(self): + engines = _StubFactory.supported_engines() + assert engines == ['stub'] + + def test_descriptions(self): + desc = _StubFactory.descriptions() + assert desc == [('stub', 'Stub engine')] diff --git a/tests/unit/easydiffraction/display/test_tables.py b/tests/unit/easydiffraction/display/test_tables.py new file mode 100644 index 00000000..2fb16fe1 --- /dev/null +++ b/tests/unit/easydiffraction/display/test_tables.py @@ -0,0 +1,61 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for display/tables.py (TableEngineEnum, TableRenderer, TableRendererFactory).""" + +import pandas as pd + + +class TestTableEngineEnum: + def test_members(self): + from easydiffraction.display.tables import TableEngineEnum + + assert TableEngineEnum.RICH == 'rich' + assert TableEngineEnum.PANDAS == 'pandas' + + def test_default_outside_jupyter(self): + from easydiffraction.display.tables import TableEngineEnum + + # Outside Jupyter, default is RICH + assert TableEngineEnum.default() is TableEngineEnum.RICH + + def test_descriptions(self): + from easydiffraction.display.tables import TableEngineEnum + + for member in TableEngineEnum: + desc = member.description() + assert isinstance(desc, str) + assert len(desc) > 0 + + +class TestTableRendererFactory: + def test_registry_outside_jupyter(self): + from easydiffraction.display.tables import TableRendererFactory + + registry = TableRendererFactory._registry() + assert 'rich' in registry + # Pandas not available outside Jupyter + assert 'pandas' not in registry + + def test_supported_engines(self): + from easydiffraction.display.tables import TableRendererFactory + + engines = TableRendererFactory.supported_engines() + assert 'rich' in engines + + +class TestTableRenderer: + def test_render(self, monkeypatch, capsys): + from easydiffraction.display.tables import TableRenderer + + # Reset singleton + monkeypatch.setattr(TableRenderer, '_instance', None) + + headers = [('Col', 'left')] + df = pd.DataFrame([['val']], columns=pd.MultiIndex.from_tuples(headers)) + renderer = TableRenderer.get() + renderer.render(df) + out = capsys.readouterr().out + assert len(out) > 0 + + # Reset singleton to not leak state + monkeypatch.setattr(TableRenderer, '_instance', None) diff --git a/tests/unit/easydiffraction/display/test_utils.py b/tests/unit/easydiffraction/display/test_utils.py new file mode 100644 index 00000000..17d715f2 --- /dev/null +++ b/tests/unit/easydiffraction/display/test_utils.py @@ -0,0 +1,29 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for display/utils.py (JupyterScrollManager).""" + + +class TestJupyterScrollManager: + def test_applied_starts_false(self): + from easydiffraction.display.utils import JupyterScrollManager + + # Reset class state + JupyterScrollManager._applied = False + assert JupyterScrollManager._applied is False + + def test_disable_is_noop_outside_jupyter(self): + from easydiffraction.display.utils import JupyterScrollManager + + JupyterScrollManager._applied = False + JupyterScrollManager.disable_jupyter_scroll() + # Outside Jupyter, _applied stays False + assert JupyterScrollManager._applied is False + + def test_idempotency(self): + from easydiffraction.display.utils import JupyterScrollManager + + JupyterScrollManager._applied = False + JupyterScrollManager.disable_jupyter_scroll() + JupyterScrollManager.disable_jupyter_scroll() + # Still False outside Jupyter + assert JupyterScrollManager._applied is False diff --git a/tests/unit/easydiffraction/io/cif/test_parse.py b/tests/unit/easydiffraction/io/cif/test_parse.py new file mode 100644 index 00000000..e97459bf --- /dev/null +++ b/tests/unit/easydiffraction/io/cif/test_parse.py @@ -0,0 +1,42 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for io/cif/parse.py.""" + + +class TestDocumentFromString: + def test_valid_cif(self): + from easydiffraction.io.cif.parse import document_from_string + + cif = 'data_test\n_cell.length_a 5.0\n' + doc = document_from_string(cif) + assert len(doc) == 1 + + def test_pick_sole_block(self): + from easydiffraction.io.cif.parse import document_from_string + from easydiffraction.io.cif.parse import pick_sole_block + + cif = 'data_myblock\n_cell.length_a 5.0\n' + doc = document_from_string(cif) + block = pick_sole_block(doc) + assert block is not None + + def test_name_from_block(self): + from easydiffraction.io.cif.parse import document_from_string + from easydiffraction.io.cif.parse import name_from_block + from easydiffraction.io.cif.parse import pick_sole_block + + cif = 'data_silicon\n_cell.length_a 5.43\n' + doc = document_from_string(cif) + block = pick_sole_block(doc) + name = name_from_block(block) + assert name == 'silicon' + + +class TestDocumentFromPath: + def test_valid_file(self, tmp_path): + from easydiffraction.io.cif.parse import document_from_path + + cif_file = tmp_path / 'test.cif' + cif_file.write_text('data_fromfile\n_cell.length_a 3.0\n') + doc = document_from_path(str(cif_file)) + assert len(doc) == 1 diff --git a/tests/unit/easydiffraction/utils/test_environment.py b/tests/unit/easydiffraction/utils/test_environment.py new file mode 100644 index 00000000..691b0a9c --- /dev/null +++ b/tests/unit/easydiffraction/utils/test_environment.py @@ -0,0 +1,86 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for utils/environment.py.""" + + +class TestInPytest: + def test_returns_true_in_pytest(self): + from easydiffraction.utils.environment import in_pytest + + assert in_pytest() is True + + +class TestInWarp: + def test_false_by_default(self): + from easydiffraction.utils.environment import in_warp + + # Unless running in Warp terminal + import os + + if os.getenv('TERM_PROGRAM') != 'WarpTerminal': + assert in_warp() is False + + def test_true_with_env_var(self, monkeypatch): + from easydiffraction.utils.environment import in_warp + + monkeypatch.setenv('TERM_PROGRAM', 'WarpTerminal') + assert in_warp() is True + + +class TestInPycharm: + def test_false_by_default(self, monkeypatch): + from easydiffraction.utils.environment import in_pycharm + + monkeypatch.delenv('PYCHARM_HOSTED', raising=False) + assert in_pycharm() is False + + def test_true_with_env_var(self, monkeypatch): + from easydiffraction.utils.environment import in_pycharm + + monkeypatch.setenv('PYCHARM_HOSTED', '1') + assert in_pycharm() is True + + +class TestInJupyter: + def test_false_in_tests(self): + from easydiffraction.utils.environment import in_jupyter + + assert in_jupyter() is False + + +class TestInGithubCi: + def test_false_without_env(self, monkeypatch): + from easydiffraction.utils.environment import in_github_ci + + monkeypatch.delenv('GITHUB_ACTIONS', raising=False) + assert in_github_ci() is False + + def test_true_with_env(self, monkeypatch): + from easydiffraction.utils.environment import in_github_ci + + monkeypatch.setenv('GITHUB_ACTIONS', 'true') + assert in_github_ci() is True + + +class TestIpythonHelpers: + def test_is_ipython_display_handle_with_none(self): + from easydiffraction.utils.environment import is_ipython_display_handle + + assert is_ipython_display_handle(None) is False + + def test_is_ipython_display_handle_with_string(self): + from easydiffraction.utils.environment import is_ipython_display_handle + + assert is_ipython_display_handle('not a handle') is False + + def test_can_update_ipython_display(self): + from easydiffraction.utils.environment import can_update_ipython_display + + # IPython is installed in our test environment + result = can_update_ipython_display() + assert isinstance(result, bool) + + def test_can_use_ipython_display_with_none(self): + from easydiffraction.utils.environment import can_use_ipython_display + + assert can_use_ipython_display(None) is False From 29d66a735946bd1ec0e289e4ad15f77be1783529 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 19:48:50 +0200 Subject: [PATCH 30/51] Replace deprecated loadData with load_data from diffpy --- src/easydiffraction/datablocks/experiment/item/total_pd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/easydiffraction/datablocks/experiment/item/total_pd.py b/src/easydiffraction/datablocks/experiment/item/total_pd.py index 35fc6737..a3208a72 100644 --- a/src/easydiffraction/datablocks/experiment/item/total_pd.py +++ b/src/easydiffraction/datablocks/experiment/item/total_pd.py @@ -66,12 +66,12 @@ def _load_ascii_data_to_experiment(self, data_path: str) -> int: If the data file has fewer than two columns. """ try: - from diffpy.utils.parsers.loaddata import loadData # noqa: PLC0415 + from diffpy.utils.parsers import load_data # noqa: PLC0415 except ImportError: msg = 'diffpy module not found.' raise ImportError(msg) from None try: - data = loadData(data_path) + data = load_data(data_path) except Exception as e: msg = f'Failed to read data from {data_path}: {e}' raise OSError(msg) from e From 4d78b49b1f04395dead7fe182eff9816dcb3cc7d Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 20:35:51 +0200 Subject: [PATCH 31/51] Suppress specific warnings in pyproject.toml for better test output --- pixi.lock | 4 ++-- pyproject.toml | 10 ++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/pixi.lock b/pixi.lock index 86558730..0bf6a5da 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty28 - sha256: b5f40819c823325eba37344a6661ba2508c37401aeea74228b4d97f4e6f90730 + version: 0.11.1+devdirty34 + sha256: 98844d7e54295472444bc52561790e364298c7db2cf588dbd95253c473a9d65c requires_dist: - asciichartpy - asteval diff --git a/pyproject.toml b/pyproject.toml index caf41bd5..3a55a2da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -182,6 +182,16 @@ fail_under = 70 # Minimum coverage percentage to pass addopts = '--import-mode=importlib' markers = ['fast: mark test as fast (should be run on every push)'] testpaths = ['tests'] +filterwarnings = [ + # TEMPRORARY: Suppress some warnings + # uncertainties 3.x warns on UFloat(value, 0); our CIF parser + # intentionally creates zero-uncertainty values for free parameters. + 'ignore:Using UFloat objects with std_dev==0:UserWarning:uncertainties', + # diffpy internals call their own deprecated APIs; nothing we can fix. + "ignore:'diffpy\\.structure\\.GetSpaceGroup':DeprecationWarning", + "ignore:'diffpy\\.structure\\.expandPosition':DeprecationWarning", + "ignore:'diffpy\\.structure\\.Structure\\.writeStr':DeprecationWarning", +] ######################## # Configuration for ruff From e695f4dca9a6403ce69b51217e06d79c457efb86 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 21:06:46 +0200 Subject: [PATCH 32/51] Enable RUF rules and fix all 297 violations --- pixi.lock | 4 +-- pyproject.toml | 7 +++- .../analysis/calculators/factory.py | 4 ++- .../analysis/categories/aliases/default.py | 2 +- .../analysis/categories/aliases/factory.py | 4 ++- .../categories/constraints/factory.py | 4 ++- .../analysis/categories/fit_mode/factory.py | 4 ++- .../joint_fit_experiments/factory.py | 4 ++- src/easydiffraction/analysis/fitting.py | 8 +++-- .../analysis/minimizers/factory.py | 4 ++- src/easydiffraction/analysis/sequential.py | 32 +++++++++---------- src/easydiffraction/core/factory.py | 5 +-- src/easydiffraction/core/variable.py | 4 +-- .../crystallography/space_groups.py | 4 +-- .../categories/background/factory.py | 6 +++- .../experiment/categories/data/bragg_pd.py | 4 +-- .../experiment/categories/data/factory.py | 6 +++- .../experiment/categories/diffrn/factory.py | 4 ++- .../categories/excluded_regions/factory.py | 4 ++- .../categories/experiment_type/factory.py | 4 ++- .../categories/extinction/factory.py | 4 ++- .../experiment/categories/extinction/shelx.py | 4 +-- .../categories/instrument/factory.py | 6 +++- .../experiment/categories/instrument/tof.py | 16 +++++----- .../categories/linked_crystal/factory.py | 4 ++- .../categories/linked_phases/factory.py | 4 ++- .../experiment/categories/peak/cwl.py | 2 +- .../experiment/categories/peak/cwl_mixins.py | 2 +- .../experiment/categories/peak/factory.py | 6 +++- .../experiment/categories/peak/tof.py | 2 +- .../experiment/categories/peak/tof_mixins.py | 28 ++++++++-------- .../datablocks/experiment/item/base.py | 2 +- .../datablocks/experiment/item/factory.py | 3 +- .../categories/atom_sites/factory.py | 4 ++- .../structure/categories/cell/factory.py | 4 ++- .../categories/space_group/default.py | 2 +- .../categories/space_group/factory.py | 4 ++- src/easydiffraction/display/plotters/base.py | 2 +- src/easydiffraction/display/plotting.py | 2 +- src/easydiffraction/display/utils.py | 2 +- src/easydiffraction/io/cif/serialize.py | 14 ++++---- src/easydiffraction/project/project.py | 2 +- src/easydiffraction/utils/environment.py | 2 +- src/easydiffraction/utils/logging.py | 11 ++++--- src/easydiffraction/utils/utils.py | 14 ++++---- tests/functional/conftest.py | 4 +-- .../fitting/test_cif_round_trip.py | 4 +-- .../integration/fitting/test_project_load.py | 2 +- tests/integration/fitting/test_sequential.py | 4 +-- .../analysis/test_sequential.py | 11 ++++--- .../easydiffraction/core/test_diagnostic.py | 2 +- .../categories/data/test_factory.py | 9 ------ tests/unit/easydiffraction/io/test_ascii.py | 2 +- 53 files changed, 175 insertions(+), 127 deletions(-) diff --git a/pixi.lock b/pixi.lock index 0bf6a5da..f61d83c6 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty34 - sha256: 98844d7e54295472444bc52561790e364298c7db2cf588dbd95253c473a9d65c + version: 0.11.1+devdirty35 + sha256: 0360b3b8adefaf7a9a9440e18cbbdc7e2c249149c9543b7dd67e33ab41a60216 requires_dist: - asciichartpy - asteval diff --git a/pyproject.toml b/pyproject.toml index 3a55a2da..bb09188a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -235,7 +235,7 @@ select = [ 'NPY', # https://docs.astral.sh/ruff/rules/#numpy-specific-rules-npy 'PGH', # https://docs.astral.sh/ruff/rules/#pygrep-hooks-pgh 'PERF', # https://docs.astral.sh/ruff/rules/#perflint-perf - #'RUF', # https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf + 'RUF', # https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf 'TRY', # https://docs.astral.sh/ruff/rules/#tryceratops-try 'UP', # https://docs.astral.sh/ruff/rules/#pyupgrade-up # pycodestyle (E, W) rules @@ -306,6 +306,8 @@ ignore = [ 'D', # https://docs.astral.sh/ruff/rules/#pydocstyle-d 'DOC', # https://docs.astral.sh/ruff/rules/#pydoclint-doc 'INP001', # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ + 'RUF012', # https://docs.astral.sh/ruff/rules/mutable-class-default/ (test stubs use mutable defaults) + 'RUF069', # https://docs.astral.sh/ruff/rules/unreliable-float-equality/ (exact comparisons in assertions) 'S101', # https://docs.astral.sh/ruff/rules/assert/ # Temporary: 'ARG001', @@ -332,6 +334,9 @@ ignore = [ ] 'docs/**' = [ 'INP001', # https://docs.astral.sh/ruff/rules/implicit-namespace-package/ + 'RUF001', # https://docs.astral.sh/ruff/rules/ambiguous-unicode-character-string/ (scientific symbols) + 'RUF002', # https://docs.astral.sh/ruff/rules/ambiguous-unicode-character-docstring/ (scientific symbols) + 'RUF003', # https://docs.astral.sh/ruff/rules/ambiguous-unicode-character-comment/ (en-dashes in headings) 'T201', # https://docs.astral.sh/ruff/rules/print/ # Temporary: 'ANN', diff --git a/src/easydiffraction/analysis/calculators/factory.py b/src/easydiffraction/analysis/calculators/factory.py index a1cda626..6744d86d 100644 --- a/src/easydiffraction/analysis/calculators/factory.py +++ b/src/easydiffraction/analysis/calculators/factory.py @@ -9,6 +9,8 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase from easydiffraction.datablocks.experiment.item.enums import CalculatorEnum from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum @@ -22,7 +24,7 @@ class CalculatorFactory(FactoryBase): available for creation. """ - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset({ ('scattering_type', ScatteringTypeEnum.BRAGG), }): CalculatorEnum.CRYSPY, diff --git a/src/easydiffraction/analysis/categories/aliases/default.py b/src/easydiffraction/analysis/categories/aliases/default.py index 8aac2cdc..eef6201a 100644 --- a/src/easydiffraction/analysis/categories/aliases/default.py +++ b/src/easydiffraction/analysis/categories/aliases/default.py @@ -54,7 +54,7 @@ def __init__(self) -> None: # Direct reference to the Parameter object (runtime only). # Stored via object.__setattr__ to avoid parent-chain mutation. - object.__setattr__(self, '_param_ref', None) # noqa: PLC2801 + object.__setattr__(self, '_param_ref', None) self._identity.category_code = 'alias' self._identity.category_entry_name = lambda: str(self.label.value) diff --git a/src/easydiffraction/analysis/categories/aliases/factory.py b/src/easydiffraction/analysis/categories/aliases/factory.py index f2bebe43..4ca72d76 100644 --- a/src/easydiffraction/analysis/categories/aliases/factory.py +++ b/src/easydiffraction/analysis/categories/aliases/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class AliasesFactory(FactoryBase): """Create alias collections by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/analysis/categories/constraints/factory.py b/src/easydiffraction/analysis/categories/constraints/factory.py index 682c9684..54656220 100644 --- a/src/easydiffraction/analysis/categories/constraints/factory.py +++ b/src/easydiffraction/analysis/categories/constraints/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class ConstraintsFactory(FactoryBase): """Create constraint collections by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/analysis/categories/fit_mode/factory.py b/src/easydiffraction/analysis/categories/fit_mode/factory.py index f10485f8..662b90c4 100644 --- a/src/easydiffraction/analysis/categories/fit_mode/factory.py +++ b/src/easydiffraction/analysis/categories/fit_mode/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class FitModeFactory(FactoryBase): """Create fit-mode category items by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/analysis/categories/joint_fit_experiments/factory.py b/src/easydiffraction/analysis/categories/joint_fit_experiments/factory.py index 57666098..992af727 100644 --- a/src/easydiffraction/analysis/categories/joint_fit_experiments/factory.py +++ b/src/easydiffraction/analysis/categories/joint_fit_experiments/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class JointFitExperimentsFactory(FactoryBase): """Create joint-fit experiment collections by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/analysis/fitting.py b/src/easydiffraction/analysis/fitting.py index aa281082..dc7dffd9 100644 --- a/src/easydiffraction/analysis/fitting.py +++ b/src/easydiffraction/analysis/fitting.py @@ -188,17 +188,19 @@ def _residual_function( # Prepare weights for joint fitting num_expts: int = len(experiments) - _weights = np.ones(num_expts) if weights is None else np.asarray(weights, dtype=np.float64) + norm_weights = ( + np.ones(num_expts) if weights is None else np.asarray(weights, dtype=np.float64) + ) # Normalize weights so they sum to num_expts # We should obtain the same reduced chi_squared when a single # dataset is split into two parts and fit together. If weights # sum to one, then reduced chi_squared will be half as large as # expected. - _weights = _weights * (num_expts / np.sum(_weights)) + norm_weights = norm_weights * (num_expts / np.sum(norm_weights)) residuals: list[float] = [] - for experiment, weight in zip(experiments, _weights, strict=True): + for experiment, weight in zip(experiments, norm_weights, strict=True): # Update experiment-specific calculations experiment._update_categories(called_by_minimizer=True) diff --git a/src/easydiffraction/analysis/minimizers/factory.py b/src/easydiffraction/analysis/minimizers/factory.py index 18f67cc6..e14f2116 100644 --- a/src/easydiffraction/analysis/minimizers/factory.py +++ b/src/easydiffraction/analysis/minimizers/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class MinimizerFactory(FactoryBase): """Factory for creating minimizer instances.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'lmfit', } diff --git a/src/easydiffraction/analysis/sequential.py b/src/easydiffraction/analysis/sequential.py index 9c3b45f6..f1f7b110 100644 --- a/src/easydiffraction/analysis/sequential.py +++ b/src/easydiffraction/analysis/sequential.py @@ -101,7 +101,7 @@ def _fit_worker( # 3. Load experiment from template CIF # (full config + template data) project.experiments.add_from_cif_str(template.experiment_cif) - expt = list(project.experiments.values())[0] + expt = next(iter(project.experiments.values())) # 4. Replace data from the new data path expt._load_ascii_data_to_experiment(data_path) @@ -135,7 +135,7 @@ def _fit_worker( # 10. Collect results result.update(_collect_results(project, template)) - except Exception as exc: # noqa: BLE001 + except Exception as exc: result['fit_success'] = False result['chi_squared'] = None result['reduced_chi_squared'] = None @@ -423,8 +423,8 @@ def _build_template(project: object) -> SequentialFitTemplate: """ from easydiffraction.core.variable import Parameter # noqa: PLC0415 - structure = list(project.structures.values())[0] - experiment = list(project.experiments.values())[0] + structure = next(iter(project.structures.values())) + experiment = next(iter(project.experiments.values())) # Collect free parameter unique_names and initial values all_params = project.structures.parameters + project.experiments.parameters @@ -671,16 +671,16 @@ def fit_sequential( # bootstrap has no path to re-import the script. ``_fit_worker`` # lives in this module (not ``__main__``), so it is still resolved # via normal pickle/import machinery. - _main_mod = sys.modules.get('__main__') - _main_file_bak = getattr(_main_mod, '__file__', None) - _main_spec_bak = getattr(_main_mod, '__spec__', None) + main_mod = sys.modules.get('__main__') + main_file_bak = getattr(main_mod, '__file__', None) + main_spec_bak = getattr(main_mod, '__spec__', None) if max_workers > 1: # Hide __main__ origin from spawn - if _main_mod is not None and _main_file_bak is not None: - _main_mod.__file__ = None # type: ignore[assignment] - if _main_mod is not None and _main_spec_bak is not None: - _main_mod.__spec__ = None + if main_mod is not None and main_file_bak is not None: + main_mod.__file__ = None # type: ignore[assignment] + if main_mod is not None and main_spec_bak is not None: + main_mod.__spec__ = None spawn_ctx = mp.get_context('spawn') pool_cm = ProcessPoolExecutor( @@ -708,7 +708,7 @@ def fit_sequential( diffrn_values = extract_diffrn(result['file_path']) for key, val in diffrn_values.items(): result[f'diffrn.{key}'] = val - except Exception as exc: # noqa: BLE001 + except Exception as exc: log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') # Write to CSV @@ -729,10 +729,10 @@ def fit_sequential( template = replace(template, initial_params=last_ok['params']) finally: # Restore __main__ attributes - if _main_mod is not None and _main_file_bak is not None: - _main_mod.__file__ = _main_file_bak - if _main_mod is not None and _main_spec_bak is not None: - _main_mod.__spec__ = _main_spec_bak + if main_mod is not None and main_file_bak is not None: + main_mod.__file__ = main_file_bak + if main_mod is not None and main_spec_bak is not None: + main_mod.__spec__ = main_spec_bak if verb is not VerbosityEnum.SILENT: total_fitted = len(already_fitted) + len(remaining) diff --git a/src/easydiffraction/core/factory.py b/src/easydiffraction/core/factory.py index 4557cd22..58b22f01 100644 --- a/src/easydiffraction/core/factory.py +++ b/src/easydiffraction/core/factory.py @@ -10,6 +10,7 @@ from __future__ import annotations from typing import Any +from typing import ClassVar from easydiffraction.utils.logging import console from easydiffraction.utils.utils import render_table @@ -28,8 +29,8 @@ class FactoryBase: independent ``_registry`` list. """ - _registry: list[type] = [] - _default_rules: dict[frozenset[tuple[str, Any]], str] = {} + _registry: ClassVar[list[type]] = [] + _default_rules: ClassVar[dict[frozenset[tuple[str, Any]], str]] = {} def __init_subclass__(cls, **kwargs: object) -> None: """Give each subclass its own independent registry and rules.""" diff --git a/src/easydiffraction/core/variable.py b/src/easydiffraction/core/variable.py index 6d987fd1..c13bf28d 100644 --- a/src/easydiffraction/core/variable.py +++ b/src/easydiffraction/core/variable.py @@ -43,7 +43,7 @@ def __init__( *, value_spec: AttributeSpec, name: str, - description: str = None, + description: str | None = None, ) -> None: """ Initialize the descriptor with validation and identity. @@ -54,7 +54,7 @@ def __init__( Validation specification for the value. name : str Local name of the descriptor within its category. - description : str, default=None + description : str | None, default=None Optional human-readable description. """ super().__init__() diff --git a/src/easydiffraction/crystallography/space_groups.py b/src/easydiffraction/crystallography/space_groups.py index 4047b8c5..052b4c21 100644 --- a/src/easydiffraction/crystallography/space_groups.py +++ b/src/easydiffraction/crystallography/space_groups.py @@ -9,7 +9,7 @@ """ import gzip -import pickle # noqa: S403 - trusted internal pickle file (package data only) +import pickle # noqa: S403 from pathlib import Path @@ -20,7 +20,7 @@ def _restricted_pickle_load(file_obj: object) -> object: The archive lives in the package; no user-controlled input enters this function. If distribution process changes, revisit. """ - data = pickle.load(file_obj) # noqa: S301 - trusted internal pickle (see docstring) + data = pickle.load(file_obj) # noqa: S301 return data diff --git a/src/easydiffraction/datablocks/experiment/categories/background/factory.py b/src/easydiffraction/datablocks/experiment/categories/background/factory.py index c4d300c8..ac635f08 100644 --- a/src/easydiffraction/datablocks/experiment/categories/background/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/background/factory.py @@ -2,6 +2,10 @@ # SPDX-License-Identifier: BSD-3-Clause """Background factory — delegates entirely to ``FactoryBase``.""" +from __future__ import annotations + +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase from easydiffraction.datablocks.experiment.categories.background.enums import BackgroundTypeEnum @@ -9,6 +13,6 @@ class BackgroundFactory(FactoryBase): """Create background collections by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): BackgroundTypeEnum.LINE_SEGMENT, } diff --git a/src/easydiffraction/datablocks/experiment/categories/data/bragg_pd.py b/src/easydiffraction/datablocks/experiment/categories/data/bragg_pd.py index 00e62be4..beda3749 100644 --- a/src/easydiffraction/datablocks/experiment/categories/data/bragg_pd.py +++ b/src/easydiffraction/datablocks/experiment/categories/data/bragg_pd.py @@ -238,7 +238,7 @@ def __init__(self) -> None: self._time_of_flight = NumericDescriptor( name='time_of_flight', description='Measured time for time-of-flight neutron measurement.', - units='µs', + units='μs', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(ge=0), @@ -253,7 +253,7 @@ def __init__(self) -> None: @property def time_of_flight(self) -> NumericDescriptor: """ - Measured time for time-of-flight neutron measurement (µs). + Measured time for time-of-flight neutron measurement (μs). Reading this property returns the underlying ``NumericDescriptor`` object. diff --git a/src/easydiffraction/datablocks/experiment/categories/data/factory.py b/src/easydiffraction/datablocks/experiment/categories/data/factory.py index d8cdcf12..703a1fe7 100644 --- a/src/easydiffraction/datablocks/experiment/categories/data/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/data/factory.py @@ -2,6 +2,10 @@ # SPDX-License-Identifier: BSD-3-Clause """Data collection factory — delegates to ``FactoryBase``.""" +from __future__ import annotations + +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum @@ -11,7 +15,7 @@ class DataFactory(FactoryBase): """Factory for creating diffraction data collections.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset({ ('sample_form', SampleFormEnum.POWDER), ('scattering_type', ScatteringTypeEnum.BRAGG), diff --git a/src/easydiffraction/datablocks/experiment/categories/diffrn/factory.py b/src/easydiffraction/datablocks/experiment/categories/diffrn/factory.py index ef5fb719..be076276 100644 --- a/src/easydiffraction/datablocks/experiment/categories/diffrn/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/diffrn/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class DiffrnFactory(FactoryBase): """Create diffraction ambient-conditions category instances.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/datablocks/experiment/categories/excluded_regions/factory.py b/src/easydiffraction/datablocks/experiment/categories/excluded_regions/factory.py index e12fb0c0..8c0aa8d3 100644 --- a/src/easydiffraction/datablocks/experiment/categories/excluded_regions/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/excluded_regions/factory.py @@ -6,12 +6,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class ExcludedRegionsFactory(FactoryBase): """Create excluded-regions collections by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/datablocks/experiment/categories/experiment_type/factory.py b/src/easydiffraction/datablocks/experiment/categories/experiment_type/factory.py index 05f0d2d9..4d26f8f0 100644 --- a/src/easydiffraction/datablocks/experiment/categories/experiment_type/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/experiment_type/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class ExperimentTypeFactory(FactoryBase): """Create experiment-type descriptors by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/datablocks/experiment/categories/extinction/factory.py b/src/easydiffraction/datablocks/experiment/categories/extinction/factory.py index 4e4bd9ed..608e2574 100644 --- a/src/easydiffraction/datablocks/experiment/categories/extinction/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/extinction/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class ExtinctionFactory(FactoryBase): """Create extinction correction models by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'shelx', } diff --git a/src/easydiffraction/datablocks/experiment/categories/extinction/shelx.py b/src/easydiffraction/datablocks/experiment/categories/extinction/shelx.py index dd736a1a..42ffb810 100644 --- a/src/easydiffraction/datablocks/experiment/categories/extinction/shelx.py +++ b/src/easydiffraction/datablocks/experiment/categories/extinction/shelx.py @@ -47,7 +47,7 @@ def __init__(self) -> None: self._radius = Parameter( name='radius', description='Crystal radius for extinction correction', - units='µm', + units='μm', value_spec=AttributeSpec( default=1.0, validator=RangeValidator(), @@ -82,7 +82,7 @@ def mosaicity(self, value: float) -> None: @property def radius(self) -> Parameter: """ - Crystal radius for extinction correction (µm). + Crystal radius for extinction correction (μm). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. diff --git a/src/easydiffraction/datablocks/experiment/categories/instrument/factory.py b/src/easydiffraction/datablocks/experiment/categories/instrument/factory.py index fce8ad5c..5700e844 100644 --- a/src/easydiffraction/datablocks/experiment/categories/instrument/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/instrument/factory.py @@ -2,6 +2,10 @@ # SPDX-License-Identifier: BSD-3-Clause """Instrument factory — delegates to ``FactoryBase``.""" +from __future__ import annotations + +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum @@ -10,7 +14,7 @@ class InstrumentFactory(FactoryBase): """Create instrument instances for supported modes.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset({ ('beam_mode', BeamModeEnum.CONSTANT_WAVELENGTH), ('sample_form', SampleFormEnum.POWDER), diff --git a/src/easydiffraction/datablocks/experiment/categories/instrument/tof.py b/src/easydiffraction/datablocks/experiment/categories/instrument/tof.py index 7e1db98e..89f13fab 100644 --- a/src/easydiffraction/datablocks/experiment/categories/instrument/tof.py +++ b/src/easydiffraction/datablocks/experiment/categories/instrument/tof.py @@ -70,7 +70,7 @@ def __init__(self) -> None: self._calib_d_to_tof_offset: Parameter = Parameter( name='d_to_tof_offset', description='TOF offset', - units='µs', + units='μs', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(), @@ -80,7 +80,7 @@ def __init__(self) -> None: self._calib_d_to_tof_linear: Parameter = Parameter( name='d_to_tof_linear', description='TOF linear conversion', - units='µs/Å', + units='μs/Å', value_spec=AttributeSpec( default=10000.0, validator=RangeValidator(), @@ -90,7 +90,7 @@ def __init__(self) -> None: self._calib_d_to_tof_quad: Parameter = Parameter( name='d_to_tof_quad', description='TOF quadratic correction', - units='µs/Ų', + units='μs/Ų', value_spec=AttributeSpec( default=-0.00001, # TODO: Fix CrysPy to accept 0 validator=RangeValidator(), @@ -100,7 +100,7 @@ def __init__(self) -> None: self._calib_d_to_tof_recip: Parameter = Parameter( name='d_to_tof_recip', description='TOF reciprocal velocity correction', - units='µs·Å', + units='μs·Å', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(), @@ -125,7 +125,7 @@ def setup_twotheta_bank(self, value: float) -> None: @property def calib_d_to_tof_offset(self) -> Parameter: """ - TOF offset (µs). + TOF offset (μs). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -139,7 +139,7 @@ def calib_d_to_tof_offset(self, value: float) -> None: @property def calib_d_to_tof_linear(self) -> Parameter: """ - TOF linear conversion (µs/Å). + TOF linear conversion (μs/Å). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -153,7 +153,7 @@ def calib_d_to_tof_linear(self, value: float) -> None: @property def calib_d_to_tof_quad(self) -> Parameter: """ - TOF quadratic correction (µs/Ų). + TOF quadratic correction (μs/Ų). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -167,7 +167,7 @@ def calib_d_to_tof_quad(self, value: float) -> None: @property def calib_d_to_tof_recip(self) -> Parameter: """ - TOF reciprocal velocity correction (µs·Å). + TOF reciprocal velocity correction (μs·Å). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. diff --git a/src/easydiffraction/datablocks/experiment/categories/linked_crystal/factory.py b/src/easydiffraction/datablocks/experiment/categories/linked_crystal/factory.py index b34b8073..9715c3c3 100644 --- a/src/easydiffraction/datablocks/experiment/categories/linked_crystal/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/linked_crystal/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class LinkedCrystalFactory(FactoryBase): """Create linked-crystal references by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/datablocks/experiment/categories/linked_phases/factory.py b/src/easydiffraction/datablocks/experiment/categories/linked_phases/factory.py index 56970ee8..65095dc6 100644 --- a/src/easydiffraction/datablocks/experiment/categories/linked_phases/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/linked_phases/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class LinkedPhasesFactory(FactoryBase): """Create linked-phases collections by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/datablocks/experiment/categories/peak/cwl.py b/src/easydiffraction/datablocks/experiment/categories/peak/cwl.py index a2b4f63b..76b3c663 100644 --- a/src/easydiffraction/datablocks/experiment/categories/peak/cwl.py +++ b/src/easydiffraction/datablocks/experiment/categories/peak/cwl.py @@ -70,7 +70,7 @@ class CwlThompsonCoxHastings( CwlBroadeningMixin, FcjAsymmetryMixin, ): - """Thompson–Cox–Hastings with FCJ asymmetry for CWL mode.""" + """Thompson-Cox-Hastings with FCJ asymmetry for CWL mode.""" type_info = TypeInfo( tag='thompson-cox-hastings', diff --git a/src/easydiffraction/datablocks/experiment/categories/peak/cwl_mixins.py b/src/easydiffraction/datablocks/experiment/categories/peak/cwl_mixins.py index 6e4f29c8..be379772 100644 --- a/src/easydiffraction/datablocks/experiment/categories/peak/cwl_mixins.py +++ b/src/easydiffraction/datablocks/experiment/categories/peak/cwl_mixins.py @@ -255,7 +255,7 @@ def asym_empir_4(self, value: float) -> None: class FcjAsymmetryMixin: - """Finger–Cox–Jephcoat (FCJ) asymmetry parameters.""" + """Finger-Cox-Jephcoat (FCJ) asymmetry parameters.""" def __init__(self) -> None: super().__init__() diff --git a/src/easydiffraction/datablocks/experiment/categories/peak/factory.py b/src/easydiffraction/datablocks/experiment/categories/peak/factory.py index ca196748..f6add493 100644 --- a/src/easydiffraction/datablocks/experiment/categories/peak/factory.py +++ b/src/easydiffraction/datablocks/experiment/categories/peak/factory.py @@ -2,6 +2,10 @@ # SPDX-License-Identifier: BSD-3-Clause """Peak profile factory — delegates to ``FactoryBase``.""" +from __future__ import annotations + +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum from easydiffraction.datablocks.experiment.item.enums import PeakProfileTypeEnum @@ -11,7 +15,7 @@ class PeakFactory(FactoryBase): """Factory for creating peak profile objects.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset({ ('scattering_type', ScatteringTypeEnum.BRAGG), ('beam_mode', BeamModeEnum.CONSTANT_WAVELENGTH), diff --git a/src/easydiffraction/datablocks/experiment/categories/peak/tof.py b/src/easydiffraction/datablocks/experiment/categories/peak/tof.py index 59c0b9e3..31437c3c 100644 --- a/src/easydiffraction/datablocks/experiment/categories/peak/tof.py +++ b/src/easydiffraction/datablocks/experiment/categories/peak/tof.py @@ -45,7 +45,7 @@ class TofPseudoVoigtIkedaCarpenter( TofBroadeningMixin, IkedaCarpenterAsymmetryMixin, ): - """TOF pseudo-Voigt with Ikeda–Carpenter asymmetry.""" + """TOF pseudo-Voigt with Ikeda-Carpenter asymmetry.""" type_info = TypeInfo( tag='pseudo-voigt * ikeda-carpenter', diff --git a/src/easydiffraction/datablocks/experiment/categories/peak/tof_mixins.py b/src/easydiffraction/datablocks/experiment/categories/peak/tof_mixins.py index 8093d877..29463992 100644 --- a/src/easydiffraction/datablocks/experiment/categories/peak/tof_mixins.py +++ b/src/easydiffraction/datablocks/experiment/categories/peak/tof_mixins.py @@ -4,7 +4,7 @@ Time-of-flight (TOF) peak-profile component classes. Defines classes that add Gaussian/Lorentz broadening, mixing, and -Ikeda–Carpenter asymmetry parameters used by TOF peak shapes. This +Ikeda-Carpenter asymmetry parameters used by TOF peak shapes. This module provides classes that add broadening and asymmetry parameters. They are composed into concrete peak classes elsewhere via multiple inheritance. @@ -25,7 +25,7 @@ def __init__(self) -> None: self._broad_gauss_sigma_0 = Parameter( name='gauss_sigma_0', description='Gaussian broadening (instrumental resolution)', - units='µs²', + units='μs²', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(), @@ -35,7 +35,7 @@ def __init__(self) -> None: self._broad_gauss_sigma_1 = Parameter( name='gauss_sigma_1', description='Gaussian broadening (dependent on d-spacing)', - units='µs/Å', + units='μs/Å', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(), @@ -45,7 +45,7 @@ def __init__(self) -> None: self._broad_gauss_sigma_2 = Parameter( name='gauss_sigma_2', description='Gaussian broadening (instrument-dependent term)', - units='µs²/Ų', + units='μs²/Ų', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(), @@ -55,7 +55,7 @@ def __init__(self) -> None: self._broad_lorentz_gamma_0 = Parameter( name='lorentz_gamma_0', description='Lorentzian broadening (microstrain effects)', - units='µs', + units='μs', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(), @@ -65,7 +65,7 @@ def __init__(self) -> None: self._broad_lorentz_gamma_1 = Parameter( name='lorentz_gamma_1', description='Lorentzian broadening (dependent on d-spacing)', - units='µs/Å', + units='μs/Å', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(), @@ -75,7 +75,7 @@ def __init__(self) -> None: self._broad_lorentz_gamma_2 = Parameter( name='lorentz_gamma_2', description='Lorentzian broadening (instrument-dependent term)', - units='µs²/Ų', + units='μs²/Ų', value_spec=AttributeSpec( default=0.0, validator=RangeValidator(), @@ -110,7 +110,7 @@ def __init__(self) -> None: @property def broad_gauss_sigma_0(self) -> Parameter: """ - Gaussian broadening (instrumental resolution) (µs²). + Gaussian broadening (instrumental resolution) (μs²). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -124,7 +124,7 @@ def broad_gauss_sigma_0(self, value: float) -> None: @property def broad_gauss_sigma_1(self) -> Parameter: """ - Gaussian broadening (dependent on d-spacing) (µs/Å). + Gaussian broadening (dependent on d-spacing) (μs/Å). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -138,7 +138,7 @@ def broad_gauss_sigma_1(self, value: float) -> None: @property def broad_gauss_sigma_2(self) -> Parameter: """ - Gaussian broadening (instrument-dependent term) (µs²/Ų). + Gaussian broadening (instrument-dependent term) (μs²/Ų). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -152,7 +152,7 @@ def broad_gauss_sigma_2(self, value: float) -> None: @property def broad_lorentz_gamma_0(self) -> Parameter: """ - Lorentzian broadening (microstrain effects) (µs). + Lorentzian broadening (microstrain effects) (μs). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -166,7 +166,7 @@ def broad_lorentz_gamma_0(self, value: float) -> None: @property def broad_lorentz_gamma_1(self) -> Parameter: """ - Lorentzian broadening (dependent on d-spacing) (µs/Å). + Lorentzian broadening (dependent on d-spacing) (μs/Å). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -180,7 +180,7 @@ def broad_lorentz_gamma_1(self, value: float) -> None: @property def broad_lorentz_gamma_2(self) -> Parameter: """ - Lorentzian broadening (instrument-dependent term) (µs²/Ų). + Lorentzian broadening (instrument-dependent term) (μs²/Ų). Reading this property returns the underlying ``Parameter`` object. Assigning to it updates the parameter value. @@ -221,7 +221,7 @@ def broad_mix_beta_1(self, value: float) -> None: class IkedaCarpenterAsymmetryMixin: - """Ikeda–Carpenter asymmetry parameters.""" + """Ikeda-Carpenter asymmetry parameters.""" def __init__(self) -> None: super().__init__() diff --git a/src/easydiffraction/datablocks/experiment/item/base.py b/src/easydiffraction/datablocks/experiment/item/base.py index d3a0f4a0..6b83e786 100644 --- a/src/easydiffraction/datablocks/experiment/item/base.py +++ b/src/easydiffraction/datablocks/experiment/item/base.py @@ -568,7 +568,7 @@ def _load_ascii_data_to_experiment(self, data_path: str) -> int: ---------- data_path : str Path to data file with columns compatible with the beam mode - (e.g. 2θ/I/σ for CWL, TOF/I/σ for TOF). + (e.g. 2theta/I/sigma for CWL, TOF/I/sigma for TOF). Returns ------- diff --git a/src/easydiffraction/datablocks/experiment/item/factory.py b/src/easydiffraction/datablocks/experiment/item/factory.py index 5c0b3094..fea39321 100644 --- a/src/easydiffraction/datablocks/experiment/item/factory.py +++ b/src/easydiffraction/datablocks/experiment/item/factory.py @@ -11,6 +11,7 @@ from __future__ import annotations from typing import TYPE_CHECKING +from typing import ClassVar from typeguard import typechecked @@ -36,7 +37,7 @@ class ExperimentFactory(FactoryBase): """Creates Experiment instances with only relevant attributes.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset({ ('scattering_type', ScatteringTypeEnum.BRAGG), ('sample_form', SampleFormEnum.POWDER), diff --git a/src/easydiffraction/datablocks/structure/categories/atom_sites/factory.py b/src/easydiffraction/datablocks/structure/categories/atom_sites/factory.py index c91b3dda..c66399f3 100644 --- a/src/easydiffraction/datablocks/structure/categories/atom_sites/factory.py +++ b/src/easydiffraction/datablocks/structure/categories/atom_sites/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class AtomSitesFactory(FactoryBase): """Create atom-sites collections by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/datablocks/structure/categories/cell/factory.py b/src/easydiffraction/datablocks/structure/categories/cell/factory.py index 6817b2d7..7afb388f 100644 --- a/src/easydiffraction/datablocks/structure/categories/cell/factory.py +++ b/src/easydiffraction/datablocks/structure/categories/cell/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class CellFactory(FactoryBase): """Create unit-cell categories by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/datablocks/structure/categories/space_group/default.py b/src/easydiffraction/datablocks/structure/categories/space_group/default.py index a91cc554..d55247bc 100644 --- a/src/easydiffraction/datablocks/structure/categories/space_group/default.py +++ b/src/easydiffraction/datablocks/structure/categories/space_group/default.py @@ -90,7 +90,7 @@ def _reset_it_coordinate_system_code(self) -> None: @property def _name_h_m_allowed_values(self) -> list[str]: """ - Return the list of recognised Hermann–Mauguin short symbols. + Return the list of recognised Hermann-Mauguin short symbols. Returns ------- diff --git a/src/easydiffraction/datablocks/structure/categories/space_group/factory.py b/src/easydiffraction/datablocks/structure/categories/space_group/factory.py index 9ef8611d..4dd617aa 100644 --- a/src/easydiffraction/datablocks/structure/categories/space_group/factory.py +++ b/src/easydiffraction/datablocks/structure/categories/space_group/factory.py @@ -4,12 +4,14 @@ from __future__ import annotations +from typing import ClassVar + from easydiffraction.core.factory import FactoryBase class SpaceGroupFactory(FactoryBase): """Create space-group categories by tag.""" - _default_rules = { + _default_rules: ClassVar[dict] = { frozenset(): 'default', } diff --git a/src/easydiffraction/display/plotters/base.py b/src/easydiffraction/display/plotters/base.py index f3a3b86c..8fb018e6 100644 --- a/src/easydiffraction/display/plotters/base.py +++ b/src/easydiffraction/display/plotters/base.py @@ -88,7 +88,7 @@ class XAxisType(StrEnum): ScatteringTypeEnum.BRAGG, XAxisType.TIME_OF_FLIGHT, ): [ - 'TOF (µs)', + 'TOF (μs)', 'Intensity (arb. units)', ], ( diff --git a/src/easydiffraction/display/plotting.py b/src/easydiffraction/display/plotting.py index 92a3a031..2d0b7c68 100644 --- a/src/easydiffraction/display/plotting.py +++ b/src/easydiffraction/display/plotting.py @@ -199,7 +199,7 @@ def _prepare_powder_data( need_calc : bool, default=False Whether ``intensity_calc`` is required. show_residual : bool, default=False - If ``True``, compute meas − calc residual. + If ``True``, compute meas - calc residual. Returns ------- diff --git a/src/easydiffraction/display/utils.py b/src/easydiffraction/display/utils.py index 17c6fa94..5daba0b4 100644 --- a/src/easydiffraction/display/utils.py +++ b/src/easydiffraction/display/utils.py @@ -8,7 +8,7 @@ from easydiffraction.utils.environment import in_jupyter from easydiffraction.utils.logging import log -# Optional import – safe even if IPython is not installed +# Optional import - safe even if IPython is not installed try: from IPython.display import HTML from IPython.display import display diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index fa035981..d91e99f5 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -168,7 +168,7 @@ def category_collection_to_cif( lines: list[str] = [] # Header - first_item = list(collection.values())[0] + first_item = next(iter(collection.values())) lines.append('loop_') for p in first_item.parameters: tags = p._cif_handler.names # type: ignore[attr-defined] @@ -354,17 +354,17 @@ def project_info_from_cif(info: object, cif_text: str) -> None: doc = gemmi.cif.read_string(_wrap_in_data_block(cif_text, 'project')) block = doc.sole_block() - _read_cif_string = _make_cif_string_reader(block) + read_cif_string = _make_cif_string_reader(block) - name = _read_cif_string('_project.id') + name = read_cif_string('_project.id') if name is not None: info.name = name - title = _read_cif_string('_project.title') + title = read_cif_string('_project.title') if title is not None: info.title = title - description = _read_cif_string('_project.description') + description = read_cif_string('_project.description') if description is not None: info.description = description @@ -388,10 +388,10 @@ def analysis_from_cif(analysis: object, cif_text: str) -> None: doc = gemmi.cif.read_string(_wrap_in_data_block(cif_text, 'analysis')) block = doc.sole_block() - _read_cif_string = _make_cif_string_reader(block) + read_cif_string = _make_cif_string_reader(block) # Restore minimizer selection - engine = _read_cif_string('_analysis.fitting_engine') + engine = read_cif_string('_analysis.fitting_engine') if engine is not None: from easydiffraction.analysis.fitting import Fitter # noqa: PLC0415 diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index 5bc96e79..50fb5b1e 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -417,7 +417,7 @@ def apply_params_from_csv(self, row_index: int) -> None: # 1. Reload data if file_path points to a real file file_path = row.get('file_path', '') if file_path and pathlib.Path(file_path).is_file(): - experiment = list(self.experiments.values())[0] + experiment = next(iter(self.experiments.values())) experiment._load_ascii_data_to_experiment(file_path) # 2. Override parameter values diff --git a/src/easydiffraction/utils/environment.py b/src/easydiffraction/utils/environment.py index 5e028d3b..d9f85669 100644 --- a/src/easydiffraction/utils/environment.py +++ b/src/easydiffraction/utils/environment.py @@ -154,7 +154,7 @@ def can_update_ipython_display() -> bool: update a display handle. """ try: - from IPython.display import HTML # type: ignore[import-not-found] # noqa: F401, PLC0415 + pass # type: ignore[import-not-found] except Exception: return False else: diff --git a/src/easydiffraction/utils/logging.py b/src/easydiffraction/utils/logging.py index f7f5ef0b..13deeaa4 100644 --- a/src/easydiffraction/utils/logging.py +++ b/src/easydiffraction/utils/logging.py @@ -19,6 +19,7 @@ from enum import IntEnum from enum import auto from typing import TYPE_CHECKING +from typing import ClassVar if TYPE_CHECKING: # pragma: no cover from types import TracebackType @@ -46,12 +47,12 @@ class IconifiedRichHandler(RichHandler): """RichHandler using icons (compact) or names (verbose).""" - _icons = { + _icons: ClassVar[dict] = { logging.CRITICAL: '💀', logging.ERROR: '❌', logging.WARNING: '⚠️', logging.DEBUG: '⚙️', - logging.INFO: 'ℹ️', + logging.INFO: 'ℹ️', # noqa: RUF001 } def __init__(self, *args: object, mode: str = 'compact', **kwargs: object) -> None: @@ -74,7 +75,7 @@ def get_level_text(self, record: logging.LogRecord) -> Text: """ if self.mode == 'compact': icon = self._icons.get(record.levelno, record.levelname) - if in_warp() and not in_jupyter() and icon in {'⚠️', '⚙️', 'ℹ️'}: + if in_warp() and not in_jupyter() and icon in {'⚠️', '⚙️', 'ℹ️'}: # noqa: RUF001 icon += ' ' # add space to align with two-char icons return Text(icon) # Use RichHandler's default level text for verbose mode @@ -325,10 +326,10 @@ def _suppress_traceback(logger: object) -> object: def suppress_jupyter_traceback(*args: object, **kwargs: object) -> None: """Log only the exception message.""" try: - _evalue = ( + evalue = ( args[2] if len(args) > 2 else kwargs.get('_evalue') or kwargs.get('evalue') ) - logger.error(str(_evalue)) + logger.error(str(evalue)) except Exception as err: logger.debug('Jupyter traceback suppressor failed: %r', err) diff --git a/src/easydiffraction/utils/utils.py b/src/easydiffraction/utils/utils.py index 0108422d..cf9a9f78 100644 --- a/src/easydiffraction/utils/utils.py +++ b/src/easydiffraction/utils/utils.py @@ -317,12 +317,12 @@ def _safe_urlopen(request_or_url: object) -> object: # type: ignore[no-untyped- if parsed.scheme != 'https': # pragma: no cover - sanity check msg = 'Only https URLs are permitted' raise ValueError(msg) - elif isinstance(request_or_url, urllib.request.Request): # noqa: S310 - request object inspected, not opened + elif isinstance(request_or_url, urllib.request.Request): # noqa: S310 parsed = urllib.parse.urlparse(request_or_url.full_url) if parsed.scheme != 'https': # pragma: no cover msg = 'Only https URLs are permitted' raise ValueError(msg) - return urllib.request.urlopen(request_or_url) # noqa: S310 - validated https only + return urllib.request.urlopen(request_or_url) # noqa: S310 def _resolve_tutorial_url(url_template: str) -> str: @@ -578,13 +578,13 @@ def tof_to_d( Parameters ---------- tof : np.ndarray - Time-of-flight values (µs). Must be a NumPy array. + Time-of-flight values (μs). Must be a NumPy array. offset : float - Calibration offset (µs). + Calibration offset (μs). linear : float - Linear calibration coefficient (µs/Å). + Linear calibration coefficient (μs/Å). quad : float - Quadratic calibration coefficient (µs/Ų). + Quadratic calibration coefficient (μs/Ų). quad_eps : float, default=1e-20 Threshold to treat ``quad`` as zero. @@ -620,7 +620,7 @@ def tof_to_d( # TOF ≈ offset + linear * d => # d ≈ (tof - offset) / linear if abs(quad) < quad_eps: - if linear != 0.0: + if abs(linear) > quad_eps: d = (tof - offset) / linear # Keep only positive, finite results valid = np.isfinite(d) & (d > 0) diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py index 6da03de1..4ca5f7e9 100644 --- a/tests/functional/conftest.py +++ b/tests/functional/conftest.py @@ -14,7 +14,7 @@ @pytest.fixture def project(tmp_path): """Create a minimal unsaved Project for functional tests.""" - from easydiffraction import Project # noqa: PLC0415 + from easydiffraction import Project return Project(name='func_test') @@ -22,7 +22,7 @@ def project(tmp_path): @pytest.fixture def saved_project(tmp_path): """Create a minimal Project saved to a temp directory.""" - from easydiffraction import Project # noqa: PLC0415 + from easydiffraction import Project project = Project(name='func_test') project.save_as(str(tmp_path / 'func_project')) diff --git a/tests/integration/fitting/test_cif_round_trip.py b/tests/integration/fitting/test_cif_round_trip.py index b089027b..4ff5c02d 100644 --- a/tests/integration/fitting/test_cif_round_trip.py +++ b/tests/integration/fitting/test_cif_round_trip.py @@ -219,8 +219,8 @@ def test_experiment_cif_round_trip_preserves_data() -> None: ) # First and last data point two_theta and intensity_meas - orig_first = list(original.data.values())[0] - loaded_first = list(loaded.data.values())[0] + orig_first = next(iter(original.data.values())) + loaded_first = next(iter(loaded.data.values())) orig_last = list(original.data.values())[-1] loaded_last = list(loaded.data.values())[-1] diff --git a/tests/integration/fitting/test_project_load.py b/tests/integration/fitting/test_project_load.py index 789482f3..53d8643f 100644 --- a/tests/integration/fitting/test_project_load.py +++ b/tests/integration/fitting/test_project_load.py @@ -126,7 +126,7 @@ def _collect_param_snapshot(project: Project) -> dict[str, float]: def _collect_free_flags(project: Project) -> dict[str, bool]: """Return ``{unique_name: free}`` for fittable parameters.""" - from easydiffraction.core.variable import Parameter # noqa: PLC0415 + from easydiffraction.core.variable import Parameter return {p.unique_name: p.free for p in project.parameters if isinstance(p, Parameter)} diff --git a/tests/integration/fitting/test_sequential.py b/tests/integration/fitting/test_sequential.py index 12c14bea..fc82cfc7 100644 --- a/tests/integration/fitting/test_sequential.py +++ b/tests/integration/fitting/test_sequential.py @@ -341,12 +341,12 @@ def test_apply_params_from_csv_loads_data_and_params(tmp_path) -> None: project.apply_params_from_csv(row_index=1) # Verify the parameter value was overridden - model = list(project.structures.values())[0] + model = next(iter(project.structures.values())) assert_almost_equal(model.cell.length_a.value, expected_a, decimal=5) # Verify that the experiment has measured data loaded # (from the file_path in that CSV row) - expt = list(project.experiments.values())[0] + expt = next(iter(project.experiments.values())) assert expt.data.intensity_meas is not None diff --git a/tests/unit/easydiffraction/analysis/test_sequential.py b/tests/unit/easydiffraction/analysis/test_sequential.py index 856aa8ec..3179a0b1 100644 --- a/tests/unit/easydiffraction/analysis/test_sequential.py +++ b/tests/unit/easydiffraction/analysis/test_sequential.py @@ -157,7 +157,7 @@ def test_returns_empty_when_no_file(self, tmp_path): def test_returns_fitted_file_paths(self, tmp_path): csv_path = tmp_path / 'results.csv' - header = list(_META_COLUMNS) + ['cell.a', 'cell.a.uncertainty'] + header = [*_META_COLUMNS, 'cell.a', 'cell.a.uncertainty'] _write_csv_header(csv_path, header) _append_to_csv( csv_path, @@ -184,12 +184,12 @@ def test_returns_fitted_file_paths(self, tmp_path): ], ) - fitted, params = _read_csv_for_recovery(csv_path) + fitted, _params = _read_csv_for_recovery(csv_path) assert fitted == {'/data/a.dat', '/data/b.dat'} def test_returns_last_successful_params(self, tmp_path): csv_path = tmp_path / 'results.csv' - header = list(_META_COLUMNS) + ['cell.a', 'cell.a.uncertainty'] + header = [*_META_COLUMNS, 'cell.a', 'cell.a.uncertainty'] _write_csv_header(csv_path, header) _append_to_csv( csv_path, @@ -223,7 +223,8 @@ def test_returns_last_successful_params(self, tmp_path): def test_skips_meta_columns_and_diffrn_and_uncertainty(self, tmp_path): csv_path = tmp_path / 'results.csv' - header = list(_META_COLUMNS) + [ + header = [ + *_META_COLUMNS, 'diffrn.temp', 'cell.a', 'cell.a.uncertainty', @@ -257,7 +258,7 @@ def test_skips_meta_columns_and_diffrn_and_uncertainty(self, tmp_path): def test_returns_none_params_when_no_successful_rows(self, tmp_path): csv_path = tmp_path / 'results.csv' - header = list(_META_COLUMNS) + ['cell.a', 'cell.a.uncertainty'] + header = [*_META_COLUMNS, 'cell.a', 'cell.a.uncertainty'] _write_csv_header(csv_path, header) _append_to_csv( csv_path, diff --git a/tests/unit/easydiffraction/core/test_diagnostic.py b/tests/unit/easydiffraction/core/test_diagnostic.py index cda7ce98..1ad3b67d 100644 --- a/tests/unit/easydiffraction/core/test_diagnostic.py +++ b/tests/unit/easydiffraction/core/test_diagnostic.py @@ -28,6 +28,6 @@ def test_diagnostics_error_and_debug_monkeypatch(monkeypatch: pytest.MonkeyPatch assert dummy.last[0] == 'debug' Diagnostics.type_mismatch('x', value=3, expected_type=int) - kind, msg, exc = dummy.last + kind, _msg, exc = dummy.last assert kind == 'error' assert issubclass(exc, TypeError) diff --git a/tests/unit/easydiffraction/datablocks/experiment/categories/data/test_factory.py b/tests/unit/easydiffraction/datablocks/experiment/categories/data/test_factory.py index 4b59aa67..5132591a 100644 --- a/tests/unit/easydiffraction/datablocks/experiment/categories/data/test_factory.py +++ b/tests/unit/easydiffraction/datablocks/experiment/categories/data/test_factory.py @@ -6,9 +6,6 @@ def test_data_factory_default_and_errors(): # Ensure concrete classes are registered - from easydiffraction.datablocks.experiment.categories.data import bragg_pd # noqa: F401 - from easydiffraction.datablocks.experiment.categories.data import bragg_sc # noqa: F401 - from easydiffraction.datablocks.experiment.categories.data import total_pd # noqa: F401 from easydiffraction.datablocks.experiment.categories.data.factory import DataFactory # Explicit type by tag @@ -35,9 +32,6 @@ def test_data_factory_default_and_errors(): def test_data_factory_default_tag_resolution(): # Ensure concrete classes are registered - from easydiffraction.datablocks.experiment.categories.data import bragg_pd # noqa: F401 - from easydiffraction.datablocks.experiment.categories.data import bragg_sc # noqa: F401 - from easydiffraction.datablocks.experiment.categories.data import total_pd # noqa: F401 from easydiffraction.datablocks.experiment.categories.data.factory import DataFactory from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum @@ -76,9 +70,6 @@ def test_data_factory_default_tag_resolution(): def test_data_factory_supported_tags(): # Ensure concrete classes are registered - from easydiffraction.datablocks.experiment.categories.data import bragg_pd # noqa: F401 - from easydiffraction.datablocks.experiment.categories.data import bragg_sc # noqa: F401 - from easydiffraction.datablocks.experiment.categories.data import total_pd # noqa: F401 from easydiffraction.datablocks.experiment.categories.data.factory import DataFactory tags = DataFactory.supported_tags() diff --git a/tests/unit/easydiffraction/io/test_ascii.py b/tests/unit/easydiffraction/io/test_ascii.py index ab180701..45627982 100644 --- a/tests/unit/easydiffraction/io/test_ascii.py +++ b/tests/unit/easydiffraction/io/test_ascii.py @@ -50,7 +50,7 @@ def test_raises_value_error_no_project_cif(self, tmp_path): with zipfile.ZipFile(zip_path, 'w') as zf: zf.writestr('data.dat', '1 2 3\n') - with pytest.raises(ValueError, match='No project.cif found'): + with pytest.raises(ValueError, match=r'No project\.cif found'): extract_project_from_zip(zip_path) def test_destination_creates_directory(self, tmp_path): From d9b17b217e356219e9b8bd6fef98e82d6e118cd3 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 22:00:44 +0200 Subject: [PATCH 33/51] Enable PRL rules (phase 1) --- .github/copilot-instructions.md | 11 ++++++++ docs/architecture/architecture.md | 20 +++++++++++++++ pixi.lock | 4 +-- pyproject.toml | 20 ++++++++++++++- src/easydiffraction/analysis/analysis.py | 8 +++--- .../analysis/calculators/crysfml.py | 6 ++--- .../analysis/calculators/cryspy.py | 4 +-- .../analysis/calculators/pdffit.py | 4 +-- src/easydiffraction/analysis/fitting.py | 2 +- .../analysis/minimizers/dfols.py | 6 ++--- .../analysis/minimizers/lmfit.py | 6 ++--- src/easydiffraction/analysis/sequential.py | 2 +- src/easydiffraction/core/category.py | 6 ++--- src/easydiffraction/core/collection.py | 2 +- src/easydiffraction/core/datablock.py | 2 +- src/easydiffraction/core/diagnostic.py | 5 +++- src/easydiffraction/core/validation.py | 2 +- .../experiment/categories/data/bragg_pd.py | 7 ++++-- .../datablocks/experiment/item/base.py | 14 +++++------ .../datablocks/experiment/item/bragg_pd.py | 19 +++++++++----- .../datablocks/experiment/item/bragg_sc.py | 8 ++++-- .../datablocks/experiment/item/total_pd.py | 14 ++++++++--- .../datablocks/structure/item/base.py | 6 ++--- src/easydiffraction/display/plotters/ascii.py | 7 +++--- .../display/plotters/plotly.py | 14 ++++++----- src/easydiffraction/display/plotting.py | 5 ++-- src/easydiffraction/display/tablers/base.py | 6 +++-- src/easydiffraction/display/tablers/pandas.py | 9 ++++--- src/easydiffraction/display/tablers/rich.py | 3 ++- src/easydiffraction/io/cif/serialize.py | 25 +++++++++++++------ src/easydiffraction/utils/environment.py | 13 +++------- src/easydiffraction/utils/logging.py | 7 +++++- tools/gen_tests_scaffold.py | 2 +- tools/param_consistency.py | 7 ++++-- 34 files changed, 186 insertions(+), 90 deletions(-) diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 92517a7f..b9fc88ae 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -60,6 +60,17 @@ with both getter and setter) or **read-only** (property with getter only). If internal code needs to mutate a read-only property, add a private `_set_` method instead of exposing a public setter. +- Lint complexity thresholds (`max-args`, `max-branches`, + `max-statements`, `max-locals`, `max-nested-blocks`, etc. in + `pyproject.toml`) are intentional code-quality guardrails. They are not + arbitrary numbers. When code violates a threshold, it is a signal that + the function or class needs refactoring — not that the threshold needs + raising. Do not raise thresholds, add `# noqa` comments, or use any + other mechanism to silence complexity violations. Instead, refactor the + code (extract helpers, introduce parameter objects, flatten nesting, + etc.). For complex refactors that touch many lines or change public + API, propose a refactoring plan and wait for approval before + proceeding. ## Architecture diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md index 2170fc75..e476877a 100644 --- a/docs/architecture/architecture.md +++ b/docs/architecture/architecture.md @@ -1170,6 +1170,26 @@ def length_a(self) -> Parameter: - The CI tool `pixi run param-consistency-check` validates compliance; `pixi run param-consistency-fix` auto-fixes violations. +### 9.9 Lint Complexity Thresholds + +The Pylint-style complexity limits configured in `pyproject.toml` +(`max-args`, `max-branches`, `max-statements`, `max-locals`, +`max-nested-blocks`, etc.) are **intentional code-quality guardrails**, +not arbitrary numbers. A violation is a signal that the function or class +needs refactoring — not that the threshold needs raising. + +**Rules:** + +- **Do not raise thresholds.** The current values represent the project's + design intent for maximum acceptable complexity. +- **Do not add `# noqa` comments** (or any other mechanism) to silence + complexity rules such as `PLR0912`, `PLR0913`, `PLR0914`, `PLR0915`, + `PLR0917`, `PLR1702`. +- **Refactor the code instead:** extract helper functions, introduce + parameter objects, flatten nesting, use early returns, etc. +- **For complex refactors** that touch many lines or change public API, + propose a refactoring plan and wait for approval before proceeding. + --- ## 10. Test Strategy diff --git a/pixi.lock b/pixi.lock index f61d83c6..4bf24531 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty35 - sha256: 0360b3b8adefaf7a9a9440e18cbbdc7e2c249149c9543b7dd67e33ab41a60216 + version: 0.11.1+devdirty36 + sha256: 413ec5f57c3e7fc4e3dcb4a1e2421851b246cb0a25239e4cde585d75e412b086 requires_dist: - asciichartpy - asteval diff --git a/pyproject.toml b/pyproject.toml index bb09188a..4454fd95 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -244,7 +244,7 @@ select = [ # Pylint (PL) rules 'PLC', # https://docs.astral.sh/ruff/rules/#convention-plc 'PLE', # https://docs.astral.sh/ruff/rules/#error-ple - #'PLR', # https://docs.astral.sh/ruff/rules/#refactor-plr + 'PLR', # https://docs.astral.sh/ruff/rules/#refactor-plr 'PLW', # https://docs.astral.sh/ruff/rules/#warning-plw # flake8 rules #'A', # https://docs.astral.sh/ruff/rules/#flake8-builtins-a @@ -369,6 +369,24 @@ max-doc-length = 72 [tool.ruff.lint.pydocstyle] convention = 'numpy' +[tool.ruff.lint.pylint] +# Maximum number of arguments for function/method definitions +max-args = 8 +# Maximum number of positional arguments for function/method definitions +max-positional-args = 8 +# Maximum number of public methods for a class +max-public-methods = 30 +# Maximum number of return statements in a function/method body +max-returns = 8 +# Maximum number of branches in a function/method body +max-branches = 16 +# Maximum number of statements in a function/method body +max-statements = 60 +# Maximum number of local variables in a function/method body +max-locals = 20 +# Maximum number of nested blocks in a function/method body +max-nested-blocks = 6 + ############################# # Configuration for pydoclint ############################# diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index 80c5c1a3..644c0058 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -150,7 +150,7 @@ def aliases_type(self, new_type: str) -> None: console.paragraph('Aliases type changed to') console.print(new_type) - def show_supported_aliases_types(self) -> None: + def show_supported_aliases_types(self) -> None: # noqa: PLR6301 """Print a table of supported aliases collection types.""" AliasesFactory.show_supported() @@ -191,7 +191,7 @@ def constraints_type(self, new_type: str) -> None: console.paragraph('Constraints type changed to') console.print(new_type) - def show_supported_constraints_types(self) -> None: + def show_supported_constraints_types(self) -> None: # noqa: PLR6301 """Print a table of supported constraints collection types.""" ConstraintsFactory.show_supported() @@ -200,8 +200,8 @@ def show_current_constraints_type(self) -> None: console.paragraph('Current constraints type') console.print(self._constraints_type) + @staticmethod def _get_params_as_dataframe( - self, params: list[NumericDescriptor | Parameter], ) -> pd.DataFrame: """ @@ -531,7 +531,7 @@ def fit_mode_type(self, new_type: str) -> None: console.paragraph('Fit-mode type changed to') console.print(new_type) - def show_supported_fit_mode_types(self) -> None: + def show_supported_fit_mode_types(self) -> None: # noqa: PLR6301 """Print a table of supported fit-mode category types.""" FitModeFactory.show_supported() diff --git a/src/easydiffraction/analysis/calculators/crysfml.py b/src/easydiffraction/analysis/calculators/crysfml.py index 3410150a..3454ce28 100644 --- a/src/easydiffraction/analysis/calculators/crysfml.py +++ b/src/easydiffraction/analysis/calculators/crysfml.py @@ -100,7 +100,7 @@ def calculate_pattern( y = [] return y - def _adjust_pattern_length( + def _adjust_pattern_length( # noqa: PLR6301 self, pattern: list[float], target_length: int, @@ -153,7 +153,7 @@ def _crysfml_dict( 'experiments': [experiment_dict], } - def _convert_structure_to_dict( + def _convert_structure_to_dict( # noqa: PLR6301 self, structure: Structure, ) -> dict[str, Any]: @@ -198,7 +198,7 @@ def _convert_structure_to_dict( return structure_dict - def _convert_experiment_to_dict( + def _convert_experiment_to_dict( # noqa: PLR6301 self, experiment: ExperimentBase, ) -> dict[str, Any]: diff --git a/src/easydiffraction/analysis/calculators/cryspy.py b/src/easydiffraction/analysis/calculators/cryspy.py index 1607c5cf..895b71fd 100644 --- a/src/easydiffraction/analysis/calculators/cryspy.py +++ b/src/easydiffraction/analysis/calculators/cryspy.py @@ -349,7 +349,7 @@ def _recreate_cryspy_obj( return cryspy_obj - def _convert_structure_to_cryspy_cif( + def _convert_structure_to_cryspy_cif( # noqa: PLR6301 self, structure: Structure, ) -> str: @@ -368,7 +368,7 @@ def _convert_structure_to_cryspy_cif( """ return structure.as_cif - def _convert_experiment_to_cryspy_cif( + def _convert_experiment_to_cryspy_cif( # noqa: PLR6301 self, experiment: ExperimentBase, linked_structure: object, diff --git a/src/easydiffraction/analysis/calculators/pdffit.py b/src/easydiffraction/analysis/calculators/pdffit.py index 7abe8d31..67864cea 100644 --- a/src/easydiffraction/analysis/calculators/pdffit.py +++ b/src/easydiffraction/analysis/calculators/pdffit.py @@ -58,7 +58,7 @@ def name(self) -> str: """Short identifier of this calculator engine.""" return 'pdffit' - def calculate_structure_factors( + def calculate_structure_factors( # noqa: PLR6301 self, structures: object, experiments: object, @@ -84,7 +84,7 @@ def calculate_structure_factors( print('[pdffit] Calculating HKLs (not applicable)...') return [] - def calculate_pattern( + def calculate_pattern( # noqa: PLR6301 self, structure: Structure, experiment: ExperimentBase, diff --git a/src/easydiffraction/analysis/fitting.py b/src/easydiffraction/analysis/fitting.py index dc7dffd9..4050d1e5 100644 --- a/src/easydiffraction/analysis/fitting.py +++ b/src/easydiffraction/analysis/fitting.py @@ -197,7 +197,7 @@ def _residual_function( # dataset is split into two parts and fit together. If weights # sum to one, then reduced chi_squared will be half as large as # expected. - norm_weights = norm_weights * (num_expts / np.sum(norm_weights)) + norm_weights *= num_expts / np.sum(norm_weights) residuals: list[float] = [] for experiment, weight in zip(experiments, norm_weights, strict=True): diff --git a/src/easydiffraction/analysis/minimizers/dfols.py b/src/easydiffraction/analysis/minimizers/dfols.py index 6e724298..1177ee4e 100644 --- a/src/easydiffraction/analysis/minimizers/dfols.py +++ b/src/easydiffraction/analysis/minimizers/dfols.py @@ -31,7 +31,7 @@ def __init__( # Intentionally unused, accepted for API compatibility del kwargs - def _prepare_solver_args(self, parameters: list[object]) -> dict[str, object]: + def _prepare_solver_args(self, parameters: list[object]) -> dict[str, object]: # noqa: PLR6301 x0 = [] bounds_lower = [] bounds_upper = [] @@ -47,7 +47,7 @@ def _run_solver(self, objective_function: object, **kwargs: object) -> object: bounds = kwargs.get('bounds') return solve(objective_function, x0=x0, bounds=bounds, maxfun=self.max_iterations) - def _sync_result_to_parameters( + def _sync_result_to_parameters( # noqa: PLR6301 self, parameters: list[object], raw_result: object, @@ -73,7 +73,7 @@ def _sync_result_to_parameters( # calculate later if needed param.uncertainty = None - def _check_success(self, raw_result: object) -> bool: + def _check_success(self, raw_result: object) -> bool: # noqa: PLR6301 """ Determine success from DFO-LS result dictionary. diff --git a/src/easydiffraction/analysis/minimizers/lmfit.py b/src/easydiffraction/analysis/minimizers/lmfit.py index d185ef3c..6b09ebbe 100644 --- a/src/easydiffraction/analysis/minimizers/lmfit.py +++ b/src/easydiffraction/analysis/minimizers/lmfit.py @@ -33,7 +33,7 @@ def __init__( max_iterations=max_iterations, ) - def _prepare_solver_args( + def _prepare_solver_args( # noqa: PLR6301 self, parameters: list[object], ) -> dict[str, object]: @@ -88,7 +88,7 @@ def _run_solver(self, objective_function: object, **kwargs: object) -> object: max_nfev=self.max_iterations, ) - def _sync_result_to_parameters( + def _sync_result_to_parameters( # noqa: PLR6301 self, parameters: list[object], raw_result: object, @@ -113,7 +113,7 @@ def _sync_result_to_parameters( param._set_value_from_minimizer(param_result.value) param.uncertainty = getattr(param_result, 'stderr', None) - def _check_success(self, raw_result: object) -> bool: + def _check_success(self, raw_result: object) -> bool: # noqa: PLR6301 """ Determine success from lmfit MinimizerResult. diff --git a/src/easydiffraction/analysis/sequential.py b/src/easydiffraction/analysis/sequential.py index f1f7b110..53f643fe 100644 --- a/src/easydiffraction/analysis/sequential.py +++ b/src/easydiffraction/analysis/sequential.py @@ -455,7 +455,7 @@ def _build_template(project: object) -> SequentialFitTemplate: diffrn_field_names.extend( p.name for p in experiment.diffrn.parameters - if hasattr(p, 'name') and p.name not in ('type',) + if hasattr(p, 'name') and p.name not in {'type'} ) return SequentialFitTemplate( diff --git a/src/easydiffraction/core/category.py b/src/easydiffraction/core/category.py index f963db3b..19320ef5 100644 --- a/src/easydiffraction/core/category.py +++ b/src/easydiffraction/core/category.py @@ -30,7 +30,7 @@ def __str__(self) -> str: return f'<{name} ({params})>' # TODO: Common for all categories - def _update(self, called_by_minimizer: bool = False) -> None: + def _update(self, called_by_minimizer: bool = False) -> None: # noqa: PLR6301 del called_by_minimizer pass @@ -171,7 +171,7 @@ class CategoryCollection(CollectionBase): # TODO: Common for all categories _update_priority = 10 # Default. Lower values run first. - def _key_for(self, item: object) -> str | None: + def _key_for(self, item: object) -> str | None: # noqa: PLR6301 """Return the category-level identity key for *item*.""" return item._identity.category_entry_name @@ -194,7 +194,7 @@ def __str__(self) -> str: return f'<{name} collection ({size} items)>' # TODO: Common for all categories - def _update(self, called_by_minimizer: bool = False) -> None: + def _update(self, called_by_minimizer: bool = False) -> None: # noqa: PLR6301 del called_by_minimizer pass diff --git a/src/easydiffraction/core/collection.py b/src/easydiffraction/core/collection.py index c28e19e7..520bf94f 100644 --- a/src/easydiffraction/core/collection.py +++ b/src/easydiffraction/core/collection.py @@ -113,7 +113,7 @@ def remove(self, name: str) -> None: """ del self[name] - def _key_for(self, item: GuardedBase) -> str | None: + def _key_for(self, item: GuardedBase) -> str | None: # noqa: PLR6301 """ Return the identity key for *item*. diff --git a/src/easydiffraction/core/datablock.py b/src/easydiffraction/core/datablock.py index 5d497e4c..ac49fbc2 100644 --- a/src/easydiffraction/core/datablock.py +++ b/src/easydiffraction/core/datablock.py @@ -149,7 +149,7 @@ class DatablockCollection(CollectionBase): :meth:`add` with the resulting item. """ - def _key_for(self, item: object) -> str | None: + def _key_for(self, item: object) -> str | None: # noqa: PLR6301 """Return the datablock-level identity key for *item*.""" return item._identity.datablock_entry_name diff --git a/src/easydiffraction/core/diagnostic.py b/src/easydiffraction/core/diagnostic.py index 634798a2..3ba50cb9 100644 --- a/src/easydiffraction/core/diagnostic.py +++ b/src/easydiffraction/core/diagnostic.py @@ -11,6 +11,9 @@ from easydiffraction.utils.logging import log +# Maximum number of allowed attributes to list explicitly in messages +_MAX_LISTED_ALLOWED = 10 + class Diagnostics: """Centralized logger for attribute errors and validation hints.""" @@ -209,7 +212,7 @@ def _build_allowed(allowed: object, label: str = 'Allowed attributes') -> str: # allowed may be a set, list, or other iterable if allowed: allowed_list = list(allowed) - if len(allowed_list) <= 10: + if len(allowed_list) <= _MAX_LISTED_ALLOWED: s = ', '.join(map(repr, sorted(allowed_list))) return f' {label}: {s}.' return f' ({len(allowed_list)} {label.lower()} not listed here).' diff --git a/src/easydiffraction/core/validation.py b/src/easydiffraction/core/validation.py index e75b34d4..a5e4ffba 100644 --- a/src/easydiffraction/core/validation.py +++ b/src/easydiffraction/core/validation.py @@ -93,8 +93,8 @@ def validated( """ raise NotImplementedError + @staticmethod def _fallback( - self, current: object = None, default: object = None, ) -> object: diff --git a/src/easydiffraction/datablocks/experiment/categories/data/bragg_pd.py b/src/easydiffraction/datablocks/experiment/categories/data/bragg_pd.py index beda3749..378dce8a 100644 --- a/src/easydiffraction/datablocks/experiment/categories/data/bragg_pd.py +++ b/src/easydiffraction/datablocks/experiment/categories/data/bragg_pd.py @@ -25,6 +25,9 @@ from easydiffraction.utils.utils import tof_to_d from easydiffraction.utils.utils import twotheta_to_d +# Uncertainty values below this threshold are replaced with 1.0 +_MIN_UNCERTAINTY = 0.0001 + class PdDataPointBaseMixin: """Single base data point mixin for powder diffraction data.""" @@ -448,8 +451,8 @@ def intensity_meas_su(self) -> np.ndarray: (p.intensity_meas_su.value for p in self._calc_items), dtype=float, # TODO: needed? DataTypes.NUMERIC? ) - # Replace values smaller than 0.0001 with 1.0 - modified = np.where(original < 0.0001, 1.0, original) + # Replace values smaller than _MIN_UNCERTAINTY with 1.0 + modified = np.where(original < _MIN_UNCERTAINTY, 1.0, original) return modified @property diff --git a/src/easydiffraction/datablocks/experiment/item/base.py b/src/easydiffraction/datablocks/experiment/item/base.py index 6b83e786..12e71351 100644 --- a/src/easydiffraction/datablocks/experiment/item/base.py +++ b/src/easydiffraction/datablocks/experiment/item/base.py @@ -113,7 +113,7 @@ def diffrn_type(self, new_type: str) -> None: console.paragraph(f"Diffrn type for experiment '{self.name}' changed to") console.print(new_type) - def show_supported_diffrn_types(self) -> None: + def show_supported_diffrn_types(self) -> None: # noqa: PLR6301 """Print a table of supported diffraction conditions types.""" DiffrnFactory.show_supported() @@ -336,7 +336,7 @@ def extinction_type(self, new_type: str) -> None: console.paragraph(f"Extinction type for experiment '{self.name}' changed to") console.print(new_type) - def show_supported_extinction_types(self) -> None: + def show_supported_extinction_types(self) -> None: # noqa: PLR6301 """Print a table of supported extinction correction models.""" ExtinctionFactory.show_supported() @@ -383,7 +383,7 @@ def linked_crystal_type(self, new_type: str) -> None: console.paragraph(f"Linked crystal type for experiment '{self.name}' changed to") console.print(new_type) - def show_supported_linked_crystal_types(self) -> None: + def show_supported_linked_crystal_types(self) -> None: # noqa: PLR6301 """Print a table of supported linked-crystal reference types.""" LinkedCrystalFactory.show_supported() @@ -484,7 +484,7 @@ def data_type(self, new_type: str) -> None: console.paragraph(f"Data type for experiment '{self.name}' changed to") console.print(new_type) - def show_supported_data_types(self) -> None: + def show_supported_data_types(self) -> None: # noqa: PLR6301 """Print a table of supported data collection types.""" DataFactory.show_supported() @@ -611,7 +611,7 @@ def linked_phases_type(self, new_type: str) -> None: console.paragraph(f"Linked phases type for experiment '{self.name}' changed to") console.print(new_type) - def show_supported_linked_phases_types(self) -> None: + def show_supported_linked_phases_types(self) -> None: # noqa: PLR6301 """Print a table of supported linked-phases collection types.""" LinkedPhasesFactory.show_supported() @@ -654,7 +654,7 @@ def excluded_regions_type(self, new_type: str) -> None: console.paragraph(f"Excluded regions type for experiment '{self.name}' changed to") console.print(new_type) - def show_supported_excluded_regions_types(self) -> None: + def show_supported_excluded_regions_types(self) -> None: # noqa: PLR6301 """Print a table of supported excluded-regions types.""" ExcludedRegionsFactory.show_supported() @@ -700,7 +700,7 @@ def data_type(self, new_type: str) -> None: console.paragraph(f"Data type for experiment '{self.name}' changed to") console.print(new_type) - def show_supported_data_types(self) -> None: + def show_supported_data_types(self) -> None: # noqa: PLR6301 """Print a table of supported data collection types.""" DataFactory.show_supported() diff --git a/src/easydiffraction/datablocks/experiment/item/bragg_pd.py b/src/easydiffraction/datablocks/experiment/item/bragg_pd.py index d4773d2a..37b02d99 100644 --- a/src/easydiffraction/datablocks/experiment/item/bragg_pd.py +++ b/src/easydiffraction/datablocks/experiment/item/bragg_pd.py @@ -23,6 +23,13 @@ if TYPE_CHECKING: from easydiffraction.datablocks.experiment.categories.experiment_type import ExperimentType +# Minimum number of columns required in an ASCII data file +_MIN_COLUMNS_XY = 2 +_MIN_COLUMNS_XY_SY = 3 + +# Uncertainty values below this threshold are replaced with 1.0 +_MIN_UNCERTAINTY = 0.0001 + @ExperimentFactory.register class BraggPdExperiment(PdExperimentBase): @@ -81,14 +88,14 @@ def _load_ascii_data_to_experiment( """ data = load_numeric_block(data_path) - if data.shape[1] < 2: + if data.shape[1] < _MIN_COLUMNS_XY: log.error( 'Data file must have at least two columns: x and y.', exc_type=ValueError, ) return 0 - if data.shape[1] < 3: + if data.shape[1] < _MIN_COLUMNS_XY_SY: log.warning('No uncertainty (sy) column provided. Defaulting to sqrt(y).') # Extract x, y data @@ -99,11 +106,11 @@ def _load_ascii_data_to_experiment( x = np.round(x, 4) # Determine sy from column 3 if available, otherwise use sqrt(y) - sy = data[:, 2] if data.shape[1] > 2 else np.sqrt(y) + sy = data[:, 2] if data.shape[1] > _MIN_COLUMNS_XY else np.sqrt(y) - # Replace values smaller than 0.0001 with 1.0 + # Replace values smaller than _MIN_UNCERTAINTY with 1.0 # TODO: Not used if loading from cif file? - sy = np.where(sy < 0.0001, 1.0, sy) + sy = np.where(sy < _MIN_UNCERTAINTY, 1.0, sy) # Set the experiment data self.data._create_items_set_xcoord_and_id(x) @@ -209,7 +216,7 @@ def background(self) -> object: """Active background model for this experiment.""" return self._background - def show_supported_background_types(self) -> None: + def show_supported_background_types(self) -> None: # noqa: PLR6301 """Print a table of supported background types.""" BackgroundFactory.show_supported() diff --git a/src/easydiffraction/datablocks/experiment/item/bragg_sc.py b/src/easydiffraction/datablocks/experiment/item/bragg_sc.py index 3cb1a96c..7d2a9546 100644 --- a/src/easydiffraction/datablocks/experiment/item/bragg_sc.py +++ b/src/easydiffraction/datablocks/experiment/item/bragg_sc.py @@ -18,6 +18,10 @@ if TYPE_CHECKING: from easydiffraction.datablocks.experiment.categories.experiment_type import ExperimentType +# Minimum number of columns required in CWL and TOF single-crystal files +_MIN_COLUMNS_CWL_SC = 5 +_MIN_COLUMNS_TOF_SC = 6 + @ExperimentFactory.register class CwlScExperiment(ScExperimentBase): @@ -60,7 +64,7 @@ def _load_ascii_data_to_experiment(self, data_path: str) -> int: """ data = load_numeric_block(data_path) - if data.shape[1] < 5: + if data.shape[1] < _MIN_COLUMNS_CWL_SC: log.error( 'Data file must have at least 5 columns: h, k, l, Iobs, sIobs.', exc_type=ValueError, @@ -132,7 +136,7 @@ def _load_ascii_data_to_experiment(self, data_path: str) -> int: ) return 0 - if data.shape[1] < 6: + if data.shape[1] < _MIN_COLUMNS_TOF_SC: log.error( 'Data file must have at least 6 columns: h, k, l, Iobs, sIobs, wavelength.', exc_type=ValueError, diff --git a/src/easydiffraction/datablocks/experiment/item/total_pd.py b/src/easydiffraction/datablocks/experiment/item/total_pd.py index a3208a72..cc7105ae 100644 --- a/src/easydiffraction/datablocks/experiment/item/total_pd.py +++ b/src/easydiffraction/datablocks/experiment/item/total_pd.py @@ -18,6 +18,10 @@ if TYPE_CHECKING: from easydiffraction.datablocks.experiment.categories.experiment_type import ExperimentType +# Minimum number of columns required in an ASCII data file +_MIN_COLUMNS_XY = 2 +_MIN_COLUMNS_XY_SY = 3 + @ExperimentFactory.register class TotalPdExperiment(PdExperimentBase): @@ -76,17 +80,21 @@ def _load_ascii_data_to_experiment(self, data_path: str) -> int: msg = f'Failed to read data from {data_path}: {e}' raise OSError(msg) from e - if data.shape[1] < 2: + if data.shape[1] < _MIN_COLUMNS_XY: msg = 'Data file must have at least two columns: x and y.' raise ValueError(msg) default_sy = 0.03 - if data.shape[1] < 3: + if data.shape[1] < _MIN_COLUMNS_XY_SY: print(f'Warning: No uncertainty (sy) column provided. Defaulting to {default_sy}.') x = data[:, 0] y = data[:, 1] - sy = data[:, 2] if data.shape[1] > 2 else np.full_like(y, fill_value=default_sy) + sy = ( + data[:, 2] + if data.shape[1] > _MIN_COLUMNS_XY + else np.full_like(y, fill_value=default_sy) + ) self.data._create_items_set_xcoord_and_id(x) self.data._set_g_r_meas(y) diff --git a/src/easydiffraction/datablocks/structure/item/base.py b/src/easydiffraction/datablocks/structure/item/base.py index 8181f1db..19c38df5 100644 --- a/src/easydiffraction/datablocks/structure/item/base.py +++ b/src/easydiffraction/datablocks/structure/item/base.py @@ -113,7 +113,7 @@ def cell_type(self, new_type: str) -> None: console.paragraph(f"Cell type for structure '{self.name}' changed to") console.print(new_type) - def show_supported_cell_types(self) -> None: + def show_supported_cell_types(self) -> None: # noqa: PLR6301 """Print a table of supported unit-cell types.""" CellFactory.show_supported() @@ -172,7 +172,7 @@ def space_group_type(self, new_type: str) -> None: console.paragraph(f"Space group type for structure '{self.name}' changed to") console.print(new_type) - def show_supported_space_group_types(self) -> None: + def show_supported_space_group_types(self) -> None: # noqa: PLR6301 """Print a table of supported space-group types.""" SpaceGroupFactory.show_supported() @@ -231,7 +231,7 @@ def atom_sites_type(self, new_type: str) -> None: console.paragraph(f"Atom sites type for structure '{self.name}' changed to") console.print(new_type) - def show_supported_atom_sites_types(self) -> None: + def show_supported_atom_sites_types(self) -> None: # noqa: PLR6301 """Print a table of supported atom-sites collection types.""" AtomSitesFactory.show_supported() diff --git a/src/easydiffraction/display/plotters/ascii.py b/src/easydiffraction/display/plotters/ascii.py index 32ee45ed..4d4edd69 100644 --- a/src/easydiffraction/display/plotters/ascii.py +++ b/src/easydiffraction/display/plotters/ascii.py @@ -26,7 +26,8 @@ class AsciiPlotter(PlotterBase): """Terminal-based plotter using ASCII art.""" - def _get_legend_item(self, label: str) -> str: + @staticmethod + def _get_legend_item(label: str) -> str: """ Return a colored legend entry for a given series label. @@ -103,8 +104,8 @@ def plot_powder( print(padded) + @staticmethod def plot_single_crystal( - self, x_calc: object, y_meas: object, y_meas_su: object, @@ -182,8 +183,8 @@ def plot_single_crystal( print(f' {x_axis}') console.print(f'{" " * (width - 3)}{axes_labels[0]}') + @staticmethod def plot_scatter( - self, x: object, y: object, sy: object, diff --git a/src/easydiffraction/display/plotters/plotly.py b/src/easydiffraction/display/plotters/plotly.py index 6fc84a85..77d351e1 100644 --- a/src/easydiffraction/display/plotters/plotly.py +++ b/src/easydiffraction/display/plotters/plotly.py @@ -37,8 +37,8 @@ class PlotlyPlotter(PlotterBase): if in_pycharm(): pio.renderers.default = 'browser' + @staticmethod def _get_powder_trace( - self, x: object, y: object, label: str, @@ -75,8 +75,8 @@ def _get_powder_trace( return trace + @staticmethod def _get_single_crystal_trace( - self, x_calc: object, y_meas: object, y_meas_su: object, @@ -119,7 +119,8 @@ def _get_single_crystal_trace( return trace - def _get_diagonal_shape(self) -> dict: + @staticmethod + def _get_diagonal_shape() -> dict: """ Create a diagonal reference line shape. @@ -143,7 +144,8 @@ def _get_diagonal_shape(self) -> dict: 'line': {'width': 0.5}, } - def _get_config(self) -> dict: + @staticmethod + def _get_config() -> dict: """ Return the Plotly figure configuration. @@ -163,8 +165,8 @@ def _get_config(self) -> dict: ], } + @staticmethod def _get_figure( - self, data: object, layout: object, ) -> object: @@ -218,8 +220,8 @@ def _show_figure( ) display(HTML(html_fig)) + @staticmethod def _get_layout( - self, title: str, axes_labels: object, **kwargs: object, diff --git a/src/easydiffraction/display/plotting.py b/src/easydiffraction/display/plotting.py index 2d0b7c68..7970e8db 100644 --- a/src/easydiffraction/display/plotting.py +++ b/src/easydiffraction/display/plotting.py @@ -155,8 +155,8 @@ def _filtered_y_array( return filtered_y_array + @staticmethod def _get_axes_labels( - self, sample_form: object, scattering_type: object, x_axis: object, @@ -261,7 +261,8 @@ def _prepare_powder_data( 'x_axis': x_axis, } - def _resolve_x_axis(self, expt_type: object, x: object) -> tuple: + @staticmethod + def _resolve_x_axis(expt_type: object, x: object) -> tuple: """ Determine the x-axis type from experiment metadata. diff --git a/src/easydiffraction/display/tablers/base.py b/src/easydiffraction/display/tablers/base.py index 869c5a17..cef9a62e 100644 --- a/src/easydiffraction/display/tablers/base.py +++ b/src/easydiffraction/display/tablers/base.py @@ -51,7 +51,8 @@ def _format_value(self, value: object) -> object: """ return self._float_fmt(value) if isinstance(value, float) else str(value) - def _is_dark_theme(self) -> bool: + @staticmethod + def _is_dark_theme() -> bool: """ Return True when a dark theme is detected in Jupyter. @@ -68,7 +69,8 @@ def _is_dark_theme(self) -> bool: return is_dark() - def _rich_to_hex(self, color: str) -> str: + @staticmethod + def _rich_to_hex(color: str) -> str: """ Convert a Rich color name to a CSS-style hex string. diff --git a/src/easydiffraction/display/tablers/pandas.py b/src/easydiffraction/display/tablers/pandas.py index 20e38564..d6565513 100644 --- a/src/easydiffraction/display/tablers/pandas.py +++ b/src/easydiffraction/display/tablers/pandas.py @@ -19,7 +19,8 @@ class PandasTableBackend(TableBackendBase): """Render tables using the pandas Styler in Jupyter environments.""" - def _build_base_styles(self, color: str) -> list[dict]: + @staticmethod + def _build_base_styles(color: str) -> list[dict]: """ Return base CSS table styles for a given border color. @@ -79,7 +80,8 @@ def _build_base_styles(self, color: str) -> list[dict]: }, ] - def _build_header_alignment_styles(self, df: object, alignments: object) -> list[dict]: + @staticmethod + def _build_header_alignment_styles(df: object, alignments: object) -> list[dict]: """ Generate header cell alignment styles per column. @@ -136,7 +138,8 @@ def _apply_styling(self, df: object, alignments: object, color: str) -> object: ) return styler - def _update_display(self, styler: object, display_handle: object) -> None: + @staticmethod + def _update_display(styler: object, display_handle: object) -> None: """ Single, consistent update path for Jupyter. diff --git a/src/easydiffraction/display/tablers/rich.py b/src/easydiffraction/display/tablers/rich.py index baad5fcd..017e6d04 100644 --- a/src/easydiffraction/display/tablers/rich.py +++ b/src/easydiffraction/display/tablers/rich.py @@ -39,7 +39,8 @@ class RichTableBackend(TableBackendBase): """Render tables to terminal or Jupyter using the Rich library.""" - def _to_html(self, table: Table) -> str: + @staticmethod + def _to_html(table: Table) -> str: """ Render a Rich table to HTML using an off-screen console. diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index d91e99f5..1b1d3792 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -21,6 +21,12 @@ from easydiffraction.core.category import CategoryItem from easydiffraction.core.variable import GenericDescriptorBase +# Maximum CIF description length before using semicolon-delimited block +_CIF_DESCRIPTION_WRAP_LEN = 60 + +# Minimum string length to check for surrounding quotes +_MIN_QUOTED_LEN = 2 + def format_value(value: object) -> str: """ @@ -251,7 +257,7 @@ def project_info_to_cif(info: object) -> str: if ' ' in title: title = f"'{title}'" - if len(info.description) > 60: + if len(info.description) > _CIF_DESCRIPTION_WRAP_LEN: description = f'\n;\n{info.description}\n;' elif info.description: description = f'{info.description}' @@ -434,10 +440,10 @@ def _read(tag: str) -> str | None: return None raw = vals[0] # CIF unknown / inapplicable markers - if raw in ('?', '.'): + if raw in {'?', '.'}: return None # Strip surrounding quotes - if len(raw) >= 2 and raw[0] == raw[-1] and raw[0] in {"'", '"'}: + if len(raw) >= _MIN_QUOTED_LEN and raw[0] == raw[-1] and raw[0] in {"'", '"'}: raw = raw[1:-1] return raw @@ -486,7 +492,7 @@ def param_from_cif( raw = found_values[idx] # CIF unknown / inapplicable markers → keep default - if raw in ('?', '.'): + if raw in {'?', '.'}: return # If numeric, parse with uncertainty if present @@ -501,7 +507,7 @@ def param_from_cif( # If string, strip quotes if present elif self._value_type == DataTypes.STRING: - if len(raw) >= 2 and raw[0] == raw[-1] and raw[0] in {"'", '"'}: + if len(raw) >= _MIN_QUOTED_LEN and raw[0] == raw[-1] and raw[0] in {"'", '"'}: self.value = raw[1:-1] else: self.value = raw @@ -593,7 +599,7 @@ def _get_loop(block: object, category_item: object) -> object | None: raw = array[row_idx][col_idx] # CIF unknown / inapplicable markers → keep default - if raw in ('?', '.'): + if raw in {'?', '.'}: break # If numeric, parse with uncertainty if present @@ -609,7 +615,12 @@ def _get_loop(block: object, category_item: object) -> object | None: # If string, strip quotes if present # TODO: Make a helper function for this elif param._value_type == DataTypes.STRING: - if len(raw) >= 2 and raw[0] == raw[-1] and raw[0] in {"'", '"'}: + is_quoted = ( + len(raw) >= _MIN_QUOTED_LEN + and raw[0] == raw[-1] + and raw[0] in {"'", '"'} + ) + if is_quoted: param.value = raw[1:-1] else: param.value = raw diff --git a/src/easydiffraction/utils/environment.py b/src/easydiffraction/utils/environment.py index d9f85669..9a194991 100644 --- a/src/easydiffraction/utils/environment.py +++ b/src/easydiffraction/utils/environment.py @@ -75,9 +75,7 @@ def in_jupyter() -> bool: ipython_mod = None else: ipython_mod = IPython - if ipython_mod is None: - return False - if in_pycharm(): + if ipython_mod is None or in_pycharm(): return False if in_colab(): return True @@ -91,15 +89,10 @@ def in_jupyter() -> bool: has_cfg = hasattr(ip, 'config') and isinstance(ip.config, dict) if has_cfg and 'IPKernelApp' in ip.config: # type: ignore[index] return True - shell = ip.__class__.__name__ - if shell == 'ZMQInteractiveShell': # Jupyter or qtconsole - return True - if shell == 'TerminalInteractiveShell': - return False + # Jupyter or qtconsole use ZMQInteractiveShell + return ip.__class__.__name__ == 'ZMQInteractiveShell' # noqa: TRY300 except Exception: return False - else: - return False def in_github_ci() -> bool: diff --git a/src/easydiffraction/utils/logging.py b/src/easydiffraction/utils/logging.py index 13deeaa4..49f6be05 100644 --- a/src/easydiffraction/utils/logging.py +++ b/src/easydiffraction/utils/logging.py @@ -325,9 +325,14 @@ def _suppress_traceback(logger: object) -> object: def suppress_jupyter_traceback(*args: object, **kwargs: object) -> None: """Log only the exception message.""" + # IPython's custom_exc handler passes + # (shell, etype, evalue, tb, tb_offset) + evalue_arg_index = 2 try: evalue = ( - args[2] if len(args) > 2 else kwargs.get('_evalue') or kwargs.get('evalue') + args[evalue_arg_index] + if len(args) > evalue_arg_index + else kwargs.get('_evalue') or kwargs.get('evalue') ) logger.error(str(evalue)) except Exception as err: diff --git a/tools/gen_tests_scaffold.py b/tools/gen_tests_scaffold.py index 336bebb9..51e9fb3e 100644 --- a/tools/gen_tests_scaffold.py +++ b/tools/gen_tests_scaffold.py @@ -80,7 +80,7 @@ def ensure_package_dirs(dir_path: Path) -> None: # but we still want to ensure __init__.py at TESTS_ROOT for part in dir_path.relative_to(TESTS_ROOT).parts: (current / '__init__.py').touch(exist_ok=True) - current = current / part + current /= part # Ensure the final directory also has __init__.py (current / '__init__.py').touch(exist_ok=True) diff --git a/tools/param_consistency.py b/tools/param_consistency.py index cd63989c..1c459e05 100644 --- a/tools/param_consistency.py +++ b/tools/param_consistency.py @@ -90,6 +90,9 @@ def length_a(self) -> Parameter: 'StringDescriptor': 'str', } +# Minimum number of setter args to have a value parameter (self + value) +_MIN_SETTER_ARGS = 2 + # --------------------------------------------------------- # Data structures @@ -485,14 +488,14 @@ def _analyze_property( setter_args = prop.setter.args.args setter_param = ( setter_args[1].arg - if len(setter_args) >= 2 + if len(setter_args) >= _MIN_SETTER_ARGS else 'value' ) expected_ann = _SETTER_ANN[desc.type_name] actual_val_ann = None if ( - len(setter_args) >= 2 + len(setter_args) >= _MIN_SETTER_ARGS and setter_args[1].annotation ): actual_val_ann = _ann_str( From 9bc0461030f09ce188739ade69d093e312a12b96 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 22:09:02 +0200 Subject: [PATCH 34/51] Use ruff default PLR thresholds with max-args=6 --- .github/copilot-instructions.md | 16 +++++++----- docs/architecture/architecture.md | 26 +++++++++++++++---- pixi.lock | 4 +-- pyproject.toml | 21 ++++----------- .../analysis/fit_helpers/reporting.py | 5 +--- 5 files changed, 38 insertions(+), 34 deletions(-) diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index b9fc88ae..77675257 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -63,13 +63,15 @@ - Lint complexity thresholds (`max-args`, `max-branches`, `max-statements`, `max-locals`, `max-nested-blocks`, etc. in `pyproject.toml`) are intentional code-quality guardrails. They are not - arbitrary numbers. When code violates a threshold, it is a signal that - the function or class needs refactoring — not that the threshold needs - raising. Do not raise thresholds, add `# noqa` comments, or use any - other mechanism to silence complexity violations. Instead, refactor the - code (extract helpers, introduce parameter objects, flatten nesting, - etc.). For complex refactors that touch many lines or change public - API, propose a refactoring plan and wait for approval before + arbitrary numbers — the project uses ruff's defaults (with `max-args` + and `max-positional-args` set to 6 instead of 5 to account for ruff + counting `self`/`cls`). When code violates a threshold, it is a signal + that the function or class needs refactoring — not that the threshold + needs raising. Do not raise thresholds, add `# noqa` comments, or use + any other mechanism to silence complexity violations. Instead, refactor + the code (extract helpers, introduce parameter objects, flatten + nesting, etc.). For complex refactors that touch many lines or change + public API, propose a refactoring plan and wait for approval before proceeding. ## Architecture diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md index e476877a..5a71b784 100644 --- a/docs/architecture/architecture.md +++ b/docs/architecture/architecture.md @@ -1172,11 +1172,27 @@ def length_a(self) -> Parameter: ### 9.9 Lint Complexity Thresholds -The Pylint-style complexity limits configured in `pyproject.toml` -(`max-args`, `max-branches`, `max-statements`, `max-locals`, -`max-nested-blocks`, etc.) are **intentional code-quality guardrails**, -not arbitrary numbers. A violation is a signal that the function or class -needs refactoring — not that the threshold needs raising. +The Pylint-style complexity limits in `pyproject.toml` are **intentional +code-quality guardrails**, not arbitrary numbers. A violation is a signal +that the function or class needs refactoring — not that the threshold +needs raising. + +The project uses **ruff's defaults** for all PLR thresholds, with one +exception: `max-args` and `max-positional-args` are set to **6** instead +of the ruff default of 5, because ruff counts `self`/`cls` while +traditional pylint does not. Setting 6 in ruff matches pylint's standard +limit of 5 real parameters per function. + +| Threshold | Value | Rule | +| -------------------- | ----- | ------- | +| `max-args` | 6 | PLR0913 | +| `max-positional-args`| 6 | PLR0917 | +| `max-branches` | 12 | PLR0912 | +| `max-statements` | 50 | PLR0915 | +| `max-locals` | 15 | PLR0914 | +| `max-nested-blocks` | 5 | PLR1702 | +| `max-returns` | 6 | PLR0911 | +| `max-public-methods` | 20 | PLR0904 | **Rules:** diff --git a/pixi.lock b/pixi.lock index 4bf24531..7a1e6d18 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty36 - sha256: 413ec5f57c3e7fc4e3dcb4a1e2421851b246cb0a25239e4cde585d75e412b086 + version: 0.11.1+devdirty37 + sha256: e539e3ac0f6beb96be004e85b00dd6280257acc9d1cba7aa077b87aa81653674 requires_dist: - asciichartpy - asteval diff --git a/pyproject.toml b/pyproject.toml index 4454fd95..e0e3ad48 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -370,22 +370,11 @@ max-doc-length = 72 convention = 'numpy' [tool.ruff.lint.pylint] -# Maximum number of arguments for function/method definitions -max-args = 8 -# Maximum number of positional arguments for function/method definitions -max-positional-args = 8 -# Maximum number of public methods for a class -max-public-methods = 30 -# Maximum number of return statements in a function/method body -max-returns = 8 -# Maximum number of branches in a function/method body -max-branches = 16 -# Maximum number of statements in a function/method body -max-statements = 60 -# Maximum number of local variables in a function/method body -max-locals = 20 -# Maximum number of nested blocks in a function/method body -max-nested-blocks = 6 +# Ruff counts `self`/`cls` in max-args; traditional pylint does not. +# Setting 6 here matches pylint's default of 5 (excluding self). +max-args = 6 +max-positional-args = 6 + ############################# # Configuration for pydoclint diff --git a/src/easydiffraction/analysis/fit_helpers/reporting.py b/src/easydiffraction/analysis/fit_helpers/reporting.py index 18af57aa..35897597 100644 --- a/src/easydiffraction/analysis/fit_helpers/reporting.py +++ b/src/easydiffraction/analysis/fit_helpers/reporting.py @@ -23,7 +23,6 @@ def __init__( self, success: bool = False, parameters: list[object] | None = None, - chi_square: float | None = None, reduced_chi_square: float | None = None, message: str = '', iterations: int = 0, @@ -41,8 +40,6 @@ def __init__( Indicates if the fit was successful. parameters : list[object] | None, default=None List of parameters used in the fit. - chi_square : float | None, default=None - Chi-square value of the fit. reduced_chi_square : float | None, default=None Reduced chi-square value of the fit. message : str, default='' @@ -62,7 +59,7 @@ def __init__( """ self.success: bool = success self.parameters: list[object] = parameters if parameters is not None else [] - self.chi_square: float | None = chi_square + self.chi_square: float | None = None self.reduced_chi_square: float | None = reduced_chi_square self.message: str = message self.iterations: int = iterations From fb518e4ae66b7ab18d48584093413401b2478635 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 22:18:09 +0200 Subject: [PATCH 35/51] Refactor FitResults to fix PLR complexity violations in reporting.py --- pixi.toml | 7 +- .../analysis/fit_helpers/reporting.py | 102 ++++++++++-------- 2 files changed, 63 insertions(+), 46 deletions(-) diff --git a/pixi.toml b/pixi.toml index 7e82ac63..8ae7a65f 100644 --- a/pixi.toml +++ b/pixi.toml @@ -98,7 +98,10 @@ integration-tests = 'python -m pytest tests/integration/ --color=yes -n auto -v' script-tests = 'python -m pytest tools/test_scripts.py --color=yes -n auto -v' notebook-tests = 'python -m pytest --nbmake docs/docs/tutorials/ --nbmake-timeout=1200 --color=yes -n auto -v' -test = { depends-on = ['unit-tests'] } +test = { depends-on = [ + 'unit-tests', + 'functional-tests', +] } ########### # ✔️ Checks @@ -155,12 +158,14 @@ raw-metrics-json = 'radon raw -s -j src/' ############# unit-tests-coverage = 'pixi run unit-tests --cov=src/easydiffraction --cov-report=term-missing' +functional-tests-coverage = 'pixi run functional-tests --cov=src/easydiffraction --cov-report=term-missing' integration-tests-coverage = 'pixi run integration-tests --cov=src/easydiffraction --cov-report=term-missing' docstring-coverage = 'interrogate -c pyproject.toml src/easydiffraction' cov = { depends-on = [ 'docstring-coverage', 'unit-tests-coverage', + 'functional-tests-coverage', 'integration-tests-coverage', ] } diff --git a/src/easydiffraction/analysis/fit_helpers/reporting.py b/src/easydiffraction/analysis/fit_helpers/reporting.py index 35897597..91e21718 100644 --- a/src/easydiffraction/analysis/fit_helpers/reporting.py +++ b/src/easydiffraction/analysis/fit_helpers/reporting.py @@ -24,8 +24,6 @@ def __init__( success: bool = False, parameters: list[object] | None = None, reduced_chi_square: float | None = None, - message: str = '', - iterations: int = 0, engine_result: object | None = None, starting_parameters: list[object] | None = None, fitting_time: float | None = None, @@ -42,10 +40,6 @@ def __init__( List of parameters used in the fit. reduced_chi_square : float | None, default=None Reduced chi-square value of the fit. - message : str, default='' - Message related to the fit. - iterations : int, default=0 - Number of iterations performed. engine_result : object | None, default=None Result from the fitting engine. starting_parameters : list[object] | None, default=None @@ -61,8 +55,8 @@ def __init__( self.parameters: list[object] = parameters if parameters is not None else [] self.chi_square: float | None = None self.reduced_chi_square: float | None = reduced_chi_square - self.message: str = message - self.iterations: int = iterations + self.message: str = '' + self.iterations: int = 0 self.engine_result: object | None = engine_result self.result: object | None = None self.starting_parameters: list[object] = ( @@ -147,46 +141,64 @@ def display_results( 'right', ] - rows = [] - for param in self.parameters: - datablock_entry_name = ( - param._identity.datablock_entry_name - ) # getattr(param, 'datablock_name', 'N/A') - category_code = param._identity.category_code # getattr(param, 'category_key', 'N/A') - category_entry_name = ( - param._identity.category_entry_name or '' - ) # getattr(param, 'category_entry_name', 'N/A') - name = getattr(param, 'name', 'N/A') - start = ( - f'{getattr(param, "_fit_start_value", "N/A"):.4f}' - if param._fit_start_value is not None - else 'N/A' - ) - fitted = f'{param.value:.4f}' if param.value is not None else 'N/A' - uncertainty = f'{param.uncertainty:.4f}' if param.uncertainty is not None else 'N/A' - units = getattr(param, 'units', 'N/A') - - if param._fit_start_value and param.value: - change = ((param.value - param._fit_start_value) / param._fit_start_value) * 100 - arrow = '↑' if change > 0 else '↓' - relative_change = f'{abs(change):.2f} % {arrow}' - else: - relative_change = 'N/A' - - rows.append([ - datablock_entry_name, - category_code, - category_entry_name, - name, - start, - fitted, - uncertainty, - units, - relative_change, - ]) + rows = [_build_parameter_row(p) for p in self.parameters] render_table( columns_headers=headers, columns_alignment=alignments, columns_data=rows, ) + + +def _build_parameter_row(param: object) -> list[str]: + """ + Build a single table row for a fitted parameter. + + Parameters + ---------- + param : object + Fitted parameter descriptor. + + Returns + ------- + list[str] + Column values for the parameter row. + """ + name = getattr(param, 'name', 'N/A') + start = f'{param._fit_start_value:.4f}' if param._fit_start_value is not None else 'N/A' + fitted = f'{param.value:.4f}' if param.value is not None else 'N/A' + uncertainty = f'{param.uncertainty:.4f}' if param.uncertainty is not None else 'N/A' + units = getattr(param, 'units', 'N/A') + relative_change = _compute_relative_change(param) + return [ + param._identity.datablock_entry_name, + param._identity.category_code, + param._identity.category_entry_name or '', + name, + start, + fitted, + uncertainty, + units, + relative_change, + ] + + +def _compute_relative_change(param: object) -> str: + """ + Compute percentage change between start and fitted values. + + Parameters + ---------- + param : object + Fitted parameter descriptor. + + Returns + ------- + str + Formatted change string or ``'N/A'``. + """ + if not param._fit_start_value or not param.value: + return 'N/A' + change = ((param.value - param._fit_start_value) / param._fit_start_value) * 100 + arrow = '↑' if change > 0 else '↓' + return f'{abs(change):.2f} % {arrow}' From 1ac7d60f67d211d33328b73987d62f48726304bb Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 22:22:32 +0200 Subject: [PATCH 36/51] Remove verbosity param from Experiments.add_from_data_path --- src/easydiffraction/datablocks/experiment/collection.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/easydiffraction/datablocks/experiment/collection.py b/src/easydiffraction/datablocks/experiment/collection.py index fb30d013..77248748 100644 --- a/src/easydiffraction/datablocks/experiment/collection.py +++ b/src/easydiffraction/datablocks/experiment/collection.py @@ -106,7 +106,6 @@ def add_from_data_path( beam_mode: str | None = None, radiation_probe: str | None = None, scattering_type: str | None = None, - verbosity: str | None = None, ) -> None: """ Add an experiment from a data file path. @@ -125,13 +124,8 @@ def add_from_data_path( Radiation probe (e.g. ``'neutron'``). scattering_type : str | None, default=None Scattering type (e.g. ``'bragg'``). - verbosity : str | None, default=None - Console output verbosity: ``'full'`` for multi-line output, - ``'short'`` for a one-line status message, or ``'silent'`` - for no output. When ``None``, uses ``project.verbosity``. """ - if verbosity is None and self._parent is not None: - verbosity = self._parent.verbosity + verbosity = self._parent.verbosity if self._parent is not None else None verb = VerbosityEnum(verbosity) if verbosity is not None else VerbosityEnum.FULL experiment = ExperimentFactory.from_data_path( name=name, From b8babaa4a0c63a5ef8a43c7069133c9f6b4ac8eb Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 22:31:03 +0200 Subject: [PATCH 37/51] Move data-loading print from ExperimentFactory to collection --- .../datablocks/experiment/collection.py | 10 +++++++--- .../datablocks/experiment/item/factory.py | 13 +------------ 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/src/easydiffraction/datablocks/experiment/collection.py b/src/easydiffraction/datablocks/experiment/collection.py index 77248748..16bfe5a4 100644 --- a/src/easydiffraction/datablocks/experiment/collection.py +++ b/src/easydiffraction/datablocks/experiment/collection.py @@ -127,15 +127,19 @@ def add_from_data_path( """ verbosity = self._parent.verbosity if self._parent is not None else None verb = VerbosityEnum(verbosity) if verbosity is not None else VerbosityEnum.FULL - experiment = ExperimentFactory.from_data_path( + experiment = ExperimentFactory.from_scratch( name=name, - data_path=data_path, sample_form=sample_form, beam_mode=beam_mode, radiation_probe=radiation_probe, scattering_type=scattering_type, - verbosity=verb, ) + num_points = experiment._load_ascii_data_to_experiment(data_path) + if verb is VerbosityEnum.FULL: + console.paragraph('Data loaded successfully') + console.print(f"Experiment 🔬 '{name}'. Number of data points: {num_points}.") + elif verb is VerbosityEnum.SHORT: + console.print(f"✅ Data loaded: Experiment 🔬 '{name}'. {num_points} points.") self.add(experiment) # TODO: Move to DatablockCollection? diff --git a/src/easydiffraction/datablocks/experiment/item/factory.py b/src/easydiffraction/datablocks/experiment/item/factory.py index fea39321..24156028 100644 --- a/src/easydiffraction/datablocks/experiment/item/factory.py +++ b/src/easydiffraction/datablocks/experiment/item/factory.py @@ -24,8 +24,6 @@ from easydiffraction.io.cif.parse import document_from_string from easydiffraction.io.cif.parse import name_from_block from easydiffraction.io.cif.parse import pick_sole_block -from easydiffraction.utils.enums import VerbosityEnum -from easydiffraction.utils.logging import console from easydiffraction.utils.logging import log if TYPE_CHECKING: @@ -232,7 +230,6 @@ def from_data_path( beam_mode: str | None = None, radiation_probe: str | None = None, scattering_type: str | None = None, - verbosity: VerbosityEnum = VerbosityEnum.FULL, ) -> ExperimentBase: """ Create an experiment from a raw data ASCII file. @@ -251,8 +248,6 @@ def from_data_path( Radiation probe (e.g. ``'neutron'``). scattering_type : str | None, default=None Scattering type (e.g. ``'bragg'``). - verbosity : VerbosityEnum, default=VerbosityEnum.FULL - Console output verbosity. Returns ------- @@ -267,12 +262,6 @@ def from_data_path( scattering_type=scattering_type, ) - num_points = expt_obj._load_ascii_data_to_experiment(data_path) - - if verbosity is VerbosityEnum.FULL: - console.paragraph('Data loaded successfully') - console.print(f"Experiment 🔬 '{name}'. Number of data points: {num_points}.") - elif verbosity is VerbosityEnum.SHORT: - console.print(f"✅ Data loaded: Experiment 🔬 '{name}'. {num_points} points.") + expt_obj._load_ascii_data_to_experiment(data_path) return expt_obj From bd3b2eba5bff7a6b68b51bb75cdaf157fb0d809d Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 22:46:15 +0200 Subject: [PATCH 38/51] Fix plotting tests and add numpy conversion in _prepare_powder_context --- src/easydiffraction/display/plotting.py | 126 ++++++++---------- src/easydiffraction/project/project.py | 3 +- .../easydiffraction/display/test_plotting.py | 19 ++- 3 files changed, 72 insertions(+), 76 deletions(-) diff --git a/src/easydiffraction/display/plotting.py b/src/easydiffraction/display/plotting.py index 7970e8db..f3fbce29 100644 --- a/src/easydiffraction/display/plotting.py +++ b/src/easydiffraction/display/plotting.py @@ -164,7 +164,7 @@ def _get_axes_labels( """Look up axis labels for the experiment / x-axis.""" return DEFAULT_AXES_LABELS[sample_form, scattering_type, x_axis] - def _prepare_powder_data( + def _prepare_powder_context( self, pattern: object, expt_name: str, @@ -172,12 +172,9 @@ def _prepare_powder_data( x_min: object, x_max: object, x: object, - need_meas: bool = False, - need_calc: bool = False, - show_residual: bool = False, ) -> dict | None: """ - Validate, resolve axes, auto-range, and filter arrays. + Resolve axes, auto-range, and filter x-array. Parameters ---------- @@ -194,35 +191,23 @@ def _prepare_powder_data( Optional maximum x-axis limit. x : object Explicit x-axis type or ``None``. - need_meas : bool, default=False - Whether ``intensity_meas`` is required. - need_calc : bool, default=False - Whether ``intensity_calc`` is required. - show_residual : bool, default=False - If ``True``, compute meas - calc residual. Returns ------- dict | None - A dict with keys ``x_filtered``, ``y_series``, ``y_labels``, - ``axes_labels``, and ``x_axis``; or ``None`` when a required - array is missing. + A dict with keys ``x_filtered``, ``x_array``, ``x_min``, + ``x_max``, and ``axes_labels``; or ``None`` when the x-array + is missing. """ x_axis, x_name, sample_form, scattering_type, _ = self._resolve_x_axis(expt_type, x) # Get x-array from pattern - x_array = getattr(pattern, x_axis, None) - if x_array is None: + x_raw = getattr(pattern, x_axis, None) + if x_raw is None: log.error(f'No {x_name} data available for experiment {expt_name}') return None - # Validate required intensities - if need_meas and pattern.intensity_meas is None: - log.error(f'No measured data available for experiment {expt_name}') - return None - if need_calc and pattern.intensity_calc is None: - log.error(f'No calculated data available for experiment {expt_name}') - return None + x_array = np.asarray(x_raw) # Auto-range for ASCII engine x_min, x_max = self._auto_x_range_for_ascii(pattern, x_array, x_min, x_max) @@ -230,35 +215,14 @@ def _prepare_powder_data( # Filter x x_filtered = self._filtered_y_array(x_array, x_array, x_min, x_max) - # Filter y arrays and build series / labels - y_series = [] - y_labels = [] - - y_meas = None - if need_meas: - y_meas = self._filtered_y_array(pattern.intensity_meas, x_array, x_min, x_max) - y_series.append(y_meas) - y_labels.append('meas') - - y_calc = None - if need_calc: - y_calc = self._filtered_y_array(pattern.intensity_calc, x_array, x_min, x_max) - y_series.append(y_calc) - y_labels.append('calc') - - if show_residual and y_meas is not None and y_calc is not None: - y_resid = y_meas - y_calc - y_series.append(y_resid) - y_labels.append('resid') - axes_labels = self._get_axes_labels(sample_form, scattering_type, x_axis) return { 'x_filtered': x_filtered, - 'y_series': y_series, - 'y_labels': y_labels, + 'x_array': x_array, + 'x_min': x_min, + 'x_max': x_max, 'axes_labels': axes_labels, - 'x_axis': x_axis, } @staticmethod @@ -399,22 +363,28 @@ def plot_meas( X-axis type (``'two_theta'``, ``'time_of_flight'``, or ``'d_spacing'``). If ``None``, auto-detected from beam mode. """ - ctx = self._prepare_powder_data( + ctx = self._prepare_powder_context( pattern, expt_name, expt_type, x_min, x_max, x, - need_meas=True, ) if ctx is None: return + if pattern.intensity_meas is None: + log.error(f'No measured data available for experiment {expt_name}') + return + y_meas = self._filtered_y_array( + pattern.intensity_meas, ctx['x_array'], ctx['x_min'], ctx['x_max'] + ) + self._backend.plot_powder( x=ctx['x_filtered'], - y_series=ctx['y_series'], - labels=ctx['y_labels'], + y_series=[y_meas], + labels=['meas'], axes_labels=ctx['axes_labels'], title=f"Measured data for experiment 🔬 '{expt_name}'", height=self.height, @@ -449,22 +419,28 @@ def plot_calc( X-axis type (``'two_theta'``, ``'time_of_flight'``, or ``'d_spacing'``). If ``None``, auto-detected from beam mode. """ - ctx = self._prepare_powder_data( + ctx = self._prepare_powder_context( pattern, expt_name, expt_type, x_min, x_max, x, - need_calc=True, ) if ctx is None: return + if pattern.intensity_calc is None: + log.error(f'No calculated data available for experiment {expt_name}') + return + y_calc = self._filtered_y_array( + pattern.intensity_calc, ctx['x_array'], ctx['x_min'], ctx['x_max'] + ) + self._backend.plot_powder( x=ctx['x_filtered'], - y_series=ctx['y_series'], - labels=ctx['y_labels'], + y_series=[y_calc], + labels=['calc'], axes_labels=ctx['axes_labels'], title=f"Calculated data for experiment 🔬 '{expt_name}'", height=self.height, @@ -472,9 +448,8 @@ def plot_calc( def plot_meas_vs_calc( self, - pattern: object, + experiment: object, expt_name: str, - expt_type: object, x_min: object = None, x_max: object = None, show_residual: bool = False, @@ -494,13 +469,10 @@ def plot_meas_vs_calc( Parameters ---------- - pattern : object - Data pattern object with meas/calc arrays. + experiment : object + Experiment instance with ``.data`` and ``.type`` attributes. expt_name : str Experiment name for the title. - expt_type : object - Experiment type with sample_form, scattering, and beam - enums. x_min : object, default=None Optional minimum x-axis limit. x_max : object, default=None @@ -511,6 +483,9 @@ def plot_meas_vs_calc( X-axis type. If ``None``, auto-detected from sample form and beam mode. """ + pattern = experiment.data + expt_type = experiment.type + x_axis, _, sample_form, scattering_type, _ = self._resolve_x_axis(expt_type, x) # Validate required data (before x-array check, matching @@ -545,26 +520,37 @@ def plot_meas_vs_calc( return # Line plot (PD or SC with d_spacing/sin_theta_over_lambda) - # TODO: Rename from _prepare_powder_data as it also supports - # single crystal line plots - ctx = self._prepare_powder_data( + ctx = self._prepare_powder_context( pattern, expt_name, expt_type, x_min, x_max, x, - need_meas=True, - need_calc=True, - show_residual=show_residual, ) if ctx is None: return + y_series = [] + y_labels = [] + y_meas = self._filtered_y_array( + pattern.intensity_meas, ctx['x_array'], ctx['x_min'], ctx['x_max'] + ) + y_series.append(y_meas) + y_labels.append('meas') + y_calc = self._filtered_y_array( + pattern.intensity_calc, ctx['x_array'], ctx['x_min'], ctx['x_max'] + ) + y_series.append(y_calc) + y_labels.append('calc') + if show_residual: + y_series.append(y_meas - y_calc) + y_labels.append('resid') + self._backend.plot_powder( x=ctx['x_filtered'], - y_series=ctx['y_series'], - labels=ctx['y_labels'], + y_series=y_series, + labels=y_labels, axes_labels=ctx['axes_labels'], title=title, height=self.height, diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index 50fb5b1e..af411907 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -561,9 +561,8 @@ def plot_meas_vs_calc( experiment = self.experiments[expt_name] self.plotter.plot_meas_vs_calc( - experiment.data, + experiment, expt_name, - experiment.type, x_min=x_min, x_max=x_max, show_residual=show_residual, diff --git a/tests/unit/easydiffraction/display/test_plotting.py b/tests/unit/easydiffraction/display/test_plotting.py index acea6a66..356c793e 100644 --- a/tests/unit/easydiffraction/display/test_plotting.py +++ b/tests/unit/easydiffraction/display/test_plotting.py @@ -55,11 +55,14 @@ def test_plotter_factory_supported_and_unsupported(): PlotterFactory.create('nope') -def test_plotter_error_paths_and_filtering(capsys): +def test_plotter_error_paths_and_filtering(capsys, monkeypatch): from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum from easydiffraction.display.plotting import Plotter + from easydiffraction.utils.logging import Logger + + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.WARN, raising=True) class Ptn: def __init__( @@ -95,18 +98,26 @@ def __init__(self): out = capsys.readouterr().out assert 'No calculated data available for experiment E' in out + class Expt: + def __init__(self, pattern, expt_type): + self.data = pattern + self.type = expt_type + p.plot_meas_vs_calc( - Ptn(two_theta=None, intensity_meas=None, intensity_calc=None), 'E', ExptType() + Expt(Ptn(two_theta=None, intensity_meas=None, intensity_calc=None), ExptType()), + 'E', ) out = capsys.readouterr().out assert 'No measured data available for experiment E' in out p.plot_meas_vs_calc( - Ptn(two_theta=[1], intensity_meas=None, intensity_calc=[1]), 'E', ExptType() + Expt(Ptn(two_theta=[1], intensity_meas=None, intensity_calc=[1]), ExptType()), + 'E', ) out = capsys.readouterr().out assert 'No measured data available for experiment E' in out p.plot_meas_vs_calc( - Ptn(two_theta=[1], intensity_meas=[1], intensity_calc=None), 'E', ExptType() + Expt(Ptn(two_theta=[1], intensity_meas=[1], intensity_calc=None), ExptType()), + 'E', ) out = capsys.readouterr().out assert 'No calculated data available for experiment E' in out From cc35eb8663abffabd5e855e52e7a7dc6acbbb28c Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 23:44:18 +0200 Subject: [PATCH 39/51] Update tests, tutorials, and docs to use analysis.display API --- .github/copilot-instructions.md | 22 +- docs/architecture/architecture.md | 38 +- .../architecture/sequential_fitting_design.md | 17 +- docs/docs/tutorials/ed-1.py | 4 +- docs/docs/tutorials/ed-10.py | 4 +- docs/docs/tutorials/ed-11.py | 4 +- docs/docs/tutorials/ed-12.py | 4 +- docs/docs/tutorials/ed-13.py | 12 +- docs/docs/tutorials/ed-14.py | 6 +- docs/docs/tutorials/ed-15.py | 6 +- docs/docs/tutorials/ed-16.py | 12 +- docs/docs/tutorials/ed-17.py | 28 +- docs/docs/tutorials/ed-18.py | 4 +- docs/docs/tutorials/ed-2.py | 4 +- docs/docs/tutorials/ed-3.py | 60 +- docs/docs/tutorials/ed-4.py | 6 +- docs/docs/tutorials/ed-5.py | 10 +- docs/docs/tutorials/ed-6.py | 36 +- docs/docs/tutorials/ed-7.py | 36 +- docs/docs/tutorials/ed-8.py | 10 +- docs/docs/tutorials/ed-9.py | 8 +- .../user-guide/analysis-workflow/analysis.md | 4 +- docs/docs/user-guide/first-steps.md | 23 +- pixi.lock | 4 +- pixi.toml | 5 +- pyproject.toml | 5 +- src/easydiffraction/analysis/analysis.py | 915 ++++++++++-------- .../analysis/calculators/cryspy.py | 541 ++++++----- src/easydiffraction/analysis/sequential.py | 401 +++++--- .../crystallography/crystallography.py | 107 +- src/easydiffraction/display/plotting.py | 178 +++- src/easydiffraction/io/cif/serialize.py | 75 +- src/easydiffraction/project/project.py | 217 +---- tests/integration/fitting/test_sequential.py | 3 - .../easydiffraction/analysis/test_analysis.py | 15 +- .../analysis/test_analysis_access_params.py | 4 +- .../analysis/test_analysis_show_empty.py | 12 +- .../easydiffraction/display/test_plotting.py | 16 +- tmp/_read_cif.py | 2 +- tmp/basic_single-fit_pd-neut-cwl_LBCO-HRPT.py | 24 +- tmp/short.py | 2 +- tmp/short2.py | 2 +- 42 files changed, 1666 insertions(+), 1220 deletions(-) diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 77675257..d5abc83e 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -62,17 +62,17 @@ private `_set_` method instead of exposing a public setter. - Lint complexity thresholds (`max-args`, `max-branches`, `max-statements`, `max-locals`, `max-nested-blocks`, etc. in - `pyproject.toml`) are intentional code-quality guardrails. They are not - arbitrary numbers — the project uses ruff's defaults (with `max-args` - and `max-positional-args` set to 6 instead of 5 to account for ruff - counting `self`/`cls`). When code violates a threshold, it is a signal - that the function or class needs refactoring — not that the threshold - needs raising. Do not raise thresholds, add `# noqa` comments, or use - any other mechanism to silence complexity violations. Instead, refactor - the code (extract helpers, introduce parameter objects, flatten - nesting, etc.). For complex refactors that touch many lines or change - public API, propose a refactoring plan and wait for approval before - proceeding. + `pyproject.toml`) are intentional code-quality guardrails. They are + not arbitrary numbers — the project uses ruff's defaults (with + `max-args` and `max-positional-args` set to 6 instead of 5 to account + for ruff counting `self`/`cls`). When code violates a threshold, it is + a signal that the function or class needs refactoring — not that the + threshold needs raising. Do not raise thresholds, add `# noqa` + comments, or use any other mechanism to silence complexity violations. + Instead, refactor the code (extract helpers, introduce parameter + objects, flatten nesting, etc.). For complex refactors that touch many + lines or change public API, propose a refactoring plan and wait for + approval before proceeding. ## Architecture diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md index 5a71b784..7faac40e 100644 --- a/docs/architecture/architecture.md +++ b/docs/architecture/architecture.md @@ -857,7 +857,7 @@ project.experiments['hrpt'].calculator_type = 'cryspy' project.analysis.current_minimizer = 'lmfit' # Plot before fitting -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # Select free parameters project.structures['lbco'].cell.length_a.free = True @@ -866,14 +866,14 @@ project.experiments['hrpt'].instrument.calib_twotheta_offset.free = True project.experiments['hrpt'].background['10'].y.free = True # Inspect free parameters -project.analysis.show_free_params() +project.analysis.display.free_params() # Fit and show results project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # Plot after fitting -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # Save project.save() @@ -1173,9 +1173,9 @@ def length_a(self) -> Parameter: ### 9.9 Lint Complexity Thresholds The Pylint-style complexity limits in `pyproject.toml` are **intentional -code-quality guardrails**, not arbitrary numbers. A violation is a signal -that the function or class needs refactoring — not that the threshold -needs raising. +code-quality guardrails**, not arbitrary numbers. A violation is a +signal that the function or class needs refactoring — not that the +threshold needs raising. The project uses **ruff's defaults** for all PLR thresholds, with one exception: `max-args` and `max-positional-args` are set to **6** instead @@ -1183,21 +1183,21 @@ of the ruff default of 5, because ruff counts `self`/`cls` while traditional pylint does not. Setting 6 in ruff matches pylint's standard limit of 5 real parameters per function. -| Threshold | Value | Rule | -| -------------------- | ----- | ------- | -| `max-args` | 6 | PLR0913 | -| `max-positional-args`| 6 | PLR0917 | -| `max-branches` | 12 | PLR0912 | -| `max-statements` | 50 | PLR0915 | -| `max-locals` | 15 | PLR0914 | -| `max-nested-blocks` | 5 | PLR1702 | -| `max-returns` | 6 | PLR0911 | -| `max-public-methods` | 20 | PLR0904 | +| Threshold | Value | Rule | +| --------------------- | ----- | ------- | +| `max-args` | 6 | PLR0913 | +| `max-positional-args` | 6 | PLR0917 | +| `max-branches` | 12 | PLR0912 | +| `max-statements` | 50 | PLR0915 | +| `max-locals` | 15 | PLR0914 | +| `max-nested-blocks` | 5 | PLR1702 | +| `max-returns` | 6 | PLR0911 | +| `max-public-methods` | 20 | PLR0904 | **Rules:** -- **Do not raise thresholds.** The current values represent the project's - design intent for maximum acceptable complexity. +- **Do not raise thresholds.** The current values represent the + project's design intent for maximum acceptable complexity. - **Do not add `# noqa` comments** (or any other mechanism) to silence complexity rules such as `PLR0912`, `PLR0913`, `PLR0914`, `PLR0915`, `PLR0917`, `PLR1702`. diff --git a/docs/architecture/sequential_fitting_design.md b/docs/architecture/sequential_fitting_design.md index 0513c89f..292020b5 100644 --- a/docs/architecture/sequential_fitting_design.md +++ b/docs/architecture/sequential_fitting_design.md @@ -233,7 +233,7 @@ project.analysis.apply_constraints() # ── Initial fit on the template ────────────────────────── project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # ── Save project (defines project path) ────────────────── project.save_as(dir_path='cosio_project') @@ -334,7 +334,7 @@ results persistent, portable, and usable by external tools. ```python # Plot parameter evolution (reads from analysis/results.csv) -project.plot_param_series( +project.plotter.plot_param_series( param=structure.cell.length_a, versus=expt.diffrn.ambient_temperature, ) @@ -363,7 +363,7 @@ project = ed.Project.load('cosio_project') project.apply_params_from_csv(row=500) # Plot (uses the template experiment with overridden params) -project.plot_meas_vs_calc(expt_name='template') +project.plotter.plot_meas_vs_calc(expt_name='template') ``` The CSV row index identifies the dataset. `apply_params_from_csv`: @@ -1117,11 +1117,12 @@ propagation, diffrn callback, precondition validation. > and existing `fit()` single-mode (Phase 4). Remove the old > `_parameter_snapshots` dict. -**Implemented:** `Plotter.plot_param_series()` reads CSV via pandas. -`Plotter.plot_param_series_from_snapshots()` preserves backward -compatibility for `fit()` single-mode (no CSV yet). -`Project.plot_param_series()` tries CSV first, falls back to snapshots. -Axis labels derived from live descriptor objects. +**Implemented:** `Plotter.plot_param_series()` resolves CSV vs snapshots +automatically via the project reference. +`Plotter._plot_param_series_from_csv()` reads CSV via pandas. +`Plotter._plot_param_series_from_snapshots()` preserves backward +compatibility for `fit()` single-mode (no CSV yet). Axis labels derived +from live descriptor objects. #### PR 11 — Parallel fitting (max_workers > 1) ✅ diff --git a/docs/docs/tutorials/ed-1.py b/docs/docs/tutorials/ed-1.py index 51e4e8e6..768ca701 100644 --- a/docs/docs/tutorials/ed-1.py +++ b/docs/docs/tutorials/ed-1.py @@ -62,13 +62,13 @@ # %% # Show fit results summary -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% project.experiments.show_names() # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% [markdown] # ## Step 5: Show Project Summary diff --git a/docs/docs/tutorials/ed-10.py b/docs/docs/tutorials/ed-10.py index 1cad89ab..aafde761 100644 --- a/docs/docs/tutorials/ed-10.py +++ b/docs/docs/tutorials/ed-10.py @@ -82,10 +82,10 @@ # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # ## Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='pdf', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='pdf', show_residual=True) diff --git a/docs/docs/tutorials/ed-11.py b/docs/docs/tutorials/ed-11.py index a16dbec7..24d7795e 100644 --- a/docs/docs/tutorials/ed-11.py +++ b/docs/docs/tutorials/ed-11.py @@ -95,10 +95,10 @@ # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # ## Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='nomad', show_residual=False) +project.plotter.plot_meas_vs_calc(expt_name='nomad', show_residual=False) diff --git a/docs/docs/tutorials/ed-12.py b/docs/docs/tutorials/ed-12.py index b6701709..d14c42fe 100644 --- a/docs/docs/tutorials/ed-12.py +++ b/docs/docs/tutorials/ed-12.py @@ -116,10 +116,10 @@ # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # ## Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='xray_pdf') +project.plotter.plot_meas_vs_calc(expt_name='xray_pdf') diff --git a/docs/docs/tutorials/ed-13.py b/docs/docs/tutorials/ed-13.py index 42996532..b3c6f693 100644 --- a/docs/docs/tutorials/ed-13.py +++ b/docs/docs/tutorials/ed-13.py @@ -586,7 +586,7 @@ # - show only free parameters of the project. # %% -project_1.analysis.show_free_params() +project_1.analysis.display.free_params() # %% [markdown] # #### Visualize Diffraction Patterns @@ -614,7 +614,7 @@ # %% project_1.analysis.fit() -project_1.analysis.show_fit_results() +project_1.analysis.display.fit_results() # %% [markdown] # #### Check Fit Results @@ -1110,7 +1110,7 @@ project_2.plot_meas_vs_calc(expt_name='sim_lbco') project_2.analysis.fit() -project_2.analysis.show_fit_results() +project_2.analysis.display.fit_results() # %% [markdown] # #### Exercise 5.3: Find the Misfit in the Fit @@ -1179,7 +1179,7 @@ project_2.structures['lbco'].cell.length_a.free = True project_2.analysis.fit() -project_2.analysis.show_fit_results() +project_2.analysis.display.fit_results() project_2.plot_meas_vs_calc(expt_name='sim_lbco') @@ -1258,7 +1258,7 @@ project_2.experiments['sim_lbco'].peak.asym_alpha_1.free = True project_2.analysis.fit() -project_2.analysis.show_fit_results() +project_2.analysis.display.fit_results() project_2.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1.35, x_max=1.40) @@ -1426,7 +1426,7 @@ # Now we can perform the fit with both phases included. project_2.analysis.fit() -project_2.analysis.show_fit_results() +project_2.analysis.display.fit_results() # Let's plot the measured diffraction pattern and the calculated # diffraction pattern both for the full range and for a zoomed-in region diff --git a/docs/docs/tutorials/ed-14.py b/docs/docs/tutorials/ed-14.py index eaa3da2a..22502ecd 100644 --- a/docs/docs/tutorials/ed-14.py +++ b/docs/docs/tutorials/ed-14.py @@ -75,7 +75,7 @@ # ## Step 4: Perform Analysis # %% -project.plot_meas_vs_calc(expt_name='heidi') +project.plotter.plot_meas_vs_calc(expt_name='heidi') # %% experiment.linked_crystal.scale.free = True @@ -91,7 +91,7 @@ # %% # Show fit results summary -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% experiment.show_as_cif() @@ -100,7 +100,7 @@ project.experiments.show_names() # %% -project.plot_meas_vs_calc(expt_name='heidi') +project.plotter.plot_meas_vs_calc(expt_name='heidi') # %% [markdown] # ## Step 5: Show Project Summary diff --git a/docs/docs/tutorials/ed-15.py b/docs/docs/tutorials/ed-15.py index 4ae4933a..617cad88 100644 --- a/docs/docs/tutorials/ed-15.py +++ b/docs/docs/tutorials/ed-15.py @@ -66,7 +66,7 @@ # ## Step 4: Perform Analysis # %% -project.plot_meas_vs_calc(expt_name='senju') +project.plotter.plot_meas_vs_calc(expt_name='senju') # %% experiment.linked_crystal.scale.free = True @@ -82,7 +82,7 @@ # %% # Show fit results summary -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% # experiment.show_as_cif() @@ -91,7 +91,7 @@ project.experiments.show_names() # %% -project.plot_meas_vs_calc(expt_name='senju') +project.plotter.plot_meas_vs_calc(expt_name='senju') # %% [markdown] # ## Step 5: Show Project Summary diff --git a/docs/docs/tutorials/ed-16.py b/docs/docs/tutorials/ed-16.py index e57f8449..214dfe25 100644 --- a/docs/docs/tutorials/ed-16.py +++ b/docs/docs/tutorials/ed-16.py @@ -196,10 +196,10 @@ # #### Plot Measured vs Calculated (Before Fit) # %% -project.plot_meas_vs_calc(expt_name='sepd', show_residual=False) +project.plotter.plot_meas_vs_calc(expt_name='sepd', show_residual=False) # %% -project.plot_meas_vs_calc(expt_name='nomad', show_residual=False) +project.plotter.plot_meas_vs_calc(expt_name='nomad', show_residual=False) # %% [markdown] # #### Set Fitting Parameters @@ -237,23 +237,23 @@ # #### Show Free Parameters # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated (After Fit) # %% -project.plot_meas_vs_calc(expt_name='sepd', show_residual=False) +project.plotter.plot_meas_vs_calc(expt_name='sepd', show_residual=False) # %% -project.plot_meas_vs_calc(expt_name='nomad', show_residual=False) +project.plotter.plot_meas_vs_calc(expt_name='nomad', show_residual=False) # %% diff --git a/docs/docs/tutorials/ed-17.py b/docs/docs/tutorials/ed-17.py index eb8bcd2a..4e71ba5a 100644 --- a/docs/docs/tutorials/ed-17.py +++ b/docs/docs/tutorials/ed-17.py @@ -310,7 +310,7 @@ def extract_diffrn(file_path): # %% project.apply_params_from_csv(row_index=-1) -project.plot_meas_vs_calc(expt_name='d20', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='d20', show_residual=True) # %% [markdown] # #### Plot Parameter Evolution @@ -324,26 +324,26 @@ def extract_diffrn(file_path): # Plot unit cell parameters vs. temperature. # %% -project.plot_param_series(structure.cell.length_a, versus=temperature) -project.plot_param_series(structure.cell.length_b, versus=temperature) -project.plot_param_series(structure.cell.length_c, versus=temperature) +project.plotter.plot_param_series(structure.cell.length_a, versus=temperature) +project.plotter.plot_param_series(structure.cell.length_b, versus=temperature) +project.plotter.plot_param_series(structure.cell.length_c, versus=temperature) # %% [markdown] # Plot isotropic displacement parameters vs. temperature. # %% -project.plot_param_series(structure.atom_sites['Co1'].b_iso, versus=temperature) -project.plot_param_series(structure.atom_sites['Si'].b_iso, versus=temperature) -project.plot_param_series(structure.atom_sites['O1'].b_iso, versus=temperature) -project.plot_param_series(structure.atom_sites['O2'].b_iso, versus=temperature) -project.plot_param_series(structure.atom_sites['O3'].b_iso, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['Co1'].b_iso, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['Si'].b_iso, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['O1'].b_iso, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['O2'].b_iso, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['O3'].b_iso, versus=temperature) # %% [markdown] # Plot selected fractional coordinates vs. temperature. # %% -project.plot_param_series(structure.atom_sites['Co2'].fract_x, versus=temperature) -project.plot_param_series(structure.atom_sites['Co2'].fract_z, versus=temperature) -project.plot_param_series(structure.atom_sites['O1'].fract_z, versus=temperature) -project.plot_param_series(structure.atom_sites['O2'].fract_z, versus=temperature) -project.plot_param_series(structure.atom_sites['O3'].fract_z, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['Co2'].fract_x, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['Co2'].fract_z, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['O1'].fract_z, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['O2'].fract_z, versus=temperature) +project.plotter.plot_param_series(structure.atom_sites['O3'].fract_z, versus=temperature) diff --git a/docs/docs/tutorials/ed-18.py b/docs/docs/tutorials/ed-18.py index f4485dbc..ee07a708 100644 --- a/docs/docs/tutorials/ed-18.py +++ b/docs/docs/tutorials/ed-18.py @@ -44,13 +44,13 @@ # ## Show Results # %% -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # ## Plot Meas vs Calc # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% [markdown] # ## Save Project diff --git a/docs/docs/tutorials/ed-2.py b/docs/docs/tutorials/ed-2.py index 3c8d033e..4dd78389 100644 --- a/docs/docs/tutorials/ed-2.py +++ b/docs/docs/tutorials/ed-2.py @@ -160,7 +160,7 @@ # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) diff --git a/docs/docs/tutorials/ed-3.py b/docs/docs/tutorials/ed-3.py index 1a79d789..a8b84b7d 100644 --- a/docs/docs/tutorials/ed-3.py +++ b/docs/docs/tutorials/ed-3.py @@ -227,7 +227,7 @@ # #### Show Measured Data # %% -project.plot_meas(expt_name='hrpt') +project.plotter.plot_meas(expt_name='hrpt') # %% [markdown] # #### Set Instrument @@ -354,16 +354,16 @@ # #### Show Calculated Data # %% -project.plot_calc(expt_name='hrpt') +project.plotter.plot_calc(expt_name='hrpt') # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) # %% [markdown] # #### Show Parameters @@ -371,25 +371,25 @@ # Show all parameters of the project. # %% -# project.analysis.show_all_params() +# project.analysis.display.all_params() # %% [markdown] # Show all fittable parameters. # %% -project.analysis.show_fittable_params() +project.analysis.display.fittable_params() # %% [markdown] # Show only free parameters. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # Show how to access parameters in the code. # %% -# project.analysis.how_to_access_parameters() +# project.analysis.display.how_to_access_parameters() # %% [markdown] # #### Set Fit Mode @@ -455,23 +455,23 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) # %% [markdown] # #### Save Project State @@ -494,23 +494,23 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) # %% [markdown] # #### Save Project State @@ -533,23 +533,23 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) # %% [markdown] # #### Save Project State @@ -584,29 +584,29 @@ # Show defined constraints. # %% -project.analysis.show_constraints() +project.analysis.display.constraints() # %% [markdown] # Show free parameters. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) # %% [markdown] # #### Save Project State @@ -643,7 +643,7 @@ # Show defined constraints. # %% -project.analysis.show_constraints() +project.analysis.display.constraints() # %% [markdown] @@ -656,23 +656,23 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=38, x_max=41, show_residual=True) # %% [markdown] # #### Save Project State diff --git a/docs/docs/tutorials/ed-4.py b/docs/docs/tutorials/ed-4.py index 3275deab..e2e1942b 100644 --- a/docs/docs/tutorials/ed-4.py +++ b/docs/docs/tutorials/ed-4.py @@ -313,13 +313,13 @@ # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='npd', x_min=35.5, x_max=38.3, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='npd', x_min=35.5, x_max=38.3, show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='xrd', x_min=29.0, x_max=30.4, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='xrd', x_min=29.0, x_max=30.4, show_residual=True) diff --git a/docs/docs/tutorials/ed-5.py b/docs/docs/tutorials/ed-5.py index 4e41a905..58a339f0 100644 --- a/docs/docs/tutorials/ed-5.py +++ b/docs/docs/tutorials/ed-5.py @@ -202,10 +202,10 @@ # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='d20', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='d20', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='d20', x_min=41, x_max=54, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='d20', x_min=41, x_max=54, show_residual=True) # %% [markdown] # #### Set Free Parameters @@ -276,16 +276,16 @@ # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='d20', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='d20', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='d20', x_min=41, x_max=54, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='d20', x_min=41, x_max=54, show_residual=True) # %% [markdown] # ## Summary diff --git a/docs/docs/tutorials/ed-6.py b/docs/docs/tutorials/ed-6.py index e0339c91..70c1c194 100644 --- a/docs/docs/tutorials/ed-6.py +++ b/docs/docs/tutorials/ed-6.py @@ -190,10 +190,10 @@ # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) # %% [markdown] # ### Perform Fit 1/5 @@ -211,7 +211,7 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting @@ -220,16 +220,16 @@ project.analysis.fit() # %% -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) # %% [markdown] # ### Perform Fit 2/5 @@ -249,7 +249,7 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting @@ -258,16 +258,16 @@ project.analysis.fit() # %% -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) # %% [markdown] # ### Perform Fit 3/5 @@ -285,7 +285,7 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting @@ -294,16 +294,16 @@ project.analysis.fit() # %% -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) # %% [markdown] # ### Perform Fit 4/5 @@ -321,7 +321,7 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting @@ -330,16 +330,16 @@ project.analysis.fit() # %% -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=48, x_max=51, show_residual=True) # %% [markdown] # ## Summary diff --git a/docs/docs/tutorials/ed-7.py b/docs/docs/tutorials/ed-7.py index cd719154..033a5fcc 100644 --- a/docs/docs/tutorials/ed-7.py +++ b/docs/docs/tutorials/ed-7.py @@ -149,8 +149,8 @@ # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='sepd', show_residual=True) -project.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) # %% [markdown] # ### Perform Fit 1/5 @@ -167,23 +167,23 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='sepd', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) # %% [markdown] # ### Perform Fit 2/5 @@ -198,23 +198,23 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='sepd', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) # %% [markdown] # ### Perform Fit 3/5 @@ -237,23 +237,23 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='sepd', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) # %% [markdown] # ### Perform Fit 4/5 @@ -267,20 +267,20 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='sepd', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='sepd', x_min=23200, x_max=23700, show_residual=True) diff --git a/docs/docs/tutorials/ed-8.py b/docs/docs/tutorials/ed-8.py index b8cdf0bd..5c5cea9a 100644 --- a/docs/docs/tutorials/ed-8.py +++ b/docs/docs/tutorials/ed-8.py @@ -344,26 +344,26 @@ # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='wish_5_6', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='wish_5_6', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='wish_4_7', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='wish_4_7', show_residual=True) # %% [markdown] # #### Run Fitting # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='wish_5_6', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='wish_5_6', show_residual=True) # %% -project.plot_meas_vs_calc(expt_name='wish_4_7', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='wish_4_7', show_residual=True) # %% [markdown] # ## Summary diff --git a/docs/docs/tutorials/ed-9.py b/docs/docs/tutorials/ed-9.py index 34da9359..c13a8af6 100644 --- a/docs/docs/tutorials/ed-9.py +++ b/docs/docs/tutorials/ed-9.py @@ -230,7 +230,7 @@ # Show measured data as loaded from the file. # %% -project.plot_meas(expt_name='mcstas') +project.plotter.plot_meas(expt_name='mcstas') # %% [markdown] # Add excluded regions. @@ -249,7 +249,7 @@ # Show measured data after adding excluded regions. # %% -project.plot_meas(expt_name='mcstas') +project.plotter.plot_meas(expt_name='mcstas') # %% [markdown] # Show experiment as CIF. @@ -303,12 +303,12 @@ # %% project.analysis.fit() -project.analysis.show_fit_results() +project.analysis.display.fit_results() # %% [markdown] # #### Plot Measured vs Calculated # %% -project.plot_meas_vs_calc(expt_name='mcstas') +project.plotter.plot_meas_vs_calc(expt_name='mcstas') # %% diff --git a/docs/docs/user-guide/analysis-workflow/analysis.md b/docs/docs/user-guide/analysis-workflow/analysis.md index 76aaa699..86dbaec7 100644 --- a/docs/docs/user-guide/analysis-workflow/analysis.md +++ b/docs/docs/user-guide/analysis-workflow/analysis.md @@ -251,7 +251,7 @@ To plot the measured vs calculated data after the fit, you can use the `plot_meas_vs_calc` method of the `analysis` object: ```python -project.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) +project.plotter.plot_meas_vs_calc(expt_name='hrpt', show_residual=True) ``` ## Constraints @@ -319,7 +319,7 @@ To view the defined constraints, you can use the `show_constraints` method: ```python -project.analysis.show_constraints() +project.analysis.display.constraints() ``` The example of the output is: diff --git a/docs/docs/user-guide/first-steps.md b/docs/docs/user-guide/first-steps.md index b4366684..2f443398 100644 --- a/docs/docs/user-guide/first-steps.md +++ b/docs/docs/user-guide/first-steps.md @@ -125,22 +125,23 @@ project.show_available_minimizers() EasyDiffraction provides several methods for showing the available parameters grouped in different categories. For example, you can use: -- `project.analysis.show_all_params()` – to display all available +- `project.analysis.display.all_params()` – to display all available parameters for the analysis step. -- `project.analysis.show_fittable_params()` – to display only the +- `project.analysis.display.fittable_params()` – to display only the parameters that can be fitted during the analysis. -- `project.analysis.show_free_params()` – to display the parameters that - are currently free to be adjusted during the fitting process. +- `project.analysis.display.free_params()` – to display the parameters + that are currently free to be adjusted during the fitting process. -Finally, you can use the `project.analysis.how_to_access_parameters()` -method to get a brief overview of how to access and modify parameters in -the analysis step, along with their unique identifiers in the CIF -format. This can be particularly useful for users who are new to the -EasyDiffraction API or those who want to quickly understand how to work -with parameters in their projects. +Finally, you can use the +`project.analysis.display.how_to_access_parameters()` method to get a +brief overview of how to access and modify parameters in the analysis +step, along with their unique identifiers in the CIF format. This can be +particularly useful for users who are new to the EasyDiffraction API or +those who want to quickly understand how to work with parameters in +their projects. An example of the output for the -`project.analysis.how_to_access_parameters()` method is: +`project.analysis.display.how_to_access_parameters()` method is: | | Code variable | Unique ID for CIF | | --- | --------------------------------------------------- | -------------------------------- | diff --git a/pixi.lock b/pixi.lock index 7a1e6d18..ccc7e32c 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty37 - sha256: e539e3ac0f6beb96be004e85b00dd6280257acc9d1cba7aa077b87aa81653674 + version: 0.11.1+devdirty42 + sha256: 8b87b54bac3ff3c4832bb827e7bc7e201d696a6c33b5a71f36c2e5f59a05b18e requires_dist: - asciichartpy - asteval diff --git a/pixi.toml b/pixi.toml index 8ae7a65f..30986cf8 100644 --- a/pixi.toml +++ b/pixi.toml @@ -98,10 +98,7 @@ integration-tests = 'python -m pytest tests/integration/ --color=yes -n auto -v' script-tests = 'python -m pytest tools/test_scripts.py --color=yes -n auto -v' notebook-tests = 'python -m pytest --nbmake docs/docs/tutorials/ --nbmake-timeout=1200 --color=yes -n auto -v' -test = { depends-on = [ - 'unit-tests', - 'functional-tests', -] } +test = { depends-on = ['unit-tests', 'functional-tests'] } ########### # ✔️ Checks diff --git a/pyproject.toml b/pyproject.toml index e0e3ad48..30b6953c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -236,8 +236,8 @@ select = [ 'PGH', # https://docs.astral.sh/ruff/rules/#pygrep-hooks-pgh 'PERF', # https://docs.astral.sh/ruff/rules/#perflint-perf 'RUF', # https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf - 'TRY', # https://docs.astral.sh/ruff/rules/#tryceratops-try - 'UP', # https://docs.astral.sh/ruff/rules/#pyupgrade-up + 'TRY', # https://docs.astral.sh/ruff/rules/#tryceratops-try + 'UP', # https://docs.astral.sh/ruff/rules/#pyupgrade-up # pycodestyle (E, W) rules 'E', # https://docs.astral.sh/ruff/rules/#error-e 'W', # https://docs.astral.sh/ruff/rules/#warning-w @@ -375,7 +375,6 @@ convention = 'numpy' max-args = 6 max-positional-args = 6 - ############################# # Configuration for pydoclint ############################# diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index 644c0058..568b790e 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -28,231 +28,88 @@ from easydiffraction.utils.utils import render_table -class Analysis: - """ - High-level orchestration of analysis tasks for a Project. - - This class wires calculators and minimizers, exposes a compact - interface for parameters, constraints and results, and coordinates - computations across the project's structures and experiments. +def _discover_property_rows(cls: type) -> list[list[str]]: """ + Discover public properties from the class MRO. - def __init__(self, project: object) -> None: - """ - Create a new Analysis instance bound to a project. - - Parameters - ---------- - project : object - The project that owns models and experiments. - """ - self.project = project - self._aliases_type: str = AliasesFactory.default_tag() - self.aliases = AliasesFactory.create(self._aliases_type) - self._constraints_type: str = ConstraintsFactory.default_tag() - self.constraints = ConstraintsFactory.create(self._constraints_type) - self.constraints_handler = ConstraintsHandler.get() - self._fit_mode_type: str = FitModeFactory.default_tag() - self._fit_mode = FitModeFactory.create(self._fit_mode_type) - self._joint_fit_experiments = JointFitExperiments() - self.fitter = Fitter('lmfit') - self.fit_results = None - self._parameter_snapshots: dict[str, dict[str, dict]] = {} - - def help(self) -> None: - """Print a summary of analysis properties and methods.""" - console.paragraph("Help for 'Analysis'") - - cls = type(self) - - # Auto-discover properties from MRO - seen_props: dict = {} - for base in cls.mro(): - for key, attr in base.__dict__.items(): - if key.startswith('_') or not isinstance(attr, property): - continue - if key not in seen_props: - seen_props[key] = attr - - prop_rows = [] - for i, key in enumerate(sorted(seen_props), 1): - prop = seen_props[key] - writable = '✓' if prop.fset else '✗' - doc = GuardedBase._first_sentence(prop.fget.__doc__ if prop.fget else None) - prop_rows.append([str(i), key, writable, doc]) - - if prop_rows: - console.paragraph('Properties') - render_table( - columns_headers=['#', 'Name', 'Writable', 'Description'], - columns_alignment=['right', 'left', 'center', 'left'], - columns_data=prop_rows, - ) - - # Auto-discover methods from MRO - seen_methods: set = set() - methods_list: list = [] - for base in cls.mro(): - for key, attr in base.__dict__.items(): - if key.startswith('_') or key in seen_methods: - continue - if isinstance(attr, property): - continue - raw = attr - if isinstance(raw, (staticmethod, classmethod)): - raw = raw.__func__ - if callable(raw): - seen_methods.add(key) - methods_list.append((key, raw)) - - method_rows = [] - for i, (key, method) in enumerate(sorted(methods_list), 1): - doc = GuardedBase._first_sentence(getattr(method, '__doc__', None)) - method_rows.append([str(i), f'{key}()', doc]) - - if method_rows: - console.paragraph('Methods') - render_table( - columns_headers=['#', 'Name', 'Description'], - columns_alignment=['right', 'left', 'left'], - columns_data=method_rows, - ) - - # ------------------------------------------------------------------ - # Aliases (switchable-category pattern) - # ------------------------------------------------------------------ - - @property - def aliases_type(self) -> str: - """Tag of the active aliases collection type.""" - return self._aliases_type - - @aliases_type.setter - def aliases_type(self, new_type: str) -> None: - """ - Switch to a different aliases collection type. - - Parameters - ---------- - new_type : str - Aliases tag (e.g. ``'default'``). - """ - supported_tags = AliasesFactory.supported_tags() - if new_type not in supported_tags: - log.warning( - f"Unsupported aliases type '{new_type}'. " - f'Supported: {supported_tags}. ' - f"For more information, use 'show_supported_aliases_types()'", - ) - return - self.aliases = AliasesFactory.create(new_type) - self._aliases_type = new_type - console.paragraph('Aliases type changed to') - console.print(new_type) - - def show_supported_aliases_types(self) -> None: # noqa: PLR6301 - """Print a table of supported aliases collection types.""" - AliasesFactory.show_supported() - - def show_current_aliases_type(self) -> None: - """Print the currently used aliases collection type.""" - console.paragraph('Current aliases type') - console.print(self._aliases_type) - - # ------------------------------------------------------------------ - # Constraints (switchable-category pattern) - # ------------------------------------------------------------------ - - @property - def constraints_type(self) -> str: - """Tag of the active constraints collection type.""" - return self._constraints_type - - @constraints_type.setter - def constraints_type(self, new_type: str) -> None: - """ - Switch to a different constraints collection type. - - Parameters - ---------- - new_type : str - Constraints tag (e.g. ``'default'``). - """ - supported_tags = ConstraintsFactory.supported_tags() - if new_type not in supported_tags: - log.warning( - f"Unsupported constraints type '{new_type}'. " - f'Supported: {supported_tags}. ' - f"For more information, use 'show_supported_constraints_types()'", - ) - return - self.constraints = ConstraintsFactory.create(new_type) - self._constraints_type = new_type - console.paragraph('Constraints type changed to') - console.print(new_type) - - def show_supported_constraints_types(self) -> None: # noqa: PLR6301 - """Print a table of supported constraints collection types.""" - ConstraintsFactory.show_supported() + Parameters + ---------- + cls : type + The class to inspect. - def show_current_constraints_type(self) -> None: - """Print the currently used constraints collection type.""" - console.paragraph('Current constraints type') - console.print(self._constraints_type) + Returns + ------- + list[list[str]] + Table rows with ``[index, name, writable, description]``. + """ + seen: dict = {} + for base in cls.mro(): + for key, attr in base.__dict__.items(): + if key.startswith('_') or not isinstance(attr, property): + continue + if key not in seen: + seen[key] = attr + + rows = [] + for i, key in enumerate(sorted(seen), 1): + prop = seen[key] + writable = '✓' if prop.fset else '✗' + doc = GuardedBase._first_sentence(prop.fget.__doc__ if prop.fget else None) + rows.append([str(i), key, writable, doc]) + return rows + + +def _discover_method_rows(cls: type) -> list[list[str]]: + """ + Discover public methods from the class MRO. - @staticmethod - def _get_params_as_dataframe( - params: list[NumericDescriptor | Parameter], - ) -> pd.DataFrame: - """ - Convert a list of parameters to a DataFrame. + Parameters + ---------- + cls : type + The class to inspect. - Parameters - ---------- - params : list[NumericDescriptor | Parameter] - List of DescriptorFloat or Parameter objects. + Returns + ------- + list[list[str]] + Table rows with ``[index, name(), description]``. + """ + seen_methods: set = set() + methods_list: list = [] + for base in cls.mro(): + for key, attr in base.__dict__.items(): + if key.startswith('_') or key in seen_methods: + continue + if isinstance(attr, property): + continue + raw = attr + if isinstance(raw, (staticmethod, classmethod)): + raw = raw.__func__ + if callable(raw): + seen_methods.add(key) + methods_list.append((key, raw)) + + rows = [] + for i, (key, method) in enumerate(sorted(methods_list), 1): + doc = GuardedBase._first_sentence(getattr(method, '__doc__', None)) + rows.append([str(i), f'{key}()', doc]) + return rows + + +class AnalysisDisplay: + """ + Display helper - parameter tables, CIF, and fit results. - Returns - ------- - pd.DataFrame - A pandas DataFrame containing parameter information. - """ - records = [] - for param in params: - record = {} - # TODO: Merge into one. Add field if attr exists - # TODO: f'{param.value!r}' for StringDescriptor? - if isinstance(param, (StringDescriptor, NumericDescriptor, Parameter)): - record = { - ('fittable', 'left'): False, - ('datablock', 'left'): param._identity.datablock_entry_name, - ('category', 'left'): param._identity.category_code, - ('entry', 'left'): param._identity.category_entry_name or '', - ('parameter', 'left'): param.name, - ('value', 'right'): param.value, - } - if isinstance(param, (NumericDescriptor, Parameter)): - record |= { - ('units', 'left'): param.units, - } - if isinstance(param, Parameter): - record |= { - ('fittable', 'left'): True, - ('free', 'left'): param.free, - ('min', 'right'): param.fit_min, - ('max', 'right'): param.fit_max, - ('uncertainty', 'right'): param.uncertainty or '', - } - records.append(record) + Accessed via ``analysis.display``. + """ - df = pd.DataFrame.from_records(records) - df.columns = pd.MultiIndex.from_tuples(df.columns) - return df + def __init__(self, analysis: 'Analysis') -> None: + self._analysis = analysis - def show_all_params(self) -> None: + def all_params(self) -> None: """Print all parameters for structures and experiments.""" - structures_params = self.project.structures.parameters - experiments_params = self.project.experiments.parameters + project = self._analysis.project + structures_params = project.structures.parameters + experiments_params = project.experiments.parameters if not structures_params and not experiments_params: log.warning('No parameters found.') @@ -270,19 +127,20 @@ def show_all_params(self) -> None: ] console.paragraph('All parameters for all structures (🧩 data blocks)') - df = self._get_params_as_dataframe(structures_params) + df = Analysis._get_params_as_dataframe(structures_params) filtered_df = df[filtered_headers] tabler.render(filtered_df) console.paragraph('All parameters for all experiments (🔬 data blocks)') - df = self._get_params_as_dataframe(experiments_params) + df = Analysis._get_params_as_dataframe(experiments_params) filtered_df = df[filtered_headers] tabler.render(filtered_df) - def show_fittable_params(self) -> None: + def fittable_params(self) -> None: """Print all fittable parameters.""" - structures_params = self.project.structures.fittable_parameters - experiments_params = self.project.experiments.fittable_parameters + project = self._analysis.project + structures_params = project.structures.fittable_parameters + experiments_params = project.experiments.fittable_parameters if not structures_params and not experiments_params: log.warning('No fittable parameters found.') @@ -302,19 +160,20 @@ def show_fittable_params(self) -> None: ] console.paragraph('Fittable parameters for all structures (🧩 data blocks)') - df = self._get_params_as_dataframe(structures_params) + df = Analysis._get_params_as_dataframe(structures_params) filtered_df = df[filtered_headers] tabler.render(filtered_df) console.paragraph('Fittable parameters for all experiments (🔬 data blocks)') - df = self._get_params_as_dataframe(experiments_params) + df = Analysis._get_params_as_dataframe(experiments_params) filtered_df = df[filtered_headers] tabler.render(filtered_df) - def show_free_params(self) -> None: + def free_params(self) -> None: """Print only currently free (varying) parameters.""" - structures_params = self.project.structures.free_parameters - experiments_params = self.project.experiments.free_parameters + project = self._analysis.project + structures_params = project.structures.free_parameters + experiments_params = project.experiments.free_parameters free_params = structures_params + experiments_params if not free_params: @@ -338,7 +197,7 @@ def show_free_params(self) -> None: console.paragraph( 'Free parameters for both structures (🧩 data blocks) and experiments (🔬 data blocks)' ) - df = self._get_params_as_dataframe(free_params) + df = Analysis._get_params_as_dataframe(free_params) filtered_df = df[filtered_headers] tabler.render(filtered_df) @@ -349,8 +208,9 @@ def how_to_access_parameters(self) -> None: The output explains how to reference specific parameters in code. """ - structures_params = self.project.structures.parameters - experiments_params = self.project.experiments.parameters + project = self._analysis.project + structures_params = project.structures.parameters + experiments_params = project.experiments.parameters all_params = { 'structures': structures_params, 'experiments': experiments_params, @@ -377,7 +237,7 @@ def how_to_access_parameters(self) -> None: ] columns_data = [] - project_varname = self.project._varname + project_varname = project._varname for datablock_code, params in all_params.items(): for param in params: if isinstance(param, (StringDescriptor, NumericDescriptor, Parameter)): @@ -407,15 +267,16 @@ def how_to_access_parameters(self) -> None: columns_data=columns_data, ) - def show_parameter_cif_uids(self) -> None: + def parameter_cif_uids(self) -> None: """ Show CIF unique IDs for all parameters. The output explains which unique identifiers are used when creating CIF-based constraints. """ - structures_params = self.project.structures.parameters - experiments_params = self.project.experiments.parameters + project = self._analysis.project + structures_params = project.structures.parameters + experiments_params = project.experiments.parameters all_params = { 'structures': structures_params, 'experiments': experiments_params, @@ -465,6 +326,245 @@ def show_parameter_cif_uids(self) -> None: columns_data=columns_data, ) + def constraints(self) -> None: + """Print a table of all user-defined symbolic constraints.""" + analysis = self._analysis + if not analysis.constraints._items: + log.warning('No constraints defined.') + return + + rows = [[constraint.expression.value] for constraint in analysis.constraints] + + console.paragraph('User defined constraints') + render_table( + columns_headers=['expression'], + columns_alignment=['left'], + columns_data=rows, + ) + console.print(f'Constraints enabled: {analysis.constraints.enabled}') + + def fit_results(self) -> None: + """ + Display a summary of the fit results. + + Renders the fit quality metrics (reduced χ², R-factors) and a + table of fitted parameters with their starting values, final + values, and uncertainties. + + This method should be called after :meth:`Analysis.fit` + completes. If no fit has been performed yet, a warning is + logged. + """ + analysis = self._analysis + if analysis.fit_results is None: + log.warning('No fit results available. Run fit() first.') + return + + structures = analysis.project.structures + experiments = list(analysis.project.experiments.values()) + + analysis.fitter._process_fit_results(structures, experiments) + + def as_cif(self) -> None: + """Render the analysis section as CIF in console.""" + cif_text: str = self._analysis.as_cif() + paragraph_title: str = 'Analysis 🧮 info as cif' + console.paragraph(paragraph_title) + render_cif(cif_text) + + +class Analysis: + """ + High-level orchestration of analysis tasks for a Project. + + This class wires calculators and minimizers, exposes a compact + interface for parameters, constraints and results, and coordinates + computations across the project's structures and experiments. + """ + + def __init__(self, project: object) -> None: + """ + Create a new Analysis instance bound to a project. + + Parameters + ---------- + project : object + The project that owns models and experiments. + """ + self.project = project + self._aliases_type: str = AliasesFactory.default_tag() + self.aliases = AliasesFactory.create(self._aliases_type) + self._constraints_type: str = ConstraintsFactory.default_tag() + self.constraints = ConstraintsFactory.create(self._constraints_type) + self.constraints_handler = ConstraintsHandler.get() + self._fit_mode_type: str = FitModeFactory.default_tag() + self._fit_mode = FitModeFactory.create(self._fit_mode_type) + self._joint_fit_experiments = JointFitExperiments() + self.fitter = Fitter('lmfit') + self.fit_results = None + self._parameter_snapshots: dict[str, dict[str, dict]] = {} + self._display = AnalysisDisplay(self) + + @property + def display(self) -> AnalysisDisplay: + """Display helper for parameter tables, CIF, and fit results.""" + return self._display + + def help(self) -> None: + """Print a summary of analysis properties and methods.""" + console.paragraph("Help for 'Analysis'") + + cls = type(self) + + prop_rows = _discover_property_rows(cls) + if prop_rows: + console.paragraph('Properties') + render_table( + columns_headers=['#', 'Name', 'Writable', 'Description'], + columns_alignment=['right', 'left', 'center', 'left'], + columns_data=prop_rows, + ) + + method_rows = _discover_method_rows(cls) + if method_rows: + console.paragraph('Methods') + render_table( + columns_headers=['#', 'Name', 'Description'], + columns_alignment=['right', 'left', 'left'], + columns_data=method_rows, + ) + + # ------------------------------------------------------------------ + # Aliases (switchable-category pattern) + # ------------------------------------------------------------------ + + @property + def aliases_type(self) -> str: + """Tag of the active aliases collection type.""" + return self._aliases_type + + @aliases_type.setter + def aliases_type(self, new_type: str) -> None: + """ + Switch to a different aliases collection type. + + Parameters + ---------- + new_type : str + Aliases tag (e.g. ``'default'``). + """ + supported_tags = AliasesFactory.supported_tags() + if new_type not in supported_tags: + log.warning( + f"Unsupported aliases type '{new_type}'. " + f'Supported: {supported_tags}. ' + f"For more information, use 'show_supported_aliases_types()'", + ) + return + self.aliases = AliasesFactory.create(new_type) + self._aliases_type = new_type + console.paragraph('Aliases type changed to') + console.print(new_type) + + def show_supported_aliases_types(self) -> None: # noqa: PLR6301 + """Print a table of supported aliases collection types.""" + AliasesFactory.show_supported() + + def show_current_aliases_type(self) -> None: + """Print the currently used aliases collection type.""" + console.paragraph('Current aliases type') + console.print(self._aliases_type) + + # ------------------------------------------------------------------ + # Constraints (switchable-category pattern) + # ------------------------------------------------------------------ + + @property + def constraints_type(self) -> str: + """Tag of the active constraints collection type.""" + return self._constraints_type + + @constraints_type.setter + def constraints_type(self, new_type: str) -> None: + """ + Switch to a different constraints collection type. + + Parameters + ---------- + new_type : str + Constraints tag (e.g. ``'default'``). + """ + supported_tags = ConstraintsFactory.supported_tags() + if new_type not in supported_tags: + log.warning( + f"Unsupported constraints type '{new_type}'. " + f'Supported: {supported_tags}. ' + f"For more information, use 'show_supported_constraints_types()'", + ) + return + self.constraints = ConstraintsFactory.create(new_type) + self._constraints_type = new_type + console.paragraph('Constraints type changed to') + console.print(new_type) + + def show_supported_constraints_types(self) -> None: # noqa: PLR6301 + """Print a table of supported constraints collection types.""" + ConstraintsFactory.show_supported() + + def show_current_constraints_type(self) -> None: + """Print the currently used constraints collection type.""" + console.paragraph('Current constraints type') + console.print(self._constraints_type) + + @staticmethod + def _get_params_as_dataframe( + params: list[NumericDescriptor | Parameter], + ) -> pd.DataFrame: + """ + Convert a list of parameters to a DataFrame. + + Parameters + ---------- + params : list[NumericDescriptor | Parameter] + List of DescriptorFloat or Parameter objects. + + Returns + ------- + pd.DataFrame + A pandas DataFrame containing parameter information. + """ + records = [] + for param in params: + record = {} + # TODO: Merge into one. Add field if attr exists + # TODO: f'{param.value!r}' for StringDescriptor? + if isinstance(param, (StringDescriptor, NumericDescriptor, Parameter)): + record = { + ('fittable', 'left'): False, + ('datablock', 'left'): param._identity.datablock_entry_name, + ('category', 'left'): param._identity.category_code, + ('entry', 'left'): param._identity.category_entry_name or '', + ('parameter', 'left'): param.name, + ('value', 'right'): param.value, + } + if isinstance(param, (NumericDescriptor, Parameter)): + record |= { + ('units', 'left'): param.units, + } + if isinstance(param, Parameter): + record |= { + ('fittable', 'left'): True, + ('free', 'left'): param.free, + ('min', 'right'): param.fit_min, + ('max', 'right'): param.fit_max, + ('uncertainty', 'right'): param.uncertainty or '', + } + records.append(record) + + df = pd.DataFrame.from_records(records) + df.columns = pd.MultiIndex.from_tuples(df.columns) + return df + def show_current_minimizer(self) -> None: """Print the name of the currently selected minimizer.""" console.paragraph('Current minimizer') @@ -549,28 +649,12 @@ def joint_fit_experiments(self) -> object: """Per-experiment weight collection for joint fitting.""" return self._joint_fit_experiments - def show_constraints(self) -> None: - """Print a table of all user-defined symbolic constraints.""" - if not self.constraints._items: - log.warning('No constraints defined.') - return - - rows = [[constraint.expression.value] for constraint in self.constraints] - - console.paragraph('User defined constraints') - render_table( - columns_headers=['expression'], - columns_alignment=['left'], - columns_data=rows, - ) - console.print(f'Constraints enabled: {self.constraints.enabled}') - def fit(self, verbosity: str | None = None) -> None: """ Execute fitting for all experiments. This method performs the optimization but does not display - results automatically. Call :meth:`show_fit_results` after + results automatically. Call :meth:`display.fit_results` after fitting to see a summary of the fit quality and parameter values. @@ -615,111 +699,203 @@ def fit(self, verbosity: str | None = None) -> None: # Run the fitting process mode = FitModeEnum(self._fit_mode.mode.value) if mode is FitModeEnum.JOINT: - # Auto-populate joint_fit_experiments if empty - if not len(self._joint_fit_experiments): - for id in experiments.names: - self._joint_fit_experiments.create(id=id, weight=0.5) - if verb is not VerbosityEnum.SILENT: - console.paragraph( - f"Using all experiments 🔬 {experiments.names} for '{mode.value}' fitting" - ) - # Resolve weights to a plain numpy array - experiments_list = list(experiments.values()) - weights_list = [ - self._joint_fit_experiments[name].weight.value for name in experiments.names - ] - weights_array = np.array(weights_list, dtype=np.float64) + self._fit_joint(verb, structures, experiments) + elif mode is FitModeEnum.SINGLE: + self._fit_single(verb, structures, experiments) + else: + msg = f'Fit mode {mode.value} not implemented yet.' + raise NotImplementedError(msg) + + # After fitting, save the project + if self.project.info.path is not None: + self.project.save() + + def _fit_joint( + self, + verb: VerbosityEnum, + structures: object, + experiments: object, + ) -> None: + """ + Run joint fitting across all experiments with weights. + + Parameters + ---------- + verb : VerbosityEnum + Output verbosity. + structures : object + Project structures collection. + experiments : object + Project experiments collection. + """ + mode = FitModeEnum.JOINT + # Auto-populate joint_fit_experiments if empty + if not len(self._joint_fit_experiments): + for id in experiments.names: + self._joint_fit_experiments.create(id=id, weight=0.5) + if verb is not VerbosityEnum.SILENT: + console.paragraph( + f"Using all experiments 🔬 {experiments.names} for '{mode.value}' fitting" + ) + # Resolve weights to a plain numpy array + experiments_list = list(experiments.values()) + weights_list = [ + self._joint_fit_experiments[name].weight.value for name in experiments.names + ] + weights_array = np.array(weights_list, dtype=np.float64) + self.fitter.fit( + structures, + experiments_list, + weights=weights_array, + analysis=self, + verbosity=verb, + ) + + # After fitting, get the results + self.fit_results = self.fitter.results + + def _fit_single( + self, + verb: VerbosityEnum, + structures: object, + experiments: object, + ) -> None: + """ + Run single-mode fitting for each experiment independently. + + Parameters + ---------- + verb : VerbosityEnum + Output verbosity. + structures : object + Project structures collection. + experiments : object + Project experiments collection. + """ + mode = FitModeEnum.SINGLE + expt_names = experiments.names + + short_display_handle = self._fit_single_print_header(verb, expt_names, mode) + short_rows: list[list[str]] = [] + + for _idx, expt_name in enumerate(expt_names, start=1): + if verb is VerbosityEnum.FULL: + console.print(f"📋 Using experiment 🔬 '{expt_name}' for '{mode.value}' fitting") + + experiment = experiments[expt_name] self.fitter.fit( structures, - experiments_list, - weights=weights_array, + [experiment], analysis=self, verbosity=verb, ) - # After fitting, get the results - self.fit_results = self.fitter.results + # After fitting, snapshot parameter values before + # they get overwritten by the next experiment's fit + results = self.fitter.results + self._snapshot_params(expt_name, results) + self.fit_results = results - elif mode is FitModeEnum.SINGLE: - expt_names = experiments.names - num_expts = len(expt_names) - - # Short mode: print header and create display handle once - short_headers = ['experiment', 'χ²', 'iterations', 'status'] - short_alignments = ['left', 'right', 'right', 'center'] - short_rows: list[list[str]] = [] - short_display_handle: object | None = None - if verb is not VerbosityEnum.SILENT: - console.paragraph('Standard fitting') + # Short mode: append one summary row and update in-place if verb is VerbosityEnum.SHORT: - first = expt_names[0] - last = expt_names[-1] - minimizer_name = self.fitter.selection - console.print( - f"📋 Using {num_expts} experiments 🔬 from '{first}' to " - f"'{last}' for '{mode.value}' fitting" + self._fit_single_update_short_table( + short_rows, expt_name, results, short_display_handle ) - console.print(f"🚀 Starting fit process with '{minimizer_name}'...") - console.print('📈 Goodness-of-fit (reduced χ²) per experiment:') - short_display_handle = _make_display_handle() - - for _idx, expt_name in enumerate(expt_names, start=1): - if verb is VerbosityEnum.FULL: - console.print( - f"📋 Using experiment 🔬 '{expt_name}' for '{mode.value}' fitting" - ) - experiment = experiments[expt_name] - experiments_list = [experiment] - self.fitter.fit( - structures, - experiments_list, - analysis=self, - verbosity=verb, - ) + # Short mode: close the display handle + if short_display_handle is not None and hasattr(short_display_handle, 'close'): + with suppress(Exception): + short_display_handle.close() - # After fitting, snapshot parameter values before - # they get overwritten by the next experiment's fit - results = self.fitter.results - snapshot: dict[str, dict] = {} - for param in results.parameters: - snapshot[param.unique_name] = { - 'value': param.value, - 'uncertainty': param.uncertainty, - 'units': param.units, - } - self._parameter_snapshots[expt_name] = snapshot - self.fit_results = results - - # Short mode: append one summary row and update in-place - if verb is VerbosityEnum.SHORT: - chi2_str = ( - f'{results.reduced_chi_square:.2f}' - if results.reduced_chi_square is not None - else '—' - ) - iters = str(self.fitter.minimizer.tracker.best_iteration or 0) - status = '✅' if results.success else '❌' - short_rows.append([expt_name, chi2_str, iters, status]) - render_table( - columns_headers=short_headers, - columns_alignment=short_alignments, - columns_data=short_rows, - display_handle=short_display_handle, - ) + @staticmethod + def _fit_single_print_header( + verb: VerbosityEnum, + expt_names: list[str], + mode: FitModeEnum, + ) -> object | None: + """ + Print the header for single-mode fitting. - # Short mode: close the display handle - if short_display_handle is not None and hasattr(short_display_handle, 'close'): - with suppress(Exception): - short_display_handle.close() + Parameters + ---------- + verb : VerbosityEnum + Output verbosity. + expt_names : list[str] + Experiment names. + mode : FitModeEnum + The fit mode enum. - else: - msg = f'Fit mode {mode.value} not implemented yet.' - raise NotImplementedError(msg) + Returns + ------- + object | None + Display handle for short mode, or ``None``. + """ + if verb is not VerbosityEnum.SILENT: + console.paragraph('Standard fitting') + if verb is not VerbosityEnum.SHORT: + return None + num_expts = len(expt_names) + console.print( + f"📋 Using {num_expts} experiments 🔬 from '{expt_names[0]}' to " + f"'{expt_names[-1]}' for '{mode.value}' fitting" + ) + console.print("🚀 Starting fit process with 'lmfit'...") + console.print('📈 Goodness-of-fit (reduced χ²) per experiment:') + return _make_display_handle() - # After fitting, save the project - if self.project.info.path is not None: - self.project.save() + def _snapshot_params(self, expt_name: str, results: object) -> None: + """ + Snapshot parameter values for a single experiment. + + Parameters + ---------- + expt_name : str + Experiment name key for the snapshot dict. + results : object + Fit results with ``.parameters`` list. + """ + snapshot: dict[str, dict] = {} + for param in results.parameters: + snapshot[param.unique_name] = { + 'value': param.value, + 'uncertainty': param.uncertainty, + 'units': param.units, + } + self._parameter_snapshots[expt_name] = snapshot + + def _fit_single_update_short_table( + self, + short_rows: list[list[str]], + expt_name: str, + results: object, + display_handle: object | None, + ) -> None: + """ + Append a summary row for short-mode display. + + Parameters + ---------- + short_rows : list[list[str]] + Accumulated rows (mutated in place). + expt_name : str + Experiment name. + results : object + Fit results. + display_handle : object | None + Display handle for in-place table update. + """ + chi2_str = ( + f'{results.reduced_chi_square:.2f}' if results.reduced_chi_square is not None else '—' + ) + iters = str(self.fitter.minimizer.tracker.best_iteration or 0) + status = '✅' if results.success else '❌' + short_rows.append([expt_name, chi2_str, iters, status]) + render_table( + columns_headers=['experiment', 'χ²', 'iterations', 'status'], + columns_alignment=['left', 'right', 'right', 'center'], + columns_data=short_rows, + display_handle=display_handle, + ) def fit_sequential( self, @@ -766,39 +942,23 @@ def fit_sequential( # Apply constraints before building the template self._update_categories() - _fit_seq( - analysis=self, - data_dir=data_dir, - max_workers=max_workers, - chunk_size=chunk_size, - file_pattern=file_pattern, - extract_diffrn=extract_diffrn, - verbosity=verbosity, - ) - - def show_fit_results(self) -> None: - """ - Display a summary of the fit results. - - Renders the fit quality metrics (reduced χ², R-factors) and a - table of fitted parameters with their starting values, final - values, and uncertainties. - - This method should be called after :meth:`fit` completes. If no - fit has been performed yet, a warning is logged. - - Example:: - - project.analysis.fit() project.analysis.show_fit_results() - """ - if self.fit_results is None: - log.warning('No fit results available. Run fit() first.') - return - - structures = self.project.structures - experiments = list(self.project.experiments.values()) - - self.fitter._process_fit_results(structures, experiments) + # Temporarily override project verbosity if caller provided one + original_verbosity = None + if verbosity is not None: + original_verbosity = self.project.verbosity + self.project.verbosity = verbosity + try: + _fit_seq( + analysis=self, + data_dir=data_dir, + max_workers=max_workers, + chunk_size=chunk_size, + file_pattern=file_pattern, + extract_diffrn=extract_diffrn, + ) + finally: + if original_verbosity is not None: + self.project.verbosity = original_verbosity def _update_categories(self, called_by_minimizer: bool = False) -> None: """ @@ -831,10 +991,3 @@ def as_cif(self) -> str: """ self._update_categories() return analysis_to_cif(self) - - def show_as_cif(self) -> None: - """Render the analysis section as CIF in console.""" - cif_text: str = self.as_cif() - paragraph_title: str = 'Analysis 🧮 info as cif' - console.paragraph(paragraph_title) - render_cif(cif_text) diff --git a/src/easydiffraction/analysis/calculators/cryspy.py b/src/easydiffraction/analysis/calculators/cryspy.py index 895b71fd..4030b2b9 100644 --- a/src/easydiffraction/analysis/calculators/cryspy.py +++ b/src/easydiffraction/analysis/calculators/cryspy.py @@ -217,12 +217,26 @@ def _recreate_cryspy_dict( cryspy_dict = copy.deepcopy(self._cryspy_dicts[combined_name]) cryspy_model_id = f'crystal_{structure.name}' - cryspy_model_dict = cryspy_dict[cryspy_model_id] + self._update_structure_in_cryspy_dict(cryspy_dict[cryspy_model_id], structure) + self._update_experiment_in_cryspy_dict(cryspy_dict, experiment) - ################################ - # Update structure parameters - ################################ + return cryspy_dict + + @staticmethod + def _update_structure_in_cryspy_dict( + cryspy_model_dict: dict[str, Any], + structure: Structure, + ) -> None: + """ + Update structure parameters in the Cryspy model dictionary. + Parameters + ---------- + cryspy_model_dict : dict[str, Any] + The ``crystal_`` sub-dict. + structure : Structure + The source structure. + """ # Cell cryspy_cell = cryspy_model_dict['unit_cell_parameters'] cryspy_cell[0] = structure.cell.length_a.value @@ -249,10 +263,21 @@ def _recreate_cryspy_dict( for idx, atom_site in enumerate(structure.atom_sites): cryspy_biso[idx] = atom_site.b_iso.value - ############################## - # Update experiment parameters - ############################## + @staticmethod + def _update_experiment_in_cryspy_dict( + cryspy_dict: dict[str, Any], + experiment: ExperimentBase, + ) -> None: + """ + Update experiment parameters in the Cryspy dictionary. + Parameters + ---------- + cryspy_dict : dict[str, Any] + The full Cryspy dictionary. + experiment : ExperimentBase + The source experiment. + """ if experiment.type.sample_form.value == SampleFormEnum.POWDER: if experiment.type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: cryspy_expt_name = f'pd_{experiment.name}' @@ -310,8 +335,6 @@ def _recreate_cryspy_dict( cryspy_expt_dict['extinction_radius'][0] = experiment.extinction.radius.value cryspy_expt_dict['extinction_mosaicity'][0] = experiment.extinction.mosaicity.value - return cryspy_dict - def _recreate_cryspy_obj( self, structure: Structure, @@ -388,225 +411,303 @@ def _convert_experiment_to_cryspy_cif( # noqa: PLR6301 str The Cryspy CIF string representation of the experiment. """ - # Try to get experiment attributes expt_type = getattr(experiment, 'type', None) instrument = getattr(experiment, 'instrument', None) peak = getattr(experiment, 'peak', None) extinction = getattr(experiment, 'extinction', None) - # Add experiment datablock name cif_lines = [f'data_{experiment.name}'] - # Add experiment type attribute dat - if expt_type is not None: - cif_lines.append('') - radiation_probe = expt_type.radiation_probe.value - radiation_probe = radiation_probe.replace('neutron', 'neutrons') - radiation_probe = radiation_probe.replace('xray', 'X-rays') - cif_lines.append(f'_setup_radiation {radiation_probe}') - - # Add instrument attribute data - if instrument: - # Restrict to only attributes relevant for the beam mode to - # avoid probing non-existent guarded attributes (which - # triggers diagnostics). - if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - if expt_type.sample_form.value == SampleFormEnum.POWDER: - instrument_mapping = { - 'setup_wavelength': '_setup_wavelength', - 'calib_twotheta_offset': '_setup_offset_2theta', - } - elif expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: - instrument_mapping = { - 'setup_wavelength': '_setup_wavelength', - } - # Add dummy 0.0 value for _setup_field required by - # Cryspy - cif_lines.append('') - cif_lines.append('_setup_field 0.0') - elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - if expt_type.sample_form.value == SampleFormEnum.POWDER: - instrument_mapping = { - 'setup_twotheta_bank': '_tof_parameters_2theta_bank', - 'calib_d_to_tof_offset': '_tof_parameters_Zero', - 'calib_d_to_tof_linear': '_tof_parameters_Dtt1', - 'calib_d_to_tof_quad': '_tof_parameters_dtt2', - } - elif expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: - instrument_mapping = {} # TODO: Check this mapping! - # Add dummy 0.0 value for _setup_field required by - # Cryspy - cif_lines.append('') - cif_lines.append('_setup_field 0.0') - cif_lines.append('') - for local_attr_name, engine_key_name in instrument_mapping.items(): - # attr_obj = instrument.__dict__.get(local_attr_name) - attr_obj = getattr(instrument, local_attr_name) - if attr_obj is not None: - cif_lines.append(f'{engine_key_name} {attr_obj.value}') - - # Add peak attribute data - if peak: - if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - peak_mapping = { - 'broad_gauss_u': '_pd_instr_resolution_U', - 'broad_gauss_v': '_pd_instr_resolution_V', - 'broad_gauss_w': '_pd_instr_resolution_W', - 'broad_lorentz_x': '_pd_instr_resolution_X', - 'broad_lorentz_y': '_pd_instr_resolution_Y', - } - elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - peak_mapping = { - 'broad_gauss_sigma_0': '_tof_profile_sigma0', - 'broad_gauss_sigma_1': '_tof_profile_sigma1', - 'broad_gauss_sigma_2': '_tof_profile_sigma2', - 'broad_mix_beta_0': '_tof_profile_beta0', - 'broad_mix_beta_1': '_tof_profile_beta1', - 'asym_alpha_0': '_tof_profile_alpha0', - 'asym_alpha_1': '_tof_profile_alpha1', - } - cif_lines.append('_tof_profile_peak_shape Gauss') - cif_lines.append('') - for local_attr_name, engine_key_name in peak_mapping.items(): - # attr_obj = peak.__dict__.get(local_attr_name) - attr_obj = getattr(peak, local_attr_name) - if attr_obj is not None: - cif_lines.append(f'{engine_key_name} {attr_obj.value}') - - # Add extinction attribute data - if extinction and expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: - extinction_mapping = { - 'mosaicity': '_extinction_mosaicity', - 'radius': '_extinction_radius', + # Experiment metadata sections + _cif_radiation_probe(cif_lines, expt_type) + _cif_instrument_section(cif_lines, expt_type, instrument) + _cif_peak_section(cif_lines, expt_type, peak) + _cif_extinction_section(cif_lines, expt_type, extinction) + + # Powder range data (also returns min/max for background) + twotheta_min, twotheta_max = _cif_range_section(cif_lines, expt_type, experiment) + + # Structure sections + _cif_orient_matrix_section(cif_lines, expt_type) + _cif_phase_section(cif_lines, expt_type, linked_structure) + _cif_background_section(cif_lines, expt_type, twotheta_min, twotheta_max) + + # Measured data + _cif_measured_data_section(cif_lines, expt_type, experiment) + + return '\n'.join(cif_lines) + + +def _cif_radiation_probe( + cif_lines: list[str], + expt_type: object | None, +) -> None: + """Append radiation probe line to CIF.""" + if expt_type is None: + return + cif_lines.append('') + radiation_probe = expt_type.radiation_probe.value + radiation_probe = radiation_probe.replace('neutron', 'neutrons') + radiation_probe = radiation_probe.replace('xray', 'X-rays') + cif_lines.append(f'_setup_radiation {radiation_probe}') + + +def _cif_instrument_section( + cif_lines: list[str], + expt_type: object | None, + instrument: object | None, +) -> None: + """Append instrument attribute lines to CIF.""" + if not instrument: + return + + instrument_mapping: dict[str, str] = {} + if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: + if expt_type.sample_form.value == SampleFormEnum.POWDER: + instrument_mapping = { + 'setup_wavelength': '_setup_wavelength', + 'calib_twotheta_offset': '_setup_offset_2theta', } + elif expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: + instrument_mapping = {'setup_wavelength': '_setup_wavelength'} cif_lines.append('') - cif_lines.append('_extinction_model gauss') - for local_attr_name, engine_key_name in extinction_mapping.items(): - attr_obj = getattr(extinction, local_attr_name) - if attr_obj is not None: - cif_lines.append(f'{engine_key_name} {attr_obj.value}') - - # Add range data + cif_lines.append('_setup_field 0.0') + elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: if expt_type.sample_form.value == SampleFormEnum.POWDER: - x_data = experiment.data.x - twotheta_min = f'{np.round(x_data.min(), 5):.5f}' # float(x_data.min()) - twotheta_max = f'{np.round(x_data.max(), 5):.5f}' # float(x_data.max()) - cif_lines.append('') - if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - cif_lines.append(f'_range_2theta_min {twotheta_min}') - cif_lines.append(f'_range_2theta_max {twotheta_max}') - elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - cif_lines.append(f'_range_time_min {twotheta_min}') - cif_lines.append(f'_range_time_max {twotheta_max}') - - # Add orientation matrix data - # Hardcoded example values for now, as we don't use them yet, - # but Cryspy requires them for single crystal data. - if expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: - cif_lines.append('') - cif_lines.append('_diffrn_orient_matrix_type CCSL') - cif_lines.append('_diffrn_orient_matrix_ub_11 -0.088033') - cif_lines.append('_diffrn_orient_matrix_ub_12 -0.088004') - cif_lines.append('_diffrn_orient_matrix_ub_13 0.069970') - cif_lines.append('_diffrn_orient_matrix_ub_21 0.034058') - cif_lines.append('_diffrn_orient_matrix_ub_22 -0.188170') - cif_lines.append('_diffrn_orient_matrix_ub_23 -0.013039') - cif_lines.append('_diffrn_orient_matrix_ub_31 0.223600') - cif_lines.append('_diffrn_orient_matrix_ub_32 0.125751') - cif_lines.append('_diffrn_orient_matrix_ub_33 0.029490') - - # Add phase data - if expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: - cif_lines.append('') - cif_lines.append(f'_phase_label {linked_structure.name}') - cif_lines.append('_phase_scale 1.0') - elif expt_type.sample_form.value == SampleFormEnum.POWDER: + instrument_mapping = { + 'setup_twotheta_bank': '_tof_parameters_2theta_bank', + 'calib_d_to_tof_offset': '_tof_parameters_Zero', + 'calib_d_to_tof_linear': '_tof_parameters_Dtt1', + 'calib_d_to_tof_quad': '_tof_parameters_dtt2', + } + elif expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: + instrument_mapping = {} # TODO: Check this mapping! cif_lines.append('') - cif_lines.append('loop_') - cif_lines.append('_phase_label') - cif_lines.append('_phase_scale') - cif_lines.append(f'{linked_structure.name} 1.0') - - # Add background data - if expt_type.sample_form.value == SampleFormEnum.POWDER: - if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - cif_lines.append('') - cif_lines.append('loop_') - cif_lines.append('_pd_background_2theta') - cif_lines.append('_pd_background_intensity') - cif_lines.append(f'{twotheta_min} 0.0') - cif_lines.append(f'{twotheta_max} 0.0') - elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - cif_lines.append('') - cif_lines.append('loop_') - cif_lines.append('_tof_backgroundpoint_time') # TODO: !!!!???? - cif_lines.append('_tof_backgroundpoint_intensity') # TODO: !!!!???? - cif_lines.append(f'{twotheta_min} 0.0') # TODO: !!!!???? - cif_lines.append(f'{twotheta_max} 0.0') # TODO: !!!!???? - - # Add measured data: Single crystal - if expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: - if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - cif_lines.append('') - cif_lines.append('loop_') - cif_lines.append('_diffrn_refln_index_h') - cif_lines.append('_diffrn_refln_index_k') - cif_lines.append('_diffrn_refln_index_l') - cif_lines.append('_diffrn_refln_intensity') - cif_lines.append('_diffrn_refln_intensity_sigma') - indices_h = experiment.data.index_h - indices_k = experiment.data.index_k - indices_l = experiment.data.index_l - y_data = experiment.data.intensity_meas - sy_data = experiment.data.intensity_meas_su - for index_h, index_k, index_l, y_val, sy_val in zip( - indices_h, indices_k, indices_l, y_data, sy_data, strict=True - ): - cif_lines.append( - f'{index_h:4.0f}{index_k:4.0f}{index_l:4.0f} {y_val:.5f} {sy_val:.5f}' - ) - elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - cif_lines.append('') - cif_lines.append('loop_') - cif_lines.append('_diffrn_refln_index_h') - cif_lines.append('_diffrn_refln_index_k') - cif_lines.append('_diffrn_refln_index_l') - cif_lines.append('_diffrn_refln_intensity') - cif_lines.append('_diffrn_refln_intensity_sigma') - cif_lines.append('_diffrn_refln_wavelength') - indices_h = experiment.data.index_h - indices_k = experiment.data.index_k - indices_l = experiment.data.index_l - y_data = experiment.data.intensity_meas - sy_data = experiment.data.intensity_meas_su - wl_data = experiment.data.wavelength - for index_h, index_k, index_l, y_val, sy_val, wl_val in zip( - indices_h, indices_k, indices_l, y_data, sy_data, wl_data, strict=True - ): - cif_lines.append( - f'{index_h:4.0f}{index_k:4.0f}{index_l:4.0f} {y_val:.5f} ' - f'{sy_val:.5f} {wl_val:.5f}' - ) - # Add measured data: Powder - elif expt_type.sample_form.value == SampleFormEnum.POWDER: - if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - cif_lines.append('') - cif_lines.append('loop_') - cif_lines.append('_pd_meas_2theta') - cif_lines.append('_pd_meas_intensity') - cif_lines.append('_pd_meas_intensity_sigma') - elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - cif_lines.append('') - cif_lines.append('loop_') - cif_lines.append('_tof_meas_time') - cif_lines.append('_tof_meas_intensity') - cif_lines.append('_tof_meas_intensity_sigma') - y_data = experiment.data.intensity_meas - sy_data = experiment.data.intensity_meas_su - for x_val, y_val, sy_val in zip(x_data, y_data, sy_data, strict=True): - cif_lines.append(f' {x_val:.5f} {y_val:.5f} {sy_val:.5f}') - - # Combine all lines into a single CIF string - cryspy_experiment_cif = '\n'.join(cif_lines) - - return cryspy_experiment_cif + cif_lines.append('_setup_field 0.0') + + cif_lines.append('') + for local_attr_name, engine_key_name in instrument_mapping.items(): + attr_obj = getattr(instrument, local_attr_name) + if attr_obj is not None: + cif_lines.append(f'{engine_key_name} {attr_obj.value}') + + +def _cif_peak_section( + cif_lines: list[str], + expt_type: object | None, + peak: object | None, +) -> None: + """Append peak profile lines to CIF.""" + if not peak: + return + + peak_mapping: dict[str, str] = {} + if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: + peak_mapping = { + 'broad_gauss_u': '_pd_instr_resolution_U', + 'broad_gauss_v': '_pd_instr_resolution_V', + 'broad_gauss_w': '_pd_instr_resolution_W', + 'broad_lorentz_x': '_pd_instr_resolution_X', + 'broad_lorentz_y': '_pd_instr_resolution_Y', + } + elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: + peak_mapping = { + 'broad_gauss_sigma_0': '_tof_profile_sigma0', + 'broad_gauss_sigma_1': '_tof_profile_sigma1', + 'broad_gauss_sigma_2': '_tof_profile_sigma2', + 'broad_mix_beta_0': '_tof_profile_beta0', + 'broad_mix_beta_1': '_tof_profile_beta1', + 'asym_alpha_0': '_tof_profile_alpha0', + 'asym_alpha_1': '_tof_profile_alpha1', + } + cif_lines.append('_tof_profile_peak_shape Gauss') + + cif_lines.append('') + for local_attr_name, engine_key_name in peak_mapping.items(): + attr_obj = getattr(peak, local_attr_name) + if attr_obj is not None: + cif_lines.append(f'{engine_key_name} {attr_obj.value}') + + +def _cif_extinction_section( + cif_lines: list[str], + expt_type: object | None, + extinction: object | None, +) -> None: + """Append extinction lines to CIF (single crystal only).""" + if not extinction or expt_type.sample_form.value != SampleFormEnum.SINGLE_CRYSTAL: + return + extinction_mapping = { + 'mosaicity': '_extinction_mosaicity', + 'radius': '_extinction_radius', + } + cif_lines.append('') + cif_lines.append('_extinction_model gauss') + for local_attr_name, engine_key_name in extinction_mapping.items(): + attr_obj = getattr(extinction, local_attr_name) + if attr_obj is not None: + cif_lines.append(f'{engine_key_name} {attr_obj.value}') + + +def _cif_range_section( + cif_lines: list[str], + expt_type: object | None, + experiment: ExperimentBase, +) -> tuple[str, str]: + """ + Append range lines to CIF and return (min, max) strings. + + Parameters + ---------- + cif_lines : list[str] + Accumulator list of CIF lines (mutated in place). + expt_type : object | None + Experiment type metadata with ``sample_form`` and ``beam_mode``. + experiment : ExperimentBase + Experiment whose data range is queried. + + Returns + ------- + tuple[str, str] + Formatted min and max strings (empty if not powder). + """ + if expt_type.sample_form.value != SampleFormEnum.POWDER: + return '', '' + + x_data = experiment.data.x + twotheta_min = f'{np.round(x_data.min(), 5):.5f}' + twotheta_max = f'{np.round(x_data.max(), 5):.5f}' + cif_lines.append('') + if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: + cif_lines.append(f'_range_2theta_min {twotheta_min}') + cif_lines.append(f'_range_2theta_max {twotheta_max}') + elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: + cif_lines.append(f'_range_time_min {twotheta_min}') + cif_lines.append(f'_range_time_max {twotheta_max}') + return twotheta_min, twotheta_max + + +def _cif_orient_matrix_section( + cif_lines: list[str], + expt_type: object | None, +) -> None: + """Append hardcoded orientation matrix for single crystal.""" + if expt_type.sample_form.value != SampleFormEnum.SINGLE_CRYSTAL: + return + cif_lines.append('') + cif_lines.append('_diffrn_orient_matrix_type CCSL') + for tag, val in [ + ('ub_11', '-0.088033'), + ('ub_12', '-0.088004'), + ('ub_13', ' 0.069970'), + ('ub_21', ' 0.034058'), + ('ub_22', '-0.188170'), + ('ub_23', '-0.013039'), + ('ub_31', ' 0.223600'), + ('ub_32', ' 0.125751'), + ('ub_33', ' 0.029490'), + ]: + cif_lines.append(f'_diffrn_orient_matrix_{tag} {val}') + + +def _cif_phase_section( + cif_lines: list[str], + expt_type: object | None, + linked_structure: object, +) -> None: + """Append phase label/scale to CIF.""" + cif_lines.append('') + if expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: + cif_lines.append(f'_phase_label {linked_structure.name}') + cif_lines.append('_phase_scale 1.0') + elif expt_type.sample_form.value == SampleFormEnum.POWDER: + cif_lines.append('loop_') + cif_lines.append('_phase_label') + cif_lines.append('_phase_scale') + cif_lines.append(f'{linked_structure.name} 1.0') + + +def _cif_background_section( + cif_lines: list[str], + expt_type: object | None, + twotheta_min: str, + twotheta_max: str, +) -> None: + """Append background loop for powder data.""" + if expt_type.sample_form.value != SampleFormEnum.POWDER: + return + cif_lines.append('') + cif_lines.append('loop_') + if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: + cif_lines.append('_pd_background_2theta') + cif_lines.append('_pd_background_intensity') + elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: + cif_lines.append('_tof_backgroundpoint_time') # TODO: !!!!???? + cif_lines.append('_tof_backgroundpoint_intensity') # TODO: !!!!???? + cif_lines.append(f'{twotheta_min} 0.0') # TODO: !!!!???? + cif_lines.append(f'{twotheta_max} 0.0') # TODO: !!!!???? + + +def _cif_measured_data_section( + cif_lines: list[str], + expt_type: object | None, + experiment: ExperimentBase, +) -> None: + """Append measured data loop to CIF.""" + if expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: + _cif_measured_data_sc(cif_lines, expt_type, experiment) + elif expt_type.sample_form.value == SampleFormEnum.POWDER: + _cif_measured_data_pd(cif_lines, expt_type, experiment) + + +def _cif_measured_data_sc( + cif_lines: list[str], + expt_type: object | None, + experiment: ExperimentBase, +) -> None: + """Append single crystal measured data loop.""" + data = experiment.data + cif_lines.append('') + cif_lines.append('loop_') + cif_lines.append('_diffrn_refln_index_h') + cif_lines.append('_diffrn_refln_index_k') + cif_lines.append('_diffrn_refln_index_l') + cif_lines.append('_diffrn_refln_intensity') + cif_lines.append('_diffrn_refln_intensity_sigma') + + is_tof = expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT + if is_tof: + cif_lines.append('_diffrn_refln_wavelength') + + for i in range(len(data.index_h)): + line = ( + f'{data.index_h[i]:4.0f}{data.index_k[i]:4.0f}{data.index_l[i]:4.0f}' + f' {data.intensity_meas[i]:.5f} {data.intensity_meas_su[i]:.5f}' + ) + if is_tof: + line += f' {data.wavelength[i]:.5f}' + cif_lines.append(line) + + +def _cif_measured_data_pd( + cif_lines: list[str], + expt_type: object | None, + experiment: ExperimentBase, +) -> None: + """Append powder measured data loop.""" + cif_lines.append('') + cif_lines.append('loop_') + if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: + cif_lines.append('_pd_meas_2theta') + cif_lines.append('_pd_meas_intensity') + cif_lines.append('_pd_meas_intensity_sigma') + elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: + cif_lines.append('_tof_meas_time') + cif_lines.append('_tof_meas_intensity') + cif_lines.append('_tof_meas_intensity_sigma') + + x_data = experiment.data.x + y_data = experiment.data.intensity_meas + sy_data = experiment.data.intensity_meas_su + for x_val, y_val, sy_val in zip(x_data, y_data, sy_data, strict=True): + cif_lines.append(f' {x_val:.5f} {y_val:.5f} {sy_val:.5f}') diff --git a/src/easydiffraction/analysis/sequential.py b/src/easydiffraction/analysis/sequential.py index 53f643fe..a1cd79a4 100644 --- a/src/easydiffraction/analysis/sequential.py +++ b/src/easydiffraction/analysis/sequential.py @@ -353,6 +353,33 @@ def _append_to_csv( writer.writerow(result) +def _extract_params_from_row(row: dict[str, str]) -> dict[str, float]: + """ + Extract parameter values from a single CSV row. + + Skips meta columns, diffrn columns, uncertainty columns, and empty + values. Non-numeric values are silently ignored. + + Parameters + ---------- + row : dict[str, str] + A single CSV row as a dict. + + Returns + ------- + dict[str, float] + Parameter name → float value mapping. + """ + params: dict[str, float] = {} + for key, val in row.items(): + if key in _META_COLUMNS or key.startswith('diffrn.') or key.endswith('.uncertainty'): + continue + if val: + with contextlib.suppress(ValueError, TypeError): + params[key] = float(val) + return params + + def _read_csv_for_recovery( csv_path: Path, ) -> tuple[set[str], dict[str, float] | None]: @@ -383,18 +410,7 @@ def _read_csv_for_recovery( if file_path: fitted.add(file_path) if row.get('fit_success', '').lower() == 'true': - # Extract parameter values from this row - params: dict[str, float] = {} - for key, val in row.items(): - if key in _META_COLUMNS: - continue - if key.startswith('diffrn.'): - continue - if key.endswith('.uncertainty'): - continue - if val: - with contextlib.suppress(ValueError, TypeError): - params[key] = float(val) + params = _extract_params_from_row(row) if params: last_params = params @@ -524,60 +540,57 @@ def _report_chunk_progress( print(f' {status} {Path(r["file_path"]).name}: χ² = {rchi2_str}') +def _apply_diffrn_metadata( + results: list[dict[str, Any]], + extract_diffrn: Callable, +) -> None: + """ + Enrich result dicts with diffrn metadata from a user callback. + + Calls *extract_diffrn* for each result and merges the returned + key/value pairs into the result dict under ``diffrn.`` keys. + Failures are logged as warnings and do not interrupt processing. + + Parameters + ---------- + results : list[dict[str, Any]] + Worker result dicts (mutated in place). + extract_diffrn : Callable + User callback: ``f(file_path) → {field: value}``. + """ + for result in results: + try: + diffrn_values = extract_diffrn(result['file_path']) + for key, val in diffrn_values.items(): + result[f'diffrn.{key}'] = val + except Exception as exc: + log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') + + # ------------------------------------------------------------------ # Main orchestration # ------------------------------------------------------------------ -def fit_sequential( - analysis: object, - data_dir: str, - max_workers: int | str = 1, - chunk_size: int | None = None, - file_pattern: str = '*', - extract_diffrn: Callable | None = None, - verbosity: str | None = None, -) -> None: +def _check_seq_preconditions(project: object) -> list[str]: """ - Run sequential fitting over all data files in a directory. + Validate sequential fitting preconditions. Parameters ---------- - analysis : object - The ``Analysis`` instance (owns project reference). - data_dir : str - Path to directory containing data files. - max_workers : int | str, default=1 - Number of parallel worker processes. ``1`` = sequential (no - subprocess overhead). ``'auto'`` = physical CPU count. Uses - ``ProcessPoolExecutor`` with ``spawn`` context when > 1. - chunk_size : int | None, default=None - Files per chunk. Default ``None`` uses ``max_workers``. - file_pattern : str, default='*' - Glob pattern to filter files in *data_dir*. - extract_diffrn : Callable | None, default=None - User callback: ``f(file_path) → {diffrn_field: value}``. - verbosity : str | None, default=None - ``'full'``, ``'short'``, ``'silent'``. Default: project - verbosity. + project : object + The project to validate. + + Returns + ------- + list[str] + Data file paths from the template experiment. Raises ------ ValueError - If preconditions are not met (e.g. multiple structures, missing - project path, no free parameters). + If preconditions are not met. """ - # Guard against re-entry in spawned child processes. With the - # ``spawn`` multiprocessing context the child re-imports __main__, - # which re-executes the user script and would call fit_sequential - # again, causing infinite process spawning. - if mp.parent_process() is not None: - return - - project = analysis.project - verb = VerbosityEnum(verbosity if verbosity is not None else project.verbosity) - - # ── Preconditions ──────────────────────────────────────────── if len(project.structures) != 1: msg = f'Sequential fitting requires exactly 1 structure, found {len(project.structures)}.' raise ValueError(msg) @@ -593,9 +606,6 @@ def fit_sequential( msg = 'Project must be saved before sequential fitting. Call save_as() first.' raise ValueError(msg) - # Discover data files - data_paths = extract_data_paths_from_dir(data_dir, file_pattern=file_pattern) - from easydiffraction.core.variable import Parameter # noqa: PLC0415 free_params = [ @@ -605,10 +615,29 @@ def fit_sequential( msg = 'No free parameters found. Mark at least one parameter as free.' raise ValueError(msg) - # ── Build template ─────────────────────────────────────────── - template = _build_template(project) - # ── CSV setup and crash recovery ───────────────────────────── +def _setup_csv_and_recovery( + project: object, + template: SequentialFitTemplate, + verb: VerbosityEnum, +) -> tuple[Path, list[str], set[str], SequentialFitTemplate]: + """ + Set up CSV and perform crash recovery. + + Parameters + ---------- + project : object + The project instance. + template : SequentialFitTemplate + The fit template. + verb : VerbosityEnum + Output verbosity. + + Returns + ------- + tuple[Path, list[str], set[str], SequentialFitTemplate] + CSV path, header, already-fitted set, and updated template. + """ csv_path = project.info.path / 'analysis' / 'results.csv' csv_path.parent.mkdir(parents=True, exist_ok=True) header = _build_csv_header(template) @@ -620,20 +649,38 @@ def fit_sequential( log.info(f'Resuming: {num_skipped} files already fitted, skipping.') if verb is not VerbosityEnum.SILENT: print(f'📂 Resuming from CSV: {num_skipped} files already fitted.') - # Seed from recovered params if available if recovered_params is not None: template = replace(template, initial_params=recovered_params) else: _write_csv_header(csv_path, header) - # Filter out already-fitted files - remaining = [p for p in data_paths if p not in already_fitted] - if not remaining: - if verb is not VerbosityEnum.SILENT: - print('✅ All files already fitted. Nothing to do.') - return + return csv_path, header, already_fitted, template + + +def _resolve_workers( + max_workers: int | str, + chunk_size: int | None, +) -> tuple[int, int]: + """ + Resolve worker count and chunk size. - # ── Resolve workers and chunk size ─────────────────────────── + Parameters + ---------- + max_workers : int | str + Worker count or ``'auto'``. + chunk_size : int | None + Explicit chunk size or ``None``. + + Returns + ------- + tuple[int, int] + Resolved (max_workers, chunk_size). + + Raises + ------ + ValueError + If max_workers is invalid. + """ if isinstance(max_workers, str) and max_workers == 'auto': import os # noqa: PLC0415 @@ -646,42 +693,32 @@ def fit_sequential( if chunk_size is None: chunk_size = max_workers - # ── Chunk and fit ──────────────────────────────────────────── - chunks = [remaining[i : i + chunk_size] for i in range(0, len(remaining), chunk_size)] - total_chunks = len(chunks) + return max_workers, chunk_size - if verb is not VerbosityEnum.SILENT: - minimizer_name = analysis.fitter.selection - console.paragraph('Sequential fitting') - console.print(f"🚀 Starting fit process with '{minimizer_name}'...") - console.print( - f'📋 {len(remaining)} files in {total_chunks} chunks (max_workers={max_workers})' - ) - console.print('📈 Goodness-of-fit (reduced χ²):') - # Create a process pool for parallel dispatch, or a no-op context - # for single-worker mode (avoids process-spawn overhead). - # - # When max_workers > 1 we use ``spawn`` context, which normally - # re-imports ``__main__`` in every child process. If the user runs - # a script without an ``if __name__ == '__main__':`` guard the - # whole script would re-execute in every worker, causing infinite - # process spawning. To prevent this we temporarily hide - # ``__main__.__file__`` and ``__main__.__spec__`` so that the spawn - # bootstrap has no path to re-import the script. ``_fit_worker`` - # lives in this module (not ``__main__``), so it is still resolved - # via normal pickle/import machinery. +def _create_pool_context(max_workers: int) -> tuple[object, object, object, object]: + """ + Create a process pool context manager and back up __main__ state. + + Parameters + ---------- + max_workers : int + Number of workers. ``1`` → nullcontext. + + Returns + ------- + tuple[object, object, object, object] + ``(pool_cm, main_mod, main_file_bak, main_spec_bak)``. + """ main_mod = sys.modules.get('__main__') main_file_bak = getattr(main_mod, '__file__', None) main_spec_bak = getattr(main_mod, '__spec__', None) if max_workers > 1: - # Hide __main__ origin from spawn if main_mod is not None and main_file_bak is not None: main_mod.__file__ = None # type: ignore[assignment] if main_mod is not None and main_spec_bak is not None: main_mod.__spec__ = None - spawn_ctx = mp.get_context('spawn') pool_cm = ProcessPoolExecutor( max_workers=max_workers, @@ -691,50 +728,148 @@ def fit_sequential( else: pool_cm = contextlib.nullcontext() + return pool_cm, main_mod, main_file_bak, main_spec_bak + + +def _restore_main_state( + main_mod: object, + main_file_bak: object, + main_spec_bak: object, +) -> None: + """Restore ``__main__`` attributes after pool execution.""" + if main_mod is not None and main_file_bak is not None: + main_mod.__file__ = main_file_bak + if main_mod is not None and main_spec_bak is not None: + main_mod.__spec__ = main_spec_bak + + +def _run_fit_loop( + pool_cm: object, + chunks: list[list[str]], + template: SequentialFitTemplate, + csv_info: tuple[Path, list[str]], + extract_diffrn: Callable | None, + verb: VerbosityEnum, +) -> None: + """ + Execute the chunk-based fitting loop. + + Parameters + ---------- + pool_cm : object + Pool context manager (ProcessPoolExecutor or nullcontext). + chunks : list[list[str]] + Chunked file paths. + template : SequentialFitTemplate + Starting template (updated via propagation). + csv_info : tuple[Path, list[str]] + Tuple of ``(csv_path, header)``. + extract_diffrn : Callable | None + User callback for diffrn metadata. + verb : VerbosityEnum + Output verbosity. + """ + csv_path, header = csv_info + total_chunks = len(chunks) + with pool_cm as executor: + for chunk_idx, chunk in enumerate(chunks, start=1): + if executor is not None: + templates = [template] * len(chunk) + results = list(executor.map(_fit_worker, templates, chunk)) + else: + results = [_fit_worker(template, path) for path in chunk] + + if extract_diffrn is not None: + _apply_diffrn_metadata(results, extract_diffrn) + + _append_to_csv(csv_path, header, results) + _report_chunk_progress(chunk_idx, total_chunks, results, verb) + + # Propagate last successful params + last_ok = _find_last_successful(results) + if last_ok is not None: + template = replace(template, initial_params=last_ok['params']) + + +def _find_last_successful(results: list[dict[str, Any]]) -> dict[str, Any] | None: + """Return the last successful result dict, or None.""" + for r in reversed(results): + if r.get('fit_success') and r.get('params'): + return r + return None + + +def fit_sequential( + analysis: object, + data_dir: str, + max_workers: int | str = 1, + chunk_size: int | None = None, + file_pattern: str = '*', + extract_diffrn: Callable | None = None, +) -> None: + """ + Run sequential fitting over all data files in a directory. + + Parameters + ---------- + analysis : object + The ``Analysis`` instance (owns project reference). + data_dir : str + Path to directory containing data files. + max_workers : int | str, default=1 + Number of parallel worker processes. ``1`` = sequential (no + subprocess overhead). ``'auto'`` = physical CPU count. Uses + ``ProcessPoolExecutor`` with ``spawn`` context when > 1. + chunk_size : int | None, default=None + Files per chunk. Default ``None`` uses ``max_workers``. + file_pattern : str, default='*' + Glob pattern to filter files in *data_dir*. + extract_diffrn : Callable | None, default=None + User callback: ``f(file_path) → {diffrn_field: value}``. + """ + if mp.parent_process() is not None: + return + + project = analysis.project + verb = VerbosityEnum(project.verbosity) + + _check_seq_preconditions(project) + + data_paths = extract_data_paths_from_dir(data_dir, file_pattern=file_pattern) + template = _build_template(project) + + csv_path, header, already_fitted, template = _setup_csv_and_recovery( + project, + template, + verb, + ) + + remaining = [p for p in data_paths if p not in already_fitted] + if not remaining: + if verb is not VerbosityEnum.SILENT: + print('✅ All files already fitted. Nothing to do.') + return + + max_workers, chunk_size = _resolve_workers(max_workers, chunk_size) + chunks = [remaining[i : i + chunk_size] for i in range(0, len(remaining), chunk_size)] + + if verb is not VerbosityEnum.SILENT: + console.paragraph('Sequential fitting') + console.print(f"🚀 Starting fit process with '{analysis.fitter.selection}'...") + console.print( + f'📋 {len(remaining)} files in {len(chunks)} chunks (max_workers={max_workers})' + ) + console.print('📈 Goodness-of-fit (reduced χ²):') + + pool_cm, main_mod, main_file_bak, main_spec_bak = _create_pool_context(max_workers) try: - with pool_cm as executor: - for chunk_idx, chunk in enumerate(chunks, start=1): - # Dispatch: parallel or sequential - if executor is not None: - templates = [template] * len(chunk) - results = list(executor.map(_fit_worker, templates, chunk)) - else: - results = [_fit_worker(template, path) for path in chunk] - - # Extract diffrn metadata in the main process - if extract_diffrn is not None: - for result in results: - try: - diffrn_values = extract_diffrn(result['file_path']) - for key, val in diffrn_values.items(): - result[f'diffrn.{key}'] = val - except Exception as exc: - log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') - - # Write to CSV - _append_to_csv(csv_path, header, results) - - # Report progress - _report_chunk_progress(chunk_idx, total_chunks, results, verb) - - # Propagate: use last successful file's - # params as starting values - last_ok = None - for r in reversed(results): - if r.get('fit_success') and r.get('params'): - last_ok = r - break - - if last_ok is not None: - template = replace(template, initial_params=last_ok['params']) + _run_fit_loop(pool_cm, chunks, template, (csv_path, header), extract_diffrn, verb) finally: - # Restore __main__ attributes - if main_mod is not None and main_file_bak is not None: - main_mod.__file__ = main_file_bak - if main_mod is not None and main_spec_bak is not None: - main_mod.__spec__ = main_spec_bak + _restore_main_state(main_mod, main_file_bak, main_spec_bak) if verb is not VerbosityEnum.SILENT: - total_fitted = len(already_fitted) + len(remaining) - print(f'✅ Sequential fitting complete: {total_fitted} files processed.') + print( + f'✅ Sequential fitting complete: ' + f'{len(already_fitted) + len(remaining)} files processed.' + ) print(f'📄 Results saved to: {csv_path}') diff --git a/src/easydiffraction/crystallography/crystallography.py b/src/easydiffraction/crystallography/crystallography.py index b6b84861..525ac99a 100644 --- a/src/easydiffraction/crystallography/crystallography.py +++ b/src/easydiffraction/crystallography/crystallography.py @@ -6,7 +6,6 @@ from cryspy.A_functions_base.function_2_space_group import get_crystal_system_by_it_number from cryspy.A_functions_base.function_2_space_group import get_it_number_by_name_hm_short from sympy import Expr -from sympy import Symbol from sympy import simplify from sympy import symbols from sympy import sympify @@ -87,19 +86,16 @@ def apply_cell_symmetry_constraints( return cell -def apply_atom_site_symmetry_constraints( - atom_site: dict[str, Any], +def _get_wyckoff_exprs( name_hm: str, coord_code: int, wyckoff_letter: str, -) -> dict[str, Any]: +) -> list[Expr] | None: """ - Apply symmetry constraints to atom site coordinates. + Look up the first Wyckoff position and parse it into sympy Exprs. Parameters ---------- - atom_site : dict[str, Any] - Dictionary containing atom position data. name_hm : str Hermann-Mauguin symbol of the space group. coord_code : int @@ -109,46 +105,87 @@ def apply_atom_site_symmetry_constraints( Returns ------- - dict[str, Any] - The atom_site dictionary with applied symmetry constraints. + list[Expr] | None + Three sympy expressions for x, y, z components, or ``None`` on + failure. """ it_number = get_it_number_by_name_hm_short(name_hm) if it_number is None: - error_msg = f"Failed to get IT_number for name_H-M '{name_hm}'" - log.error(error_msg) # TODO: ValueError? Diagnostics? - return atom_site + log.error(f"Failed to get IT_number for name_H-M '{name_hm}'") + return None - it_coordinate_system_code = coord_code - if it_coordinate_system_code is None: - error_msg = 'IT_coordinate_system_code is not set' - log.error(error_msg) # TODO: ValueError? Diagnostics? - return atom_site + if coord_code is None: + log.error('IT_coordinate_system_code is not set') + return None - space_group_entry = SPACE_GROUPS[it_number, it_coordinate_system_code] - wyckoff_positions = space_group_entry['Wyckoff_positions'][wyckoff_letter] - coords_xyz = wyckoff_positions['coords_xyz'] - - first_position = coords_xyz[0] + entry = SPACE_GROUPS[it_number, coord_code] + first_position = entry['Wyckoff_positions'][wyckoff_letter]['coords_xyz'][0] components = first_position.strip('()').split(',') - parsed_exprs: list[Expr] = [sympify(comp.strip()) for comp in components] + return [sympify(comp.strip()) for comp in components] + - x_val: Expr = sympify(atom_site['fract_x']) - y_val: Expr = sympify(atom_site['fract_y']) - z_val: Expr = sympify(atom_site['fract_z']) +def _apply_fract_constraints( + atom_site: dict[str, Any], + parsed_exprs: list[Expr], +) -> None: + """ + Evaluate and apply fractional coordinate constraints in place. - substitutions: dict[str, Expr] = {'x': x_val, 'y': y_val, 'z': z_val} + For each axis (x, y, z), if the coordinate is fully determined by + symmetry (the symbol does not appear in any expression as a free + symbol), substitutes the numeric values and overwrites the entry. - axes: tuple[str, ...] = ('x', 'y', 'z') + Parameters + ---------- + atom_site : dict[str, Any] + Dictionary containing atom position data (mutated in place). + parsed_exprs : list[Expr] + Three sympy expressions from the Wyckoff position. + """ x, y, z = symbols('x y z') - symbols_xyz: tuple[Symbol, ...] = (x, y, z) + symbols_xyz = (x, y, z) + axes = ('x', 'y', 'z') + substitutions = { + 'x': sympify(atom_site['fract_x']), + 'y': sympify(atom_site['fract_y']), + 'z': sympify(atom_site['fract_z']), + } for i, axis in enumerate(axes): - symbol = symbols_xyz[i] - is_free = any(symbol in expr.free_symbols for expr in parsed_exprs) - + is_free = any(symbols_xyz[i] in expr.free_symbols for expr in parsed_exprs) if not is_free: - evaluated = parsed_exprs[i].subs(substitutions) - simplified = simplify(evaluated) - atom_site[f'fract_{axis}'] = float(simplified) + evaluated = simplify(parsed_exprs[i].subs(substitutions)) + atom_site[f'fract_{axis}'] = float(evaluated) + + +def apply_atom_site_symmetry_constraints( + atom_site: dict[str, Any], + name_hm: str, + coord_code: int, + wyckoff_letter: str, +) -> dict[str, Any]: + """ + Apply symmetry constraints to atom site coordinates. + + Parameters + ---------- + atom_site : dict[str, Any] + Dictionary containing atom position data. + name_hm : str + Hermann-Mauguin symbol of the space group. + coord_code : int + Coordinate system code. + wyckoff_letter : str + Wyckoff position letter. + + Returns + ------- + dict[str, Any] + The atom_site dictionary with applied symmetry constraints. + """ + parsed_exprs = _get_wyckoff_exprs(name_hm, coord_code, wyckoff_letter) + if parsed_exprs is None: + return atom_site + _apply_fract_constraints(atom_site, parsed_exprs) return atom_site diff --git a/src/easydiffraction/display/plotting.py b/src/easydiffraction/display/plotting.py index f3fbce29..24988926 100644 --- a/src/easydiffraction/display/plotting.py +++ b/src/easydiffraction/display/plotting.py @@ -7,6 +7,7 @@ consistent configuration surface and engine handling. """ +import pathlib from enum import StrEnum import numpy as np @@ -66,11 +67,25 @@ def __init__(self) -> None: self._x_max = DEFAULT_MAX # Chart height self.height = DEFAULT_HEIGHT + # Back-reference to the owning Project (set via _set_project) + self._project = None # ------------------------------------------------------------------ # Private class methods # ------------------------------------------------------------------ + def _set_project(self, project: object) -> None: + """Wire the owning project for high-level plot methods.""" + self._project = project + + def _update_project_categories(self, expt_name: str) -> None: + """Update all project categories before plotting.""" + for structure in self._project.structures: + structure._update_categories() + self._project.analysis._update_categories() + experiment = self._project.experiments[expt_name] + experiment._update_categories() + @classmethod def _factory(cls) -> type[RendererFactoryBase]: # type: ignore[override] return PlotterFactory @@ -335,6 +350,155 @@ def show_config(self) -> None: TableRenderer.get().render(df) def plot_meas( + self, + expt_name: str, + x_min: float | None = None, + x_max: float | None = None, + x: object | None = None, + ) -> None: + """ + Plot measured diffraction data for an experiment. + + Parameters + ---------- + expt_name : str + Name of the experiment to plot. + x_min : float | None, default=None + Lower bound for the x-axis range. + x_max : float | None, default=None + Upper bound for the x-axis range. + x : object | None, default=None + Optional explicit x-axis data to override stored values. + """ + self._update_project_categories(expt_name) + experiment = self._project.experiments[expt_name] + self._plot_meas_data( + experiment.data, + expt_name, + experiment.type, + x_min=x_min, + x_max=x_max, + x=x, + ) + + def plot_calc( + self, + expt_name: str, + x_min: float | None = None, + x_max: float | None = None, + x: object | None = None, + ) -> None: + """ + Plot calculated diffraction pattern for an experiment. + + Parameters + ---------- + expt_name : str + Name of the experiment to plot. + x_min : float | None, default=None + Lower bound for the x-axis range. + x_max : float | None, default=None + Upper bound for the x-axis range. + x : object | None, default=None + Optional explicit x-axis data to override stored values. + """ + self._update_project_categories(expt_name) + experiment = self._project.experiments[expt_name] + self._plot_calc_data( + experiment.data, + expt_name, + experiment.type, + x_min=x_min, + x_max=x_max, + x=x, + ) + + def plot_meas_vs_calc( + self, + expt_name: str, + x_min: float | None = None, + x_max: float | None = None, + show_residual: bool = False, + x: object | None = None, + ) -> None: + """ + Plot measured vs calculated data for an experiment. + + Parameters + ---------- + expt_name : str + Name of the experiment to plot. + x_min : float | None, default=None + Lower bound for the x-axis range. + x_max : float | None, default=None + Upper bound for the x-axis range. + show_residual : bool, default=False + When ``True``, include the residual (difference) curve. + x : object | None, default=None + Optional explicit x-axis data to override stored values. + """ + self._update_project_categories(expt_name) + experiment = self._project.experiments[expt_name] + self._plot_meas_vs_calc_data( + experiment, + expt_name, + x_min=x_min, + x_max=x_max, + show_residual=show_residual, + x=x, + ) + + def plot_param_series( + self, + param: object, + versus: object | None = None, + ) -> None: + """ + Plot a parameter's value across sequential fit results. + + When a ``results.csv`` file exists in the project's + ``analysis/`` directory, data is read from CSV. Otherwise, + falls back to in-memory parameter snapshots (produced by + ``fit()`` in single mode). + + Parameters + ---------- + param : object + Parameter descriptor whose ``unique_name`` identifies the + values to plot. + versus : object | None, default=None + A diffrn descriptor (e.g. + ``expt.diffrn.ambient_temperature``) whose value is used as + the x-axis for each experiment. When ``None``, the + experiment sequence number is used instead. + """ + unique_name = param.unique_name + + # Try CSV first (produced by fit_sequential or future fit) + csv_path = None + if self._project.info.path is not None: + candidate = pathlib.Path(self._project.info.path) / 'analysis' / 'results.csv' + if candidate.is_file(): + csv_path = str(candidate) + + if csv_path is not None: + self._plot_param_series_from_csv( + csv_path=csv_path, + unique_name=unique_name, + param_descriptor=param, + versus_descriptor=versus, + ) + else: + # Fallback: in-memory snapshots from fit() single mode + versus_name = versus.name if versus is not None else None + self._plot_param_series_from_snapshots( + unique_name, + versus_name, + self._project.experiments, + self._project.analysis._parameter_snapshots, + ) + + def _plot_meas_data( self, pattern: object, expt_name: str, @@ -360,8 +524,7 @@ def plot_meas( x_max : object, default=None Optional maximum x-axis limit. x : object, default=None - X-axis type (``'two_theta'``, ``'time_of_flight'``, or - ``'d_spacing'``). If ``None``, auto-detected from beam mode. + X-axis type. If ``None``, auto-detected from beam mode. """ ctx = self._prepare_powder_context( pattern, @@ -390,7 +553,7 @@ def plot_meas( height=self.height, ) - def plot_calc( + def _plot_calc_data( self, pattern: object, expt_name: str, @@ -416,8 +579,7 @@ def plot_calc( x_max : object, default=None Optional maximum x-axis limit. x : object, default=None - X-axis type (``'two_theta'``, ``'time_of_flight'``, or - ``'d_spacing'``). If ``None``, auto-detected from beam mode. + X-axis type. If ``None``, auto-detected from beam mode. """ ctx = self._prepare_powder_context( pattern, @@ -446,7 +608,7 @@ def plot_calc( height=self.height, ) - def plot_meas_vs_calc( + def _plot_meas_vs_calc_data( self, experiment: object, expt_name: str, @@ -556,7 +718,7 @@ def plot_meas_vs_calc( height=self.height, ) - def plot_param_series( + def _plot_param_series_from_csv( self, csv_path: str, unique_name: str, @@ -629,7 +791,7 @@ def plot_param_series( height=self.height, ) - def plot_param_series_from_snapshots( + def _plot_param_series_from_snapshots( self, unique_name: str, versus_name: str | None, diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index 1b1d3792..896870f9 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -527,6 +527,46 @@ def category_item_from_cif( param.from_cif(block, idx=idx) +def _set_param_from_raw_cif_value( + param: GenericDescriptorBase, + raw: str, +) -> None: + """ + Parse a raw CIF string and set the parameter value. + + Handles numeric values (with optional uncertainty in brackets), + quoted strings, and unknown/inapplicable CIF markers. + + Parameters + ---------- + param : GenericDescriptorBase + The parameter to update. + raw : str + The raw string from the CIF loop cell. + """ + # CIF unknown / inapplicable markers → keep default + if raw in {'?', '.'}: + return + + if param._value_type == DataTypes.NUMERIC: + has_brackets = '(' in raw + u = str_to_ufloat(raw) + param.value = u.n + if has_brackets and hasattr(param, 'free'): + param.free = True # type: ignore[attr-defined] + if not np.isnan(u.s) and hasattr(param, 'uncertainty'): + param.uncertainty = u.s # type: ignore[attr-defined] + + # If string, strip quotes if present + # TODO: Make a helper function for this + elif param._value_type == DataTypes.STRING: + is_quoted = len(raw) >= _MIN_QUOTED_LEN and raw[0] == raw[-1] and raw[0] in {"'", '"'} + param.value = raw[1:-1] if is_quoted else raw + + else: + log.debug(f'Unrecognized type: {param._value_type}') + + def category_collection_from_cif( self: CategoryCollection, block: gemmi.cif.Block, @@ -593,40 +633,7 @@ def _get_loop(block: object, category_item: object) -> object | None: for cif_name in param._cif_handler.names: if cif_name in loop.tags: col_idx = loop.tags.index(cif_name) - # TODO: The following is duplication of # param_from_cif - raw = array[row_idx][col_idx] - - # CIF unknown / inapplicable markers → keep default - if raw in {'?', '.'}: - break - - # If numeric, parse with uncertainty if present - if param._value_type == DataTypes.NUMERIC: - has_brackets = '(' in raw - u = str_to_ufloat(raw) - param.value = u.n - if has_brackets and hasattr(param, 'free'): - param.free = True # type: ignore[attr-defined] - if not np.isnan(u.s) and hasattr(param, 'uncertainty'): - param.uncertainty = u.s # type: ignore[attr-defined] - - # If string, strip quotes if present - # TODO: Make a helper function for this - elif param._value_type == DataTypes.STRING: - is_quoted = ( - len(raw) >= _MIN_QUOTED_LEN - and raw[0] == raw[-1] - and raw[0] in {"'", '"'} - ) - if is_quoted: - param.value = raw[1:-1] - else: - param.value = raw - - # Other types are not supported - else: - log.debug(f'Unrecognized type: {param._value_type}') - + _set_param_from_raw_cif_value(param, array[row_idx][col_idx]) break diff --git a/src/easydiffraction/project/project.py b/src/easydiffraction/project/project.py index af411907..e4e15f80 100644 --- a/src/easydiffraction/project/project.py +++ b/src/easydiffraction/project/project.py @@ -24,6 +24,39 @@ from easydiffraction.utils.logging import log +def _apply_csv_row_to_params( + row: object, + columns: object, + param_map: dict[str, object], + meta_columns: set[str], +) -> None: + """ + Override parameter values and uncertainties from a CSV row. + + Parameters + ---------- + row : object + A pandas Series representing one CSV row. + columns : object + The DataFrame column index. + param_map : dict[str, object] + Map of ``unique_name`` → live Parameter objects. + meta_columns : set[str] + Column names to skip (non-parameter metadata). + """ + import pandas as pd # noqa: PLC0415 + + for col_name in columns: + if col_name in meta_columns or col_name.startswith('diffrn.'): + continue + if col_name.endswith('.uncertainty'): + base_name = col_name.removesuffix('.uncertainty') + if base_name in param_map and pd.notna(row[col_name]): + param_map[base_name].uncertainty = float(row[col_name]) + elif col_name in param_map and pd.notna(row[col_name]): + param_map[col_name].value = float(row[col_name]) + + class Project(GuardedBase): """ Central API for managing a diffraction data analysis project. @@ -50,6 +83,7 @@ def __init__( self._experiments = Experiments() self._tabler = TableRenderer.get() self._plotter = Plotter() + self._plotter._set_project(self) self._analysis = Analysis(self) self._summary = Summary(self) self._saved = False @@ -371,7 +405,7 @@ def apply_params_from_csv(self, row_index: int) -> None: sequential-fit results where ``file_path`` points to a real file) reloads the measured data into the template experiment. - After calling this method, ``plot_meas_vs_calc()`` will show the + After calling this method, ``plotter.plot_meas_vs_calc()`` will fit for that specific dataset. Parameters @@ -420,32 +454,14 @@ def apply_params_from_csv(self, row_index: int) -> None: experiment = next(iter(self.experiments.values())) experiment._load_ascii_data_to_experiment(file_path) - # 2. Override parameter values + # 2. Override parameter values and uncertainties all_params = self.structures.parameters + self.experiments.parameters param_map = { p.unique_name: p for p in all_params if isinstance(p, Parameter) and hasattr(p, 'unique_name') } - - skip_cols = set(_META_COLUMNS) - for col_name in df.columns: - if col_name in skip_cols: - continue - if col_name.startswith('diffrn.'): - continue - if col_name.endswith('.uncertainty'): - continue - if col_name in param_map and pd.notna(row[col_name]): - param_map[col_name].value = float(row[col_name]) - - # 3. Apply uncertainties - for col_name in df.columns: - if not col_name.endswith('.uncertainty'): - continue - base_name = col_name.removesuffix('.uncertainty') - if base_name in param_map and pd.notna(row[col_name]): - param_map[base_name].uncertainty = float(row[col_name]) + _apply_csv_row_to_params(row, df.columns, param_map, set(_META_COLUMNS)) # 4. Force recalculation: data was replaced directly (bypassing # value setters), so the dirty flag may not be set. @@ -455,162 +471,3 @@ def apply_params_from_csv(self, row_index: int) -> None: experiment._need_categories_update = True log.info(f'Applied parameters from CSV row {row_index} (file: {file_path}).') - - # ------------------------------------------ - # Plotting - # ------------------------------------------ - - def _update_categories(self, expt_name: str) -> None: - for structure in self.structures: - structure._update_categories() - self.analysis._update_categories() - experiment = self.experiments[expt_name] - experiment._update_categories() - - def plot_meas( - self, - expt_name: str, - x_min: float | None = None, - x_max: float | None = None, - x: object | None = None, - ) -> None: - """ - Plot measured diffraction data for an experiment. - - Parameters - ---------- - expt_name : str - Name of the experiment to plot. - x_min : float | None, default=None - Lower bound for the x-axis range. - x_max : float | None, default=None - Upper bound for the x-axis range. - x : object | None, default=None - Optional explicit x-axis data to override stored values. - """ - self._update_categories(expt_name) - experiment = self.experiments[expt_name] - - self.plotter.plot_meas( - experiment.data, - expt_name, - experiment.type, - x_min=x_min, - x_max=x_max, - x=x, - ) - - def plot_calc( - self, - expt_name: str, - x_min: float | None = None, - x_max: float | None = None, - x: object | None = None, - ) -> None: - """ - Plot calculated diffraction pattern for an experiment. - - Parameters - ---------- - expt_name : str - Name of the experiment to plot. - x_min : float | None, default=None - Lower bound for the x-axis range. - x_max : float | None, default=None - Upper bound for the x-axis range. - x : object | None, default=None - Optional explicit x-axis data to override stored values. - """ - self._update_categories(expt_name) - experiment = self.experiments[expt_name] - - self.plotter.plot_calc( - experiment.data, - expt_name, - experiment.type, - x_min=x_min, - x_max=x_max, - x=x, - ) - - def plot_meas_vs_calc( - self, - expt_name: str, - x_min: float | None = None, - x_max: float | None = None, - show_residual: bool = False, - x: object | None = None, - ) -> None: - """ - Plot measured vs calculated data for an experiment. - - Parameters - ---------- - expt_name : str - Name of the experiment to plot. - x_min : float | None, default=None - Lower bound for the x-axis range. - x_max : float | None, default=None - Upper bound for the x-axis range. - show_residual : bool, default=False - When ``True``, include the residual (difference) curve. - x : object | None, default=None - Optional explicit x-axis data to override stored values. - """ - self._update_categories(expt_name) - experiment = self.experiments[expt_name] - - self.plotter.plot_meas_vs_calc( - experiment, - expt_name, - x_min=x_min, - x_max=x_max, - show_residual=show_residual, - x=x, - ) - - def plot_param_series(self, param: object, versus: object | None = None) -> None: - """ - Plot a parameter's value across sequential fit results. - - When a ``results.csv`` file exists in the project's - ``analysis/`` directory, data is read from CSV. Otherwise, - falls back to in-memory parameter snapshots (produced by - ``fit()`` in single mode). - - Parameters - ---------- - param : object - Parameter descriptor whose ``unique_name`` identifies the - values to plot. - versus : object | None, default=None - A diffrn descriptor (e.g. - ``expt.diffrn.ambient_temperature``) whose value is used as - the x-axis for each experiment. When ``None``, the - experiment sequence number is used instead. - """ - unique_name = param.unique_name - - # Try CSV first (produced by fit_sequential or future fit) - csv_path = None - if self.info.path is not None: - candidate = pathlib.Path(self.info.path) / 'analysis' / 'results.csv' - if candidate.is_file(): - csv_path = str(candidate) - - if csv_path is not None: - self.plotter.plot_param_series( - csv_path=csv_path, - unique_name=unique_name, - param_descriptor=param, - versus_descriptor=versus, - ) - else: - # Fallback: in-memory snapshots from fit() single mode - versus_name = versus.name if versus is not None else None - self.plotter.plot_param_series_from_snapshots( - unique_name, - versus_name, - self.experiments, - self.analysis._parameter_snapshots, - ) diff --git a/tests/integration/fitting/test_sequential.py b/tests/integration/fitting/test_sequential.py index fc82cfc7..4fb80209 100644 --- a/tests/integration/fitting/test_sequential.py +++ b/tests/integration/fitting/test_sequential.py @@ -16,7 +16,6 @@ from easydiffraction import Project from easydiffraction import StructureFactory from easydiffraction import download_data -from easydiffraction.utils.enums import VerbosityEnum TEMP_DIR = tempfile.gettempdir() @@ -76,7 +75,6 @@ def _create_sequential_project(tmp_path: Path) -> tuple[Project, str]: expt = ExperimentFactory.from_data_path( name='template', data_path=data_path, - verbosity=VerbosityEnum.SILENT, ) expt.instrument.setup_wavelength = 1.494 expt.instrument.calib_twotheta_offset = 0.6225 @@ -250,7 +248,6 @@ def test_fit_sequential_requires_saved_project(tmp_path) -> None: expt = ExperimentFactory.from_data_path( name='e', data_path=data_path, - verbosity=VerbosityEnum.SILENT, ) expt.linked_phases.create(id='s', scale=1.0) expt.linked_phases['s'].scale.free = True diff --git a/tests/unit/easydiffraction/analysis/test_analysis.py b/tests/unit/easydiffraction/analysis/test_analysis.py index 0b0a8944..fc4f0135 100644 --- a/tests/unit/easydiffraction/analysis/test_analysis.py +++ b/tests/unit/easydiffraction/analysis/test_analysis.py @@ -110,11 +110,10 @@ def test_analysis_help(capsys): assert 'Properties' in out assert 'Methods' in out assert 'fit()' in out - assert 'show_fit_results()' in out -def test_show_fit_results_warns_when_no_results(capsys): - """Test that show_fit_results logs a warning when fit() has not been run.""" +def test_display_fit_results_warns_when_no_results(capsys): + """Test that display.fit_results logs a warning when fit() has not been run.""" from easydiffraction.analysis.analysis import Analysis a = Analysis(project=_make_project_with_names([])) @@ -122,13 +121,13 @@ def test_show_fit_results_warns_when_no_results(capsys): # Ensure fit_results is not set assert not hasattr(a, 'fit_results') or a.fit_results is None - a.show_fit_results() + a.display.fit_results() out = capsys.readouterr().out assert 'No fit results available' in out -def test_show_fit_results_calls_process_fit_results(monkeypatch): - """Test that show_fit_results delegates to fitter._process_fit_results.""" +def test_display_fit_results_calls_process_fit_results(monkeypatch): + """Test that display.fit_results delegates to fitter._process_fit_results.""" from easydiffraction.analysis.analysis import Analysis # Track if _process_fit_results was called @@ -158,12 +157,12 @@ def values(self): a = Analysis(project=project) - # Set up fit_results so show_fit_results doesn't return early + # Set up fit_results so display.fit_results doesn't return early a.fit_results = object() # Mock the fitter's _process_fit_results method monkeypatch.setattr(a.fitter, '_process_fit_results', mock_process_fit_results) - a.show_fit_results() + a.display.fit_results() assert process_called['called'], '_process_fit_results should be called' diff --git a/tests/unit/easydiffraction/analysis/test_analysis_access_params.py b/tests/unit/easydiffraction/analysis/test_analysis_access_params.py index bdd5ead0..b7d9f895 100644 --- a/tests/unit/easydiffraction/analysis/test_analysis_access_params.py +++ b/tests/unit/easydiffraction/analysis/test_analysis_access_params.py @@ -49,7 +49,7 @@ def fake_render_table(**kwargs): monkeypatch.setattr(analysis_mod, 'render_table', fake_render_table) a = Analysis(Project()) - a.how_to_access_parameters() + a.display.how_to_access_parameters() out = capsys.readouterr().out assert 'How to access parameters' in out @@ -74,7 +74,7 @@ def fake_render_table2(**kwargs): captured2.update(kwargs) monkeypatch.setattr(analysis_mod, 'render_table', fake_render_table2) - a.show_parameter_cif_uids() + a.display.parameter_cif_uids() headers2 = captured2.get('columns_headers') or [] data2 = captured2.get('columns_data') or [] assert 'Unique Identifier for CIF Constraints' in headers2 diff --git a/tests/unit/easydiffraction/analysis/test_analysis_show_empty.py b/tests/unit/easydiffraction/analysis/test_analysis_show_empty.py index 7f2895b4..4b8674fc 100644 --- a/tests/unit/easydiffraction/analysis/test_analysis_show_empty.py +++ b/tests/unit/easydiffraction/analysis/test_analysis_show_empty.py @@ -25,12 +25,12 @@ class P: a = Analysis(project=P()) - # show_all_params -> warning path - a.show_all_params() - # show_fittable_params -> warning path - a.show_fittable_params() - # show_free_params -> warning path - a.show_free_params() + # display.all_params -> warning path + a.display.all_params() + # display.fittable_params -> warning path + a.display.fittable_params() + # display.free_params -> warning path + a.display.free_params() out = capsys.readouterr().out assert ( diff --git a/tests/unit/easydiffraction/display/test_plotting.py b/tests/unit/easydiffraction/display/test_plotting.py index 356c793e..840a61df 100644 --- a/tests/unit/easydiffraction/display/test_plotting.py +++ b/tests/unit/easydiffraction/display/test_plotting.py @@ -82,19 +82,19 @@ def __init__(self): p = Plotter() # Error paths (now log errors via console; messages are printed) - p.plot_meas(Ptn(two_theta=None, intensity_meas=None), 'E', ExptType()) + p._plot_meas_data(Ptn(two_theta=None, intensity_meas=None), 'E', ExptType()) out = capsys.readouterr().out assert 'No two_theta data available for experiment E' in out - p.plot_meas(Ptn(two_theta=[1], intensity_meas=None), 'E', ExptType()) + p._plot_meas_data(Ptn(two_theta=[1], intensity_meas=None), 'E', ExptType()) out = capsys.readouterr().out assert 'No measured data available for experiment E' in out - p.plot_calc(Ptn(two_theta=None, intensity_calc=None), 'E', ExptType()) + p._plot_calc_data(Ptn(two_theta=None, intensity_calc=None), 'E', ExptType()) out = capsys.readouterr().out assert 'No two_theta data available for experiment E' in out - p.plot_calc(Ptn(two_theta=[1], intensity_calc=None), 'E', ExptType()) + p._plot_calc_data(Ptn(two_theta=[1], intensity_calc=None), 'E', ExptType()) out = capsys.readouterr().out assert 'No calculated data available for experiment E' in out @@ -103,19 +103,19 @@ def __init__(self, pattern, expt_type): self.data = pattern self.type = expt_type - p.plot_meas_vs_calc( + p._plot_meas_vs_calc_data( Expt(Ptn(two_theta=None, intensity_meas=None, intensity_calc=None), ExptType()), 'E', ) out = capsys.readouterr().out assert 'No measured data available for experiment E' in out - p.plot_meas_vs_calc( + p._plot_meas_vs_calc_data( Expt(Ptn(two_theta=[1], intensity_meas=None, intensity_calc=[1]), ExptType()), 'E', ) out = capsys.readouterr().out assert 'No measured data available for experiment E' in out - p.plot_meas_vs_calc( + p._plot_meas_vs_calc_data( Expt(Ptn(two_theta=[1], intensity_meas=[1], intensity_calc=None), ExptType()), 'E', ) @@ -163,6 +163,6 @@ def __init__(self): p = Plotter() p.engine = 'asciichartpy' # ensure AsciiPlotter - p.plot_meas(Ptn(), 'E', ExptType()) + p._plot_meas_data(Ptn(), 'E', ExptType()) assert called['labels'] == ('meas',) assert 'Measured data' in called['title'] diff --git a/tmp/_read_cif.py b/tmp/_read_cif.py index 1c273019..3e08bbfb 100644 --- a/tmp/_read_cif.py +++ b/tmp/_read_cif.py @@ -168,7 +168,7 @@ line_segment.y.free = True # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% project.analysis.fit() diff --git a/tmp/basic_single-fit_pd-neut-cwl_LBCO-HRPT.py b/tmp/basic_single-fit_pd-neut-cwl_LBCO-HRPT.py index 18db2b22..05e82bc9 100644 --- a/tmp/basic_single-fit_pd-neut-cwl_LBCO-HRPT.py +++ b/tmp/basic_single-fit_pd-neut-cwl_LBCO-HRPT.py @@ -413,25 +413,25 @@ # Show all parameters of the project. # %% -project.analysis.show_all_params() +project.analysis.display.all_params() # %% [markdown] # Show all fittable parameters. # %% -project.analysis.show_fittable_params() +project.analysis.display.fittable_params() # %% [markdown] # Show only free parameters. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # Show how to access parameters in the code. # %% -project.analysis.how_to_access_parameters() +project.analysis.display.how_to_access_parameters() # %% [markdown] # #### Set Fit Mode @@ -483,7 +483,7 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting @@ -523,7 +523,7 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting @@ -561,7 +561,7 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting @@ -611,13 +611,13 @@ # Show defined constraints. # %% -project.analysis.show_constraints() +project.analysis.display.constraints() # %% [markdown] # Show free parameters before applying constraints. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # Apply constraints. @@ -629,7 +629,7 @@ # Show free parameters after applying constraints. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting @@ -682,7 +682,7 @@ # Show defined constraints. # %% -project.analysis.show_constraints() +project.analysis.display.constraints() # %% [markdown] # Apply constraints. @@ -700,7 +700,7 @@ # Show free parameters after selection. # %% -project.analysis.show_free_params() +project.analysis.display.free_params() # %% [markdown] # #### Run Fitting diff --git a/tmp/short.py b/tmp/short.py index 4c731568..18b7c744 100644 --- a/tmp/short.py +++ b/tmp/short.py @@ -91,4 +91,4 @@ models['lbco'].cell.length_a.free = True print('----', models['lbco'].cell.length_a.free) -# proj.analysis.show_free_params() +# proj.analysis.display.free_params() diff --git a/tmp/short2.py b/tmp/short2.py index d1bd5eb1..63751a27 100644 --- a/tmp/short2.py +++ b/tmp/short2.py @@ -220,7 +220,7 @@ def set_as_initial(): print('----', models['lbco'].cell.length_a.free) -proj.analysis.show_free_params() +proj.analysis.display.free_params() proj.analysis.fit() # proj.plotter.engine = 'plotly' From 8b2d9afaebacec56ffc61d80b4aac26803c5dec4 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sat, 4 Apr 2026 23:59:28 +0200 Subject: [PATCH 40/51] Reduce McCabe complexity for C90 rule compliance --- pixi.lock | 4 +- pyproject.toml | 2 +- src/easydiffraction/io/cif/serialize.py | 37 ++- .../analysis/calculators/test_pdffit.py | 124 +++++----- .../summary/test_summary_details.py | 221 +++++++++--------- 5 files changed, 213 insertions(+), 175 deletions(-) diff --git a/pixi.lock b/pixi.lock index ccc7e32c..2fccea46 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty42 - sha256: 8b87b54bac3ff3c4832bb827e7bc7e201d696a6c33b5a71f36c2e5f59a05b18e + version: 0.11.1+devdirty43 + sha256: 84c95eaa08425acda985053e8c854202b9e7626545dbb06624c9bc14d40dd992 requires_dist: - asciichartpy - asteval diff --git a/pyproject.toml b/pyproject.toml index 30b6953c..66201907 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -225,7 +225,7 @@ quote-style = 'single' # But double quotes in docstrings (PEP 8, PEP 25 [tool.ruff.lint] select = [ # Various rules - #'C90', # https://docs.astral.sh/ruff/rules/#mccabe-c90 + 'C90', # https://docs.astral.sh/ruff/rules/#mccabe-c90 'D', # https://docs.astral.sh/ruff/rules/#pydocstyle-d 'F', # https://docs.astral.sh/ruff/rules/#pyflakes-f 'FLY', # https://docs.astral.sh/ruff/rules/#flynt-fly diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index 896870f9..d7b78ed5 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -567,6 +567,33 @@ def _set_param_from_raw_cif_value( log.debug(f'Unrecognized type: {param._value_type}') +def _find_loop_for_category( + block: object, + category_item: object, +) -> object | None: + """ + Find the first CIF loop that matches a category item's parameters. + + Parameters + ---------- + block : object + Parsed CIF block to search. + category_item : object + Category item whose parameters provide CIF names. + + Returns + ------- + object | None + The matching loop, or ``None`` if not found. + """ + for param in category_item.parameters: + for name in param._cif_handler.names: + loop = block.find_loop(name).get_loop() + if loop is not None: + return loop + return None + + def category_collection_from_cif( self: CategoryCollection, block: gemmi.cif.Block, @@ -599,15 +626,7 @@ def category_collection_from_cif( # Iterate over category parameters and their possible CIF names # trying to find the whole loop it belongs to inside the CIF block - def _get_loop(block: object, category_item: object) -> object | None: - for param in category_item.parameters: - for name in param._cif_handler.names: - loop = block.find_loop(name).get_loop() - if loop is not None: - return loop - return None - - loop = _get_loop(block, category_item) + loop = _find_loop_for_category(block, category_item) # If no loop found if loop is None: diff --git a/tests/unit/easydiffraction/analysis/calculators/test_pdffit.py b/tests/unit/easydiffraction/analysis/calculators/test_pdffit.py index 20d17c17..6475cb79 100644 --- a/tests/unit/easydiffraction/analysis/calculators/test_pdffit.py +++ b/tests/unit/easydiffraction/analysis/calculators/test_pdffit.py @@ -23,77 +23,87 @@ def test_pdffit_engine_flag_and_hkl_message(capsys): assert 'HKLs (not applicable)' in printed -def test_pdffit_cif_v2_to_v1_regex_behavior(monkeypatch): - # Exercise the regex conversion path indirectly by providing minimal objects - from easydiffraction.analysis.calculators.pdffit import PdffitCalculator +# -- Stub classes for test_pdffit_cif_v2_to_v1_regex_behavior ---------- - class DummyParam: - def __init__(self, v): - self.value = v - - class DummyPeak: - # provide required attributes used in calculation - def __init__(self): - self.sharp_delta_1 = DummyParam(0.0) - self.sharp_delta_2 = DummyParam(0.0) - self.damp_particle_diameter = DummyParam(0.0) - self.cutoff_q = DummyParam(1.0) - self.damp_q = DummyParam(0.0) - self.broad_q = DummyParam(0.0) - - class DummyLinkedPhases(dict): - def __getitem__(self, k): - return type('LP', (), {'scale': DummyParam(1.0)})() - - class DummyExperiment: - def __init__(self): - self.name = 'E' - self.peak = DummyPeak() - self.data = type('D', (), {'x': np.linspace(0.0, 1.0, 5)})() - self.type = type('T', (), {'radiation_probe': type('P', (), {'value': 'neutron'})()})() - self.linked_phases = DummyLinkedPhases() - - class DummyStructure: - name = 'PhaseA' - - @property - def as_cif(self): - # CIF v2-like tags with dots between letters - return '_atom.site.label A1\n_cell.length_a 1.0' - # Monkeypatch PdfFit and parser to avoid real engine usage - import easydiffraction.analysis.calculators.pdffit as mod +class _DummyParam: + def __init__(self, v): + self.value = v + + +class _DummyPeak: + def __init__(self): + self.sharp_delta_1 = _DummyParam(0.0) + self.sharp_delta_2 = _DummyParam(0.0) + self.damp_particle_diameter = _DummyParam(0.0) + self.cutoff_q = _DummyParam(1.0) + self.damp_q = _DummyParam(0.0) + self.broad_q = _DummyParam(0.0) + + +class _DummyLinkedPhases(dict): + def __getitem__(self, k): + return type('LP', (), {'scale': _DummyParam(1.0)})() + + +class _DummyExperiment: + def __init__(self): + self.name = 'E' + self.peak = _DummyPeak() + self.data = type('D', (), {'x': np.linspace(0.0, 1.0, 5)})() + self.type = type('T', (), {'radiation_probe': type('P', (), {'value': 'neutron'})()})() + self.linked_phases = _DummyLinkedPhases() + + +class _DummyStructure: + name = 'PhaseA' - class FakePdf: - def add_structure(self, s): - pass + @property + def as_cif(self): + return '_atom.site.label A1\n_cell.length_a 1.0' - def setvar(self, *a, **k): - pass - def read_data_lists(self, *a, **k): - pass +class _FakePdf: + def add_structure(self, s): + pass - def calc(self): - pass + def setvar(self, *a, **k): + pass - def getpdf_fit(self): - return [0.0, 0.0, 0.0, 0.0, 0.0] + def read_data_lists(self, *a, **k): + pass - class FakeParser: - def parse(self, text): - # Ensure the dot between letters is converted to underscore - assert '_atom_site_label' in text or '_atom.site.label' not in text - return object() + def calc(self): + pass + + def getpdf_fit(self): + return [0.0, 0.0, 0.0, 0.0, 0.0] + + +class _FakeParser: + def parse(self, text): + assert '_atom_site_label' in text or '_atom.site.label' not in text + return object() + + +# ---------------------------------------------------------------------- + + +def test_pdffit_cif_v2_to_v1_regex_behavior(monkeypatch): + # Exercise the regex conversion path indirectly by providing minimal objects + from easydiffraction.analysis.calculators.pdffit import PdffitCalculator + + # Monkeypatch PdfFit and parser to avoid real engine usage + import easydiffraction.analysis.calculators.pdffit as mod - monkeypatch.setattr(mod, 'PdfFit', FakePdf) - monkeypatch.setattr(mod, 'pdffit_cif_parser', lambda: FakeParser()) + monkeypatch.setattr(mod, 'PdfFit', _FakePdf) + monkeypatch.setattr(mod, 'pdffit_cif_parser', lambda: _FakeParser()) monkeypatch.setattr(mod, 'redirect_stdout', lambda *a, **k: None) monkeypatch.setattr(mod, '_pdffit_devnull', None, raising=False) calc = PdffitCalculator() pattern = calc.calculate_pattern( - DummyStructure(), DummyExperiment(), called_by_minimizer=False + _DummyStructure(), _DummyExperiment(), called_by_minimizer=False ) assert isinstance(pattern, np.ndarray) assert pattern.shape[0] == 5 diff --git a/tests/unit/easydiffraction/summary/test_summary_details.py b/tests/unit/easydiffraction/summary/test_summary_details.py index 4dbce104..2ada0f57 100644 --- a/tests/unit/easydiffraction/summary/test_summary_details.py +++ b/tests/unit/easydiffraction/summary/test_summary_details.py @@ -1,116 +1,125 @@ # SPDX-FileCopyrightText: 2025 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause +# -- Stub classes for test_summary_crystallographic_and_experimental --- + + +class _Val: + def __init__(self, v): + self.value = v + + +class _CellParam: + def __init__(self, name, value): + self.name = name + self.value = value + + +class _Cell: + @property + def parameters(self): + return [ + _CellParam('length_a', 5.4321), + _CellParam('angle_alpha', 90.0), + ] + + +class _Site: + def __init__(self, label, typ, x, y, z, occ, biso): + self.label = _Val(label) + self.type_symbol = _Val(typ) + self.fract_x = _Val(x) + self.fract_y = _Val(y) + self.fract_z = _Val(z) + self.occupancy = _Val(occ) + self.b_iso = _Val(biso) + + +class _Model: + def __init__(self): + self.name = 'phaseA' + self.space_group = type('SG', (), {'name_h_m': _Val('P 1')})() + self.cell = _Cell() + self.atom_sites = [_Site('Na1', 'Na', 0.1, 0.2, 0.3, 1.0, 0.5)] + + +class _Instr: + def __init__(self): + self.setup_wavelength = _Val(1.23456) + self.calib_twotheta_offset = _Val(0.12345) + + def _public_attrs(self): + return ['setup_wavelength', 'calib_twotheta_offset'] + + +class _Peak: + def __init__(self): + self.broad_gauss_u = _Val(0.1) + self.broad_gauss_v = _Val(0.2) + self.broad_gauss_w = _Val(0.3) + self.broad_lorentz_x = _Val(0.4) + self.broad_lorentz_y = _Val(0.5) + + def _public_attrs(self): + return [ + 'broad_gauss_u', + 'broad_gauss_v', + 'broad_gauss_w', + 'broad_lorentz_x', + 'broad_lorentz_y', + ] + + +class _Expt: + def __init__(self): + self.name = 'exp1' + typ = type( + 'T', + (), + { + 'sample_form': _Val('powder'), + 'radiation_probe': _Val('neutron'), + 'beam_mode': _Val('constant wavelength'), + }, + ) + self.type = typ() + self.instrument = _Instr() + self.peak_profile_type = 'pseudo-Voigt' + self.peak = _Peak() + + def _public_attrs(self): + return ['instrument', 'peak_profile_type', 'peak'] + + +class _Info: + title = 'T' + description = '' + + +class _StubProject: + def __init__(self): + self.info = _Info() + self.structures = {'phaseA': _Model()} + self.experiments = {'exp1': _Expt()} + + class A: + current_minimizer = 'lmfit' + + class R: + reduced_chi_square = 1.23 + + fit_results = R() + + self.analysis = A() + + +# ---------------------------------------------------------------------- + def test_summary_crystallographic_and_experimental_sections(capsys): from easydiffraction.summary.summary import Summary - # Build a minimal structure stub that exposes required attributes - class Val: - def __init__(self, v): - self.value = v - - class CellParam: - def __init__(self, name, value): - self.name = name - self.value = value - - class Model: - def __init__(self): - self.name = 'phaseA' - self.space_group = type('SG', (), {'name_h_m': Val('P 1')})() - - class Cell: - @property - def parameters(self_inner): - return [ - CellParam('length_a', 5.4321), - CellParam('angle_alpha', 90.0), - ] - - self.cell = Cell() - - class Site: - def __init__(self, label, typ, x, y, z, occ, biso): - self.label = Val(label) - self.type_symbol = Val(typ) - self.fract_x = Val(x) - self.fract_y = Val(y) - self.fract_z = Val(z) - self.occupancy = Val(occ) - self.b_iso = Val(biso) - - self.atom_sites = [Site('Na1', 'Na', 0.1, 0.2, 0.3, 1.0, 0.5)] - - # Minimal experiment stub with instrument and peak info - class Expt: - def __init__(self): - self.name = 'exp1' - typ = type( - 'T', - (), - { - 'sample_form': Val('powder'), - 'radiation_probe': Val('neutron'), - 'beam_mode': Val('constant wavelength'), - }, - ) - self.type = typ() - - class Instr: - def __init__(self): - self.setup_wavelength = Val(1.23456) - self.calib_twotheta_offset = Val(0.12345) - - def _public_attrs(self): - return ['setup_wavelength', 'calib_twotheta_offset'] - - self.instrument = Instr() - self.peak_profile_type = 'pseudo-Voigt' - - class Peak: - def __init__(self): - self.broad_gauss_u = Val(0.1) - self.broad_gauss_v = Val(0.2) - self.broad_gauss_w = Val(0.3) - self.broad_lorentz_x = Val(0.4) - self.broad_lorentz_y = Val(0.5) - - def _public_attrs(self): - return [ - 'broad_gauss_u', - 'broad_gauss_v', - 'broad_gauss_w', - 'broad_lorentz_x', - 'broad_lorentz_y', - ] - - self.peak = Peak() - - def _public_attrs(self): - return ['instrument', 'peak_profile_type', 'peak'] - - class Info: - title = 'T' - description = '' - - class Project: - def __init__(self): - self.info = Info() - self.structures = {'phaseA': Model()} - self.experiments = {'exp1': Expt()} - - class A: - current_minimizer = 'lmfit' - - class R: - reduced_chi_square = 1.23 - - fit_results = R() - - self.analysis = A() - - s = Summary(Project()) + s = Summary(_StubProject()) # Run both sections separately for targeted assertions s.show_crystallographic_data() s.show_experimental_data() From 24f23970e7e4a9a9aa4494edf603709c587032bf Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 00:09:19 +0200 Subject: [PATCH 41/51] Fix all FURB rule violations --- pixi.lock | 4 +- pyproject.toml | 10 +- src/easydiffraction/analysis/analysis.py | 2 +- .../analysis/calculators/cryspy.py | 96 +++++++++++-------- src/easydiffraction/analysis/sequential.py | 7 +- .../categories/space_group/default.py | 2 +- src/easydiffraction/io/cif/serialize.py | 17 ++-- .../dream/test_analyze_reduced_data.py | 3 +- .../analysis/calculators/test_pdffit.py | 4 +- .../analysis/fit_helpers/test_metrics.py | 4 +- .../analysis/minimizers/test_lmfit.py | 3 +- .../easydiffraction/core/test_collection.py | 2 +- .../display/tablers/test_base.py | 4 +- 13 files changed, 88 insertions(+), 70 deletions(-) diff --git a/pixi.lock b/pixi.lock index 2fccea46..7f3a3c23 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty43 - sha256: 84c95eaa08425acda985053e8c854202b9e7626545dbb06624c9bc14d40dd992 + version: 0.11.1+devdirty44 + sha256: ef1cbe99f8c48157b1e1a1069e3807a0a6b4fc35c84d310513d5fb1d39f61d1b requires_dist: - asciichartpy - asteval diff --git a/pyproject.toml b/pyproject.toml index 66201907..8c6b5767 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -225,11 +225,11 @@ quote-style = 'single' # But double quotes in docstrings (PEP 8, PEP 25 [tool.ruff.lint] select = [ # Various rules - 'C90', # https://docs.astral.sh/ruff/rules/#mccabe-c90 - 'D', # https://docs.astral.sh/ruff/rules/#pydocstyle-d - 'F', # https://docs.astral.sh/ruff/rules/#pyflakes-f - 'FLY', # https://docs.astral.sh/ruff/rules/#flynt-fly - #'FURB', # https://docs.astral.sh/ruff/rules/#refurb-furb + 'C90', # https://docs.astral.sh/ruff/rules/#mccabe-c90 + 'D', # https://docs.astral.sh/ruff/rules/#pydocstyle-d + 'F', # https://docs.astral.sh/ruff/rules/#pyflakes-f + 'FLY', # https://docs.astral.sh/ruff/rules/#flynt-fly + 'FURB', # https://docs.astral.sh/ruff/rules/#refurb-furb 'I', # https://docs.astral.sh/ruff/rules/#isort-i 'N', # https://docs.astral.sh/ruff/rules/#pep8-naming-n 'NPY', # https://docs.astral.sh/ruff/rules/#numpy-specific-rules-npy diff --git a/src/easydiffraction/analysis/analysis.py b/src/easydiffraction/analysis/analysis.py index 568b790e..91d5f4e1 100644 --- a/src/easydiffraction/analysis/analysis.py +++ b/src/easydiffraction/analysis/analysis.py @@ -778,7 +778,7 @@ def _fit_single( short_display_handle = self._fit_single_print_header(verb, expt_names, mode) short_rows: list[list[str]] = [] - for _idx, expt_name in enumerate(expt_names, start=1): + for expt_name in expt_names: if verb is VerbosityEnum.FULL: console.print(f"📋 Using experiment 🔬 '{expt_name}' for '{mode.value}' fitting") diff --git a/src/easydiffraction/analysis/calculators/cryspy.py b/src/easydiffraction/analysis/calculators/cryspy.py index 4030b2b9..1f5910d7 100644 --- a/src/easydiffraction/analysis/calculators/cryspy.py +++ b/src/easydiffraction/analysis/calculators/cryspy.py @@ -470,8 +470,7 @@ def _cif_instrument_section( } elif expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: instrument_mapping = {'setup_wavelength': '_setup_wavelength'} - cif_lines.append('') - cif_lines.append('_setup_field 0.0') + cif_lines.extend(('', '_setup_field 0.0')) elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: if expt_type.sample_form.value == SampleFormEnum.POWDER: instrument_mapping = { @@ -482,8 +481,7 @@ def _cif_instrument_section( } elif expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: instrument_mapping = {} # TODO: Check this mapping! - cif_lines.append('') - cif_lines.append('_setup_field 0.0') + cif_lines.extend(('', '_setup_field 0.0')) cif_lines.append('') for local_attr_name, engine_key_name in instrument_mapping.items(): @@ -541,8 +539,7 @@ def _cif_extinction_section( 'mosaicity': '_extinction_mosaicity', 'radius': '_extinction_radius', } - cif_lines.append('') - cif_lines.append('_extinction_model gauss') + cif_lines.extend(('', '_extinction_model gauss')) for local_attr_name, engine_key_name in extinction_mapping.items(): attr_obj = getattr(extinction, local_attr_name) if attr_obj is not None: @@ -579,11 +576,15 @@ def _cif_range_section( twotheta_max = f'{np.round(x_data.max(), 5):.5f}' cif_lines.append('') if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - cif_lines.append(f'_range_2theta_min {twotheta_min}') - cif_lines.append(f'_range_2theta_max {twotheta_max}') + cif_lines.extend(( + f'_range_2theta_min {twotheta_min}', + f'_range_2theta_max {twotheta_max}', + )) elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - cif_lines.append(f'_range_time_min {twotheta_min}') - cif_lines.append(f'_range_time_max {twotheta_max}') + cif_lines.extend(( + f'_range_time_min {twotheta_min}', + f'_range_time_max {twotheta_max}', + )) return twotheta_min, twotheta_max @@ -594,8 +595,7 @@ def _cif_orient_matrix_section( """Append hardcoded orientation matrix for single crystal.""" if expt_type.sample_form.value != SampleFormEnum.SINGLE_CRYSTAL: return - cif_lines.append('') - cif_lines.append('_diffrn_orient_matrix_type CCSL') + cif_lines.extend(('', '_diffrn_orient_matrix_type CCSL')) for tag, val in [ ('ub_11', '-0.088033'), ('ub_12', '-0.088004'), @@ -618,13 +618,17 @@ def _cif_phase_section( """Append phase label/scale to CIF.""" cif_lines.append('') if expt_type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL: - cif_lines.append(f'_phase_label {linked_structure.name}') - cif_lines.append('_phase_scale 1.0') + cif_lines.extend(( + f'_phase_label {linked_structure.name}', + '_phase_scale 1.0', + )) elif expt_type.sample_form.value == SampleFormEnum.POWDER: - cif_lines.append('loop_') - cif_lines.append('_phase_label') - cif_lines.append('_phase_scale') - cif_lines.append(f'{linked_structure.name} 1.0') + cif_lines.extend(( + 'loop_', + '_phase_label', + '_phase_scale', + f'{linked_structure.name} 1.0', + )) def _cif_background_section( @@ -636,16 +640,21 @@ def _cif_background_section( """Append background loop for powder data.""" if expt_type.sample_form.value != SampleFormEnum.POWDER: return - cif_lines.append('') - cif_lines.append('loop_') + cif_lines.extend(('', 'loop_')) if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - cif_lines.append('_pd_background_2theta') - cif_lines.append('_pd_background_intensity') + cif_lines.extend(( + '_pd_background_2theta', + '_pd_background_intensity', + )) elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - cif_lines.append('_tof_backgroundpoint_time') # TODO: !!!!???? - cif_lines.append('_tof_backgroundpoint_intensity') # TODO: !!!!???? - cif_lines.append(f'{twotheta_min} 0.0') # TODO: !!!!???? - cif_lines.append(f'{twotheta_max} 0.0') # TODO: !!!!???? + cif_lines.extend(( + '_tof_backgroundpoint_time', # TODO: !!!!???? + '_tof_backgroundpoint_intensity', # TODO: !!!!???? + )) + cif_lines.extend(( + f'{twotheta_min} 0.0', # TODO: !!!!???? + f'{twotheta_max} 0.0', # TODO: !!!!???? + )) def _cif_measured_data_section( @@ -667,13 +676,15 @@ def _cif_measured_data_sc( ) -> None: """Append single crystal measured data loop.""" data = experiment.data - cif_lines.append('') - cif_lines.append('loop_') - cif_lines.append('_diffrn_refln_index_h') - cif_lines.append('_diffrn_refln_index_k') - cif_lines.append('_diffrn_refln_index_l') - cif_lines.append('_diffrn_refln_intensity') - cif_lines.append('_diffrn_refln_intensity_sigma') + cif_lines.extend(( + '', + 'loop_', + '_diffrn_refln_index_h', + '_diffrn_refln_index_k', + '_diffrn_refln_index_l', + '_diffrn_refln_intensity', + '_diffrn_refln_intensity_sigma', + )) is_tof = expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT if is_tof: @@ -695,16 +706,19 @@ def _cif_measured_data_pd( experiment: ExperimentBase, ) -> None: """Append powder measured data loop.""" - cif_lines.append('') - cif_lines.append('loop_') + cif_lines.extend(('', 'loop_')) if expt_type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH: - cif_lines.append('_pd_meas_2theta') - cif_lines.append('_pd_meas_intensity') - cif_lines.append('_pd_meas_intensity_sigma') + cif_lines.extend(( + '_pd_meas_2theta', + '_pd_meas_intensity', + '_pd_meas_intensity_sigma', + )) elif expt_type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT: - cif_lines.append('_tof_meas_time') - cif_lines.append('_tof_meas_intensity') - cif_lines.append('_tof_meas_intensity_sigma') + cif_lines.extend(( + '_tof_meas_time', + '_tof_meas_intensity', + '_tof_meas_intensity_sigma', + )) x_data = experiment.data.x y_data = experiment.data.intensity_meas diff --git a/src/easydiffraction/analysis/sequential.py b/src/easydiffraction/analysis/sequential.py index a1cd79a4..2e66c20f 100644 --- a/src/easydiffraction/analysis/sequential.py +++ b/src/easydiffraction/analysis/sequential.py @@ -306,8 +306,7 @@ def _build_csv_header( header = list(_META_COLUMNS) header.extend(f'diffrn.{field}' for field in template.diffrn_field_names) for name in template.free_param_unique_names: - header.append(name) - header.append(f'{name}.uncertainty') + header.extend((name, f'{name}.uncertainty')) return header @@ -469,9 +468,7 @@ def _build_template(project: object) -> SequentialFitTemplate: diffrn_field_names: list[str] = [] if hasattr(experiment, 'diffrn'): diffrn_field_names.extend( - p.name - for p in experiment.diffrn.parameters - if hasattr(p, 'name') and p.name not in {'type'} + p.name for p in experiment.diffrn.parameters if hasattr(p, 'name') and p.name != 'type' ) return SequentialFitTemplate( diff --git a/src/easydiffraction/datablocks/structure/categories/space_group/default.py b/src/easydiffraction/datablocks/structure/categories/space_group/default.py index d55247bc..e04015f2 100644 --- a/src/easydiffraction/datablocks/structure/categories/space_group/default.py +++ b/src/easydiffraction/datablocks/structure/categories/space_group/default.py @@ -113,7 +113,7 @@ def _it_coordinate_system_code_allowed_values(self) -> list[str]: it_number = get_it_number_by_name_hm_short(name) codes = get_it_coordinate_system_codes_by_it_number(it_number) codes = [str(code) for code in codes] - return codes if codes else [''] + return codes or [''] @property def _it_coordinate_system_code_default_value(self) -> str: diff --git a/src/easydiffraction/io/cif/serialize.py b/src/easydiffraction/io/cif/serialize.py index d7b78ed5..a55361a5 100644 --- a/src/easydiffraction/io/cif/serialize.py +++ b/src/easydiffraction/io/cif/serialize.py @@ -303,16 +303,17 @@ def analysis_to_cif(analysis: object) -> str: """Render analysis metadata, aliases, and constraints to CIF.""" cur_min = format_value(analysis.current_minimizer) lines: list[str] = [] - lines.append(f'_analysis.fitting_engine {cur_min}') - lines.append(analysis.fit_mode.as_cif) - lines.append('') - lines.append(analysis.aliases.as_cif) - lines.append('') - lines.append(analysis.constraints.as_cif) + lines.extend(( + f'_analysis.fitting_engine {cur_min}', + analysis.fit_mode.as_cif, + '', + analysis.aliases.as_cif, + '', + analysis.constraints.as_cif, + )) jfe_cif = analysis.joint_fit_experiments.as_cif if jfe_cif: - lines.append('') - lines.append(jfe_cif) + lines.extend(('', jfe_cif)) return '\n'.join(lines) diff --git a/tests/integration/scipp-analysis/dream/test_analyze_reduced_data.py b/tests/integration/scipp-analysis/dream/test_analyze_reduced_data.py index 23821ee2..b6a5da69 100644 --- a/tests/integration/scipp-analysis/dream/test_analyze_reduced_data.py +++ b/tests/integration/scipp-analysis/dream/test_analyze_reduced_data.py @@ -37,8 +37,7 @@ def prepared_cif_path( """Prepare CIF file with experiment type tags for easydiffraction. """ - with Path(cif_path).open() as f: - content = f.read() + content = Path(cif_path).read_text() # Add experiment type tags if missing for tag, value in EXPT_TYPE_TAGS.items(): diff --git a/tests/unit/easydiffraction/analysis/calculators/test_pdffit.py b/tests/unit/easydiffraction/analysis/calculators/test_pdffit.py index 6475cb79..f317ae4e 100644 --- a/tests/unit/easydiffraction/analysis/calculators/test_pdffit.py +++ b/tests/unit/easydiffraction/analysis/calculators/test_pdffit.py @@ -1,6 +1,8 @@ # SPDX-FileCopyrightText: 2025 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause +import collections + import numpy as np @@ -41,7 +43,7 @@ def __init__(self): self.broad_q = _DummyParam(0.0) -class _DummyLinkedPhases(dict): +class _DummyLinkedPhases(collections.UserDict): def __getitem__(self, k): return type('LP', (), {'scale': _DummyParam(1.0)})() diff --git a/tests/unit/easydiffraction/analysis/fit_helpers/test_metrics.py b/tests/unit/easydiffraction/analysis/fit_helpers/test_metrics.py index eff28fc0..83de8900 100644 --- a/tests/unit/easydiffraction/analysis/fit_helpers/test_metrics.py +++ b/tests/unit/easydiffraction/analysis/fit_helpers/test_metrics.py @@ -1,6 +1,8 @@ # SPDX-FileCopyrightText: 2025 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause +import collections + import numpy as np @@ -43,7 +45,7 @@ def __init__(self): def _update_categories(self, called_by_minimizer=False): pass - class DummyStructures(dict): + class DummyStructures(collections.UserDict): pass y_obs, y_calc, y_err = M.get_reliability_inputs(DummyStructures(), [Expt()]) diff --git a/tests/unit/easydiffraction/analysis/minimizers/test_lmfit.py b/tests/unit/easydiffraction/analysis/minimizers/test_lmfit.py index 977b3431..8c35a0ca 100644 --- a/tests/unit/easydiffraction/analysis/minimizers/test_lmfit.py +++ b/tests/unit/easydiffraction/analysis/minimizers/test_lmfit.py @@ -1,6 +1,7 @@ # SPDX-FileCopyrightText: 2025 EasyScience contributors # SPDX-License-Identifier: BSD-3-Clause +import collections import types import numpy as np @@ -41,7 +42,7 @@ def __init__(self, value, stderr=None): self.value = value self.stderr = stderr - class FakeParams(dict): + class FakeParams(collections.UserDict): def add(self, name, value, vary, min, max): self[name] = types.SimpleNamespace(value=value, vary=vary, min=min, max=max) diff --git a/tests/unit/easydiffraction/core/test_collection.py b/tests/unit/easydiffraction/core/test_collection.py index 470bff84..920b3a72 100644 --- a/tests/unit/easydiffraction/core/test_collection.py +++ b/tests/unit/easydiffraction/core/test_collection.py @@ -126,7 +126,7 @@ def as_cif(self) -> str: # Invalid key type with pytest.raises(TypeError): - c[3.14] + c[1.5] def test_collection_datablock_keyed_items(): diff --git a/tests/unit/easydiffraction/display/tablers/test_base.py b/tests/unit/easydiffraction/display/tablers/test_base.py index 106bcacc..8dd7d1e9 100644 --- a/tests/unit/easydiffraction/display/tablers/test_base.py +++ b/tests/unit/easydiffraction/display/tablers/test_base.py @@ -2,6 +2,8 @@ # SPDX-License-Identifier: BSD-3-Clause """Tests for display/tablers/base.py (TableBackendBase).""" +import math + class TestTableBackendBase: def test_float_precision_constant(self): @@ -13,7 +15,7 @@ def test_format_value_float(self): from easydiffraction.display.tablers.rich import RichTableBackend backend = RichTableBackend() - result = backend._format_value(3.14159265) + result = backend._format_value(math.pi) assert result == '3.14159' def test_format_value_nonf_float(self): From a4e4756d33a298012ade81a9c35bef2bc0878f3c Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 00:17:01 +0200 Subject: [PATCH 42/51] Fix all PIE rule violations --- pixi.lock | 4 ++-- pyproject.toml | 6 +++--- src/easydiffraction/analysis/calculators/base.py | 4 ---- src/easydiffraction/analysis/minimizers/base.py | 4 ---- src/easydiffraction/core/category.py | 2 -- .../datablocks/experiment/categories/background/base.py | 1 - src/easydiffraction/datablocks/experiment/item/base.py | 2 -- src/easydiffraction/display/plotters/base.py | 3 --- src/easydiffraction/display/tablers/base.py | 1 - src/easydiffraction/project/project_info.py | 1 - tests/unit/easydiffraction/analysis/test_analysis.py | 1 - tests/unit/easydiffraction/utils/test_utils.py | 4 ++-- 12 files changed, 7 insertions(+), 26 deletions(-) diff --git a/pixi.lock b/pixi.lock index 7f3a3c23..71d08407 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty44 - sha256: ef1cbe99f8c48157b1e1a1069e3807a0a6b4fc35c84d310513d5fb1d39f61d1b + version: 0.11.1+devdirty45 + sha256: 051c7e6c16475380a428a85039726de0e65e2c54818e4576c6a2ac2c1f0832f6 requires_dist: - asciichartpy - asteval diff --git a/pyproject.toml b/pyproject.toml index 8c6b5767..86e6161a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -151,7 +151,7 @@ default-tag = 'v999.0.0' # https://interrogate.readthedocs.io/en/latest/ [tool.interrogate] -fail-under = 35 # Minimum docstring coverage percentage to pass +fail-under = 75 # Minimum docstring coverage percentage to pass verbose = 1 #exclude = ['src/**/__init__.py'] @@ -169,7 +169,7 @@ source = ['src'] # Limit coverage to the source code directory [tool.coverage.report] show_missing = true # Show missing lines skip_covered = false # Skip files with 100% coverage in the report -fail_under = 70 # Minimum coverage percentage to pass +fail_under = 75 # Minimum coverage percentage to pass ########################## # Configuration for pytest @@ -265,7 +265,7 @@ select = [ 'INP', # https://docs.astral.sh/ruff/rules/#flake8-no-pep420-inp 'ISC', # https://docs.astral.sh/ruff/rules/#flake8-implicit-str-concat-isc 'LOG', # https://docs.astral.sh/ruff/rules/#flake8-logging-log - #'PIE', # https://docs.astral.sh/ruff/rules/#flake8-pie-pie + 'PIE', # https://docs.astral.sh/ruff/rules/#flake8-pie-pie 'PT', # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt 'PTH', # https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth 'PYI', # https://docs.astral.sh/ruff/rules/#flake8-pyi-pyi diff --git a/src/easydiffraction/analysis/calculators/base.py b/src/easydiffraction/analysis/calculators/base.py index bd667ac8..ed36991d 100644 --- a/src/easydiffraction/analysis/calculators/base.py +++ b/src/easydiffraction/analysis/calculators/base.py @@ -18,13 +18,11 @@ class CalculatorBase(ABC): @abstractmethod def name(self) -> str: """Short identifier of the calculation engine.""" - pass @property @abstractmethod def engine_imported(self) -> bool: """True if the underlying calculation library is available.""" - pass @abstractmethod def calculate_structure_factors( @@ -34,7 +32,6 @@ def calculate_structure_factors( called_by_minimizer: bool, ) -> None: """Calculate structure factors for one experiment.""" - pass @abstractmethod def calculate_pattern( @@ -61,4 +58,3 @@ def calculate_pattern( np.ndarray The calculated diffraction pattern as a NumPy array. """ - pass diff --git a/src/easydiffraction/analysis/minimizers/base.py b/src/easydiffraction/analysis/minimizers/base.py index 5412a5ec..fd4387ea 100644 --- a/src/easydiffraction/analysis/minimizers/base.py +++ b/src/easydiffraction/analysis/minimizers/base.py @@ -80,7 +80,6 @@ def _prepare_solver_args(self, parameters: list[Any]) -> dict[str, Any]: dict[str, Any] Mapping of keyword arguments to pass into ``_run_solver``. """ - pass @abstractmethod def _run_solver( @@ -89,7 +88,6 @@ def _run_solver( engine_parameters: dict[str, object], ) -> object: """Execute the concrete solver and return its raw result.""" - pass @abstractmethod def _sync_result_to_parameters( @@ -98,7 +96,6 @@ def _sync_result_to_parameters( parameters: list[object], ) -> None: """Copy raw_result values back to parameters in-place.""" - pass def _finalize_fit( self, @@ -135,7 +132,6 @@ def _finalize_fit( @abstractmethod def _check_success(self, raw_result: object) -> bool: """Determine whether the fit was successful.""" - pass def fit( self, diff --git a/src/easydiffraction/core/category.py b/src/easydiffraction/core/category.py index 19320ef5..6921dc8b 100644 --- a/src/easydiffraction/core/category.py +++ b/src/easydiffraction/core/category.py @@ -32,7 +32,6 @@ def __str__(self) -> str: # TODO: Common for all categories def _update(self, called_by_minimizer: bool = False) -> None: # noqa: PLR6301 del called_by_minimizer - pass @property def unique_name(self) -> str: @@ -196,7 +195,6 @@ def __str__(self) -> str: # TODO: Common for all categories def _update(self, called_by_minimizer: bool = False) -> None: # noqa: PLR6301 del called_by_minimizer - pass @property def unique_name(self) -> str | None: diff --git a/src/easydiffraction/datablocks/experiment/categories/background/base.py b/src/easydiffraction/datablocks/experiment/categories/background/base.py index 913cb764..433c4aa7 100644 --- a/src/easydiffraction/datablocks/experiment/categories/background/base.py +++ b/src/easydiffraction/datablocks/experiment/categories/background/base.py @@ -20,4 +20,3 @@ class BackgroundBase(CategoryCollection): @abstractmethod def show(self) -> None: """Print a human-readable view of background components.""" - pass diff --git a/src/easydiffraction/datablocks/experiment/item/base.py b/src/easydiffraction/datablocks/experiment/item/base.py index 12e71351..7a769ba8 100644 --- a/src/easydiffraction/datablocks/experiment/item/base.py +++ b/src/easydiffraction/datablocks/experiment/item/base.py @@ -296,7 +296,6 @@ def _load_ascii_data_to_experiment(self, data_path: str) -> None: Path to data file with columns compatible with the beam mode. """ - pass # ------------------------------------------------------------------ # Extinction (switchable-category pattern) @@ -575,7 +574,6 @@ def _load_ascii_data_to_experiment(self, data_path: str) -> int: int Number of loaded data points. """ - pass @property def linked_phases(self) -> object: diff --git a/src/easydiffraction/display/plotters/base.py b/src/easydiffraction/display/plotters/base.py index 8fb018e6..d8ad2b48 100644 --- a/src/easydiffraction/display/plotters/base.py +++ b/src/easydiffraction/display/plotters/base.py @@ -195,7 +195,6 @@ def plot_powder( height : int | None Backend-specific height (text rows or pixels). """ - pass @abstractmethod def plot_single_crystal( @@ -228,7 +227,6 @@ def plot_single_crystal( height : int | None Backend-specific height (text rows or pixels). """ - pass @abstractmethod def plot_scatter( @@ -258,4 +256,3 @@ def plot_scatter( height : int | None Backend-specific height (text rows or pixels). """ - pass diff --git a/src/easydiffraction/display/tablers/base.py b/src/easydiffraction/display/tablers/base.py index cef9a62e..49e3fa0e 100644 --- a/src/easydiffraction/display/tablers/base.py +++ b/src/easydiffraction/display/tablers/base.py @@ -125,4 +125,3 @@ def render( object Backend-defined return value (commonly ``None``). """ - pass diff --git a/src/easydiffraction/project/project_info.py b/src/easydiffraction/project/project_info.py index dcba2fba..94247f33 100644 --- a/src/easydiffraction/project/project_info.py +++ b/src/easydiffraction/project/project_info.py @@ -119,7 +119,6 @@ def update_last_modified(self) -> None: def parameters(self) -> None: """List parameters (not implemented).""" - pass # TODO: Consider moving to io.cif.serialize def as_cif(self) -> str: diff --git a/tests/unit/easydiffraction/analysis/test_analysis.py b/tests/unit/easydiffraction/analysis/test_analysis.py index fc4f0135..9eea433f 100644 --- a/tests/unit/easydiffraction/analysis/test_analysis.py +++ b/tests/unit/easydiffraction/analysis/test_analysis.py @@ -140,7 +140,6 @@ def mock_process_fit_results(structures, experiments): # Create a mock project with structures and experiments class MockProject: structures = object() - experiments = object() _varname = 'proj' class experiments_cls: diff --git a/tests/unit/easydiffraction/utils/test_utils.py b/tests/unit/easydiffraction/utils/test_utils.py index 598f73d3..3c9c510b 100644 --- a/tests/unit/easydiffraction/utils/test_utils.py +++ b/tests/unit/easydiffraction/utils/test_utils.py @@ -212,7 +212,7 @@ def failing_urlopen(url): def test_list_tutorials_empty_index(monkeypatch, capsys): import easydiffraction.utils.utils as MUT - monkeypatch.setattr(MUT, '_fetch_tutorials_index', lambda: {}) + monkeypatch.setattr(MUT, '_fetch_tutorials_index', dict) MUT.list_tutorials() out = capsys.readouterr().out assert 'No tutorials available' in out @@ -315,7 +315,7 @@ def test_show_version_prints(capsys, monkeypatch): def test_download_all_tutorials_empty_index(monkeypatch, capsys): import easydiffraction.utils.utils as MUT - monkeypatch.setattr(MUT, '_fetch_tutorials_index', lambda: {}) + monkeypatch.setattr(MUT, '_fetch_tutorials_index', dict) result = MUT.download_all_tutorials() assert result == [] out = capsys.readouterr().out From e33bd4b431af25b4acf93888be21d178ac33aa58 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 00:29:34 +0200 Subject: [PATCH 43/51] Fix all BLE001 blind-except violations --- pixi.lock | 4 ++-- pyproject.toml | 12 +++++----- .../analysis/fit_helpers/tracking.py | 2 +- src/easydiffraction/analysis/sequential.py | 12 ++++++++-- src/easydiffraction/core/category.py | 2 +- src/easydiffraction/core/singleton.py | 2 +- src/easydiffraction/display/tablers/pandas.py | 4 ++-- src/easydiffraction/display/tablers/rich.py | 6 ++--- src/easydiffraction/display/utils.py | 4 ++-- src/easydiffraction/io/ascii.py | 2 +- src/easydiffraction/utils/environment.py | 12 +++++----- src/easydiffraction/utils/logging.py | 22 ++++++++++++++----- src/easydiffraction/utils/utils.py | 8 +++---- .../experiment/item/test_total_pd.py | 2 +- .../unit/easydiffraction/utils/test_utils.py | 2 +- 15 files changed, 57 insertions(+), 39 deletions(-) diff --git a/pixi.lock b/pixi.lock index 71d08407..56840b10 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty45 - sha256: 051c7e6c16475380a428a85039726de0e65e2c54818e4576c6a2ac2c1f0832f6 + version: 0.11.1+devdirty46 + sha256: 935979bb55116a208c10fb7913fcc8dac9b067d0ac2cb8df8e4dbd9849d35c18 requires_dist: - asciichartpy - asteval diff --git a/pyproject.toml b/pyproject.toml index 86e6161a..f74cccbc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -252,12 +252,12 @@ select = [ 'ARG', # https://docs.astral.sh/ruff/rules/#flake8-unused-arguments-arg 'ASYNC', # https://docs.astral.sh/ruff/rules/#flake8-async-async 'B', # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b - #'BLE', # https://docs.astral.sh/ruff/rules/#flake8-blind-except-ble - 'C4', # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 - 'COM', # https://docs.astral.sh/ruff/rules/#flake8-commas-com - 'DTZ', # https://docs.astral.sh/ruff/rules/#flake8-datetimez-dtz - 'EM', # https://docs.astral.sh/ruff/rules/#flake8-errmsg-em - 'FA', # https://docs.astral.sh/ruff/rules/#flake8-future-annotations-fa + 'BLE', # https://docs.astral.sh/ruff/rules/#flake8-blind-except-ble + 'C4', # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 + 'COM', # https://docs.astral.sh/ruff/rules/#flake8-commas-com + 'DTZ', # https://docs.astral.sh/ruff/rules/#flake8-datetimez-dtz + 'EM', # https://docs.astral.sh/ruff/rules/#flake8-errmsg-em + 'FA', # https://docs.astral.sh/ruff/rules/#flake8-future-annotations-fa #'FBT', # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt #'FIX', # https://docs.astral.sh/ruff/rules/#flake8-fixme-fix 'G', # https://docs.astral.sh/ruff/rules/#flake8-logging-format-g diff --git a/src/easydiffraction/analysis/fit_helpers/tracking.py b/src/easydiffraction/analysis/fit_helpers/tracking.py index 92da3698..99f9c8b6 100644 --- a/src/easydiffraction/analysis/fit_helpers/tracking.py +++ b/src/easydiffraction/analysis/fit_helpers/tracking.py @@ -23,7 +23,7 @@ try: from rich.live import Live -except Exception: # pragma: no cover - rich always available in app env +except ImportError: # pragma: no cover - rich always available in app env Live = None # type: ignore[assignment] from easydiffraction.utils.logging import ConsoleManager diff --git a/src/easydiffraction/analysis/sequential.py b/src/easydiffraction/analysis/sequential.py index 2e66c20f..22e16b38 100644 --- a/src/easydiffraction/analysis/sequential.py +++ b/src/easydiffraction/analysis/sequential.py @@ -135,7 +135,15 @@ def _fit_worker( # 10. Collect results result.update(_collect_results(project, template)) - except Exception as exc: + except ( + RuntimeError, + ValueError, + TypeError, + ArithmeticError, + KeyError, + IndexError, + OSError, + ) as exc: result['fit_success'] = False result['chi_squared'] = None result['reduced_chi_squared'] = None @@ -560,7 +568,7 @@ def _apply_diffrn_metadata( diffrn_values = extract_diffrn(result['file_path']) for key, val in diffrn_values.items(): result[f'diffrn.{key}'] = val - except Exception as exc: + except (RuntimeError, ValueError, TypeError, KeyError, AttributeError, OSError) as exc: log.warning(f'extract_diffrn failed for {result["file_path"]}: {exc}') diff --git a/src/easydiffraction/core/category.py b/src/easydiffraction/core/category.py index 6921dc8b..1d8710ed 100644 --- a/src/easydiffraction/core/category.py +++ b/src/easydiffraction/core/category.py @@ -82,7 +82,7 @@ def help(self) -> None: prop = seen[key] try: val = getattr(self, key) - except Exception: + except (AttributeError, TypeError, ValueError): val = None if isinstance(val, GenericDescriptorBase): p_idx += 1 diff --git a/src/easydiffraction/core/singleton.py b/src/easydiffraction/core/singleton.py index a4ac6b28..0871dbde 100644 --- a/src/easydiffraction/core/singleton.py +++ b/src/easydiffraction/core/singleton.py @@ -117,5 +117,5 @@ def apply(self) -> None: # Update its value and mark it as constrained param._set_value_constrained(rhs_value) - except Exception as error: + except (ValueError, TypeError, ArithmeticError, KeyError, AttributeError) as error: print(f"Failed to apply constraint '{lhs_alias} = {rhs_expr}': {error}") diff --git a/src/easydiffraction/display/tablers/pandas.py b/src/easydiffraction/display/tablers/pandas.py index d6565513..b88a6d08 100644 --- a/src/easydiffraction/display/tablers/pandas.py +++ b/src/easydiffraction/display/tablers/pandas.py @@ -7,7 +7,7 @@ try: from IPython.display import HTML from IPython.display import display -except Exception: +except ImportError: HTML = None display = None @@ -161,7 +161,7 @@ def _update_display(styler: object, display_handle: object) -> None: try: html = styler.to_html() display_handle.update(HTML(html)) - except Exception as err: + except (TypeError, ValueError, AttributeError, RuntimeError, OSError) as err: log.debug(f'Pandas DisplayHandle update failed: {err!r}') else: return diff --git a/src/easydiffraction/display/tablers/rich.py b/src/easydiffraction/display/tablers/rich.py index 017e6d04..903b33ef 100644 --- a/src/easydiffraction/display/tablers/rich.py +++ b/src/easydiffraction/display/tablers/rich.py @@ -13,7 +13,7 @@ try: from IPython.display import HTML from IPython.display import display -except Exception: +except ImportError: HTML = None display = None @@ -132,7 +132,7 @@ def _update_display(self, table: Table, display_handle: object) -> None: try: html = self._to_html(table) display_handle.update(HTML(html)) - except Exception as err: + except (TypeError, ValueError, AttributeError, RuntimeError, OSError) as err: log.debug(f'Rich to HTML DisplayHandle update failed: {err!r}') else: return @@ -141,7 +141,7 @@ def _update_display(self, table: Table, display_handle: object) -> None: else: try: display_handle.update(table) - except Exception as err: + except (TypeError, ValueError, AttributeError, RuntimeError, OSError) as err: log.debug(f'Rich live handle update failed: {err!r}') else: return diff --git a/src/easydiffraction/display/utils.py b/src/easydiffraction/display/utils.py index 5daba0b4..5a130164 100644 --- a/src/easydiffraction/display/utils.py +++ b/src/easydiffraction/display/utils.py @@ -12,7 +12,7 @@ try: from IPython.display import HTML from IPython.display import display -except Exception: +except ImportError: display = None HTML = None @@ -42,5 +42,5 @@ def disable_jupyter_scroll(cls) -> None: try: display(HTML(css)) cls._applied = True - except Exception: + except (TypeError, ValueError, AttributeError, RuntimeError, OSError): log.debug('Failed to inject Jupyter CSS to disable scrolling.') diff --git a/src/easydiffraction/io/ascii.py b/src/easydiffraction/io/ascii.py index 1bba03b0..2ddd69e3 100644 --- a/src/easydiffraction/io/ascii.py +++ b/src/easydiffraction/io/ascii.py @@ -246,7 +246,7 @@ def load_numeric_block(data_path: str | Path) -> np.ndarray: for start in range(len(lines)): try: return np.loadtxt(StringIO('\n'.join(lines[start:]))) - except Exception as e: + except ValueError as e: last_error = e msg = f'Failed to read numeric data from {data_path}: {last_error}' diff --git a/src/easydiffraction/utils/environment.py b/src/easydiffraction/utils/environment.py index 9a194991..e2df97d4 100644 --- a/src/easydiffraction/utils/environment.py +++ b/src/easydiffraction/utils/environment.py @@ -91,7 +91,7 @@ def in_jupyter() -> bool: return True # Jupyter or qtconsole use ZMQInteractiveShell return ip.__class__.__name__ == 'ZMQInteractiveShell' # noqa: TRY300 - except Exception: + except (NameError, AttributeError): return False @@ -128,14 +128,14 @@ def is_ipython_display_handle(obj: object) -> bool: try: return isinstance(obj, DisplayHandle) - except Exception: + except TypeError: return False - except Exception: + except ImportError: # Fallback heuristic when IPython is unavailable try: mod = getattr(getattr(obj, '__class__', None), '__module__', '') return isinstance(mod, str) and mod.startswith('IPython') - except Exception: + except (TypeError, AttributeError): return False @@ -148,7 +148,7 @@ def can_update_ipython_display() -> bool: """ try: pass # type: ignore[import-not-found] - except Exception: + except ImportError: return False else: return True @@ -163,5 +163,5 @@ def can_use_ipython_display(handle: object) -> bool: """ try: return is_ipython_display_handle(handle) and can_update_ipython_display() - except Exception: + except (ImportError, TypeError, AttributeError): return False diff --git a/src/easydiffraction/utils/logging.py b/src/easydiffraction/utils/logging.py index 49f6be05..a94aadde 100644 --- a/src/easydiffraction/utils/logging.py +++ b/src/easydiffraction/utils/logging.py @@ -33,6 +33,7 @@ from rich.console import Group from rich.console import RenderableType from rich.logging import RichHandler +from rich.markup import MarkupError from rich.text import Text from easydiffraction.utils.environment import in_jupyter @@ -55,7 +56,12 @@ class IconifiedRichHandler(RichHandler): logging.INFO: 'ℹ️', # noqa: RUF001 } - def __init__(self, *args: object, mode: str = 'compact', **kwargs: object) -> None: + def __init__( + self, + *args: object, + mode: str = 'compact', + **kwargs: object, + ) -> None: super().__init__(*args, **kwargs) self.mode = mode @@ -81,7 +87,11 @@ def get_level_text(self, record: logging.LogRecord) -> Text: # Use RichHandler's default level text for verbose mode return super().get_level_text(record) - def render_message(self, record: logging.LogRecord, message: str) -> Text: + def render_message( + self, + record: logging.LogRecord, + message: str, + ) -> Text: """ Render the log message body as a Rich Text object. @@ -100,7 +110,7 @@ def render_message(self, record: logging.LogRecord, message: str) -> Text: if self.mode == 'compact': try: return Text.from_markup(message) - except Exception: + except (ValueError, KeyError, TypeError, MarkupError): return Text(str(message)) return super().render_message(record, message) @@ -130,7 +140,7 @@ def _detect_width() -> int: min_width = ConsoleManager._MIN_CONSOLE_WIDTH try: width = shutil.get_terminal_size().columns - except Exception: + except (ValueError, OSError): width = min_width return max(width, min_width) @@ -335,7 +345,7 @@ def suppress_jupyter_traceback(*args: object, **kwargs: object) -> None: else kwargs.get('_evalue') or kwargs.get('evalue') ) logger.error(str(evalue)) - except Exception as err: + except (IndexError, TypeError, AttributeError, ValueError) as err: logger.debug('Jupyter traceback suppressor failed: %r', err) return suppress_jupyter_traceback @@ -358,7 +368,7 @@ def install_jupyter_traceback_suppressor(logger: logging.Logger) -> None: ip.set_custom_exc( (BaseException,), ExceptionHookManager._suppress_traceback(logger) ) - except Exception as err: + except (ImportError, AttributeError, TypeError) as err: msg = f'Failed to install Jupyter traceback suppressor: {err!r}' logger.debug(msg) diff --git a/src/easydiffraction/utils/utils.py b/src/easydiffraction/utils/utils.py index cf9a9f78..b98d30cf 100644 --- a/src/easydiffraction/utils/utils.py +++ b/src/easydiffraction/utils/utils.py @@ -114,7 +114,7 @@ def _fetch_tutorials_index() -> dict: _validate_url(index_url) with _safe_urlopen(index_url) as response: return json.load(response) - except Exception as e: + except (OSError, ValueError) as e: log.warning( f'Failed to fetch tutorials index from {index_url}: {e}', exc_type=UserWarning, @@ -247,7 +247,7 @@ def stripped_package_version(package_name: str) -> str | None: try: v = Version(v_str) return str(v.public) - except Exception: + except ValueError: return v_str @@ -488,7 +488,7 @@ def download_all_tutorials( overwrite=overwrite, ) downloaded_paths.append(path) - except Exception as e: + except (OSError, ValueError) as e: log.warning(f'Failed to download tutorial #{tutorial_id}: {e}') console.print(f'✅ Downloaded {len(downloaded_paths)} tutorials to "{destination}/"') @@ -742,5 +742,5 @@ def str_to_ufloat(s: str | None, default: float | None = None) -> UFloat: s = s[:-2] + '(0)' try: return ufloat_fromstr(s) - except Exception: + except ValueError: return ufloat(default, np.nan) diff --git a/tests/unit/easydiffraction/datablocks/experiment/item/test_total_pd.py b/tests/unit/easydiffraction/datablocks/experiment/item/test_total_pd.py index d021dc34..33b3476d 100644 --- a/tests/unit/easydiffraction/datablocks/experiment/item/test_total_pd.py +++ b/tests/unit/easydiffraction/datablocks/experiment/item/test_total_pd.py @@ -36,7 +36,7 @@ def test_load_ascii_data_pdf(tmp_path: pytest.TempPathFactory): # Try to import loadData; if diffpy isn't installed, expect ImportError try: has_diffpy = True - except Exception: + except ImportError: has_diffpy = False if not has_diffpy: diff --git a/tests/unit/easydiffraction/utils/test_utils.py b/tests/unit/easydiffraction/utils/test_utils.py index 3c9c510b..13564ca4 100644 --- a/tests/unit/easydiffraction/utils/test_utils.py +++ b/tests/unit/easydiffraction/utils/test_utils.py @@ -198,7 +198,7 @@ def test_fetch_tutorials_index_returns_empty_on_error(monkeypatch): # Force urlopen to fail def failing_urlopen(url): msg = 'Network error' - raise Exception(msg) + raise OSError(msg) monkeypatch.setattr(MUT, '_safe_urlopen', failing_urlopen) # Clear cache to ensure fresh fetch From eeef5bfa35ea1997f89c68688934c5b05d6e6c02 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 00:32:53 +0200 Subject: [PATCH 44/51] Add test-all task --- pixi.lock | 4 ++-- pixi.toml | 6 ++++++ pyproject.toml | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/pixi.lock b/pixi.lock index 56840b10..9e2499f3 100644 --- a/pixi.lock +++ b/pixi.lock @@ -4865,8 +4865,8 @@ packages: requires_python: '>=3.5' - pypi: ./ name: easydiffraction - version: 0.11.1+devdirty46 - sha256: 935979bb55116a208c10fb7913fcc8dac9b067d0ac2cb8df8e4dbd9849d35c18 + version: 0.11.1+devdirty47 + sha256: 1059823770118b360c15b30769760b0241b4683b54a446a6fb3d42a2677943b6 requires_dist: - asciichartpy - asteval diff --git a/pixi.toml b/pixi.toml index 30986cf8..577edbef 100644 --- a/pixi.toml +++ b/pixi.toml @@ -99,6 +99,12 @@ script-tests = 'python -m pytest tools/test_scripts.py --color=yes -n auto -v' notebook-tests = 'python -m pytest --nbmake docs/docs/tutorials/ --nbmake-timeout=1200 --color=yes -n auto -v' test = { depends-on = ['unit-tests', 'functional-tests'] } +test-all = { depends-on = [ + 'unit-tests', + 'functional-tests', + 'integration-tests', + 'script-tests', +] } ########### # ✔️ Checks diff --git a/pyproject.toml b/pyproject.toml index f74cccbc..ec33f255 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -329,6 +329,7 @@ ignore = [ 'PLR', 'PLW', 'SIM117', + 'SLF', 'TRY', 'W505', ] From 77e67221a49b0281d8c9316fedcd115f3444fd05 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 00:37:51 +0200 Subject: [PATCH 45/51] Fix tutorial --- docs/docs/tutorials/ed-13.py | 38 ++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/docs/tutorials/ed-13.py b/docs/docs/tutorials/ed-13.py index b3c6f693..b0513746 100644 --- a/docs/docs/tutorials/ed-13.py +++ b/docs/docs/tutorials/ed-13.py @@ -159,7 +159,7 @@ # project.plotter.engine = 'plotly' # %% -project_1.plot_meas(expt_name='sim_si') +project_1.plotter.plot_meas(expt_name='sim_si') # %% [markdown] # If you zoom in on the highest TOF peak (around 120,000 μs), you will @@ -194,7 +194,7 @@ # plot and is not used in the fitting process. # %% -project_1.plot_meas(expt_name='sim_si') +project_1.plotter.plot_meas(expt_name='sim_si') # %% [markdown] # #### Set Instrument Parameters @@ -599,7 +599,7 @@ # this comparison. # %% -project_1.plot_meas_vs_calc(expt_name='sim_si') +project_1.plotter.plot_meas_vs_calc(expt_name='sim_si') # %% [markdown] # #### Run Fitting @@ -639,7 +639,7 @@ # pattern is now based on the refined parameters. # %% -project_1.plot_meas_vs_calc(expt_name='sim_si') +project_1.plotter.plot_meas_vs_calc(expt_name='sim_si') # %% [markdown] # #### TOF vs d-spacing @@ -670,7 +670,7 @@ # setting the `d_spacing` parameter to `True`. # %% -project_1.plot_meas_vs_calc(expt_name='sim_si', x='d_spacing') +project_1.plotter.plot_meas_vs_calc(expt_name='sim_si', x='d_spacing') # %% [markdown] # As you can see, the calculated diffraction pattern now matches the @@ -781,12 +781,12 @@ # **Solution:** # %% tags=["solution", "hide-input"] -project_2.plot_meas(expt_name='sim_lbco') +project_2.plotter.plot_meas(expt_name='sim_lbco') project_2.experiments['sim_lbco'].excluded_regions.create(id='1', start=0, end=55000) project_2.experiments['sim_lbco'].excluded_regions.create(id='2', start=105500, end=200000) -project_2.plot_meas(expt_name='sim_lbco') +project_2.plotter.plot_meas(expt_name='sim_lbco') # %% [markdown] # #### Exercise 2.2: Set Instrument Parameters @@ -1107,7 +1107,7 @@ # **Solution:** # %% tags=["solution", "hide-input"] -project_2.plot_meas_vs_calc(expt_name='sim_lbco') +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco') project_2.analysis.fit() project_2.analysis.display.fit_results() @@ -1152,7 +1152,7 @@ # peak positions. # %% tags=["solution", "hide-input"] -project_2.plot_meas_vs_calc(expt_name='sim_lbco') +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco') # %% [markdown] # #### Exercise 5.4: Refine the LBCO Lattice Parameter @@ -1181,7 +1181,7 @@ project_2.analysis.fit() project_2.analysis.display.fit_results() -project_2.plot_meas_vs_calc(expt_name='sim_lbco') +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco') # %% [markdown] # One of the main goals of this study was to refine the lattice @@ -1208,7 +1208,7 @@ # **Solution:** # %% tags=["solution", "hide-input"] -project_2.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing') +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing') # %% [markdown] # #### Exercise 5.6: Refine the Peak Profile Parameters @@ -1225,7 +1225,7 @@ # perfectly describe the peak at about 1.38 Å, as can be seen below: # %% -project_2.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1.35, x_max=1.40) +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1.35, x_max=1.40) # %% [markdown] # The peak profile parameters are determined based on both the @@ -1260,7 +1260,7 @@ project_2.analysis.fit() project_2.analysis.display.fit_results() -project_2.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1.35, x_max=1.40) +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1.35, x_max=1.40) # %% [markdown] # #### Exercise 5.7: Find Undefined Features @@ -1283,7 +1283,7 @@ # **Solution:** # %% tags=["solution", "hide-input"] -project_2.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1.53, x_max=1.7) +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1.53, x_max=1.7) # %% [markdown] # #### Exercise 5.8: Identify the Cause of the Unexplained Peaks @@ -1348,8 +1348,8 @@ # confirm this hypothesis. # %% tags=["solution", "hide-input"] -project_1.plot_meas_vs_calc(expt_name='sim_si', x='d_spacing', x_min=1, x_max=1.7) -project_2.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1, x_max=1.7) +project_1.plotter.plot_meas_vs_calc(expt_name='sim_si', x='d_spacing', x_min=1, x_max=1.7) +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco', x='d_spacing', x_min=1, x_max=1.7) # %% [markdown] # #### Exercise 5.10: Create a Second Structure – Si as Impurity @@ -1416,7 +1416,7 @@ # Before optimizing the parameters, we can visualize the measured # diffraction pattern and the calculated diffraction pattern based on # the two phases: LBCO and Si. -project_2.plot_meas_vs_calc(expt_name='sim_lbco') +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco') # As you can see, the calculated pattern is now the sum of both phases, # and Si peaks are visible in the calculated pattern. However, their @@ -1432,8 +1432,8 @@ # diffraction pattern both for the full range and for a zoomed-in region # around the previously unexplained peak near 95,000 μs. The calculated # pattern will be the sum of the two phases. -project_2.plot_meas_vs_calc(expt_name='sim_lbco') -project_2.plot_meas_vs_calc(expt_name='sim_lbco', x_min=88000, x_max=101000) +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco') +project_2.plotter.plot_meas_vs_calc(expt_name='sim_lbco', x_min=88000, x_max=101000) # %% [markdown] # All previously unexplained peaks are now accounted for in the pattern, From c92ab7ec68aace34d1055c9cacfc8cfecd7ddd63 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 00:42:01 +0200 Subject: [PATCH 46/51] Update diagrams and clean up --- docs/architecture/package-structure-full.md | 133 +++-- docs/architecture/package-structure-short.md | 77 ++- tools/convert_google_docstrings_to_numpy.py | 539 ------------------- 3 files changed, 159 insertions(+), 590 deletions(-) delete mode 100644 tools/convert_google_docstrings_to_numpy.py diff --git a/docs/architecture/package-structure-full.md b/docs/architecture/package-structure-full.md index 74662449..12b25e79 100644 --- a/docs/architecture/package-structure-full.md +++ b/docs/architecture/package-structure-full.md @@ -16,16 +16,36 @@ │ │ └── 📄 pdffit.py │ │ └── 🏷️ class PdffitCalculator │ ├── 📁 categories -│ │ ├── 📄 __init__.py -│ │ ├── 📄 aliases.py -│ │ │ ├── 🏷️ class Alias -│ │ │ └── 🏷️ class Aliases -│ │ ├── 📄 constraints.py -│ │ │ ├── 🏷️ class Constraint -│ │ │ └── 🏷️ class Constraints -│ │ └── 📄 joint_fit_experiments.py -│ │ ├── 🏷️ class JointFitExperiment -│ │ └── 🏷️ class JointFitExperiments +│ │ ├── 📁 aliases +│ │ │ ├── 📄 __init__.py +│ │ │ ├── 📄 default.py +│ │ │ │ ├── 🏷️ class Alias +│ │ │ │ └── 🏷️ class Aliases +│ │ │ └── 📄 factory.py +│ │ │ └── 🏷️ class AliasesFactory +│ │ ├── 📁 constraints +│ │ │ ├── 📄 __init__.py +│ │ │ ├── 📄 default.py +│ │ │ │ ├── 🏷️ class Constraint +│ │ │ │ └── 🏷️ class Constraints +│ │ │ └── 📄 factory.py +│ │ │ └── 🏷️ class ConstraintsFactory +│ │ ├── 📁 fit_mode +│ │ │ ├── 📄 __init__.py +│ │ │ ├── 📄 enums.py +│ │ │ │ └── 🏷️ class FitModeEnum +│ │ │ ├── 📄 factory.py +│ │ │ │ └── 🏷️ class FitModeFactory +│ │ │ └── 📄 fit_mode.py +│ │ │ └── 🏷️ class FitMode +│ │ ├── 📁 joint_fit_experiments +│ │ │ ├── 📄 __init__.py +│ │ │ ├── 📄 default.py +│ │ │ │ ├── 🏷️ class JointFitExperiment +│ │ │ │ └── 🏷️ class JointFitExperiments +│ │ │ └── 📄 factory.py +│ │ │ └── 🏷️ class JointFitExperimentsFactory +│ │ └── 📄 __init__.py │ ├── 📁 fit_helpers │ │ ├── 📄 __init__.py │ │ ├── 📄 metrics.py @@ -46,9 +66,12 @@ │ │ └── 🏷️ class LmfitMinimizer │ ├── 📄 __init__.py │ ├── 📄 analysis.py +│ │ ├── 🏷️ class AnalysisDisplay │ │ └── 🏷️ class Analysis -│ └── 📄 fitting.py -│ └── 🏷️ class Fitter +│ ├── 📄 fitting.py +│ │ └── 🏷️ class Fitter +│ └── 📄 sequential.py +│ └── 🏷️ class SequentialFitTemplate ├── 📁 core │ ├── 📄 __init__.py │ ├── 📄 category.py @@ -73,7 +96,6 @@ │ │ └── 🏷️ class CalculatorSupport │ ├── 📄 singleton.py │ │ ├── 🏷️ class SingletonBase -│ │ ├── 🏷️ class UidMapHandler │ │ └── 🏷️ class ConstraintsHandler │ ├── 📄 validation.py │ │ ├── 🏷️ class DataTypeHints @@ -134,6 +156,31 @@ │ │ │ │ ├── 🏷️ class TotalDataPoint │ │ │ │ ├── 🏷️ class TotalDataBase │ │ │ │ └── 🏷️ class TotalData +│ │ │ ├── 📁 diffrn +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ │ └── 🏷️ class DefaultDiffrn +│ │ │ │ └── 📄 factory.py +│ │ │ │ └── 🏷️ class DiffrnFactory +│ │ │ ├── 📁 excluded_regions +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ │ ├── 🏷️ class ExcludedRegion +│ │ │ │ │ └── 🏷️ class ExcludedRegions +│ │ │ │ └── 📄 factory.py +│ │ │ │ └── 🏷️ class ExcludedRegionsFactory +│ │ │ ├── 📁 experiment_type +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ │ └── 🏷️ class ExperimentType +│ │ │ │ └── 📄 factory.py +│ │ │ │ └── 🏷️ class ExperimentTypeFactory +│ │ │ ├── 📁 extinction +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 factory.py +│ │ │ │ │ └── 🏷️ class ExtinctionFactory +│ │ │ │ └── 📄 shelx.py +│ │ │ │ └── 🏷️ class ShelxExtinction │ │ │ ├── 📁 instrument │ │ │ │ ├── 📄 __init__.py │ │ │ │ ├── 📄 base.py @@ -147,6 +194,19 @@ │ │ │ │ └── 📄 tof.py │ │ │ │ ├── 🏷️ class TofScInstrument │ │ │ │ └── 🏷️ class TofPdInstrument +│ │ │ ├── 📁 linked_crystal +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ │ └── 🏷️ class LinkedCrystal +│ │ │ │ └── 📄 factory.py +│ │ │ │ └── 🏷️ class LinkedCrystalFactory +│ │ │ ├── 📁 linked_phases +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ │ ├── 🏷️ class LinkedPhase +│ │ │ │ │ └── 🏷️ class LinkedPhases +│ │ │ │ └── 📄 factory.py +│ │ │ │ └── 🏷️ class LinkedPhasesFactory │ │ │ ├── 📁 peak │ │ │ │ ├── 📄 __init__.py │ │ │ │ ├── 📄 base.py @@ -172,19 +232,7 @@ │ │ │ │ │ └── 🏷️ class TotalGaussianDampedSinc │ │ │ │ └── 📄 total_mixins.py │ │ │ │ └── 🏷️ class TotalBroadeningMixin -│ │ │ ├── 📄 __init__.py -│ │ │ ├── 📄 excluded_regions.py -│ │ │ │ ├── 🏷️ class ExcludedRegion -│ │ │ │ └── 🏷️ class ExcludedRegions -│ │ │ ├── 📄 experiment_type.py -│ │ │ │ └── 🏷️ class ExperimentType -│ │ │ ├── 📄 extinction.py -│ │ │ │ └── 🏷️ class Extinction -│ │ │ ├── 📄 linked_crystal.py -│ │ │ │ └── 🏷️ class LinkedCrystal -│ │ │ └── 📄 linked_phases.py -│ │ │ ├── 🏷️ class LinkedPhase -│ │ │ └── 🏷️ class LinkedPhases +│ │ │ └── 📄 __init__.py │ │ ├── 📁 item │ │ │ ├── 📄 __init__.py │ │ │ ├── 📄 base.py @@ -212,14 +260,26 @@ │ │ └── 🏷️ class Experiments │ ├── 📁 structure │ │ ├── 📁 categories -│ │ │ ├── 📄 __init__.py -│ │ │ ├── 📄 atom_sites.py -│ │ │ │ ├── 🏷️ class AtomSite -│ │ │ │ └── 🏷️ class AtomSites -│ │ │ ├── 📄 cell.py -│ │ │ │ └── 🏷️ class Cell -│ │ │ └── 📄 space_group.py -│ │ │ └── 🏷️ class SpaceGroup +│ │ │ ├── 📁 atom_sites +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ │ ├── 🏷️ class AtomSite +│ │ │ │ │ └── 🏷️ class AtomSites +│ │ │ │ └── 📄 factory.py +│ │ │ │ └── 🏷️ class AtomSitesFactory +│ │ │ ├── 📁 cell +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ │ └── 🏷️ class Cell +│ │ │ │ └── 📄 factory.py +│ │ │ │ └── 🏷️ class CellFactory +│ │ │ ├── 📁 space_group +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ │ └── 🏷️ class SpaceGroup +│ │ │ │ └── 📄 factory.py +│ │ │ │ └── 🏷️ class SpaceGroupFactory +│ │ │ └── 📄 __init__.py │ │ ├── 📁 item │ │ │ ├── 📄 __init__.py │ │ │ ├── 📄 base.py @@ -269,7 +329,8 @@ │ │ │ └── 🏷️ class CifHandler │ │ ├── 📄 parse.py │ │ └── 📄 serialize.py -│ └── 📄 __init__.py +│ ├── 📄 __init__.py +│ └── 📄 ascii.py ├── 📁 project │ ├── 📄 __init__.py │ ├── 📄 project.py @@ -288,6 +349,8 @@ │ │ ├── 📄 __init__.py │ │ └── 📄 theme_detect.py │ ├── 📄 __init__.py +│ ├── 📄 enums.py +│ │ └── 🏷️ class VerbosityEnum │ ├── 📄 environment.py │ ├── 📄 logging.py │ │ ├── 🏷️ class IconifiedRichHandler diff --git a/docs/architecture/package-structure-short.md b/docs/architecture/package-structure-short.md index efe89066..5bbc788f 100644 --- a/docs/architecture/package-structure-short.md +++ b/docs/architecture/package-structure-short.md @@ -11,10 +11,24 @@ │ │ ├── 📄 factory.py │ │ └── 📄 pdffit.py │ ├── 📁 categories -│ │ ├── 📄 __init__.py -│ │ ├── 📄 aliases.py -│ │ ├── 📄 constraints.py -│ │ └── 📄 joint_fit_experiments.py +│ │ ├── 📁 aliases +│ │ │ ├── 📄 __init__.py +│ │ │ ├── 📄 default.py +│ │ │ └── 📄 factory.py +│ │ ├── 📁 constraints +│ │ │ ├── 📄 __init__.py +│ │ │ ├── 📄 default.py +│ │ │ └── 📄 factory.py +│ │ ├── 📁 fit_mode +│ │ │ ├── 📄 __init__.py +│ │ │ ├── 📄 enums.py +│ │ │ ├── 📄 factory.py +│ │ │ └── 📄 fit_mode.py +│ │ ├── 📁 joint_fit_experiments +│ │ │ ├── 📄 __init__.py +│ │ │ ├── 📄 default.py +│ │ │ └── 📄 factory.py +│ │ └── 📄 __init__.py │ ├── 📁 fit_helpers │ │ ├── 📄 __init__.py │ │ ├── 📄 metrics.py @@ -28,7 +42,8 @@ │ │ └── 📄 lmfit.py │ ├── 📄 __init__.py │ ├── 📄 analysis.py -│ └── 📄 fitting.py +│ ├── 📄 fitting.py +│ └── 📄 sequential.py ├── 📁 core │ ├── 📄 __init__.py │ ├── 📄 category.py @@ -62,12 +77,36 @@ │ │ │ │ ├── 📄 bragg_sc.py │ │ │ │ ├── 📄 factory.py │ │ │ │ └── 📄 total_pd.py +│ │ │ ├── 📁 diffrn +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ └── 📄 factory.py +│ │ │ ├── 📁 excluded_regions +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ └── 📄 factory.py +│ │ │ ├── 📁 experiment_type +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ └── 📄 factory.py +│ │ │ ├── 📁 extinction +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 factory.py +│ │ │ │ └── 📄 shelx.py │ │ │ ├── 📁 instrument │ │ │ │ ├── 📄 __init__.py │ │ │ │ ├── 📄 base.py │ │ │ │ ├── 📄 cwl.py │ │ │ │ ├── 📄 factory.py │ │ │ │ └── 📄 tof.py +│ │ │ ├── 📁 linked_crystal +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ └── 📄 factory.py +│ │ │ ├── 📁 linked_phases +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ └── 📄 factory.py │ │ │ ├── 📁 peak │ │ │ │ ├── 📄 __init__.py │ │ │ │ ├── 📄 base.py @@ -78,12 +117,7 @@ │ │ │ │ ├── 📄 tof_mixins.py │ │ │ │ ├── 📄 total.py │ │ │ │ └── 📄 total_mixins.py -│ │ │ ├── 📄 __init__.py -│ │ │ ├── 📄 excluded_regions.py -│ │ │ ├── 📄 experiment_type.py -│ │ │ ├── 📄 extinction.py -│ │ │ ├── 📄 linked_crystal.py -│ │ │ └── 📄 linked_phases.py +│ │ │ └── 📄 __init__.py │ │ ├── 📁 item │ │ │ ├── 📄 __init__.py │ │ │ ├── 📄 base.py @@ -96,10 +130,19 @@ │ │ └── 📄 collection.py │ ├── 📁 structure │ │ ├── 📁 categories -│ │ │ ├── 📄 __init__.py -│ │ │ ├── 📄 atom_sites.py -│ │ │ ├── 📄 cell.py -│ │ │ └── 📄 space_group.py +│ │ │ ├── 📁 atom_sites +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ └── 📄 factory.py +│ │ │ ├── 📁 cell +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ └── 📄 factory.py +│ │ │ ├── 📁 space_group +│ │ │ │ ├── 📄 __init__.py +│ │ │ │ ├── 📄 default.py +│ │ │ │ └── 📄 factory.py +│ │ │ └── 📄 __init__.py │ │ ├── 📁 item │ │ │ ├── 📄 __init__.py │ │ │ ├── 📄 base.py @@ -129,7 +172,8 @@ │ │ ├── 📄 handler.py │ │ ├── 📄 parse.py │ │ └── 📄 serialize.py -│ └── 📄 __init__.py +│ ├── 📄 __init__.py +│ └── 📄 ascii.py ├── 📁 project │ ├── 📄 __init__.py │ ├── 📄 project.py @@ -145,6 +189,7 @@ │ │ ├── 📄 __init__.py │ │ └── 📄 theme_detect.py │ ├── 📄 __init__.py +│ ├── 📄 enums.py │ ├── 📄 environment.py │ ├── 📄 logging.py │ └── 📄 utils.py diff --git a/tools/convert_google_docstrings_to_numpy.py b/tools/convert_google_docstrings_to_numpy.py deleted file mode 100644 index 5fcb14e2..00000000 --- a/tools/convert_google_docstrings_to_numpy.py +++ /dev/null @@ -1,539 +0,0 @@ -#!/usr/bin/env python3 -"""Convert Google-style Python docstrings to numpydoc style.""" - -from __future__ import annotations - -import ast -import inspect -import re -import sys -import textwrap -from pathlib import Path - -from docstring_parser import DocstringStyle -from docstring_parser import compose -from docstring_parser import parse -from format_docstring.docstring_rewriter import calc_abs_pos -from format_docstring.docstring_rewriter import calc_line_starts -from format_docstring.docstring_rewriter import find_docstring -from format_docstring.docstring_rewriter import rebuild_literal - -SECTION_NAMES = ( - 'Args', - 'Arguments', - 'Returns', - 'Raises', - 'Yields', - 'Attributes', - 'Examples', - 'Notes', -) -GOOGLE_SECTION_RE = re.compile( - r'(?m)^(?P[ \t]*)(?P
' - + '|'.join(SECTION_NAMES) - + r'):\s*(?P\S.*)?$' -) -NUMPY_SECTION_RE = re.compile(r'(?m)^[^\n]+\n-+\n') -SECTION_KINDS_WITH_ITEMS = {'Args', 'Arguments', 'Attributes'} -PRESERVE_BLOCK_SECTIONS = {'Examples', 'Notes'} -GENERIC_ITEM_SECTIONS = {'Raises', 'Returns', 'Yields'} -GENERIC_ITEM_RE = re.compile( - r'(?[A-Za-z_][A-Za-z0-9_\.\[\], \|\(\)]{0,80}?)\s*:' -) - - -def _iter_python_files(paths: list[Path]) -> list[Path]: - files: list[Path] = [] - for path in paths: - if path.is_file() and path.suffix == '.py': - files.append(path) - continue - - if not path.exists(): - continue - - for file_path in sorted(path.rglob('*.py')): - if '_vendored' in file_path.parts: - continue - if '.pixi' in file_path.parts: - continue - files.append(file_path) - - return files - - -def _collect_names(node: ast.AST) -> list[str]: - names: list[str] = [] - - if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): - args = list(node.args.posonlyargs) + list(node.args.args) - args += list(node.args.kwonlyargs) - names.extend(arg.arg for arg in args) - if node.args.vararg is not None: - names.append(node.args.vararg.arg) - if node.args.kwarg is not None: - names.append(node.args.kwarg.arg) - return [name for name in names if name not in {'self', 'cls'}] - - if isinstance(node, ast.ClassDef): - init_method = next( - ( - stmt - for stmt in node.body - if isinstance(stmt, ast.FunctionDef) and stmt.name == '__init__' - ), - None, - ) - if init_method is not None: - names.extend(_collect_names(init_method)) - - for stmt in node.body: - if isinstance(stmt, ast.AnnAssign) and isinstance(stmt.target, ast.Name): - names.append(stmt.target.id) - elif isinstance(stmt, ast.Assign): - for target in stmt.targets: - if isinstance(target, ast.Name): - names.append(target.id) - - return list(dict.fromkeys(names)) - - -def _strip_blank_edges(lines: list[str]) -> list[str]: - start = 0 - end = len(lines) - while start < end and not lines[start].strip(): - start += 1 - while end > start and not lines[end - 1].strip(): - end -= 1 - return lines[start:end] - - -def _join_wrapped_lines(lines: list[str]) -> str: - parts: list[str] = [] - for line in lines: - text = re.sub(r'\s+', ' ', line.strip()) - if not text: - continue - if parts and parts[-1].endswith('-') and not parts[-1].endswith(' -'): - parts[-1] = parts[-1][:-1] + text - else: - parts.append(text) - return ' '.join(parts) - - -def _collapse_whitespace(lines: list[str]) -> str: - return _join_wrapped_lines(lines) - - -def _repair_named_items(block_lines: list[str], names: list[str]) -> list[str] | None: - flat = _collapse_whitespace(block_lines) - if not flat or not names: - return None - - label_pattern = '|'.join(re.escape(name) for name in sorted(set(names), key=len, reverse=True)) - item_re = re.compile( - rf'(?\*{{0,2}}(?:{label_pattern})(?:\s*\([^)]*\))?)\s*:' - ) - matches = list(item_re.finditer(flat)) - if not matches or matches[0].start() != 0: - return None - - repaired: list[str] = [] - for index, match in enumerate(matches): - start = match.end() - end = matches[index + 1].start() if index + 1 < len(matches) else len(flat) - description = flat[start:end].strip() - repaired.append(f' {match.group("label")}: {description}' if description else f' {match.group("label")}:') - return repaired - - -def _repair_generic_items(block_lines: list[str]) -> list[str] | None: - flat = _collapse_whitespace(block_lines) - if not flat: - return None - - matches = list(GENERIC_ITEM_RE.finditer(flat)) - if not matches or matches[0].start() != 0: - return None - - repaired: list[str] = [] - for index, match in enumerate(matches): - start = match.end() - end = matches[index + 1].start() if index + 1 < len(matches) else len(flat) - description = flat[start:end].strip() - repaired.append(f' {match.group("label")}: {description}' if description else f' {match.group("label")}:') - return repaired - - -def _repair_section(section: str, block_lines: list[str], names: list[str]) -> list[str]: - stripped = _strip_blank_edges(block_lines) - if not stripped: - return [] - - if section in SECTION_KINDS_WITH_ITEMS: - flat = _collapse_whitespace(stripped).lower().rstrip('.') - if flat == 'none': - return [] - repaired = _repair_named_items(stripped, names) - if repaired is not None: - return repaired - - if section in GENERIC_ITEM_SECTIONS: - repaired = _repair_generic_items(stripped) - if repaired is not None: - return repaired - - if section in PRESERVE_BLOCK_SECTIONS: - return [f' {line}' if line else '' for line in stripped] - - flat = _collapse_whitespace(stripped) - return [f' {flat}'] if flat else [] - - -def _repair_inline_sections(docstring: str, names: list[str]) -> str: - cleaned = inspect.cleandoc(docstring.replace('\r\n', '\n')) - lines = cleaned.split('\n') - out: list[str] = [] - index = 0 - - while index < len(lines): - raw_line = lines[index] - heading = GOOGLE_SECTION_RE.match(raw_line) - if heading is None: - out.append(raw_line.rstrip()) - index += 1 - continue - - section = heading.group('section') - section_name = 'Args' if section == 'Arguments' else section - out.append(f'{section_name}:') - - block_lines: list[str] = [] - rest = heading.group('rest') - if rest: - block_lines.append(rest) - - index += 1 - while index < len(lines): - next_line = lines[index] - if GOOGLE_SECTION_RE.match(next_line): - break - if ( - section_name not in PRESERVE_BLOCK_SECTIONS - and not next_line.strip() - and index + 1 < len(lines) - and lines[index + 1].strip() - and GOOGLE_SECTION_RE.match(lines[index + 1]) is None - ): - break - block_lines.append(next_line.rstrip()) - index += 1 - - out.extend(_repair_section(section_name, block_lines, names)) - - return '\n'.join(out) - - -def _looks_google(docstring: str) -> bool: - return bool(GOOGLE_SECTION_RE.search(docstring)) - - -def _looks_numpydoc(docstring: str) -> bool: - return bool(NUMPY_SECTION_RE.search(docstring)) - - -def _meta_kinds(parsed) -> set[str]: - kinds: set[str] = set() - for meta in parsed.meta: - args = getattr(meta, 'args', None) or [] - if not args: - continue - kinds.add(str(args[0]).lower()) - return kinds - - -def _contains_unparsed_sections(parsed) -> bool: - for text in (parsed.short_description, parsed.long_description): - if text and GOOGLE_SECTION_RE.search(text): - return True - return False - - -def _has_section_heading(docstring: str, section: str) -> bool: - return re.search(rf'(?m)^[ \t]*{re.escape(section)}:\s*(?:\S.*)?$', docstring) is not None - - -def _is_safe_conversion(docstring: str, parsed) -> bool: - if '::' in docstring: - return False - - kinds = _meta_kinds(parsed) - if _contains_unparsed_sections(parsed): - return False - - expectations = { - 'Args': 'param', - 'Arguments': 'param', - 'Attributes': 'attribute', - 'Returns': 'returns', - 'Raises': 'raises', - 'Yields': 'yields', - 'Examples': 'examples', - } - for section, expected_kind in expectations.items(): - if _has_section_heading(docstring, section) and expected_kind not in kinds: - return False - - return True - - -def _is_section_header(lines: list[str], index: int) -> bool: - return index + 1 < len(lines) and bool(lines[index].strip()) and set(lines[index + 1].strip()) == {'-'} - - -def _wrap_paragraph(lines: list[str], width: int, indent: str = '') -> list[str]: - if not lines: - return [] - - text = _join_wrapped_lines(lines) - if not text: - return [''] if lines else [] - - return textwrap.wrap( - text, - width=width, - initial_indent=indent, - subsequent_indent=indent, - break_long_words=False, - break_on_hyphens=False, - ) - - -def _format_freeform_block(lines: list[str], width: int = 72, indent: str = '') -> list[str]: - stripped = _strip_blank_edges(lines) - if not stripped: - return [] - - formatted: list[str] = [] - paragraph: list[str] = [] - for line in stripped: - if not line.strip(): - if paragraph: - formatted.extend(_wrap_paragraph(paragraph, width=width, indent=indent)) - paragraph = [] - if formatted and formatted[-1] != '': - formatted.append('') - continue - - content = line.strip() - if content.startswith(('>>>', '...')): - if paragraph: - formatted.extend(_wrap_paragraph(paragraph, width=width, indent=indent)) - paragraph = [] - formatted.append(f'{indent}{content}') - continue - - paragraph.append(content) - - if paragraph: - formatted.extend(_wrap_paragraph(paragraph, width=width, indent=indent)) - - return formatted - - -def _format_named_section(block_lines: list[str]) -> list[str]: - lines = _strip_blank_edges(block_lines) - if not lines: - return [] - - formatted: list[str] = [] - index = 0 - while index < len(lines): - if not lines[index].strip(): - index += 1 - continue - - header = lines[index].strip() - formatted.append(header) - index += 1 - - description: list[str] = [] - while index < len(lines): - line = lines[index] - if not line.strip(): - index += 1 - if description: - break - continue - if not line.startswith(' ') and not line.startswith('\t'): - break - description.append(line.strip()) - index += 1 - - if description: - formatted.extend(_wrap_paragraph(description, width=68, indent=' ')) - elif formatted and formatted[-1] != '': - formatted.append('') - - if formatted and formatted[-1] == '': - formatted.pop() - return formatted - - -def _format_return_like_section(block_lines: list[str]) -> list[str]: - lines = _strip_blank_edges(block_lines) - if not lines: - return [] - - first = next((line for line in lines if line.strip()), '') - if first.startswith((' ', '\t')): - return _format_freeform_block(lines, width=68, indent=' ') - - return _format_named_section(lines) - - -def _format_numpydoc_output(docstring: str) -> str: - lines = docstring.strip('\n').splitlines() - formatted: list[str] = [] - index = 0 - - preamble: list[str] = [] - while index < len(lines) and not _is_section_header(lines, index): - preamble.append(lines[index]) - index += 1 - formatted.extend(_format_freeform_block(preamble)) - - while index < len(lines): - if not _is_section_header(lines, index): - index += 1 - continue - - if formatted and formatted[-1] != '': - formatted.append('') - heading = lines[index].strip() - underline = lines[index + 1].strip() - formatted.extend([heading, underline]) - index += 2 - - block: list[str] = [] - while index < len(lines) and not _is_section_header(lines, index): - block.append(lines[index]) - index += 1 - - if heading in {'Parameters', 'Attributes'}: - formatted.extend(_format_named_section(block)) - elif heading in {'Returns', 'Raises', 'Yields'}: - formatted.extend(_format_return_like_section(block)) - else: - formatted.extend(_format_freeform_block(block)) - - return '\n'.join(_strip_blank_edges(formatted)) - - -def _convert_docstring(docstring: str, names: list[str]) -> str | None: - cleaned = inspect.cleandoc(docstring) - if not _looks_google(cleaned): - return None - - repaired = _repair_inline_sections(cleaned, names) - - try: - parsed = parse(repaired, style=DocstringStyle.GOOGLE) - except Exception: - return None - - if not _is_safe_conversion(repaired, parsed): - return None - - converted = _format_numpydoc_output(compose(parsed, style=DocstringStyle.NUMPYDOC)) - return converted if converted != cleaned else None - - -def _reformat_numpydoc_docstring(docstring: str) -> str | None: - cleaned = inspect.cleandoc(docstring) - if not _looks_numpydoc(cleaned): - return None - - formatted = _format_numpydoc_output(cleaned) - return formatted if formatted != cleaned else None - - -def _format_multiline_docstring(content: str, indent: int) -> str: - indent_str = ' ' * indent - lines = content.strip('\n').splitlines() - body = '\n'.join(f'{indent_str}{line}' if line else '' for line in lines) - return f'\n{body}\n{indent_str}' - - -def _convert_file(path: Path) -> bool: - source_code = path.read_text() - tree = ast.parse(source_code, type_comments=True) - line_starts = calc_line_starts(source_code) - replacements: list[tuple[int, int, str]] = [] - - nodes: list[ast.AST] = [tree] - nodes.extend(ast.walk(tree)) - - for node in nodes: - if not isinstance(node, (ast.Module, ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): - continue - - docstring_obj = find_docstring(node) - if docstring_obj is None: - continue - - value = docstring_obj.value - end_lineno = getattr(value, 'end_lineno', None) - end_col_offset = getattr(value, 'end_col_offset', None) - if end_lineno is None or end_col_offset is None: - continue - - docstring = ast.get_docstring(node, clean=False) - if docstring is None: - continue - - converted = _convert_docstring(docstring, _collect_names(node)) - if converted is None: - converted = _reformat_numpydoc_docstring(docstring) - if converted is None: - continue - - start = calc_abs_pos(source_code, line_starts, value.lineno, value.col_offset) - end = calc_abs_pos(source_code, line_starts, end_lineno, end_col_offset) - original_literal = source_code[start:end] - leading_indent = getattr(value, 'col_offset', 0) - formatted = _format_multiline_docstring(converted, leading_indent) - new_literal = rebuild_literal(original_literal, formatted) - if new_literal is None or new_literal == original_literal: - continue - - replacements.append((start, end, new_literal)) - - if not replacements: - return False - - replacements.sort(reverse=True) - new_source = source_code - for start, end, replacement in replacements: - new_source = new_source[:start] + replacement + new_source[end:] - - compile(new_source, str(path), 'exec') - path.write_text(new_source) - return True - - -def main(argv: list[str]) -> int: - input_paths = [Path(arg) for arg in argv] if argv else [Path('src'), Path('tools')] - changed = 0 - - for path in _iter_python_files(input_paths): - if _convert_file(path): - changed += 1 - print(f'Converted {path}') - - print(f'Converted docstrings in {changed} file(s).') - return 0 - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) From 75d48ec7b98e34c9f0ccc4a5e601a22a1c697949 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 00:57:05 +0200 Subject: [PATCH 47/51] Harden pickle and urlopen against security audit findings --- .../crystallography/space_groups.py | 76 +++++++++++++++++-- src/easydiffraction/utils/utils.py | 3 + 2 files changed, 73 insertions(+), 6 deletions(-) diff --git a/src/easydiffraction/crystallography/space_groups.py b/src/easydiffraction/crystallography/space_groups.py index 052b4c21..e370d116 100644 --- a/src/easydiffraction/crystallography/space_groups.py +++ b/src/easydiffraction/crystallography/space_groups.py @@ -8,20 +8,84 @@ involved. """ +import builtins import gzip +import io import pickle # noqa: S403 from pathlib import Path +from typing import override +_SAFE_BUILTINS = frozenset({ + 'dict', + 'frozenset', + 'list', + 'set', + 'tuple', +}) -def _restricted_pickle_load(file_obj: object) -> object: + +class _RestrictedUnpickler(pickle.Unpickler): # noqa: S301 + """ + Unpickler that only allows safe built-in types. + + Rejects any ``GLOBAL`` opcode that references modules or classes + outside of ``builtins``, limiting deserialisation to plain Python + data structures (dicts, lists, tuples, sets, frozensets) plus + primitive scalars (str, int, float, bool, None) which the pickle + protocol handles without ``GLOBAL``. + """ + + @override + def find_class( + self, + module: str, + name: str, + ) -> type: + """ + Allow only safe built-in types. + + Parameters + ---------- + module : str + The module name from the pickle stream. + name : str + The class/function name from the pickle stream. + + Returns + ------- + type + The resolved built-in type. + + Raises + ------ + pickle.UnpicklingError + If the requested type is not in the safe set. + """ + if module == 'builtins' and name in _SAFE_BUILTINS: + return getattr(builtins, name) + msg = f'Restricted unpickler refused {module}.{name}' + raise pickle.UnpicklingError(msg) + + +def _restricted_pickle_load(file_obj: io.BufferedIOBase) -> object: """ - Load pickle data from an internal gz file (trusted boundary). + Load pickle data using a restricted unpickler. + + Only safe built-in types (dict, list, tuple, set, frozenset, and + primitive scalars) are permitted. The archive lives in the package; + no user-controlled input enters this function. + + Parameters + ---------- + file_obj : io.BufferedIOBase + Binary file object to read pickle data from. - The archive lives in the package; no user-controlled input enters - this function. If distribution process changes, revisit. + Returns + ------- + object + The deserialised Python data structure. """ - data = pickle.load(file_obj) # noqa: S301 - return data + return _RestrictedUnpickler(file_obj).load() def _load() -> object: diff --git a/src/easydiffraction/utils/utils.py b/src/easydiffraction/utils/utils.py index b98d30cf..7d278a6c 100644 --- a/src/easydiffraction/utils/utils.py +++ b/src/easydiffraction/utils/utils.py @@ -322,6 +322,9 @@ def _safe_urlopen(request_or_url: object) -> object: # type: ignore[no-untyped- if parsed.scheme != 'https': # pragma: no cover msg = 'Only https URLs are permitted' raise ValueError(msg) + else: + msg = f'Expected str or Request, got {type(request_or_url).__name__}' + raise TypeError(msg) return urllib.request.urlopen(request_or_url) # noqa: S310 From eb193e41b6161349ed19c7f02f09ca6ca16f1a8e Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 01:10:33 +0200 Subject: [PATCH 48/51] Increase integration test coverage --- pixi.toml | 1 - tests/integration/fitting/conftest.py | 89 ++++++++++ .../fitting/test_analysis_display.py | 95 ++++++++++ .../fitting/test_exploration_help.py | 163 ++++++++++++++++++ tests/integration/fitting/test_plotting.py | 29 ++++ .../fitting/test_summary_report.py | 36 ++++ 6 files changed, 412 insertions(+), 1 deletion(-) create mode 100644 tests/integration/fitting/conftest.py create mode 100644 tests/integration/fitting/test_analysis_display.py create mode 100644 tests/integration/fitting/test_exploration_help.py create mode 100644 tests/integration/fitting/test_plotting.py create mode 100644 tests/integration/fitting/test_summary_report.py diff --git a/pixi.toml b/pixi.toml index 577edbef..b67cc0d9 100644 --- a/pixi.toml +++ b/pixi.toml @@ -168,7 +168,6 @@ docstring-coverage = 'interrogate -c pyproject.toml src/easydiffraction' cov = { depends-on = [ 'docstring-coverage', 'unit-tests-coverage', - 'functional-tests-coverage', 'integration-tests-coverage', ] } diff --git a/tests/integration/fitting/conftest.py b/tests/integration/fitting/conftest.py new file mode 100644 index 00000000..83be2e17 --- /dev/null +++ b/tests/integration/fitting/conftest.py @@ -0,0 +1,89 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared fixtures for integration tests.""" + +import tempfile + +import pytest + +from easydiffraction import ExperimentFactory +from easydiffraction import Project +from easydiffraction import StructureFactory +from easydiffraction import download_data + +TEMP_DIR = tempfile.gettempdir() + + +@pytest.fixture(scope='session') +def lbco_fitted_project(): + """Build and fit an LBCO CWL project (session-scoped for reuse).""" + model = StructureFactory.from_scratch(name='lbco') + model.space_group.name_h_m = 'P m -3 m' + model.cell.length_a = 3.88 + model.atom_sites.create( + label='La', + type_symbol='La', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.1, + ) + model.atom_sites.create( + label='Ba', + type_symbol='Ba', + fract_x=0, + fract_y=0, + fract_z=0, + wyckoff_letter='a', + occupancy=0.5, + b_iso=0.1, + ) + model.atom_sites.create( + label='Co', + type_symbol='Co', + fract_x=0.5, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='b', + b_iso=0.1, + ) + model.atom_sites.create( + label='O', + type_symbol='O', + fract_x=0, + fract_y=0.5, + fract_z=0.5, + wyckoff_letter='c', + b_iso=0.1, + ) + + data_path = download_data(id=3, destination=TEMP_DIR) + expt = ExperimentFactory.from_data_path(name='hrpt', data_path=data_path) + expt.instrument.setup_wavelength = 1.494 + expt.instrument.calib_twotheta_offset = 0 + expt.peak.broad_gauss_u = 0.1 + expt.peak.broad_gauss_v = -0.1 + expt.peak.broad_gauss_w = 0.2 + expt.peak.broad_lorentz_x = 0 + expt.peak.broad_lorentz_y = 0 + expt.linked_phases.create(id='lbco', scale=5.0) + expt.background.create(id='1', x=10, y=170) + expt.background.create(id='2', x=165, y=170) + + project = Project() + project.structures.add(model) + project.experiments.add(expt) + project.analysis.current_minimizer = 'lmfit' + + model.cell.length_a.free = True + expt.linked_phases['lbco'].scale.free = True + expt.instrument.calib_twotheta_offset.free = True + expt.background['1'].y.free = True + expt.background['2'].y.free = True + + project.analysis.fit(verbosity='silent') + + return project diff --git a/tests/integration/fitting/test_analysis_display.py b/tests/integration/fitting/test_analysis_display.py new file mode 100644 index 00000000..7c7b1da3 --- /dev/null +++ b/tests/integration/fitting/test_analysis_display.py @@ -0,0 +1,95 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause + +"""Integration tests for Analysis display methods and CIF serialization.""" + + +def test_display_all_params(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.display.all_params() + + +def test_display_fittable_params(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.display.fittable_params() + + +def test_display_free_params(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.display.free_params() + + +def test_display_how_to_access_parameters(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.display.how_to_access_parameters() + + +def test_display_parameter_cif_uids(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.display.parameter_cif_uids() + + +def test_display_constraints_empty(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.display.constraints() + + +def test_display_fit_results(lbco_fitted_project): + project = lbco_fitted_project + assert project.analysis.fit_results is not None + project.analysis.display.fit_results() + + +def test_display_as_cif(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.display.as_cif() + + +def test_analysis_as_cif(lbco_fitted_project): + project = lbco_fitted_project + cif_text = project.analysis.as_cif() + assert isinstance(cif_text, str) + assert len(cif_text) > 0 + + +def test_analysis_help(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.help() + + +def test_show_current_minimizer(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.show_current_minimizer() + + +def test_show_available_minimizers(lbco_fitted_project): + from easydiffraction.analysis.analysis import Analysis + + Analysis.show_available_minimizers() + + +def test_show_supported_aliases_types(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.show_supported_aliases_types() + project.analysis.show_current_aliases_type() + + +def test_show_supported_constraints_types(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.show_supported_constraints_types() + project.analysis.show_current_constraints_type() + + +def test_show_supported_fit_mode_types(lbco_fitted_project): + project = lbco_fitted_project + project.analysis.show_supported_fit_mode_types() + project.analysis.show_current_fit_mode_type() + + +def test_fit_results_attributes(lbco_fitted_project): + project = lbco_fitted_project + results = project.analysis.fit_results + assert results is not None + assert results.reduced_chi_square is not None + assert results.reduced_chi_square > 0 + assert isinstance(results.success, bool) diff --git a/tests/integration/fitting/test_exploration_help.py b/tests/integration/fitting/test_exploration_help.py new file mode 100644 index 00000000..ba41ff1b --- /dev/null +++ b/tests/integration/fitting/test_exploration_help.py @@ -0,0 +1,163 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause + +"""Integration tests for help(), show_as_cif(), and switchable-category show methods.""" + + +def test_project_str(lbco_fitted_project): + project = lbco_fitted_project + text = str(project) + assert 'Project' in text + assert '1 structures' in text + assert '1 experiments' in text + + +def test_project_help(lbco_fitted_project): + project = lbco_fitted_project + project.help() + + +def test_project_full_name(lbco_fitted_project): + project = lbco_fitted_project + assert project.full_name == project.name + + +def test_structure_help(lbco_fitted_project): + project = lbco_fitted_project + model = project.structures['lbco'] + model.help() + + +def test_structure_show_as_cif(lbco_fitted_project): + project = lbco_fitted_project + model = project.structures['lbco'] + model.show_as_cif() + + +def test_structure_as_cif(lbco_fitted_project): + project = lbco_fitted_project + model = project.structures['lbco'] + cif_text = model.as_cif + assert isinstance(cif_text, str) + assert '_space_group' in cif_text + + +def test_structure_switchable_category_types(lbco_fitted_project): + project = lbco_fitted_project + model = project.structures['lbco'] + # Cell + model.show_supported_cell_types() + model.show_current_cell_type() + assert isinstance(model.cell_type, str) + # Space group + model.show_supported_space_group_types() + model.show_current_space_group_type() + assert isinstance(model.space_group_type, str) + # Atom sites + model.show_supported_atom_sites_types() + model.show_current_atom_sites_type() + assert isinstance(model.atom_sites_type, str) + + +def test_experiment_help(lbco_fitted_project): + project = lbco_fitted_project + expt = project.experiments['hrpt'] + expt.help() + + +def test_experiment_show_as_cif(lbco_fitted_project): + project = lbco_fitted_project + expt = project.experiments['hrpt'] + expt.show_as_cif() + + +def test_experiment_as_cif(lbco_fitted_project): + project = lbco_fitted_project + expt = project.experiments['hrpt'] + cif_text = expt.as_cif + assert isinstance(cif_text, str) + assert len(cif_text) > 0 + + +def test_experiment_switchable_category_types(lbco_fitted_project): + project = lbco_fitted_project + expt = project.experiments['hrpt'] + # Instrument + expt.show_supported_instrument_types() + expt.show_current_instrument_type() + assert isinstance(expt.instrument_type, str) + # Background + expt.show_supported_background_types() + expt.show_current_background_type() + assert isinstance(expt.background_type, str) + # Peak profile + expt.show_supported_peak_profile_types() + expt.show_current_peak_profile_type() + assert isinstance(expt.peak_profile_type, str) + # Linked phases + expt.show_supported_linked_phases_types() + expt.show_current_linked_phases_type() + assert isinstance(expt.linked_phases_type, str) + # Calculator + expt.show_supported_calculator_types() + expt.show_current_calculator_type() + assert isinstance(expt.calculator_type, str) + # Diffrn + expt.show_supported_diffrn_types() + expt.show_current_diffrn_type() + assert isinstance(expt.diffrn_type, str) + + +def test_experiment_data_info(lbco_fitted_project): + project = lbco_fitted_project + expt = project.experiments['hrpt'] + # Data access + assert expt.data is not None + assert expt.data.x is not None + assert len(expt.data.x) > 0 + assert expt.data.intensity_meas is not None + + +def test_structure_cell_properties(lbco_fitted_project): + project = lbco_fitted_project + model = project.structures['lbco'] + # Access cell parameters + assert model.cell.length_a.value > 0 + params = model.cell.parameters + assert len(params) > 0 + + +def test_structure_atom_sites_iteration(lbco_fitted_project): + project = lbco_fitted_project + model = project.structures['lbco'] + count = 0 + for site in model.atom_sites: + assert site.label.value is not None + assert site.type_symbol.value is not None + count += 1 + assert count == 4 + + +def test_structures_collection_names(lbco_fitted_project): + project = lbco_fitted_project + names = project.structures.names + assert 'lbco' in names + # Parameters + params = project.structures.parameters + assert len(params) > 0 + fittable = project.structures.fittable_parameters + assert len(fittable) > 0 + free = project.structures.free_parameters + assert len(free) > 0 + + +def test_experiments_collection_names(lbco_fitted_project): + project = lbco_fitted_project + names = project.experiments.names + assert 'hrpt' in names + params = project.experiments.parameters + assert len(params) > 0 + fittable = project.experiments.fittable_parameters + assert len(fittable) > 0 + free = project.experiments.free_parameters + assert len(free) > 0 diff --git a/tests/integration/fitting/test_plotting.py b/tests/integration/fitting/test_plotting.py new file mode 100644 index 00000000..8d1a6603 --- /dev/null +++ b/tests/integration/fitting/test_plotting.py @@ -0,0 +1,29 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause + +"""Integration tests for the Plotter facade on a fitted project.""" + + +def test_plot_meas(lbco_fitted_project): + project = lbco_fitted_project + project.plotter.plot_meas(expt_name='hrpt') + + +def test_plot_calc(lbco_fitted_project): + project = lbco_fitted_project + project.plotter.plot_calc(expt_name='hrpt') + + +def test_plot_meas_vs_calc(lbco_fitted_project): + project = lbco_fitted_project + project.plotter.plot_meas_vs_calc(expt_name='hrpt') + + +def test_plot_meas_with_range(lbco_fitted_project): + project = lbco_fitted_project + project.plotter.plot_meas(expt_name='hrpt', x_min=20, x_max=80) + + +def test_plot_meas_vs_calc_with_range(lbco_fitted_project): + project = lbco_fitted_project + project.plotter.plot_meas_vs_calc(expt_name='hrpt', x_min=20, x_max=80) diff --git a/tests/integration/fitting/test_summary_report.py b/tests/integration/fitting/test_summary_report.py new file mode 100644 index 00000000..5a16b299 --- /dev/null +++ b/tests/integration/fitting/test_summary_report.py @@ -0,0 +1,36 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause + +"""Integration tests for Summary report generation and CIF export.""" + + +def test_show_report(lbco_fitted_project): + project = lbco_fitted_project + project.summary.show_report() + + +def test_show_project_info(lbco_fitted_project): + project = lbco_fitted_project + project.summary.show_project_info() + + +def test_show_crystallographic_data(lbco_fitted_project): + project = lbco_fitted_project + project.summary.show_crystallographic_data() + + +def test_show_experimental_data(lbco_fitted_project): + project = lbco_fitted_project + project.summary.show_experimental_data() + + +def test_show_fitting_details(lbco_fitted_project): + project = lbco_fitted_project + project.summary.show_fitting_details() + + +def test_summary_as_cif(lbco_fitted_project): + project = lbco_fitted_project + cif_text = project.summary.as_cif() + assert isinstance(cif_text, str) + assert len(cif_text) > 0 From b20b9044cda7382a71d62a760f5ca023f5d53e44 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 01:21:18 +0200 Subject: [PATCH 49/51] Add unit test coverage for utils.py helper functions --- .../utils/test_utils_coverage.py | 461 ++++++++++++++++++ 1 file changed, 461 insertions(+) create mode 100644 tests/unit/easydiffraction/utils/test_utils_coverage.py diff --git a/tests/unit/easydiffraction/utils/test_utils_coverage.py b/tests/unit/easydiffraction/utils/test_utils_coverage.py new file mode 100644 index 00000000..d4ae5853 --- /dev/null +++ b/tests/unit/easydiffraction/utils/test_utils_coverage.py @@ -0,0 +1,461 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause + +"""Supplementary unit tests for easydiffraction.utils.utils — coverage gaps.""" + +import urllib.request + +import numpy as np +import pytest + + +# --- _validate_url ----------------------------------------------------------- + + +def test_validate_url_accepts_http(): + import easydiffraction.utils.utils as MUT + + # Should not raise for http + MUT._validate_url('http://example.com/file.cif') + + +def test_validate_url_accepts_https(): + import easydiffraction.utils.utils as MUT + + # Should not raise for https + MUT._validate_url('https://example.com/file.cif') + + +# --- _filename_for_id_from_url ------------------------------------------------ + + +def test_filename_for_id_from_url_with_extension(): + import easydiffraction.utils.utils as MUT + + result = MUT._filename_for_id_from_url(12, 'https://example.com/data/file.xye') + assert result == 'ed-12.xye' + + +def test_filename_for_id_from_url_cif_extension(): + import easydiffraction.utils.utils as MUT + + result = MUT._filename_for_id_from_url('3', 'https://example.com/path/model.cif') + assert result == 'ed-3.cif' + + +def test_filename_for_id_from_url_no_extension(): + import easydiffraction.utils.utils as MUT + + result = MUT._filename_for_id_from_url(7, 'https://example.com/path/noext') + assert result == 'ed-7' + + +# --- _normalize_known_hash ---------------------------------------------------- + + +def test_normalize_known_hash_none(): + import easydiffraction.utils.utils as MUT + + assert MUT._normalize_known_hash(None) is None + + +def test_normalize_known_hash_empty_string(): + import easydiffraction.utils.utils as MUT + + assert MUT._normalize_known_hash('') is None + + +def test_normalize_known_hash_placeholder(): + import easydiffraction.utils.utils as MUT + + assert MUT._normalize_known_hash('sha256:...') is None + + +def test_normalize_known_hash_placeholder_uppercase(): + import easydiffraction.utils.utils as MUT + + assert MUT._normalize_known_hash('SHA256:...') is None + + +def test_normalize_known_hash_valid(): + import easydiffraction.utils.utils as MUT + + h = 'sha256:abc123' + assert MUT._normalize_known_hash(h) == h + + +def test_normalize_known_hash_strips_whitespace(): + import easydiffraction.utils.utils as MUT + + h = ' sha256:abc123 ' + assert MUT._normalize_known_hash(h) == 'sha256:abc123' + + +# --- stripped_package_version ------------------------------------------------- + + +def test_stripped_package_version_returns_public(): + import easydiffraction.utils.utils as MUT + + # numpy is always installed in the test env + result = MUT.stripped_package_version('numpy') + assert result is not None + assert '+' not in result # no local segment + + +def test_stripped_package_version_missing_package(): + import easydiffraction.utils.utils as MUT + + result = MUT.stripped_package_version('__definitely_not_installed__') + assert result is None + + +def test_stripped_package_version_strips_local(monkeypatch): + import easydiffraction.utils.utils as MUT + + monkeypatch.setattr(MUT, 'package_version', lambda name: '1.2.3+local456') + result = MUT.stripped_package_version('mypkg') + assert result == '1.2.3' + + +def test_stripped_package_version_invalid_version(monkeypatch): + import easydiffraction.utils.utils as MUT + + monkeypatch.setattr(MUT, 'package_version', lambda name: 'not-a-version!!!') + result = MUT.stripped_package_version('mypkg') + assert result == 'not-a-version!!!' + + +# --- _is_dev_version --------------------------------------------------------- + + +def test_is_dev_version_none_version(monkeypatch): + import easydiffraction.utils.utils as MUT + + monkeypatch.setattr(MUT, 'package_version', lambda name: None) + assert MUT._is_dev_version('easydiffraction') is True + + +# --- _safe_urlopen ------------------------------------------------------------ + + +def test_safe_urlopen_rejects_non_https_string(): + import easydiffraction.utils.utils as MUT + + with pytest.raises(ValueError, match='Only https URLs are permitted'): + MUT._safe_urlopen('http://example.com/file') + + +def test_safe_urlopen_rejects_non_https_request(): + import easydiffraction.utils.utils as MUT + + req = urllib.request.Request('http://example.com/file') + with pytest.raises(ValueError, match='Only https URLs are permitted'): + MUT._safe_urlopen(req) + + +def test_safe_urlopen_rejects_invalid_type(): + import easydiffraction.utils.utils as MUT + + with pytest.raises(TypeError, match='Expected str or Request, got int'): + MUT._safe_urlopen(42) + + +# --- _resolve_tutorial_url ---------------------------------------------------- + + +def test_resolve_tutorial_url_replaces_version(monkeypatch): + import easydiffraction.utils.utils as MUT + + monkeypatch.setattr(MUT, '_get_version_for_url', lambda: '1.0.0') + template = 'https://example.com/{version}/tutorials/ed-1.ipynb' + result = MUT._resolve_tutorial_url(template) + assert result == 'https://example.com/1.0.0/tutorials/ed-1.ipynb' + + +def test_resolve_tutorial_url_dev(monkeypatch): + import easydiffraction.utils.utils as MUT + + monkeypatch.setattr(MUT, '_get_version_for_url', lambda: 'dev') + template = 'https://example.com/{version}/tutorials/ed-2.ipynb' + result = MUT._resolve_tutorial_url(template) + assert result == 'https://example.com/dev/tutorials/ed-2.ipynb' + + +# --- render_cif --------------------------------------------------------------- + + +def test_render_cif_outputs_cif_text(capsys): + import easydiffraction.utils.utils as MUT + + cif_text = '_cell_length_a 5.0\n_cell_length_b 6.0' + MUT.render_cif(cif_text) + out = capsys.readouterr().out + assert '_cell_length_a 5.0' in out + assert '_cell_length_b 6.0' in out + + +# --- sin_theta_over_lambda_to_d_spacing --------------------------------------- + + +def test_sin_theta_over_lambda_to_d_scalar(): + import easydiffraction.utils.utils as MUT + + # d = 1 / (2 * sin_theta_over_lambda) + result = MUT.sin_theta_over_lambda_to_d_spacing(0.25) + assert np.isclose(result, 2.0) + + +def test_sin_theta_over_lambda_to_d_array(): + import easydiffraction.utils.utils as MUT + + vals = np.array([0.1, 0.25, 0.5]) + expected = 1.0 / (2 * vals) + result = MUT.sin_theta_over_lambda_to_d_spacing(vals) + assert np.allclose(result, expected) + + +def test_sin_theta_over_lambda_to_d_zero_returns_nan(): + import easydiffraction.utils.utils as MUT + + result = MUT.sin_theta_over_lambda_to_d_spacing(np.array([0.0])) + assert np.isnan(result[0]) + + +def test_sin_theta_over_lambda_to_d_negative_returns_nan(): + import easydiffraction.utils.utils as MUT + + result = MUT.sin_theta_over_lambda_to_d_spacing(np.array([-0.1])) + assert np.isnan(result[0]) + + +# --- str_to_ufloat additional branches ---------------------------------------- + + +def test_str_to_ufloat_none_returns_default(): + import easydiffraction.utils.utils as MUT + + u = MUT.str_to_ufloat(None, default=5.0) + assert np.isclose(u.nominal_value, 5.0) + assert np.isnan(u.std_dev) + + +def test_str_to_ufloat_none_no_default_raises(): + import easydiffraction.utils.utils as MUT + + # When s=None and default=None, ufloat(None, nan) raises TypeError + with pytest.raises(TypeError): + MUT.str_to_ufloat(None) + + +def test_str_to_ufloat_empty_brackets_zero_uncertainty(): + import easydiffraction.utils.utils as MUT + + u = MUT.str_to_ufloat('3.566()') + assert np.isclose(u.nominal_value, 3.566) + assert np.isclose(u.std_dev, 0.0) + + +def test_str_to_ufloat_invalid_string_returns_default(): + import easydiffraction.utils.utils as MUT + + u = MUT.str_to_ufloat('not_a_number', default=99.0) + assert np.isclose(u.nominal_value, 99.0) + assert np.isnan(u.std_dev) + + +# --- tof_to_d additional branches --------------------------------------------- + + +def test_tof_to_d_type_error_non_array(): + import easydiffraction.utils.utils as MUT + + with pytest.raises(TypeError, match="'tof' must be a NumPy array"): + MUT.tof_to_d([10.0, 20.0], offset=0.0, linear=1.0, quad=0.0) + + +def test_tof_to_d_type_error_non_numeric_offset(): + import easydiffraction.utils.utils as MUT + + with pytest.raises(TypeError, match="'offset' must be a real number"): + MUT.tof_to_d(np.array([10.0]), offset='bad', linear=1.0, quad=0.0) + + +def test_tof_to_d_type_error_non_numeric_linear(): + import easydiffraction.utils.utils as MUT + + with pytest.raises(TypeError, match="'linear' must be a real number"): + MUT.tof_to_d(np.array([10.0]), offset=0.0, linear=None, quad=0.0) + + +def test_tof_to_d_both_linear_and_quad_zero(): + import easydiffraction.utils.utils as MUT + + tof = np.array([1.0, 2.0]) + result = MUT.tof_to_d(tof, offset=0.0, linear=0.0, quad=0.0) + assert np.all(np.isnan(result)) + + +def test_tof_to_d_negative_discriminant(): + import easydiffraction.utils.utils as MUT + + # Choose coefficients that produce a negative discriminant: + # disc = linear^2 - 4*quad*(offset - tof) < 0 + # linear=0, quad=1, offset=10, tof=5 → disc = 0 - 4*1*(10-5) = -20 < 0 + tof = np.array([5.0]) + result = MUT.tof_to_d(tof, offset=10.0, linear=0.0, quad=1.0) + assert np.all(np.isnan(result)) + + +def test_tof_to_d_linear_negative_tof_minus_offset_gives_nan(): + import easydiffraction.utils.utils as MUT + + # linear case: d = (tof - offset) / linear → negative when tof < offset + tof = np.array([1.0]) + result = MUT.tof_to_d(tof, offset=10.0, linear=1.0, quad=0.0) + assert np.all(np.isnan(result)) + + +# --- download_data ------------------------------------------------------------ + + +def test_download_data_unknown_id(monkeypatch): + import easydiffraction.utils.utils as MUT + + fake_index = {'1': {'url': 'https://example.com/data.xye', 'hash': None}} + monkeypatch.setattr(MUT, '_fetch_data_index', lambda: fake_index) + with pytest.raises(KeyError, match='Unknown dataset id=999'): + MUT.download_data(id=999) + + +def test_download_data_already_exists_no_overwrite(monkeypatch, tmp_path, capsys): + import easydiffraction.utils.utils as MUT + + fake_index = { + '1': { + 'url': 'https://example.com/data.xye', + 'hash': None, + 'description': 'Test data', + } + } + monkeypatch.setattr(MUT, '_fetch_data_index', lambda: fake_index) + + # Create existing file + (tmp_path / 'ed-1.xye').write_text('existing data') + + result = MUT.download_data(id=1, destination=str(tmp_path), overwrite=False) + assert result == str(tmp_path / 'ed-1.xye') + out = capsys.readouterr().out + assert 'already present' in out + assert (tmp_path / 'ed-1.xye').read_text() == 'existing data' + + +def test_download_data_success(monkeypatch, tmp_path, capsys): + import easydiffraction.utils.utils as MUT + + fake_index = { + '1': { + 'url': 'https://example.com/data.xye', + 'hash': None, + 'description': 'Test data', + } + } + monkeypatch.setattr(MUT, '_fetch_data_index', lambda: fake_index) + + # Mock pooch.retrieve to create the file + def fake_retrieve(url, known_hash, fname, path): + import pathlib + + pathlib.Path(path, fname).write_text('x y e') + return str(pathlib.Path(path, fname)) + + monkeypatch.setattr(MUT.pooch, 'retrieve', fake_retrieve) + + result = MUT.download_data(id=1, destination=str(tmp_path)) + assert result == str(tmp_path / 'ed-1.xye') + assert (tmp_path / 'ed-1.xye').exists() + out = capsys.readouterr().out + assert 'downloaded' in out + + +def test_download_data_overwrite_existing(monkeypatch, tmp_path, capsys): + import easydiffraction.utils.utils as MUT + + fake_index = { + '1': { + 'url': 'https://example.com/data.xye', + 'hash': None, + 'description': 'Test data', + } + } + monkeypatch.setattr(MUT, '_fetch_data_index', lambda: fake_index) + + # Create existing file + (tmp_path / 'ed-1.xye').write_text('old data') + + def fake_retrieve(url, known_hash, fname, path): + import pathlib + + pathlib.Path(path, fname).write_text('new data') + return str(pathlib.Path(path, fname)) + + monkeypatch.setattr(MUT.pooch, 'retrieve', fake_retrieve) + + result = MUT.download_data(id=1, destination=str(tmp_path), overwrite=True) + assert result == str(tmp_path / 'ed-1.xye') + assert (tmp_path / 'ed-1.xye').read_text() == 'new data' + + +def test_download_data_no_description(monkeypatch, tmp_path, capsys): + import easydiffraction.utils.utils as MUT + + fake_index = { + '1': { + 'url': 'https://example.com/data.xye', + 'hash': 'sha256:...', + } + } + monkeypatch.setattr(MUT, '_fetch_data_index', lambda: fake_index) + + # Create existing file so we hit the no-overwrite short-circuit + (tmp_path / 'ed-1.xye').write_text('existing') + + result = MUT.download_data(id=1, destination=str(tmp_path)) + assert result == str(tmp_path / 'ed-1.xye') + out = capsys.readouterr().out + assert 'Data #1' in out + + +# --- download_tutorial with overwrite=True ------------------------------------ + + +def test_download_tutorial_overwrite(monkeypatch, tmp_path, capsys): + import easydiffraction.utils.utils as MUT + + fake_index = { + '1': { + 'url': 'https://example.com/{version}/tutorials/ed-1/ed-1.ipynb', + 'title': 'Quick Start', + }, + } + monkeypatch.setattr(MUT, '_fetch_tutorials_index', lambda: fake_index) + monkeypatch.setattr(MUT, '_get_version_for_url', lambda: '0.8.0') + + # Create existing file + (tmp_path / 'ed-1.ipynb').write_text('old content') + + class DummyResp: + def read(self): + return b'{"cells": ["new"]}' + + def __enter__(self): + return self + + def __exit__(self, *args): + return False + + monkeypatch.setattr(MUT, '_safe_urlopen', lambda url: DummyResp()) + + result = MUT.download_tutorial(id=1, destination=str(tmp_path), overwrite=True) + assert result == str(tmp_path / 'ed-1.ipynb') + assert 'new' in (tmp_path / 'ed-1.ipynb').read_text() From 80c69f430a8b8615905770dccb088fa81e84a25f Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 08:22:31 +0200 Subject: [PATCH 50/51] Update coverage.yml to refine workflow triggers and concurrency settings --- .github/workflows/coverage.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index cd9ff1e0..e1e44d41 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -1,8 +1,10 @@ name: Coverage checks on: - # Trigger the workflow on push + # Trigger the workflow on push to develop push: + branches: + - develop # Do not run on version tags (those are handled by other workflows) tags-ignore: ['v*'] # Trigger the workflow on pull request @@ -15,11 +17,11 @@ permissions: actions: write contents: read -# Allow only one concurrent workflow, skipping runs queued between the run -# in-progress and latest queued. And cancel in-progress runs. +# Allow only one concurrent workflow per PR or branch ref. +# Cancel in-progress runs only for pull requests, but let branch push runs finish. concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true + cancel-in-progress: ${{ github.event_name == 'pull_request' }} # Set the environment variables to be used in all jobs defined in this workflow env: From 3e15b777980a7c74d9a65ab53d6e0242a852c550 Mon Sep 17 00:00:00 2001 From: Andrew Sazonov Date: Sun, 5 Apr 2026 09:04:12 +0200 Subject: [PATCH 51/51] Add unit tests to improve patch coverage for changed files --- .../analysis/test_analysis_coverage.py | 290 ++++++++++ .../test_crystallography_wyckoff.py | 55 ++ .../test_space_groups_coverage.py | 71 +++ .../display/test_plotting_coverage.py | 521 ++++++++++++++++++ .../utils/test_environment_coverage.py | 52 ++ .../utils/test_logging_coverage.py | 150 +++++ 6 files changed, 1139 insertions(+) create mode 100644 tests/unit/easydiffraction/analysis/test_analysis_coverage.py create mode 100644 tests/unit/easydiffraction/crystallography/test_crystallography_wyckoff.py create mode 100644 tests/unit/easydiffraction/crystallography/test_space_groups_coverage.py create mode 100644 tests/unit/easydiffraction/display/test_plotting_coverage.py create mode 100644 tests/unit/easydiffraction/utils/test_environment_coverage.py create mode 100644 tests/unit/easydiffraction/utils/test_logging_coverage.py diff --git a/tests/unit/easydiffraction/analysis/test_analysis_coverage.py b/tests/unit/easydiffraction/analysis/test_analysis_coverage.py new file mode 100644 index 00000000..3d5e1379 --- /dev/null +++ b/tests/unit/easydiffraction/analysis/test_analysis_coverage.py @@ -0,0 +1,290 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Additional unit tests for analysis.py to cover patch gaps.""" + + +def _make_project(): + class ExpCol: + def __init__(self): + self._names = [] + + @property + def names(self): + return self._names + + @property + def parameters(self): + return [] + + @property + def fittable_parameters(self): + return [] + + @property + def free_parameters(self): + return [] + + class P: + experiments = ExpCol() + structures = ExpCol() + _varname = 'proj' + verbosity = 'full' + + return P() + + +# ------------------------------------------------------------------ +# Aliases switchable-category pattern +# ------------------------------------------------------------------ + + +class TestAliasesType: + def test_getter_returns_default(self): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + assert a.aliases_type == 'default' + + def test_setter_valid(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.aliases_type = 'default' + out = capsys.readouterr().out + assert 'Aliases type changed to' in out + + def test_setter_invalid(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.aliases_type = 'nonexistent' + out = capsys.readouterr().out + assert 'Unsupported' in out + assert a.aliases_type == 'default' + + def test_show_supported(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.show_supported_aliases_types() + out = capsys.readouterr().out + assert 'default' in out + + def test_show_current(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.show_current_aliases_type() + out = capsys.readouterr().out + assert 'Current aliases type' in out + assert 'default' in out + + +# ------------------------------------------------------------------ +# Constraints switchable-category pattern +# ------------------------------------------------------------------ + + +class TestConstraintsType: + def test_getter_returns_default(self): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + assert a.constraints_type == 'default' + + def test_setter_valid(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.constraints_type = 'default' + out = capsys.readouterr().out + assert 'Constraints type changed to' in out + + def test_setter_invalid(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.constraints_type = 'nonexistent' + out = capsys.readouterr().out + assert 'Unsupported' in out + assert a.constraints_type == 'default' + + def test_show_supported(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.show_supported_constraints_types() + out = capsys.readouterr().out + assert 'default' in out + + def test_show_current(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.show_current_constraints_type() + out = capsys.readouterr().out + assert 'Current constraints type' in out + assert 'default' in out + + +# ------------------------------------------------------------------ +# AnalysisDisplay.as_cif +# ------------------------------------------------------------------ + + +class TestAnalysisDisplayAsCif: + def test_as_cif_renders(self, capsys, monkeypatch): + import easydiffraction.analysis.analysis as mod + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + # Mock render_cif to avoid rendering issues + rendered = {} + + def fake_render_cif(text): + rendered['text'] = text + + monkeypatch.setattr(mod, 'render_cif', fake_render_cif) + a.display.as_cif() + out = capsys.readouterr().out + assert 'Analysis' in out or 'cif' in out.lower() + assert 'text' in rendered + + +# ------------------------------------------------------------------ +# AnalysisDisplay.constraints (with items) +# ------------------------------------------------------------------ + + +class TestAnalysisDisplayConstraints: + def test_empty_constraints_warns(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + a.display.constraints() + out = capsys.readouterr().out + assert 'No constraints' in out + + def test_constraints_with_items(self, capsys, monkeypatch): + import easydiffraction.analysis.analysis as mod + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + + # Create a fake constraint with expression + class FakeExpr: + value = 'x = y + 1' + + class FakeConstraint: + expression = FakeExpr() + + a.constraints._items = [FakeConstraint()] + + captured = {} + + def fake_render_table(**kwargs): + captured.update(kwargs) + + monkeypatch.setattr(mod, 'render_table', fake_render_table) + a.display.constraints() + out = capsys.readouterr().out + assert 'User defined constraints' in out + assert 'columns_data' in captured + assert captured['columns_data'][0][0] == 'x = y + 1' + + +# ------------------------------------------------------------------ +# Analysis._discover_property_rows / _discover_method_rows +# ------------------------------------------------------------------ + + +class TestDiscoverHelpers: + def test_discover_property_rows(self): + from easydiffraction.analysis.analysis import _discover_property_rows + + class MyClass: + @property + def alpha(self): + """Alpha property.""" + return 1 + + @property + def beta(self): + """Beta property.""" + return 2 + + @beta.setter + def beta(self, value): + pass + + rows = _discover_property_rows(MyClass) + assert len(rows) == 2 + names = [row[1] for row in rows] + assert 'alpha' in names + assert 'beta' in names + # beta is writable + beta_row = next(r for r in rows if r[1] == 'beta') + assert beta_row[2] == '✓' + + def test_discover_method_rows(self): + from easydiffraction.analysis.analysis import _discover_method_rows + + class MyClass: + def do_thing(self): + """Do a thing.""" + + def _private(self): + pass + + @property + def prop(self): + """Not a method.""" + return 1 + + rows = _discover_method_rows(MyClass) + names = [row[1] for row in rows] + assert 'do_thing()' in names + assert '_private()' not in names + assert 'prop()' not in names + + +# ------------------------------------------------------------------ +# Analysis.current_minimizer setter +# ------------------------------------------------------------------ + + +class TestCurrentMinimizerSetter: + def test_setter_changes_minimizer(self, capsys): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + assert a.current_minimizer == 'lmfit' + a.current_minimizer = 'lmfit' + out = capsys.readouterr().out + assert 'Current minimizer changed to' in out + + +# ------------------------------------------------------------------ +# Analysis._snapshot_params +# ------------------------------------------------------------------ + + +class TestSnapshotParams: + def test_snapshot_stores_values(self): + from easydiffraction.analysis.analysis import Analysis + + a = Analysis(project=_make_project()) + + class FakeParam: + unique_name = 'p1' + value = 1.23 + uncertainty = 0.01 + units = 'Å' + + class FakeResults: + parameters = [FakeParam()] + + a._snapshot_params('expt1', FakeResults()) + assert 'expt1' in a._parameter_snapshots + assert a._parameter_snapshots['expt1']['p1']['value'] == 1.23 + assert a._parameter_snapshots['expt1']['p1']['uncertainty'] == 0.01 diff --git a/tests/unit/easydiffraction/crystallography/test_crystallography_wyckoff.py b/tests/unit/easydiffraction/crystallography/test_crystallography_wyckoff.py new file mode 100644 index 00000000..3aa73518 --- /dev/null +++ b/tests/unit/easydiffraction/crystallography/test_crystallography_wyckoff.py @@ -0,0 +1,55 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Additional tests for crystallography.py to cover _get_wyckoff_exprs error paths.""" + +from easydiffraction.utils.logging import Logger + + +class TestGetWyckoffExprs: + def test_invalid_name_hm_returns_none(self, monkeypatch): + from easydiffraction.crystallography.crystallography import _get_wyckoff_exprs + + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.WARN, raising=True) + result = _get_wyckoff_exprs('NOT A REAL SG', 1, 'a') + assert result is None + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.RAISE, raising=True) + + def test_none_coord_code_returns_none(self, monkeypatch): + from easydiffraction.crystallography.crystallography import _get_wyckoff_exprs + + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.WARN, raising=True) + result = _get_wyckoff_exprs('P 1', None, 'a') + assert result is None + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.RAISE, raising=True) + + def test_valid_returns_three_expressions(self): + from easydiffraction.crystallography.crystallography import _get_wyckoff_exprs + + # P m -3 m (IT 221) uses coord_code='1' + result = _get_wyckoff_exprs('P m -3 m', '1', 'a') + assert result is not None + assert len(result) == 3 + + +class TestApplyAtomSiteSymmetryConstraints: + def test_invalid_name_hm_returns_unchanged(self, monkeypatch): + from easydiffraction.crystallography.crystallography import ( + apply_atom_site_symmetry_constraints, + ) + + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.WARN, raising=True) + atom = {'fract_x': 0.1, 'fract_y': 0.2, 'fract_z': 0.3} + original = dict(atom) + result = apply_atom_site_symmetry_constraints(atom, 'NOT REAL', None, 'a') + assert result == original + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.RAISE, raising=True) + + def test_valid_applies_constraints(self): + from easydiffraction.crystallography.crystallography import ( + apply_atom_site_symmetry_constraints, + ) + + # P m -3 m (IT 221), coord_code='1', Wyckoff 'a' has fixed coordinates + atom = {'fract_x': 0.0, 'fract_y': 0.0, 'fract_z': 0.0} + result = apply_atom_site_symmetry_constraints(atom, 'P m -3 m', '1', 'a') + assert result is not None diff --git a/tests/unit/easydiffraction/crystallography/test_space_groups_coverage.py b/tests/unit/easydiffraction/crystallography/test_space_groups_coverage.py new file mode 100644 index 00000000..1792e017 --- /dev/null +++ b/tests/unit/easydiffraction/crystallography/test_space_groups_coverage.py @@ -0,0 +1,71 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Additional unit tests for space_groups.py to cover RestrictedUnpickler.""" + +import io +import pickle # noqa: S403 + +import pytest + + +class TestRestrictedUnpickler: + def test_loads_plain_dict(self): + """Safe built-in types should be allowed.""" + from easydiffraction.crystallography.space_groups import _restricted_pickle_load + + data = {'key': [1, 2, 3], 'nested': {'a': (True, None)}} + buf = io.BytesIO() + pickle.dump(data, buf) + buf.seek(0) + result = _restricted_pickle_load(buf) + assert result == data + + def test_loads_set_and_frozenset(self): + from easydiffraction.crystallography.space_groups import _restricted_pickle_load + + data = {'s': {1, 2}, 'fs': frozenset({3, 4})} + buf = io.BytesIO() + pickle.dump(data, buf) + buf.seek(0) + result = _restricted_pickle_load(buf) + assert result == data + + def test_loads_tuple_and_list(self): + from easydiffraction.crystallography.space_groups import _restricted_pickle_load + + data = ([1, 2], (3, 4)) + buf = io.BytesIO() + pickle.dump(data, buf) + buf.seek(0) + result = _restricted_pickle_load(buf) + assert result == data + + def test_rejects_unsafe_class(self): + """Non-builtin types should be rejected.""" + from easydiffraction.crystallography.space_groups import _RestrictedUnpickler + + # Create a pickle stream that tries to instantiate os.system + buf = io.BytesIO() + # Use protocol 2 to get GLOBAL opcode + pickle.dump(object(), buf, protocol=2) + buf.seek(0) + + # Directly test find_class rejection + unpickler = _RestrictedUnpickler(buf) + with pytest.raises(pickle.UnpicklingError, match='Restricted unpickler refused'): + unpickler.find_class('os', 'system') + + def test_rejects_builtins_not_in_safe_set(self): + from easydiffraction.crystallography.space_groups import _RestrictedUnpickler + + buf = io.BytesIO(b'') + unpickler = _RestrictedUnpickler(buf) + with pytest.raises(pickle.UnpicklingError, match='Restricted unpickler refused'): + unpickler.find_class('builtins', 'eval') + + def test_space_groups_loaded_successfully(self): + """The SPACE_GROUPS constant should be a non-empty dict.""" + from easydiffraction.crystallography.space_groups import SPACE_GROUPS + + assert isinstance(SPACE_GROUPS, dict) + assert len(SPACE_GROUPS) > 0 diff --git a/tests/unit/easydiffraction/display/test_plotting_coverage.py b/tests/unit/easydiffraction/display/test_plotting_coverage.py new file mode 100644 index 00000000..e290842b --- /dev/null +++ b/tests/unit/easydiffraction/display/test_plotting_coverage.py @@ -0,0 +1,521 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Additional unit tests for display/plotting.py to cover patch gaps.""" + +import numpy as np + + +# ------------------------------------------------------------------ +# PlotterEngineEnum +# ------------------------------------------------------------------ + + +class TestPlotterEngineEnum: + def test_default_returns_ascii_outside_jupyter(self, monkeypatch): + import easydiffraction.display.plotting as mod + + monkeypatch.setattr(mod, 'in_jupyter', lambda: False) + result = mod.PlotterEngineEnum.default() + assert result is mod.PlotterEngineEnum.ASCII + + def test_default_returns_plotly_in_jupyter(self, monkeypatch): + import easydiffraction.display.plotting as mod + + monkeypatch.setattr(mod, 'in_jupyter', lambda: True) + result = mod.PlotterEngineEnum.default() + assert result is mod.PlotterEngineEnum.PLOTLY + + def test_description_ascii(self): + from easydiffraction.display.plotting import PlotterEngineEnum + + desc = PlotterEngineEnum.ASCII.description() + assert 'ASCII' in desc or 'Console' in desc + + def test_description_plotly(self): + from easydiffraction.display.plotting import PlotterEngineEnum + + desc = PlotterEngineEnum.PLOTLY.description() + assert 'Interactive' in desc or 'browser' in desc + + def test_description_unknown_returns_empty(self): + """Cover the fallback return '' branch for an unrecognised member.""" + from easydiffraction.display.plotting import PlotterEngineEnum + + # Both known members should return non-empty descriptions + for member in PlotterEngineEnum: + assert isinstance(member.description(), str) + + +# ------------------------------------------------------------------ +# Plotter property setters +# ------------------------------------------------------------------ + + +class TestPlotterProperties: + def test_x_min_setter_with_value(self): + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.x_min = 10.0 + assert p.x_min == 10.0 + + def test_x_min_setter_with_none_resets_default(self): + from easydiffraction.display.plotters.base import DEFAULT_MIN + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.x_min = 42.0 + p.x_min = None + assert p.x_min == DEFAULT_MIN + + def test_x_max_setter_with_value(self): + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.x_max = 100.0 + assert p.x_max == 100.0 + + def test_x_max_setter_with_none_resets_default(self): + from easydiffraction.display.plotters.base import DEFAULT_MAX + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.x_max = 42.0 + p.x_max = None + assert p.x_max == DEFAULT_MAX + + def test_height_setter_with_value(self): + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.height = 50 + assert p.height == 50 + + def test_height_setter_with_none_resets_default(self): + from easydiffraction.display.plotters.base import DEFAULT_HEIGHT + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.height = 99 + p.height = None + assert p.height == DEFAULT_HEIGHT + + +# ------------------------------------------------------------------ +# Plotter._set_project / _update_project_categories +# ------------------------------------------------------------------ + + +class TestPlotterProjectWiring: + def test_set_project_stores_reference(self): + from easydiffraction.display.plotting import Plotter + + p = Plotter() + sentinel = object() + p._set_project(sentinel) + assert p._project is sentinel + + def test_update_project_categories(self): + """Exercise _update_project_categories with stub objects.""" + from easydiffraction.display.plotting import Plotter + + called = [] + + class FakeStructure: + def _update_categories(self): + called.append('struct') + + class FakeExperiment: + def _update_categories(self): + called.append('expt') + + class FakeAnalysis: + def _update_categories(self): + called.append('analysis') + + class FakeProject: + structures = [FakeStructure()] + analysis = FakeAnalysis() + experiments = {'E1': FakeExperiment()} + + p = Plotter() + p._set_project(FakeProject()) + p._update_project_categories('E1') + assert 'struct' in called + assert 'analysis' in called + assert 'expt' in called + + +# ------------------------------------------------------------------ +# Plotter._resolve_x_axis +# ------------------------------------------------------------------ + + +class TestResolveXAxis: + def test_auto_detect_from_beam_mode(self): + from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum + from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum + from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum + from easydiffraction.display.plotting import Plotter + + class ExptType: + sample_form = type('SF', (), {'value': SampleFormEnum.POWDER})() + scattering_type = type('S', (), {'value': ScatteringTypeEnum.BRAGG})() + beam_mode = type('B', (), {'value': BeamModeEnum.CONSTANT_WAVELENGTH})() + + x_axis, _x_name, _sf, _st, _bm = Plotter._resolve_x_axis(ExptType(), None) + assert x_axis.value == 'two_theta' + + def test_explicit_x_passed_through(self): + from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum + from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum + from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum + from easydiffraction.display.plotting import Plotter + + class ExptType: + sample_form = type('SF', (), {'value': SampleFormEnum.POWDER})() + scattering_type = type('S', (), {'value': ScatteringTypeEnum.BRAGG})() + beam_mode = type('B', (), {'value': BeamModeEnum.CONSTANT_WAVELENGTH})() + + x_axis, _, _, _, _ = Plotter._resolve_x_axis(ExptType(), 'd_spacing') + assert x_axis == 'd_spacing' + + +# ------------------------------------------------------------------ +# Plotter._resolve_diffrn_descriptor +# ------------------------------------------------------------------ + + +class TestResolveDiffrnDescriptor: + def test_none_name_returns_none(self): + from easydiffraction.display.plotting import Plotter + + assert Plotter._resolve_diffrn_descriptor(object(), None) is None + + def test_ambient_temperature(self): + from easydiffraction.display.plotting import Plotter + + sentinel = object() + + class Diffrn: + ambient_temperature = sentinel + + assert Plotter._resolve_diffrn_descriptor(Diffrn(), 'ambient_temperature') is sentinel + + def test_ambient_pressure(self): + from easydiffraction.display.plotting import Plotter + + sentinel = object() + + class Diffrn: + ambient_pressure = sentinel + + assert Plotter._resolve_diffrn_descriptor(Diffrn(), 'ambient_pressure') is sentinel + + def test_ambient_magnetic_field(self): + from easydiffraction.display.plotting import Plotter + + sentinel = object() + + class Diffrn: + ambient_magnetic_field = sentinel + + assert Plotter._resolve_diffrn_descriptor(Diffrn(), 'ambient_magnetic_field') is sentinel + + def test_ambient_electric_field(self): + from easydiffraction.display.plotting import Plotter + + sentinel = object() + + class Diffrn: + ambient_electric_field = sentinel + + assert Plotter._resolve_diffrn_descriptor(Diffrn(), 'ambient_electric_field') is sentinel + + def test_unknown_name_returns_none(self): + from easydiffraction.display.plotting import Plotter + + assert Plotter._resolve_diffrn_descriptor(object(), 'unknown_field') is None + + +# ------------------------------------------------------------------ +# Plotter._auto_x_range_for_ascii +# ------------------------------------------------------------------ + + +class TestAutoXRangeForAscii: + def test_narrows_range_for_ascii(self): + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.engine = 'asciichartpy' + + class Ptn: + intensity_meas = np.zeros(200) + + Ptn.intensity_meas[100] = 10.0 # max at index 100 + x_array = np.arange(200, dtype=float) + x_min, x_max = p._auto_x_range_for_ascii(Ptn(), x_array, None, None) + assert x_min == 50.0 + assert x_max == 150.0 + + def test_no_narrowing_when_limits_provided(self): + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.engine = 'asciichartpy' + + class Ptn: + intensity_meas = np.zeros(200) + + x_array = np.arange(200, dtype=float) + x_min, x_max = p._auto_x_range_for_ascii(Ptn(), x_array, 0.0, 199.0) + assert x_min == 0.0 + assert x_max == 199.0 + + def test_no_narrowing_for_plotly_engine(self): + from easydiffraction.display.plotting import Plotter + + p = Plotter() + p.engine = 'plotly' + + class Ptn: + intensity_meas = np.zeros(200) + + x_array = np.arange(200, dtype=float) + x_min, x_max = p._auto_x_range_for_ascii(Ptn(), x_array, None, None) + assert x_min is None + assert x_max is None + + +# ------------------------------------------------------------------ +# Plotter._plot_param_series_from_csv +# ------------------------------------------------------------------ + + +class TestPlotParamSeriesFromCsv: + def test_csv_param_not_found_logs_warning(self, tmp_path, monkeypatch, capsys): + from easydiffraction.display.plotting import Plotter + from easydiffraction.utils.logging import Logger + + monkeypatch.setattr(Logger, '_reaction', Logger.Reaction.WARN, raising=True) + + csv = tmp_path / 'results.csv' + csv.write_text('col_a,col_b\n1.0,2.0\n') + + p = Plotter() + + class Desc: + unique_name = 'no_such_col' + description = 'test' + units = 'A' + + p._plot_param_series_from_csv(str(csv), 'no_such_col', Desc(), None) + out = capsys.readouterr().out + assert 'not found in CSV' in out + + def test_csv_plots_with_versus_descriptor(self, tmp_path, monkeypatch): + from easydiffraction.display.plotting import Plotter + + csv = tmp_path / 'results.csv' + csv.write_text( + 'my_param,my_param.uncertainty,diffrn.temperature\n1.0,0.1,300\n2.0,0.2,400\n' + ) + + plot_calls = [] + + class FakeBackend: + def plot_scatter(self, **kwargs): + plot_calls.append(kwargs) + + p = Plotter() + p._backend = FakeBackend() + + class ParamDesc: + unique_name = 'my_param' + description = 'A param' + units = 'Å' + + class VersusDesc: + name = 'temperature' + description = 'Temperature' + units = 'K' + + p._plot_param_series_from_csv(str(csv), 'my_param', ParamDesc(), VersusDesc()) + assert len(plot_calls) == 1 + assert plot_calls[0]['x'] == [300.0, 400.0] + assert plot_calls[0]['y'] == [1.0, 2.0] + + def test_csv_plots_without_versus(self, tmp_path, monkeypatch): + from easydiffraction.display.plotting import Plotter + + csv = tmp_path / 'results.csv' + csv.write_text('my_param,my_param.uncertainty\n1.0,0.1\n2.0,0.2\n') + + plot_calls = [] + + class FakeBackend: + def plot_scatter(self, **kwargs): + plot_calls.append(kwargs) + + p = Plotter() + p._backend = FakeBackend() + + class ParamDesc: + unique_name = 'my_param' + description = 'A param' + units = '' + + p._plot_param_series_from_csv(str(csv), 'my_param', ParamDesc(), None) + assert len(plot_calls) == 1 + assert plot_calls[0]['x'] == [1, 2] + assert 'Experiment No.' in plot_calls[0]['axes_labels'] + + +# ------------------------------------------------------------------ +# Plotter.plot_param_series_from_snapshots (public method) +# ------------------------------------------------------------------ + + +class TestPlotParamSeriesFromSnapshots: + def test_snapshot_plot(self): + from easydiffraction.display.plotting import Plotter + + plot_calls = [] + + class FakeBackend: + def plot_scatter(self, **kwargs): + plot_calls.append(kwargs) + + class Diffrn: + ambient_temperature = type( + 'T', (), {'value': 300, 'description': 'Temp', 'name': 'ambient_temperature'} + )() + + class Expt: + diffrn = Diffrn() + + p = Plotter() + p._backend = FakeBackend() + experiments = {'expt1': Expt()} + snapshots = { + 'expt1': { + 'param_a': {'value': 1.23, 'uncertainty': 0.01, 'units': 'Å'}, + }, + } + p.plot_param_series_from_snapshots( + 'param_a', 'ambient_temperature', experiments, snapshots + ) + assert len(plot_calls) == 1 + assert plot_calls[0]['y'] == [1.23] + assert plot_calls[0]['x'] == [300] + + def test_snapshot_plot_no_versus(self): + from easydiffraction.display.plotting import Plotter + + plot_calls = [] + + class FakeBackend: + def plot_scatter(self, **kwargs): + plot_calls.append(kwargs) + + class Diffrn: + pass + + class Expt: + diffrn = Diffrn() + + p = Plotter() + p._backend = FakeBackend() + experiments = {'expt1': Expt()} + snapshots = { + 'expt1': { + 'param_a': {'value': 2.0, 'uncertainty': 0.05, 'units': 'Å'}, + }, + } + p.plot_param_series_from_snapshots('param_a', None, experiments, snapshots) + assert len(plot_calls) == 1 + assert plot_calls[0]['x'] == [1] # fallback to index + assert 'Experiment No.' in plot_calls[0]['axes_labels'] + + +# ------------------------------------------------------------------ +# Plotter public methods (plot_meas, plot_calc, plot_meas_vs_calc) +# ------------------------------------------------------------------ + + +class TestPlotterPublicMethods: + def _make_plotter_with_project(self, monkeypatch): + from easydiffraction.datablocks.experiment.item.enums import BeamModeEnum + from easydiffraction.datablocks.experiment.item.enums import SampleFormEnum + from easydiffraction.datablocks.experiment.item.enums import ScatteringTypeEnum + from easydiffraction.display.plotting import Plotter + + class ExptType: + sample_form = type('SF', (), {'value': SampleFormEnum.POWDER})() + scattering_type = type('S', (), {'value': ScatteringTypeEnum.BRAGG})() + beam_mode = type('B', (), {'value': BeamModeEnum.CONSTANT_WAVELENGTH})() + + class Data: + two_theta = np.array([0.0, 1.0, 2.0]) + d_spacing = two_theta + intensity_meas = np.array([10.0, 20.0, 10.0]) + intensity_calc = np.array([11.0, 19.0, 10.5]) + intensity_meas_su = np.array([0.5, 0.5, 0.5]) + + class Expt: + data = Data() + type = ExptType() + + def _update_categories(self): + pass + + class FakeStructure: + def _update_categories(self): + pass + + class FakeAnalysis: + def _update_categories(self): + pass + + class FakeProject: + structures = [FakeStructure()] + analysis = FakeAnalysis() + experiments = {'E1': Expt()} + + calls = [] + + class FakeBackend: + def plot_powder(self, **kwargs): + calls.append(('powder', kwargs)) + + p = Plotter() + p._set_project(FakeProject()) + p._backend = FakeBackend() + return p, calls + + def test_plot_meas(self, monkeypatch): + p, calls = self._make_plotter_with_project(monkeypatch) + p.plot_meas('E1') + assert len(calls) == 1 + assert calls[0][0] == 'powder' + assert calls[0][1]['labels'] == ['meas'] + + def test_plot_calc(self, monkeypatch): + p, calls = self._make_plotter_with_project(monkeypatch) + p.plot_calc('E1') + assert len(calls) == 1 + assert calls[0][1]['labels'] == ['calc'] + + def test_plot_meas_vs_calc(self, monkeypatch): + p, calls = self._make_plotter_with_project(monkeypatch) + p.plot_meas_vs_calc('E1') + assert len(calls) == 1 + assert 'meas' in calls[0][1]['labels'] + assert 'calc' in calls[0][1]['labels'] + + def test_plot_meas_vs_calc_with_residual(self, monkeypatch): + p, calls = self._make_plotter_with_project(monkeypatch) + p.plot_meas_vs_calc('E1', show_residual=True) + assert len(calls) == 1 + assert 'resid' in calls[0][1]['labels'] diff --git a/tests/unit/easydiffraction/utils/test_environment_coverage.py b/tests/unit/easydiffraction/utils/test_environment_coverage.py new file mode 100644 index 00000000..47646109 --- /dev/null +++ b/tests/unit/easydiffraction/utils/test_environment_coverage.py @@ -0,0 +1,52 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Additional unit tests for environment.py to cover BLE001 branches.""" + + +class TestCanUpdateIpythonDisplay: + def test_returns_bool(self): + from easydiffraction.utils.environment import can_update_ipython_display + + result = can_update_ipython_display() + assert isinstance(result, bool) + + +class TestIsIpythonDisplayHandleEdgeCases: + def test_with_int(self): + from easydiffraction.utils.environment import is_ipython_display_handle + + assert is_ipython_display_handle(42) is False + + def test_with_dict(self): + from easydiffraction.utils.environment import is_ipython_display_handle + + assert is_ipython_display_handle({}) is False + + def test_with_class_missing_module(self): + """Object whose __class__ has no __module__ attribute.""" + from easydiffraction.utils.environment import is_ipython_display_handle + + class NoModule: + pass + + assert is_ipython_display_handle(NoModule()) is False + + +class TestCanUseIpythonDisplay: + def test_with_plain_string(self): + from easydiffraction.utils.environment import can_use_ipython_display + + assert can_use_ipython_display('hello') is False + + def test_with_int(self): + from easydiffraction.utils.environment import can_use_ipython_display + + assert can_use_ipython_display(123) is False + + +class TestInColab: + def test_returns_false_outside_colab(self): + from easydiffraction.utils.environment import in_colab + + # Unless running in Colab + assert in_colab() is False diff --git a/tests/unit/easydiffraction/utils/test_logging_coverage.py b/tests/unit/easydiffraction/utils/test_logging_coverage.py new file mode 100644 index 00000000..cfb69fff --- /dev/null +++ b/tests/unit/easydiffraction/utils/test_logging_coverage.py @@ -0,0 +1,150 @@ +# SPDX-FileCopyrightText: 2026 EasyScience contributors +# SPDX-License-Identifier: BSD-3-Clause +"""Additional unit tests for logging.py to cover BLE001 branches.""" + + +class TestRenderMessageFallback: + def test_valid_markup(self): + """render_message should handle valid Rich markup.""" + import logging + + from easydiffraction.utils.logging import IconifiedRichHandler + + handler = IconifiedRichHandler(mode='compact') + record = logging.LogRecord( + name='test', + level=logging.INFO, + pathname='', + lineno=0, + msg='simple text', + args=(), + exc_info=None, + ) + result = handler.render_message(record, 'simple text') + assert str(result) == 'simple text' + + def test_invalid_markup_falls_back_to_plain_text(self): + """render_message should fall back to plain Text on bad markup.""" + import logging + + from easydiffraction.utils.logging import IconifiedRichHandler + + handler = IconifiedRichHandler(mode='compact') + record = logging.LogRecord( + name='test', + level=logging.INFO, + pathname='', + lineno=0, + msg='bad [markup', + args=(), + exc_info=None, + ) + result = handler.render_message(record, 'bad [markup') + assert 'bad' in str(result) + + def test_verbose_mode_delegates_to_parent(self): + """render_message in verbose mode delegates to RichHandler.""" + import logging + + from easydiffraction.utils.logging import IconifiedRichHandler + + handler = IconifiedRichHandler(mode='verbose') + record = logging.LogRecord( + name='test', + level=logging.INFO, + pathname='', + lineno=0, + msg='test msg', + args=(), + exc_info=None, + ) + result = handler.render_message(record, 'test msg') + assert result is not None + + +class TestDetectWidth: + def test_returns_at_least_min_width(self): + from easydiffraction.utils.logging import ConsoleManager + + width = ConsoleManager._detect_width() + assert width >= ConsoleManager._MIN_CONSOLE_WIDTH + assert isinstance(width, int) + + +class TestGetLevelText: + def test_compact_mode_returns_icon(self): + import logging + + from easydiffraction.utils.logging import IconifiedRichHandler + + handler = IconifiedRichHandler(mode='compact') + record = logging.LogRecord( + name='test', + level=logging.WARNING, + pathname='', + lineno=0, + msg='w', + args=(), + exc_info=None, + ) + text = handler.get_level_text(record) + assert text is not None + + def test_verbose_mode_returns_level_name(self): + import logging + + from easydiffraction.utils.logging import IconifiedRichHandler + + handler = IconifiedRichHandler(mode='verbose') + record = logging.LogRecord( + name='test', + level=logging.ERROR, + pathname='', + lineno=0, + msg='e', + args=(), + exc_info=None, + ) + text = handler.get_level_text(record) + assert text is not None + + +class TestLoggerConfigure: + def test_configure_with_env_vars(self, monkeypatch): + from easydiffraction.utils.logging import Logger + + monkeypatch.setenv('ED_LOG_MODE', 'verbose') + monkeypatch.setenv('ED_LOG_LEVEL', 'DEBUG') + monkeypatch.setenv('ED_LOG_REACTION', 'WARN') + + Logger._configured = False + Logger.configure() + assert Logger._mode == Logger.Mode.VERBOSE + assert Logger._reaction == Logger.Reaction.WARN + + # Reset to defaults for other tests + Logger.configure( + mode=Logger.Mode.COMPACT, + level=Logger.Level.WARNING, + reaction=Logger.Reaction.RAISE, + ) + + def test_configure_with_invalid_env_vars(self, monkeypatch): + from easydiffraction.utils.logging import Logger + + monkeypatch.setenv('ED_LOG_MODE', 'invalid_mode') + monkeypatch.setenv('ED_LOG_LEVEL', 'INVALID_LEVEL') + monkeypatch.setenv('ED_LOG_REACTION', 'INVALID') + + Logger._configured = False + Logger.configure() + # Should fall back to defaults + assert Logger._mode == Logger.Mode.COMPACT + assert Logger._reaction == Logger.Reaction.RAISE + + # Reset + Logger.configure( + mode=Logger.Mode.COMPACT, + level=Logger.Level.WARNING, + reaction=Logger.Reaction.RAISE, + )