diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 563dd9b..5d5e740 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -3,9 +3,23 @@
 version: 2
 updates:
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
       interval: "daily"
     labels:
       - "Bot"
+    groups:
+      github-actions:
+        patterns:
+          - '*'
+    cooldown:
+      default-days: 7
+  - package-ecosystem: "pre-commit"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    labels:
+      - "Bot"
+    cooldown:
+      default-days: 7
diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml
index 008a57b..1ea9d43 100644
--- a/.github/workflows/deploy-docs.yml
+++ b/.github/workflows/deploy-docs.yml
@@ -1,5 +1,8 @@
 name: Documentation
 
+# no permissions by default
+permissions: {}
+
 on:
   pull_request:
   push:
@@ -12,15 +15,21 @@ on:
 jobs:
   build-docs:
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    defaults:
+      run:
+        shell: bash -l {0}
     steps:
       - name: checkout
-        uses: actions/checkout@v6
+        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd  # v6.0.2
         with:
           fetch-depth: 0
+          persist-credentials: false
 
       - name: Setup Micromamba
-        uses: mamba-org/setup-micromamba@v2
+        uses: mamba-org/setup-micromamba@add3a49764cedee8ee24e82dfde87f5bc2914462  # v2.0.7
         with:
           environment-name: TEST
           init-shell: bash
@@ -29,12 +38,10 @@ jobs:
 
       - name: Install oceans
-        shell: bash -l {0}
         run: |
           python -m pip install -e . 
--no-deps --force-reinstall
 
       - name: Build documentation
-        shell: bash -l {0}
         run: |
           set -e
           pushd docs
@@ -43,7 +50,7 @@
 
       - name: Deploy
         if: success() && github.event_name == 'release'
-        uses: peaceiris/actions-gh-pages@v4
+        uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e  # v4.0.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: docs/build/html
diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml
index 7ed7518..cb89818 100644
--- a/.github/workflows/pypi.yml
+++ b/.github/workflows/pypi.yml
@@ -9,44 +9,50 @@ on:
     types:
       - published
 
+defaults:
+  run:
+    shell: bash
+
 jobs:
-  packages:
+  pypi-publish:
+    name: Upload release to PyPI
     runs-on: ubuntu-latest
+    environment:
+      name: pypi
+      url: https://pypi.org/p/oceans/
+    permissions:
+      id-token: write  # IMPORTANT: this permission is mandatory for trusted publishing
+
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd  # v6.0.2
+        with:
+          fetch-depth: 0
+          persist-credentials: false
 
       - name: Set up Python
-        uses: actions/setup-python@v6
+        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405  # v6.2.0
         with:
           python-version: "3.x"
 
       - name: Get tags
         run: git fetch --depth=1 origin +refs/tags/*:refs/tags/*
-        shell: bash
 
       - name: Install build tools
         run: |
           python -m pip install --upgrade pip build twine
-        shell: bash
-
       - name: Build binary wheel
         run: python -m build --sdist --wheel . 
--outdir dist - name: CheckFiles run: | ls dist - shell: bash - name: Test wheels run: | cd dist && python -m pip install *.whl python -m twine check * - shell: bash - name: Publish a Python distribution to PyPI if: success() && github.event_name == 'release' - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_PASSWORD }} + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2f1a526..627ee2e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,26 +1,33 @@ name: Tests +# no permissions by default +permissions: {} + on: pull_request: push: - branches: [main] + branches: [ main ] jobs: run: runs-on: ${{ matrix.os }} strategy: matrix: - python-version: [ "3.10", "3.11", "3.12" ] - os: [ubuntu-latest] + python-version: [ "3.10", "3.14"] + os: [ ubuntu-latest ] fail-fast: false + defaults: + run: + shell: bash -l {0} steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: fetch-depth: 0 + persist-credentials: false - name: Setup Micromamba Python ${{ matrix.python-version }} - uses: mamba-org/setup-micromamba@v2 + uses: mamba-org/setup-micromamba@add3a49764cedee8ee24e82dfde87f5bc2914462 # v2.0.7 with: environment-name: TEST init-shell: bash @@ -28,16 +35,13 @@ jobs: python=${{ matrix.python-version }} --file requirements.txt --file requirements-dev.txt --channel conda-forge - name: Install oceans - shell: bash -l {0} run: | python -m pip install -e . 
--no-deps --force-reinstall - name: Tests - shell: bash -l {0} run: | python -m pytest -n 2 -rxs --cov=oceans tests - name: Doctests - shell: bash -l {0} run: | python -m pytest -vv oceans --doctest-modules diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 959bcea..75b741c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v6.0.0 hooks: - id: check-ast - id: end-of-file-fixer @@ -15,42 +15,59 @@ repos: - id: debug-statements -# - repo: https://github.com/econchick/interrogate -# rev: 1.5.0 -# hooks: -# - id: interrogate -# exclude: ^(docs|setup.py|tests) -# args: [--config=pyproject.toml] - - repo: https://github.com/keewis/blackdoc - rev: v0.3.9 + rev: v0.4.6 hooks: - id: blackdoc -- repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.5.5 + +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.15.8 hooks: - id: ruff + args: ["--fix", "--show-fixes"] + - id: ruff-format + + +- repo: https://github.com/nbQA-dev/nbQA + rev: 1.9.1 + hooks: + - id: nbqa-check-ast + - id: nbqa-black + - id: nbqa-ruff + args: [ + --fix, + --config=ruff.toml, + ] + -- repo: https://github.com/psf/black - rev: 24.4.2 +- repo: https://github.com/woodruffw/zizmor-pre-commit + rev: v1.23.1 hooks: - - id: black - language_version: python3 + - id: zizmor + + +- repo: https://github.com/bdice/nb-strip-paths + rev: v0.1.0 + hooks: + - id: nb-strip-paths + - repo: https://github.com/codespell-project/codespell - rev: v2.3.0 + rev: v2.4.2 hooks: - id: codespell args: - --ignore-words-list=pres + - repo: https://github.com/asottile/add-trailing-comma - rev: v3.1.0 + rev: v4.0.0 hooks: - id: add-trailing-comma + - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.0" + rev: "v2.20.0" hooks: - id: pyproject-fmt diff --git a/oceans/RPSstuff.py b/oceans/RPSstuff.py index 33501e6..3ce4f31 100644 --- a/oceans/RPSstuff.py +++ 
b/oceans/RPSstuff.py @@ -1,14 +1,13 @@ from datetime import datetime import numpy as np -import numpy.ma as ma +from numpy import ma earth_radius = 6371.0e3 def h2hms(hours): - """ - Converts hours to hours, minutes, and seconds. + """Converts hours to hours, minutes, and seconds. Examples -------- @@ -24,8 +23,7 @@ def h2hms(hours): def hms2h(h, m=None, s=None): - """ - Converts hours, minutes, and seconds to hours. + """Converts hours, minutes, and seconds to hours. Examples -------- @@ -49,8 +47,7 @@ def hms2h(h, m=None, s=None): def ms2hms(millisecs): - """ - Converts milliseconds to integer hour, minute, seconds. + """Converts milliseconds to integer hour, minute, seconds. Examples -------- @@ -65,9 +62,8 @@ def ms2hms(millisecs): return hour, mn, sec -def julian(y, m=0, d=0, h=0, mi=0, s=0, noon=False): - """ - Converts Gregorian calendar dates to Julian dates +def julian(y, m=0, d=0, h=0, mi=0, s=0, *, noon=False): # noqa: PLR0913 + """Converts Gregorian calendar dates to Julian dates USAGE: [j]=julian(y,m,d,h) @@ -118,17 +114,11 @@ def julian(y, m=0, d=0, h=0, mi=0, s=0, noon=False): + 1721119 ) - if noon: - j = j + (h - 12) / 24 - else: - j = j + h / 24 - - return j + return j + (h - 12) / 24 if noon else j + h / 24 def jdrps2jdmat(jd): - """ - Convert Signell's Julian days to Matlab's Serial day + """Convert Signell's Julian days to Matlab's Serial day matlab's serial date = 1 at 0000 UTC, 1-Jan-0000. Examples @@ -141,8 +131,7 @@ def jdrps2jdmat(jd): def jdmat2jdrps(jdmat): - """ - Convert Matlab's Serial Day to Signell's Julian days + """Convert Matlab's Serial Day to Signell's Julian days matlab's serial date = 1 at 0000 UTC, 1-Jan-0000. 
Examples @@ -154,9 +143,8 @@ def jdmat2jdrps(jdmat): return jdmat + julian(0000, 1, 1, 0, 0, 0) - 1 -def gregorian(jd, noon=False): - """ - Converts decimal Julian days to Gregorian dates using the astronomical +def gregorian(jd, *, noon=False): + """Converts decimal Julian days to Gregorian dates using the astronomical conversion, but with time zero starting at midnight instead of noon. In this convention, Julian day 2440000 begins at 0000 hours, May 23, 1968. The Julian day does not have to be an integer, and with Matlab's double @@ -226,8 +214,7 @@ def gregorian(jd, noon=False): def s2hms(secs): - """ - Converts seconds to integer hour,minute,seconds + """Converts seconds to integer hour,minute,seconds Usage: hour, min, sec = s2hms(secs) Examples @@ -243,10 +230,8 @@ def s2hms(secs): return hr, mi, sc -# FIXME: STOPPED HERE def ss2(jd): - """ - Return Gregorian start and stop dates of Julian day variable + """Return Gregorian start and stop dates of Julian day variable Usage: start, stop = ss2(jd) """ @@ -256,8 +241,7 @@ def ss2(jd): def angled(h): - """ - ANGLED: Returns the phase angles in degrees of a matrix with complex + """ANGLED: Returns the phase angles in degrees of a matrix with complex elements. Usage: @@ -266,21 +250,17 @@ def angled(h): deg = angle in math convention (degrees counterclockwise from "east") """ - pd = np.angle(h, deg=True) - return pd + return np.angle(h, deg=True) def ij2ind(a, i, j): - m, n = a.shape - return m * i - j + 1 # TODO: Check this +1 + m, _ = a.shape + return m * i - j + 1 def ind2ij(a, ind): - """ - ind2ij returns i, j indices of array. - - """ - m, n = a.shape + """ind2ij returns i, j indices of array.""" + m, _ = a.shape j = np.ceil(ind / m) i = np.remainder(ind, m) i[i == 0] = m @@ -288,21 +268,16 @@ def ind2ij(a, ind): def rms(u): - """ - Compute root mean square for each column of matrix u. - - """ - # TODO: use an axis arg. 
+ """Compute root mean square for each column of matrix u.""" if u.ndim > 1: - m, n = u.shape + m, _ = u.shape else: m = u.size return np.sqrt(np.sum(u**2) / m) def z0toCn(z0, H): - """ - Convert roughness height z0 to Chezy "C" and Manning's "n" which is a + """Convert roughness height z0 to Chezy "C" and Manning's "n" which is a function of the water depth Inputs: @@ -319,7 +294,6 @@ def z0toCn(z0, H): >>> C, n = z0toCn(0.003, np.arange(2, 200)) """ - k_s = 30 * z0 C = 18 * np.log10(12 * H / k_s) n = (H ** (1.0 / 6.0)) / C @@ -328,12 +302,8 @@ def z0toCn(z0, H): def z0tocd(z0, zr): - """ - Calculates CD at a given ZR corresponding to Z0. - - """ - cd = (0.4 * np.ones(z0.size) / np.log(zr / z0)) ** 2 - return cd + """Calculates CD at a given ZR corresponding to Z0.""" + return (0.4 * np.ones(z0.size) / np.log(zr / z0)) ** 2 def short_calc(amin, amax): @@ -344,62 +314,43 @@ def short_calc(amin, amax): def gsum(x, **kw): - """ - Just like sum, except that it skips over bad points. - - """ + """Just like sum, except that it skips over bad points.""" xnew = ma.masked_invalid(x) return np.sum(xnew, **kw) def gmean(x, **kw): - """ - Just like mean, except that it skips over bad points. - - """ + """Just like mean, except that it skips over bad points.""" xnew = ma.masked_invalid(x) return np.mean(xnew, **kw) def gmedian(x, **kw): - """ - Just like median, except that it skips over bad points. - - """ + """Just like median, except that it skips over bad points.""" xnew = ma.masked_invalid(x) return np.median(xnew, **kw) def gmin(x, **kw): - """ - Just like min, except that it skips over bad points. - - """ + """Just like min, except that it skips over bad points.""" xnew = ma.masked_invalid(x) return np.min(xnew, **kw) def gmax(x, **kw): - """ - Just like max, except that it skips over bad points. 
-
-    """
+    """Just like max, except that it skips over bad points."""
     xnew = ma.masked_invalid(x)
     return np.max(xnew, **kw)
 
 
 def gstd(x, **kw):
-    """
-    Just like std, except that it skips over bad points.
-
-    """
+    """Just like std, except that it skips over bad points."""
     xnew = ma.masked_invalid(x)
     return np.std(xnew, **kw)
 
 
 def near(x, x0, n=1):
-    """
-    Given an 1D array `x` and a scalar `x0`, returns the `n` indices of the
+    """Given an 1D array `x` and a scalar `x0`, returns the `n` indices of the
     element of `x` closest to `x0`.
 
     """
@@ -409,10 +360,7 @@ def near(x, x0, n=1):
 
 
 def swantime(a):
-    """
-    Converts SWAN default time format to datetime object.
-
-    """
+    """Converts SWAN default time format to datetime object."""
     if isinstance(a, str):
         a = float(a)
     a = np.asanyarray(a)
@@ -429,38 +377,36 @@
     a = a - mn / 1e4
     sec = np.floor(a * 1e6)
 
-    return datetime(year, mon, day, hour, mn, sec)
+    # Local import: module-level `datetime` is the *class* (from datetime import
+    # datetime), which has no `UTC` attribute; `UTC` lives on the module (3.11+).
+    from datetime import UTC
+
+    return datetime(year, mon, day, hour, mn, sec, tzinfo=UTC)
 
 
 def shift(a, b, n):
-    """
-    a and b are vectors
+    """A and b are vectors
     n is number of points of a to cut off anew and bnew
     will be the same length.
 
     """
-    # la, lb = a.size, lb = b.size
-
     anew = a[list(range(0 + n, len(a))), :]
     if len(anew) > len(b):
-        anew = anew[list(range(0, len(b))), :]
+        anew = anew[list(range(len(b))), :]
         bnew = b
     else:
-        bnew = b[list(range(0, len(anew))), :]
+        bnew = b[list(range(len(anew))), :]
 
     return anew, bnew
 
 
 def lagcor(a, b, n):
-    """
-    Finds lagged correlations between two series.
+    """Finds lagged correlations between two series.
     a and b are two column vectors
     n is range of lags
     cor is correlation as fn of lag.
 
""" cor = [] - for k in range(0, n + 1): + for k in range(n + 1): d1, d2 = shift(a, b, k) ind = ~np.isnan(d1 + d2) c = np.corrcoef(d1[ind], d2[ind]) @@ -471,8 +415,7 @@ def lagcor(a, b, n): def coast2bln(coast, bln_file): - """ - Converts a matlab coast (two column array w/ nan for line breaks) into + """Converts a matlab coast (two column array w/ nan for line breaks) into a Surfer blanking file. Where coast is a two column vector and bln_file is the output file name. @@ -480,15 +423,14 @@ def coast2bln(coast, bln_file): Needs `fixcoast`. """ - c2 = fixcoast(coast) ind = np.where(np.isnan(c2[:, 0]))[0] n = len(ind) - 1 bln = c2.copy() - for k in range(0, n - 1): + for k in range(n - 1): kk = list(range(ind[k] + 1, ind[k + 1])) - NP = int(len(kk)) + NP = len(kk) bln[ind[k], 0] = NP bln[ind[k], 1] = 1 @@ -497,8 +439,7 @@ def coast2bln(coast, bln_file): def fixcoast(coast): - """ - FIXCOAST Makes sure coastlines meet Signell's conventions. + """FIXCOAST Makes sure coastlines meet Signell's conventions. Fixes coastline is in the format we want. Assumes that lon/lat are in the first two columns of the matrix coast, and that coastline segments are @@ -507,7 +448,6 @@ def fixcoast(coast): rows contain NaNs. 
""" - ind = coast == -99999.0 coast[ind] = np.nan diff --git a/oceans/colormaps.py b/oceans/colormaps.py index 3229465..0e91982 100644 --- a/oceans/colormaps.py +++ b/oceans/colormaps.py @@ -1,12 +1,11 @@ -import os from colorsys import hsv_to_rgb -from glob import glob +from pathlib import Path import matplotlib.pyplot as plt import numpy as np from matplotlib import colors -cmap_path = os.path.join(os.path.dirname(__file__), "cmap_data") +cmap_path = Path(__file__).parent.joinpath("cmap_data") class Bunch(dict): @@ -16,21 +15,15 @@ def __init__(self, **kw): def get_color(color): - """ - https://stackoverflow.com/questions/10254207/color-and-line-writing-using-matplotlib - - """ + """https://stackoverflow.com/questions/10254207/color-and-line-writing-using-matplotlib""" for hue in range(color): - hue = 1.0 * hue / color - col = [int(x) for x in hsv_to_rgb(hue, 1.0, 230)] + h = 1.0 * hue / color + col = [int(x) for x in hsv_to_rgb(h, 1.0, 230)] yield "#{:02x}{:02x}{:02x}".format(*col) -def cmat2cmpl(rgb, reverse=False): - """ - Convert RGB matplotlib colormap. - - """ +def cmat2cmpl(rgb, *, reverse=False): + """Convert RGB matplotlib colormap.""" rgb = np.asanyarray(rgb) if reverse: rgb = np.flipud(rgb) @@ -38,11 +31,7 @@ def cmat2cmpl(rgb, reverse=False): def phasemap_cm(m=256): - """ - Colormap periodic/circular data (phase). 
- - """ - + """Colormap periodic/circular data (phase).""" theta = 2 * np.pi * np.arange(0, m) / m circ = np.exp(1j * theta) @@ -57,16 +46,11 @@ def phasemap_cm(m=256): green = np.abs(np.imag(vbluec * np.conj(vredc))) blue = np.abs(np.imag(vredc * np.conj(vgreenc))) - return ( - 1.5 - * np.c_[red, green, blue] - / np.abs(np.imag((vred - vgreen) * np.conj(vred - vblue))) - ) + return 1.5 * np.c_[red, green, blue] / np.abs(np.imag((vred - vgreen) * np.conj(vred - vblue))) def zebra_cm(a=4, m=0.5, n=256): - """ - Zebra palette colormap with NBANDS broad bands and NENTRIES rows in + """Zebra palette colormap with NBANDS broad bands and NENTRIES rows in the color map. The default is 4 broad bands @@ -91,12 +75,11 @@ def zebra_cm(a=4, m=0.5, n=256): hue = np.exp(-3.0 * x / n) sat = m + (1.0 - m) * (0.5 * (1.0 + sawtooth(2.0 * np.pi * x / (n / a)))) val = m + (1.0 - m) * 0.5 * (1.0 + np.cos(2.0 * np.pi * x / (n / a / 2.0))) - return np.array([hsv_to_rgb(h, s, v) for h, s, v in zip(hue, sat, val)]) + return np.array([hsv_to_rgb(h, s, v) for h, s, v in zip(hue, sat, val, strict=True)]) def ctopo_pos_neg_cm(m=256): - """ - Colormap for positive/negative data with gray scale only + """Colormap for positive/negative data with gray scale only original from cushman-roisin book cd-rom. """ @@ -106,11 +89,7 @@ def ctopo_pos_neg_cm(m=256): def avhrr_cm(m=256): - """ - AHVRR colormap used by NOAA Coastwatch. - - """ - + """AHVRR colormap used by NOAA Coastwatch.""" x = np.arange(0.0, m) / (m - 1) xr = [0.0, 0.2, 0.4, 0.5, 0.6, 0.8, 1.0] @@ -142,8 +121,8 @@ def load_cmap(fname): } # Data colormaps. 
-for fname in glob(f"{cmap_path}/*.dat"): - cmap = os.path.basename(fname).split(".")[0] +for fname in Path(cmap_path).glob("*.dat"): + cmap = Path(fname).stem data = load_cmap(fname) arrays.update({cmap: data}) @@ -157,7 +136,7 @@ def demo(): data = np.outer(np.arange(0, 1, 0.01), np.ones(10)) fig = plt.figure(figsize=(10, 5)) fig.subplots_adjust(top=0.8, bottom=0.05, left=0.01, right=0.99) - cmaps = sorted(m for m in cm.keys() if not m.endswith("_r")) + cmaps = sorted(m for m in cm.keys() if not m.endswith("_r")) # noqa: SIM118 length = len(cmaps) for k, cmap in enumerate(cmaps): plt.subplot(1, length + 1, k + 1) diff --git a/oceans/datasets.py b/oceans/datasets.py index edfe935..e539113 100644 --- a/oceans/datasets.py +++ b/oceans/datasets.py @@ -21,9 +21,8 @@ def _woa_variable(variable): } v = _VAR.get(variable) if not v: - raise ValueError( - f'Unrecognizable variable. Expected one of {list(_VAR.keys())}, got "{variable}".', - ) + msg = f'Unrecognizable variable. Expected one of {list(_VAR.keys())}, got "{variable}".' + raise ValueError(msg) return v @@ -39,10 +38,9 @@ def _woa_url(variable, time_period, resolution): f'annual time period, and "{pref}".', stacklevel=2, ) - return f"{base}/" f"{pref}/" f"{variable}_annual_1deg.nc" - else: - dddd = "decav" - pref = "woa18" + return f"{base}/{pref}/{variable}_annual_1deg.nc" + dddd = "decav" + pref = "woa18" grids = { "5": ("5deg", "5d"), @@ -51,9 +49,8 @@ def _woa_url(variable, time_period, resolution): } grid = grids.get(resolution) if not grid: - raise ValueError( - f'Unrecognizable resolution. Expected one of {list(grids.keys())}, got "{resolution}".', - ) + msg = f'Unrecognizable resolution. Expected one of {list(grids.keys())}, got "{resolution}".' 
+ raise ValueError(msg) res = grid[0] gg = grid[1] @@ -79,35 +76,33 @@ def _woa_url(variable, time_period, resolution): time_period = time_period.lower() if len(time_period) == 3: - tt = [ - time_periods.get(k) - for k in time_periods.keys() - if k.startswith(time_period) - ][0] + tt = [time_periods.get(k) for k in time_periods if k.startswith(time_period)][0] elif len(time_period) == 2 and time_period in time_periods.values(): tt = time_period else: tt = time_periods.get(time_period) if not tt: - raise ValueError( - f"Unrecognizable time_period. " - f'Expected one of {list(time_periods.keys())}, got "{time_period}".', - ) + msg = f'Unrecognizable time_period. Expected one of {list(time_periods.keys())}, got "{time_period}".' + raise ValueError(msg) - url = ( + return ( f"{base}/" "/ncei/woa/" f"{variable}/decav/{res}/" f"{pref}_{dddd}_{v}{tt}_{gg}.nc" # '[PREF]_[DDDD]_[V][TT][FF][GG]' Is [FF] used? ) - return url @functools.lru_cache(maxsize=256) -def woa_profile(lon, lat, variable="temperature", time_period="annual", resolution="1"): - """ - Return a xarray DAtaset instance from a World Ocean Atlas variable at a +def woa_profile( + lon, + lat, + variable="temperature", + time_period="annual", + resolution="1", +): + """Return a xarray DAtaset instance from a World Ocean Atlas variable at a given lon, lat point. 
Parameters @@ -142,10 +137,14 @@ def woa_profile(lon, lat, variable="temperature", time_period="annual", resoluti >>> ax.invert_yaxis() """ - import cf_xarray # noqa + import cf_xarray # noqa: F401 import xarray as xr - url = _woa_url(variable=variable, time_period=time_period, resolution=resolution) + url = _woa_url( + variable=variable, + time_period=time_period, + resolution=resolution, + ) v = _woa_variable(variable) ds = xr.open_dataset(url, decode_times=False) @@ -154,7 +153,7 @@ def woa_profile(lon, lat, variable="temperature", time_period="annual", resoluti @functools.lru_cache(maxsize=256) -def woa_subset( +def woa_subset( # noqa: PLR0913 min_lon, max_lon, min_lat, @@ -162,10 +161,10 @@ def woa_subset( variable="temperature", time_period="annual", resolution="5", + *, full=False, ): - """ - Return an xarray Dataset instance from a World Ocean Atlas variable at a + """Return an xarray Dataset instance from a World Ocean Atlas variable at a given lon, lat bounding box. Parameters @@ -216,12 +215,14 @@ def woa_subset( >>> _ = ax.set_ylim(200, 0) """ - import cf_xarray # noqa + import cf_xarray # noqa: F401 import xarray as xr url = _woa_url(variable, time_period, resolution) ds = xr.open_dataset(url, decode_times=False) - ds = ds.cf.sel({"X": slice(min_lon, max_lon), "Y": slice(min_lat, max_lat)}) + ds = ds.cf.sel( + {"X": slice(min_lon, max_lon), "Y": slice(min_lat, max_lat)}, + ) v = _woa_variable(variable) if full: return ds @@ -239,9 +240,8 @@ def _download_etopo2(): @functools.lru_cache(maxsize=256) -def etopo_subset(min_lon, max_lon, min_lat, max_lat, tfile=None, smoo=False): - """ - Get a etopo subset. +def etopo_subset(min_lon, max_lon, min_lat, max_lat, tfile=None, *, smoo=False): # noqa: PLR0913 + """Get a etopo subset. 
Should work on any netCDF with x, y, data Examples @@ -254,6 +254,7 @@ def etopo_subset(min_lon, max_lon, min_lat, max_lat, tfile=None, smoo=False): >>> cs = ax.pcolormesh(lon, lat, bathy) Based on trondkristiansen contourICEMaps.py + """ if tfile is None: tfile = _download_etopo2() @@ -266,7 +267,7 @@ def etopo_subset(min_lon, max_lon, min_lat, max_lat, tfile=None, smoo=False): imin, imax, jmin, jmax = _get_indices(bbox, lons, lats) lon, lat = np.meshgrid(lons[imin:imax], lats[jmin:jmax]) - # FIXME: This assumes j, i order. + # Assumes j, i order. bathy = etopo.variables["z"][jmin:jmax, imin:imax] if smoo: @@ -278,8 +279,7 @@ def etopo_subset(min_lon, max_lon, min_lat, max_lat, tfile=None, smoo=False): def get_depth(lon, lat, tfile=None): - """ - Find the depths for each station on the etopo2 database. + """Find the depths for each station on the etopo2 database. Examples -------- @@ -304,9 +304,8 @@ def get_depth(lon, lat, tfile=None): return get_profile(lons, lats, bathy, lon, lat, mode="nearest", order=3) -def get_isobath(bbox, iso=-200, tfile=None, smoo=False): - """ - Finds an isobath on the etopo2 database and returns +def get_isobath(bbox, iso=-200, tfile=None, *, smoo=False): + """Finds an isobath on the etopo2 database and returns its lon, lat segments for plotting. Examples diff --git a/oceans/filters.py b/oceans/filters.py index 590ba27..7a04e16 100644 --- a/oceans/filters.py +++ b/oceans/filters.py @@ -2,8 +2,7 @@ def lanc(numwt, haf): - """ - Generates a numwt + 1 + numwt lanczos cosine low pass filter with -6dB + """Generates a numwt + 1 + numwt lanczos cosine low pass filter with -6dB (1/4 power, 1/2 amplitude) point at haf Parameters @@ -49,8 +48,7 @@ def lanc(numwt, haf): def smoo1(datain, window_len=11, window="hanning"): - """ - Smooth the data using a window with requested size. + """Smooth the data using a window with requested size. 
Parameters ---------- @@ -112,20 +110,21 @@ def smoo1(datain, window_len=11, window="hanning"): instead of a string. """ - datain = np.asarray(datain) if datain.ndim != 1: - raise ValueError("Smooth only accepts 1 dimension arrays.") + msg = "Smooth only accepts 1 dimension arrays." + raise ValueError(msg) if datain.size < window_len: - raise ValueError("Input vector needs to be bigger than window size.") + msg = "Input vector needs to be bigger than window size." + raise ValueError(msg) if window_len < 3: return datain if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]: - msg = "Window must be is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" # noqa + msg = "Window must be is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" raise ValueError(msg) s = np.r_[ @@ -134,18 +133,13 @@ def smoo1(datain, window_len=11, window="hanning"): 2 * datain[-1] - datain[-1:-window_len:-1], ] - if window == "flat": # Moving average. - w = np.ones(window_len, "d") - else: - w = eval("np." + window + "(window_len)") - + w = np.ones(window_len, "d") if window == "flat" else eval("np." + window + "(window_len)") # noqa: S307 data_out = np.convolve(w / w.sum(), s, mode="same") return data_out[window_len - 1 : -window_len + 1] -def smoo2(A, hei, wid, kind="hann", badflag=-9999, beta=14): - """ - Calculates the smoothed array 'As' from the original array 'A' using the +def smoo2(A, hei, wid, kind="hann", badflag=-9999, beta=14): # noqa: ARG001, PLR0915, PLR0913, C901 + """Calculates the smoothed array 'As' from the original array 'A' using the specified window of type 'kind' and shape ('hei', 'wid'). 
Usage: @@ -178,13 +172,16 @@ def smoo2(A, hei, wid, kind="hann", badflag=-9999, beta=14): # Checking window type and dimensions kinds = ["hann", "hamming", "blackman", "bartlett", "kaiser"] if kind not in kinds: - raise ValueError(f"Invalid window type requested: {kind}") + msg = f"Invalid window type requested: {kind}" + raise ValueError(msg) if (np.mod(hei, 2) == 0) or (np.mod(wid, 2) == 0): - raise ValueError("Window dimensions must be odd") + msg = "Window dimensions must be odd" + raise ValueError(msg) if (hei <= 1) or (wid <= 1): - raise ValueError("Window shape must be (3,3) or greater") + msg = "Window shape must be (3,3) or greater" + raise ValueError(msg) # Creating the 2D window. if kind == "kaiser": # If the window kind is kaiser (beta is required). @@ -198,9 +195,9 @@ def smoo2(A, hei, wid, kind="hann", badflag=-9999, beta=14): kind = "hanning" # Computing outer product to make a 2D window out of the original 1d # windows. - # TODO: Get rid of this evil eval. + # NB: Get rid of this evil eval. wstr = "np.outer(np." + kind + "(hei), np." + kind + "(wid))" - wdw = eval(wstr) + wdw = eval(wstr) # noqa: S307 A = np.asanyarray(A) Fnan = np.isnan(A) @@ -262,9 +259,8 @@ def smoo2(A, hei, wid, kind="hann", badflag=-9999, beta=14): return As -def weim(x, N, kind="hann", badflag=-9999, beta=14): - """ - Calculates the smoothed array 'xs' from the original array 'x' using the +def weim(x, N, kind="hann", badflag=-9999, beta=14): # noqa: ARG001 + """Calculates the smoothed array 'xs' from the original array 'x' using the specified window of type 'kind' and size 'N'. 'N' must be an odd number. Usage: @@ -302,10 +298,12 @@ def weim(x, N, kind="hann", badflag=-9999, beta=14): # Checking window type and dimensions. 
kinds = ["hann", "hamming", "blackman", "bartlett", "kaiser"] if kind not in kinds: - raise ValueError(f"Invalid window type requested: {kind}") + msg = f"Invalid window type requested: {kind}" + raise ValueError(msg) if np.mod(N, 2) == 0: - raise ValueError("Window size must be odd") + msg = "Window size must be odd" + raise ValueError(msg) # Creating the window. if kind == "kaiser": # If the window kind is kaiser (beta is required). @@ -321,8 +319,8 @@ def weim(x, N, kind="hann", badflag=-9999, beta=14): # 1D windows. wstr = "np." + kind + "(N)" - # FIXME: Do not use `eval`. - w = eval(wstr) + # NB: Do not use `eval`. + w = eval(wstr) # noqa: S307 x = np.asarray(x).flatten() Fnan = np.isnan(x).flatten() @@ -365,8 +363,7 @@ def weim(x, N, kind="hann", badflag=-9999, beta=14): def medfilt1(x, L=3): - """ - Median filter for 1d arrays. + """Median filter for 1d arrays. Performs a discrete one-dimensional median filter with window length `L` to input vector `x`. Produces a vector the same size as `x`. Boundaries are @@ -407,11 +404,8 @@ def medfilt1(x, L=3): >>> ax.grid(True) >>> y1min, y1max = np.min(xout) * 0.5, np.max(xout) * 2.0 >>> leg1 = ax.legend(["x (pseudo-random)", "xout"]) - >>> t1 = ax.set_title( - ... '''Median filter with window length %s. - ... Removes outliers, tracks remaining signal)''' - ... % L - ... ) + >>> t1 = ax.set_title('''Median filter with window length %s. + ... Removes outliers, tracks remaining signal)''' % L) >>> L = 103 >>> xout = medfilt1(x=x, L=L) >>> ax = plt.subplot(212) @@ -424,34 +418,33 @@ def medfilt1(x, L=3): >>> ax.grid(True) >>> y2min, y2max = np.min(xout) * 0.5, np.max(xout) * 2.0 >>> leg2 = ax.legend(["Same x (pseudo-random)", "xout"]) - >>> t2 = ax.set_title( - ... '''Median filter with window length %s. - ... Removes outliers and noise''' - ... % L - ... ) + >>> t2 = ax.set_title('''Median filter with window length %s. + ... 
Removes outliers and noise''' % L) >>> ax = plt.subplot(211) >>> lims1 = ax.set_ylim([min(y1min, y2min), max(y1max, y2max)]) >>> ax = plt.subplot(212) >>> lims2 = ax.set_ylim([min(y1min, y2min), max(y1max, y2max)]) """ - xin = np.atleast_1d(np.asanyarray(x)) N = len(x) L = int(L) # Ensure L is odd integer so median requires no interpolation. if L % 2 == 0: L += 1 + if N < 2: - raise ValueError("Input sequence must be >= 2.") + msg = "Input sequence must be >= 2." + raise ValueError(msg) return None if L < 2: - raise ValueError("Input filter window length must be >=2.") + msg = "Input filter window length must be >=2." + raise ValueError(msg) return None if L > N: - msg = "Input filter window length must be shorter than series: L = {:d}, len(x) = {:d}".format # noqa + msg = "Input filter window length must be shorter than series: L = {:d}, len(x) = {:d}".format raise ValueError(msg(L, N)) return None @@ -476,8 +469,7 @@ def medfilt1(x, L=3): def fft_lowpass(signal, low, high): - """ - Performs a low pass filer on the series. + """Performs a low pass filer on the series. low and high specifies the boundary of the filter. >>> from oceans.filters import fft_lowpass @@ -493,11 +485,7 @@ def fft_lowpass(signal, low, high): >>> legend = ax.legend() """ - - if len(signal) % 2: - result = np.fft.rfft(signal, len(signal)) - else: - result = np.fft.rfft(signal) + result = np.fft.rfft(signal, len(signal)) if len(signal) % 2 else np.fft.rfft(signal) freq = np.fft.fftfreq(len(signal))[: len(signal) // 2 + 1] factor = np.ones_like(freq) @@ -520,8 +508,7 @@ def fft_lowpass(signal, low, high): def md_trenberth(x): - """ - Returns the filtered series using the Trenberth filter as described + """Returns the filtered series using the Trenberth filter as described on Monthly Weather Review, vol. 112, No. 2, Feb 1984. 
Input data: series x of dimension 1Xn (must be at least dimension 11) @@ -571,8 +558,7 @@ def md_trenberth(x): def pl33tn(x, dt=1.0, T=33.0, mode="valid", t=None): - """ - Computes low-passed series from `x` using pl33 filter, with optional + """Computes low-passed series from `x` using pl33 filter, with optional sample interval `dt` (hours) and filter half-amplitude period T (hours) as input for non-hourly series. @@ -606,19 +592,20 @@ def pl33tn(x, dt=1.0, T=33.0, mode="valid", t=None): """ - import cf_xarray # noqa: F401 import pandas as pd import xarray as xr if isinstance(x, xr.Dataset | pd.DataFrame): - raise TypeError("Input a DataArray not a Dataset, or a Series not a DataFrame.") + msg = "Input a DataArray not a Dataset, or a Series not a DataFrame." + raise TypeError(msg) if isinstance(x, pd.Series) and not isinstance( x.index, pd.core.indexes.datetimes.DatetimeIndex, ): - raise TypeError("Input Series needs to have parsed datetime indices.") + msg = "Input Series needs to have parsed datetime indices." + raise TypeError(msg) # find dt in units of hours if isinstance(x, xr.DataArray): @@ -721,11 +708,7 @@ def pl33tn(x, dt=1.0, T=33.0, mode="valid", t=None): .dot(weight, dims="window") ) # update attrs - attrs = { - key: f"{value}, filtered" - for key, value in x.attrs.items() - if key != "units" - } + attrs = {key: f"{value}, filtered" for key, value in x.attrs.items() if key != "units"} xf.attrs = attrs elif isinstance(x, pd.Series): @@ -741,7 +724,6 @@ def pl33tn(x, dt=1.0, T=33.0, mode="valid", t=None): # times to match xf if t is not None: - # Nt = len(filter_time) tf = t[Nt - 1 : -Nt + 1] return xf, tf diff --git a/oceans/ocfis.py b/oceans/ocfis.py index 90650af..496bb0e 100644 --- a/oceans/ocfis.py +++ b/oceans/ocfis.py @@ -3,13 +3,12 @@ import gsw import numpy as np -import numpy.ma as ma import pandas as pd +from numpy import ma -def spdir2uv(spd, ang, deg=False): - """ - Computes u, v components from speed and direction. 
+def spdir2uv(spd, ang, *, deg=False): + """Computes u, v components from speed and direction. Parameters ---------- @@ -28,7 +27,6 @@ def spdir2uv(spd, ang, deg=False): meridional wind velocity [m s :sup:`-1`] """ - if deg: ang = np.deg2rad(ang) @@ -39,8 +37,7 @@ def spdir2uv(spd, ang, deg=False): def uv2spdir(u, v, mag=0, rot=0): - """ - Computes speed and direction from u, v components. + """Computes speed and direction from u, v components. Converts rectangular to polar coordinate, geographic convention Allows for rotation and magnetic declination correction. @@ -83,7 +80,6 @@ def uv2spdir(u, v, mag=0, rot=0): >>> _ = ax.set_ylim(0, np.max(ws)) """ - u, v, mag, rot = list(map(np.asarray, (u, v, mag, rot))) vec = u + 1j * v @@ -96,8 +92,7 @@ def uv2spdir(u, v, mag=0, rot=0): def del_eta_del_x(U, f, g, balance="geostrophic", R=None): - r""" - Calculate :mat: `\frac{\partial \eta} {\partial x}` for different + r"""Calculate :mat: `\frac{\partial \eta} {\partial x}` for different force balances Parameters @@ -114,7 +109,6 @@ def del_eta_del_x(U, f, g, balance="geostrophic", R=None): Radius """ - if balance == "geostrophic": detadx = f * U / g @@ -128,8 +122,7 @@ def del_eta_del_x(U, f, g, balance="geostrophic", R=None): def mld(SA, CT, p, criterion="pdvar"): - r""" - Compute the mixed layer depth. + r"""Compute the mixed layer depth. Parameters ---------- @@ -169,7 +162,6 @@ def mld(SA, CT, p, criterion="pdvar"): Washington, D.C. 
""" - SA, CT, p = list(map(np.asanyarray, (SA, CT, p))) SA, CT, p = np.broadcast_arrays(SA, CT, p) SA, CT, p = list(map(ma.masked_invalid, (SA, CT, p))) @@ -185,7 +177,7 @@ def mld(SA, CT, p, criterion="pdvar"): Tdiff = T0 - 0.5 # 0.8 on the matlab original if criterion == "temperature": - idx_mld = CT > Tdiff + idx_mld = Tdiff < CT elif criterion == "pdvar": pdvar_diff = gsw.rho(S0, Tdiff, p_min) - 1000.0 idx_mld = sigma <= pdvar_diff @@ -193,7 +185,8 @@ def mld(SA, CT, p, criterion="pdvar"): sig_diff = Sig0 + 0.125 idx_mld = sigma <= sig_diff else: - raise NameError(f"Unknown criteria {criterion}") + msg = f"Unknown criteria {criterion}" + raise NameError(msg) MLD = ma.masked_all_like(p) MLD[idx_mld] = p[idx_mld] @@ -202,8 +195,7 @@ def mld(SA, CT, p, criterion="pdvar"): def pcaben(u, v): - """ - Principal components of 2-d (e.g. current meter) data + """Principal components of 2-d (e.g. current meter) data calculates ellipse parameters for currents. Parameters @@ -252,11 +244,11 @@ def pcaben(u, v): ... print("Flatness: {}".format(flatness)) ... - Notes: + Notes + ----- https://pubs.usgs.gov/of/2002/of02-217/m-files/pcaben.m """ - u, v = np.broadcast_arrays(u, v) C = np.cov(u, v) @@ -287,8 +279,7 @@ def pcaben(u, v): def spec_rot(u, v): - """ - Compute the rotary spectra from u,v velocity components + """Compute the rotary spectra from u,v velocity components Parameters ---------- @@ -322,7 +313,6 @@ def spec_rot(u, v): J. Gonella Deep Sea Res., 833-846, 1972. """ - # Individual components Fourier series. fu, fv = list(map(np.fft.fft, (u, v))) @@ -336,8 +326,7 @@ def spec_rot(u, v): # Quadrature spectra. quv = -fu.real * fv.imag + fv.real * fu.imag - # Rotatory components - # TODO: Check the division, 4 (original code) or 8 (paper)? + # Rotatory components. cw = (pu + pv - 2 * quv) / 4.0 ccw = (pu + pv + 2 * quv) / 4.0 N = len(u) @@ -346,8 +335,7 @@ def spec_rot(u, v): def lagcorr(x, y, M=None): - """ - Compute lagged correlation between two series. 
+ """Compute lagged correlation between two series. Follow emery and Thomson book "summation" notation. Parameters @@ -369,7 +357,6 @@ def lagcorr(x, y, M=None): TODO: Implement Emery and Thomson example. """ - x, y = list(map(np.asanyarray, (x, y))) if not M: @@ -390,8 +377,7 @@ def lagcorr(x, y, M=None): def complex_demodulation(series, f, fc, axis=-1): - """ - Perform a Complex Demodulation + """Perform a Complex Demodulation It acts as a bandpass filter for `f`. series => Time-Series object with data and datetime @@ -401,40 +387,32 @@ def complex_demodulation(series, f, fc, axis=-1): math : series.data * np.exp(2 * np.pi * 1j * (1 / T) * time_in_seconds) """ - import scipy.signal as signal + from scipy import signal # Time period ie freq = 1 / T T = 2 * np.pi / f - # fc = fc * 1 / T # Filipe # De-mean data. d = series.data - series.data.mean(axis=axis) dfs = d * np.exp(2 * np.pi * 1j * (1 / T) * series.time_in_seconds) # Make a 5th order butter filter. - # FIXME: Why 5th order? Try signal.buttord!? Wn = fc / series.Nyq [b, a] = signal.butter(5, Wn, btype="low") - # FIXME: These are a factor of a thousand different from Matlab, why? - cc = signal.filtfilt(b, a, dfs) # FIXME: * 1e3 + cc = signal.filtfilt(b, a, dfs) amplitude = 2 * np.abs(cc) - # TODO: Fix the outputs. - # phase = np.arctan2(np.imag(cc), np.real(cc)) - filtered_series = amplitude * np.exp( -2 * np.pi * 1j * (1 / T) * series.time_in_seconds, ) - new_series = filtered_series.real, series.time # Return cc, amplitude, phase, dfs, filtered_series - return new_series + return filtered_series.real, series.time def binave(datain, r): - """ - Averages vector data in bins of length r. The last bin may be the + """Averages vector data in bins of length r. The last bin may be the average of less than r elements. Useful for computing daily average time series (with r=24 for hourly data). 
@@ -524,26 +502,24 @@ def binave(datain, r): 08/05/1999: version 2.0 """ - datain, rows = np.asarray(datain), np.asarray(r, dtype=np.int64) if datain.ndim != 1: - raise ValueError("Must be a 1D array.") + msg = "Must be a 1D array." + raise ValueError(msg) if rows <= 0: - raise ValueError("Bin size R must be a positive integer.") + msg = "Bin size R must be a positive integer." + raise ValueError(msg) # Compute bin averaged series. lines = datain.size // r z = datain[0 : (lines * rows)].reshape(rows, lines, order="F") - bindata = np.r_[np.mean(z, axis=0), np.mean(datain[(lines * r) :])] - - return bindata + return np.r_[np.mean(z, axis=0), np.mean(datain[(lines * r) :])] def binavg(x, y, db): - """ - Bins y(x) into db spacing. The spacing is given in `x` units. + """Bins y(x) into db spacing. The spacing is given in `x` units. y = np.random.random(20) x = np.arange(len(y)) xb, yb = binavg(x, y, 2) @@ -563,19 +539,13 @@ def binavg(x, y, db): # But this is the center of the bins. xbin = xbin - (db / 2.0) - # FIXME there is an IndexError if I want to show this. - # for n in range(x.size): - # print xbin[inds[n]-1], "<=", x[n], "<", xbin[inds[n]] - - ybin = np.array([y[inds == i].mean() for i in range(0, len(xbin))]) - # xbin = np.array([x[inds == i].mean() for i in range(0, len(xbin))]) + ybin = np.array([y[inds == i].mean() for i in range(len(xbin))]) return xbin, ybin def bin_dates(self, freq, tz=None): - """ - Take a pandas time Series and return a new Series on the specified + """Take a pandas time Series and return a new Series on the specified frequency. 
Examples @@ -586,24 +556,26 @@ def bin_dates(self, freq, tz=None): >>> sig = np.random.rand(n) + 2 * np.cos(2 * np.pi * np.arange(n)) >>> dates = date_range(start="1/1/2000", periods=n, freq="h") >>> series = Series(data=sig, index=dates) - >>> new_series = bin_dates(series, freq="D", tz=None) + >>> new_series = bin_dates(series, freq="1D", tz=None) """ from pandas import date_range - new_index = date_range(start=self.index[0], end=self.index[-1], freq=freq, tz=tz) + new_index = date_range( + start=self.index[0], + end=self.index[-1], + freq=freq, + tz=tz, + ) new_series = self.groupby(new_index.asof).mean() # Averages at the center. - secs = pd.Timedelta(new_index.freq).total_seconds() - new_series.index = new_series.index.values + int(secs // 2) + secs = pd.Timedelta(freq).total_seconds() + new_series.index = new_series.index.to_numpy() + int(secs // 2) return new_series def series_spline(self): - """ - Fill NaNs using a spline interpolation. - - """ + """Fill NaNs using a spline interpolation.""" from pandas import Series, isnull from scipy.interpolate import InterpolatedUnivariateSpline @@ -624,9 +596,8 @@ def series_spline(self): return Series(result, index=self.index, name=self.name) -def despike(self, n=3, recursive=False): - """ - Replace spikes with np.nan. +def despike(self, n=3, *, recursive=False): + """Replace spikes with np.nan. Removing spikes that are >= n * std. default n = 3. @@ -653,8 +624,7 @@ def despike(self, n=3, recursive=False): def pol2cart(theta, radius, units="deg"): - """ - Convert from polar to Cartesian coordinates + """Convert from polar to Cartesian coordinates Examples -------- @@ -670,8 +640,7 @@ def pol2cart(theta, radius, units="deg"): def cart2pol(x, y): - """ - Convert from Cartesian to polar coordinates. + """Convert from Cartesian to polar coordinates. 
Examples -------- @@ -688,7 +657,7 @@ def cart2pol(x, y): def wrap_lon180(lon): lon = np.atleast_1d(lon).copy() - angles = np.logical_or((lon < -180), (180 < lon)) + angles = np.logical_or((lon < -180), (lon > 180)) lon[angles] = wrap_lon360(lon[angles] + 180) - 180 return lon @@ -707,9 +676,8 @@ def alphanum_key(s): return key -def get_profile(x, y, f, xi, yi, mode="nearest", order=3): - """ - Interpolate regular data. +def get_profile(x, y, f, xi, yi, mode="nearest", order=3): # noqa: PLR0913 + """Interpolate regular data. Parameters ---------- @@ -783,21 +751,16 @@ def get_profile(x, y, f, xi, yi, mode="nearest", order=3): def strip_mask(arr, fill_value=np.nan): - """ - Take a masked array and return its data(filled) + mask. - - """ + """Take a masked array and return its data(filled) + mask.""" if ma.isMaskedArray(arr): mask = np.ma.getmaskarray(arr) arr = np.ma.filled(arr, fill_value) return mask, arr - else: - return arr + return arr def shiftdim(x, n=None): - """ - Matlab-like shiftdim in python. + """Matlab-like shiftdim in python. Examples -------- @@ -822,19 +785,17 @@ def no_leading_ones(shape): if shape[0] == 1: shape = shape[1:] return no_leading_ones(shape) - else: - return shape + return shape if n is None: # returns the array B with the same number of # elements as X but with any leading singleton # dimensions removed. return x.reshape(no_leading_ones(x.shape)) - elif n >= 0: + if n >= 0: # When n is positive, shiftdim shifts the dimensions # to the left and wraps the n leading dimensions to the end. return x.transpose(np.roll(list(range(x.ndim)), -n)) - else: - # When n is negative, shiftdim shifts the dimensions - # to the right and pads with singletons. - return x.reshape((1,) * -n + x.shape) + # When n is negative, shiftdim shifts the dimensions + # to the right and pads with singletons. 
+ return x.reshape((1,) * -n + x.shape) diff --git a/oceans/plotting.py b/oceans/plotting.py index 7a7bacc..e51c91f 100644 --- a/oceans/plotting.py +++ b/oceans/plotting.py @@ -1,17 +1,16 @@ -import matplotlib +import matplotlib # noqa: ICN001 import matplotlib.pyplot as plt import numpy as np -import numpy.ma as ma from matplotlib.artist import Artist from matplotlib.dates import date2num from matplotlib.lines import Line2D +from numpy import ma from oceans.ocfis import cart2pol def stick_plot(time, u, v, **kw): - """ - Parameters + """Parameters ---------- time: list/arrays of datetime objects u, v: list/arrays of 2D vector components. @@ -51,19 +50,19 @@ def stick_plot(time, u, v, **kw): angles = kw.pop("angles", "uv") ax = kw.pop("ax", None) + msg = ( + "Stickplot angles must be `uv` so that if *U*==*V* the angle of the " + "arrow on the plot is 45 degrees CCW from the *x*-axis." + ) if angles != "uv": - raise AssertionError( - "Stickplot angles must be `uv` so that" - "if *U*==*V* the angle of the arrow on" - "the plot is 45 degrees CCW from the *x*-axis.", - ) + raise AssertionError(msg) if isinstance(time, DatetimeIndex): time = time.to_pydatetime() time, u, v = list(map(np.asanyarray, (time, u, v))) if not ax: - fig, ax = plt.subplots() + _, ax = plt.subplots() q = ax.quiver( date2num(time), @@ -84,8 +83,7 @@ def stick_plot(time, u, v, **kw): def landmask(M, color="0.8"): - """ - Plot land mask. + """Plot land mask. Based on trondkristiansen mpl_util.py. """ @@ -105,8 +103,7 @@ def landmask(M, color="0.8"): def level_colormap(levels, cmap=None): - """ - Make a colormap based on an increasing sequence of levels. + """Make a colormap based on an increasing sequence of levels. Based on trondkristiansen.com mpl_util.py. """ @@ -137,16 +134,12 @@ def level_colormap(levels, cmap=None): def get_pointsxy(points): - """ - Return x, y of the given point object. 
- - """ + """Return x, y of the given point object.""" return points.get_xdata(), points.get_ydata() def compass(u, v, **arrowprops): - """ - Compass draws a graph that displays the vectors with + """Compass draws a graph that displays the vectors with components `u` and `v` as arrows from the origin. Examples @@ -157,7 +150,6 @@ def compass(u, v, **arrowprops): >>> fig, ax = compass(u, v) """ - # Create plot. fig, ax = plt.subplots(subplot_kw={"polar": True}) @@ -168,7 +160,7 @@ def compass(u, v, **arrowprops): kw.update(arrowprops) [ ax.annotate("", xy=(angle, radius), xytext=(0, 0), arrowprops=kw) - for angle, radius in zip(angles, radii) + for angle, radius in zip(angles, radii, strict=True) ] ax.set_ylim(0, np.max(radii)) @@ -177,10 +169,7 @@ def compass(u, v, **arrowprops): def plot_spectrum(data, fs): - """ - Plots a Single-Sided Amplitude Spectrum of y(t). - - """ + """Plots a Single-Sided Amplitude Spectrum of y(t).""" n = len(data) # Length of the signal. k = np.arange(n) T = n / fs @@ -200,8 +189,7 @@ def plot_spectrum(data, fs): class EditPoints: - """ - Edit points on a graph with the mouse. Handles only one set of points. + """Edit points on a graph with the mouse. Handles only one set of points. Key-bindings: 't' toggle on and off. (When on, you can move, delete, or add points.) @@ -228,10 +216,11 @@ class EditPoints: epsilon = 5 # Maximum pixel distance to count as a point hit. showpoint = True - def __init__(self, fig, ax, points, verbose=False): - matplotlib.interactive(True) + def __init__(self, fig, ax, points, *, verbose=False): + matplotlib.interactive(b=True) if points is None: - raise RuntimeError("First add points to a figure or canvas.") + msg = "First add points to a figure or canvas." 
+ raise RuntimeError(msg) canvas = fig.canvas self.ax = ax self.dragged = None @@ -253,23 +242,23 @@ def __init__(self, fig, ax, points, verbose=False): canvas.mpl_connect("draw_event", self.draw_callback) canvas.mpl_connect("button_press_event", self.button_press_callback) canvas.mpl_connect("key_press_event", self.key_press_callback) - canvas.mpl_connect("button_release_event", self.button_release_callback) + canvas.mpl_connect( + "button_release_event", + self.button_release_callback, + ) canvas.mpl_connect("motion_notify_event", self.motion_notify_callback) self.canvas = canvas - def draw_callback(self, event): + def draw_callback(self, event): # noqa: ARG002 self.background = self.canvas.copy_from_bbox(self.ax.bbox) self.ax.draw_artist(self.line) self.canvas.blit(self.ax.bbox) if self.verbose: - print("\nDrawing...") # noqa + print("\nDrawing...") # noqa: T201 def points_changed(self, points): - """ - This method is called whenever the points object is called. - - """ + """This method is called whenever the points object is called.""" # Only copy the artist props to the line (except visibility). vis = self.line.get_visible() Artist.update_from(self.line, points) @@ -277,13 +266,10 @@ def points_changed(self, points): self.line.set_visible(vis) if self.verbose: - print("\nPoints modified.") # noqa + print("\nPoints modified.") # noqa: T201 def get_ind_under_point(self, event): - """ - Get the index of the point under mouse if within epsilon tolerance. - - """ + """Get the index of the point under mouse if within epsilon tolerance.""" # Display coordinates. 
arr = self.ax.transData.transform(self.points.get_xydata()) x, y = arr[:, 0], arr[:, 1] @@ -291,19 +277,16 @@ def get_ind_under_point(self, event): indseq = np.nonzero(np.equal(d, np.amin(d)))[0] ind = indseq[0] if self.verbose: - print("d[ind] {} epsilon {}".format(d[ind], self.epsilon)) # noqa + print(f"d[ind] {d[ind]} epsilon {self.epsilon}") # noqa: T201 if d[ind] >= self.epsilon: ind = None if self.verbose: - print(f"\nClicked at ({event.xdata}, {event.ydata})") # noqa + print(f"\nClicked at ({event.xdata}, {event.ydata})") # noqa: T201 return ind def button_press_callback(self, event): - """ - Whenever a mouse button is pressed. - - """ + """Whenever a mouse button is pressed.""" if not self.showpoint: return if not event.inaxes: @@ -317,28 +300,22 @@ def button_press_callback(self, event): self.pick_pos = (x[self._ind], y[self._ind]) if self.verbose: - print(f"\nGot point: ({self.pick_pos}), ind: {self._ind}") # noqa + print(f"\nGot point: ({self.pick_pos}), ind: {self._ind}") # noqa: T201 def button_release_callback(self, event): - """ - Whenever a mouse button is released. - - """ + """Whenever a mouse button is released.""" if not self.showpoint: return if not event.button: return self._ind = None if self.verbose: - print("\nButton released.") # noqa - - def key_press_callback(self, event): - """ - Whenever a key is pressed. 
+ print("\nButton released.") # noqa: T201 - """ + def key_press_callback(self, event): # noqA: C901 + """Whenever a key is pressed.""" if not event.inaxes: - return + return None if event.key == "t": self.showpoint = not self.showpoint self.line.set_visible(self.showpoint) @@ -346,16 +323,16 @@ def key_press_callback(self, event): self._ind = None if self.verbose: - print(f"\nToggle {self.showpoint:d}") # noqa + print(f"\nToggle {self.showpoint:d}") # noqa: T201 return get_pointsxy(self.points) - elif event.key == "d": + if event.key == "d": x, y = get_pointsxy(self.points) ind = self.get_ind_under_point(event) if ind is not None: if self.verbose: - print( + print( # noqa: T201 f"\nDeleted ({x[ind]}, {y[ind]}) ind: {ind}", - ) # noqa + ) x = np.delete(x, ind) y = np.delete(y, ind) self.points.set_xdata(x) @@ -363,7 +340,7 @@ def key_press_callback(self, event): self.line.set_data(self.points.get_data()) elif event.key == "i": if self.verbose: - print("Insert point") # noqa + print("Insert point") # noqa: T201 xs = self.points.get_xdata() ex, ey = event.xdata, event.ydata for _i in range(len(xs) - 1): @@ -371,16 +348,14 @@ def key_press_callback(self, event): self.points.set_ydata(np.r_[self.points.get_ydata(), ey]) self.line.set_data(self.points.get_data()) if self.verbose: - print(f"\nInserting: ({ex}, {ey})") # noqa + print(f"\nInserting: ({ex}, {ey})") # noqa: T201 break self.canvas.draw() + return None def motion_notify_callback(self, event): - """ - On mouse movement. 
- - """
+ """On mouse movement."""
 if not self.showpoint:
 return
 if not self._ind:
 return
@@ -395,8 +370,8 @@ def motion_notify_callback(self, event):
 x[self._ind] = self.pick_pos[0] + dx
 y[self._ind] = self.pick_pos[1] + dy
 if self.verbose:
- print(f"\nevent.xdata {event.xdata}") # noqa
- print(f"\nevent.ydata {event.ydata}") # noqa
+ print(f"\nevent.xdata {event.xdata}") # noqa: T201
+ print(f"\nevent.ydata {event.ydata}") # noqa: T201
 self.points.set_xdata(x)
 self.points.set_ydata(y)
 self.line.set_data(list(zip(self.points.get_data())))
@@ -406,4 +381,4 @@
 self.canvas.blit(self.ax.bbox)
 if self.verbose:
- print("\nMoving") # noqa
+ print("\nMoving") # noqa: T201
diff --git a/oceans/sandbox/lines.py b/oceans/sandbox/lines.py
index a5c0cac..dd0662f 100644
--- a/oceans/sandbox/lines.py
+++ b/oceans/sandbox/lines.py
@@ -1,13 +1,12 @@
-import os
+from pathlib import Path
 import numpy as np
-_default_path = os.path.join(os.path.dirname(__file__), "data")
+_default_path = Path(__file__).parent.joinpath("data")
 def LineNormals2D(Vertices, Lines):
- """
- This function calculates the normals, of the line points using the
+ """This function calculates the normals, of the line points using the
 neighboring points of each contour point, and forward an backward
 differences on the end points.
 --------
 >>> import numpy as np
 >>> import matplotlib.pyplot as plt
- >>> data = np.load(os.path.join(_default_path, "testdata.npz"))
+ >>> data = np.load(_default_path.joinpath("testdata.npz"))
 >>> Lines, Vertices = data["Lines"], data["Vertices"]
 >>> N = LineNormals2D(Vertices, Lines)
 >>> fig, ax = plt.subplots(nrows=1, ncols=1)
@@ -49,7 +48,8 @@ def LineNormals2D(Vertices, Lines):
 np.arange(2, Vertices.shape[0] + 1),
 ]
 else:
- raise ValueError(f"Expected np.array but got {Lines:!r}.")
+ msg = f"Expected np.array but got {Lines!r}."
+ raise ValueError(msg)
 # Calculate tangent vectors. 
DT = Vertices[Lines[:, 0] - 1, :] - Vertices[Lines[:, 1] - 1, :] @@ -77,8 +77,7 @@ def LineNormals2D(Vertices, Lines): def LineCurvature2D(Vertices, Lines=None): - """ - This function calculates the curvature of a 2D line. It first fits + """This function calculates the curvature of a 2D line. It first fits polygons to the points. Then calculates the analytical curvature from the polygons. @@ -97,7 +96,7 @@ def LineCurvature2D(Vertices, Lines=None): -------- >>> import numpy as np >>> import matplotlib.pyplot as plt - >>> data = np.load(os.path.join(_default_path, "testdata.npz")) + >>> data = np.load(Path(_default_path).joinpath("testdata.npz")) >>> Lines, Vertices = data["Lines"], data["Vertices"] >>> k = LineCurvature2D(Vertices, Lines) >>> N = LineNormals2D(Vertices, Lines) @@ -128,7 +127,8 @@ def LineCurvature2D(Vertices, Lines=None): np.arange(2, Vertices.shape[0] + 1), ] else: - raise ValueError(f"Cannot recognized {Lines!r}.") + msg = f"Cannot recognized {Lines!r}." + raise ValueError(msg) # Get left and right neighbor of each points. 
Na = np.zeros(Vertices.shape[0], dtype=np.int64) @@ -180,45 +180,24 @@ def LineCurvature2D(Vertices, Lines=None): invM = inverse3(M) a = np.zeros_like(x) b = np.zeros_like(a) - a[:, 0] = ( - invM[:, 0, 0] * x[:, 0] + invM[:, 1, 0] * x[:, 1] + invM[:, 2, 0] * x[:, 2] - ) + a[:, 0] = invM[:, 0, 0] * x[:, 0] + invM[:, 1, 0] * x[:, 1] + invM[:, 2, 0] * x[:, 2] - a[:, 1] = ( - invM[:, 0, 1] * x[:, 0] + invM[:, 1, 1] * x[:, 1] + invM[:, 2, 1] * x[:, 2] - ) + a[:, 1] = invM[:, 0, 1] * x[:, 0] + invM[:, 1, 1] * x[:, 1] + invM[:, 2, 1] * x[:, 2] - a[:, 2] = ( - invM[:, 0, 2] * x[:, 0] + invM[:, 1, 2] * x[:, 1] + invM[:, 2, 2] * x[:, 2] - ) + a[:, 2] = invM[:, 0, 2] * x[:, 0] + invM[:, 1, 2] * x[:, 1] + invM[:, 2, 2] * x[:, 2] - b[:, 0] = ( - invM[:, 0, 0] * y[:, 0] + invM[:, 1, 0] * y[:, 1] + invM[:, 2, 0] * y[:, 2] - ) + b[:, 0] = invM[:, 0, 0] * y[:, 0] + invM[:, 1, 0] * y[:, 1] + invM[:, 2, 0] * y[:, 2] - b[:, 1] = ( - invM[:, 0, 1] * y[:, 0] + invM[:, 1, 1] * y[:, 1] + invM[:, 2, 1] * y[:, 2] - ) + b[:, 1] = invM[:, 0, 1] * y[:, 0] + invM[:, 1, 1] * y[:, 1] + invM[:, 2, 1] * y[:, 2] - b[:, 2] = ( - invM[:, 0, 2] * y[:, 0] + invM[:, 1, 2] * y[:, 1] + invM[:, 2, 2] * y[:, 2] - ) + b[:, 2] = invM[:, 0, 2] * y[:, 0] + invM[:, 1, 2] * y[:, 1] + invM[:, 2, 2] * y[:, 2] # Calculate the curvature from the fitted polygon. - k = ( - 2 - * (a[:, 1] * b[:, 2] - a[:, 2] * b[:, 1]) - / ((a[:, 1] ** 2 + b[:, 1] ** 2) ** (3 / 2)) - ) - - return k + return 2 * (a[:, 1] * b[:, 2] - a[:, 2] * b[:, 1]) / ((a[:, 1] ** 2 + b[:, 1] ** 2) ** (3 / 2)) def inverse3(M): - """ - This function does inv(M), but then for an array of 3x3 matrices. 
- - """ + """This function does inv(M), but then for an array of 3x3 matrices.""" adjM = np.zeros((M.shape[0], 3, 3)) adjM[:, 0, 0] = M[:, 4] * M[:, 8] - M[:, 7] * M[:, 5] adjM[:, 0, 1] = -(M[:, 3] * M[:, 8] - M[:, 6] * M[:, 5]) diff --git a/oceans/sw_extras/__init__.py b/oceans/sw_extras/__init__.py index a2764c0..ea88027 100644 --- a/oceans/sw_extras/__init__.py +++ b/oceans/sw_extras/__init__.py @@ -24,26 +24,26 @@ from oceans.sw_extras.waves import Waves __all__ = [ - N, - cor_beta, - cph, - cr_depth, - inertial_period, - kdpar, - o2sol_SP_pt_benson_krause_84, - photic_depth, - psu2ppt, - richnumb, - shear, - sigma_t, - sigmatheta, - soundspeed, - spice, - strat_period, - tcond, - visc, - zmld_boyer, - zmld_so, - gamma_GP_from_SP_pt, - Waves, + "N", + "Waves", + "cor_beta", + "cph", + "cr_depth", + "gamma_GP_from_SP_pt", + "inertial_period", + "kdpar", + "o2sol_SP_pt_benson_krause_84", + "photic_depth", + "psu2ppt", + "richnumb", + "shear", + "sigma_t", + "sigmatheta", + "soundspeed", + "spice", + "strat_period", + "tcond", + "visc", + "zmld_boyer", + "zmld_so", ] diff --git a/oceans/sw_extras/gamma_GP_from_SP_pt.py b/oceans/sw_extras/gamma_GP_from_SP_pt.py index 638c904..0f921b7 100644 --- a/oceans/sw_extras/gamma_GP_from_SP_pt.py +++ b/oceans/sw_extras/gamma_GP_from_SP_pt.py @@ -2,8 +2,7 @@ def in_polygon(xp, yp, polygon, transform=None, radius=0.0): - """ - Check is points `xp` and `yp` are inside the `polygon`. + """Check is points `xp` and `yp` are inside the `polygon`. Polygon is a `matplotlib.path.Path` object. 
https://stackoverflow.com/questions/21328854/shapely-and-matplotlib-point-in-polygon-not-accurate-with-geolocation @@ -21,14 +20,11 @@ def in_polygon(xp, yp, polygon, transform=None, radius=0.0): """ xp, yp = map(np.atleast_1d, (xp, yp)) points = np.atleast_2d([xp, yp]).T - return polygon.contains_points(points, transform=None, radius=0.0) + return polygon.contains_points(points, transform=transform, radius=radius) def gamma_G_north_atlantic(SP, pt): - """ - Polynomials definitions: North Atlantic. VERSION 1: WOCE dataset. - - """ + """Polynomials definitions: North Atlantic. VERSION 1: WOCE dataset.""" Fit = np.array( [ [0.0, 0.0, 0.868250629754601], @@ -72,10 +68,7 @@ def gamma_G_north_atlantic(SP, pt): def gamma_G_south_atlantic(SP, pt): - """ - Polynomials definitions: South Atlantic. VERSION 1: WOCE dataset. - - """ + """Polynomials definitions: South Atlantic. VERSION 1: WOCE dataset.""" Fit = np.array( [ [0.0, 0.0, 0.970176813506429], @@ -120,10 +113,7 @@ def gamma_G_south_atlantic(SP, pt): def gamma_G_pacific(SP, pt): - """ - Polynomials definitions: Pacific. VERSION 1: WOCE_dataset. - - """ + """Polynomials definitions: Pacific. VERSION 1: WOCE_dataset.""" Fit = np.array( [ [0.0, 0.0, 0.990419160678528], @@ -168,10 +158,7 @@ def gamma_G_pacific(SP, pt): def gamma_G_indian(SP, pt): - """ - Polynomials definitions: Indian. VERSION 1: WOCE_dataset. - - """ + """Polynomials definitions: Indian. VERSION 1: WOCE_dataset.""" Fit = np.array( [ [0.0, 0.0, 0.915127744449523], @@ -216,10 +203,7 @@ def gamma_G_indian(SP, pt): def gamma_G_southern_ocean(SP, pt, p): - """ - Polynomials definitions: Southern Ocean. VERSION 1: WOCE_dataset. - - """ + """Polynomials definitions: Southern Ocean. 
VERSION 1: WOCE_dataset.""" Fit_N = np.array( [ [0.0, 0.0, 0.874520046342081], @@ -290,18 +274,14 @@ def gamma_G_southern_ocean(SP, pt, p): gamma_A = gamma_A + Fit_S[k, 2] * (SP**i * pt**j) gamma_SOce_S = ( - gamma_A - * np.exp(-p / p_ref) - * (1.0 / 2.0 - 1.0 / 2.0 * np.tanh((40.0 * pt - pt_ref) / c_pt)) + gamma_A * np.exp(-p / p_ref) * (1.0 / 2.0 - 1.0 / 2.0 * np.tanh((40.0 * pt - pt_ref) / c_pt)) ) - gamma_SOce = gamma_SOce_N + gamma_SOce_S - return gamma_SOce + return gamma_SOce_N + gamma_SOce_S def gamma_GP_from_SP_pt(SP, pt, p, lon, lat): - """ - Global Polynomial of Neutral Density with respect to Practical Salinity + """Global Polynomial of Neutral Density with respect to Practical Salinity and potential temperature. Calculates the Global Polynomial of Neutral Density gammma_GP using an @@ -445,7 +425,6 @@ def gamma_GP_from_SP_pt(SP, pt, p, lon, lat): gamma_Pac = gamma_G_pacific(SP, pt) gamma_Ind = gamma_G_indian(SP, pt) gamma_SOce = gamma_G_southern_ocean(SP, pt, p) - # gamma_Arc = np.zeros_like(SP) * np.nan # Definition of the Indian part. io_lon = np.array( @@ -558,15 +537,18 @@ def gamma_GP_from_SP_pt(SP, pt, p, lon, lat): ) # Definition of the polygon filters. - io_polygon = Path(list(zip(io_lon, io_lat))) - po_polygon = Path(list(zip(po_lon, po_lat))) + io_polygon = Path(list(zip(io_lon, io_lat, strict=True))) + po_polygon = Path(list(zip(po_lon, po_lat, strict=True))) i_inter_indian_pacific = in_polygon(lon, lat, io_polygon) * in_polygon( lon, lat, po_polygon, ) - i_indian = np.logical_xor(in_polygon(lon, lat, io_polygon), i_inter_indian_pacific) + i_indian = np.logical_xor( + in_polygon(lon, lat, io_polygon), + i_inter_indian_pacific, + ) i_pacific = in_polygon(lon, lat, po_polygon) i_atlantic = (1 - i_pacific) * (1 - i_indian) @@ -593,6 +575,4 @@ def gamma_GP_from_SP_pt(SP, pt, p, lon, lat): gamma_GP[lat > 66.0] = np.nan # De-normalization. 
- gamma_GP = 20.0 * gamma_GP - 20 - - return gamma_GP + return 20.0 * gamma_GP - 20 diff --git a/oceans/sw_extras/sw_extras.py b/oceans/sw_extras/sw_extras.py index e7c106e..0b18615 100644 --- a/oceans/sw_extras/sw_extras.py +++ b/oceans/sw_extras/sw_extras.py @@ -6,8 +6,7 @@ def sigma_t(s, t, p): - """ - :math:`\\sigma_{t}` is the remainder of subtracting 1000 kg m :sup:`-3` + """:math:`\\sigma_{t}` is the remainder of subtracting 1000 kg m :sup:`-3` from the density of a sea water sample at atmospheric pressure. Parameters @@ -57,8 +56,7 @@ def sigma_t(s, t, p): def sigmatheta(s, t, p, pr=0): - """ - :math:`\\sigma_{\\theta}` is a measure of the density of ocean water + """:math:`\\sigma_{\\theta}` is a measure of the density of ocean water where the quantity :math:`\\sigma_{t}` is calculated using the potential temperature (:math:`\\theta`) rather than the in situ temperature and potential density of water mass relative to the specified reference @@ -109,8 +107,7 @@ def sigmatheta(s, t, p, pr=0): def N(bvfr2): - """ - Buoyancy frequency is the frequency with which a parcel or particle of + """Buoyancy frequency is the frequency with which a parcel or particle of fluid displaced a small vertical distance from its equilibrium position in a stable environment will oscillate. It will oscillate in simple harmonic motion with an angular frequency defined by @@ -157,8 +154,7 @@ def N(bvfr2): def cph(bvfr2): - """ - Buoyancy frequency in Cycles Per Hour. + """Buoyancy frequency in Cycles Per Hour. Parameters ---------- @@ -197,8 +193,7 @@ def cph(bvfr2): def shear(z, u, v=0): - r""" - Calculates the vertical shear for u, v velocity section. + r"""Calculates the vertical shear for u, v velocity section. .. 
math:: \\textrm{shear} = \\frac{\\partial (u^2 + v^2)^{0.5}}{\partial z} @@ -233,7 +228,7 @@ def shear(z, u, v=0): z, u, v = list(map(np.asanyarray, (z, u, v))) z, u, v = np.broadcast_arrays(z, u, v) - m, n = z.shape + m, _ = z.shape iup = np.arange(0, m - 1) ilo = np.arange(1, m) z_ave = (z[iup, :] + z[ilo, :]) / 2.0 @@ -245,8 +240,7 @@ def shear(z, u, v=0): def richnumb(bvfr2, S2): - r""" - Calculates the ratio of buoyancy to inertial forces which measures the + r"""Calculates the ratio of buoyancy to inertial forces which measures the stability of a fluid layer. this functions computes the gradient Richardson number in the form of: @@ -290,13 +284,11 @@ def richnumb(bvfr2, S2): """ bvfr2, S2 = list(map(np.asanyarray, (bvfr2, S2))) - # FIXME: check this for correctness. return bvfr2 / S2 def cor_beta(lat): - """ - Calculates the Coriolis :math:`\\beta` factor defined by: + """Calculates the Coriolis :math:`\\beta` factor defined by: .. math:: beta = 2 \\Omega \\cos(lat) @@ -337,8 +329,7 @@ def cor_beta(lat): def inertial_period(lat): - """ - Calculate the inertial period as: + """Calculate the inertial period as: .. math:: Ti = \\frac{2\\pi}{f} = \\frac{T_{sd}}{2\\sin\\phi} @@ -366,8 +357,7 @@ def inertial_period(lat): def strat_period(N): - """ - Stratification period is the inverse of the Buoyancy frequency and it + """Stratification period is the inverse of the Buoyancy frequency and it is defined by: .. math:: Tn = \\frac{2\\pi}{N} @@ -402,8 +392,7 @@ def strat_period(N): def visc(s, t, p): - """ - Calculates kinematic viscosity of sea-water. Based on Dan Kelley's fit + """Calculates kinematic viscosity of sea-water. Based on Dan Kelley's fit to Knauss's TABLE II-8. Parameters @@ -441,8 +430,7 @@ def visc(s, t, p): def tcond(s, t, p): - """ - Calculates thermal conductivity of sea-water. + """Calculates thermal conductivity of sea-water. 
Parameters ---------- @@ -479,21 +467,16 @@ def tcond(s, t, p): s, t, p = list(map(np.asanyarray, (s, t, p))) if False: # Castelli's option. - therm = 100.0 * ( - 5.5286e-3 + 3.4025e-8 * p + 1.8364e-5 * t - 3.3058e-9 * t**3 - ) # [W/m/K] + therm = 100.0 * (5.5286e-3 + 3.4025e-8 * p + 1.8364e-5 * t - 3.3058e-9 * t**3) # [W/m/K] # 1) Caldwell's option # 2 - simplified formula, accurate to 0.5% (eqn. 9) # in [cal/cm/C/sec] - therm = 0.001365 * ( - 1.0 + 0.003 * t - 1.025e-5 * t**2 + 0.0653 * (1e-4 * p) - 0.00029 * s - ) + therm = 0.001365 * (1.0 + 0.003 * t - 1.025e-5 * t**2 + 0.0653 * (1e-4 * p) - 0.00029 * s) return therm * 418.4 # [cal/cm/C/sec] ->[ W/m/K] def spice(s, t, p): - r""" - Compute sea spiciness as defined by Flament (2002). + r"""Compute sea spiciness as defined by Flament (2002). .. math:: \pi(\theta,s) = \sum^5_{i=0} \sum^4_{j=0} b_{ij}\theta^i(s-35)^i @@ -540,7 +523,6 @@ def spice(s, t, p): """ s, t, p = list(map(np.asanyarray, (s, t, p))) - # FIXME: I'm not sure about this next step. pt = sw.ptmp(s, t, p) B = np.zeros((6, 5)) @@ -595,26 +577,16 @@ def spice(s, t, p): def psu2ppt(psu): - """ - Converts salinity from PSU units to PPT + """Converts salinity from PSU units to PPT http://stommel.tamu.edu/~baum/paleo/ocean/node31.html#PracticalSalinityScale """ - a = [0.008, -0.1692, 25.3851, 14.0941, -7.0261, 2.7081] - return ( - a[1] - + a[2] * psu**0.5 - + a[3] * psu - + a[4] * psu**1.5 - + a[5] * psu**2 - + a[6] * psu**2.5 - ) + return a[1] + a[2] * psu**0.5 + a[3] * psu + a[4] * psu**1.5 + a[5] * psu**2 + a[6] * psu**2.5 def soundspeed(S, T, D, equation="mackenzie"): - """ - Various sound-speed equations. + """Various sound-speed equations. 1) soundspeed(s, t, d) returns the sound speed (m/sec) given vectors of salinity (ppt), temperature (deg C) and DEPTH (m) using the formula of Mackenzie: Mackenzie, K.V. "Nine-term Equation for @@ -705,34 +677,25 @@ def soundspeed(S, T, D, equation="mackenzie"): # S**1 TERM. 
A3 = (-3.389e-13 * T + 6.649e-12) * T + 1.100e-10 A2 = ((7.988e-12 * T - 1.6002e-10) * T + 9.1041e-9) * T - 3.9064e-7 - A1 = ( - ((-2.0122e-10 * T + 1.0507e-8) * T - 6.4885e-8) * T - 1.2580e-5 - ) * T + 9.4742e-5 + A1 = (((-2.0122e-10 * T + 1.0507e-8) * T - 6.4885e-8) * T - 1.2580e-5) * T + 9.4742e-5 A0 = (((-3.21e-8 * T + 2.006e-6) * T + 7.164e-5) * T - 1.262e-2) * T + 1.389 A = ((A3 * P + A2) * P + A1) * P + A0 # S**0 TERM. C3 = (-2.3643e-12 * T + 3.8504e-10) * T - 9.7729e-9 - C2 = ( - ((1.0405e-12 * T - 2.5335e-10) * T + 2.5974e-8) * T - 1.7107e-6 - ) * T + 3.1260e-5 - C1 = ( - ((-6.1185e-10 * T + 1.3621e-7) * T - 8.1788e-6) * T + 6.8982e-4 - ) * T + 0.153563 - C0 = ( - (((3.1464e-9 * T - 1.47800e-6) * T + 3.3420e-4) * T - 5.80852e-2) * T - + 5.03711 - ) * T + 1402.388 + C2 = (((1.0405e-12 * T - 2.5335e-10) * T + 2.5974e-8) * T - 1.7107e-6) * T + 3.1260e-5 + C1 = (((-6.1185e-10 * T + 1.3621e-7) * T - 8.1788e-6) * T + 6.8982e-4) * T + 0.153563 + C0 = ((((3.1464e-9 * T - 1.47800e-6) * T + 3.3420e-4) * T - 5.80852e-2) * T + 5.03711) * T + 1402.388 C = ((C3 * P + C2) * P + C1) * P + C0 # SOUND SPEED RETURN. ssp = C + (A + B * SR + D * S) * S else: - raise TypeError(f"Unrecognizable equation specified: {equation}") + msg = f"Unrecognizable equation specified: {equation}" + raise TypeError(msg) return ssp def photic_depth(z, par): - """ - Computes photic depth, based on 1% of surface PAR (Photosynthetically + """Computes photic depth, based on 1% of surface PAR (Photosynthetically Available Radiation). Parameters @@ -756,9 +719,8 @@ def photic_depth(z, par): def cr_depth(z, par): - """ - Computes Critical depth. Depth where 1% of surface PAR (Photosynthetically - Available Radiation). + """Computes Critical depth. Depth where 1% of surface PAR + (Photosynthetically Available Radiation). 
Parameters ---------- @@ -774,13 +736,11 @@ def cr_depth(z, par): """ ix = photic_depth(z, par)[1] - crdepth = z[ix][-1] - return crdepth + return z[ix][-1] def kdpar(z, par, boundary): - """ - Compute Kd value, since light extinction coefficient can be computed + """Compute Kd value, since light extinction coefficient can be computed from depth and Photossintetically Available Radiation (PAR). It will compute a linear regression through out following depths from boundary and them will be regressed to the upper depth to boundary @@ -820,8 +780,8 @@ def kdpar(z, par, boundary): xp = np.polyfit(z_z, np.log(par_z), 1) # Linear regression based on surface PAR, obtained from linear fitting. - # z = 0 - # PAR_surface = a(z) + b + # z @ 0 + # PAR_surface -> a(z) + b par_surface = np.exp(xp[1]) par = np.r_[par_surface, par] z = np.r_[0, z] @@ -832,8 +792,7 @@ def kdpar(z, par, boundary): def zmld_so(s, t, p, threshold=0.05, smooth=None): - """ - Computes mixed layer depth of Southern Ocean waters. + """Computes mixed layer depth of Southern Ocean waters. Parameters ---------- @@ -866,14 +825,11 @@ def zmld_so(s, t, p, threshold=0.05, smooth=None): sigma_t[nan_sigma] = np.nan der = np.divide(np.diff(sigma_t), np.diff(depth)) mld = np.where(der == np.nanmax(der))[0] - zmld = depth[mld] - - return zmld + return depth[mld] def zmld_boyer(s, t, p): - """ - Computes mixed layer depth, based on de Boyer Montégut et al., 2004. + """Computes mixed layer depth, based on de Boyer Montégut et al., 2004. 
Parameters ---------- @@ -903,65 +859,65 @@ def zmld_boyer(s, t, p): mldepthdens_mldindex = 0 mldepthptemp_mldindex = 0 return mldepthdens_mldindex, mldepthptemp_mldindex - else: - # starti = min(find((pres-10).^2==min((pres-10).^2))); - starti = np.min(np.where((p - 10.0) ** 2 == np.min((p - 10.0) ** 2))[0]) - starti = 0 - pres = p[starti:m] - sal = s[starti:m] - temp = t[starti:m] - - pden = sw.dens0(sal, temp) - 1000 - - mldepthdens_mldindex = m - 1 - for i, pp in enumerate(pden): - if np.abs(pden[starti] - pp) > 0.03: - mldepthdens_mldindex = i - break - - # Interpolate to exactly match the potential density threshold. - presseg = [pres[mldepthdens_mldindex - 1], pres[mldepthdens_mldindex]] - pdenseg = [ - pden[starti] - pden[mldepthdens_mldindex - 1], - pden[starti] - pden[mldepthdens_mldindex], - ] - P = np.polyfit(presseg, pdenseg, 1) - presinterp = np.linspace(presseg[0], presseg[1], 3) - pdenthreshold = np.polyval(P, presinterp) - - # The potential density threshold MLD value: - ix = np.max(np.where(np.abs(pdenthreshold) < 0.03)[0]) - mldepthdens_mldindex = presinterp[ix] - - # Search for the first level that exceeds the temperature threshold. - mldepthptmp_mldindex = m - 1 - for i, tt in enumerate(temp): - if np.abs(temp[starti] - tt) > 0.2: - mldepthptmp_mldindex = i - break - - # Interpolate to exactly match the temperature threshold. 
- presseg = [pres[mldepthptmp_mldindex - 1], pres[mldepthptmp_mldindex]] - tempseg = [ - temp[starti] - temp[mldepthptmp_mldindex - 1], - temp[starti] - temp[mldepthptmp_mldindex], - ] - P = np.polyfit(presseg, tempseg, 1) - presinterp = np.linspace(presseg[0], presseg[1], 3) - tempthreshold = np.polyval(P, presinterp) - - # The temperature threshold MLD value: - ix = np.max(np.where(np.abs(tempthreshold) < 0.2)[0]) - mldepthptemp_mldindex = presinterp[ix] - - return mldepthdens_mldindex, mldepthptemp_mldindex + # starti = min(find((pres-10).^2==min((pres-10).^2))); + starti = np.min(np.where((p - 10.0) ** 2 == np.min((p - 10.0) ** 2))[0]) + starti = 0 + pres = p[starti:m] + sal = s[starti:m] + temp = t[starti:m] + + pden = sw.dens0(sal, temp) - 1000 + + mldepthdens_mldindex = m - 1 + pden_threshold_mld = 0.03 + for i, pp in enumerate(pden): + if np.abs(pden[starti] - pp) > pden_threshold_mld: + mldepthdens_mldindex = i + break + + # Interpolate to exactly match the potential density threshold. + presseg = [pres[mldepthdens_mldindex - 1], pres[mldepthdens_mldindex]] + pdenseg = [ + pden[starti] - pden[mldepthdens_mldindex - 1], + pden[starti] - pden[mldepthdens_mldindex], + ] + P = np.polyfit(presseg, pdenseg, 1) + presinterp = np.linspace(presseg[0], presseg[1], 3) + pdenthreshold = np.polyval(P, presinterp) + + # The potential density threshold MLD value: + ix = np.max(np.where(np.abs(pdenthreshold) < pden_threshold_mld)[0]) + mldepthdens_mldindex = presinterp[ix] + + # Search for the first level that exceeds the temperature threshold. + mldepthptmp_mldindex = m - 1 + temp_threshold_mld = 0.2 + for i, tt in enumerate(temp): + if np.abs(temp[starti] - tt) > temp_threshold_mld: + mldepthptmp_mldindex = i + break + + # Interpolate to exactly match the temperature threshold. 
+ presseg = [pres[mldepthptmp_mldindex - 1], pres[mldepthptmp_mldindex]] + tempseg = [ + temp[starti] - temp[mldepthptmp_mldindex - 1], + temp[starti] - temp[mldepthptmp_mldindex], + ] + P = np.polyfit(presseg, tempseg, 1) + presinterp = np.linspace(presseg[0], presseg[1], 3) + tempthreshold = np.polyval(P, presinterp) + + # The temperature threshold MLD value: + ix = np.max(np.where(np.abs(tempthreshold) < temp_threshold_mld)[0]) + mldepthptemp_mldindex = presinterp[ix] + + return mldepthdens_mldindex, mldepthptemp_mldindex def o2sol_SP_pt_benson_krause_84(SP, pt): - """ - Calculates the oxygen, O2, concentration expected at equilibrium with air - at an Absolute Pressure of 101325 Pa (sea pressure of 0 dbar) including - saturated water vapor. + """Calculates the oxygen, O2, concentration expected at equilibrium with + air at an Absolute Pressure of 101325 Pa (sea pressure of 0 dbar) + including saturated water vapor. This function uses the solubility coefficients derived from the data of Benson and Krause 1984, as fitted by Garcia and Gordon 1992. @@ -998,7 +954,8 @@ def o2sol_SP_pt_benson_krause_84(SP, pt): """ SP, pt = list(map(np.asanyarray, (SP, pt))) - S = SP # rename to make eq. identical to the paper and increase readability. + # rename to make eq. identical to the paper and increase readability. + S = SP pt68 = pt * 1.00024 # IPTS-68 potential temperature in degC. Ts = np.log((298.15 - pt68) / (273.15 + pt68)) diff --git a/oceans/sw_extras/waves.py b/oceans/sw_extras/waves.py index 0d29a73..e104971 100644 --- a/oceans/sw_extras/waves.py +++ b/oceans/sw_extras/waves.py @@ -3,8 +3,7 @@ class Waves: - r""" - Solves the wave dispersion relationship via Newton-Raphson. + r"""Solves the wave dispersion relationship via Newton-Raphson. .. 
math:: \omega^2 = gk\tanh kh @@ -83,7 +82,7 @@ class Waves: """ - def __init__(self, h, T=None, L=None, thetao=None, Ho=None, lat=None): + def __init__(self, h, T=None, L=None, thetao=None, Ho=None, lat=None): # noqa: PLR0915, PLR0913, PLR0912, C901 self.T = np.asarray(T, dtype=np.float64) self.L = np.asarray(L, dtype=np.float64) self.Ho = np.asarray(Ho, dtype=np.float64) @@ -99,10 +98,7 @@ def __init__(self, h, T=None, L=None, thetao=None, Ho=None, lat=None): else: self.h = np.asarray(h, dtype=np.float64) - if lat is None: - g = 9.81 # Default gravity. - else: - g = gsw.grav(lat, p=0) + g = 9.81 if lat is None else gsw.grav(lat, p=0) if L is None: self.omega = 2 * np.pi / self.T @@ -110,16 +106,14 @@ def __init__(self, h, T=None, L=None, thetao=None, Ho=None, lat=None): # Returns wavenumber of the gravity wave dispersion relation using # newtons method. The initial guess is shallow water wavenumber. self.k = self.omega / np.sqrt(g) - # TODO: May change to, - # self.k = self.w ** 2 / (g * np.sqrt(self.w ** 2 * self.h / g)) f = g * self.k * np.tanh(self.k * self.h) - self.omega**2 - while np.abs(f.max()) > 1e-10: - dfdk = g * self.k * self.h * ( - 1 / (np.cosh(self.k * self.h)) - ) ** 2 + g * np.tanh(self.k * self.h) + converge = 1e-10 + while np.abs(f.max()) > converge: + dfdk = g * self.k * self.h * (1 / (np.cosh(self.k * self.h))) ** 2 + g * np.tanh( + self.k * self.h, + ) self.k = self.k - f / dfdk - # FIXME: f = g * self.k * np.tanh(self.k * self.h) - self.omega**2 self.L = 2 * np.pi / self.k diff --git a/oceans/synop.py b/oceans/synop.py index cddbf52..a89581a 100644 --- a/oceans/synop.py +++ b/oceans/synop.py @@ -1,9 +1,8 @@ import numpy as np -def scaloa(xc, yc, x, y, t=None, corrlen=None, err=None, zc=None): - """ - Scalar objective analysis. Interpolates t(x, y) into tp(xc, yc) +def scaloa(xc, yc, x, y, t=None, corrlen=None, err=None, zc=None): # noqa: PLR0913 + """Scalar objective analysis. 
Interpolates t(x, y) into tp(xc, yc) Assumes spatial correlation function to be isotropic and Gaussian in the form of: C = (1 - err) * np.exp(-d**2 / corrlen**2) where: d : Radial distance from the observations. @@ -34,14 +33,11 @@ def scaloa(xc, yc, x, y, t=None, corrlen=None, err=None, zc=None): error. """ - n = len(x) x, y = np.reshape(x, (1, n)), np.reshape(y, (1, n)) # Squared distance matrix between the observations. - d2 = (np.tile(x, (n, 1)).T - np.tile(x, (n, 1))) ** 2 + ( - np.tile(y, (n, 1)).T - np.tile(y, (n, 1)) - ) ** 2 + d2 = (np.tile(x, (n, 1)).T - np.tile(x, (n, 1))) ** 2 + (np.tile(y, (n, 1)).T - np.tile(y, (n, 1))) ** 2 nv = len(xc) xc, yc = np.reshape(xc, (1, nv)), np.reshape(yc, (1, nv)) @@ -71,12 +67,14 @@ def scaloa(xc, yc, x, y, t=None, corrlen=None, err=None, zc=None): t = np.reshape(t, (n, 1)) tp = np.dot(C, np.linalg.solve(A, t)) if 0: # NOTE: `scaloa2.m` - mD = np.sum(np.linalg.solve(A, t)) / np.sum(np.sum(np.linalg.inv(A))) + mD = np.sum(np.linalg.solve(A, t)) / np.sum( + np.sum(np.linalg.inv(A)), + ) t = t - mD tp = C * (np.linalg.solve(A, t)) tp = tp + mD * np.ones(tp.shape) if not t: - print("Computing just the interpolation errors.") # noqa + print("Computing just the interpolation errors.") # noqa: T201 # Normalized mean error. Taking the squared root you can get the # interpolation error in percentage. diff --git a/oceans/utilities.py b/oceans/utilities.py index e03b138..e77be85 100644 --- a/oceans/utilities.py +++ b/oceans/utilities.py @@ -1,15 +1,14 @@ -import os +from pathlib import Path import numpy as np def basename(fname): - return os.path.splitext(os.path.basename(fname)) + return Path(fname).name class match_args_return: - """ - Function decorator to homogenize input arguments and to make the output + """Function decorator to homogenize input arguments and to make the output match the original input with respect to scalar versus array, and masked versus ndarray. 
@@ -27,7 +26,7 @@ def __call__(self, *args, **kw): # Check if is masked self.masked = np.any([np.ma.isMaskedArray(a) for a in args]) newargs = [np.ma.atleast_1d(np.ma.masked_invalid(a)) for a in args] - newargs = [a.astype(np.float) for a in newargs] + newargs = [a.astype(float) for a in newargs] ret = self.func(*newargs, **kw) if not self.masked: # Return a filled array if not masked. ret = np.ma.filled(ret, np.nan) diff --git a/pyproject.toml b/pyproject.toml index b7ac484..964850b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,6 +22,8 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", ] dynamic = [ "dependencies", @@ -42,12 +44,8 @@ urls.repository = "https://github.com/pyoceans/python-oceans" packages = [ "oceans" ] zip-safe = false include-package-data = true - -[tool.setuptools.dynamic] -dependencies = { file = [ "requirements.txt" ] } - -[tool.setuptools.package-data] -oceans = [ "colormaps/cmap_data/*.dat" ] +dynamic.dependencies = { file = [ "requirements.txt" ] } +package-data.oceans = [ "colormaps/cmap_data/*.dat" ] [tool.setuptools_scm] write_to = "oceans/_version.py" @@ -68,14 +66,13 @@ lint.select = [ lint.ignore = [ "B905", # zip ztrict arg, enable only for py310 ] - lint.per-file-ignores."docs/source/conf.py" = [ "A001", "E402", ] lint.per-file-ignores."oceans/plotting.py" = [ "T201", -] # `print` found +] # `print` found [tool.check-manifest] ignore = [ @@ -88,8 +85,8 @@ ignore = [ "tests/*", ] -[tool.pytest.ini_options] -markers = [ +[tool.pytest] +ini_options.markers = [ "web: marks tests require connection (deselect with '-m \"not web\"')", ] diff --git a/requirements-dev.txt b/requirements-dev.txt index 0063134..71c531e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -9,6 +9,7 @@ pytest pytest-cov pytest-xdist scipy +setuptools-scm sphinx 
twine xarray diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 0000000..ac35a0c --- /dev/null +++ b/ruff.toml @@ -0,0 +1,52 @@ +line-length = 110 + +lint.select = ["ALL"] + +lint.ignore = [ + "COM812", # missing-trailing-comma + "D203", # 1 blank line required before class docstring + "D205", # 1 blank line required between summary line and description + "D213", # incompatible. Ignoring `multi-line-summary-second-line` + "TRY003", # Avoid specifying long messages outside the exception class + "ANN", # Missing type annotation + "D", # Missing type annotation + "N", # Missing type annotation + "PLR2004", # Magic value used in comparison + "PLC0415", # `import` should be at the top-level of a file + "INP001", # implicit namespace package + "RUF015", # Prefer `next(...)` over single element slice +] + +[lint.extend-per-file-ignores] +"docs/source/conf.py" = [ + "A001", # builtin-variable-shadowing + "D100", # Missing docstring in public module + "E402", # Module level import not at top of file + "ERA001", # Found commented-out code + "ERA001", # Found commented-out code + "EXE001", # Shebang is present but file is not executable +] +"test_*.py" = [ + "ANN001", # Missing type annotation for function argument + "ANN201", # Missing return type annotation for public function + "ANN202", # Missing return type annotation for private function + "INP001", # File is part of an implicit namespace package + "PD901", # Avoid using the generic variable name `df` for DataFrames + "S101", # Use of assert detected +] +# nbqa-ruff acts on converted .py so we cannot glob .ipynb :-/ +# https://github.com/nbQA-dev/nbQA/issues/823 +"notebooks/*" = [ + "ANN001", # Missing type annotation for function argument + "ANN201", # Missing return type annotation for public function + "B018", # Found useless expression. 
Either assign it to a variable or remove it + "D100", # Missing docstring in public module + "D103", # Missing docstring in public function + "E402", # Module level import not at top of file + "FBT003", # Boolean positional value in function call + "INP001", # File is part of an implicit namespace package + "PD901", # Avoid using the generic variable name `df` for DataFrames + "T201", # `print` found" +] +[lint.pycodestyle] +max-doc-length = 180 diff --git a/tests/test_sw_extras.py b/tests/test_sw_extras.py index 7264e30..f745d59 100644 --- a/tests/test_sw_extras.py +++ b/tests/test_sw_extras.py @@ -1,8 +1,4 @@ -""" -Test Extra seawater functions -============================= - -""" +"""Test Extra seawater functions.""" import numpy as np @@ -10,7 +6,8 @@ def test_kdpar(): - PAR = np.array( + """Test kdpar.""" + par = np.array( [ 389.26, 386.87, @@ -170,6 +167,6 @@ def test_kdpar(): ], ) - kd, par_surface = swe.kdpar(press, PAR, boundary=25) + kd, par_surface = swe.kdpar(press, par, boundary=25) np.testing.assert_almost_equal(kd, 0.13808412818017926) np.testing.assert_almost_equal(par_surface, 690.61656440966783)