diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml index 6c2a5df1..48423384 100644 --- a/.github/workflows/deploy-docs.yml +++ b/.github/workflows/deploy-docs.yml @@ -32,7 +32,7 @@ jobs: run: > set -e - && pip install jupyter-book + && pip install "jupyter-book<2" && jupyter-book build jupyterbook - name: GitHub Pages action if: success() && github.event_name == 'release' diff --git a/.github/workflows/docs-linkchecker.yml b/.github/workflows/docs-linkchecker.yml index 497fe4c6..72fa1caf 100644 --- a/.github/workflows/docs-linkchecker.yml +++ b/.github/workflows/docs-linkchecker.yml @@ -26,5 +26,5 @@ jobs: - name: Linkcheck run: > set -e - && pip install jupyter-book + && pip install "jupyter-book<2" && jupyter-book build jupyterbook --builder linkcheck diff --git a/.github/workflows/test-env.yml b/.github/workflows/test-env.yml index f111f651..4466eee4 100644 --- a/.github/workflows/test-env.yml +++ b/.github/workflows/test-env.yml @@ -12,7 +12,7 @@ jobs: strategy: matrix: # macos-latest is osx-arm64 and the env is not building there yet b/c of robis. 
- os: [ macos-13, ubuntu-latest, windows-latest ] + os: [ macos-15-intel, ubuntu-latest, windows-latest ] fail-fast: false defaults: run: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ecfbcce9..8d039673 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,7 +23,7 @@ repos: exclude: "_templates/layout.html" - repo: https://github.com/psf/black-pre-commit-mirror - rev: 25.12.0 + rev: 26.1.0 hooks: - id: black language_version: python3 @@ -34,7 +34,7 @@ repos: - id: add-trailing-comma - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.14.10 + rev: v0.15.0 hooks: - id: ruff @@ -55,7 +55,7 @@ repos: - id: nb-strip-paths - repo: https://github.com/woodruffw/zizmor-pre-commit - rev: v1.19.0 + rev: v1.22.0 hooks: - id: zizmor diff --git a/jupyterbook/content/code_gallery/data_access_notebooks/2018-02-20-obis.ipynb b/jupyterbook/content/code_gallery/data_access_notebooks/2018-02-20-obis.ipynb index b7823f38..c467d62d 100644 --- a/jupyterbook/content/code_gallery/data_access_notebooks/2018-02-20-obis.ipynb +++ b/jupyterbook/content/code_gallery/data_access_notebooks/2018-02-20-obis.ipynb @@ -48,6 +48,8 @@ "\n", "Created: 2018-02-20\n", "\n", + "Modified: 2026-02-04\n", + "\n", "The [Ocean Biogeographic Information System (OBIS)](https://www.obis.org/) is an open-access data and information system for marine biodiversity for science, conservation and sustainable development.\n", "\n", "In this example we will use R libraries [`obistools`](https://iobis.github.io/obistools) and [`robis`](https://iobis.github.io/robis) to search data regarding marine turtles occurrence in the South Atlantic Ocean.\n", @@ -212,7 +214,7 @@ "\n", "Now let us try to obtain the occurrence data for the South Atlantic. 
We will need a vector geometry for the ocean basin in the [well-known test (WKT)](https://en.wikipedia.org/wiki/Well-known_text) format to feed into the `robis` `occurrence` function.\n", "\n", - "In this example we converted a South Atlantic shapefile to WKT with geopandas, but one can also obtain geometries by simply drawing them on a map with [iobis maptool](https://obis.org/maptool)." + "In this example we converted a South Atlantic shapefile to WKT with geopandas, but one can also obtain geometries by simply drawing them on a map with [iobis maptool](https://maptool.obis.org)." ] }, { @@ -1070,7 +1072,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.14.2" } }, "nbformat": 4, diff --git a/jupyterbook/content/code_gallery/data_access_notebooks/2022-11-23_pyobis_example.ipynb b/jupyterbook/content/code_gallery/data_access_notebooks/2022-11-23_pyobis_example.ipynb index 5e917af8..3173ea76 100644 --- a/jupyterbook/content/code_gallery/data_access_notebooks/2022-11-23_pyobis_example.ipynb +++ b/jupyterbook/content/code_gallery/data_access_notebooks/2022-11-23_pyobis_example.ipynb @@ -48,15 +48,13 @@ "\n", "Created: 2022-11-23\n", "\n", - "Updated: 2023-03-24\n", + "Updated: 2026-02-04\n", "\n", "\n", "Author: [Mathew Biddle](mailto:mathew.biddle@noaa.gov)\n", "\n", "This notebook uses the [pyobis](https://github.com/iobis/pyobis) Python package to query the [OBIS API](https://api.obis.org/) for datasets associated with projects funded under the United States Marine Biodiversity Observation Network. 
The notebook walks through the process for querying the OBIS api for a specific institution, then using the resultant datasets to gather the locations of all the occurrences using the pyobis package.\n", "\n", - "![image.png](https://marinebon.org/wp-content/uploads/2022/08/MBON_logo_horizontal_60.png)\n", - "\n", "The [US Marine Biodiversity Observation Network (US MBON)](https://ioos.noaa.gov/project/mbon/) is an interagency initiative that seeks to coordinate across sectors and government to characterize biodiversity and understand drivers of change. US MBON represents a broad, collaborative effort to address the need for systematic collection and sharing of marine life information, ensure that information is available for decision-making and management from local to national levels, and document marine biodiversity status and trends in the face of human- and climate-induced change using a range of technologies and approaches. Through the National Oceanographic Partnership Program, NOAA, NASA, Office of Naval Research, and BOEM have invested in US MBON since 2014, most recently announcing new five year projects in 2022.\n", "\n" ] @@ -179,7 +177,7 @@ "source": [ "Well that looks like the institution we're after! \n", "\n", - "Using the `id` we can check it out on the OBIS website: https://obis.org/institute/23070\n", + "Using the `id` we can check it out on the OBIS website: https://obis.org/organization/23070\n", "\n", "Yes, that does look like what we want. Now let's use that `id` to query OBIS for all associated datasets." 
] @@ -1176,7 +1174,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Let's explore those points a little more with [geopandas.GeoDataFrame.explore()](https://geopandas.org/en/stable/docs/reference/api/geopandas.GeoDataFrame.explore.html).\n", + "Let's explore those points a little more with [geopandas.GeoDataFrame.explore()](https://geopandas.org/en/stable/docs/reference/api/geopandas.GeoDataFrame.explore.html).\n", "\n", "This allows you to create an interactive map based on folium/leaflet.jsInteractive map based on GeoPandas and folium/leaflet.js" ] @@ -2009,7 +2007,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.5" + "version": "3.14.2" } }, "nbformat": 4, diff --git a/jupyterbook/content/code_gallery/data_access_notebooks/2024-09-17-CKAN_API_Query.ipynb b/jupyterbook/content/code_gallery/data_access_notebooks/2024-09-17-CKAN_API_Query.ipynb index fa9c296e..e7bdd0a7 100644 --- a/jupyterbook/content/code_gallery/data_access_notebooks/2024-09-17-CKAN_API_Query.ipynb +++ b/jupyterbook/content/code_gallery/data_access_notebooks/2024-09-17-CKAN_API_Query.ipynb @@ -60,7 +60,7 @@ "source": [ "Created: 2024-09-17\n", "\n", - "Updated: 2025-03-06\n", + "Updated: 2026-02-04\n", "\n", "Author: [Mathew Biddle](mailto:mathew.biddle@noaa.gov)" ] @@ -71,9 +71,9 @@ "id": "Dl6UQcydrdtx" }, "source": [ - "In this notebook we highlight the ability to search the [IOOS Data Catalog](https://data.ioos.us/) for a specific subset of observations using the [CKAN](https://ckan.org/) web accessible Application Programming Interface (API). \n", + "In this notebook we highlight the ability to search the IOOS Data Catalog for a specific subset of observations using the [CKAN](https://ckan.org/) web accessible Application Programming Interface (API). \n", "\n", - "For this example, we want to look for observations of oxygen in the water column across the IOOS Catalog. 
As part of the [IOOS Metadata Profile](https://ioos.github.io/ioos-metadata/), which the US IOOS community uses to publish datasets, we know that each Regional Association and DAC will be following the [Climate and Forecast (CF) Conventions](http://cfconventions.org/) and using CF `standard_names` to describe their datasets. So, with that assumption, we can search across the IOOS Data catalog for datasets with the CF standard names that contain `oxygen` and `sea_water`. Then, we can build a simple map to show the geographical distribution of those datasets." + "For this example, we want to look for observations of oxygen in the water column across the IOOS Catalog. As part of the [IOOS Metadata Profile](https://ioos.github.io/ioos-metadata/), which the US IOOS community uses to publish datasets, we know that each Regional Association and DAC will be following the [Climate and Forecast (CF) Conventions](https://cfconventions.org/) and using CF `standard_names` to describe their datasets. So, with that assumption, we can search across the IOOS Data catalog for datasets with the CF standard names that contain `oxygen` and `sea_water`. Then, we can build a simple map to show the geographical distribution of those datasets." 
] }, { @@ -1568,13 +1568,7 @@ "stamina.retry_scheduled\n", " 71%|██████████████████████████████████████████████████████████████████████████████████████████████▏ | 5417/7654 [07:47<39:10, 1.05s/it]stamina.retry_scheduled\n", "stamina.retry_scheduled\n", - " 71%|██████████████████████████████████████████████████████████████████████████████████████████████▏ | 5419/7654 [07:48<32:11, 1.16it/s]stamina.retry_scheduled\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ + " 71%|██████████████████████████████████████████████████████████████████████████████████████████████▏ | 5419/7654 [07:48<32:11, 1.16it/s]stamina.retry_scheduled\n", " 71%|██████████████████████████████████████████████████████████████████████████████████████████████▏ | 5420/7654 [07:49<34:28, 1.08it/s]stamina.retry_scheduled\n", "stamina.retry_scheduled\n", "stamina.retry_scheduled\n", @@ -9169,7 +9163,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.13.1" + "version": "3.14.2" } }, "nbformat": 4, diff --git a/jupyterbook/content/code_gallery/data_management_notebooks/2017-05-14-running_compliance_checker.ipynb b/jupyterbook/content/code_gallery/data_management_notebooks/2017-05-14-running_compliance_checker.ipynb index 833e983e..d95b4f0f 100644 --- a/jupyterbook/content/code_gallery/data_management_notebooks/2017-05-14-running_compliance_checker.ipynb +++ b/jupyterbook/content/code_gallery/data_management_notebooks/2017-05-14-running_compliance_checker.ipynb @@ -249,7 +249,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.14.2" } }, "nbformat": 4, diff --git a/jupyterbook/content/code_gallery/data_management_notebooks/2017-11-01-Creating-Archives-Using-Bagit.ipynb b/jupyterbook/content/code_gallery/data_management_notebooks/2017-11-01-Creating-Archives-Using-Bagit.ipynb index 28ad6794..7c7e8188 100644 --- 
a/jupyterbook/content/code_gallery/data_management_notebooks/2017-11-01-Creating-Archives-Using-Bagit.ipynb +++ b/jupyterbook/content/code_gallery/data_management_notebooks/2017-11-01-Creating-Archives-Using-Bagit.ipynb @@ -48,6 +48,8 @@ "\n", "Created: 2017-11-01\n", "\n", + "Modified: 2026-02-04\n", + "\n", "[`BagIt`](https://en.wikipedia.org/wiki/BagIt) is a packaging format that supports storage of arbitrary digital content. The \"bag\" consists of arbitrary content and \"tags,\" the metadata files. `BagIt` packages can be used to facilitate data sharing with federal archive centers - thus ensuring digital preservation of oceanographic datasets within IOOS and its regional associations. NOAA NCEI supports reading from a Web Accessible Folder (WAF) containing bagit archives. For an example please see: http://ncei.axiomdatascience.com/cencoos/\n", "\n", "On this notebook we will use the [python interface](http://libraryofcongress.github.io/bagit-python) for `BagIt` to create a \"bag\" of a time-series profile data. First let us load our data from a comma separated values file (`CSV`)." @@ -190,7 +192,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we can create a [Orthogonal Multidimensional Timeseries Profile](http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_orthogonal_multidimensional_array_representation_of_time_series) object..." + "Now we can create a [Orthogonal Multidimensional Timeseries Profile](https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_orthogonal_multidimensional_array_representation_of_time_series) object." 
] }, { @@ -442,7 +444,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.14.2" } }, "nbformat": 4, diff --git a/jupyterbook/content/code_gallery/data_management_notebooks/2023-03-20-Reading_and_writing_zarr.ipynb b/jupyterbook/content/code_gallery/data_management_notebooks/2023-03-20-Reading_and_writing_zarr.ipynb index 75069c3c..ed93fd3a 100644 --- a/jupyterbook/content/code_gallery/data_management_notebooks/2023-03-20-Reading_and_writing_zarr.ipynb +++ b/jupyterbook/content/code_gallery/data_management_notebooks/2023-03-20-Reading_and_writing_zarr.ipynb @@ -70,7 +70,7 @@ "In this example we will load an ocean model data, stored as netCDF and served via THREDDS, subset it and save as zarr. Let's start by saving a single time step for the surface layer temperature and salinity.\n", "\n", "\n", - "\\* Many data formats can take advantage of storing the data in chunks for faster access, the zarr approach is different in that each chunk is a different object in cloud storage, making them better for parallel access. The chunks can be compressed to reduce their size and improve cloud performance even further. Zarr has a nice tutorial on how to balance chunk size for performance. Check it out: https://zarr.readthedocs.io/en/stable/user-guide/performance.html#chunk-optimizations." + "\\* Many data formats can take advantage of storing the data in chunks for faster access, the zarr approach is different in that each chunk is a different object in cloud storage, making them better for parallel access. The chunks can be compressed to reduce their size and improve cloud performance even further. Zarr has a nice tutorial on how to balance chunk size for performance. Check it out: https://zarr.readthedocs.io/en/stable/user-guide/performance/." 
] }, { diff --git a/zizmor.yml b/zizmor.yml index ccbe1f47..e55b85df 100644 --- a/zizmor.yml +++ b/zizmor.yml @@ -1,3 +1,3 @@ rules: - obfuscation: + misfeature: disable: true