From 1c04bfc6d81ec9ce6b0f16b2c03ca79fb0fdc98c Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 5 Mar 2024 08:57:51 +0800 Subject: [PATCH 001/218] Enable the test_load_libgmt_fails test on Windows (#3079) --- pygmt/tests/test_clib_loading.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pygmt/tests/test_clib_loading.py b/pygmt/tests/test_clib_loading.py index bc4c154553d..8cb679e9285 100644 --- a/pygmt/tests/test_clib_loading.py +++ b/pygmt/tests/test_clib_loading.py @@ -64,12 +64,13 @@ def test_load_libgmt(): check_libgmt(load_libgmt()) -@pytest.mark.skipif(sys.platform == "win32", reason="run on UNIX platforms only") def test_load_libgmt_fails(monkeypatch): """ Test that GMTCLibNotFoundError is raised when GMT's shared library cannot be found. """ with monkeypatch.context() as mpatch: + if sys.platform == "win32": + mpatch.setattr(ctypes.util, "find_library", lambda name: "fakegmt.dll") # noqa: ARG005 mpatch.setattr( sys, "platform", From 0dfc3564eb88d1b4486532763d9bb8a1075308e4 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 5 Mar 2024 10:45:18 +0800 Subject: [PATCH 002/218] TYP: Add type hints to the clib.loading.clib_full_names/load_libgmt functions (#3078) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/clib/loading.py | 54 +++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/pygmt/clib/loading.py b/pygmt/clib/loading.py index 2cc05a6ae6b..7bcf576b9b6 100644 --- a/pygmt/clib/loading.py +++ b/pygmt/clib/loading.py @@ -10,35 +10,36 @@ import shutil import subprocess as sp import sys +from collections.abc import Iterator, Mapping from ctypes.util import find_library from pathlib import Path from pygmt.exceptions import GMTCLibError, GMTCLibNotFoundError, GMTOSError -def load_libgmt(lib_fullnames=None): +def load_libgmt(lib_fullnames: Iterator[str] | None = None) -> ctypes.CDLL: """ Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`. Will look for the GMT shared library in the directories determined by - clib_full_names(). + ``clib_full_names()``. Parameters ---------- - lib_fullnames : list of str or None - List of possible full names of GMT's shared library. If ``None``, will - default to ``clib_full_names()``. + lib_fullnames + List of possible full names of GMT's shared library. If ``None``, will default + to ``clib_full_names()``. Returns ------- - :py:class:`ctypes.CDLL` object + libgmt The loaded shared library. Raises ------ GMTCLibNotFoundError - If there was any problem loading the library (couldn't find it or - couldn't access the functions). + If there was any problem loading the library (couldn't find it or couldn't + access the functions). """ if lib_fullnames is None: lib_fullnames = clib_full_names() @@ -96,20 +97,27 @@ def clib_names(os_name: str) -> list[str]: return libnames -def clib_full_names(env=None): +def clib_full_names(env: Mapping | None = None) -> Iterator[str]: """ - Return the full path of GMT's shared library for the current OS. + Return full path(s) of GMT shared library for the current operating system. + + The GMT shared library is searched for in following ways, sorted by priority: + + 1. Path defined by environmental variable GMT_LIBRARY_PATH + 2. Path returned by command "gmt --show-library" + 3. Path defined by environmental variable PATH (Windows only) + 4. System default search path Parameters ---------- - env : dict or None - A dictionary containing the environment variables. If ``None``, will - default to ``os.environ``. 
+ env + A dictionary containing the environment variables. If ``None``, will default to + ``os.environ``. Yields ------ - lib_fullnames: list of str - List of possible full names of GMT's shared library. + lib_fullnames + List of possible full names of GMT shared library. """ if env is None: env = os.environ @@ -118,21 +126,18 @@ def clib_full_names(env=None): # Search for the library in different ways, sorted by priority. # 1. Search for the library in GMT_LIBRARY_PATH if defined. - libpath = env.get("GMT_LIBRARY_PATH", "") # e.g. $HOME/miniconda/envs/pygmt/lib - if libpath: + if libpath := env.get("GMT_LIBRARY_PATH"): # e.g. $HOME/miniconda/envs/pygmt/lib for libname in libnames: libfullpath = Path(libpath) / libname if libfullpath.exists(): yield str(libfullpath) - # 2. Search for the library returned by command "gmt --show-library" - # Use `str(Path(realpath))` to avoid mixture of separators "\\" and "/" - if (gmtbin := shutil.which("gmt")) is not None: + # 2. Search for the library returned by command "gmt --show-library". + # Use `str(Path(realpath))` to avoid mixture of separators "\\" and "/". + if gmtbin := shutil.which("gmt"): try: libfullpath = Path( - sp.check_output([gmtbin, "--show-library"], encoding="utf-8").rstrip( - "\n" - ) + sp.check_output([gmtbin, "--show-library"], encoding="utf-8").rstrip() ) if libfullpath.exists(): yield str(libfullpath) @@ -142,8 +147,7 @@ def clib_full_names(env=None): # 3. Search for DLLs in PATH by calling find_library() (Windows only) if sys.platform == "win32": for libname in libnames: - libfullpath = find_library(libname) - if libfullpath: + if libfullpath := find_library(libname): yield libfullpath # 4. Search for library names in the system default path From 61434002ace2081decb6ba21b5ec687f3fb6e30a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Mar 2024 11:11:38 +1300 Subject: [PATCH 003/218] Bump mamba-org/setup-micromamba from 1.8.0 to 1.8.1 (#3088) Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 1.8.0 to 1.8.1. - [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/v1.8.0...v1.8.1) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cache_data.yaml | 2 +- .github/workflows/ci_docs.yml | 2 +- .github/workflows/ci_doctests.yaml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- .github/workflows/ci_tests_legacy.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cache_data.yaml b/.github/workflows/cache_data.yaml index 33c9e9ee439..239fb703bed 100644 --- a/.github/workflows/cache_data.yaml +++ b/.github/workflows/cache_data.yaml @@ -38,7 +38,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.0 + uses: mamba-org/setup-micromamba@v1.8.1 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index d43d1258bd9..262ae23935d 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -75,7 +75,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.0 + uses: mamba-org/setup-micromamba@v1.8.1 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index 24029432a4f..69913e5d8b4 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -41,7 +41,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.0 + uses: mamba-org/setup-micromamba@v1.8.1 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 3a2e62705fb..1da7e2e92f0 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -98,7 +98,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.0 + uses: mamba-org/setup-micromamba@v1.8.1 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 2f6ac4c754b..af65a3b8b23 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -53,7 +53,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.0 + uses: mamba-org/setup-micromamba@v1.8.1 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index db68a07b3a8..563b2f09427 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -50,7 +50,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.0 + uses: mamba-org/setup-micromamba@v1.8.1 with: environment-name: pygmt condarc: | From 99774d4cbbb5ce5600bd14d4b15c6fd339162109 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Mar 2024 11:12:04 +1300 Subject: [PATCH 004/218] Bump dawidd6/action-download-artifact from 3.1.1 to 3.1.2 (#3089) Bumps [dawidd6/action-download-artifact](https://github.com/dawidd6/action-download-artifact) from 3.1.1 to 3.1.2. 
- [Release notes](https://github.com/dawidd6/action-download-artifact/releases) - [Commits](https://github.com/dawidd6/action-download-artifact/compare/v3.1.1...v3.1.2) --- updated-dependencies: - dependency-name: dawidd6/action-download-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- .github/workflows/ci_docs.yml | 2 +- .github/workflows/ci_doctests.yaml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- .github/workflows/ci_tests_legacy.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index e5fed8c4b21..8c32d8976b6 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -64,7 +64,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.1 + uses: dawidd6/action-download-artifact@v3.1.2 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 262ae23935d..70a82fb6660 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -111,7 +111,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.1 + uses: dawidd6/action-download-artifact@v3.1.2 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index 69913e5d8b4..09a1795d4b2 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -70,7 +70,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.1 + uses: dawidd6/action-download-artifact@v3.1.2 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 1da7e2e92f0..87cfacde0b5 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -127,7 +127,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.1 + uses: dawidd6/action-download-artifact@v3.1.2 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index af65a3b8b23..6f5e6e05cbb 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -134,7 +134,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.1 + uses: dawidd6/action-download-artifact@v3.1.2 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index 563b2f09427..629946ca7ee 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -82,7 +82,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.1 + uses: 
dawidd6/action-download-artifact@v3.1.2 with: workflow: cache_data.yaml workflow_conclusion: success From f08cb943efb0b468121557c87e13376d08836e09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Wed, 6 Mar 2024 22:52:56 +0100 Subject: [PATCH 005/218] Fix typo in "clib/session.py" (#3091) --- pygmt/clib/session.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index b2fd760dcd2..d52f66501af 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1621,7 +1621,7 @@ def virtualfile_out( Create a virtual file or an actual file for storing output data. If ``fname`` is not given, a virtual file will be created to store the output - data into a GMT data container and the function yields the name of the virutal + data into a GMT data container and the function yields the name of the virtual file. Otherwise, the output data will be written into the specified file and the function simply yields the actual file name. From a5d8b143761c5efe220e4669e4f8a4f93595497b Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 7 Mar 2024 10:47:00 +0800 Subject: [PATCH 006/218] Wrap GMT's standard data type GMT_DATASET for table input/output (#2729) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/datatypes/dataset.py | 206 ++++++++++++++++++++++++++++++++++++- 1 file changed, 205 insertions(+), 1 deletion(-) diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index 32b7397dbba..21953ee9051 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -3,7 +3,211 @@ """ import ctypes as ctp +from typing import ClassVar + +import numpy as np +import pandas as pd class _GMT_DATASET(ctp.Structure): # noqa: N801 - pass + """ + GMT dataset structure for holding multiple tables (files). + + This class is only meant for internal use by PyGMT and is not exposed to users. + See the GMT source code gmt_resources.h for the original C struct definitions. + + Examples + -------- + >>> from pygmt.helpers import GMTTempFile + >>> from pygmt.clib import Session + >>> + >>> with GMTTempFile(suffix=".txt") as tmpfile: + ... # Prepare the sample data file + ... with open(tmpfile.name, mode="w") as fp: + ... print(">", file=fp) + ... print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) + ... print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) + ... print(">", file=fp) + ... print("7.0 8.0 9.0 TEXT8 TEXT90", file=fp) + ... print("10.0 11.0 12.0 TEXT123 TEXT456789", file=fp) + ... # Read the data file + ... with Session() as lib: + ... with lib.virtualfile_out(kind="dataset") as vouttbl: + ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... # The dataset + ... ds = lib.read_virtualfile(vouttbl, kind="dataset").contents + ... print(ds.n_tables, ds.n_columns, ds.n_segments) + ... print(ds.min[: ds.n_columns], ds.max[: ds.n_columns]) + ... # The table + ... tbl = ds.table[0].contents + ... print(tbl.n_columns, tbl.n_segments, tbl.n_records) + ... print(tbl.min[: tbl.n_columns], ds.max[: tbl.n_columns]) + ... for i in range(tbl.n_segments): + ... seg = tbl.segment[i].contents + ... for j in range(seg.n_columns): + ... print(seg.data[j][: seg.n_rows]) + ... 
print(seg.text[: seg.n_rows]) + 1 3 2 + [1.0, 2.0, 3.0] [10.0, 11.0, 12.0] + 3 2 4 + [1.0, 2.0, 3.0] [10.0, 11.0, 12.0] + [1.0, 4.0] + [2.0, 5.0] + [3.0, 6.0] + [b'TEXT1 TEXT23', b'TEXT4 TEXT567'] + [7.0, 10.0] + [8.0, 11.0] + [9.0, 12.0] + [b'TEXT8 TEXT90', b'TEXT123 TEXT456789'] + """ + + class _GMT_DATATABLE(ctp.Structure): # noqa: N801 + """ + GMT datatable structure for holding a table with multiple segments. + """ + + class _GMT_DATASEGMENT(ctp.Structure): # noqa: N801 + """ + GMT datasegment structure for holding a segment with multiple columns. + """ + + _fields_: ClassVar = [ + # Number of rows/records in this segment + ("n_rows", ctp.c_uint64), + # Number of fields in each record + ("n_columns", ctp.c_uint64), + # Minimum coordinate for each column + ("min", ctp.POINTER(ctp.c_double)), + # Maximum coordinate for each column + ("max", ctp.POINTER(ctp.c_double)), + # Data x, y, and possibly other columns + ("data", ctp.POINTER(ctp.POINTER(ctp.c_double))), + # Label string (if applicable) + ("label", ctp.c_char_p), + # Segment header (if applicable) + ("header", ctp.c_char_p), + # text beyond the data + ("text", ctp.POINTER(ctp.c_char_p)), + # Book-keeping variables "hidden" from the API + ("hidden", ctp.c_void_p), + ] + + _fields_: ClassVar = [ + # Number of file header records (0 if no header) + ("n_headers", ctp.c_uint), + # Number of columns (fields) in each record + ("n_columns", ctp.c_uint64), + # Number of segments in the array + ("n_segments", ctp.c_uint64), + # Total number of data records across all segments + ("n_records", ctp.c_uint64), + # Minimum coordinate for each column + ("min", ctp.POINTER(ctp.c_double)), + # Maximum coordinate for each column + ("max", ctp.POINTER(ctp.c_double)), + # Array with all file header records, if any + ("header", ctp.POINTER(ctp.c_char_p)), + # Pointer to array of segments + ("segment", ctp.POINTER(ctp.POINTER(_GMT_DATASEGMENT))), + # Book-keeping variables "hidden" from the API + ("hidden", ctp.c_void_p), + ] + + _fields_: ClassVar = [ + # The total number of tables (files) contained + ("n_tables", ctp.c_uint64), + # The number of data columns + ("n_columns", ctp.c_uint64), + # The total number of segments across all tables + ("n_segments", ctp.c_uint64), + # The total number of data records across all tables + ("n_records", ctp.c_uint64), + # Minimum coordinate for each column + ("min", ctp.POINTER(ctp.c_double)), + # Maximum coordinate for each column + ("max", ctp.POINTER(ctp.c_double)), + # Pointer to array of tables + ("table", ctp.POINTER(ctp.POINTER(_GMT_DATATABLE))), + # The datatype (numerical, text, or mixed) of this dataset + ("type", ctp.c_int32), + # The geometry of this dataset + ("geometry", ctp.c_int32), + # To store a referencing system string in PROJ.4 format + ("ProjRefPROJ4", ctp.c_char_p), + # To store a referencing system string in WKT format + ("ProjRefWKT", ctp.c_char_p), + # To store a referencing system EPSG code + ("ProjRefEPSG", ctp.c_int), + # Book-keeping variables "hidden" from the API + ("hidden", ctp.c_void_p), + ] + + def to_dataframe(self) -> pd.DataFrame: + """ + Convert a _GMT_DATASET object to a :class:`pandas.DataFrame` object. + + Currently, the number of columns in all segments of all tables are assumed to be + the same. The same column in all segments of all tables are concatenated. The + trailing text column is also concatenated as a single string column. + + Returns + ------- + df + A :class:`pandas.DataFrame` object. 
+ + Examples + -------- + >>> from pygmt.helpers import GMTTempFile + >>> from pygmt.clib import Session + >>> + >>> with GMTTempFile(suffix=".txt") as tmpfile: + ... # prepare the sample data file + ... with open(tmpfile.name, mode="w") as fp: + ... print(">", file=fp) + ... print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) + ... print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) + ... print(">", file=fp) + ... print("7.0 8.0 9.0 TEXT8 TEXT90", file=fp) + ... print("10.0 11.0 12.0 TEXT123 TEXT456789", file=fp) + ... with Session() as lib: + ... with lib.virtualfile_out(kind="dataset") as vouttbl: + ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... ds = lib.read_virtualfile(vouttbl, kind="dataset") + ... df = ds.contents.to_dataframe() + >>> df + 0 1 2 3 + 0 1.0 2.0 3.0 TEXT1 TEXT23 + 1 4.0 5.0 6.0 TEXT4 TEXT567 + 2 7.0 8.0 9.0 TEXT8 TEXT90 + 3 10.0 11.0 12.0 TEXT123 TEXT456789 + >>> df.dtypes.to_list() + [dtype('float64'), dtype('float64'), dtype('float64'), string[python]] + """ + # Deal with numeric columns + vectors = [] + for icol in range(self.n_columns): + colvector = [] + for itbl in range(self.n_tables): + dtbl = self.table[itbl].contents + for iseg in range(dtbl.n_segments): + dseg = dtbl.segment[iseg].contents + colvector.append( + np.ctypeslib.as_array(dseg.data[icol], shape=(dseg.n_rows,)) + ) + vectors.append(pd.Series(data=np.concatenate(colvector))) + + # Deal with trailing text column + textvector = [] + for itbl in range(self.n_tables): + dtbl = self.table[itbl].contents + for iseg in range(dtbl.n_segments): + dseg = dtbl.segment[iseg].contents + if dseg.text: + textvector.extend(dseg.text[: dseg.n_rows]) + if textvector: + vectors.append( + pd.Series(data=np.char.decode(textvector), dtype=pd.StringDtype()) + ) + + df = pd.concat(objs=vectors, axis=1) + return df From f03d4f4f855a5b18c2289966ab05adbf5a971f8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 10 Mar 2024 04:54:54 +0100 Subject: [PATCH 007/218] Figure.grdview: Fix typo in docs (#3093) --- pygmt/src/grdview.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pygmt/src/grdview.py b/pygmt/src/grdview.py index 4863c3b9d9b..44a654b5690 100644 --- a/pygmt/src/grdview.py +++ b/pygmt/src/grdview.py @@ -63,8 +63,8 @@ def grdview(self, grid, **kwargs): The name of the color palette table to use. drapegrid : str or xarray.DataArray The file name or a DataArray of the image grid to be draped on top - of the relief provided by grid. [Default determines colors from - grid]. Note that ``zscale`` and ``plane`` always refers to the grid. + of the relief provided by ``grid`` [Default determines colors from grid]. + Note that ``zscale`` and ``plane`` always refer to the grid. The drapegrid only provides the information pertaining to colors, which (if drapegrid is a grid) will be looked-up via the CPT (see ``cmap``). 
plane : float or str From 28e3513ec6d8c42a423856a20523c70e7db38196 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 10 Mar 2024 19:19:25 +0800 Subject: [PATCH 008/218] TYP: Add type hints and improve docstrings of load_tile_map function (#3087) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- pygmt/datasets/tile_map.py | 105 +++++++++++++++++-------------------- 1 file changed, 47 insertions(+), 58 deletions(-) diff --git a/pygmt/datasets/tile_map.py b/pygmt/datasets/tile_map.py index 1f30aa1ff51..aabfccd6fc5 100644 --- a/pygmt/datasets/tile_map.py +++ b/pygmt/datasets/tile_map.py @@ -3,15 +3,17 @@ :class:`xarray.DataArray`. """ -from __future__ import annotations +from typing import Literal from packaging.version import Version try: import contextily + from xyzservices import TileProvider _HAS_CONTEXTILY = True except ImportError: + TileProvider = None _HAS_CONTEXTILY = False import numpy as np @@ -21,76 +23,63 @@ def load_tile_map( - region, - zoom="auto", - source=None, - lonlat=True, - wait=0, - max_retries=2, + region: list, + zoom: int | Literal["auto"] = "auto", + source: TileProvider | str | None = None, + lonlat: bool = True, + wait: int = 0, + max_retries: int = 2, zoom_adjust: int | None = None, -): +) -> xr.DataArray: """ Load a georeferenced raster tile map from XYZ tile providers. The tiles that compose the map are merged and georeferenced into an - :class:`xarray.DataArray` image with 3 bands (RGB). Note that the returned - image is in a Spherical Mercator (EPSG:3857) coordinate reference system. + :class:`xarray.DataArray` image with 3 bands (RGB). Note that the returned image is + in a Spherical Mercator (EPSG:3857) coordinate reference system. Parameters ---------- - region : list - The bounding box of the map in the form of a list [*xmin*, *xmax*, - *ymin*, *ymax*]. These coordinates should be in longitude/latitude if - ``lonlat=True`` or Spherical Mercator (EPSG:3857) if ``lonlat=False``. - - zoom : int or str - Optional. Level of detail. Higher levels (e.g. ``22``) mean a zoom - level closer to the Earth's surface, with more tiles covering a smaller - geographical area and thus more detail. Lower levels (e.g. ``0``) mean - a zoom level further from the Earth's surface, with less tiles covering - a larger geographical area and thus less detail [Default is - ``"auto"`` to automatically determine the zoom level based on the - bounding box region extent]. + region + The bounding box of the map in the form of a list [*xmin*, *xmax*, *ymin*, + *ymax*]. These coordinates should be in longitude/latitude if ``lonlat=True`` or + Spherical Mercator (EPSG:3857) if ``lonlat=False``. + zoom + Level of detail. Higher levels (e.g. ``22``) mean a zoom level closer to the + Earth's surface, with more tiles covering a smaller geographical area and thus + more detail. Lower levels (e.g. ``0``) mean a zoom level further from the + Earth's surface, with less tiles covering a larger geographical area and thus + less detail. Default is ``"auto"`` to automatically determine the zoom level + based on the bounding box region extent. .. note:: The maximum possible zoom level may be smaller than ``22``, and depends on what is supported by the chosen web tile provider source. - - source : xyzservices.TileProvider or str - Optional. The tile source: web tile provider or path to a local file. 
- Provide either: - - - A web tile provider in the form of a - :class:`xyzservices.TileProvider` object. See - :doc:`Contextily providers ` for a - list of tile providers [Default is - ``xyzservices.providers.OpenStreetMap.HOT``, i.e. OpenStreetMap - Humanitarian web tiles]. - - A web tile provider in the form of a URL. The placeholders for the - XYZ in the URL need to be {x}, {y}, {z}, respectively. E.g. + source + The tile source: web tile provider or path to a local file. Provide either: + + - A web tile provider in the form of a :class:`xyzservices.TileProvider` object. + See :doc:`Contextily providers ` for a list of + tile providers. Default is ``xyzservices.providers.OpenStreetMap.HOT``, i.e. + OpenStreetMap Humanitarian web tiles. + - A web tile provider in the form of a URL. The placeholders for the XYZ in the + URL need to be {x}, {y}, {z}, respectively. E.g. ``https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png``. - - A local file path. The file is read with - :doc:`rasterio ` and all bands are loaded into the - basemap. See + - A local file path. The file is read with :doc:`rasterio ` and + all bands are loaded into the basemap. See :doc:`contextily:working_with_local_files`. .. important:: Tiles are assumed to be in the Spherical Mercator projection (EPSG:3857). - - lonlat : bool - Optional. If ``False``, coordinates in ``region`` are assumed to be - Spherical Mercator as opposed to longitude/latitude [Default is - ``True``]. - - wait : int - Optional. If the tile API is rate-limited, the number of seconds to - wait between a failed request and the next try [Default is ``0``]. - - max_retries : int - Optional. Total number of rejected requests allowed before contextily - will stop trying to fetch more tiles from a rate-limited API [Default - is ``2``]. - + lonlat + If ``False``, coordinates in ``region`` are assumed to be Spherical Mercator as + opposed to longitude/latitude. + wait + If the tile API is rate-limited, the number of seconds to wait between a failed + request and the next try. + max_retries + Total number of rejected requests allowed before contextily will stop trying to + fetch more tiles from a rate-limited API. zoom_adjust The amount to adjust a chosen zoom level if it is chosen automatically. Values outside of -1 to 1 are not recommended as they can lead to slow execution. @@ -100,15 +89,15 @@ def load_tile_map( Returns ------- - raster : xarray.DataArray + raster Georeferenced 3-D data array of RGB values. Raises ------ ImportError - If ``contextily`` is not installed or can't be imported. Follow - :doc:`install instructions for contextily `, (e.g. - via ``python -m pip install contextily``) before using this function. + If ``contextily`` is not installed or can't be imported. Follow the + :doc:`install instructions for contextily `, (e.g. via + ``python -m pip install contextily``) before using this function. 
Examples -------- From 0b46aade4cc9e6521b8b5e624c367955d8a39fe8 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 11 Mar 2024 19:17:10 +0800 Subject: [PATCH 009/218] clib: Add virtualfile_to_dataset method for converting virtualfile to a dataset (#3083) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- doc/api/index.rst | 5 +- pygmt/clib/session.py | 121 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+), 2 deletions(-) diff --git a/doc/api/index.rst b/doc/api/index.rst index 8758ef10423..547ab15efb2 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -283,8 +283,8 @@ the :meth:`~pygmt.clib.Session.call_module` method: Passing memory blocks between Python data objects (e.g. :class:`numpy.ndarray`, :class:`pandas.Series`, :class:`xarray.DataArray`, etc) and GMT happens through -*virtual files*. These methods are context managers that automate the -conversion of Python variables to GMT virtual files: +*virtual files*. These methods are context managers that automate the conversion of +Python objects to and from GMT virtual files: .. autosummary:: :toctree: generated @@ -294,6 +294,7 @@ conversion of Python variables to GMT virtual files: clib.Session.virtualfile_from_grid clib.Session.virtualfile_in clib.Session.virtualfile_out + clib.Session.virtualfile_to_dataset Low level access (these are mostly used by the :mod:`pygmt.clib` package): diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index d52f66501af..e2feb9cf857 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1738,6 +1738,127 @@ def read_virtualfile( dtype = {"dataset": _GMT_DATASET, "grid": _GMT_GRID}[kind] return ctp.cast(pointer, ctp.POINTER(dtype)) + def virtualfile_to_dataset( + self, + output_type: Literal["pandas", "numpy", "file"], + vfname: str, + column_names: list[str] | None = None, + ) -> pd.DataFrame | np.ndarray | None: + """ + Output a tabular dataset stored in a virtual file to a different format. + + The format of the dataset is determined by the ``output_type`` parameter. + + Parameters + ---------- + output_type + Desired output type of the result data. + + - ``"pandas"`` will return a :class:`pandas.DataFrame` object. + - ``"numpy"`` will return a :class:`numpy.ndarray` object. + - ``"file"`` means the result was saved to a file and will return ``None``. + vfname + The virtual file name that stores the result data. Required for ``"pandas"`` + and ``"numpy"`` output type. + column_names + The column names for the :class:`pandas.DataFrame` output. + + Returns + ------- + result + The result dataset. If ``output_type="file"`` returns ``None``. + + Examples + -------- + >>> from pathlib import Path + >>> import numpy as np + >>> import pandas as pd + >>> + >>> from pygmt.helpers import GMTTempFile + >>> from pygmt.clib import Session + >>> + >>> with GMTTempFile(suffix=".txt") as tmpfile: + ... # prepare the sample data file + ... with open(tmpfile.name, mode="w") as fp: + ... print(">", file=fp) + ... print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) + ... print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) + ... print(">", file=fp) + ... print("7.0 8.0 9.0 TEXT8 TEXT90", file=fp) + ... print("10.0 11.0 12.0 TEXT123 TEXT456789", file=fp) + ... + ... # file output + ... with Session() as lib: + ... with GMTTempFile(suffix=".txt") as outtmp: + ... with lib.virtualfile_out( + ... kind="dataset", fname=outtmp.name + ... ) as vouttbl: + ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... result = lib.virtualfile_to_dataset( + ... 
output_type="file", vfname=vouttbl + ... ) + ... assert result is None + ... assert Path(outtmp.name).stat().st_size > 0 + ... + ... # numpy output + ... with Session() as lib: + ... with lib.virtualfile_out(kind="dataset") as vouttbl: + ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... outnp = lib.virtualfile_to_dataset( + ... output_type="numpy", vfname=vouttbl + ... ) + ... assert isinstance(outnp, np.ndarray) + ... + ... # pandas output + ... with Session() as lib: + ... with lib.virtualfile_out(kind="dataset") as vouttbl: + ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... outpd = lib.virtualfile_to_dataset( + ... output_type="pandas", vfname=vouttbl + ... ) + ... assert isinstance(outpd, pd.DataFrame) + ... + ... # pandas output with specified column names + ... with Session() as lib: + ... with lib.virtualfile_out(kind="dataset") as vouttbl: + ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... outpd2 = lib.virtualfile_to_dataset( + ... output_type="pandas", + ... vfname=vouttbl, + ... column_names=["col1", "col2", "col3", "coltext"], + ... ) + ... assert isinstance(outpd2, pd.DataFrame) + >>> outnp + array([[1.0, 2.0, 3.0, 'TEXT1 TEXT23'], + [4.0, 5.0, 6.0, 'TEXT4 TEXT567'], + [7.0, 8.0, 9.0, 'TEXT8 TEXT90'], + [10.0, 11.0, 12.0, 'TEXT123 TEXT456789']], dtype=object) + >>> outpd + 0 1 2 3 + 0 1.0 2.0 3.0 TEXT1 TEXT23 + 1 4.0 5.0 6.0 TEXT4 TEXT567 + 2 7.0 8.0 9.0 TEXT8 TEXT90 + 3 10.0 11.0 12.0 TEXT123 TEXT456789 + >>> outpd2 + col1 col2 col3 coltext + 0 1.0 2.0 3.0 TEXT1 TEXT23 + 1 4.0 5.0 6.0 TEXT4 TEXT567 + 2 7.0 8.0 9.0 TEXT8 TEXT90 + 3 10.0 11.0 12.0 TEXT123 TEXT456789 + """ + if output_type == "file": # Already written to file, so return None + return None + + # Read the virtual file as a GMT dataset and convert to pandas.DataFrame + result = self.read_virtualfile(vfname, kind="dataset").contents.to_dataframe() + if output_type == "numpy": # numpy.ndarray output + return result.to_numpy() + + # Assign column names + if column_names is not None: + result.columns = column_names + return result # pandas.DataFrame output + def extract_region(self): """ Extract the WESN bounding box of the currently active figure. From 68bfe57a3879fc97893d3eaad7b0c2fea0551150 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 11 Mar 2024 19:17:51 +0800 Subject: [PATCH 010/218] doc: Move virtualfile_from_* methods as low-level API functions (#3096) --- doc/api/index.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/api/index.rst b/doc/api/index.rst index 547ab15efb2..2b82f2461f5 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -289,9 +289,6 @@ Python objects to and from GMT virtual files: .. 
autosummary:: :toctree: generated - clib.Session.virtualfile_from_matrix - clib.Session.virtualfile_from_vectors - clib.Session.virtualfile_from_grid clib.Session.virtualfile_in clib.Session.virtualfile_out clib.Session.virtualfile_to_dataset @@ -317,3 +314,7 @@ Low level access (these are mostly used by the :mod:`pygmt.clib` package): clib.Session.read_virtualfile clib.Session.extract_region clib.Session.get_libgmt_func + clib.Session.virtualfile_from_data + clib.Session.virtualfile_from_grid + clib.Session.virtualfile_from_matrix + clib.Session.virtualfile_from_vectors From e3c580f6d6b145f307320c53c1e7538965408d59 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 06:49:00 +0800 Subject: [PATCH 011/218] Bump pypa/gh-action-pypi-publish from 1.8.11 to 1.8.14 (#3100) Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.8.11 to 1.8.14. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.8.11...v1.8.14) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish-to-pypi.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 9980d2be12d..17883789851 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -75,10 +75,10 @@ jobs: ls -lh dist/ - name: Publish to Test PyPI - uses: pypa/gh-action-pypi-publish@v1.8.11 + uses: pypa/gh-action-pypi-publish@v1.8.14 with: repository-url: https://test.pypi.org/legacy/ - name: Publish to PyPI if: startsWith(github.ref, 'refs/tags') - uses: pypa/gh-action-pypi-publish@v1.8.11 + uses: pypa/gh-action-pypi-publish@v1.8.14 From 752305c73ede77a00bfd5c6ee77f4858557b4872 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 13 Mar 2024 13:52:09 +0800 Subject: [PATCH 012/218] pygmt.grd2xyz: Improve performance by storing output in virtual files (#3097) --- pygmt/helpers/decorators.py | 12 ++++++ pygmt/src/grd2xyz.py | 74 +++++++++++++++++-------------------- pygmt/tests/test_grd2xyz.py | 70 ++++------------------------------- 3 files changed, 52 insertions(+), 104 deletions(-) diff --git a/pygmt/helpers/decorators.py b/pygmt/helpers/decorators.py index 046cffa5514..28041911d23 100644 --- a/pygmt/helpers/decorators.py +++ b/pygmt/helpers/decorators.py @@ -254,6 +254,18 @@ input and skip trailing text. **Note**: If ``incols`` is also used then the columns given to ``outcols`` correspond to the order after the ``incols`` selection has taken place.""", + "outfile": """ + outfile + File name for saving the result data. Required if ``output_type="file"``. + If specified, ``output_type`` will be forced to be ``"file"``.""", + "output_type": """ + output_type + Desired output type of the result data. + + - ``pandas`` will return a :class:`pandas.DataFrame` object. + - ``numpy`` will return a :class:`numpy.ndarray` object. + - ``file`` will save the result to the file specified by the ``outfile`` + parameter.""", "outgrid": """ outgrid : str or None Name of the output netCDF grid file. 
For writing a specific grid diff --git a/pygmt/src/grd2xyz.py b/pygmt/src/grd2xyz.py index eade93473c2..17cfcb246bc 100644 --- a/pygmt/src/grd2xyz.py +++ b/pygmt/src/grd2xyz.py @@ -2,12 +2,13 @@ grd2xyz - Convert grid to data table """ +from typing import TYPE_CHECKING, Literal + import pandas as pd import xarray as xr from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, @@ -15,6 +16,9 @@ validate_output_table_type, ) +if TYPE_CHECKING: + from collections.abc import Hashable + __doctest_skip__ = ["grd2xyz"] @@ -33,7 +37,12 @@ s="skiprows", ) @kwargs_to_strings(R="sequence", o="sequence_comma") -def grd2xyz(grid, output_type="pandas", outfile=None, **kwargs): +def grd2xyz( + grid, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, +) -> pd.DataFrame | xr.DataArray | None: r""" Convert grid to data table. @@ -47,15 +56,8 @@ def grd2xyz(grid, output_type="pandas", outfile=None, **kwargs): Parameters ---------- {grid} - output_type : str - Determine the format the xyz data will be returned in [Default is - ``pandas``]: - - - ``numpy`` - :class:`numpy.ndarray` - - ``pandas``- :class:`pandas.DataFrame` - - ``file`` - ASCII file (requires ``outfile``) - outfile : str - The file name for the output ASCII file. + {output_type} + {outfile} cstyle : str [**f**\|\ **i**]. Replace the x- and y-coordinates on output with the corresponding @@ -118,13 +120,12 @@ def grd2xyz(grid, output_type="pandas", outfile=None, **kwargs): Returns ------- - ret : pandas.DataFrame or numpy.ndarray or None + ret Return type depends on ``outfile`` and ``output_type``: - - None if ``outfile`` is set (output will be stored in file set by - ``outfile``) - - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is - not set (depends on ``output_type``) + - None if ``outfile`` is set (output will be stored in file set by ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) Example ------- @@ -149,31 +150,22 @@ def grd2xyz(grid, output_type="pandas", outfile=None, **kwargs): "or 'file'." ) - # Set the default column names for the pandas dataframe header - dataframe_header = ["x", "y", "z"] + # Set the default column names for the pandas dataframe header. + column_names: list[Hashable] = ["x", "y", "z"] # Let output pandas column names match input DataArray dimension names - if isinstance(grid, xr.DataArray) and output_type == "pandas": + if output_type == "pandas" and isinstance(grid, xr.DataArray): # Reverse the dims because it is rows, columns ordered. 
- dataframe_header = [grid.dims[1], grid.dims[0], grid.name] - - with GMTTempFile() as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if outfile is None: - outfile = tmpfile.name - lib.call_module( - module="grd2xyz", - args=build_arg_string(kwargs, infile=vingrd, outfile=outfile), - ) - - # Read temporary csv output to a pandas table - if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame - result = pd.read_csv( - tmpfile.name, sep="\t", names=dataframe_header, comment=">" + column_names = [grid.dims[1], grid.dims[0], grid.name] + + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + lib.call_module( + module="grd2xyz", + args=build_arg_string(kwargs, infile=vingrd, outfile=vouttbl), + ) + return lib.virtualfile_to_dataset( + output_type=output_type, vfname=vouttbl, column_names=column_names ) - elif outfile != tmpfile.name: # return None if outfile set, output in outfile - result = None - - if output_type == "numpy": - result = result.to_numpy() - return result diff --git a/pygmt/tests/test_grd2xyz.py b/pygmt/tests/test_grd2xyz.py index b6f8e92c1ea..ab3feccf80c 100644 --- a/pygmt/tests/test_grd2xyz.py +++ b/pygmt/tests/test_grd2xyz.py @@ -2,14 +2,11 @@ Test pygmt.grd2xyz. """ -from pathlib import Path - import numpy as np import pandas as pd import pytest from pygmt import grd2xyz from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import GMTTempFile from pygmt.helpers.testing import load_static_earth_relief @@ -24,70 +21,17 @@ def fixture_grid(): @pytest.mark.benchmark def test_grd2xyz(grid): """ - Make sure grd2xyz works as expected. - """ - xyz_data = grd2xyz(grid=grid, output_type="numpy") - assert xyz_data.shape == (112, 3) - - -def test_grd2xyz_format(grid): + Test the basic functionality of grd2xyz. """ - Test that correct formats are returned. - """ - lon = -50.5 - lat = -18.5 - orig_val = grid.sel(lon=lon, lat=lat).to_numpy() - xyz_default = grd2xyz(grid=grid) - xyz_val = xyz_default[(xyz_default["lon"] == lon) & (xyz_default["lat"] == lat)][ - "z" - ].to_numpy() - assert isinstance(xyz_default, pd.DataFrame) - assert orig_val.size == 1 - assert xyz_val.size == 1 - np.testing.assert_allclose(orig_val, xyz_val) - xyz_array = grd2xyz(grid=grid, output_type="numpy") - assert isinstance(xyz_array, np.ndarray) - xyz_df = grd2xyz(grid=grid, output_type="pandas", outcols=None) + xyz_df = grd2xyz(grid=grid) assert isinstance(xyz_df, pd.DataFrame) assert list(xyz_df.columns) == ["lon", "lat", "z"] + assert xyz_df.shape == (112, 3) - -def test_grd2xyz_file_output(grid): - """ - Test that grd2xyz returns a file output when it is specified. - """ - with GMTTempFile(suffix=".xyz") as tmpfile: - result = grd2xyz(grid=grid, outfile=tmpfile.name, output_type="file") - assert result is None # return value is None - assert Path(tmpfile.name).stat().st_size > 0 # check that outfile exists - - -def test_grd2xyz_invalid_format(grid): - """ - Test that grd2xyz fails with incorrect format. - """ - with pytest.raises(GMTInvalidInput): - grd2xyz(grid=grid, output_type=1) - - -def test_grd2xyz_no_outfile(grid): - """ - Test that grd2xyz fails when a string output is set with no outfile. 
- """ - with pytest.raises(GMTInvalidInput): - grd2xyz(grid=grid, output_type="file") - - -def test_grd2xyz_outfile_incorrect_output_type(grid): - """ - Test that grd2xyz raises a warning when an outfile filename is set but the - output_type is not set to 'file'. - """ - with pytest.warns(RuntimeWarning): - with GMTTempFile(suffix=".xyz") as tmpfile: - result = grd2xyz(grid=grid, outfile=tmpfile.name, output_type="numpy") - assert result is None # return value is None - assert Path(tmpfile.name).stat().st_size > 0 # check that outfile exists + lon, lat = -50.5, -18.5 + orig_val = grid.sel(lon=lon, lat=lat).to_numpy() + xyz_val = xyz_df[(xyz_df["lon"] == lon) & (xyz_df["lat"] == lat)]["z"].to_numpy() + np.testing.assert_allclose(orig_val, xyz_val) def test_grd2xyz_pandas_output_with_o(grid): From 2f598c55cc3ad6a23ba244464e8355efd24c9ef3 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 13 Mar 2024 13:55:31 +0800 Subject: [PATCH 013/218] Add doctests for the validate_output_table_type function (#3098) Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> --- pygmt/helpers/validators.py | 40 +++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/pygmt/helpers/validators.py b/pygmt/helpers/validators.py index a91205809da..94916eac1f5 100644 --- a/pygmt/helpers/validators.py +++ b/pygmt/helpers/validators.py @@ -3,27 +3,51 @@ """ import warnings +from typing import Literal from pygmt.exceptions import GMTInvalidInput -def validate_output_table_type(output_type, outfile=None): +def validate_output_table_type( + output_type: Literal["pandas", "numpy", "file"], outfile: str | None = None +) -> Literal["pandas", "numpy", "file"]: """ Check if the ``output_type`` and ``outfile`` parameters are valid. Parameters ---------- - output_type : str - The type for a table output. Valid values are "file", "numpy", and - "pandas". - outfile : str - The file name for the output table file. Required if - ``output_type="file"``. + output_type + Desired output type of tabular data. Valid values are ``"pandas"``, + ``"numpy"`` and ``"file"``. + outfile + File name for saving the result data. Required if ``output_type`` is ``"file"``. + If specified, ``output_type`` will be forced to be ``"file"``. Returns ------- str - The original or corrected output type. + The original or updated output type. + + Examples + -------- + >>> validate_output_table_type(output_type="pandas") + 'pandas' + >>> validate_output_table_type(output_type="numpy") + 'numpy' + >>> validate_output_table_type(output_type="file", outfile="output-fname.txt") + 'file' + >>> validate_output_table_type(output_type="invalid-type") + Traceback (most recent call last): + ... + pygmt.exceptions.GMTInvalidInput: Must specify 'output_type' either as 'file', ... + >>> validate_output_table_type("file", outfile=None) + Traceback (most recent call last): + ... + pygmt.exceptions.GMTInvalidInput: Must specify 'outfile' for output_type='file'. + >>> with warnings.catch_warnings(record=True) as w: + ... validate_output_table_type("pandas", outfile="not-none.txt") + ... 
assert len(w) == 1 + 'file' """ if output_type not in ["file", "numpy", "pandas"]: raise GMTInvalidInput( From bf7b9a101d5f43f989363b089976ddc29f2b117f Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 13 Mar 2024 15:20:12 +0800 Subject: [PATCH 014/218] pygmt.filter1d: Improve performance by storing output in virtual files (#3085) --- pygmt/src/filter1d.py | 62 ++++++++++++---------------- pygmt/tests/test_filter1d.py | 78 ++---------------------------------- 2 files changed, 29 insertions(+), 111 deletions(-) diff --git a/pygmt/src/filter1d.py b/pygmt/src/filter1d.py index 79163e2b0dd..4bd0cdf8344 100644 --- a/pygmt/src/filter1d.py +++ b/pygmt/src/filter1d.py @@ -2,11 +2,13 @@ filter1d - Time domain filtering of 1-D data tables """ +from typing import Literal + import pandas as pd +import xarray as xr from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, use_alias, @@ -20,7 +22,12 @@ F="filter_type", N="time_col", ) -def filter1d(data, output_type="pandas", outfile=None, **kwargs): +def filter1d( + data, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, +) -> pd.DataFrame | xr.DataArray | None: r""" Time domain filtering of 1-D data tables. @@ -38,6 +45,8 @@ def filter1d(data, output_type="pandas", outfile=None, **kwargs): Parameters ---------- + {output_type} + {outfile} filter_type : str **type**\ *width*\ [**+h**]. Set the filter **type**. Choose among convolution and non-convolution @@ -91,48 +100,27 @@ def filter1d(data, output_type="pandas", outfile=None, **kwargs): left-most column is 0, while the right-most is (*n_cols* - 1) [Default is ``0``]. - output_type : str - Determine the format the xyz data will be returned in [Default is - ``pandas``]: - - - ``numpy`` - :class:`numpy.ndarray` - - ``pandas``- :class:`pandas.DataFrame` - - ``file`` - ASCII file (requires ``outfile``) - outfile : str - The file name for the output ASCII file. 
- Returns ------- - ret : pandas.DataFrame or numpy.ndarray or None + ret Return type depends on ``outfile`` and ``output_type``: - - None if ``outfile`` is set (output will be stored in file set by - ``outfile``) - - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is - not set (depends on ``output_type`` [Default is - :class:`pandas.DataFrame`]) + - None if ``outfile`` is set (output will be stored in file set by ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) """ if kwargs.get("F") is None: raise GMTInvalidInput("Pass a required argument to 'filter_type'.") output_type = validate_output_table_type(output_type, outfile=outfile) - with GMTTempFile() as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: - if outfile is None: - outfile = tmpfile.name - lib.call_module( - module="filter1d", - args=build_arg_string(kwargs, infile=vintbl, outfile=outfile), - ) - - # Read temporary csv output to a pandas table - if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame - result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">") - elif outfile != tmpfile.name: # return None if outfile set, output in outfile - result = None - - if output_type == "numpy": - result = result.to_numpy() - return result + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="vector", data=data) as vintbl, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + lib.call_module( + module="filter1d", + args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + ) + return lib.virtualfile_to_dataset(output_type=output_type, vfname=vouttbl) diff --git a/pygmt/tests/test_filter1d.py b/pygmt/tests/test_filter1d.py index c98c39f7ac4..9fa6f1c50db 100644 --- a/pygmt/tests/test_filter1d.py +++ b/pygmt/tests/test_filter1d.py @@ -2,15 +2,10 @@ Test pygmt.filter1d. """ -from pathlib import Path - -import numpy as np import pandas as pd import pytest from pygmt import filter1d from pygmt.datasets import load_sample_data -from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import GMTTempFile @pytest.fixture(scope="module", name="data") @@ -21,76 +16,11 @@ def fixture_data(): return load_sample_data(name="maunaloa_co2") -def test_filter1d_no_outfile(data): +@pytest.mark.benchmark +def test_filter1d(data): """ - Test filter1d with no set outfile. + Test the basic functionality of filter1d. """ result = filter1d(data=data, filter_type="g5") + assert isinstance(result, pd.DataFrame) assert result.shape == (671, 2) - - -def test_filter1d_file_output(data): - """ - Test that filter1d returns a file output when it is specified. - """ - with GMTTempFile(suffix=".txt") as tmpfile: - result = filter1d( - data=data, filter_type="g5", outfile=tmpfile.name, output_type="file" - ) - assert result is None # return value is None - assert Path(tmpfile.name).stat().st_size > 0 # check that outfile exists - - -def test_filter1d_invalid_format(data): - """ - Test that filter1d fails with an incorrect format for output_type. - """ - with pytest.raises(GMTInvalidInput): - filter1d(data=data, filter_type="g5", output_type="a") - - -def test_filter1d_no_filter(data): - """ - Test that filter1d fails with an argument is missing for filter. 
- """ - with pytest.raises(GMTInvalidInput): - filter1d(data=data) - - -def test_filter1d_no_outfile_specified(data): - """ - Test that filter1d fails when outpput_type is set to 'file' but no output file name - is specified. - """ - with pytest.raises(GMTInvalidInput): - filter1d(data=data, filter_type="g5", output_type="file") - - -def test_filter1d_outfile_incorrect_output_type(data): - """ - Test that filter1d raises a warning when an outfile filename is set but the - output_type is not set to 'file'. - """ - with pytest.warns(RuntimeWarning): - with GMTTempFile(suffix=".txt") as tmpfile: - result = filter1d( - data=data, filter_type="g5", outfile=tmpfile.name, output_type="numpy" - ) - assert result is None # return value is None - assert Path(tmpfile.name).stat().st_size > 0 # check that outfile exists - - -@pytest.mark.benchmark -def test_filter1d_format(data): - """ - Test that correct formats are returned. - """ - time_series_default = filter1d(data=data, filter_type="g5") - assert isinstance(time_series_default, pd.DataFrame) - assert time_series_default.shape == (671, 2) - time_series_array = filter1d(data=data, filter_type="g5", output_type="numpy") - assert isinstance(time_series_array, np.ndarray) - assert time_series_array.shape == (671, 2) - time_series_df = filter1d(data=data, filter_type="g5", output_type="pandas") - assert isinstance(time_series_df, pd.DataFrame) - assert time_series_df.shape == (671, 2) From 3a507a88c4f8e852317c394caeb4245e44155245 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 13 Mar 2024 15:30:27 +0800 Subject: [PATCH 015/218] Fix typos in the return value types of grd2xyz and filter1d --- pygmt/src/filter1d.py | 4 ++-- pygmt/src/grd2xyz.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pygmt/src/filter1d.py b/pygmt/src/filter1d.py index 4bd0cdf8344..3bb08cf6ff2 100644 --- a/pygmt/src/filter1d.py +++ b/pygmt/src/filter1d.py @@ -4,8 +4,8 @@ from typing import Literal +import numpy as np import pandas as pd -import xarray as xr from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( @@ -27,7 +27,7 @@ def filter1d( output_type: Literal["pandas", "numpy", "file"] = "pandas", outfile: str | None = None, **kwargs, -) -> pd.DataFrame | xr.DataArray | None: +) -> pd.DataFrame | np.ndarray | None: r""" Time domain filtering of 1-D data tables. diff --git a/pygmt/src/grd2xyz.py b/pygmt/src/grd2xyz.py index 17cfcb246bc..ea1c52cb224 100644 --- a/pygmt/src/grd2xyz.py +++ b/pygmt/src/grd2xyz.py @@ -4,6 +4,7 @@ from typing import TYPE_CHECKING, Literal +import numpy as np import pandas as pd import xarray as xr from pygmt.clib import Session @@ -42,7 +43,7 @@ def grd2xyz( output_type: Literal["pandas", "numpy", "file"] = "pandas", outfile: str | None = None, **kwargs, -) -> pd.DataFrame | xr.DataArray | None: +) -> pd.DataFrame | np.ndarray | None: r""" Convert grid to data table. 
From 4f3c5e44af2f53e31a9a6262469187c76767d1c3 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 16:57:22 +0800 Subject: [PATCH 016/218] Bump to ghotscript 10.03.0 (#3112) --- .github/workflows/ci_docs.yml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- ci/requirements/docs.yml | 2 +- environment.yml | 1 + 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 70a82fb6660..6f3a969cbfd 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -87,7 +87,7 @@ jobs: create-args: >- python=3.12 gmt=6.5.0 - ghostscript=10.02.1 + ghostscript=10.03.0 numpy pandas xarray diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 87cfacde0b5..80e4911da2b 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -110,7 +110,7 @@ jobs: create-args: >- python=${{ matrix.python-version }}${{ matrix.optional-packages }} gmt=6.5.0 - ghostscript=10.02.1 + ghostscript=10.03.0 numpy=${{ matrix.numpy-version }} pandas${{ matrix.pandas-version }} xarray${{ matrix.xarray-version }} diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 6f5e6e05cbb..79c768c5c4e 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -69,7 +69,7 @@ jobs: ninja curl fftw - ghostscript=10.02.1 + ghostscript=10.03.0 glib hdf5 libblas diff --git a/ci/requirements/docs.yml b/ci/requirements/docs.yml index ba27410ea80..7666f2aa09a 100644 --- a/ci/requirements/docs.yml +++ b/ci/requirements/docs.yml @@ -6,7 +6,7 @@ dependencies: # Required dependencies - python=3.12 - gmt=6.5.0 - - ghostscript=10.02.1 + - ghostscript=10.03.0 - numpy - pandas - xarray diff --git a/environment.yml b/environment.yml index b893373b96f..dc1f73c6d0e 100644 --- a/environment.yml +++ b/environment.yml @@ -6,6 +6,7 @@ dependencies: - python=3.12 # Required dependencies - gmt=6.5.0 + - ghotscript=10.03.0 - numpy>=1.23 - pandas>=1.5 - xarray>=2022.03 From 2bc6d24ae159257c2abb941859a517f78eb9d4df Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 21:18:01 +0800 Subject: [PATCH 017/218] pygmt.project: Add 'output_type' parameter for output in pandas/numpy/file formats (#3110) --- pygmt/src/project.py | 87 +++++++++++++++++++++++++------------------- 1 file changed, 49 insertions(+), 38 deletions(-) diff --git a/pygmt/src/project.py b/pygmt/src/project.py index 99738bfd9c8..833c58ce299 100644 --- a/pygmt/src/project.py +++ b/pygmt/src/project.py @@ -2,15 +2,18 @@ project - Project data onto lines or great circles, or generate tracks. """ +from typing import Literal + +import numpy as np import pandas as pd from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, use_alias, + validate_output_table_type, ) @@ -32,7 +35,15 @@ f="coltypes", ) @kwargs_to_strings(E="sequence", L="sequence", T="sequence", W="sequence", C="sequence") -def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs): +def project( + data=None, + x=None, + y=None, + z=None, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, +) -> pd.DataFrame | np.ndarray | None: r""" Project data onto lines or great circles, or generate tracks. 
@@ -105,6 +116,8 @@ def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs): Pass in (x, y, z) or (longitude, latitude, elevation) values by providing a file name to an ASCII data table, a 2-D {table-classes}. + {output_type} + {outfile} center : str or list *cx*/*cy*. @@ -196,22 +209,18 @@ def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs): *direction* is counter-clockwise from the horizontal instead of an *azimuth*. - outfile : str - The file name for the output ASCII file. - {coltypes} Returns ------- - track: pandas.DataFrame or None - Return type depends on whether the ``outfile`` parameter is set: + ret + Return type depends on ``outfile`` and ``output_type``: - - :class:`pandas.DataFrame` table with (x, y, ..., newcolname) if - ``outfile`` is not set - - None if ``outfile`` is set (output will be stored in file set - by ``outfile``) + - ``None`` if ``outfile`` is set (output will be stored in file set by + ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) """ - if kwargs.get("C") is None: raise GMTInvalidInput("The `center` parameter must be specified.") if kwargs.get("G") is None and data is None: @@ -223,29 +232,31 @@ def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs): "The `convention` parameter is not allowed with `generate`." ) - with GMTTempFile(suffix=".csv") as tmpfile: - if outfile is None: # Output to tmpfile if outfile is not set - outfile = tmpfile.name - with Session() as lib: - if kwargs.get("G") is None: - with lib.virtualfile_in( - check_kind="vector", data=data, x=x, y=y, z=z, required_z=False - ) as vintbl: - # Run project on the temporary (csv) data table - arg_str = build_arg_string(kwargs, infile=vintbl, outfile=outfile) - else: - arg_str = build_arg_string(kwargs, outfile=outfile) - lib.call_module(module="project", args=arg_str) - - # if user did not set outfile, return pd.DataFrame - if outfile == tmpfile.name: - if kwargs.get("G") is not None: - column_names = list("rsp") - result = pd.read_csv(tmpfile.name, sep="\t", names=column_names) - else: - result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">") - # return None if outfile set, output in outfile - elif outfile != tmpfile.name: - result = None - - return result + output_type = validate_output_table_type(output_type, outfile=outfile) + + column_names = None + if output_type == "pandas" and kwargs.get("G") is not None: + column_names = list("rsp") + + with Session() as lib: + with ( + lib.virtualfile_in( + check_kind="vector", + data=data, + x=x, + y=y, + z=z, + required_z=False, + required_data=False, + ) as vintbl, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + lib.call_module( + module="project", + args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + ) + return lib.virtualfile_to_dataset( + output_type=output_type, + vfname=vouttbl, + column_names=column_names, + ) From 26d27595f8cf2f5bbbf61a4fc95835f85fa88418 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 21:18:52 +0800 Subject: [PATCH 018/218] Fix the DataArray repr in the load_tile_map doctest for xarray v2024.02 (#3062) --- pygmt/datasets/tile_map.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pygmt/datasets/tile_map.py b/pygmt/datasets/tile_map.py index aabfccd6fc5..fec7465bdbf 100644 --- a/pygmt/datasets/tile_map.py +++ b/pygmt/datasets/tile_map.py @@ -111,11 +111,11 @@ def load_tile_map( ... 
) >>> raster.sizes Frozen({'band': 3, 'y': 256, 'x': 512}) - >>> raster.coords + >>> raster.coords # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Coordinates: - * band (band) uint8 0 1 2 - * y (y) float64 -7.081e-10 -7.858e+04 ... -1.996e+07 ... - * x (x) float64 -2.004e+07 -1.996e+07 ... 1.996e+07 2.004e+07 + * band (band) uint8 ... 0 1 2 + * y (y) float64 ... -7.081e-10 -7.858e+04 ... -1.996e+07 ... + * x (x) float64 ... -2.004e+07 -1.996e+07 ... 1.996e+07 2.004e+07 """ if not _HAS_CONTEXTILY: raise ImportError( From fa3e0e709daa5bb4289cc5a96834e0f4df2e33d0 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 21:20:01 +0800 Subject: [PATCH 019/218] pygmt.grdhisteq.compute_bins: Refactor to store output in virtual files instead of temporary files (#3109) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- pygmt/src/grdhisteq.py | 76 +++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 42 deletions(-) diff --git a/pygmt/src/grdhisteq.py b/pygmt/src/grdhisteq.py index 0e2c8c9ea60..880368f1a7c 100644 --- a/pygmt/src/grdhisteq.py +++ b/pygmt/src/grdhisteq.py @@ -2,6 +2,8 @@ grdhisteq - Perform histogram equalization for a grid. """ +from typing import Literal + import numpy as np import pandas as pd from pygmt.clib import Session @@ -135,7 +137,6 @@ def equalize_grid(grid, **kwargs): @fmt_docstring @use_alias( C="divisions", - D="outfile", R="region", N="gaussian", Q="quadratic", @@ -143,7 +144,12 @@ def equalize_grid(grid, **kwargs): h="header", ) @kwargs_to_strings(R="sequence") - def compute_bins(grid, output_type="pandas", **kwargs): + def compute_bins( + grid, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, + ) -> pd.DataFrame | np.ndarray | None: r""" Perform histogram equalization for a grid. @@ -168,16 +174,8 @@ def compute_bins(grid, output_type="pandas", **kwargs): Parameters ---------- {grid} - outfile : str or bool or None - The name of the output ASCII file to store the results of the - histogram equalization in. - output_type : str - Determine the format the xyz data will be returned in [Default is - ``pandas``]: - - - ``numpy`` - :class:`numpy.ndarray` - - ``pandas``- :class:`pandas.DataFrame` - - ``file`` - ASCII file (requires ``outfile``) + {output_type} + {outfile} divisions : int Set the number of divisions of the data range. quadratic : bool @@ -188,13 +186,13 @@ def compute_bins(grid, output_type="pandas", **kwargs): Returns ------- - ret : pandas.DataFrame or numpy.ndarray or None + ret Return type depends on ``outfile`` and ``output_type``: - - None if ``outfile`` is set (output will be stored in file set by + - ``None`` if ``outfile`` is set (output will be stored in file set by ``outfile``) - - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if - ``outfile`` is not set (depends on ``output_type``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not + set (depends on ``output_type``) Example ------- @@ -225,39 +223,33 @@ def compute_bins(grid, output_type="pandas", **kwargs): This method does a weighted histogram equalization for geographic grids to account for node area varying with latitude. 
""" - outfile = kwargs.get("D") output_type = validate_output_table_type(output_type, outfile=outfile) if kwargs.get("h") is not None and output_type != "file": raise GMTInvalidInput("'header' is only allowed with output_type='file'.") - with GMTTempFile(suffix=".txt") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if outfile is None: - kwargs["D"] = outfile = tmpfile.name # output to tmpfile - lib.call_module( - module="grdhisteq", args=build_arg_string(kwargs, infile=vingrd) - ) + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + kwargs["D"] = vouttbl # -D for output file name + lib.call_module( + module="grdhisteq", args=build_arg_string(kwargs, infile=vingrd) + ) - if outfile == tmpfile.name: - # if user did not set outfile, return pd.DataFrame - result = pd.read_csv( - filepath_or_buffer=outfile, - sep="\t", - header=None, - names=["start", "stop", "bin_id"], - dtype={ + result = lib.virtualfile_to_dataset( + output_type=output_type, + vfname=vouttbl, + column_names=["start", "stop", "bin_id"], + ) + if output_type == "pandas": + result = result.astype( + { "start": np.float32, "stop": np.float32, "bin_id": np.uint32, - }, + } ) - elif outfile != tmpfile.name: - # return None if outfile set, output in outfile - return None - - if output_type == "numpy": - return result.to_numpy() - - return result.set_index("bin_id") + return result.set_index("bin_id") + return result From 83b1a123999c425b896f13d1783014a4bb0bc58a Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 21:21:24 +0800 Subject: [PATCH 020/218] pygmt.select: Improve performance by storing output in virtual files (#3108) --- pygmt/src/select.py | 69 +++++++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 31 deletions(-) diff --git a/pygmt/src/select.py b/pygmt/src/select.py index fe132b356f9..2e9b97299a5 100644 --- a/pygmt/src/select.py +++ b/pygmt/src/select.py @@ -2,14 +2,17 @@ select - Select data table subsets based on multiple spatial criteria. """ +from typing import Literal + +import numpy as np import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, use_alias, + validate_output_table_type, ) __doctest_skip__ = ["select"] @@ -41,7 +44,12 @@ w="wrap", ) @kwargs_to_strings(M="sequence", R="sequence", i="sequence_comma", o="sequence_comma") -def select(data=None, outfile=None, **kwargs): +def select( + data=None, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, +) -> pd.DataFrame | np.ndarray | None: r""" Select data table subsets based on multiple spatial criteria. @@ -70,8 +78,8 @@ def select(data=None, outfile=None, **kwargs): data : str, {table-like} Pass in either a file name to an ASCII data table, a 2-D {table-classes}. - outfile : str - The file name for the output ASCII file. + {output_type} + {outfile} {area_thresh} dist2pt : str *pointfile*\|\ *lon*/*lat*\ **+d**\ *dist*. @@ -180,12 +188,13 @@ def select(data=None, outfile=None, **kwargs): Returns ------- - output : pandas.DataFrame or None - Return type depends on whether the ``outfile`` parameter is set: + ret + Return type depends on ``outfile`` and ``output_type``: - - :class:`pandas.DataFrame` table if ``outfile`` is not set. 
- - None if ``outfile`` is set (filtered output will be stored in file - set by ``outfile``). + - ``None`` if ``outfile`` is set (output will be stored in file set by + ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) Example ------- @@ -196,25 +205,23 @@ def select(data=None, outfile=None, **kwargs): >>> # longitudes 246 and 247 and latitudes 20 and 21 >>> out = pygmt.select(data=ship_data, region=[246, 247, 20, 21]) """ - - with GMTTempFile(suffix=".csv") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: - if outfile is None: - outfile = tmpfile.name - lib.call_module( - module="select", - args=build_arg_string(kwargs, infile=vintbl, outfile=outfile), - ) - - # Read temporary csv output to a pandas table - if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame - try: - column_names = data.columns.to_list() - result = pd.read_csv(tmpfile.name, sep="\t", names=column_names) - except AttributeError: # 'str' object has no attribute 'columns' - result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">") - elif outfile != tmpfile.name: # return None if outfile set, output in outfile - result = None - - return result + output_type = validate_output_table_type(output_type, outfile=outfile) + + column_names = None + if output_type == "pandas" and isinstance(data, pd.DataFrame): + column_names = data.columns.to_list() + + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="vector", data=data) as vintbl, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + lib.call_module( + module="select", + args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + ) + return lib.virtualfile_to_dataset( + output_type=output_type, + vfname=vouttbl, + column_names=column_names, + ) From 82201804e988b31ae4a39ad3fa2107389f3f7e8f Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 21:22:36 +0800 Subject: [PATCH 021/218] pygmt.grdtrack: Add 'output_type' parameter for output in pandas/numpy/file formats (#3106) --- pygmt/src/grdtrack.py | 80 +++++++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/pygmt/src/grdtrack.py b/pygmt/src/grdtrack.py index 1e5df5ffbda..d71300fa3ba 100644 --- a/pygmt/src/grdtrack.py +++ b/pygmt/src/grdtrack.py @@ -2,15 +2,18 @@ grdtrack - Sample grids at specified (x,y) locations. """ +from typing import Literal + +import numpy as np import pandas as pd from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, use_alias, + validate_output_table_type, ) __doctest_skip__ = ["grdtrack"] @@ -44,7 +47,14 @@ w="wrap", ) @kwargs_to_strings(R="sequence", S="sequence", i="sequence_comma", o="sequence_comma") -def grdtrack(grid, points=None, newcolname=None, outfile=None, **kwargs): +def grdtrack( + grid, + points=None, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + newcolname=None, + **kwargs, +) -> pd.DataFrame | np.ndarray | None: r""" Sample grids at specified (x,y) locations. @@ -73,15 +83,12 @@ def grdtrack(grid, points=None, newcolname=None, outfile=None, **kwargs): points : str, {table-like} Pass in either a file name to an ASCII data table, a 2-D {table-classes}. - + {output_type} + {outfile} newcolname : str Required if ``points`` is a :class:`pandas.DataFrame`. 
The name for the new column in the track :class:`pandas.DataFrame` table where the sampled values will be placed. - - outfile : str - The file name for the output ASCII file. - resample : str **f**\|\ **p**\|\ **m**\|\ **r**\|\ **R**\ [**+l**] For track resampling (if ``crossprofile`` or ``profile`` are set) we @@ -258,13 +265,13 @@ def grdtrack(grid, points=None, newcolname=None, outfile=None, **kwargs): Returns ------- - track: pandas.DataFrame or None - Return type depends on whether the ``outfile`` parameter is set: + ret + Return type depends on ``outfile`` and ``output_type``: - - :class:`pandas.DataFrame` table with (x, y, ..., newcolname) if - ``outfile`` is not set - - None if ``outfile`` is set (track output will be stored in file set - by ``outfile``) + - ``None`` if ``outfile`` is set (output will be stored in file set by + ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) Example ------- @@ -291,30 +298,27 @@ def grdtrack(grid, points=None, newcolname=None, outfile=None, **kwargs): if hasattr(points, "columns") and newcolname is None: raise GMTInvalidInput("Please pass in a str to 'newcolname'") - with GMTTempFile(suffix=".csv") as tmpfile: - with Session() as lib: - with ( - lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, - lib.virtualfile_in( - check_kind="vector", data=points, required_data=False - ) as vintbl, - ): - kwargs["G"] = vingrd - if outfile is None: # Output to tmpfile if outfile is not set - outfile = tmpfile.name - lib.call_module( - module="grdtrack", - args=build_arg_string(kwargs, infile=vintbl, outfile=outfile), - ) + output_type = validate_output_table_type(output_type, outfile=outfile) - # Read temporary csv output to a pandas table - if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame - try: - column_names = [*points.columns.to_list(), newcolname] - result = pd.read_csv(tmpfile.name, sep="\t", names=column_names) - except AttributeError: # 'str' object has no attribute 'columns' - result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">") - elif outfile != tmpfile.name: # return None if outfile set, output in outfile - result = None + column_names = None + if output_type == "pandas" and isinstance(points, pd.DataFrame): + column_names = [*points.columns.to_list(), newcolname] - return result + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_in( + check_kind="vector", data=points, required_data=False + ) as vintbl, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + kwargs["G"] = vingrd + lib.call_module( + module="grdtrack", + args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + ) + return lib.virtualfile_to_dataset( + output_type=output_type, + vfname=vouttbl, + column_names=column_names, + ) From 855ebddc1debda9d297546177697aa255a930735 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 21:24:19 +0800 Subject: [PATCH 022/218] pygmt.blockm*: Add 'output_type' parameter for output in pandas/numpy/file formats (#3103) --- pygmt/src/blockm.py | 184 ++++++++++++++++++++++++++------------------ 1 file changed, 108 insertions(+), 76 deletions(-) diff --git a/pygmt/src/blockm.py b/pygmt/src/blockm.py index c863f32f3b0..3030234d707 100644 --- a/pygmt/src/blockm.py +++ b/pygmt/src/blockm.py @@ -1,22 +1,26 @@ """ -blockm - Block average (x, y, z) data tables by mean, median, or mode -estimation. 
+blockm - Block average (x, y, z) data tables by mean, median, or mode estimation. """ +from typing import Literal + +import numpy as np import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, use_alias, + validate_output_table_type, ) __doctest_skip__ = ["blockmean", "blockmedian", "blockmode"] -def _blockm(block_method, data, x, y, z, outfile, **kwargs): +def _blockm( + block_method, data, x, y, z, output_type, outfile, **kwargs +) -> pd.DataFrame | np.ndarray | None: r""" Block average (x, y, z) data tables by mean, median, or mode estimation. @@ -34,38 +38,34 @@ def _blockm(block_method, data, x, y, z, outfile, **kwargs): Returns ------- - output : pandas.DataFrame or None - Return type depends on whether the ``outfile`` parameter is set: + ret + Return type depends on ``outfile`` and ``output_type``: - - :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile`` - is not set - - None if ``outfile`` is set (filtered output will be stored in file - set by ``outfile``) + - ``None`` if ``outfile`` is set (output will be stored in file set by + ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) """ - with GMTTempFile(suffix=".csv") as tmpfile: - with Session() as lib: - with lib.virtualfile_in( + output_type = validate_output_table_type(output_type, outfile=outfile) + + column_names = None + if output_type == "pandas" and isinstance(data, pd.DataFrame): + column_names = data.columns.to_list() + + with Session() as lib: + with ( + lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, z=z, required_z=True - ) as vintbl: - # Run blockm* on data table - if outfile is None: - outfile = tmpfile.name - lib.call_module( - module=block_method, - args=build_arg_string(kwargs, infile=vintbl, outfile=outfile), - ) - - # Read temporary csv output to a pandas table - if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame - try: - column_names = data.columns.to_list() - result = pd.read_csv(tmpfile.name, sep="\t", names=column_names) - except AttributeError: # 'str' object has no attribute 'columns' - result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">") - elif outfile != tmpfile.name: # return None if outfile set, output in outfile - result = None - - return result + ) as vintbl, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + lib.call_module( + module=block_method, + args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + ) + return lib.virtualfile_to_dataset( + output_type=output_type, vfname=vouttbl, column_names=column_names + ) @fmt_docstring @@ -86,7 +86,15 @@ def _blockm(block_method, data, x, y, z, outfile, **kwargs): w="wrap", ) @kwargs_to_strings(I="sequence", R="sequence", i="sequence_comma", o="sequence_comma") -def blockmean(data=None, x=None, y=None, z=None, outfile=None, **kwargs): +def blockmean( + data=None, + x=None, + y=None, + z=None, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, +) -> pd.DataFrame | np.ndarray | None: r""" Block average (x, y, z) data tables by mean estimation. @@ -111,9 +119,9 @@ def blockmean(data=None, x=None, y=None, z=None, outfile=None, **kwargs): {table-classes}. x/y/z : 1-D arrays Arrays of x and y coordinates and values z of the data points. - + {output_type} + {outfile} {spacing} - summary : str [**m**\|\ **n**\|\ **s**\|\ **w**]. 
Type of summary values calculated by blockmean. @@ -122,12 +130,7 @@ def blockmean(data=None, x=None, y=None, z=None, outfile=None, **kwargs): - **n** - report the number of input points inside each block - **s** - report the sum of all z-values inside a block - **w** - report the sum of weights - {region} - - outfile : str - The file name for the output ASCII file. - {verbose} {aspatial} {binary} @@ -142,13 +145,13 @@ def blockmean(data=None, x=None, y=None, z=None, outfile=None, **kwargs): Returns ------- - output : pandas.DataFrame or None - Return type depends on whether the ``outfile`` parameter is set: + ret + Return type depends on ``outfile`` and ``output_type``: - - :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile`` - is not set. - - None if ``outfile`` is set (filtered output will be stored in file - set by ``outfile``). + - ``None`` if ``outfile`` is set (output will be stored in file set by + ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) Example ------- @@ -159,7 +162,14 @@ def blockmean(data=None, x=None, y=None, z=None, outfile=None, **kwargs): >>> data_bmean = pygmt.blockmean(data=data, region=[245, 255, 20, 30], spacing="5m") """ return _blockm( - block_method="blockmean", data=data, x=x, y=y, z=z, outfile=outfile, **kwargs + block_method="blockmean", + data=data, + x=x, + y=y, + z=z, + output_type=output_type, + outfile=outfile, + **kwargs, ) @@ -180,7 +190,15 @@ def blockmean(data=None, x=None, y=None, z=None, outfile=None, **kwargs): w="wrap", ) @kwargs_to_strings(I="sequence", R="sequence", i="sequence_comma", o="sequence_comma") -def blockmedian(data=None, x=None, y=None, z=None, outfile=None, **kwargs): +def blockmedian( + data=None, + x=None, + y=None, + z=None, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, +) -> pd.DataFrame | np.ndarray | None: r""" Block average (x, y, z) data tables by median estimation. @@ -205,14 +223,10 @@ def blockmedian(data=None, x=None, y=None, z=None, outfile=None, **kwargs): {table-classes}. x/y/z : 1-D arrays Arrays of x and y coordinates and values z of the data points. - + {output_type} + {outfile} {spacing} - {region} - - outfile : str - The file name for the output ASCII file. - {verbose} {aspatial} {binary} @@ -227,13 +241,13 @@ def blockmedian(data=None, x=None, y=None, z=None, outfile=None, **kwargs): Returns ------- - output : pandas.DataFrame or None - Return type depends on whether the ``outfile`` parameter is set: + ret + Return type depends on ``outfile`` and ``output_type``: - - :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile`` - is not set. - - None if ``outfile`` is set (filtered output will be stored in file - set by ``outfile``). + - ``None`` if ``outfile`` is set (output will be stored in file set by + ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) Example ------- @@ -246,7 +260,14 @@ def blockmedian(data=None, x=None, y=None, z=None, outfile=None, **kwargs): ... 
) """ return _blockm( - block_method="blockmedian", data=data, x=x, y=y, z=z, outfile=outfile, **kwargs + block_method="blockmedian", + data=data, + x=x, + y=y, + z=z, + output_type=output_type, + outfile=outfile, + **kwargs, ) @@ -267,7 +288,15 @@ def blockmedian(data=None, x=None, y=None, z=None, outfile=None, **kwargs): w="wrap", ) @kwargs_to_strings(I="sequence", R="sequence", i="sequence_comma", o="sequence_comma") -def blockmode(data=None, x=None, y=None, z=None, outfile=None, **kwargs): +def blockmode( + data=None, + x=None, + y=None, + z=None, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, +) -> pd.DataFrame | np.ndarray | None: r""" Block average (x, y, z) data tables by mode estimation. @@ -292,14 +321,10 @@ def blockmode(data=None, x=None, y=None, z=None, outfile=None, **kwargs): {table-classes}. x/y/z : 1-D arrays Arrays of x and y coordinates and values z of the data points. - + {output_type} + {outfile} {spacing} - {region} - - outfile : str - The file name for the output ASCII file. - {verbose} {aspatial} {binary} @@ -314,13 +339,13 @@ def blockmode(data=None, x=None, y=None, z=None, outfile=None, **kwargs): Returns ------- - output : pandas.DataFrame or None - Return type depends on whether the ``outfile`` parameter is set: + ret + Return type depends on ``outfile`` and ``output_type``: - - :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile`` - is not set. - - None if ``outfile`` is set (filtered output will be stored in file - set by ``outfile``). + - ``None`` if ``outfile`` is set (output will be stored in file set by + ``outfile``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) Example ------- @@ -331,5 +356,12 @@ def blockmode(data=None, x=None, y=None, z=None, outfile=None, **kwargs): >>> data_bmode = pygmt.blockmode(data=data, region=[245, 255, 20, 30], spacing="5m") """ return _blockm( - block_method="blockmode", data=data, x=x, y=y, z=z, outfile=outfile, **kwargs + block_method="blockmode", + data=data, + x=x, + y=y, + z=z, + output_type=output_type, + outfile=outfile, + **kwargs, ) From 4036322089bfaf567ed0564e9e5378252ddbe0f5 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 21:25:28 +0800 Subject: [PATCH 023/218] pygmt.grdvolume: Refactor to store output in virtual files instead of temporary files (#3102) --- pygmt/src/grdvolume.py | 60 +++++++++++++------------------ pygmt/tests/test_grdvolume.py | 66 ++++------------------------------- 2 files changed, 31 insertions(+), 95 deletions(-) diff --git a/pygmt/src/grdvolume.py b/pygmt/src/grdvolume.py index 1bb696e9e04..f6b8b0434a4 100644 --- a/pygmt/src/grdvolume.py +++ b/pygmt/src/grdvolume.py @@ -2,10 +2,12 @@ grdvolume - Calculate grid volume and area constrained by a contour. """ +from typing import Literal + +import numpy as np import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, @@ -24,7 +26,12 @@ V="verbose", ) @kwargs_to_strings(C="sequence", R="sequence") -def grdvolume(grid, output_type="pandas", outfile=None, **kwargs): +def grdvolume( + grid, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, + **kwargs, +) -> pd.DataFrame | np.ndarray | None: r""" Determine the volume between the surface of a grid and a plane. 
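[A usage sketch, not part of this patch: the contour/volume/area/height table now travels through a virtual file rather than a temporary CSV, but the call itself is unchanged. The grid request below is only illustrative; any grid accepted by grdvolume would do.]

    import pygmt

    # A coarse Earth-relief cutout over the same region used in the tests below.
    grid = pygmt.datasets.load_earth_relief(
        resolution="05m", region=[-53, -50, -22, -20]
    )
    # pandas.DataFrame by default; "numpy" and "file" behave as in the other modules.
    volumes = pygmt.grdvolume(grid=grid, contour=[200, 400, 50])
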
@@ -41,15 +48,8 @@ def grdvolume(grid, output_type="pandas", outfile=None, **kwargs): Parameters ---------- {grid} - output_type : str - Determine the format the output data will be returned in [Default is - ``pandas``]: - - - ``numpy`` - :class:`numpy.ndarray` - - ``pandas``- :class:`pandas.DataFrame` - - ``file`` - ASCII file (requires ``outfile``) - outfile : str - The file name for the output ASCII file. + {output_type} + {outfile} contour : str, float, or list *cval*\|\ *low/high/delta*\|\ **r**\ *low/high*\|\ **r**\ *cval*. Find area, volume and mean height (volume/area) inside and above the @@ -69,14 +69,13 @@ def grdvolume(grid, output_type="pandas", outfile=None, **kwargs): Returns ------- - ret : pandas.DataFrame or numpy.ndarray or None + ret Return type depends on ``outfile`` and ``output_type``: - - None if ``outfile`` is set (output will be stored in file set by + - ``None`` if ``outfile`` is set (output will be stored in file set by ``outfile``) - - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` - is not set (depends on ``output_type`` [Default is - :class:`pandas.DataFrame`]) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not set + (depends on ``output_type``) Example ------- @@ -103,22 +102,13 @@ def grdvolume(grid, output_type="pandas", outfile=None, **kwargs): """ output_type = validate_output_table_type(output_type, outfile=outfile) - with GMTTempFile() as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if outfile is None: - outfile = tmpfile.name - lib.call_module( - module="grdvolume", - args=build_arg_string(kwargs, infile=vingrd, outfile=outfile), - ) - - # Read temporary csv output to a pandas table - if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame - result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">") - elif outfile != tmpfile.name: # return None if outfile set, output in outfile - result = None - - if output_type == "numpy": - result = result.to_numpy() - return result + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + lib.call_module( + module="grdvolume", + args=build_arg_string(kwargs, infile=vingrd, outfile=vouttbl), + ) + return lib.virtualfile_to_dataset(output_type=output_type, vfname=vouttbl) diff --git a/pygmt/tests/test_grdvolume.py b/pygmt/tests/test_grdvolume.py index b56e76b1f19..e7cfeae6d27 100644 --- a/pygmt/tests/test_grdvolume.py +++ b/pygmt/tests/test_grdvolume.py @@ -2,15 +2,11 @@ Test pygmt.grdvolume. """ -from pathlib import Path - import numpy as np import numpy.testing as npt import pandas as pd import pytest from pygmt import grdvolume -from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import GMTTempFile from pygmt.helpers.testing import load_static_earth_relief @@ -22,14 +18,6 @@ def fixture_grid(): return load_static_earth_relief() -@pytest.fixture(scope="module", name="region") -def fixture_region(): - """ - Set the data region for the tests. - """ - return [-53, -50, -22, -20] - - @pytest.fixture(scope="module", name="data") def fixture_data(): """ @@ -47,57 +35,15 @@ def fixture_data(): return data -def test_grdvolume_format(grid, region): - """ - Test that correct formats are returned. 
- """ - grdvolume_default = grdvolume(grid=grid, region=region) - assert isinstance(grdvolume_default, pd.DataFrame) - grdvolume_array = grdvolume(grid=grid, output_type="numpy", region=region) - assert isinstance(grdvolume_array, np.ndarray) - grdvolume_df = grdvolume(grid=grid, output_type="pandas", region=region) - assert isinstance(grdvolume_df, pd.DataFrame) - - -def test_grdvolume_invalid_format(grid): - """ - Test that grdvolume fails with incorrect output_type argument. - """ - with pytest.raises(GMTInvalidInput): - grdvolume(grid=grid, output_type=1) - - -def test_grdvolume_no_outfile(grid): - """ - Test that grdvolume fails when output_type set to 'file' but no outfile is - specified. - """ - with pytest.raises(GMTInvalidInput): - grdvolume(grid=grid, output_type="file") - - @pytest.mark.benchmark -def test_grdvolume_no_outgrid(grid, data, region): +def test_grdvolume(grid, data): """ - Test the expected output of grdvolume with no output file set. + Test the basic functionality of grdvolume. """ test_output = grdvolume( - grid=grid, contour=[200, 400, 50], output_type="numpy", region=region + grid=grid, + contour=[200, 400, 50], + region=[-53, -50, -22, -20], ) + assert isinstance(test_output, pd.DataFrame) npt.assert_allclose(test_output, data) - - -def test_grdvolume_outgrid(grid, region): - """ - Test the expected output of grdvolume with an output file set. - """ - with GMTTempFile(suffix=".csv") as tmpfile: - result = grdvolume( - grid=grid, - contour=[200, 400, 50], - output_type="file", - outfile=tmpfile.name, - region=region, - ) - assert result is None # return value is None - assert Path(tmpfile.name).stat().st_size > 0 # check that outfile exists From e3d52e25ef7b8825e123798ca22786f4238f34d0 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 15 Mar 2024 21:26:29 +0800 Subject: [PATCH 024/218] pygmt.triangulate.delaunay_triples: Improve performance by storing output in virtual files (#3107) --- pygmt/src/triangulate.py | 61 +++++++++++++-------------------- pygmt/tests/test_triangulate.py | 7 ++-- 2 files changed, 28 insertions(+), 40 deletions(-) diff --git a/pygmt/src/triangulate.py b/pygmt/src/triangulate.py index e73ab92fe5e..62c0a24d80e 100644 --- a/pygmt/src/triangulate.py +++ b/pygmt/src/triangulate.py @@ -3,6 +3,9 @@ Cartesian data. """ +from typing import Literal + +import numpy as np import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( @@ -172,10 +175,10 @@ def delaunay_triples( y=None, z=None, *, - output_type="pandas", - outfile=None, + output_type: Literal["pandas", "numpy", "file"] = "pandas", + outfile: str | None = None, **kwargs, - ): + ) -> pd.DataFrame | np.ndarray | None: """ Delaunay triangle based gridding of Cartesian data. @@ -204,16 +207,8 @@ def delaunay_triples( {table-classes}. {projection} {region} - outfile : str or None - The name of the output ASCII file to store the results of the - histogram equalization in. 
- output_type : str - Determine the format the xyz data will be returned in [Default is - ``pandas``]: - - - ``numpy`` - :class:`numpy.ndarray` - - ``pandas``- :class:`pandas.DataFrame` - - ``file`` - ASCII file (requires ``outfile``) + {output_type} + {outfile} {verbose} {binary} {nodata} @@ -226,13 +221,13 @@ def delaunay_triples( Returns ------- - ret : pandas.DataFrame or numpy.ndarray or None + ret Return type depends on ``outfile`` and ``output_type``: - - None if ``outfile`` is set (output will be stored in file set by + - ``None`` if ``outfile`` is set (output will be stored in file set by ``outfile``) - - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if - ``outfile`` is not set (depends on ``output_type``) + - :class:`pandas.DataFrame` or :class:`numpy.ndarray` if ``outfile`` is not + set (depends on ``output_type``) Note ---- @@ -243,25 +238,15 @@ def delaunay_triples( """ output_type = validate_output_table_type(output_type, outfile) - with GMTTempFile(suffix=".txt") as tmpfile: - with Session() as lib: - with lib.virtualfile_in( + with Session() as lib: + with ( + lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, z=z, required_z=False - ) as vintbl: - if outfile is None: - outfile = tmpfile.name - lib.call_module( - module="triangulate", - args=build_arg_string(kwargs, infile=vintbl, outfile=outfile), - ) - - if outfile == tmpfile.name: - # if user did not set outfile, return pd.DataFrame - result = pd.read_csv(outfile, sep="\t", header=None) - elif outfile != tmpfile.name: - # return None if outfile set, output in outfile - result = None - - if output_type == "numpy": - result = result.to_numpy() - return result + ) as vintbl, + lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl, + ): + lib.call_module( + module="triangulate", + args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + ) + return lib.virtualfile_to_dataset(output_type=output_type, vfname=vouttbl) diff --git a/pygmt/tests/test_triangulate.py b/pygmt/tests/test_triangulate.py index 154bc82b09f..75cccbf17ab 100644 --- a/pygmt/tests/test_triangulate.py +++ b/pygmt/tests/test_triangulate.py @@ -44,7 +44,8 @@ def fixture_expected_dataframe(): [4, 6, 1], [3, 4, 2], [9, 3, 8], - ] + ], + dtype=float, ) @@ -116,7 +117,9 @@ def test_delaunay_triples_outfile(dataframe, expected_dataframe): assert len(record) == 1 # check that only one warning was raised assert result is None # return value is None assert Path(tmpfile.name).stat().st_size > 0 - temp_df = pd.read_csv(filepath_or_buffer=tmpfile.name, sep="\t", header=None) + temp_df = pd.read_csv( + filepath_or_buffer=tmpfile.name, sep="\t", header=None, dtype=float + ) pd.testing.assert_frame_equal(left=temp_df, right=expected_dataframe) From bcbbcad1ffca6f73fcc8ebc2872f228aaaa2266e Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Sat, 16 Mar 2024 18:37:04 +1300 Subject: [PATCH 025/218] Fix ghostscript typo in environment.yml (#3113) --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index dc1f73c6d0e..08d54e1cb41 100644 --- a/environment.yml +++ b/environment.yml @@ -6,7 +6,7 @@ dependencies: - python=3.12 # Required dependencies - gmt=6.5.0 - - ghotscript=10.03.0 + - ghostscript=10.03.0 - numpy>=1.23 - pandas>=1.5 - xarray>=2022.03 From 6fd4a00c29a6fe9de9b541823190697a6ec751f7 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 18 Mar 2024 16:12:17 +0800 Subject: [PATCH 026/218] CI: Ignore ResearchGate homepage in the links 
check (#3118) --- .github/workflows/check-links.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 6389ae03725..d6bfc339d32 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -53,7 +53,7 @@ jobs: --exclude "^https://www.generic-mapping-tools.org/remote-datasets/%s$" --exclude "^https://hackmd.io/@pygmt" --exclude "^https://doi.org" - --exclude "^https://www.researchgate.net/project/" + --exclude "^https://www.researchgate.net/" --exclude "^https://test.pypi.org/simple/" --verbose "repository/**/*.rst" From 29f1c2f7f66fd41a9c76602c298b99cdef623f41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Mon, 18 Mar 2024 15:02:27 +0100 Subject: [PATCH 027/218] Datetime tutorial: Change symbol usde in first code example to fix failing under Windows (#3121) --- examples/tutorials/advanced/date_time_charts.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/tutorials/advanced/date_time_charts.py b/examples/tutorials/advanced/date_time_charts.py index 69622088675..4f5f95d1253 100644 --- a/examples/tutorials/advanced/date_time_charts.py +++ b/examples/tutorials/advanced/date_time_charts.py @@ -29,8 +29,8 @@ # data points stored in the list ``x``. Additionally, dates are passed into # the ``region`` parameter in the format ``[x_start, x_end, y_start, y_end]``, # where the date range is plotted on the x-axis. An additional notable -# parameter is ``style``, where it's specified that data points are to be -# plotted in an **X** shape with a size of 0.3 centimeters. +# parameter is ``style``, where it's specified that data points are plotted +# as circles with a diameter of 0.3 centimeters. x = [ datetime.date(2010, 6, 1), @@ -47,7 +47,7 @@ frame=["WSen", "afg"], x=x, y=y, - style="x0.3c", + style="c0.3c", pen="1p", ) fig.show() From 3b1825c8bd98221f8ab0582196074e69b943b8fe Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 19 Mar 2024 01:11:03 +0800 Subject: [PATCH 028/218] pygmt.grdvolume: Fix a failing doctest (#3122) --- pygmt/src/grdvolume.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pygmt/src/grdvolume.py b/pygmt/src/grdvolume.py index f6b8b0434a4..f53a57e9fd7 100644 --- a/pygmt/src/grdvolume.py +++ b/pygmt/src/grdvolume.py @@ -93,12 +93,12 @@ def grdvolume( ... grid=grid, contour=[200, 400, 50], output_type="pandas" ... 
) >>> print(output_dataframe) - 0 1 2 3 - 0 200 2.323600e+12 8.523815e+14 366.836554 - 1 250 2.275864e+12 7.371655e+14 323.905736 - 2 300 2.166707e+12 6.258570e+14 288.851699 - 3 350 2.019284e+12 5.207732e+14 257.899955 - 4 400 1.870441e+12 4.236191e+14 226.480847 + 0 1 2 3 + 0 200.0 2.323600e+12 8.523815e+14 366.836554 + 1 250.0 2.275864e+12 7.371655e+14 323.905736 + 2 300.0 2.166707e+12 6.258570e+14 288.851699 + 3 350.0 2.019284e+12 5.207732e+14 257.899955 + 4 400.0 1.870441e+12 4.236191e+14 226.480847 """ output_type = validate_output_table_type(output_type, outfile=outfile) From f21a623ccf99de539d0d924d9b7c03a14865f625 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Tue, 19 Mar 2024 01:50:33 +0100 Subject: [PATCH 029/218] datetime tutorial: Rewrap to 88 characters (#3123) --- .../tutorials/advanced/date_time_charts.py | 180 ++++++++---------- 1 file changed, 83 insertions(+), 97 deletions(-) diff --git a/examples/tutorials/advanced/date_time_charts.py b/examples/tutorials/advanced/date_time_charts.py index 4f5f95d1253..df31b85a198 100644 --- a/examples/tutorials/advanced/date_time_charts.py +++ b/examples/tutorials/advanced/date_time_charts.py @@ -2,15 +2,14 @@ Plotting datetime charts ======================== -PyGMT accepts a variety of datetime objects to plot data and create charts. -Aside from the built-in Python ``datetime`` module, PyGMT supports inputs -containing ISO formatted strings as well as objects generated with -``numpy``, ``pandas``, and ``xarray``. These data types can be used to plot -specific points as well as get passed into the ``region`` parameter to -create a range of the data on an axis. - -The following examples will demonstrate how to create plots using these -different datetime objects. +PyGMT accepts a variety of datetime objects to plot data and create charts. Aside from +the built-in Python ``datetime`` module, PyGMT supports inputs containing ISO formatted +strings as well as objects generated with ``numpy``, ``pandas``, and ``xarray``. These +data types can be used to plot specific points as well as get passed into the ``region`` +parameter to create a range of the data on an axis. + +The following examples will demonstrate how to create plots using these different +datetime objects. """ # %% @@ -25,12 +24,11 @@ # Using Python's ``datetime`` # --------------------------- # -# In this example, Python's built-in ``datetime`` module is used to create -# data points stored in the list ``x``. Additionally, dates are passed into -# the ``region`` parameter in the format ``[x_start, x_end, y_start, y_end]``, -# where the date range is plotted on the x-axis. An additional notable -# parameter is ``style``, where it's specified that data points are plotted -# as circles with a diameter of 0.3 centimeters. +# In this example, Python's built-in ``datetime`` module is used to create data points +# stored in the list ``x``. Additionally, dates are passed into the ``region`` parameter +# in the format ``[x_start, x_end, y_start, y_end]``, where the date range is plotted on +# the x-axis. An additional notable parameter is ``style``, where it's specified that +# data points are plotted as circles with a diameter of 0.3 centimeters. x = [ datetime.date(2010, 6, 1), @@ -53,17 +51,16 @@ fig.show() # %% -# In addition to specifying the date, ``datetime`` supports the time at -# which the data points were recorded. 
Using :meth:`datetime.datetime` the -# ``region`` parameter as well as data points can be created with both date and -# time information. +# In addition to specifying the date, ``datetime`` supports the time at which the data +# points were recorded. Using :meth:`datetime.datetime` the ``region`` parameter as well +# as data points can be created with both date and time information. # # Some notable differences to the previous example include: # -# - Modifying ``frame`` to only include West (left) and South (bottom) borders, -# and removing grid lines -# - Using circles to plot data points defined by ``c`` in the argument passed -# through the ``style`` parameter +# - Modifying ``frame`` to only include West (left) and South (bottom) borders, and +# removing grid lines +# - Using circles to plot data points defined by ``c`` in the argument passed through +# the ``style`` parameter x = [ datetime.datetime(2021, 1, 1, 3, 45, 1), @@ -96,20 +93,21 @@ # Using ISO Format # ---------------- # -# In addition to Python's ``datetime`` module, PyGMT also supports passing -# dates in ISO format. Basic ISO strings are formatted as ``YYYY-MM-DD`` with -# each ``-`` delineated section marking the four-digit year value, two-digit -# month value, and two-digit day value, respectively. +# In addition to Python's ``datetime`` module, PyGMT also supports passing dates in ISO +# format. Basic ISO strings are formatted as ``YYYY-MM-DD`` with each ``-`` delineated +# section marking the four-digit year value, two-digit month value, and two-digit day +# value, respectively. # -# For including the time into an ISO string, the ``T`` character is used, as it -# can be seen in the following example. This character is immediately followed -# by a string formatted as ``hh:mm:ss`` where each ``:`` delineated section -# marking the two-digit hour value, two-digit minute value, and two-digit -# second value, respectively. The figure in the following example is plotted -# over a horizontal range of one year from 2016-01-01 to 2017-01-01. +# For including the time into an ISO string, the ``T`` character is used, as it can be +# seen in the following example. This character is immediately followed by a string +# formatted as ``hh:mm:ss`` where each ``:`` delineated section marking the two-digit +# hour value, two-digit minute value, and two-digit second value, respectively. The +# figure in the following example is plotted over a horizontal range of one year from +# 2016-01-01 to 2017-01-01. x = ["2016-02-01", "2016-06-04T14", "2016-10-04T00:00:15", "2016-12-01T05:00:15"] y = [1, 3, 5, 2] + fig = pygmt.Figure() fig.plot( projection="X10c/5c", @@ -126,23 +124,23 @@ # %% # .. note:: # -# PyGMT doesn't recognize non-ISO datetime strings like "Jun 05, 2018". If -# your data contain non-ISO datetime strings, you can convert them to a -# recognized format using :func:`pandas.to_datetime` and then pass it to -# PyGMT. +# PyGMT doesn't recognize non-ISO datetime strings like "Jun 05, 2018". If your data +# contain non-ISO datetime strings, you can convert them to a recognized format +# using :func:`pandas.to_datetime` and then pass it to PyGMT. # %% # Mixing and matching Python ``datetime`` and ISO dates # ----------------------------------------------------- # -# The following example provides context on how both ``datetime`` and ISO date -# data can be plotted using PyGMT. 
This can be helpful when dates and times are -# coming from different sources, meaning conversions do not need to take place -# between ISO and datetime in order to create valid plots. +# The following example provides context on how both ``datetime`` and ISO date data can +# be plotted using PyGMT. This can be helpful when dates and times are coming from +# different sources, meaning conversions do not need to take place between ISO and +# datetime in order to create valid plots. x = ["2020-02-01", "2020-06-04", "2020-10-04", datetime.datetime(2021, 1, 15)] y = [1.3, 2.2, 4.1, 3] + fig = pygmt.Figure() fig.plot( projection="X10c/5c", @@ -162,14 +160,13 @@ # ------------------------------- # # In the following example, :func:`pandas.date_range` produces a list of -# :class:`pandas.DatetimeIndex` objects, which is used to pass date data to -# the PyGMT figure. -# Specifically ``x`` contains 7 different :class:`pandas.DatetimeIndex` -# objects, with the number being manipulated by the ``periods`` parameter. Each -# period begins at the start of a business quarter as denoted by BQS when -# passed to the ``freq`` parameter. The initial date is the first argument -# that is passed to :func:`pandas.date_range` and it marks the first data point -# in the list ``x`` that will be plotted. +# :class:`pandas.DatetimeIndex` objects, which is used to pass date data to the PyGMT +# figure. Specifically ``x`` contains 7 different :class:`pandas.DatetimeIndex` objects, +# with the number being manipulated by the ``periods`` parameter. Each period begins at +# the start of a business quarter as denoted by BQS when passed to the ``freq`` +# parameter. The initial date is the first argument that is passed to +# :func:`pandas.date_range` and it marks the first data point in the list ``x`` that +# will be plotted. x = pd.date_range("2018-03-01", periods=7, freq="BQS") y = [4, 5, 6, 8, 6, 3, 5] @@ -192,13 +189,12 @@ # Using :class:`xarray.DataArray` # ------------------------------- # -# In this example, instead of using a list of :class:`pandas.DatetimeIndex` -# objects, ``x`` is initialized as an :class:`xarray.DataArray` object. This -# object provides a wrapper around regular PyData formats. It also allows the -# data to have labeled dimensions while supporting operations that use various -# pieces of metadata. The following code uses :func:`pandas.date_range` to fill -# the DataArray with data, but this is not essential for the creation of a -# valid DataArray. +# In this example, instead of using a list of :class:`pandas.DatetimeIndex` objects, +# ``x`` is initialized as an :class:`xarray.DataArray` object. This object provides a +# wrapper around regular PyData formats. It also allows the data to have labeled +# dimensions while supporting operations that use various pieces of metadata. The +# following code uses :func:`pandas.date_range` to fill the DataArray with data, but +# this is not essential for the creation of a valid DataArray. x = xr.DataArray(data=pd.date_range(start="2020-01-01", periods=4, freq="Q")) y = [4, 7, 5, 6] @@ -221,11 +217,10 @@ # Using :class:`numpy.datetime64` # ------------------------------- # -# In this example, instead of using :func:`pd.date_range`, ``x`` is -# initialized as an ``np.array`` object. Similar to :class:`xarray.DataArray` -# this wraps the dataset before passing it as an argument. However, -# ``np.array`` objects use less memory and allow developers to specify -# data types. 
+# In this example, instead of using :func:`pd.date_range`, ``x`` is initialized as an +# ``np.array`` object. Similar to :class:`xarray.DataArray` this wraps the dataset +# before passing it as an argument. However, ``np.array`` objects use less memory and +# allow developers to specify data types. x = np.array(["2010-06-01", "2011-06-01T12", "2012-01-01T12:34:56"], dtype="datetime64") y = [2, 7, 5] @@ -248,12 +243,11 @@ # Generating an automatic region # ------------------------------ # -# Another way of creating charts involving datetime data can be done by -# automatically generating the region of the plot. This can be done by -# passing the DataFrame to :func:`pygmt.info`, which will find the maximum and -# minimum values for each column and create a list that could be passed as -# region. Additionally, the ``spacing`` parameter can be used to increase the -# range past the maximum and minimum data points. +# Another way of creating charts involving datetime data can be done by automatically +# generating the region of the plot. This can be done by passing the DataFrame to +# :func:`pygmt.info`, which will find the maximum and minimum values for each column and +# create a list that could be passed as region. Additionally, the ``spacing`` parameter +# can be used to increase the range past the maximum and minimum data points. data = [ ["20200712", 1000], @@ -266,12 +260,11 @@ ] df = pd.DataFrame(data, columns=["Date", "Score"]) df.Date = pd.to_datetime(df["Date"], format="%Y%m%d") - -fig = pygmt.Figure() region = pygmt.info( data=df[["Date", "Score"]], per_column=True, spacing=(700, 700), coltypes="T" ) +fig = pygmt.Figure() fig.plot( region=region, projection="X15c/10c", @@ -282,7 +275,6 @@ pen="1p", fill="green3", ) - fig.show() @@ -290,23 +282,20 @@ # Setting Primary and Secondary Time Axes # --------------------------------------- # -# This example focuses on annotating the axes and setting the interval in which -# the annotations should appear. All of these modifications are passed -# to the ``frame`` parameter and each item in that list modifies a specific -# aspect of the frame. +# This example focuses on annotating the axes and setting the interval in which the +# annotations should appear. All of these modifications are passed to the ``frame`` +# parameter and each item in that list modifies a specific aspect of the frame. # -# Adding ``"WS"`` means that only the Western/Left (**W**) and Southern/Bottom -# (**S**) borders of the plot are annotated. For more information on this, -# please refer to the :doc:`Frames, ticks, titles, and labels tutorial -# `. +# Adding ``"WS"`` means that only the Western/Left (**W**) and Southern/Bottom (**S**) +# borders of the plot are annotated. For more information on this, please refer to the +# :doc:`Frames, ticks, titles, and labels tutorial `. # -# Another important item in the list passed to ``frame`` is ``"sxa1Of1D"``. -# This string modifies the secondary annotation (**s**) of the x-axis (**x**). -# Specifically, it sets the main annotation and major tick spacing interval -# to one month (**a1O**) (capital letter O, not zero). Additionally, it sets -# the minor tick spacing interval to 1 day (**f1D**). To use the month's name -# instead of its number set :gmt-term:`FORMAT_DATE_MAP` to **o**. More -# information on configuring date formats can be found at +# Another important item in the list passed to ``frame`` is ``"sxa1Of1D"``. This string +# modifies the secondary annotation (**s**) of the x-axis (**x**). 
Specifically, it sets +# the main annotation and major tick spacing interval to one month (**a1O**) (capital +# letter O, not zero). Additionally, it sets the minor tick spacing interval to 1 day +# (**f1D**). To use the month name instead of its number set :gmt-term:`FORMAT_DATE_MAP` +# to **o**. More information on configuring date formats can be found at # :gmt-term:`FORMAT_DATE_MAP`, :gmt-term:`FORMAT_DATE_IN`, and # :gmt-term:`FORMAT_DATE_OUT`. @@ -325,22 +314,19 @@ pen="1p", fill="green3", ) - fig.show() # %% -# The same concept shown above can be applied to smaller as well as larger -# intervals. In this example, data are plotted for different times throughout -# two days. The primary x-axis annotations are modified to repeat every 6 -# hours, and the secondary x-axis annotations repeat every day and show the -# day of the week. +# The same concept shown above can be applied to smaller as well as larger intervals. In +# this example, data are plotted for different times throughout two days. The primary +# x-axis annotations are modified to repeat every 6 hours, and the secondary x-axis +# annotations repeat every day and show the day of the week. # -# Another notable mention in this example is setting -# :gmt-term:`FORMAT_CLOCK_MAP` to **-hhAM** which specifies the format used -# for time. In this case, leading zeros are removed using (**-**), and only -# hours are displayed. Additionally, an AM/PM system is used instead of a -# 24-hour system. More information on configuring time formats can be found -# at :gmt-term:`FORMAT_CLOCK_MAP`, :gmt-term:`FORMAT_CLOCK_IN`, and +# Another notable mention in this example is setting :gmt-term:`FORMAT_CLOCK_MAP` to +# **-hhAM** which specifies the format used for time. In this case, leading zeros are +# removed using (**-**), and only hours are displayed. Additionally, an AM/PM system is +# used instead of a 24-hour system. More information on configuring time formats can be +# found at :gmt-term:`FORMAT_CLOCK_MAP`, :gmt-term:`FORMAT_CLOCK_IN`, and # :gmt-term:`FORMAT_CLOCK_OUT`. x = pd.date_range("2021-04-15", periods=8, freq="6H") From 9a6da0f2a3404212cfbc0a80e1f782324ce94ef2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 07:23:18 +0800 Subject: [PATCH 030/218] Bump dawidd6/action-download-artifact from 3.1.2 to 3.1.4 (#3126) Bumps [dawidd6/action-download-artifact](https://github.com/dawidd6/action-download-artifact) from 3.1.2 to 3.1.4. - [Release notes](https://github.com/dawidd6/action-download-artifact/releases) - [Commits](https://github.com/dawidd6/action-download-artifact/compare/v3.1.2...v3.1.4) --- updated-dependencies: - dependency-name: dawidd6/action-download-artifact dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- .github/workflows/ci_docs.yml | 2 +- .github/workflows/ci_doctests.yaml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- .github/workflows/ci_tests_legacy.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 8c32d8976b6..7d052f9ed0e 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -64,7 +64,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.2 + uses: dawidd6/action-download-artifact@v3.1.4 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 6f3a969cbfd..40258c943f8 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -111,7 +111,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.2 + uses: dawidd6/action-download-artifact@v3.1.4 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index 09a1795d4b2..a53f7eb5c1d 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -70,7 +70,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.2 + uses: dawidd6/action-download-artifact@v3.1.4 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 80e4911da2b..0690372aa69 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -127,7 +127,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.2 + uses: dawidd6/action-download-artifact@v3.1.4 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 79c768c5c4e..644a149a2c2 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -134,7 +134,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.2 + uses: dawidd6/action-download-artifact@v3.1.4 with: workflow: cache_data.yaml workflow_conclusion: success diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index 629946ca7ee..d971923b8dd 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -82,7 +82,7 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.2 + uses: dawidd6/action-download-artifact@v3.1.4 with: workflow: cache_data.yaml workflow_conclusion: success From ce1ae824ee99c8072ac6fe79d0d239584ad2edec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 07:24:12 +0800 Subject: [PATCH 031/218] Bump shogo82148/actions-upload-release-asset from 
1.7.2 to 1.7.4 (#3125) Bumps [shogo82148/actions-upload-release-asset](https://github.com/shogo82148/actions-upload-release-asset) from 1.7.2 to 1.7.4. - [Release notes](https://github.com/shogo82148/actions-upload-release-asset/releases) - [Commits](https://github.com/shogo82148/actions-upload-release-asset/compare/v1.7.2...v1.7.4) --- updated-dependencies: - dependency-name: shogo82148/actions-upload-release-asset dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release-baseline-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-baseline-images.yml b/.github/workflows/release-baseline-images.yml index f70a27cdcf6..9f47ad7680b 100644 --- a/.github/workflows/release-baseline-images.yml +++ b/.github/workflows/release-baseline-images.yml @@ -35,7 +35,7 @@ jobs: shasum -a 256 baseline-images.zip - name: Upload baseline image as a release asset - uses: shogo82148/actions-upload-release-asset@v1.7.2 + uses: shogo82148/actions-upload-release-asset@v1.7.4 with: upload_url: ${{ github.event.release.upload_url }} asset_path: baseline-images.zip From 7fd8253aceadf7d136ff02016cd90141ff69f2bd Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 20 Mar 2024 09:18:09 +0800 Subject: [PATCH 032/218] clib: Change the parameter order and set output_type to pandas in virtualfile_to_dataset (#3124) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/clib/session.py | 16 ++++++++-------- pygmt/src/blockm.py | 2 +- pygmt/src/filter1d.py | 2 +- pygmt/src/grd2xyz.py | 2 +- pygmt/src/grdhisteq.py | 2 +- pygmt/src/grdtrack.py | 2 +- pygmt/src/grdvolume.py | 2 +- pygmt/src/project.py | 2 +- pygmt/src/select.py | 2 +- pygmt/src/triangulate.py | 2 +- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index e2feb9cf857..6e6e495e244 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1740,8 +1740,8 @@ def read_virtualfile( def virtualfile_to_dataset( self, - output_type: Literal["pandas", "numpy", "file"], vfname: str, + output_type: Literal["pandas", "numpy", "file"] = "pandas", column_names: list[str] | None = None, ) -> pd.DataFrame | np.ndarray | None: """ @@ -1751,15 +1751,15 @@ def virtualfile_to_dataset( Parameters ---------- + vfname + The virtual file name that stores the result data. Required for ``"pandas"`` + and ``"numpy"`` output type. output_type Desired output type of the result data. - ``"pandas"`` will return a :class:`pandas.DataFrame` object. - ``"numpy"`` will return a :class:`numpy.ndarray` object. - ``"file"`` means the result was saved to a file and will return ``None``. - vfname - The virtual file name that stores the result data. Required for ``"pandas"`` - and ``"numpy"`` output type. column_names The column names for the :class:`pandas.DataFrame` output. @@ -1795,7 +1795,7 @@ def virtualfile_to_dataset( ... ) as vouttbl: ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") ... result = lib.virtualfile_to_dataset( - ... output_type="file", vfname=vouttbl + ... vfname=vouttbl, output_type="file" ... ) ... assert result is None ... assert Path(outtmp.name).stat().st_size > 0 @@ -1805,7 +1805,7 @@ def virtualfile_to_dataset( ... with lib.virtualfile_out(kind="dataset") as vouttbl: ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") ... 
outnp = lib.virtualfile_to_dataset( - ... output_type="numpy", vfname=vouttbl + ... vfname=vouttbl, output_type="numpy" ... ) ... assert isinstance(outnp, np.ndarray) ... @@ -1814,7 +1814,7 @@ def virtualfile_to_dataset( ... with lib.virtualfile_out(kind="dataset") as vouttbl: ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") ... outpd = lib.virtualfile_to_dataset( - ... output_type="pandas", vfname=vouttbl + ... vfname=vouttbl, output_type="pandas" ... ) ... assert isinstance(outpd, pd.DataFrame) ... @@ -1823,8 +1823,8 @@ def virtualfile_to_dataset( ... with lib.virtualfile_out(kind="dataset") as vouttbl: ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") ... outpd2 = lib.virtualfile_to_dataset( - ... output_type="pandas", ... vfname=vouttbl, + ... output_type="pandas", ... column_names=["col1", "col2", "col3", "coltext"], ... ) ... assert isinstance(outpd2, pd.DataFrame) diff --git a/pygmt/src/blockm.py b/pygmt/src/blockm.py index 3030234d707..1798da62e9e 100644 --- a/pygmt/src/blockm.py +++ b/pygmt/src/blockm.py @@ -64,7 +64,7 @@ def _blockm( args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset( - output_type=output_type, vfname=vouttbl, column_names=column_names + vfname=vouttbl, output_type=output_type, column_names=column_names ) diff --git a/pygmt/src/filter1d.py b/pygmt/src/filter1d.py index 3bb08cf6ff2..32e046e2e59 100644 --- a/pygmt/src/filter1d.py +++ b/pygmt/src/filter1d.py @@ -123,4 +123,4 @@ def filter1d( module="filter1d", args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), ) - return lib.virtualfile_to_dataset(output_type=output_type, vfname=vouttbl) + return lib.virtualfile_to_dataset(vfname=vouttbl, output_type=output_type) diff --git a/pygmt/src/grd2xyz.py b/pygmt/src/grd2xyz.py index ea1c52cb224..4c6d3eb224f 100644 --- a/pygmt/src/grd2xyz.py +++ b/pygmt/src/grd2xyz.py @@ -168,5 +168,5 @@ def grd2xyz( args=build_arg_string(kwargs, infile=vingrd, outfile=vouttbl), ) return lib.virtualfile_to_dataset( - output_type=output_type, vfname=vouttbl, column_names=column_names + vfname=vouttbl, output_type=output_type, column_names=column_names ) diff --git a/pygmt/src/grdhisteq.py b/pygmt/src/grdhisteq.py index 880368f1a7c..b0285e4e3d5 100644 --- a/pygmt/src/grdhisteq.py +++ b/pygmt/src/grdhisteq.py @@ -239,8 +239,8 @@ def compute_bins( ) result = lib.virtualfile_to_dataset( - output_type=output_type, vfname=vouttbl, + output_type=output_type, column_names=["start", "stop", "bin_id"], ) if output_type == "pandas": diff --git a/pygmt/src/grdtrack.py b/pygmt/src/grdtrack.py index d71300fa3ba..930cf00656f 100644 --- a/pygmt/src/grdtrack.py +++ b/pygmt/src/grdtrack.py @@ -318,7 +318,7 @@ def grdtrack( args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset( - output_type=output_type, vfname=vouttbl, + output_type=output_type, column_names=column_names, ) diff --git a/pygmt/src/grdvolume.py b/pygmt/src/grdvolume.py index f53a57e9fd7..c651163076c 100644 --- a/pygmt/src/grdvolume.py +++ b/pygmt/src/grdvolume.py @@ -111,4 +111,4 @@ def grdvolume( module="grdvolume", args=build_arg_string(kwargs, infile=vingrd, outfile=vouttbl), ) - return lib.virtualfile_to_dataset(output_type=output_type, vfname=vouttbl) + return lib.virtualfile_to_dataset(vfname=vouttbl, output_type=output_type) diff --git a/pygmt/src/project.py b/pygmt/src/project.py index 833c58ce299..ab0e6ba7f8d 100644 --- a/pygmt/src/project.py +++ b/pygmt/src/project.py @@ -256,7 +256,7 @@ def project( 
args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset( - output_type=output_type, vfname=vouttbl, + output_type=output_type, column_names=column_names, ) diff --git a/pygmt/src/select.py b/pygmt/src/select.py index 2e9b97299a5..fde4f88b47f 100644 --- a/pygmt/src/select.py +++ b/pygmt/src/select.py @@ -221,7 +221,7 @@ def select( args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset( - output_type=output_type, vfname=vouttbl, + output_type=output_type, column_names=column_names, ) diff --git a/pygmt/src/triangulate.py b/pygmt/src/triangulate.py index 62c0a24d80e..7a64178c99a 100644 --- a/pygmt/src/triangulate.py +++ b/pygmt/src/triangulate.py @@ -249,4 +249,4 @@ def delaunay_triples( module="triangulate", args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), ) - return lib.virtualfile_to_dataset(output_type=output_type, vfname=vouttbl) + return lib.virtualfile_to_dataset(vfname=vouttbl, output_type=output_type) From 598154a0d498bf6ce6ac33aebf516d3db01f295a Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 20 Mar 2024 10:45:31 +0800 Subject: [PATCH 033/218] TYP: Add type hints to the Figure.tilemap method (#3095) --- pygmt/src/tilemap.py | 118 +++++++++++++++++++------------------------ 1 file changed, 52 insertions(+), 66 deletions(-) diff --git a/pygmt/src/tilemap.py b/pygmt/src/tilemap.py index 35e41e1c414..168b2a2131f 100644 --- a/pygmt/src/tilemap.py +++ b/pygmt/src/tilemap.py @@ -2,7 +2,7 @@ tilemap - Plot XYZ tile maps. """ -from __future__ import annotations +from typing import Literal from pygmt.clib import Session from pygmt.datasets.tile_map import load_tile_map @@ -10,9 +10,11 @@ try: import rioxarray # noqa: F401 + from xyzservices import TileProvider _HAS_RIOXARRAY = True except ImportError: + TileProvider = None _HAS_RIOXARRAY = False @@ -34,12 +36,12 @@ @kwargs_to_strings(c="sequence_comma", p="sequence") # R="sequence", def tilemap( self, - region, - zoom="auto", - source=None, - lonlat=True, - wait=0, - max_retries=2, + region: list, + zoom: int | Literal["auto"] = "auto", + source: TileProvider | str | None = None, + lonlat: bool = True, + wait: int = 0, + max_retries: int = 2, zoom_adjust: int | None = None, **kwargs, ): @@ -47,73 +49,59 @@ def tilemap( Plot an XYZ tile map. This method loads XYZ tile maps from a tile server or local file using - :func:`pygmt.datasets.load_tile_map` into a georeferenced form, and plots - the tiles as a basemap or overlay using :meth:`pygmt.Figure.grdimage`. + :func:`pygmt.datasets.load_tile_map` into a georeferenced form, and plots the tiles + as a basemap or overlay using :meth:`pygmt.Figure.grdimage`. **Note**: By default, standard web map tiles served in a Spherical Mercator (EPSG:3857) Cartesian format will be reprojected to a geographic coordinate - reference system (OGC:WGS84) and plotted with longitude/latitude bounds - when ``lonlat=True``. If reprojection is not desired, please set - ``lonlat=False`` and provide Spherical Mercator (EPSG:3857) coordinates to - the ``region`` parameter. + reference system (OGC:WGS84) and plotted with longitude/latitude bounds when + ``lonlat=True``. If reprojection is not desired, please set ``lonlat=False`` and + provide Spherical Mercator (EPSG:3857) coordinates to the ``region`` parameter. {aliases} Parameters ---------- - region : list - The bounding box of the map in the form of a list [*xmin*, *xmax*, - *ymin*, *ymax*]. 
These coordinates should be in longitude/latitude if - ``lonlat=True`` or Spherical Mercator (EPSG:3857) if ``lonlat=False``. - - zoom : int or str - Optional. Level of detail. Higher levels (e.g. ``22``) mean a zoom - level closer to the Earth's surface, with more tiles covering a smaller - geographical area and thus more detail. Lower levels (e.g. ``0``) mean - a zoom level further from the Earth's surface, with less tiles covering - a larger geographical area and thus less detail [Default is - ``"auto"`` to automatically determine the zoom level based on the - bounding box region extent]. + region + The bounding box of the map in the form of a list [*xmin*, *xmax*, *ymin*, + *ymax*]. These coordinates should be in longitude/latitude if ``lonlat=True`` or + Spherical Mercator (EPSG:3857) if ``lonlat=False``. + zoom + Level of detail. Higher levels (e.g. ``22``) mean a zoom level closer to the + Earth's surface, with more tiles covering a smaller geographical area and thus + more detail. Lower levels (e.g. ``0``) mean a zoom level further from the + Earth's surface, with less tiles covering a larger geographical area and thus + less detail. Default is ``"auto"`` to automatically determine the zoom level + based on the bounding box region extent. .. note:: The maximum possible zoom level may be smaller than ``22``, and depends on what is supported by the chosen web tile provider source. - - source : xyzservices.TileProvider or str - Optional. The tile source: web tile provider or path to a local file. - Provide either: - - - A web tile provider in the form of a - :class:`xyzservices.TileProvider` object. See - :doc:`Contextily providers ` for a - list of tile providers [Default is - ``xyzservices.providers.OpenStreetMap.HOT``, i.e. OpenStreetMap - Humanitarian web tiles]. - - A web tile provider in the form of a URL. The placeholders for the - XYZ in the URL need to be {{x}}, {{y}}, {{z}}, respectively. E.g. + source + The tile source: web tile provider or path to a local file. Provide either: + + - A web tile provider in the form of a :class:`xyzservices.TileProvider` object. + See :doc:`Contextily providers ` for a list of + tile providers. Default is ``xyzservices.providers.OpenStreetMap.HOT``, i.e. + OpenStreetMap Humanitarian web tiles. + - A web tile provider in the form of a URL. The placeholders for the XYZ in the + URL need to be ``{{x}}``, ``{{y}}``, ``{{z}}``, respectively. E.g. ``https://{{s}}.tile.openstreetmap.org/{{z}}/{{x}}/{{y}}.png``. - - A local file path. The file is read with - :doc:`rasterio ` and all bands are loaded into the - basemap. See + - A local file path. The file is read with :doc:`rasterio ` and + all bands are loaded into the basemap. See :doc:`contextily:working_with_local_files`. .. important:: Tiles are assumed to be in the Spherical Mercator projection (EPSG:3857). - - lonlat : bool - Optional. If ``False``, coordinates in ``region`` are assumed to be - Spherical Mercator as opposed to longitude/latitude [Default is - ``True``]. - - wait : int - Optional. If the tile API is rate-limited, the number of seconds to - wait between a failed request and the next try [Default is ``0``]. - - max_retries : int - Optional. Total number of rejected requests allowed before contextily - will stop trying to fetch more tiles from a rate-limited API [Default - is ``2``]. - + lonlat + If ``False``, coordinates in ``region`` are assumed to be Spherical Mercator as + opposed to longitude/latitude. 
+ wait + If the tile API is rate-limited, the number of seconds to wait between a failed + request and the next try. + max_retries + Total number of rejected requests allowed before contextily will stop trying to + fetch more tiles from a rate-limited API. zoom_adjust The amount to adjust a chosen zoom level if it is chosen automatically. Values outside of -1 to 1 are not recommended as they can lead to slow execution. @@ -128,9 +116,8 @@ def tilemap( ------ ImportError If ``rioxarray`` is not installed. Follow - :doc:`install instructions for rioxarray `, - (e.g. via ``python -m pip install rioxarray``) before using this - function. + :doc:`install instructions for rioxarray `, (e.g. via + ``python -m pip install rioxarray``) before using this function. """ kwargs = self._preprocess(**kwargs) @@ -138,8 +125,7 @@ def tilemap( raise ImportError( "Package `rioxarray` is required to be installed to use this function. " "Please use `python -m pip install rioxarray` or " - "`mamba install -c conda-forge rioxarray` " - "to install the package." + "`mamba install -c conda-forge rioxarray` to install the package." ) raster = load_tile_map( @@ -152,14 +138,14 @@ def tilemap( zoom_adjust=zoom_adjust, ) - # Reproject raster from Spherical Mercator (EPSG:3857) to - # lonlat (OGC:CRS84) if bounding box region was provided in lonlat + # Reproject raster from Spherical Mercator (EPSG:3857) to lonlat (OGC:CRS84) if + # bounding box region was provided in lonlat if lonlat and raster.rio.crs == "EPSG:3857": raster = raster.rio.reproject(dst_crs="OGC:CRS84") raster.gmt.gtype = 1 # set to geographic type - # Only set region if no_clip is None or False, so that plot is clipped to - # exact bounding box region + # Only set region if no_clip is None or False, so that plot is clipped to exact + # bounding box region if kwargs.get("N") in [None, False]: kwargs["R"] = "/".join(str(coordinate) for coordinate in region) From bfb641ea73af3507e20489fd05fa9525ca3e0e77 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 20 Mar 2024 10:55:46 +0800 Subject: [PATCH 034/218] Wrap GMT's data structure GMT_GRID_HEADER for grid/image/cube headers (#3127) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/datatypes/grid.py | 98 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 97 insertions(+), 1 deletion(-) diff --git a/pygmt/datatypes/grid.py b/pygmt/datatypes/grid.py index e67f44ebef5..dfb3c096d20 100644 --- a/pygmt/datatypes/grid.py +++ b/pygmt/datatypes/grid.py @@ -1,8 +1,104 @@ """ -Wrapper for the GMT_GRID data type. +Wrapper for the GMT_GRID data type and the GMT_GRID_HEADER data structure. """ import ctypes as ctp +from typing import ClassVar + +# Constants for lengths of grid header variables. +# +# Note: Ideally we should be able to get these constants from the GMT shared library +# using the ``lib["GMT_GRID_UNIT_LEN80"]`` syntax, but it causes cyclic import error. +# So we have to hardcode the values here. +GMT_GRID_UNIT_LEN80 = 80 +GMT_GRID_TITLE_LEN80 = 80 +GMT_GRID_COMMAND_LEN320 = 320 +GMT_GRID_REMARK_LEN160 = 160 + +# GMT uses single-precision for grids by default, but can be built to use +# double-precision. Currently, only single-precision is supported. +gmt_grdfloat = ctp.c_float + + +class _GMT_GRID_HEADER(ctp.Structure): # noqa: N801 + """ + GMT grid header structure for metadata about the grid. + + The class is used in the `GMT_GRID`/`GMT_IMAGE`/`GMT_CUBE` data structure. See the + GMT source code gmt_resources.h for the original C structure definitions. 
+ """ + + _fields_: ClassVar = [ + # Number of columns + ("n_columns", ctp.c_uint32), + # Number of rows + ("n_rows", ctp.c_uint32), + # Grid registration, 0 for gridline and 1 for pixel + ("registration", ctp.c_uint32), + # Minimum/maximum x and y coordinates + ("wesn", ctp.c_double * 4), + # Minimum z value + ("z_min", ctp.c_double), + # Maximum z value + ("z_max", ctp.c_double), + # x and y increments + ("inc", ctp.c_double * 2), + # Grid values must be multiplied by this factor + ("z_scale_factor", ctp.c_double), + # After scaling, add this offset + ("z_add_offset", ctp.c_double), + # Units in x-directions, in the form "long_name [units]" + ("x_units", ctp.c_char * GMT_GRID_UNIT_LEN80), + # Units in y-direction, in the form "long_name [units]" + ("y_units", ctp.c_char * GMT_GRID_UNIT_LEN80), + # Grid value units, in the form "long_name [units]" + ("z_units", ctp.c_char * GMT_GRID_UNIT_LEN80), + # Name of data set + ("title", ctp.c_char * GMT_GRID_TITLE_LEN80), + # Name of generating command + ("command", ctp.c_char * GMT_GRID_COMMAND_LEN320), + # Comments for this data set + ("remark", ctp.c_char * GMT_GRID_REMARK_LEN160), + # Below are items used internally by GMT + # Number of data points (n_columns * n_rows) [paddings are excluded] + ("nm", ctp.c_size_t), + # Actual number of items (not bytes) required to hold this grid (mx * my), + # per band (for images) + ("size", ctp.c_size_t), + # Bits per data value (e.g., 32 for ints/floats; 8 for bytes). + # Only used for ERSI ArcInfo ASCII Exchange grids. + ("bits", ctp.c_uint), + # For complex grid. + # 0 for normal + # GMT_GRID_IS_COMPLEX_REAL = real part of complex grid + # GMT_GRID_IS_COMPLEX_IMAG = imag part of complex grid + ("complex_mode", ctp.c_uint), + # Grid format + ("type", ctp.c_uint), + # Number of bands [1]. Used with GMT_IMAGE containers + ("n_bands", ctp.c_uint), + # Actual x-dimension in memory. mx = n_columns + pad[0] + pad[1] + ("mx", ctp.c_uint), + # Actual y-dimension in memory. 
my = n_rows + pad[2] + pad[3] + ("my", ctp.c_uint), + # Paddings on west, east, south, north sides [2,2,2,2] + ("pad", ctp.c_uint * 4), + # Three or four char codes T|B R|C S|R|S (grd) or B|L|P + A|a (img) + # describing array layout in mem and interleaving + ("mem_layout", ctp.c_char * 4), + # Missing value as stored in grid file + ("nan_value", gmt_grdfloat), + # 0.0 for gridline grids and 0.5 for pixel grids + ("xy_off", ctp.c_double), + # Referencing system string in PROJ.4 format + ("ProjRefPROJ4", ctp.c_char_p), + # Referencing system string in WKT format + ("ProjRefWKT", ctp.c_char_p), + # Referencing system EPSG code + ("ProjRefEPSG", ctp.c_int), + # Lower-level information for GMT use only + ("hidden", ctp.c_void_p), + ] class _GMT_GRID(ctp.Structure): # noqa: N801 From 0cdb87de8bd813deff42224cf73203db56a4e699 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 20 Mar 2024 17:23:34 +0800 Subject: [PATCH 035/218] Migrate from os.path to pathlib (#3119) --- examples/gallery/images/image.py | 4 +-- examples/tutorials/basics/text.py | 4 +-- pygmt/figure.py | 13 ++++--- pygmt/helpers/tempfile.py | 9 +++-- pygmt/helpers/testing.py | 26 +++++++------- pygmt/src/grdfilter.py | 4 +-- pygmt/src/x2sys_cross.py | 2 +- pygmt/tests/test_accessor.py | 3 +- pygmt/tests/test_clib.py | 6 ++-- pygmt/tests/test_clib_virtualfiles.py | 5 ++- pygmt/tests/test_contour.py | 5 ++- pygmt/tests/test_figure.py | 16 ++++----- pygmt/tests/test_grd2cpt.py | 4 +-- pygmt/tests/test_grdcontour.py | 5 ++- pygmt/tests/test_grdtrack.py | 4 +-- pygmt/tests/test_helpers.py | 26 ++++++++------ pygmt/tests/test_info.py | 12 +++---- pygmt/tests/test_makecpt.py | 4 +-- pygmt/tests/test_plot.py | 4 +-- pygmt/tests/test_plot3d.py | 4 +-- pygmt/tests/test_psconvert.py | 20 +++++------ pygmt/tests/test_session_management.py | 15 ++++---- pygmt/tests/test_sphinx_gallery.py | 10 +++--- pygmt/tests/test_text.py | 8 ++--- pygmt/tests/test_x2sys_cross.py | 49 +++++++++++++------------- pygmt/tests/test_x2sys_init.py | 18 +++++----- 26 files changed, 131 insertions(+), 149 deletions(-) diff --git a/examples/gallery/images/image.py b/examples/gallery/images/image.py index 52d6b197fa5..1806ae55577 100644 --- a/examples/gallery/images/image.py +++ b/examples/gallery/images/image.py @@ -11,7 +11,7 @@ """ # %% -import os +from pathlib import Path import pygmt @@ -28,6 +28,6 @@ ) # clean up the downloaded image in the current directory -os.remove("gmt-logo.png") +Path("gmt-logo.png").unlink() fig.show() diff --git a/examples/tutorials/basics/text.py b/examples/tutorials/basics/text.py index 4ad92091de1..1f225bb39c3 100644 --- a/examples/tutorials/basics/text.py +++ b/examples/tutorials/basics/text.py @@ -7,7 +7,7 @@ """ # %% -import os +from pathlib import Path import pygmt @@ -88,7 +88,7 @@ fig.text(textfiles="examples.txt", angle=True, font=True, justify=True) # Cleanups -os.remove("examples.txt") +Path("examples.txt").unlink() fig.show() diff --git a/pygmt/figure.py b/pygmt/figure.py index 98f06291c6b..f908cff1b93 100644 --- a/pygmt/figure.py +++ b/pygmt/figure.py @@ -67,10 +67,9 @@ class Figure: >>> fig.basemap(region=[0, 360, -90, 90], projection="W15c", frame=True) >>> fig.savefig("my-figure.png") >>> # Make sure the figure file is generated and clean it up - >>> import os - >>> os.path.exists("my-figure.png") - True - >>> os.remove("my-figure.png") + >>> from pathlib import Path + >>> assert Path("my-figure.png").exists() + >>> Path("my-figure.png").unlink() The plot region can be specified through ISO country codes (for 
example, ``"JP"`` for Japan): @@ -380,8 +379,8 @@ def savefig( # noqa: PLR0912 # Remove the .pgw world file if exists # Not necessary after GMT 6.5.0. # See upstream fix https://github.com/GenericMappingTools/gmt/pull/7865 - if ext == "tiff" and fname.with_suffix(".pgw").exists(): - fname.with_suffix(".pgw").unlink() + if ext == "tiff": + fname.with_suffix(".pgw").unlink(missing_ok=True) # Rename if file extension doesn't match the input file suffix if ext != suffix[1:]: @@ -495,7 +494,7 @@ def _preview(self, fmt, dpi, as_bytes=False, **kwargs): If ``as_bytes=False``, this is the file name of the preview image file. Else, it is the file content loaded as a bytes string. """ - fname = os.path.join(self._preview_dir.name, f"{self._name}.{fmt}") + fname = Path(self._preview_dir.name) / f"{self._name}.{fmt}" self.savefig(fname, dpi=dpi, **kwargs) if as_bytes: with open(fname, "rb") as image: diff --git a/pygmt/helpers/tempfile.py b/pygmt/helpers/tempfile.py index 62a3daf5447..bed79f352ae 100644 --- a/pygmt/helpers/tempfile.py +++ b/pygmt/helpers/tempfile.py @@ -2,9 +2,9 @@ Utilities for dealing with temporary file management. """ -import os import uuid from contextlib import contextmanager +from pathlib import Path from tempfile import NamedTemporaryFile import numpy as np @@ -72,8 +72,7 @@ def __exit__(self, *args): """ Remove the temporary file. """ - if os.path.exists(self.name): - os.remove(self.name) + Path(self.name).unlink(missing_ok=True) def read(self, keep_tabs=False): """ @@ -133,7 +132,7 @@ def tempfile_from_geojson(geojson): with GMTTempFile(suffix=".gmt") as tmpfile: import geopandas as gpd - os.remove(tmpfile.name) # ensure file is deleted first + Path(tmpfile.name).unlink() # Ensure file is deleted first ogrgmt_kwargs = {"filename": tmpfile.name, "driver": "OGR_GMT", "mode": "w"} try: # OGR_GMT only supports 32-bit integers. We need to map int/int64 @@ -185,7 +184,7 @@ def tempfile_from_image(image): A temporary GeoTIFF file holding the image data. E.g. '1a2b3c4d5.tif'. """ with GMTTempFile(suffix=".tif") as tmpfile: - os.remove(tmpfile.name) # ensure file is deleted first + Path(tmpfile.name).unlink() # Ensure file is deleted first try: image.rio.to_raster(raster_path=tmpfile.name) except AttributeError as e: # object has no attribute 'rio' diff --git a/pygmt/helpers/testing.py b/pygmt/helpers/testing.py index 1a1c72cb334..28027e82f64 100644 --- a/pygmt/helpers/testing.py +++ b/pygmt/helpers/testing.py @@ -4,8 +4,8 @@ import importlib import inspect -import os import string +from pathlib import Path from pygmt.exceptions import GMTImageComparisonFailure from pygmt.io import load_dataarray @@ -39,6 +39,7 @@ def check_figures_equal(*, extensions=("png",), tol=0.0, result_dir="result_imag >>> import pytest >>> import shutil >>> from pygmt import Figure + >>> from pathlib import Path >>> @check_figures_equal(result_dir="tmp_result_images") ... def test_check_figures_equal(): @@ -50,7 +51,7 @@ def check_figures_equal(*, extensions=("png",), tol=0.0, result_dir="result_imag ... ) ... return fig_ref, fig_test >>> test_check_figures_equal() - >>> assert len(os.listdir("tmp_result_images")) == 0 + >>> assert len(list(Path("tmp_result_images").iterdir())) == 0 >>> shutil.rmtree(path="tmp_result_images") # cleanup folder if tests pass >>> @check_figures_equal(result_dir="tmp_result_images") @@ -63,12 +64,9 @@ def check_figures_equal(*, extensions=("png",), tol=0.0, result_dir="result_imag >>> with pytest.raises(GMTImageComparisonFailure): ... 
test_check_figures_unequal() >>> for suffix in ["", "-expected", "-failed-diff"]: - ... assert os.path.exists( - ... os.path.join( - ... "tmp_result_images", - ... f"test_check_figures_unequal{suffix}.png", - ... ) - ... ) + ... assert ( + ... Path("tmp_result_images") / f"test_check_figures_unequal{suffix}.png" + ... ).exists() >>> shutil.rmtree(path="tmp_result_images") # cleanup folder if tests pass """ allowed_chars = set(string.digits + string.ascii_letters + "_-[]()") @@ -78,7 +76,7 @@ def decorator(func): import pytest from matplotlib.testing.compare import compare_images - os.makedirs(result_dir, exist_ok=True) + Path(result_dir).mkdir(parents=True, exist_ok=True) old_sig = inspect.signature(func) @pytest.mark.parametrize("ext", extensions) @@ -93,8 +91,8 @@ def wrapper(*args, ext="png", request=None, **kwargs): file_name = func.__name__ try: fig_ref, fig_test = func(*args, **kwargs) - ref_image_path = os.path.join(result_dir, f"{file_name}-expected.{ext}") - test_image_path = os.path.join(result_dir, f"{file_name}.{ext}") + ref_image_path = Path(result_dir) / f"{file_name}-expected.{ext}" + test_image_path = Path(result_dir) / f"{file_name}.{ext}" fig_ref.savefig(ref_image_path) fig_test.savefig(test_image_path) @@ -107,11 +105,11 @@ def wrapper(*args, ext="png", request=None, **kwargs): in_decorator=True, ) if err is None: # Images are the same - os.remove(ref_image_path) - os.remove(test_image_path) + ref_image_path.unlink() + test_image_path.unlink() else: # Images are not the same for key in ["actual", "expected", "diff"]: - err[key] = os.path.relpath(err[key]) + err[key] = Path(err[key]).relative_to(".") raise GMTImageComparisonFailure( f"images not close (RMS {err['rms']:.3f}):\n" f"\t{err['actual']}\n" diff --git a/pygmt/src/grdfilter.py b/pygmt/src/grdfilter.py index a4a544819ad..a6e49c65001 100644 --- a/pygmt/src/grdfilter.py +++ b/pygmt/src/grdfilter.py @@ -114,7 +114,7 @@ def grdfilter(grid, **kwargs): Examples -------- - >>> import os + >>> from pathlib import Path >>> import pygmt >>> # Apply a filter of 600 km (full width) to the @earth_relief_30m_g file >>> # and return a filtered field (saved as netCDF) @@ -126,7 +126,7 @@ def grdfilter(grid, **kwargs): ... spacing=0.5, ... outgrid="filtered_pacific.nc", ... ) - >>> os.remove("filtered_pacific.nc") # Cleanup file + >>> Path("filtered_pacific.nc").unlink() # Cleanup file >>> # Apply a Gaussian smoothing filter of 600 km to the input DataArray >>> # and return a filtered DataArray with the smoothed field >>> grid = pygmt.datasets.load_earth_relief() diff --git a/pygmt/src/x2sys_cross.py b/pygmt/src/x2sys_cross.py index a8988a00af8..c530c964025 100644 --- a/pygmt/src/x2sys_cross.py +++ b/pygmt/src/x2sys_cross.py @@ -53,7 +53,7 @@ def tempfile_from_dftrack(track, suffix): ) yield tmpfilename finally: - os.remove(tmpfilename) + Path(tmpfilename).unlink() @fmt_docstring diff --git a/pygmt/tests/test_accessor.py b/pygmt/tests/test_accessor.py index a12c01e9265..ed047c2ec28 100644 --- a/pygmt/tests/test_accessor.py +++ b/pygmt/tests/test_accessor.py @@ -2,7 +2,6 @@ Test the behaviour of the GMTDataArrayAccessor class. 
""" -import os import sys from pathlib import Path @@ -101,7 +100,7 @@ def test_accessor_sliced_datacube(): assert grid.gmt.registration == 0 # gridline registration assert grid.gmt.gtype == 1 # geographic coordinate type finally: - os.remove(fname) + Path(fname).unlink() def test_accessor_grid_source_file_not_exist(): diff --git a/pygmt/tests/test_clib.py b/pygmt/tests/test_clib.py index 78121b13cda..2f655435ee4 100644 --- a/pygmt/tests/test_clib.py +++ b/pygmt/tests/test_clib.py @@ -2,7 +2,6 @@ Test the wrappers for the C API. """ -import os from contextlib import contextmanager from pathlib import Path @@ -22,7 +21,7 @@ ) from pygmt.helpers import GMTTempFile -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") +POINTS_DATA = Path(__file__).parent / "data" / "points.txt" @contextmanager @@ -136,11 +135,10 @@ def test_call_module(): """ Run a command to see if call_module works. """ - data_fname = os.path.join(TEST_DATA_DIR, "points.txt") out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: - lib.call_module("info", f"{data_fname} -C ->{out_fname.name}") + lib.call_module("info", f"{POINTS_DATA} -C ->{out_fname.name}") assert Path(out_fname.name).stat().st_size > 0 output = out_fname.read().strip() assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" diff --git a/pygmt/tests/test_clib_virtualfiles.py b/pygmt/tests/test_clib_virtualfiles.py index ce71553fc24..7e7822f99e5 100644 --- a/pygmt/tests/test_clib_virtualfiles.py +++ b/pygmt/tests/test_clib_virtualfiles.py @@ -2,9 +2,9 @@ Test the C API functions related to virtual files. """ -import os from importlib.util import find_spec from itertools import product +from pathlib import Path import numpy as np import pandas as pd @@ -15,8 +15,7 @@ from pygmt.helpers import GMTTempFile from pygmt.tests.test_clib import mock -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt") +POINTS_DATA = Path(__file__).parent / "data" / "points.txt" @pytest.fixture(scope="module", name="data") diff --git a/pygmt/tests/test_contour.py b/pygmt/tests/test_contour.py index 416ad5c8a3e..6cca6ecbf7c 100644 --- a/pygmt/tests/test_contour.py +++ b/pygmt/tests/test_contour.py @@ -2,7 +2,7 @@ Test Figure.contour. 
""" -import os +from pathlib import Path import numpy as np import pandas as pd @@ -10,8 +10,7 @@ import xarray as xr from pygmt import Figure -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt") +POINTS_DATA = Path(__file__).parent / "data" / "points.txt" @pytest.fixture(scope="module", name="data") diff --git a/pygmt/tests/test_figure.py b/pygmt/tests/test_figure.py index d33be107eda..042c9876e71 100644 --- a/pygmt/tests/test_figure.py +++ b/pygmt/tests/test_figure.py @@ -5,7 +5,6 @@ """ import importlib -import os from pathlib import Path import numpy as np @@ -82,10 +81,8 @@ def test_figure_savefig_exists(): fig.basemap(region="10/70/-300/800", projection="X3i/5i", frame="af") prefix = "test_figure_savefig_exists" for fmt in "bmp eps jpg jpeg pdf png ppm tif PNG JPG JPEG Png".split(): - fname = f"{prefix}.{fmt}" + fname = Path(f"{prefix}.{fmt}") fig.savefig(fname) - - fname = Path(fname) assert fname.exists() fname.unlink() @@ -201,10 +198,10 @@ def test_figure_savefig_transparent(): with pytest.raises(GMTInvalidInput): fig.savefig(fname, transparent=True) # png should not raise an error - fname = f"{prefix}.png" + fname = Path(f"{prefix}.png") fig.savefig(fname, transparent=True) - assert os.path.exists(fname) - os.remove(fname) + assert fname.exists() + fname.unlink() def test_figure_savefig_filename_with_spaces(): @@ -215,8 +212,9 @@ def test_figure_savefig_filename_with_spaces(): fig.basemap(region=[0, 1, 0, 1], projection="X1c/1c", frame=True) with GMTTempFile(prefix="pygmt-filename with spaces", suffix=".png") as imgfile: fig.savefig(fname=imgfile.name) - assert r"\040" not in os.path.abspath(imgfile.name) - assert Path(imgfile.name).stat().st_size > 0 + imgpath = Path(imgfile.name).resolve() + assert r"\040" not in str(imgpath) + assert imgpath.stat().st_size > 0 def test_figure_savefig(): diff --git a/pygmt/tests/test_grd2cpt.py b/pygmt/tests/test_grd2cpt.py index 76d19905259..c2d9a257cc1 100644 --- a/pygmt/tests/test_grd2cpt.py +++ b/pygmt/tests/test_grd2cpt.py @@ -2,7 +2,7 @@ Test pygmt.grd2cpt. """ -import os +from pathlib import Path import pytest from pygmt import Figure, grd2cpt @@ -55,7 +55,7 @@ def test_grd2cpt_output_to_cpt_file(grid): """ with GMTTempFile(suffix=".cpt") as cptfile: grd2cpt(grid=grid, output=cptfile.name) - assert os.path.getsize(cptfile.name) > 0 + assert Path(cptfile.name).stat().st_size > 0 def test_grd2cpt_unrecognized_data_type(): diff --git a/pygmt/tests/test_grdcontour.py b/pygmt/tests/test_grdcontour.py index c40439bdc7b..2cedd80dc14 100644 --- a/pygmt/tests/test_grdcontour.py +++ b/pygmt/tests/test_grdcontour.py @@ -2,7 +2,7 @@ Test Figure.grdcontour. """ -import os +from pathlib import Path import numpy as np import pytest @@ -10,8 +10,7 @@ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers.testing import load_static_earth_relief -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -TEST_CONTOUR_FILE = os.path.join(TEST_DATA_DIR, "contours.txt") +TEST_CONTOUR_FILE = Path(__file__).parent / "data" / "contours.txt" @pytest.fixture(scope="module", name="grid") diff --git a/pygmt/tests/test_grdtrack.py b/pygmt/tests/test_grdtrack.py index fdb25d9e5f7..e139171d948 100644 --- a/pygmt/tests/test_grdtrack.py +++ b/pygmt/tests/test_grdtrack.py @@ -2,7 +2,6 @@ Test pygmt.grdtrack. 
""" -import os from pathlib import Path import numpy as np @@ -14,8 +13,7 @@ from pygmt.helpers import GMTTempFile, data_kind from pygmt.helpers.testing import load_static_earth_relief -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -POINTS_DATA = os.path.join(TEST_DATA_DIR, "track.txt") +POINTS_DATA = Path(__file__).parent / "data" / "track.txt" @pytest.fixture(scope="module", name="dataarray") diff --git a/pygmt/tests/test_helpers.py b/pygmt/tests/test_helpers.py index 29cc61fe8b7..9b25c71aa0b 100644 --- a/pygmt/tests/test_helpers.py +++ b/pygmt/tests/test_helpers.py @@ -2,7 +2,7 @@ Test the helper functions/classes/etc used in wrapping GMT. """ -import os +from pathlib import Path import numpy as np import pytest @@ -90,9 +90,9 @@ def test_gmttempfile(): Check that file is really created and deleted. """ with GMTTempFile() as tmpfile: - assert os.path.exists(tmpfile.name) + assert Path(tmpfile.name).exists() # File should be deleted when leaving the with block - assert not os.path.exists(tmpfile.name) + assert not Path(tmpfile.name).exists() def test_gmttempfile_unique(): @@ -110,17 +110,21 @@ def test_gmttempfile_prefix_suffix(): Make sure the prefix and suffix of temporary files are user specifiable. """ with GMTTempFile() as tmpfile: - assert os.path.basename(tmpfile.name).startswith("pygmt-") - assert os.path.basename(tmpfile.name).endswith(".txt") + tmpname = Path(tmpfile.name).name + assert tmpname.startswith("pygmt-") + assert tmpname.endswith(".txt") with GMTTempFile(prefix="user-prefix-") as tmpfile: - assert os.path.basename(tmpfile.name).startswith("user-prefix-") - assert os.path.basename(tmpfile.name).endswith(".txt") + tmpname = Path(tmpfile.name).name + assert tmpname.startswith("user-prefix-") + assert tmpname.endswith(".txt") with GMTTempFile(suffix=".log") as tmpfile: - assert os.path.basename(tmpfile.name).startswith("pygmt-") - assert os.path.basename(tmpfile.name).endswith(".log") + tmpname = Path(tmpfile.name).name + assert tmpname.startswith("pygmt-") + assert tmpname.endswith(".log") with GMTTempFile(prefix="user-prefix-", suffix=".log") as tmpfile: - assert os.path.basename(tmpfile.name).startswith("user-prefix-") - assert os.path.basename(tmpfile.name).endswith(".log") + tmpname = Path(tmpfile.name).name + assert tmpname.startswith("user-prefix-") + assert tmpname.endswith(".log") def test_gmttempfile_read(): diff --git a/pygmt/tests/test_info.py b/pygmt/tests/test_info.py index b812e1457ce..a14030cb772 100644 --- a/pygmt/tests/test_info.py +++ b/pygmt/tests/test_info.py @@ -2,9 +2,8 @@ Test pygmt.info. 
""" -import os -import pathlib import sys +from pathlib import Path, PurePosixPath, PureWindowsPath import numpy as np import numpy.testing as npt @@ -15,8 +14,7 @@ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers.testing import skip_if_no -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt") +POINTS_DATA = Path(__file__).parent / "data" / "points.txt" def test_info(): @@ -36,16 +34,16 @@ def test_info(): @pytest.mark.parametrize( "table", [ - pathlib.Path(POINTS_DATA), + Path(POINTS_DATA), pytest.param( - pathlib.PureWindowsPath(POINTS_DATA), + PureWindowsPath(POINTS_DATA), marks=pytest.mark.skipif( sys.platform != "win32", reason="PureWindowsPath is only available on Windows", ), ), pytest.param( - pathlib.PurePosixPath(POINTS_DATA), + PurePosixPath(POINTS_DATA), marks=pytest.mark.skipif( sys.platform == "win32", reason="PurePosixPath is not available on Windows", diff --git a/pygmt/tests/test_makecpt.py b/pygmt/tests/test_makecpt.py index eaba2589f04..39733c7cfc5 100644 --- a/pygmt/tests/test_makecpt.py +++ b/pygmt/tests/test_makecpt.py @@ -2,7 +2,6 @@ Test pygmt.makecpt. """ -import os from pathlib import Path import numpy as np @@ -11,8 +10,7 @@ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import GMTTempFile -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt") +POINTS_DATA = Path(__file__).parent / "data" / "points.txt" @pytest.fixture(scope="module", name="points") diff --git a/pygmt/tests/test_plot.py b/pygmt/tests/test_plot.py index 83bc356692f..76158bfe038 100644 --- a/pygmt/tests/test_plot.py +++ b/pygmt/tests/test_plot.py @@ -3,7 +3,6 @@ """ import datetime -import os from pathlib import Path import numpy as np @@ -14,8 +13,7 @@ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import GMTTempFile -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt") +POINTS_DATA = Path(__file__).parent / "data" / "points.txt" @pytest.fixture(scope="module", name="data") diff --git a/pygmt/tests/test_plot3d.py b/pygmt/tests/test_plot3d.py index ad91f06f8fa..c37c899a30b 100644 --- a/pygmt/tests/test_plot3d.py +++ b/pygmt/tests/test_plot3d.py @@ -2,7 +2,6 @@ Test Figure.plot3d. """ -import os from pathlib import Path import numpy as np @@ -11,8 +10,7 @@ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import GMTTempFile -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt") +POINTS_DATA = Path(__file__).parent / "data" / "points.txt" @pytest.fixture(scope="module", name="data") diff --git a/pygmt/tests/test_psconvert.py b/pygmt/tests/test_psconvert.py index 6cc82cc50e9..f0460be452b 100644 --- a/pygmt/tests/test_psconvert.py +++ b/pygmt/tests/test_psconvert.py @@ -2,7 +2,7 @@ Test Figure.psconvert. 
""" -import os +from pathlib import Path import pytest from pygmt import Figure @@ -18,9 +18,9 @@ def test_psconvert(): fig.basemap(region="10/70/-3/8", projection="X4i/3i", frame="a") prefix = "test_psconvert" fig.psconvert(prefix=prefix, fmt="f", crop=True) - fname = prefix + ".pdf" - assert os.path.exists(fname) - os.remove(fname) + fname = Path(prefix + ".pdf") + assert fname.exists() + fname.unlink() def test_psconvert_twice(): @@ -32,14 +32,14 @@ def test_psconvert_twice(): prefix = "test_psconvert_twice" # Make a PDF fig.psconvert(prefix=prefix, fmt="f") - fname = prefix + ".pdf" - assert os.path.exists(fname) - os.remove(fname) + fname = Path(prefix + ".pdf") + assert fname.exists() + fname.unlink() # Make a PNG fig.psconvert(prefix=prefix, fmt="g") - fname = prefix + ".png" - assert os.path.exists(fname) - os.remove(fname) + fname = Path(prefix + ".png") + assert fname.exists() + fname.unlink() def test_psconvert_without_prefix(): diff --git a/pygmt/tests/test_session_management.py b/pygmt/tests/test_session_management.py index 0fbe6f8e45b..77a3970787f 100644 --- a/pygmt/tests/test_session_management.py +++ b/pygmt/tests/test_session_management.py @@ -3,7 +3,6 @@ """ import multiprocessing as mp -import os from importlib import reload from pathlib import Path @@ -25,8 +24,8 @@ def test_begin_end(): lib.call_module("basemap", "-R10/70/-3/8 -JX4i/3i -Ba") end() begin() # Restart the global session - assert os.path.exists("pygmt-session.pdf") - os.remove("pygmt-session.pdf") + assert Path("pygmt-session.pdf").exists() + Path("pygmt-session.pdf").unlink() def test_gmt_compat_6_is_applied(capsys): @@ -54,12 +53,12 @@ def test_gmt_compat_6_is_applied(capsys): finally: end() # Clean up the global "gmt.conf" in the current directory - assert os.path.exists("gmt.conf") - os.remove("gmt.conf") - assert os.path.exists("pygmt-session.pdf") - os.remove("pygmt-session.pdf") + assert Path("gmt.conf").exists() + Path("gmt.conf").unlink() + assert Path("pygmt-session.pdf").exists() + Path("pygmt-session.pdf").unlink() # Make sure no global "gmt.conf" in the current directory - assert not os.path.exists("gmt.conf") + assert not Path("gmt.conf").exists() begin() # Restart the global session diff --git a/pygmt/tests/test_sphinx_gallery.py b/pygmt/tests/test_sphinx_gallery.py index 205f0f3e3e5..c40d4732a0b 100644 --- a/pygmt/tests/test_sphinx_gallery.py +++ b/pygmt/tests/test_sphinx_gallery.py @@ -2,7 +2,7 @@ Test the sphinx-gallery scraper and code required to make it work. """ -import os +from pathlib import Path from tempfile import TemporaryDirectory import pytest @@ -27,13 +27,13 @@ def test_pygmtscraper(): assert len(SHOWED_FIGURES) == 1 assert SHOWED_FIGURES[0] is fig scraper = PyGMTScraper() - with TemporaryDirectory(dir=os.getcwd()) as tmpdir: + with TemporaryDirectory(dir=Path.cwd()) as tmpdir: conf = {"src_dir": "meh"} - fname = os.path.join(tmpdir, "meh.png") + fname = Path(tmpdir) / "meh.png" block_vars = {"image_path_iterator": (i for i in [fname])} - assert not os.path.exists(fname) + assert not fname.exists() scraper(None, block_vars, conf) - assert os.path.exists(fname) + assert fname.exists() assert not SHOWED_FIGURES finally: SHOWED_FIGURES.extend(showed) diff --git a/pygmt/tests/test_text.py b/pygmt/tests/test_text.py index 7ef50224c38..3969c451e68 100644 --- a/pygmt/tests/test_text.py +++ b/pygmt/tests/test_text.py @@ -2,7 +2,7 @@ Test Figure.text. 
""" -import os +from pathlib import Path import numpy as np import pytest @@ -10,9 +10,9 @@ from pygmt.exceptions import GMTCLibError, GMTInvalidInput from pygmt.helpers import GMTTempFile -TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") -POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt") -CITIES_DATA = os.path.join(TEST_DATA_DIR, "cities.txt") +TEST_DATA_DIR = Path(__file__).parent / "data" +POINTS_DATA = TEST_DATA_DIR / "points.txt" +CITIES_DATA = TEST_DATA_DIR / "cities.txt" @pytest.fixture(scope="module", name="projection") diff --git a/pygmt/tests/test_x2sys_cross.py b/pygmt/tests/test_x2sys_cross.py index 226fbc06e6d..49256953707 100644 --- a/pygmt/tests/test_x2sys_cross.py +++ b/pygmt/tests/test_x2sys_cross.py @@ -3,7 +3,6 @@ """ import copy -import os from pathlib import Path from tempfile import TemporaryDirectory @@ -25,7 +24,7 @@ def _fixture_mock_x2sys_home(monkeypatch): Set the X2SYS_HOME environment variable to the current working directory for the test session. """ - monkeypatch.setenv("X2SYS_HOME", os.getcwd()) + monkeypatch.setenv("X2SYS_HOME", Path.cwd()) @pytest.fixture(scope="module", name="tracks") @@ -44,16 +43,17 @@ def test_x2sys_cross_input_file_output_file(): Run x2sys_cross by passing in a filename, and output internal crossovers to an ASCII txt file. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tmpdir_p = Path(tmpdir) + tag = tmpdir_p.name x2sys_init(tag=tag, fmtfile="xyz", force=True) - outfile = os.path.join(tmpdir, "tmp_coe.txt") + outfile = tmpdir_p / "tmp_coe.txt" output = x2sys_cross( tracks=["@tut_ship.xyz"], tag=tag, coe="i", outfile=outfile ) assert output is None # check that output is None since outfile is set - assert Path(outfile).stat().st_size > 0 # check that outfile exists at path + assert outfile.stat().st_size > 0 # check that outfile exists at path _ = pd.read_csv(outfile, sep="\t", header=2) # ensure ASCII text file loads ok @@ -67,8 +67,8 @@ def test_x2sys_cross_input_file_output_dataframe(): Run x2sys_cross by passing in a filename, and output internal crossovers to a pandas.DataFrame. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tag = Path(tmpdir).name x2sys_init(tag=tag, fmtfile="xyz", force=True) output = x2sys_cross(tracks=["@tut_ship.xyz"], tag=tag, coe="i") @@ -86,8 +86,8 @@ def test_x2sys_cross_input_dataframe_output_dataframe(tracks): Run x2sys_cross by passing in one dataframe, and output internal crossovers to a pandas.DataFrame. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tag = Path(tmpdir).name x2sys_init(tag=tag, fmtfile="xyz", force=True) output = x2sys_cross(tracks=tracks, tag=tag, coe="i") @@ -107,16 +107,15 @@ def test_x2sys_cross_input_two_dataframes(): Run x2sys_cross by passing in two pandas.DataFrame tables with a time column, and output external crossovers to a pandas.DataFrame. 
""" - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tmpdir_p = Path(tmpdir) + tag = tmpdir_p.name x2sys_init( tag=tag, fmtfile="xyz", suffix="xyzt", units=["de", "se"], force=True ) # Add a time row to the x2sys fmtfile - with open( - file=os.path.join(tmpdir, "xyz.fmt"), mode="a", encoding="utf8" - ) as fmtfile: + with open(tmpdir_p / "xyz.fmt", mode="a", encoding="utf8") as fmtfile: fmtfile.write("time\ta\tN\t0\t1\t0\t%g\n") # Create pandas.DataFrame track tables @@ -144,8 +143,8 @@ def test_x2sys_cross_input_dataframe_with_nan(tracks): Run x2sys_cross by passing in one dataframe with NaN values, and output internal crossovers to a pandas.DataFrame. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tag = Path(tmpdir).name x2sys_init( tag=tag, fmtfile="xyz", suffix="xyzt", units=["de", "se"], force=True ) @@ -169,15 +168,15 @@ def test_x2sys_cross_input_two_filenames(): Run x2sys_cross by passing in two filenames, and output external crossovers to a pandas.DataFrame. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tag = Path(tmpdir).name x2sys_init(tag=tag, fmtfile="xyz", force=True) # Create temporary xyz files for i in range(2): rng = np.random.default_rng(seed=i) with open( - os.path.join(os.getcwd(), f"track_{i}.xyz"), mode="w", encoding="utf8" + Path.cwd() / f"track_{i}.xyz", mode="w", encoding="utf8" ) as fname: np.savetxt(fname=fname, X=rng.random((10, 3))) @@ -188,7 +187,7 @@ def test_x2sys_cross_input_two_filenames(): columns = list(output.columns) assert columns[:6] == ["x", "y", "i_1", "i_2", "dist_1", "dist_2"] assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"] - _ = [os.remove(f"track_{i}.xyz") for i in range(2)] # cleanup track files + _ = [Path(f"track_{i}.xyz").unlink() for i in range(2)] # cleanup track files def test_x2sys_cross_invalid_tracks_input_type(tracks): @@ -212,8 +211,8 @@ def test_x2sys_cross_region_interpolation_numpoints(): Test that x2sys_cross's region (R), interpolation (l) and numpoints (W) arguments work. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tag = Path(tmpdir).name x2sys_init(tag=tag, fmtfile="xyz", force=True) output = x2sys_cross( tracks=["@tut_ship.xyz"], @@ -240,8 +239,8 @@ def test_x2sys_cross_trackvalues(): """ Test that x2sys_cross's trackvalues (Z) argument work. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tag = Path(tmpdir).name x2sys_init(tag=tag, fmtfile="xyz", force=True) output = x2sys_cross(tracks=["@tut_ship.xyz"], tag=tag, trackvalues=True) diff --git a/pygmt/tests/test_x2sys_init.py b/pygmt/tests/test_x2sys_init.py index 808c3f68315..c0664110eda 100644 --- a/pygmt/tests/test_x2sys_init.py +++ b/pygmt/tests/test_x2sys_init.py @@ -2,7 +2,7 @@ Test pygmt.x2sys_init. 
""" -import os +from pathlib import Path from tempfile import TemporaryDirectory import pytest @@ -15,7 +15,7 @@ def _fixture_mock_x2sys_home(monkeypatch): Set the X2SYS_HOME environment variable to the current working directory for the test session. """ - monkeypatch.setenv("X2SYS_HOME", os.getcwd()) + monkeypatch.setenv("X2SYS_HOME", Path.cwd()) @pytest.mark.usefixtures("mock_x2sys_home") @@ -24,13 +24,14 @@ def test_x2sys_init_region_spacing(): Test that x2sys_init's region (R) and spacing (I) sequence arguments accept a list properly. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tmpdir_p = Path(tmpdir) + tag = tmpdir_p.name x2sys_init( tag=tag, fmtfile="xyz", force=True, region=[0, 10, 20, 30], spacing=[5, 5] ) - with open(os.path.join(tmpdir, f"{tag}.tag"), encoding="utf8") as tagpath: + with open(tmpdir_p / f"{tag}.tag", encoding="utf8") as tagpath: tail_line = tagpath.readlines()[-1] assert "-R0/10/20/30" in tail_line assert "-I5/5" in tail_line @@ -42,8 +43,9 @@ def test_x2sys_init_units_gap(): """ Test that x2sys_init's units (N) and gap (W) arguments accept a list properly. """ - with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir: - tag = os.path.basename(tmpdir) + with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: + tmpdir_p = Path(tmpdir) + tag = tmpdir_p.name x2sys_init( tag=tag, fmtfile="xyz", @@ -52,7 +54,7 @@ def test_x2sys_init_units_gap(): gap=["tseconds", "de"], ) - with open(os.path.join(tmpdir, f"{tag}.tag"), encoding="utf8") as tagpath: + with open(tmpdir_p / f"{tag}.tag", encoding="utf8") as tagpath: tail_line = tagpath.readlines()[-1] assert "-Nse -Nde" in tail_line assert "-Wtseconds -Wde" in tail_line From dd8e0cd87a7a0629aafb752ba8934cec4149f011 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 21 Mar 2024 06:32:13 +0800 Subject: [PATCH 036/218] Enable ruff's PTH (flake8-use-pathlib) rules and fix violations (#3129) --- examples/tutorials/basics/text.py | 2 +- pygmt/clib/session.py | 11 ++++++----- pygmt/datatypes/dataset.py | 6 ++++-- pygmt/figure.py | 4 +--- pygmt/helpers/tempfile.py | 9 ++++----- pygmt/src/plot.py | 4 +++- pygmt/src/plot3d.py | 4 +++- pygmt/tests/test_helpers.py | 3 +-- pygmt/tests/test_legend.py | 5 +++-- pygmt/tests/test_meca.py | 11 ++++------- pygmt/tests/test_plot.py | 6 ++---- pygmt/tests/test_plot3d.py | 6 ++---- pygmt/tests/test_text.py | 3 +-- pygmt/tests/test_x2sys_cross.py | 9 +++------ pygmt/tests/test_x2sys_init.py | 17 +++++++---------- pyproject.toml | 1 + 16 files changed, 46 insertions(+), 55 deletions(-) diff --git a/examples/tutorials/basics/text.py b/examples/tutorials/basics/text.py index 1f225bb39c3..43bd76ed799 100644 --- a/examples/tutorials/basics/text.py +++ b/examples/tutorials/basics/text.py @@ -72,7 +72,7 @@ fig.coast(land="black", water="skyblue") # Create space-delimited file -with open("examples.txt", "w") as f: +with Path("examples.txt").open(mode="w") as f: f.write("114 0.5 0 22p,Helvetica-Bold,white CM BORNEO\n") f.write("119 3.25 0 12p,Helvetica-Bold,black CM CELEBES SEA\n") f.write("112 -4.6 0 12p,Helvetica-Bold,black CM JAVA SEA\n") diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 6e6e495e244..1bea0e4b886 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1641,12 +1641,13 @@ def virtualfile_out( Examples -------- + >>> from pathlib import Path >>> from pygmt.clib import Session >>> from pygmt.datatypes 
import _GMT_DATASET >>> from pygmt.helpers import GMTTempFile >>> >>> with GMTTempFile(suffix=".txt") as tmpfile: - ... with open(tmpfile.name, mode="w") as fp: + ... with Path(tmpfile.name).open(mode="w") as fp: ... print("1.0 2.0 3.0 TEXT", file=fp) ... ... # Create a virtual file for storing the output table. @@ -1661,8 +1662,7 @@ def virtualfile_out( ... with lib.virtualfile_out(fname=tmpfile.name) as vouttbl: ... assert vouttbl == tmpfile.name ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") - ... with open(vouttbl, mode="r") as fp: - ... line = fp.readline() + ... line = Path(vouttbl).read_text() ... assert line == "1\t2\t3\tTEXT\n" """ if fname is not None: # Yield the actual file name. @@ -1692,13 +1692,14 @@ def read_virtualfile( Examples -------- + >>> from pathlib import Path >>> from pygmt.clib import Session >>> from pygmt.helpers import GMTTempFile >>> >>> # Read dataset from a virtual file >>> with Session() as lib: ... with GMTTempFile(suffix=".txt") as tmpfile: - ... with open(tmpfile.name, mode="w") as fp: + ... with Path(tmpfile.name).open(mode="w") as fp: ... print("1.0 2.0 3.0 TEXT", file=fp) ... with lib.virtualfile_out(kind="dataset") as vouttbl: ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") @@ -1779,7 +1780,7 @@ def virtualfile_to_dataset( >>> >>> with GMTTempFile(suffix=".txt") as tmpfile: ... # prepare the sample data file - ... with open(tmpfile.name, mode="w") as fp: + ... with Path(tmpfile.name).open(mode="w") as fp: ... print(">", file=fp) ... print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) ... print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index 21953ee9051..a0d0547f3ca 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -18,12 +18,13 @@ class _GMT_DATASET(ctp.Structure): # noqa: N801 Examples -------- + >>> from pathlib import Path >>> from pygmt.helpers import GMTTempFile >>> from pygmt.clib import Session >>> >>> with GMTTempFile(suffix=".txt") as tmpfile: ... # Prepare the sample data file - ... with open(tmpfile.name, mode="w") as fp: + ... with Path(tmpfile.name).open(mode="w") as fp: ... print(">", file=fp) ... print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) ... print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) @@ -157,12 +158,13 @@ def to_dataframe(self) -> pd.DataFrame: Examples -------- + >>> from pathlib import Path >>> from pygmt.helpers import GMTTempFile >>> from pygmt.clib import Session >>> >>> with GMTTempFile(suffix=".txt") as tmpfile: ... # prepare the sample data file - ... with open(tmpfile.name, mode="w") as fp: + ... with Path(tmpfile.name).open(mode="w") as fp: ... print(">", file=fp) ... print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) ... print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) diff --git a/pygmt/figure.py b/pygmt/figure.py index f908cff1b93..ebeccc90287 100644 --- a/pygmt/figure.py +++ b/pygmt/figure.py @@ -497,9 +497,7 @@ def _preview(self, fmt, dpi, as_bytes=False, **kwargs): fname = Path(self._preview_dir.name) / f"{self._name}.{fmt}" self.savefig(fname, dpi=dpi, **kwargs) if as_bytes: - with open(fname, "rb") as image: - preview = image.read() - return preview + return fname.read_bytes() return fname def _repr_png_(self): diff --git a/pygmt/helpers/tempfile.py b/pygmt/helpers/tempfile.py index bed79f352ae..3cbb88060df 100644 --- a/pygmt/helpers/tempfile.py +++ b/pygmt/helpers/tempfile.py @@ -88,11 +88,10 @@ def read(self, keep_tabs=False): content : str Content of the temporary file as a Unicode string. 
""" - with open(self.name, encoding="utf8") as tmpfile: - content = tmpfile.read() - if not keep_tabs: - content = content.replace("\t", " ") - return content + content = Path(self.name).read_text(encoding="utf8") + if not keep_tabs: + content = content.replace("\t", " ") + return content def loadtxt(self, **kwargs): """ diff --git a/pygmt/src/plot.py b/pygmt/src/plot.py index b4c01b9438e..0ed42569386 100644 --- a/pygmt/src/plot.py +++ b/pygmt/src/plot.py @@ -2,6 +2,8 @@ plot - Plot in two dimensions. """ +from pathlib import Path + from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( @@ -220,7 +222,7 @@ def plot( # noqa: PLR0912 elif kwargs.get("S") is None and kind == "file" and str(data).endswith(".gmt"): # checking that the data is a file path to set default style try: - with open(which(data), encoding="utf8") as file: + with Path(which(data)).open(encoding="utf8") as file: line = file.readline() if "@GMULTIPOINT" in line or "@GPOINT" in line: # if the file is gmt style and geometry is set to Point diff --git a/pygmt/src/plot3d.py b/pygmt/src/plot3d.py index 029820bcec4..71407a65c90 100644 --- a/pygmt/src/plot3d.py +++ b/pygmt/src/plot3d.py @@ -2,6 +2,8 @@ plot3d - Plot in three dimensions. """ +from pathlib import Path + from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( @@ -195,7 +197,7 @@ def plot3d( # noqa: PLR0912 elif kwargs.get("S") is None and kind == "file" and str(data).endswith(".gmt"): # checking that the data is a file path to set default style try: - with open(which(data), encoding="utf8") as file: + with Path(which(data)).open(encoding="utf8") as file: line = file.readline() if "@GMULTIPOINT" in line or "@GPOINT" in line: # if the file is gmt style and geometry is set to Point diff --git a/pygmt/tests/test_helpers.py b/pygmt/tests/test_helpers.py index 9b25c71aa0b..b805523f75d 100644 --- a/pygmt/tests/test_helpers.py +++ b/pygmt/tests/test_helpers.py @@ -132,8 +132,7 @@ def test_gmttempfile_read(): Make sure GMTTempFile.read() works. """ with GMTTempFile() as tmpfile: - with open(tmpfile.name, "w", encoding="utf8") as ftmp: - ftmp.write("in.dat: N = 2\t<1/3>\t<2/4>\n") + Path(tmpfile.name).write_text("in.dat: N = 2\t<1/3>\t<2/4>\n") assert tmpfile.read() == "in.dat: N = 2 <1/3> <2/4>\n" assert tmpfile.read(keep_tabs=True) == "in.dat: N = 2\t<1/3>\t<2/4>\n" diff --git a/pygmt/tests/test_legend.py b/pygmt/tests/test_legend.py index 9c4936d9984..8721bb66384 100644 --- a/pygmt/tests/test_legend.py +++ b/pygmt/tests/test_legend.py @@ -2,6 +2,8 @@ Test Figure.legend. """ +from pathlib import Path + import pytest from pygmt import Figure from pygmt.exceptions import GMTInvalidInput @@ -95,8 +97,7 @@ def test_legend_specfile(): """ with GMTTempFile() as specfile: - with open(specfile.name, "w", encoding="utf8") as file: - file.write(specfile_contents) + Path(specfile.name).write_text(specfile_contents) fig = Figure() fig.basemap(projection="x6i", region=[0, 1, 0, 1], frame=True) fig.legend(specfile.name, position="JTM+jCM+w5i") diff --git a/pygmt/tests/test_meca.py b/pygmt/tests/test_meca.py index bcf69f059f3..e54799711ad 100644 --- a/pygmt/tests/test_meca.py +++ b/pygmt/tests/test_meca.py @@ -2,6 +2,8 @@ Test Figure.meca. 
""" +from pathlib import Path + import numpy as np import pandas as pd import pytest @@ -72,13 +74,8 @@ def test_meca_spec_single_focalmecha_file(): fig = Figure() fig.basemap(region=[-1, 1, 4, 6], projection="M8c", frame=2) with GMTTempFile() as temp: - with open(temp.name, mode="w", encoding="utf8") as temp_file: - temp_file.write("0 5 0 0 90 0 5") - fig.meca( - spec=temp.name, - convention="aki", - scale="2.5c", - ) + Path(temp.name).write_text("0 5 0 0 90 0 5") + fig.meca(spec=temp.name, convention="aki", scale="2.5c") return fig diff --git a/pygmt/tests/test_plot.py b/pygmt/tests/test_plot.py index 76158bfe038..ecae491c6fa 100644 --- a/pygmt/tests/test_plot.py +++ b/pygmt/tests/test_plot.py @@ -487,8 +487,7 @@ def test_plot_ogrgmt_file_multipoint_default_style(func): # FEATURE_DATA 1 2 """ - with open(tmpfile.name, "w", encoding="utf8") as file: - file.write(gmt_file) + Path(tmpfile.name).write_text(gmt_file) fig = Figure() fig.plot( data=func(tmpfile.name), region=[0, 2, 1, 3], projection="X2c", frame=True @@ -507,8 +506,7 @@ def test_plot_ogrgmt_file_multipoint_non_default_style(): # FEATURE_DATA 1 2 """ - with open(tmpfile.name, "w", encoding="utf8") as file: - file.write(gmt_file) + Path(tmpfile.name).write_text(gmt_file) fig = Figure() fig.plot( data=tmpfile.name, diff --git a/pygmt/tests/test_plot3d.py b/pygmt/tests/test_plot3d.py index c37c899a30b..33f3c94812f 100644 --- a/pygmt/tests/test_plot3d.py +++ b/pygmt/tests/test_plot3d.py @@ -444,8 +444,7 @@ def test_plot3d_ogrgmt_file_multipoint_default_style(func): > 1 1 2 1.5 1.5 1""" - with open(tmpfile.name, "w", encoding="utf8") as file: - file.write(gmt_file) + Path(tmpfile.name).write_text(gmt_file) fig = Figure() fig.plot3d( data=func(tmpfile.name), @@ -470,8 +469,7 @@ def test_plot3d_ogrgmt_file_multipoint_non_default_style(): > 1 1 2 1.5 1.5 1""" - with open(tmpfile.name, "w", encoding="utf8") as file: - file.write(gmt_file) + Path(tmpfile.name).write_text(gmt_file) fig = Figure() fig.plot3d( data=tmpfile.name, diff --git a/pygmt/tests/test_text.py b/pygmt/tests/test_text.py index 3969c451e68..1ef6a19bc11 100644 --- a/pygmt/tests/test_text.py +++ b/pygmt/tests/test_text.py @@ -299,8 +299,7 @@ def test_text_angle_font_justify_from_textfile(): """ fig = Figure() with GMTTempFile(suffix=".txt") as tempfile: - with open(tempfile.name, "w", encoding="utf8") as tmpfile: - tmpfile.write("114 0.5 30 22p,Helvetica-Bold,black LM BORNEO") + Path(tempfile.name).write_text("114 0.5 30 22p,Helvetica-Bold,black LM BORNEO") fig.text( region=[113, 117.5, -0.5, 3], projection="M5c", diff --git a/pygmt/tests/test_x2sys_cross.py b/pygmt/tests/test_x2sys_cross.py index 49256953707..c9209bd254a 100644 --- a/pygmt/tests/test_x2sys_cross.py +++ b/pygmt/tests/test_x2sys_cross.py @@ -24,7 +24,7 @@ def _fixture_mock_x2sys_home(monkeypatch): Set the X2SYS_HOME environment variable to the current working directory for the test session. 
""" - monkeypatch.setenv("X2SYS_HOME", Path.cwd()) + monkeypatch.setenv("X2SYS_HOME", str(Path.cwd())) @pytest.fixture(scope="module", name="tracks") @@ -115,7 +115,7 @@ def test_x2sys_cross_input_two_dataframes(): ) # Add a time row to the x2sys fmtfile - with open(tmpdir_p / "xyz.fmt", mode="a", encoding="utf8") as fmtfile: + with (tmpdir_p / "xyz.fmt").open(mode="a", encoding="utf8") as fmtfile: fmtfile.write("time\ta\tN\t0\t1\t0\t%g\n") # Create pandas.DataFrame track tables @@ -175,10 +175,7 @@ def test_x2sys_cross_input_two_filenames(): # Create temporary xyz files for i in range(2): rng = np.random.default_rng(seed=i) - with open( - Path.cwd() / f"track_{i}.xyz", mode="w", encoding="utf8" - ) as fname: - np.savetxt(fname=fname, X=rng.random((10, 3))) + np.savetxt(fname=Path.cwd() / f"track_{i}.xyz", X=rng.random((10, 3))) output = x2sys_cross(tracks=["track_0.xyz", "track_1.xyz"], tag=tag, coe="e") diff --git a/pygmt/tests/test_x2sys_init.py b/pygmt/tests/test_x2sys_init.py index c0664110eda..70b1b8bc57c 100644 --- a/pygmt/tests/test_x2sys_init.py +++ b/pygmt/tests/test_x2sys_init.py @@ -15,7 +15,7 @@ def _fixture_mock_x2sys_home(monkeypatch): Set the X2SYS_HOME environment variable to the current working directory for the test session. """ - monkeypatch.setenv("X2SYS_HOME", Path.cwd()) + monkeypatch.setenv("X2SYS_HOME", str(Path.cwd())) @pytest.mark.usefixtures("mock_x2sys_home") @@ -30,11 +30,9 @@ def test_x2sys_init_region_spacing(): x2sys_init( tag=tag, fmtfile="xyz", force=True, region=[0, 10, 20, 30], spacing=[5, 5] ) - - with open(tmpdir_p / f"{tag}.tag", encoding="utf8") as tagpath: - tail_line = tagpath.readlines()[-1] - assert "-R0/10/20/30" in tail_line - assert "-I5/5" in tail_line + tail_line = (tmpdir_p / f"{tag}.tag").read_text().splitlines()[-1] + assert "-R0/10/20/30" in tail_line + assert "-I5/5" in tail_line @pytest.mark.benchmark @@ -54,7 +52,6 @@ def test_x2sys_init_units_gap(): gap=["tseconds", "de"], ) - with open(tmpdir_p / f"{tag}.tag", encoding="utf8") as tagpath: - tail_line = tagpath.readlines()[-1] - assert "-Nse -Nde" in tail_line - assert "-Wtseconds -Wde" in tail_line + tail_line = (tmpdir_p / f"{tag}.tag").read_text().splitlines()[-1] + assert "-Nse -Nde" in tail_line + assert "-Wtseconds -Wde" in tail_line diff --git a/pyproject.toml b/pyproject.toml index 0ae8a526d52..27637b2f6a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,6 +107,7 @@ select = [ "PIE", # flake8-pie "PL", # pylint "PT", # flake8-pytest-style + "PTH", # flake8-use-pathlib "RET", # flake8-return "RSE", # flake8-raise "RUF", # ruff-specific From 32e3cb36a0a2faaebd837ab99986431ee57e8060 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 22 Mar 2024 17:49:57 +0800 Subject: [PATCH 037/218] CI: Configure workflows to run on 'workflow_dispatch' event (#3133) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- .github/workflows/cache_data.yaml | 17 +++++++++++------ .github/workflows/check-links.yml | 1 + .github/workflows/ci_docs.yml | 1 + .github/workflows/ci_doctests.yaml | 1 + .github/workflows/ci_tests.yaml | 1 + .github/workflows/ci_tests_dev.yaml | 1 + .github/workflows/ci_tests_legacy.yaml | 1 + 7 files changed, 17 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cache_data.yaml b/.github/workflows/cache_data.yaml index 239fb703bed..c2a3ef28abe 100644 --- a/.github/workflows/cache_data.yaml +++ b/.github/workflows/cache_data.yaml @@ -1,12 +1,16 @@ # Cache GMT remote data files and upload as artifacts # -# This workflow downloads data 
files needed by PyGMT tests/documentation from -# the GMT data server and uploads as workflow artifacts which can be accessed -# by other GitHub Actions workflows. +# This workflow downloads data files needed by PyGMT tests/documentation from the GMT +# data server and uploads them as workflow artifacts, which can then be accessed by other +# GitHub Actions workflows. # -# It is scheduled to run every Sunday at 12:00 (UTC). If new remote files are -# needed urgently, maintainers can update the workflow file or the -# 'pygmt/helpers/caching.py' file to refresh the cache. +# It is scheduled to run every Sunday at 12:00 (UTC). If new remote files are needed +# urgently, maintainers can refresh the cache by one of the following methods: +# +# 1. Update this workflow file +# 2. Update the `pygmt/helpers/caching.py` file +# 3. Go to https://github.com/GenericMappingTools/pygmt/actions/workflows/cache_data.yaml +# and click the "Run workflow" button # name: Cache data @@ -16,6 +20,7 @@ on: paths: - 'pygmt/helpers/caching.py' - '.github/workflows/cache_data.yaml' + workflow_dispatch: # Schedule runs on 12 noon every Sunday schedule: - cron: '0 12 * * 0' diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index d6bfc339d32..7afc0aac564 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -10,6 +10,7 @@ name: Check Links on: # Uncomment the 'pull_request' line below to trigger the workflow in PR # pull_request: + workflow_dispatch: # Schedule runs on 12 noon every Sunday schedule: - cron: '0 12 * * 0' diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 40258c943f8..ac5f33d96d5 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -34,6 +34,7 @@ on: - 'examples/**' - 'README.md' - '.github/workflows/ci_docs.yml' + workflow_dispatch: release: types: - published diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index a53f7eb5c1d..9524a81f950 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -9,6 +9,7 @@ on: # push: # branches: [ main ] # pull_request: + workflow_dispatch: # Schedule weekly tests on Sunday schedule: - cron: '0 0 * * 0' diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 0690372aa69..8b9ec793063 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -33,6 +33,7 @@ on: paths: - 'pygmt/**' - '.github/workflows/ci_tests.yaml' + workflow_dispatch: release: types: - published diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 644a149a2c2..18aa30f3c86 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -20,6 +20,7 @@ on: paths: - 'pygmt/**' - '.github/workflows/ci_tests_dev.yaml' + workflow_dispatch: # Schedule tests on Monday/Wednesday/Friday schedule: - cron: '0 0 * * 1,3,5' diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index d971923b8dd..fee0e29ec8b 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -17,6 +17,7 @@ on: # paths: # - 'pygmt/**' # - '.github/workflows/ci_tests_legacy.yaml' + workflow_dispatch: # Schedule tests on Tuesday schedule: - cron: '0 0 * * 2' From 1eb6dec5d72a29bc00349459d1369eb0a0673a93 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 24 Mar 2024 02:05:57 +0800 Subject: [PATCH 038/218] Tests: Suppress warnings by explicitly 
setting output_type to 'file' (#3135) --- pygmt/tests/test_blockm.py | 1 + pygmt/tests/test_blockmedian.py | 1 + pygmt/tests/test_grdtrack.py | 4 +++- pygmt/tests/test_project.py | 1 + pygmt/tests/test_select.py | 1 + 5 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pygmt/tests/test_blockm.py b/pygmt/tests/test_blockm.py index 9af3264e3b0..b9f656e99ea 100644 --- a/pygmt/tests/test_blockm.py +++ b/pygmt/tests/test_blockm.py @@ -82,6 +82,7 @@ def test_blockmean_input_filename(): data="@tut_ship.xyz", spacing="5m", region=[245, 255, 20, 30], + output_type="file", outfile=tmpfile.name, ) assert output is None # check that output is None since outfile is set diff --git a/pygmt/tests/test_blockmedian.py b/pygmt/tests/test_blockmedian.py index a1111664065..6f3f24b3c77 100644 --- a/pygmt/tests/test_blockmedian.py +++ b/pygmt/tests/test_blockmedian.py @@ -79,6 +79,7 @@ def test_blockmedian_input_filename(): data="@tut_ship.xyz", spacing="5m", region=[245, 255, 20, 30], + output_type="file", outfile=tmpfile.name, ) assert output is None # check that output is None since outfile is set diff --git a/pygmt/tests/test_grdtrack.py b/pygmt/tests/test_grdtrack.py index e139171d948..f3862adeae0 100644 --- a/pygmt/tests/test_grdtrack.py +++ b/pygmt/tests/test_grdtrack.py @@ -70,7 +70,9 @@ def test_grdtrack_input_csvfile_and_dataarray(dataarray, expected_array): Run grdtrack by passing in a csvfile and xarray.DataArray as inputs. """ with GMTTempFile() as tmpfile: - output = grdtrack(points=POINTS_DATA, grid=dataarray, outfile=tmpfile.name) + output = grdtrack( + points=POINTS_DATA, grid=dataarray, output_type="file", outfile=tmpfile.name + ) assert output is None # check that output is None since outfile is set assert Path(tmpfile.name).stat().st_size > 0 # check that outfile exists output = np.loadtxt(tmpfile.name) diff --git a/pygmt/tests/test_project.py b/pygmt/tests/test_project.py index 29aa6b531dc..36917368c63 100644 --- a/pygmt/tests/test_project.py +++ b/pygmt/tests/test_project.py @@ -62,6 +62,7 @@ def test_project_output_filename(dataframe): center=[0, -1], azimuth=45, flat_earth=True, + output_type="file", outfile=tmpfile.name, ) assert output is None # check that output is None since outfile is set diff --git a/pygmt/tests/test_select.py b/pygmt/tests/test_select.py index f4cc72e0259..c05b2b25bfc 100644 --- a/pygmt/tests/test_select.py +++ b/pygmt/tests/test_select.py @@ -56,6 +56,7 @@ def test_select_input_filename(): data="@tut_ship.xyz", region=[250, 251, 26, 27], z_subregion=["-/-630", "-120/0+a"], + output_type="file", outfile=tmpfile.name, ) assert output is None # check that output is None since outfile is set From a166ae1d3981ea6cd8e3f789d5775923900f817b Mon Sep 17 00:00:00 2001 From: Michael Grund <23025878+michaelgrund@users.noreply.github.com> Date: Mon, 25 Mar 2024 11:14:59 +0100 Subject: [PATCH 039/218] Fix typo in colorbar example (#3138) --- examples/gallery/embellishments/colorbar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/gallery/embellishments/colorbar.py b/examples/gallery/embellishments/colorbar.py index fff012d00e8..2febbdea6d4 100644 --- a/examples/gallery/embellishments/colorbar.py +++ b/examples/gallery/embellishments/colorbar.py @@ -6,7 +6,7 @@ The colormap is set via the ``cmap`` parameter. A full list of available color palette tables can be found at :gmt-docs:`reference/cpts.html`. Use the ``frame`` parameter to add labels to the **x** and **y** axes -of the colorbar by appending **+l** followed by the desired text. 
To Add +of the colorbar by appending **+l** followed by the desired text. To add and adjust the annotations (**a**) and ticks (**f**) append the letter followed by the desired interval. The placement of the colorbar is set via the ``position`` parameter. There are the following options: From 754a52fdb5e1f5b9e486456049dfd0b2805ee965 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 26 Mar 2024 09:25:23 +0800 Subject: [PATCH 040/218] Add sequence_to_ctypes_array to convert a sequence to a ctypes array (#3136) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- pygmt/clib/conversion.py | 60 +++++++++++++++++++++++++--------------- pygmt/clib/session.py | 31 +++++++++++++-------- 2 files changed, 57 insertions(+), 34 deletions(-) diff --git a/pygmt/clib/conversion.py b/pygmt/clib/conversion.py index 27cda9f971c..bd19a38d591 100644 --- a/pygmt/clib/conversion.py +++ b/pygmt/clib/conversion.py @@ -2,7 +2,9 @@ Functions to convert data types into ctypes friendly formats. """ +import ctypes as ctp import warnings +from collections.abc import Sequence import numpy as np from pygmt.exceptions import GMTInvalidInput @@ -243,41 +245,55 @@ def as_c_contiguous(array): return array -def kwargs_to_ctypes_array(argument, kwargs, dtype): +def sequence_to_ctypes_array(sequence: Sequence, ctype, size: int) -> ctp.Array | None: """ - Convert an iterable argument from kwargs into a ctypes array variable. + Convert a sequence of numbers into a ctypes array variable. - If the argument is not present in kwargs, returns ``None``. + If the sequence is ``None``, returns ``None``. Otherwise, returns a ctypes array. + The function only works for sequences of numbers. For converting a sequence of + strings, use ``strings_to_ctypes_array`` instead. Parameters ---------- - argument : str - The name of the argument. - kwargs : dict - Dictionary of keyword arguments. - dtype : ctypes type - The ctypes array type (e.g., ``ctypes.c_double*4``) + sequence + The sequence to convert. If ``None``, returns ``None``. Otherwise, it must be a + sequence (e.g., list, tuple, numpy array). + ctype + The ctypes type of the array (e.g., ``ctypes.c_int``). + size + The size of the array. If the sequence is smaller than the size, the remaining + elements will be filled with zeros. If the sequence is larger than the size, an + exception will be raised. Returns ------- - ctypes_value : ctypes array or None + ctypes_array + The ctypes array variable. Examples -------- - - >>> import ctypes as ct - >>> value = kwargs_to_ctypes_array("bla", {"bla": [10, 10]}, ct.c_long * 2) - >>> type(value) - - >>> should_be_none = kwargs_to_ctypes_array( - ... "swallow", {"bla": 1, "foo": [20, 30]}, ct.c_int * 2 - ... ) - >>> print(should_be_none) + >>> import ctypes as ctp + >>> ctypes_array = sequence_to_ctypes_array([1, 2, 3], ctp.c_long, 3) + >>> type(ctypes_array) + + >>> ctypes_array[:] + [1, 2, 3] + >>> ctypes_array = sequence_to_ctypes_array([1, 2], ctp.c_long, 5) + >>> type(ctypes_array) + + >>> ctypes_array[:] + [1, 2, 0, 0, 0] + >>> ctypes_array = sequence_to_ctypes_array(None, ctp.c_long, 5) + >>> print(ctypes_array) None + >>> ctypes_array = sequence_to_ctypes_array([1, 2, 3, 4, 5, 6], ctp.c_long, 5) + Traceback (most recent call last): + ... 
+ IndexError: invalid index """ - if argument in kwargs: - return dtype(*kwargs[argument]) - return None + if sequence is None: + return None + return (ctype * size)(*sequence) def array_to_datetime(array): diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 1bea0e4b886..2caa8e55cd0 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -19,7 +19,7 @@ array_to_datetime, as_c_contiguous, dataarray_to_matrix, - kwargs_to_ctypes_array, + sequence_to_ctypes_array, vectors_to_arrays, ) from pygmt.clib.loading import load_libgmt @@ -628,7 +628,17 @@ def call_module(self, module, args): f"Module '{module}' failed with status code {status}:\n{self._error_message}" ) - def create_data(self, family, geometry, mode, **kwargs): + def create_data( + self, + family, + geometry, + mode, + dim=None, + ranges=None, + inc=None, + registration="GMT_GRID_NODE_REG", + pad=None, + ): """ Create an empty GMT data container. @@ -692,15 +702,13 @@ def create_data(self, family, geometry, mode, **kwargs): valid_modifiers=["GMT_GRID_IS_CARTESIAN", "GMT_GRID_IS_GEO"], ) geometry_int = self._parse_constant(geometry, valid=GEOMETRIES) - registration_int = self._parse_constant( - kwargs.get("registration", "GMT_GRID_NODE_REG"), valid=REGISTRATIONS - ) + registration_int = self._parse_constant(registration, valid=REGISTRATIONS) # Convert dim, ranges, and inc to ctypes arrays if given (will be None # if not given to represent NULL pointers) - dim = kwargs_to_ctypes_array("dim", kwargs, ctp.c_uint64 * 4) - ranges = kwargs_to_ctypes_array("ranges", kwargs, ctp.c_double * 4) - inc = kwargs_to_ctypes_array("inc", kwargs, ctp.c_double * 2) + dim = sequence_to_ctypes_array(dim, ctp.c_uint64, 4) + ranges = sequence_to_ctypes_array(ranges, ctp.c_double, 4) + inc = sequence_to_ctypes_array(inc, ctp.c_double, 2) # Use a NULL pointer (None) for existing data to indicate that the # container should be created empty. Fill it in later using put_vector @@ -714,7 +722,7 @@ def create_data(self, family, geometry, mode, **kwargs): ranges, inc, registration_int, - self._parse_pad(family, kwargs), + self._parse_pad(family, pad), None, ) @@ -723,7 +731,7 @@ def create_data(self, family, geometry, mode, **kwargs): return data_ptr - def _parse_pad(self, family, kwargs): + def _parse_pad(self, family, pad): """ Parse and return an appropriate value for pad if none is given. @@ -731,7 +739,6 @@ def _parse_pad(self, family, kwargs): (row or column major). Using the default pad will set it to column major and mess things up with the numpy arrays. 
""" - pad = kwargs.get("pad", None) if pad is None: pad = 0 if "MATRIX" in family else self["GMT_PAD_DEFAULT"] return pad @@ -1080,7 +1087,7 @@ def write_data(self, family, geometry, mode, wesn, output, data): self["GMT_IS_FILE"], geometry_int, self[mode], - (ctp.c_double * 6)(*wesn), + sequence_to_ctypes_array(wesn, ctp.c_double, 6), output.encode(), data, ) From 62eb5d6b9156612c89d4d0357904fd09e2dddc17 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 26 Mar 2024 09:28:42 +0800 Subject: [PATCH 041/218] Add strings_to_ctypes_array to convert a sequence of strings into a ctypes array (#3137) --- pygmt/clib/conversion.py | 26 ++++++++++++++++++++++++++ pygmt/clib/session.py | 14 +++++--------- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/pygmt/clib/conversion.py b/pygmt/clib/conversion.py index bd19a38d591..eb8daa61da1 100644 --- a/pygmt/clib/conversion.py +++ b/pygmt/clib/conversion.py @@ -296,6 +296,32 @@ def sequence_to_ctypes_array(sequence: Sequence, ctype, size: int) -> ctp.Array return (ctype * size)(*sequence) +def strings_to_ctypes_array(strings: Sequence[str]) -> ctp.Array: + """ + Convert a sequence (e.g., a list) of strings into a ctypes array. + + Parameters + ---------- + strings + A sequence of strings. + + Returns + ------- + ctypes_array + A ctypes array of strings. + + Examples + -------- + >>> strings = ["first", "second", "third"] + >>> ctypes_array = strings_to_ctypes_array(strings) + >>> type(ctypes_array) + + >>> [s.decode() for s in ctypes_array] + ['first', 'second', 'third'] + """ + return (ctp.c_char_p * len(strings))(*[s.encode() for s in strings]) + + def array_to_datetime(array): """ Convert a 1-D datetime array from various types into numpy.datetime64. diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 2caa8e55cd0..0640ba6ae20 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -20,6 +20,7 @@ as_c_contiguous, dataarray_to_matrix, sequence_to_ctypes_array, + strings_to_ctypes_array, vectors_to_arrays, ) from pygmt.clib.loading import load_libgmt @@ -897,13 +898,9 @@ def put_vector(self, dataset, column, vector): gmt_type = self._check_dtype_and_dim(vector, ndim=1) if gmt_type in (self["GMT_TEXT"], self["GMT_DATETIME"]): - vector_pointer = (ctp.c_char_p * len(vector))() if gmt_type == self["GMT_DATETIME"]: - vector_pointer[:] = np.char.encode( - np.datetime_as_string(array_to_datetime(vector)) - ) - else: - vector_pointer[:] = np.char.encode(vector) + vector = np.datetime_as_string(array_to_datetime(vector)) + vector_pointer = strings_to_ctypes_array(vector) else: vector_pointer = vector.ctypes.data_as(ctp.c_void_p) status = c_put_vector( @@ -960,13 +957,12 @@ def put_strings(self, dataset, family, strings): restype=ctp.c_int, ) - strings_pointer = (ctp.c_char_p * len(strings))() - strings_pointer[:] = np.char.encode(strings) - family_int = self._parse_constant( family, valid=FAMILIES, valid_modifiers=METHODS ) + strings_pointer = strings_to_ctypes_array(strings) + status = c_put_strings( self.session_pointer, family_int, dataset, strings_pointer ) From dc6b3d13f39cdd5991656f9ab34480f84f7d8160 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Mar 2024 09:40:51 +1300 Subject: [PATCH 042/218] Bump actions/setup-python from 5.0.0 to 5.1.0 (#3141) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.0.0 to 5.1.0. 
- [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v5.0.0...v5.1.0) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/format-command.yml | 2 +- .github/workflows/publish-to-pypi.yml | 2 +- .github/workflows/style_checks.yaml | 2 +- .github/workflows/type_checks.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index a2544d7ffb1..fc5fd4fc0c5 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -25,7 +25,7 @@ jobs: ref: ${{ github.event.client_payload.pull_request.head.ref }} # Setup Python environment - - uses: actions/setup-python@v5.0.0 + - uses: actions/setup-python@v5.1.0 with: python-version: '3.12' diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 17883789851..1a2b06b5e7c 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -51,7 +51,7 @@ jobs: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v5.0.0 + uses: actions/setup-python@v5.1.0 with: python-version: '3.12' diff --git a/.github/workflows/style_checks.yaml b/.github/workflows/style_checks.yaml index 87d95ce7913..036f642ec82 100644 --- a/.github/workflows/style_checks.yaml +++ b/.github/workflows/style_checks.yaml @@ -28,7 +28,7 @@ jobs: # Setup Python - name: Set up Python - uses: actions/setup-python@v5.0.0 + uses: actions/setup-python@v5.1.0 with: python-version: '3.12' diff --git a/.github/workflows/type_checks.yml b/.github/workflows/type_checks.yml index 7ecefe04277..4d0ec5c3494 100644 --- a/.github/workflows/type_checks.yml +++ b/.github/workflows/type_checks.yml @@ -37,7 +37,7 @@ jobs: # Setup Python - name: Set up Python - uses: actions/setup-python@v5.0.0 + uses: actions/setup-python@v5.1.0 with: python-version: '3.12' From 81db87e2cdedcaf27886acd18efdb4e93bd0ec96 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Mar 2024 09:41:20 +1300 Subject: [PATCH 043/218] Bump codecov/codecov-action from 4.1.0 to 4.1.1 (#3142) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.1.0 to 4.1.1. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4.1.0...v4.1.1) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 8b9ec793063..d5da30e7bb4 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -166,7 +166,7 @@ jobs: # Upload coverage to Codecov - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4.1.0 + uses: codecov/codecov-action@v4.1.1 with: file: ./coverage.xml # optional env_vars: OS,PYTHON,NUMPY From 4b3b3eb314e486bd3b6c49aa3d4e598c4041ecec Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 27 Mar 2024 10:16:43 +0800 Subject: [PATCH 044/218] Figure.plot: Refactor to increase code readability (#2742) --- pygmt/src/plot.py | 82 ++++++++++++++++++++++++----------------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/pygmt/src/plot.py b/pygmt/src/plot.py index 0ed42569386..3206c5c053a 100644 --- a/pygmt/src/plot.py +++ b/pygmt/src/plot.py @@ -209,48 +209,50 @@ def plot( # noqa: PLR0912 kwargs = self._preprocess(**kwargs) kind = data_kind(data, x, y) - extra_arrays = [] - if kwargs.get("S") is not None and kwargs["S"][0] in "vV" and direction is not None: - extra_arrays.extend(direction) - elif ( - kwargs.get("S") is None - and kind == "geojson" - and data.geom_type.isin(["Point", "MultiPoint"]).all() - ): # checking if the geometry of a geoDataFrame is Point or MultiPoint - kwargs["S"] = "s0.2c" - elif kwargs.get("S") is None and kind == "file" and str(data).endswith(".gmt"): - # checking that the data is a file path to set default style - try: - with Path(which(data)).open(encoding="utf8") as file: - line = file.readline() - if "@GMULTIPOINT" in line or "@GPOINT" in line: - # if the file is gmt style and geometry is set to Point - kwargs["S"] = "s0.2c" - except FileNotFoundError: - pass - if is_nonstr_iter(kwargs.get("G")): - if kind != "vectors": - raise GMTInvalidInput( - "Can't use arrays for fill if data is matrix or file." - ) - extra_arrays.append(kwargs["G"]) - del kwargs["G"] - if size is not None: - if kind != "vectors": - raise GMTInvalidInput( - "Can't use arrays for 'size' if data is a matrix or file." - ) - extra_arrays.append(size) + if kind == "vectors": # Add more columns for vectors input + # Parameters for vector styles + if ( + kwargs.get("S") is not None + and kwargs["S"][0] in "vV" + and is_nonstr_iter(direction) + ): + extra_arrays.extend(direction) + # Fill + if is_nonstr_iter(kwargs.get("G")): + extra_arrays.append(kwargs.get("G")) + del kwargs["G"] + # Size + if is_nonstr_iter(size): + extra_arrays.append(size) + # Intensity and transparency + for flag in ["I", "t"]: + if is_nonstr_iter(kwargs.get(flag)): + extra_arrays.append(kwargs.get(flag)) + kwargs[flag] = "" + else: + for name, value in [ + ("direction", direction), + ("fill", kwargs.get("G")), + ("size", size), + ("intensity", kwargs.get("I")), + ("transparency", kwargs.get("t")), + ]: + if is_nonstr_iter(value): + raise GMTInvalidInput(f"'{name}' can't be 1-D array if 'data' is used.") - for flag in ["I", "t"]: - if is_nonstr_iter(kwargs.get(flag)): - if kind != "vectors": - raise GMTInvalidInput( - f"Can't use arrays for {plot.aliases[flag]} if data is matrix or file." 
- ) - extra_arrays.append(kwargs[flag]) - kwargs[flag] = "" + # Set the default style if data has a geometry of Point or MultiPoint + if kwargs.get("S") is None: + if kind == "geojson" and data.geom_type.isin(["Point", "MultiPoint"]).all(): + kwargs["S"] = "s0.2c" + elif kind == "file" and str(data).endswith(".gmt"): # OGR_GMT file + try: + with Path(which(data)).open() as file: + line = file.readline() + if "@GMULTIPOINT" in line or "@GPOINT" in line: + kwargs["S"] = "s0.2c" + except FileNotFoundError: + pass with Session() as lib: with lib.virtualfile_in( From f0d4d21f7518b4dc10f503ec26e2e5b603d8891e Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 27 Mar 2024 10:27:34 +0800 Subject: [PATCH 045/218] Session.virtualfile_to_dataset: Add new parameters 'dtype'/'index_col' for pandas output (#3140) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- pygmt/clib/session.py | 17 ++++++++++++----- pygmt/datatypes/dataset.py | 25 +++++++++++++++++++++++-- pygmt/src/grdhisteq.py | 18 +++++++----------- 3 files changed, 42 insertions(+), 18 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 0640ba6ae20..42d74a815a2 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1747,6 +1747,8 @@ def virtualfile_to_dataset( vfname: str, output_type: Literal["pandas", "numpy", "file"] = "pandas", column_names: list[str] | None = None, + dtype: type | dict[str, type] | None = None, + index_col: str | int | None = None, ) -> pd.DataFrame | np.ndarray | None: """ Output a tabular dataset stored in a virtual file to a different format. @@ -1766,6 +1768,11 @@ def virtualfile_to_dataset( - ``"file"`` means the result was saved to a file and will return ``None``. column_names The column names for the :class:`pandas.DataFrame` output. + dtype + Data type for the columns of the :class:`pandas.DataFrame` output. Can be a + single type for all columns or a dictionary mapping column names to types. + index_col + Column to set as the index of the :class:`pandas.DataFrame` output. Returns ------- @@ -1854,13 +1861,13 @@ def virtualfile_to_dataset( return None # Read the virtual file as a GMT dataset and convert to pandas.DataFrame - result = self.read_virtualfile(vfname, kind="dataset").contents.to_dataframe() + result = self.read_virtualfile(vfname, kind="dataset").contents.to_dataframe( + column_names=column_names, + dtype=dtype, + index_col=index_col, + ) if output_type == "numpy": # numpy.ndarray output return result.to_numpy() - - # Assign column names - if column_names is not None: - result.columns = column_names return result # pandas.DataFrame output def extract_region(self): diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index a0d0547f3ca..274d2fee97c 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -143,7 +143,12 @@ class _GMT_DATASEGMENT(ctp.Structure): # noqa: N801 ("hidden", ctp.c_void_p), ] - def to_dataframe(self) -> pd.DataFrame: + def to_dataframe( + self, + column_names: list[str] | None = None, + dtype: type | dict[str, type] | None = None, + index_col: str | int | None = None, + ) -> pd.DataFrame: """ Convert a _GMT_DATASET object to a :class:`pandas.DataFrame` object. @@ -151,6 +156,16 @@ def to_dataframe(self) -> pd.DataFrame: the same. The same column in all segments of all tables are concatenated. The trailing text column is also concatenated as a single string column. 
+ Parameters + ---------- + column_names + A list of column names. + dtype + Data type. Can be a single type for all columns or a dictionary mapping + column names to types. + index_col + Column to set as index. + Returns ------- df @@ -211,5 +226,11 @@ def to_dataframe(self) -> pd.DataFrame: pd.Series(data=np.char.decode(textvector), dtype=pd.StringDtype()) ) - df = pd.concat(objs=vectors, axis=1) + df = pd.concat(objs=vectors, axis="columns") + if column_names is not None: # Assign column names + df.columns = column_names + if dtype is not None: + df = df.astype(dtype) + if index_col is not None: + df = df.set_index(index_col) return df diff --git a/pygmt/src/grdhisteq.py b/pygmt/src/grdhisteq.py index b0285e4e3d5..44d191a417e 100644 --- a/pygmt/src/grdhisteq.py +++ b/pygmt/src/grdhisteq.py @@ -238,18 +238,14 @@ def compute_bins( module="grdhisteq", args=build_arg_string(kwargs, infile=vingrd) ) - result = lib.virtualfile_to_dataset( + return lib.virtualfile_to_dataset( vfname=vouttbl, output_type=output_type, column_names=["start", "stop", "bin_id"], + dtype={ + "start": np.float32, + "stop": np.float32, + "bin_id": np.uint32, + }, + index_col="bin_id" if output_type == "pandas" else None, ) - if output_type == "pandas": - result = result.astype( - { - "start": np.float32, - "stop": np.float32, - "bin_id": np.uint32, - } - ) - return result.set_index("bin_id") - return result From 19c4f559fe0b2c08689882b42b3efaacd7f1060f Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 28 Mar 2024 16:05:44 +0800 Subject: [PATCH 046/218] gitignore: Ignore the sg_execution_times.rst file generated by Sphinx-Gallery (#3145) --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 7d46956d921..5fae9448de5 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,8 @@ doc/gallery/ doc/projections/ doc/tutorials/ doc/get_started/ +# doc/sg_execution_time.rst is auto-generated by Sphinx-Gallery +doc/sg_execution_times.rst # Jupyter Notebook .ipynb_checkpoints/ From dbbc1684ded29ec8d8be7db727abdffd62538d2b Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 28 Mar 2024 16:05:54 +0800 Subject: [PATCH 047/218] Figure.plot3d: Refactor to increase code readability (#3143) --- pygmt/src/plot3d.py | 85 +++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 41 deletions(-) diff --git a/pygmt/src/plot3d.py b/pygmt/src/plot3d.py index 71407a65c90..3e4e91b597d 100644 --- a/pygmt/src/plot3d.py +++ b/pygmt/src/plot3d.py @@ -184,48 +184,51 @@ def plot3d( # noqa: PLR0912 kwargs = self._preprocess(**kwargs) kind = data_kind(data, x, y, z) - extra_arrays = [] - if kwargs.get("S") is not None and kwargs["S"][0] in "vV" and direction is not None: - extra_arrays.extend(direction) - elif ( - kwargs.get("S") is None - and kind == "geojson" - and data.geom_type.isin(["Point", "MultiPoint"]).all() - ): # checking if the geometry of a geoDataFrame is Point or MultiPoint - kwargs["S"] = "u0.2c" - elif kwargs.get("S") is None and kind == "file" and str(data).endswith(".gmt"): - # checking that the data is a file path to set default style - try: - with Path(which(data)).open(encoding="utf8") as file: - line = file.readline() - if "@GMULTIPOINT" in line or "@GPOINT" in line: - # if the file is gmt style and geometry is set to Point - kwargs["S"] = "u0.2c" - except FileNotFoundError: - pass - if is_nonstr_iter(kwargs.get("G")): - if kind != "vectors": - raise GMTInvalidInput( - "Can't use arrays for fill if data is matrix or file." 
- ) - extra_arrays.append(kwargs["G"]) - del kwargs["G"] - if size is not None: - if kind != "vectors": - raise GMTInvalidInput( - "Can't use arrays for 'size' if data is a matrix or a file." - ) - extra_arrays.append(size) - - for flag in ["I", "t"]: - if is_nonstr_iter(kwargs.get(flag)): - if kind != "vectors": - raise GMTInvalidInput( - f"Can't use arrays for {plot3d.aliases[flag]} if data is matrix or file." - ) - extra_arrays.append(kwargs[flag]) - kwargs[flag] = "" + + if kind == "vectors": # Add more columns for vectors input + # Parameters for vector styles + if ( + kwargs.get("S") is not None + and kwargs["S"][0] in "vV" + and is_nonstr_iter(direction) + ): + extra_arrays.extend(direction) + # Fill + if is_nonstr_iter(kwargs.get("G")): + extra_arrays.append(kwargs.get("G")) + del kwargs["G"] + # Size + if is_nonstr_iter(size): + extra_arrays.append(size) + # Intensity and transparency + for flag in ["I", "t"]: + if is_nonstr_iter(kwargs.get(flag)): + extra_arrays.append(kwargs.get(flag)) + kwargs[flag] = "" + else: + for name, value in [ + ("direction", direction), + ("fill", kwargs.get("G")), + ("size", size), + ("intensity", kwargs.get("I")), + ("transparency", kwargs.get("t")), + ]: + if is_nonstr_iter(value): + raise GMTInvalidInput(f"'{name}' can't be 1-D array if 'data' is used.") + + # Set the default style if data has a geometry of Point or MultiPoint + if kwargs.get("S") is None: + if kind == "geojson" and data.geom_type.isin(["Point", "MultiPoint"]).all(): + kwargs["S"] = "u0.2c" + elif kind == "file" and str(data).endswith(".gmt"): # OGR_GMT file + try: + with Path(which(data)).open() as file: + line = file.readline() + if "@GMULTIPOINT" in line or "@GPOINT" in line: + kwargs["S"] = "u0.2c" + except FileNotFoundError: + pass with Session() as lib: with lib.virtualfile_in( From 85d4ed2e64a01baf052bcd82c0c9deaaf3af3c2a Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 29 Mar 2024 13:52:21 +0800 Subject: [PATCH 048/218] GMT_DATASET.to_dataframe: Return an empty DataFrame if a file contains no data (#3131) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/datatypes/dataset.py | 31 ++++++---- pygmt/tests/test_datatypes_dataset.py | 83 +++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 11 deletions(-) create mode 100644 pygmt/tests/test_datatypes_dataset.py diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index 274d2fee97c..7a61b7f3d91 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -3,7 +3,8 @@ """ import ctypes as ctp -from typing import ClassVar +from collections.abc import Mapping +from typing import Any, ClassVar import numpy as np import pandas as pd @@ -13,8 +14,8 @@ class _GMT_DATASET(ctp.Structure): # noqa: N801 """ GMT dataset structure for holding multiple tables (files). - This class is only meant for internal use by PyGMT and is not exposed to users. - See the GMT source code gmt_resources.h for the original C struct definitions. + This class is only meant for internal use and is not exposed to users. See the GMT + source code ``gmt_resources.h`` for the original C struct definitions. 
Examples -------- @@ -145,8 +146,8 @@ class _GMT_DATASEGMENT(ctp.Structure): # noqa: N801 def to_dataframe( self, - column_names: list[str] | None = None, - dtype: type | dict[str, type] | None = None, + column_names: pd.Index | None = None, + dtype: type | Mapping[Any, type] | None = None, index_col: str | int | None = None, ) -> pd.DataFrame: """ @@ -156,6 +157,9 @@ def to_dataframe( the same. The same column in all segments of all tables are concatenated. The trailing text column is also concatenated as a single string column. + If the object contains no data, an empty DataFrame will be returned (with the + column names and dtypes set if provided). + Parameters ---------- column_names @@ -200,8 +204,8 @@ def to_dataframe( >>> df.dtypes.to_list() [dtype('float64'), dtype('float64'), dtype('float64'), string[python]] """ - # Deal with numeric columns vectors = [] + # Deal with numeric columns for icol in range(self.n_columns): colvector = [] for itbl in range(self.n_tables): @@ -226,11 +230,16 @@ def to_dataframe( pd.Series(data=np.char.decode(textvector), dtype=pd.StringDtype()) ) - df = pd.concat(objs=vectors, axis="columns") - if column_names is not None: # Assign column names - df.columns = column_names - if dtype is not None: + if len(vectors) == 0: + # Return an empty DataFrame if no columns are found. + df = pd.DataFrame(columns=column_names) + else: + # Create a DataFrame object by concatenating multiple columns + df = pd.concat(objs=vectors, axis="columns") + if column_names is not None: # Assign column names + df.columns = column_names + if dtype is not None: # Set dtype for the whole dataset or individual columns df = df.astype(dtype) - if index_col is not None: + if index_col is not None: # Use a specific column as index df = df.set_index(index_col) return df diff --git a/pygmt/tests/test_datatypes_dataset.py b/pygmt/tests/test_datatypes_dataset.py new file mode 100644 index 00000000000..7861b6b3119 --- /dev/null +++ b/pygmt/tests/test_datatypes_dataset.py @@ -0,0 +1,83 @@ +""" +Tests for GMT_DATASET data type. +""" + +from pathlib import Path + +import pandas as pd +import pytest +from pygmt.clib import Session +from pygmt.helpers import GMTTempFile + + +def dataframe_from_pandas(filepath_or_buffer, sep=r"\s+", comment="#", header=None): + """ + Read tabular data as pandas.DataFrame object using pandas.read_csv(). + + The parameters have the same meaning as in ``pandas.read_csv()``. + """ + try: + df = pd.read_csv(filepath_or_buffer, sep=sep, comment=comment, header=header) + except pd.errors.EmptyDataError: + # Return an empty DataFrame if the file contains no data + return pd.DataFrame() + + # By default, pandas reads text strings with whitespaces as multiple columns, but + # GMT concatenates all trailing text as a single string column. Need do find all + # string columns (with dtype="object") and combine them into a single string column. + string_columns = df.select_dtypes(include=["object"]).columns + if len(string_columns) > 1: + df[string_columns[0]] = df[string_columns].apply(lambda x: " ".join(x), axis=1) + df = df.drop(string_columns[1:], axis=1) + # Convert 'object' to 'string' type + df = df.convert_dtypes( + convert_string=True, + convert_integer=False, + convert_boolean=False, + convert_floating=False, + ) + return df + + +def dataframe_from_gmt(fname): + """ + Read tabular data as pandas.DataFrame using GMT virtual file. 
+ """ + with Session() as lib: + with lib.virtualfile_out(kind="dataset") as vouttbl: + lib.call_module("read", f"{fname} {vouttbl} -Td") + df = lib.virtualfile_to_dataset(vfname=vouttbl) + return df + + +@pytest.mark.benchmark +def test_dataset(): + """ + Test the basic functionality of GMT_DATASET. + """ + with GMTTempFile(suffix=".txt") as tmpfile: + with Path(tmpfile.name).open(mode="w") as fp: + print(">", file=fp) + print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) + print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) + print(">", file=fp) + print("7.0 8.0 9.0 TEXT8 TEXT90", file=fp) + print("10.0 11.0 12.0 TEXT123 TEXT456789", file=fp) + + df = dataframe_from_gmt(tmpfile.name) + expected_df = dataframe_from_pandas(tmpfile.name, comment=">") + pd.testing.assert_frame_equal(df, expected_df) + + +def test_dataset_empty(): + """ + Make sure that an empty DataFrame is returned if a file contains no data. + """ + with GMTTempFile(suffix=".txt") as tmpfile: + with Path(tmpfile.name).open(mode="w") as fp: + print("# This is a comment line.", file=fp) + + df = dataframe_from_gmt(tmpfile.name) + assert df.empty # Empty DataFrame + expected_df = dataframe_from_pandas(tmpfile.name) + pd.testing.assert_frame_equal(df, expected_df) From bbf909d9ea6e1cf48af3566e490b14d10de09785 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 31 Mar 2024 13:34:57 +0800 Subject: [PATCH 049/218] Session.call_module: Support passing a list of argument strings (#3139) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> --- pygmt/clib/session.py | 56 ++++++++++++++++++++++++++++++---------- pygmt/tests/test_clib.py | 40 ++++++++++++++++++++++++---- 2 files changed, 78 insertions(+), 18 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 42d74a815a2..a9ea8c65f27 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -592,25 +592,36 @@ def get_common(self, option): # the function return value (i.e., 'status') return status - def call_module(self, module, args): + def call_module(self, module: str, args: str | list[str]): """ Call a GMT module with the given arguments. - Makes a call to ``GMT_Call_Module`` from the C API using mode - ``GMT_MODULE_CMD`` (arguments passed as a single string). + Wraps ``GMT_Call_Module``. - Most interactions with the C API are done through this function. + The ``GMT_Call_Module`` API function supports passing module arguments in three + different ways: + + 1. Pass a single string that contains whitespace-separated module arguments. + 2. Pass a list of strings and each string contains a module argument. + 3. Pass a list of ``GMT_OPTION`` data structure. + + Both options 1 and 2 are implemented in this function, but option 2 is preferred + because it can correctly handle special characters like whitespaces and + quotation marks in module arguments. Parameters ---------- - module : str - Module name (``'coast'``, ``'basemap'``, etc). - args : str - String with the command line arguments that will be passed to the - module (for example, ``'-R0/5/0/10 -JM'``). + module + The GMT module name to be called (``"coast"``, ``"basemap"``, etc). + args + Module arguments that will be passed to the GMT module. It can be either + a single string (e.g., ``"-R0/5/0/10 -JX10c -BWSen+t'My Title'"``) or a list + of strings (e.g., ``["-R0/5/0/10", "-JX10c", "-BWSEN+tMy Title"]``). 
Raises ------ + GMTInvalidInput + If the ``args`` argument is not a string or a list of strings. GMTCLibError If the returned status code of the function is non-zero. """ @@ -620,10 +631,29 @@ def call_module(self, module, args): restype=ctp.c_int, ) - mode = self["GMT_MODULE_CMD"] - status = c_call_module( - self.session_pointer, module.encode(), mode, args.encode() - ) + # 'args' can be (1) a single string or (2) a list of strings. + argv: bytes | ctp.Array[ctp.c_char_p] | None + if isinstance(args, str): + # 'args' is a single string that contains whitespace-separated arguments. + # In this way, we need to correctly handle option arguments that contain + # whitespaces or quotation marks. It's used in PyGMT <= v0.11.0 but is no + # longer recommended. + mode = self["GMT_MODULE_CMD"] + argv = args.encode() + elif isinstance(args, list): + # 'args' is a list of strings and each string contains a module argument. + # In this way, GMT can correctly handle option arguments with whitespaces or + # quotation marks. This is the preferred way to pass arguments to the GMT + # API and is used for PyGMT >= v0.12.0. + mode = len(args) # 'mode' is the number of arguments. + # Pass a null pointer if no arguments are specified. + argv = strings_to_ctypes_array(args) if mode != 0 else None + else: + raise GMTInvalidInput( + "'args' must be either a string or a list of strings." + ) + + status = c_call_module(self.session_pointer, module.encode(), mode, argv) if status != 0: raise GMTCLibError( f"Module '{module}' failed with status code {status}:\n{self._error_message}" diff --git a/pygmt/tests/test_clib.py b/pygmt/tests/test_clib.py index 2f655435ee4..201d7d27fb7 100644 --- a/pygmt/tests/test_clib.py +++ b/pygmt/tests/test_clib.py @@ -133,9 +133,20 @@ def test_destroy_session_fails(): @pytest.mark.benchmark def test_call_module(): """ - Run a command to see if call_module works. + Call a GMT module by passing a list of arguments. + """ + with clib.Session() as lib: + with GMTTempFile() as out_fname: + lib.call_module("info", [str(POINTS_DATA), "-C", f"->{out_fname.name}"]) + assert Path(out_fname.name).stat().st_size > 0 + output = out_fname.read().strip() + assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" + + +def test_call_module_argument_string(): + """ + Call a GMT module by passing a single argument string. """ - out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module("info", f"{POINTS_DATA} -C ->{out_fname.name}") @@ -144,9 +155,28 @@ def test_call_module(): assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" +def test_call_module_empty_argument(): + """ + call_module should work if an empty string or an empty list is passed as argument. + """ + with clib.Session() as lib: + lib.call_module("defaults", "") + with clib.Session() as lib: + lib.call_module("defaults", []) + + +def test_call_module_invalid_argument_type(): + """ + call_module only accepts a string or a list of strings as module arguments. + """ + with clib.Session() as lib: + with pytest.raises(GMTInvalidInput): + lib.call_module("get", ("FONT_TITLE", "FONT_TAG")) + + def test_call_module_invalid_arguments(): """ - Fails for invalid module arguments. + call_module should fail for invalid module arguments. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): @@ -155,7 +185,7 @@ def test_call_module_invalid_arguments(): def test_call_module_invalid_name(): """ - Fails when given bad input. 
+ call_module should fail when an invalid module name is given. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): @@ -164,7 +194,7 @@ def test_call_module_invalid_name(): def test_call_module_error_message(): """ - Check is the GMT error message was captured. + Check if the GMT error message was captured when calling a module. """ with clib.Session() as lib: with pytest.raises(GMTCLibError) as exc_info: From d40c440047005594a7a48b4033cf12720f93bc56 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 1 Apr 2024 12:48:30 +0800 Subject: [PATCH 050/218] SPEC 0: Bump minimum supported versions to xarray 2022.06 (#3151) --- .github/workflows/ci_tests.yaml | 2 +- doc/minversions.md | 2 +- environment.yml | 2 +- pyproject.toml | 2 +- requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index d5da30e7bb4..490e576ff67 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -70,7 +70,7 @@ jobs: - python-version: '3.10' numpy-version: '1.23' pandas-version: '=1.5' - xarray-version: '=2022.03' + xarray-version: '=2022.06' optional-packages: '' - python-version: '3.12' numpy-version: '1.26' diff --git a/doc/minversions.md b/doc/minversions.md index aafa03284e4..b2fa02cd9f4 100644 --- a/doc/minversions.md +++ b/doc/minversions.md @@ -12,7 +12,7 @@ after their initial release. | PyGMT Version | GMT | Python | NumPy | Pandas | Xarray | |---|---|---|---|---|---| -| [Dev][]* [[Docs][Docs Dev]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.03 | +| [Dev][]* [[Docs][Docs Dev]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | | [v0.11.0][] [[Docs][Docs v0.11.0]] | >=6.3.0 | >=3.9 | >=1.23 | | | | [v0.10.0][] [[Docs][Docs v0.10.0]] | >=6.3.0 | >=3.9 | >=1.22 | | | | [v0.9.0][] [[Docs][Docs v0.9.0]] | >=6.3.0 | >=3.8 | >=1.21 | | | diff --git a/environment.yml b/environment.yml index 08d54e1cb41..e0a9ad0da8b 100644 --- a/environment.yml +++ b/environment.yml @@ -9,7 +9,7 @@ dependencies: - ghostscript=10.03.0 - numpy>=1.23 - pandas>=1.5 - - xarray>=2022.03 + - xarray>=2022.06 - netCDF4 - packaging # Optional dependencies diff --git a/pyproject.toml b/pyproject.toml index 27637b2f6a9..0b9daf9ab16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ classifiers = [ dependencies = [ "numpy>=1.23", "pandas>=1.5", - "xarray>=2022.03", + "xarray>=2022.06", "netCDF4", "packaging", ] diff --git a/requirements.txt b/requirements.txt index 0adb02054dd..000de0692eb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # Required packages numpy>=1.23 pandas>=1.5 -xarray>=2022.03 +xarray>=2022.06 netCDF4 packaging From 65cc190a9d668d73c99672c9444a8a40c42dbb23 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 1 Apr 2024 13:56:01 +0800 Subject: [PATCH 051/218] GMT_GRID_HEADER: Parse grid header and add grid properties (#3134) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/datatypes/grid.py | 98 +-------------- pygmt/datatypes/header.py | 247 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 248 insertions(+), 97 deletions(-) create mode 100644 pygmt/datatypes/header.py diff --git a/pygmt/datatypes/grid.py b/pygmt/datatypes/grid.py index dfb3c096d20..e67f44ebef5 
100644 --- a/pygmt/datatypes/grid.py +++ b/pygmt/datatypes/grid.py @@ -1,104 +1,8 @@ """ -Wrapper for the GMT_GRID data type and the GMT_GRID_HEADER data structure. +Wrapper for the GMT_GRID data type. """ import ctypes as ctp -from typing import ClassVar - -# Constants for lengths of grid header variables. -# -# Note: Ideally we should be able to get these constants from the GMT shared library -# using the ``lib["GMT_GRID_UNIT_LEN80"]`` syntax, but it causes cyclic import error. -# So we have to hardcode the values here. -GMT_GRID_UNIT_LEN80 = 80 -GMT_GRID_TITLE_LEN80 = 80 -GMT_GRID_COMMAND_LEN320 = 320 -GMT_GRID_REMARK_LEN160 = 160 - -# GMT uses single-precision for grids by default, but can be built to use -# double-precision. Currently, only single-precision is supported. -gmt_grdfloat = ctp.c_float - - -class _GMT_GRID_HEADER(ctp.Structure): # noqa: N801 - """ - GMT grid header structure for metadata about the grid. - - The class is used in the `GMT_GRID`/`GMT_IMAGE`/`GMT_CUBE` data structure. See the - GMT source code gmt_resources.h for the original C structure definitions. - """ - - _fields_: ClassVar = [ - # Number of columns - ("n_columns", ctp.c_uint32), - # Number of rows - ("n_rows", ctp.c_uint32), - # Grid registration, 0 for gridline and 1 for pixel - ("registration", ctp.c_uint32), - # Minimum/maximum x and y coordinates - ("wesn", ctp.c_double * 4), - # Minimum z value - ("z_min", ctp.c_double), - # Maximum z value - ("z_max", ctp.c_double), - # x and y increments - ("inc", ctp.c_double * 2), - # Grid values must be multiplied by this factor - ("z_scale_factor", ctp.c_double), - # After scaling, add this offset - ("z_add_offset", ctp.c_double), - # Units in x-directions, in the form "long_name [units]" - ("x_units", ctp.c_char * GMT_GRID_UNIT_LEN80), - # Units in y-direction, in the form "long_name [units]" - ("y_units", ctp.c_char * GMT_GRID_UNIT_LEN80), - # Grid value units, in the form "long_name [units]" - ("z_units", ctp.c_char * GMT_GRID_UNIT_LEN80), - # Name of data set - ("title", ctp.c_char * GMT_GRID_TITLE_LEN80), - # Name of generating command - ("command", ctp.c_char * GMT_GRID_COMMAND_LEN320), - # Comments for this data set - ("remark", ctp.c_char * GMT_GRID_REMARK_LEN160), - # Below are items used internally by GMT - # Number of data points (n_columns * n_rows) [paddings are excluded] - ("nm", ctp.c_size_t), - # Actual number of items (not bytes) required to hold this grid (mx * my), - # per band (for images) - ("size", ctp.c_size_t), - # Bits per data value (e.g., 32 for ints/floats; 8 for bytes). - # Only used for ERSI ArcInfo ASCII Exchange grids. - ("bits", ctp.c_uint), - # For complex grid. - # 0 for normal - # GMT_GRID_IS_COMPLEX_REAL = real part of complex grid - # GMT_GRID_IS_COMPLEX_IMAG = imag part of complex grid - ("complex_mode", ctp.c_uint), - # Grid format - ("type", ctp.c_uint), - # Number of bands [1]. Used with GMT_IMAGE containers - ("n_bands", ctp.c_uint), - # Actual x-dimension in memory. mx = n_columns + pad[0] + pad[1] - ("mx", ctp.c_uint), - # Actual y-dimension in memory. 
my = n_rows + pad[2] + pad[3] - ("my", ctp.c_uint), - # Paddings on west, east, south, north sides [2,2,2,2] - ("pad", ctp.c_uint * 4), - # Three or four char codes T|B R|C S|R|S (grd) or B|L|P + A|a (img) - # describing array layout in mem and interleaving - ("mem_layout", ctp.c_char * 4), - # Missing value as stored in grid file - ("nan_value", gmt_grdfloat), - # 0.0 for gridline grids and 0.5 for pixel grids - ("xy_off", ctp.c_double), - # Referencing system string in PROJ.4 format - ("ProjRefPROJ4", ctp.c_char_p), - # Referencing system string in WKT format - ("ProjRefWKT", ctp.c_char_p), - # Referencing system EPSG code - ("ProjRefEPSG", ctp.c_int), - # Lower-level information for GMT use only - ("hidden", ctp.c_void_p), - ] class _GMT_GRID(ctp.Structure): # noqa: N801 diff --git a/pygmt/datatypes/header.py b/pygmt/datatypes/header.py new file mode 100644 index 00000000000..04e10ac0c72 --- /dev/null +++ b/pygmt/datatypes/header.py @@ -0,0 +1,247 @@ +""" +Wrapper for the GMT_GRID_HEADER data structure and related utility functions. +""" + +import ctypes as ctp +from typing import Any, ClassVar + +import numpy as np + +# Constants for lengths of grid header variables. +# +# Note: Ideally we should be able to get these constants from the GMT shared library +# using the ``lib["GMT_GRID_UNIT_LEN80"]`` syntax, but it causes cyclic import error. +# So we have to hardcode the values here. +GMT_GRID_UNIT_LEN80 = 80 +GMT_GRID_TITLE_LEN80 = 80 +GMT_GRID_VARNAME_LEN80 = 80 +GMT_GRID_COMMAND_LEN320 = 320 +GMT_GRID_REMARK_LEN160 = 160 + +# GMT uses single-precision for grids by default, but can be built to use +# double-precision. Currently, only single-precision is supported. +gmt_grdfloat = ctp.c_float + + +def _parse_nameunits(nameunits: str) -> tuple[str, str | None]: + """ + Get the long_name and units attributes from x_units/y_units/z_units in the grid + header. + + In the GMT grid header, the x_units/y_units/z_units are strings in the form of + ``long_name [units]``, in which both ``long_name`` and ``units`` are standard + netCDF attributes defined by CF conventions. The ``[units]`` part is optional. + + This function parses the x_units/y_units/z_units strings and gets the ``long_name`` + and ``units`` attributes. + + Parameters + ---------- + nameunits + The x_units/y_units/z_units strings in the grid header. + + Returns + ------- + (long_name, units) + Tuple of netCDF attributes ``long_name`` and ``units``. ``units`` may be + ``None``. + + Examples + -------- + >>> _parse_nameunits("longitude [degrees_east]") + ('longitude', 'degrees_east') + >>> _parse_nameunits("latitude [degrees_north]") + ('latitude', 'degrees_north') + >>> _parse_nameunits("x") + ('x', None) + >>> _parse_nameunits("y") + ('y', None) + >>> + """ + parts = nameunits.split("[") + long_name = parts[0].strip() + units = parts[1].strip("]").strip() if len(parts) > 1 else None + return long_name, units + + +class _GMT_GRID_HEADER(ctp.Structure): # noqa: N801 + """ + GMT grid header structure for metadata about the grid. + + The class is used in the `GMT_GRID`/`GMT_IMAGE`/`GMT_CUBE` data structure. See the + GMT source code gmt_resources.h for the original C structure definitions. 
+ """ + + _fields_: ClassVar = [ + # Number of columns + ("n_columns", ctp.c_uint32), + # Number of rows + ("n_rows", ctp.c_uint32), + # Grid registration, 0 for gridline and 1 for pixel + ("registration", ctp.c_uint32), + # Minimum/maximum x and y coordinates + ("wesn", ctp.c_double * 4), + # Minimum z value + ("z_min", ctp.c_double), + # Maximum z value + ("z_max", ctp.c_double), + # x and y increments + ("inc", ctp.c_double * 2), + # Grid values must be multiplied by this factor + ("z_scale_factor", ctp.c_double), + # After scaling, add this offset + ("z_add_offset", ctp.c_double), + # Units in x-directions, in the form "long_name [units]" + ("x_units", ctp.c_char * GMT_GRID_UNIT_LEN80), + # Units in y-direction, in the form "long_name [units]" + ("y_units", ctp.c_char * GMT_GRID_UNIT_LEN80), + # Grid value units, in the form "long_name [units]" + ("z_units", ctp.c_char * GMT_GRID_UNIT_LEN80), + # Name of data set + ("title", ctp.c_char * GMT_GRID_TITLE_LEN80), + # Name of generating command + ("command", ctp.c_char * GMT_GRID_COMMAND_LEN320), + # Comments for this data set + ("remark", ctp.c_char * GMT_GRID_REMARK_LEN160), + # Below are items used internally by GMT + # Number of data points (n_columns * n_rows) [paddings are excluded] + ("nm", ctp.c_size_t), + # Actual number of items (not bytes) required to hold this grid (mx * my), + # per band (for images) + ("size", ctp.c_size_t), + # Bits per data value (e.g., 32 for ints/floats; 8 for bytes). + # Only used for ERSI ArcInfo ASCII Exchange grids. + ("bits", ctp.c_uint), + # For complex grid. + # 0 for normal + # GMT_GRID_IS_COMPLEX_REAL = real part of complex grid + # GMT_GRID_IS_COMPLEX_IMAG = imag part of complex grid + ("complex_mode", ctp.c_uint), + # Grid format + ("type", ctp.c_uint), + # Number of bands [1]. Used with GMT_IMAGE containers + ("n_bands", ctp.c_uint), + # Actual x-dimension in memory. mx = n_columns + pad[0] + pad[1] + ("mx", ctp.c_uint), + # Actual y-dimension in memory. my = n_rows + pad[2] + pad[3] + ("my", ctp.c_uint), + # Paddings on west, east, south, north sides [2,2,2,2] + ("pad", ctp.c_uint * 4), + # Three or four char codes T|B R|C S|R|S (grd) or B|L|P + A|a (img) + # describing array layout in mem and interleaving + ("mem_layout", ctp.c_char * 4), + # Missing value as stored in grid file + ("nan_value", gmt_grdfloat), + # 0.0 for gridline grids and 0.5 for pixel grids + ("xy_off", ctp.c_double), + # Referencing system string in PROJ.4 format + ("ProjRefPROJ4", ctp.c_char_p), + # Referencing system string in WKT format + ("ProjRefWKT", ctp.c_char_p), + # Referencing system EPSG code + ("ProjRefEPSG", ctp.c_int), + # Lower-level information for GMT use only + ("hidden", ctp.c_void_p), + ] + + def _parse_dimensions(self): + """ + Get dimension names and attributes from the grid header. + + For a 2-D grid, the dimension names are set to "y" and "x" by default. The + attributes for each dimension are parsed from the grid header following GMT + source codes. See the GMT functions "gmtnc_put_units", "gmtnc_get_units" and + "gmtnc_grd_info" for reference. + """ + # Default dimension names. + dims = ("y", "x") + nameunits = (self.y_units, self.x_units) + + # Dictionary for dimension attributes with the dimension name as the key. + attrs = {dim: {} for dim in dims} + # Dictionary for mapping the default dimension names to the actual names. + newdims = {dim: dim for dim in dims} + # Loop over dimensions and get the dimension name and attributes from header. 
+ for dim, nameunit in zip(dims, nameunits, strict=True): + # The long_name and units attributes. + long_name, units = _parse_nameunits(nameunit.decode()) + if long_name: + attrs[dim]["long_name"] = long_name + if units: + attrs[dim]["units"] = units + + # "degrees_east"/"degrees_north" are the units for geographic coordinates + # following CF-conventions. + if units == "degrees_east": + attrs[dim]["standard_name"] = "longitude" + newdims[dim] = "lon" + elif units == "degrees_north": + attrs[dim]["standard_name"] = "latitude" + newdims[dim] = "lat" + + # Axis attributes are "X"/"Y"/"Z"/"T" for horizontal/vertical/time axis. + attrs[dim]["axis"] = dim.upper() + idx = 2 if dim == "y" else 0 + attrs[dim]["actual_range"] = np.array(self.wesn[idx : idx + 2]) + + # Save the lists of dimension names and attributes in the _nc attribute. + self._nc = { + "dims": [newdims[dim] for dim in dims], + "attrs": [attrs[dim] for dim in dims], + } + + @property + def name(self) -> str: + """ + Name of the grid. + """ + return "z" + + @property + def data_attrs(self) -> dict[str, Any]: + """ + Attributes for the data variable from the grid header. + """ + attrs: dict[str, Any] = {} + attrs["Conventions"] = "CF-1.7" + attrs["title"] = self.title.decode() + attrs["history"] = self.command.decode() + attrs["description"] = self.remark.decode() + long_name, units = _parse_nameunits(self.z_units.decode()) + if long_name: + attrs["long_name"] = long_name + if units: + attrs["units"] = units + attrs["actual_range"] = np.array([self.z_min, self.z_max]) + return attrs + + @property + def dims(self) -> list: + """ + List of dimension names. + """ + if not hasattr(self, "_nc"): + self._parse_dimensions() + return self._nc["dims"] + + @property + def dim_attrs(self) -> list[dict]: + """ + List of attributes for each dimension. + """ + if not hasattr(self, "_nc"): + self._parse_dimensions() + return self._nc["attrs"] + + @property + def gtype(self) -> int: + """ + Grid type. 0 for Cartesian grid and 1 for geographic grid. + + The grid is assumed to be Cartesian by default. If the x/y dimensions are named + "lon"/"lat" or have units "degrees_east"/"degrees_north", then the grid is + assumed to be geographic. + """ + dims = self.dims + gtype = 1 if dims[0] == "lat" and dims[1] == "lon" else 0 + return gtype From 82684e3b127b537a77a99d4f92337567d7c76ecc Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 3 Apr 2024 02:28:44 +0800 Subject: [PATCH 052/218] Refactor the test_call_module_empty_argument test (#3153) --- pygmt/tests/test_clib.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pygmt/tests/test_clib.py b/pygmt/tests/test_clib.py index 201d7d27fb7..59a9f745983 100644 --- a/pygmt/tests/test_clib.py +++ b/pygmt/tests/test_clib.py @@ -159,10 +159,11 @@ def test_call_module_empty_argument(): """ call_module should work if an empty string or an empty list is passed as argument. """ + Figure() with clib.Session() as lib: - lib.call_module("defaults", "") + lib.call_module("logo", "") with clib.Session() as lib: - lib.call_module("defaults", []) + lib.call_module("logo", []) def test_call_module_invalid_argument_type(): From 566e3ff0584a03c0f971cd76ab1fad4737137a57 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 10:48:03 +1300 Subject: [PATCH 053/218] Bump CodSpeedHQ/action from 2.2.1 to 2.3.0 (#3155) Bumps [CodSpeedHQ/action](https://github.com/codspeedhq/action) from 2.2.1 to 2.3.0. 
- [Release notes](https://github.com/codspeedhq/action/releases) - [Changelog](https://github.com/CodSpeedHQ/action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codspeedhq/action/compare/v2.2.1...v2.3.0) --- updated-dependencies: - dependency-name: CodSpeedHQ/action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 7d052f9ed0e..0f9fcaf5a8d 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -86,7 +86,7 @@ jobs: # Run the benchmark tests - name: Run benchmarks - uses: CodSpeedHQ/action@v2.2.1 + uses: CodSpeedHQ/action@v2.3.0 with: run: | python -c "import pygmt; pygmt.show_versions()" From d35741c8eca21a08205e1677208e7c9f643d1539 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 10:48:35 +1300 Subject: [PATCH 054/218] Bump conda-incubator/setup-miniconda from 3.0.1 to 3.0.3 (#3156) Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 3.0.1 to 3.0.3. - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/v3.0.1...v3.0.3) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 0f9fcaf5a8d..279adbb893c 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -44,7 +44,7 @@ jobs: # Install Miniconda with conda-forge dependencies - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v3.0.1 + uses: conda-incubator/setup-miniconda@v3.0.3 with: auto-activate-base: true activate-environment: "" # base environment From b490b0fae17f5f51eacd7aa12307185ee51d2d8e Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 3 Apr 2024 15:31:08 +0800 Subject: [PATCH 055/218] Change the dev dependency 'matplotlib' to 'matplotlib-base' to reduce local environment size (#3158) --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index e0a9ad0da8b..6791196c1e7 100644 --- a/environment.yml +++ b/environment.yml @@ -27,7 +27,7 @@ dependencies: - codespell - ruff>=0.3.0 # Dev dependencies (unit testing) - - matplotlib + - matplotlib-base - pytest-cov - pytest-doctestplus - pytest-mpl From 61939382f28a6a04bdd4c88c4766cc7d4ca0ef72 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 6 Apr 2024 16:51:11 +0800 Subject: [PATCH 056/218] Session.virtualfile_to_dataset: Add 'strings' output type for the array of trailing texts (#3157) --- pygmt/clib/session.py | 29 +++++++++++++++++++------ pygmt/datatypes/dataset.py | 44 ++++++++++++++++++++++---------------- 2 files changed, 48 insertions(+), 25 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 
a9ea8c65f27..eedb4572992 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1775,7 +1775,7 @@ def read_virtualfile( def virtualfile_to_dataset( self, vfname: str, - output_type: Literal["pandas", "numpy", "file"] = "pandas", + output_type: Literal["pandas", "numpy", "file", "strings"] = "pandas", column_names: list[str] | None = None, dtype: type | dict[str, type] | None = None, index_col: str | int | None = None, @@ -1796,6 +1796,7 @@ def virtualfile_to_dataset( - ``"pandas"`` will return a :class:`pandas.DataFrame` object. - ``"numpy"`` will return a :class:`numpy.ndarray` object. - ``"file"`` means the result was saved to a file and will return ``None``. + - ``"strings"`` will return the trailing text only as an array of strings. column_names The column names for the :class:`pandas.DataFrame` output. dtype @@ -1841,6 +1842,16 @@ def virtualfile_to_dataset( ... assert result is None ... assert Path(outtmp.name).stat().st_size > 0 ... + ... # strings output + ... with Session() as lib: + ... with lib.virtualfile_out(kind="dataset") as vouttbl: + ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... outstr = lib.virtualfile_to_dataset( + ... vfname=vouttbl, output_type="strings" + ... ) + ... assert isinstance(outstr, np.ndarray) + ... assert outstr.dtype.kind in ("S", "U") + ... ... # numpy output ... with Session() as lib: ... with lib.virtualfile_out(kind="dataset") as vouttbl: @@ -1869,6 +1880,9 @@ def virtualfile_to_dataset( ... column_names=["col1", "col2", "col3", "coltext"], ... ) ... assert isinstance(outpd2, pd.DataFrame) + >>> outstr + array(['TEXT1 TEXT23', 'TEXT4 TEXT567', 'TEXT8 TEXT90', + 'TEXT123 TEXT456789'], dtype='>> outnp array([[1.0, 2.0, 3.0, 'TEXT1 TEXT23'], [4.0, 5.0, 6.0, 'TEXT4 TEXT567'], @@ -1890,11 +1904,14 @@ def virtualfile_to_dataset( if output_type == "file": # Already written to file, so return None return None - # Read the virtual file as a GMT dataset and convert to pandas.DataFrame - result = self.read_virtualfile(vfname, kind="dataset").contents.to_dataframe( - column_names=column_names, - dtype=dtype, - index_col=index_col, + # Read the virtual file as a _GMT_DATASET object + result = self.read_virtualfile(vfname, kind="dataset").contents + + if output_type == "strings": # strings output + return result.to_strings() + + result = result.to_dataframe( + column_names=column_names, dtype=dtype, index_col=index_col ) if output_type == "numpy": # numpy.ndarray output return result.to_numpy() diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index 7a61b7f3d91..7d0b1d469db 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -144,6 +144,17 @@ class _GMT_DATASEGMENT(ctp.Structure): # noqa: N801 ("hidden", ctp.c_void_p), ] + def to_strings(self) -> np.ndarray[Any, np.dtype[np.str_]]: + """ + Convert the trailing text column to an array of strings. + """ + textvector = [] + for table in self.table[: self.n_tables]: + for segment in table.contents.segment[: table.contents.n_segments]: + if segment.contents.text: + textvector.extend(segment.contents.text[: segment.contents.n_rows]) + return np.char.decode(textvector) if textvector else np.array([], dtype=str) + def to_dataframe( self, column_names: pd.Index | None = None, @@ -194,7 +205,11 @@ def to_dataframe( ... with lib.virtualfile_out(kind="dataset") as vouttbl: ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") ... ds = lib.read_virtualfile(vouttbl, kind="dataset") + ... text = ds.contents.to_strings() ... 
df = ds.contents.to_dataframe() + >>> text + array(['TEXT1 TEXT23', 'TEXT4 TEXT567', 'TEXT8 TEXT90', + 'TEXT123 TEXT456789'], dtype='>> df 0 1 2 3 0 1.0 2.0 3.0 TEXT1 TEXT23 @@ -207,28 +222,19 @@ def to_dataframe( vectors = [] # Deal with numeric columns for icol in range(self.n_columns): - colvector = [] - for itbl in range(self.n_tables): - dtbl = self.table[itbl].contents - for iseg in range(dtbl.n_segments): - dseg = dtbl.segment[iseg].contents - colvector.append( - np.ctypeslib.as_array(dseg.data[icol], shape=(dseg.n_rows,)) - ) + colvector = [ + np.ctypeslib.as_array( + seg.contents.data[icol], shape=(seg.contents.n_rows,) + ) + for tbl in self.table[: self.n_tables] + for seg in tbl.contents.segment[: tbl.contents.n_segments] + ] vectors.append(pd.Series(data=np.concatenate(colvector))) # Deal with trailing text column - textvector = [] - for itbl in range(self.n_tables): - dtbl = self.table[itbl].contents - for iseg in range(dtbl.n_segments): - dseg = dtbl.segment[iseg].contents - if dseg.text: - textvector.extend(dseg.text[: dseg.n_rows]) - if textvector: - vectors.append( - pd.Series(data=np.char.decode(textvector), dtype=pd.StringDtype()) - ) + textvector = self.to_strings() + if len(textvector) != 0: + vectors.append(pd.Series(data=textvector, dtype=pd.StringDtype())) if len(vectors) == 0: # Return an empty DataFrame if no columns are found. From 6069ebc06e753eab2900ae199926c8bfeb80ebcc Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 7 Apr 2024 19:59:00 +0800 Subject: [PATCH 057/218] pygmt.which: Refactor to get rid of temporary files (#3148) --- pygmt/src/which.py | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/pygmt/src/which.py b/pygmt/src/which.py index 1cfd7df0c39..c4afd1d006d 100644 --- a/pygmt/src/which.py +++ b/pygmt/src/which.py @@ -3,19 +3,13 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @use_alias(G="download", V="verbose") @kwargs_to_strings(fname="sequence_space") -def which(fname, **kwargs): +def which(fname, **kwargs) -> str | list[str]: r""" Find the full path to specified files. @@ -56,7 +50,7 @@ def which(fname, **kwargs): Returns ------- - path : str or list + path The path(s) to the file(s), depending on the parameters used. Raises @@ -64,14 +58,19 @@ def which(fname, **kwargs): FileNotFoundError If the file is not found. 
""" - with GMTTempFile() as tmpfile: - with Session() as lib: + with Session() as lib: + with lib.virtualfile_out(kind="dataset") as vouttbl: lib.call_module( module="which", - args=build_arg_string(kwargs, infile=fname, outfile=tmpfile.name), + args=build_arg_string(kwargs, infile=fname, outfile=vouttbl), ) - path = tmpfile.read().strip() - if not path: - _fname = fname.replace(" ", "', '") - raise FileNotFoundError(f"File(s) '{_fname}' not found.") - return path.split("\n") if "\n" in path else path + paths = lib.virtualfile_to_dataset(vfname=vouttbl, output_type="strings") + + match paths.size: + case 0: + _fname = fname.replace(" ", "', '") + raise FileNotFoundError(f"File(s) '{_fname}' not found.") + case 1: + return paths[0] + case _: + return paths.tolist() From a32049d1e18983685303e9a64b4bfaf4234ab805 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 7 Apr 2024 19:59:29 +0800 Subject: [PATCH 058/218] Wrap GMT_Inquire_VirtualFile to get the family of virtualfiles (#3152) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- pygmt/clib/session.py | 29 +++++++++++++++++++++++++++ pygmt/tests/test_clib_virtualfiles.py | 28 ++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index eedb4572992..b54ad76f887 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1709,6 +1709,35 @@ def virtualfile_out( with self.open_virtualfile(family, geometry, "GMT_OUT", None) as vfile: yield vfile + def inquire_virtualfile(self, vfname: str) -> int: + """ + Get the family of a virtual file. + + Parameters + ---------- + vfname + Name of the virtual file to inquire. + + Returns + ------- + family + The integer value for the family of the virtual file. + + Examples + -------- + >>> from pygmt.clib import Session + >>> with Session() as lib: + ... with lib.virtualfile_out(kind="dataset") as vfile: + ... family = lib.inquire_virtualfile(vfile) + ... assert family == lib["GMT_IS_DATASET"] + """ + c_inquire_virtualfile = self.get_libgmt_func( + "GMT_Inquire_VirtualFile", + argtypes=[ctp.c_void_p, ctp.c_char_p], + restype=ctp.c_uint, + ) + return c_inquire_virtualfile(self.session_pointer, vfname.encode()) + def read_virtualfile( self, vfname: str, kind: Literal["dataset", "grid", None] = None ): diff --git a/pygmt/tests/test_clib_virtualfiles.py b/pygmt/tests/test_clib_virtualfiles.py index 7e7822f99e5..26ebfc5d379 100644 --- a/pygmt/tests/test_clib_virtualfiles.py +++ b/pygmt/tests/test_clib_virtualfiles.py @@ -379,3 +379,31 @@ def test_virtualfile_from_vectors_arraylike(): bounds = "\t".join([f"<{min(i):.0f}/{max(i):.0f}>" for i in (x, y, z)]) expected = f": N = {size}\t{bounds}\n" assert output == expected + + +def test_inquire_virtualfile(): + """ + Test that the inquire_virtualfile method returns the correct family. + + Currently, only output virtual files are tested. 
+ """ + with clib.Session() as lib: + for family in [ + "GMT_IS_DATASET", + "GMT_IS_DATASET|GMT_VIA_MATRIX", + "GMT_IS_DATASET|GMT_VIA_VECTOR", + ]: + with lib.open_virtualfile( + family, "GMT_IS_PLP", "GMT_OUT|GMT_IS_REFERENCE", None + ) as vfile: + assert lib.inquire_virtualfile(vfile) == lib["GMT_IS_DATASET"] + + for family, geometry in [ + ("GMT_IS_GRID", "GMT_IS_SURFACE"), + ("GMT_IS_IMAGE", "GMT_IS_SURFACE"), + ("GMT_IS_CUBE", "GMT_IS_VOLUME"), + ("GMT_IS_PALETTE", "GMT_IS_NONE"), + ("GMT_IS_POSTSCRIPT", "GMT_IS_NONE"), + ]: + with lib.open_virtualfile(family, geometry, "GMT_OUT", None) as vfile: + assert lib.inquire_virtualfile(vfile) == lib[family] From 79cda63a1a17ea0bf607429f0585e504942e3d15 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 8 Apr 2024 11:08:14 +0800 Subject: [PATCH 059/218] Add function build_arg_list for building arguments list from keyword dictionaries (#3149) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/helpers/__init__.py | 1 + pygmt/helpers/decorators.py | 2 +- pygmt/helpers/utils.py | 105 +++++++++++++++++++++++++++++++++++- 3 files changed, 106 insertions(+), 2 deletions(-) diff --git a/pygmt/helpers/__init__.py b/pygmt/helpers/__init__.py index d0356798d97..128b1e31a18 100644 --- a/pygmt/helpers/__init__.py +++ b/pygmt/helpers/__init__.py @@ -16,6 +16,7 @@ ) from pygmt.helpers.utils import ( args_in_kwargs, + build_arg_list, build_arg_string, data_kind, is_nonstr_iter, diff --git a/pygmt/helpers/decorators.py b/pygmt/helpers/decorators.py index 28041911d23..c6f19693781 100644 --- a/pygmt/helpers/decorators.py +++ b/pygmt/helpers/decorators.py @@ -622,7 +622,7 @@ def kwargs_to_strings(**conversions): The strings are what GMT expects from command line arguments. Boolean arguments and None are not converted and will be processed in the - ``build_arg_string`` function. + ``build_arg_list`` function. You can also specify other conversions to specific arguments. diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index 75514d2077d..204519c3cf2 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -11,7 +11,8 @@ import sys import time import webbrowser -from collections.abc import Iterable +from collections.abc import Iterable, Sequence +from typing import Any import xarray as xr from pygmt.exceptions import GMTInvalidInput @@ -315,6 +316,108 @@ def non_ascii_to_octal(argstr): return argstr.translate(str.maketrans(mapping)) +def build_arg_list( + kwdict: dict[str, Any], + confdict: dict[str, str] | None = None, + infile: str | pathlib.PurePath | Sequence[str | pathlib.PurePath] | None = None, + outfile: str | pathlib.PurePath | None = None, +) -> list[str]: + r""" + Convert keyword dictionaries and input/output files into a list of GMT arguments. + + Make sure all values in ``kwdict`` have been previously converted to a string + representation using the ``kwargs_to_strings`` decorator. The only exceptions are + ``True``, ``False`` and ``None``. + + Any remaining lists or tuples will be interpreted as multiple entries for the same + parameter. For example, the kwargs entry ``"B": ["xa", "yaf"]`` will be + converted to ``["-Bxa", "-Byaf"]``. + + Parameters + ---------- + kwdict + A dictionary containing parsed keyword arguments. + confdict + A dictionary containing configurable GMT parameters. 
+ infile + The input file or a list of input files. + outfile + The output file. + + Returns + ------- + args + The list of command line arguments that will be passed to GMT modules. The + keyword arguments are sorted alphabetically, followed by GMT configuration + key-value pairs, with optional input file(s) at the beginning and optional + output file at the end. + + Examples + -------- + >>> build_arg_list(dict(A=True, B=False, C=None, D=0, E=200, F="", G="1/2/3/4")) + ['-A', '-D0', '-E200', '-F', '-G1/2/3/4'] + >>> build_arg_list(dict(A="1/2/3/4", B=["xaf", "yaf", "WSen"], C=("1p", "2p"))) + ['-A1/2/3/4', '-BWSen', '-Bxaf', '-Byaf', '-C1p', '-C2p'] + >>> print( + ... build_arg_list( + ... dict( + ... B=["af", "WSne+tBlank Space"], + ... F='+t"Empty Spaces"', + ... l="'Void Space'", + ... ) + ... ) + ... ) + ['-BWSne+tBlank Space', '-Baf', '-F+t"Empty Spaces"', "-l'Void Space'"] + >>> print( + ... build_arg_list( + ... dict(A="0", B=True, C="rainbow"), + ... confdict=dict(FORMAT_DATE_MAP="o dd"), + ... infile="input.txt", + ... outfile="output.txt", + ... ) + ... ) + ['input.txt', '-A0', '-B', '-Crainbow', '--FORMAT_DATE_MAP=o dd', '->output.txt'] + >>> print( + ... build_arg_list( + ... dict(A="0", B=True), + ... confdict=dict(FORMAT_DATE_MAP="o dd"), + ... infile=["f1.txt", "f2.txt"], + ... outfile="out.txt", + ... ) + ... ) + ['f1.txt', 'f2.txt', '-A0', '-B', '--FORMAT_DATE_MAP=o dd', '->out.txt'] + >>> print(build_arg_list(dict(R="1/2/3/4", J="X4i", watre=True))) + Traceback (most recent call last): + ... + pygmt.exceptions.GMTInvalidInput: Unrecognized parameter 'watre'. + """ + gmt_args = [] + for key, value in kwdict.items(): + if len(key) > 2: # Raise an exception for unrecognized options + raise GMTInvalidInput(f"Unrecognized parameter '{key}'.") + if value is None or value is False: # Exclude arguments that are None or False + pass + elif value is True: + gmt_args.append(f"-{key}") + elif is_nonstr_iter(value): + gmt_args.extend(non_ascii_to_octal(f"-{key}{_value}") for _value in value) + else: + gmt_args.append(non_ascii_to_octal(f"-{key}{value}")) + gmt_args = sorted(gmt_args) + + if confdict: + gmt_args.extend(f"--{key}={value}" for key, value in confdict.items()) + + if infile: # infile can be a single file or a list of files + if isinstance(infile, str | pathlib.PurePath): + gmt_args = [str(infile), *gmt_args] + else: + gmt_args = [str(_file) for _file in infile] + gmt_args + if outfile: + gmt_args.append(f"->{outfile}") + return gmt_args + + def build_arg_string(kwdict, confdict=None, infile=None, outfile=None): r""" Convert keyword dictionaries and input/output files into a GMT argument string. From 426ae2daf160aba06ce5ec90e0be99094390886a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 04:41:23 +0800 Subject: [PATCH 060/218] Bump shogo82148/actions-upload-release-asset from 1.7.4 to 1.7.5 (#3161) Bumps [shogo82148/actions-upload-release-asset](https://github.com/shogo82148/actions-upload-release-asset) from 1.7.4 to 1.7.5. - [Release notes](https://github.com/shogo82148/actions-upload-release-asset/releases) - [Commits](https://github.com/shogo82148/actions-upload-release-asset/compare/v1.7.4...v1.7.5) --- updated-dependencies: - dependency-name: shogo82148/actions-upload-release-asset dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release-baseline-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-baseline-images.yml b/.github/workflows/release-baseline-images.yml index 9f47ad7680b..98b40e4c66b 100644 --- a/.github/workflows/release-baseline-images.yml +++ b/.github/workflows/release-baseline-images.yml @@ -35,7 +35,7 @@ jobs: shasum -a 256 baseline-images.zip - name: Upload baseline image as a release asset - uses: shogo82148/actions-upload-release-asset@v1.7.4 + uses: shogo82148/actions-upload-release-asset@v1.7.5 with: upload_url: ${{ github.event.release.upload_url }} asset_path: baseline-images.zip From 16339753634de75b0b165bc8a1beead37d1c3ad2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 09:57:27 +0800 Subject: [PATCH 061/218] Bump codecov/codecov-action from 4.1.1 to 4.3.0 (#3162) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.1.1 to 4.3.0. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4.1.1...v4.3.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 490e576ff67..70f3d5773cc 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -166,7 +166,7 @@ jobs: # Upload coverage to Codecov - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4.1.1 + uses: codecov/codecov-action@v4.3.0 with: file: ./coverage.xml # optional env_vars: OS,PYTHON,NUMPY From b3e0048e8b86c03a91480645531322e928ca6e0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Wed, 10 Apr 2024 15:39:40 +0200 Subject: [PATCH 062/218] Switch to official GitHub action for managing app tokens (#3165) --- .github/workflows/format-command.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index fc5fd4fc0c5..68813e2b13c 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -11,11 +11,11 @@ jobs: runs-on: ubuntu-latest steps: # Generate token from GenericMappingTools bot - - uses: tibdex/github-app-token@v2 + - uses: actions/create-github-app-token@v1.9.3 id: generate-token with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.APP_PRIVATE_KEY }} # Checkout the pull request branch - uses: actions/checkout@v4.1.1 From 70d666e38d6bfac4132fc66d3c7adbeb57dc9588 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 11 Apr 2024 08:01:08 +0800 Subject: [PATCH 063/218] CI: Use gh to create issues in Check Links workflow (#3166) --- .github/workflows/check-links.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/check-links.yml 
b/.github/workflows/check-links.yml index 7afc0aac564..cb6881b4c5f 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -68,7 +68,8 @@ jobs: - name: Create Issue From File if: env.lychee_exit_code != 0 - uses: peter-evans/create-issue-from-file@v5 - with: - title: Link Checker Report on ${{ steps.date.outputs.date }} - content-filepath: ./lychee/out.md + run: | + title="Link Checker Report on ${{ steps.date.outputs.date }}" + gh issue create --title "$title" --body-file ./lychee/out.md + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} From 35ed27ac312acc4baab206cb9a573db0424c1c62 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 16 Apr 2024 12:04:37 +0800 Subject: [PATCH 064/218] GMT_DATASET: Add workaround for None values in trailing text (#3174) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- .github/workflows/cache_data.yaml | 8 ++++---- pygmt/datatypes/dataset.py | 12 ++++++++++++ pygmt/tests/test_datatypes_dataset.py | 26 ++++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cache_data.yaml b/.github/workflows/cache_data.yaml index c2a3ef28abe..0630262b483 100644 --- a/.github/workflows/cache_data.yaml +++ b/.github/workflows/cache_data.yaml @@ -80,7 +80,7 @@ jobs: with: name: gmt-cache path: | - ~/.gmt/cache - ~/.gmt/server - ~/.gmt/gmt_data_server.txt - ~/.gmt/gmt_hash_server.txt + ~/.gmt/cache + ~/.gmt/server + ~/.gmt/gmt_data_server.txt + ~/.gmt/gmt_hash_server.txt diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index 7d0b1d469db..daf0073aefe 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -3,6 +3,7 @@ """ import ctypes as ctp +import warnings from collections.abc import Mapping from typing import Any, ClassVar @@ -153,6 +154,17 @@ def to_strings(self) -> np.ndarray[Any, np.dtype[np.str_]]: for segment in table.contents.segment[: table.contents.n_segments]: if segment.contents.text: textvector.extend(segment.contents.text[: segment.contents.n_rows]) + if None in textvector: + # Workaround for upstream GMT bug reported in + # https://github.com/GenericMappingTools/pygmt/issues/3170. + msg = ( + "The trailing text column contains `None' values and has been replaced" + "with empty strings to avoid TypeError exceptions. " + "It's likely caused by an upstream GMT API bug. " + "Please consider reporting to us." + ) + warnings.warn(msg, category=RuntimeWarning, stacklevel=1) + textvector = [item if item is not None else b"" for item in textvector] return np.char.decode(textvector) if textvector else np.array([], dtype=str) def to_dataframe( diff --git a/pygmt/tests/test_datatypes_dataset.py b/pygmt/tests/test_datatypes_dataset.py index 7861b6b3119..e78782ada37 100644 --- a/pygmt/tests/test_datatypes_dataset.py +++ b/pygmt/tests/test_datatypes_dataset.py @@ -6,6 +6,7 @@ import pandas as pd import pytest +from pygmt import which from pygmt.clib import Session from pygmt.helpers import GMTTempFile @@ -81,3 +82,28 @@ def test_dataset_empty(): assert df.empty # Empty DataFrame expected_df = dataframe_from_pandas(tmpfile.name) pd.testing.assert_frame_equal(df, expected_df) + + +def test_dataset_to_strings_with_none_values(): + """ + Test that None values in the trailing text doesn't raise an excetion. + + Due to a likely upstream bug, the trailing texts sometimes can be ``None`` when + downloading tiled grids. The temporary workaround is to replace any None values with + an empty string. 
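A toy illustration (standalone, not PyGMT code) of the substitution made above: ctypes may hand back ``None`` for a missing trailing-text row, and ``numpy.char.decode`` cannot decode ``None``, so each ``None`` is first mapped to an empty bytes string.

import numpy as np

textvector = [b"TEXT1", None, b"TEXT3"]  # what ctypes may return
cleaned = [item if item is not None else b"" for item in textvector]
print(np.char.decode(cleaned))  # ['TEXT1' '' 'TEXT3']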
+ + See the bug report at https://github.com/GenericMappingTools/pygmt/issues/3170. + """ + tiles = ["@N30W120.earth_relief_15s_p.nc", "@N00E000.earth_relief_15s_p.nc"] + paths = which(fname=tiles, download="a") + assert len(paths) == 2 + # 'paths' may contain an empty string or not, depending on if the tiles are cached. + if "" not in paths: # Contains two valid paths. + # Delete the cached tiles and try again. + for path in paths: + Path(path).unlink() + with pytest.warns(expected_warning=RuntimeWarning) as record: + paths = which(fname=tiles, download="a") + assert len(record) == 1 + assert len(paths) == 2 + assert "" in paths From 809880c6715b4756dcd5340833f8fdc3ad31e1d4 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 16 Apr 2024 21:56:26 +0800 Subject: [PATCH 065/218] Wrap GMT's standard data type GMT_GRID for grids and refactor wrappers to use virtualfiles for output grids (#2398) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- doc/api/index.rst | 1 + pygmt/clib/session.py | 71 ++++++- pygmt/datatypes/grid.py | 192 +++++++++++++++++- pygmt/helpers/decorators.py | 10 +- pygmt/src/binstats.py | 32 ++- pygmt/src/dimfilter.py | 32 ++- pygmt/src/grdclip.py | 32 ++- pygmt/src/grdfill.py | 31 ++- pygmt/src/grdfilter.py | 32 ++- pygmt/src/grdgradient.py | 37 ++-- pygmt/src/grdhisteq.py | 24 +-- pygmt/src/grdlandmask.py | 22 +- pygmt/src/grdproject.py | 31 ++- pygmt/src/grdsample.py | 32 ++- pygmt/src/nearneighbor.py | 36 ++-- pygmt/src/sph2grd.py | 32 ++- pygmt/src/sphdistance.py | 32 ++- pygmt/src/sphinterpolate.py | 33 ++- pygmt/src/surface.py | 34 ++-- pygmt/src/triangulate.py | 31 ++- pygmt/src/xyz2grd.py | 34 ++-- .../test_datasets_load_remote_datasets.py | 7 +- pygmt/tests/test_sphinterpolate.py | 2 +- 23 files changed, 481 insertions(+), 339 deletions(-) diff --git a/doc/api/index.rst b/doc/api/index.rst index 2b82f2461f5..646fb49886a 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -292,6 +292,7 @@ Python objects to and from GMT virtual files: clib.Session.virtualfile_in clib.Session.virtualfile_out clib.Session.virtualfile_to_dataset + clib.Session.virtualfile_to_raster Low level access (these are mostly used by the :mod:`pygmt.clib` package): diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index b54ad76f887..8a8b52df8e5 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -14,6 +14,7 @@ import numpy as np import pandas as pd +import xarray as xr from packaging.version import Version from pygmt.clib.conversion import ( array_to_datetime, @@ -1739,7 +1740,9 @@ def inquire_virtualfile(self, vfname: str) -> int: return c_inquire_virtualfile(self.session_pointer, vfname.encode()) def read_virtualfile( - self, vfname: str, kind: Literal["dataset", "grid", None] = None + self, + vfname: str, + kind: Literal["dataset", "grid", "image", "cube", None] = None, ): """ Read data from a virtual file and optionally cast into a GMT data container. @@ -1798,6 +1801,8 @@ def read_virtualfile( # _GMT_DATASET). 
if kind is None: # Return the ctypes void pointer return pointer + if kind in ["image", "cube"]: + raise NotImplementedError(f"kind={kind} is not supported yet.") dtype = {"dataset": _GMT_DATASET, "grid": _GMT_GRID}[kind] return ctp.cast(pointer, ctp.POINTER(dtype)) @@ -1946,6 +1951,70 @@ def virtualfile_to_dataset( return result.to_numpy() return result # pandas.DataFrame output + def virtualfile_to_raster( + self, + vfname: str, + kind: Literal["grid", "image", "cube", None] = "grid", + outgrid: str | None = None, + ) -> xr.DataArray | None: + """ + Output raster data stored in a virtual file to an :class:`xarray.DataArray` + object. + + The raster data can be a grid, an image or a cube. + + Parameters + ---------- + vfname + The virtual file name that stores the result grid/image/cube. + kind + Type of the raster data. Valid values are ``"grid"``, ``"image"``, + ``"cube"`` or ``None``. If ``None``, will inquire the data type from the + virtual file name. + outgrid + Name of the output grid/image/cube. If specified, it means the raster data + was already saved into an actual file and will return ``None``. + + Returns + ------- + result + The result grid/image/cube. If ``outgrid`` is specified, return ``None``. + + Examples + -------- + >>> from pathlib import Path + >>> from pygmt.clib import Session + >>> from pygmt.helpers import GMTTempFile + >>> with Session() as lib: + ... # file output + ... with GMTTempFile(suffix=".nc") as tmpfile: + ... outgrid = tmpfile.name + ... with lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd: + ... lib.call_module("read", f"@earth_relief_01d_g {voutgrd} -Tg") + ... result = lib.virtualfile_to_raster( + ... vfname=voutgrd, outgrid=outgrid + ... ) + ... assert result == None + ... assert Path(outgrid).stat().st_size > 0 + ... + ... # xarray.DataArray output + ... outgrid = None + ... with lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd: + ... lib.call_module("read", f"@earth_relief_01d_g {voutgrd} -Tg") + ... result = lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) + ... assert isinstance(result, xr.DataArray) + """ + if outgrid is not None: + return None + if kind is None: # Inquire the data family from the virtualfile + family = self.inquire_virtualfile(vfname) + kind = { # type: ignore[assignment] + self["GMT_IS_GRID"]: "grid", + self["GMT_IS_IMAGE"]: "image", + self["GMT_IS_CUBE"]: "cube", + }[family] + return self.read_virtualfile(vfname, kind=kind).contents.to_dataarray() + def extract_region(self): """ Extract the WESN bounding box of the currently active figure. diff --git a/pygmt/datatypes/grid.py b/pygmt/datatypes/grid.py index e67f44ebef5..1caa0bd0240 100644 --- a/pygmt/datatypes/grid.py +++ b/pygmt/datatypes/grid.py @@ -3,7 +3,197 @@ """ import ctypes as ctp +from typing import ClassVar + +import numpy as np +import xarray as xr +from pygmt.datatypes.header import _GMT_GRID_HEADER, gmt_grdfloat class _GMT_GRID(ctp.Structure): # noqa: N801 - pass + """ + GMT grid structure for holding a grid and its header. + + This class is only meant for internal use and is not exposed to users. See the GMT + source code gmt_resources.h for the original C structure definitions. + + Examples + -------- + >>> from pygmt.clib import Session + >>> with Session() as lib: + ... with lib.virtualfile_out(kind="grid") as voutgrd: + ... lib.call_module("read", f"@static_earth_relief.nc {voutgrd} -Tg") + ... # Read the grid from the virtual file + ... grid = lib.read_virtualfile(voutgrd, kind="grid").contents + ... 
# The grid header + ... header = grid.header.contents + ... # Access the header properties + ... print(header.n_rows, header.n_columns, header.registration) + ... print(header.wesn[:], header.z_min, header.z_max, header.inc[:]) + ... print(header.z_scale_factor, header.z_add_offset) + ... print(header.x_units, header.y_units, header.z_units) + ... print(header.title) + ... print(header.command) + ... print(header.remark) + ... print(header.nm, header.size, header.complex_mode) + ... print(header.type, header.n_bands, header.mx, header.my) + ... print(header.pad[:]) + ... print(header.mem_layout, header.nan_value, header.xy_off) + ... # The x and y coordinates + ... print(grid.x[: header.n_columns]) + ... print(grid.y[: header.n_rows]) + ... # The data array (with paddings) + ... data = np.reshape( + ... grid.data[: header.mx * header.my], (header.my, header.mx) + ... ) + ... # The data array (without paddings) + ... pad = header.pad[:] + ... data = data[pad[2] : header.my - pad[3], pad[0] : header.mx - pad[1]] + ... print(data) + 14 8 1 + [-55.0, -47.0, -24.0, -10.0] 190.0 981.0 [1.0, 1.0] + 1.0 0.0 + b'longitude [degrees_east]' b'latitude [degrees_north]' b'elevation (m)' + b'Produced by grdcut' + b'grdcut @earth_relief_01d_p -R-55/-47/-24/-10 -Gstatic_earth_relief.nc' + b'Reduced by Gaussian Cartesian filtering (111.2 km fullwidth) from ...' + 112 216 0 + 18 1 12 18 + [2, 2, 2, 2] + b'' nan 0.5 + [-54.5, -53.5, -52.5, -51.5, -50.5, -49.5, -48.5, -47.5] + [-10.5, -11.5, -12.5, -13.5, -14.5, -15.5, ..., -22.5, -23.5] + [[347.5 331.5 309. 282. 190. 208. 299.5 348. ] + [349. 313. 325.5 247. 191. 225. 260. 452.5] + [345.5 320. 335. 292. 207.5 247. 325. 346.5] + [450.5 395.5 366. 248. 250. 354.5 550. 797.5] + [494.5 488.5 357. 254.5 286. 484.5 653.5 930. ] + [601. 526.5 535. 299. 398.5 645. 797.5 964. ] + [308. 595.5 555.5 556. 580. 770. 927. 920. ] + [521.5 682.5 796. 886. 571.5 638.5 739.5 881.5] + [310. 521.5 757. 570.5 538.5 524. 686.5 794. ] + [561.5 539. 446.5 481.5 439.5 553. 726.5 981. ] + [557. 435. 385.5 345.5 413.5 496. 519.5 833.5] + [373. 367.5 349. 352.5 419.5 428. 570. 667.5] + [383. 284.5 344.5 394. 491. 556.5 578.5 618.5] + [347.5 344.5 386. 640.5 617. 579. 646.5 671. ]] + """ + + _fields_: ClassVar = [ + # Pointer to full GMT header for grid + ("header", ctp.POINTER(_GMT_GRID_HEADER)), + # Pointer to grid data + ("data", ctp.POINTER(gmt_grdfloat)), + # Pointer to x coordinate vector + ("x", ctp.POINTER(ctp.c_double)), + # Pointer to y coordinate vector + ("y", ctp.POINTER(ctp.c_double)), + # Low-level information for GMT use only + ("hidden", ctp.c_void_p), + ] + + def to_dataarray(self) -> xr.DataArray: + """ + Convert a _GMT_GRID object to a :class:`xarray.DataArray` object. + + Returns + ------- + dataarray + A :class:`xr.DataArray` object. + + Examples + -------- + >>> from pygmt.clib import Session + >>> with Session() as lib: + ... with lib.virtualfile_out(kind="grid") as voutgrd: + ... lib.call_module("read", f"@static_earth_relief.nc {voutgrd} -Tg") + ... # Read the grid from the virtual file + ... grid = lib.read_virtualfile(voutgrd, kind="grid") + ... # Convert to xarray.DataArray and use it later + ... da = grid.contents.to_dataarray() + >>> da # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS + ... + array([[347.5, 344.5, 386. , 640.5, 617. , 579. , 646.5, 671. ], + [383. , 284.5, 344.5, 394. , 491. , 556.5, 578.5, 618.5], + [373. , 367.5, 349. , 352.5, 419.5, 428. , 570. , 667.5], + [557. , 435. , 385.5, 345.5, 413.5, 496. , 519.5, 833.5], + [561.5, 539. 
, 446.5, 481.5, 439.5, 553. , 726.5, 981. ], + [310. , 521.5, 757. , 570.5, 538.5, 524. , 686.5, 794. ], + [521.5, 682.5, 796. , 886. , 571.5, 638.5, 739.5, 881.5], + [308. , 595.5, 555.5, 556. , 580. , 770. , 927. , 920. ], + [601. , 526.5, 535. , 299. , 398.5, 645. , 797.5, 964. ], + [494.5, 488.5, 357. , 254.5, 286. , 484.5, 653.5, 930. ], + [450.5, 395.5, 366. , 248. , 250. , 354.5, 550. , 797.5], + [345.5, 320. , 335. , 292. , 207.5, 247. , 325. , 346.5], + [349. , 313. , 325.5, 247. , 191. , 225. , 260. , 452.5], + [347.5, 331.5, 309. , 282. , 190. , 208. , 299.5, 348. ]]) + Coordinates: + * lat (lat) float64... -23.5 -22.5 -21.5 -20.5 ... -12.5 -11.5 -10.5 + * lon (lon) float64... -54.5 -53.5 -52.5 -51.5 -50.5 -49.5 -48.5 -47.5 + Attributes: + Conventions: CF-1.7 + title: Produced by grdcut + history: grdcut @earth_relief_01d_p -R-55/-47/-24/-10 -Gstatic_ea... + description: Reduced by Gaussian Cartesian filtering (111.2 km fullwi... + long_name: elevation (m) + actual_range: [190. 981.] + >>> da.coords["lon"] # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS + ... + array([-54.5, -53.5, -52.5, -51.5, -50.5, -49.5, -48.5, -47.5]) + Coordinates: + * lon (lon) float64... -54.5 -53.5 -52.5 -51.5 -50.5 -49.5 -48.5 -47.5 + Attributes: + long_name: longitude + units: degrees_east + standard_name: longitude + axis: X + actual_range: [-55. -47.] + >>> da.coords["lat"] # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS + ... + array([-23.5, -22.5, -21.5, -20.5, -19.5, -18.5, -17.5, -16.5, -15.5, -14.5, + -13.5, -12.5, -11.5, -10.5]) + Coordinates: + * lat (lat) float64... -23.5 -22.5 -21.5 -20.5 ... -12.5 -11.5 -10.5 + Attributes: + long_name: latitude + units: degrees_north + standard_name: latitude + axis: Y + actual_range: [-24. -10.] + >>> da.gmt.registration, da.gmt.gtype + (1, 1) + """ + # The grid header + header = self.header.contents + + # Get dimensions and their attributes from the header. + dims, dim_attrs = header.dims, header.dim_attrs + # The coordinates, given as a tuple of the form (dims, data, attrs) + coords = [ + (dims[0], self.y[: header.n_rows], dim_attrs[0]), + (dims[1], self.x[: header.n_columns], dim_attrs[1]), + ] + + # The data array without paddings + pad = header.pad[:] + data = np.reshape(self.data[: header.mx * header.my], (header.my, header.mx))[ + pad[2] : header.my - pad[3], pad[0] : header.mx - pad[1] + ] + + # Create the xarray.DataArray object + grid = xr.DataArray( + data, coords=coords, name=header.name, attrs=header.data_attrs + ) + + # Flip the coordinates and data if necessary so that coordinates are ascending. + # `grid.sortby(list(grid.dims))` sometimes causes crashes. + # The solution comes from https://github.com/pydata/xarray/discussions/6695. + for dim in grid.dims: + if grid[dim][0] > grid[dim][1]: + grid = grid.isel({dim: slice(None, None, -1)}) + + # Set GMT accessors. + # Must put at the end, otherwise info gets lost after certain grid operations. + grid.gmt.registration = header.registration + grid.gmt.gtype = header.gtype + return grid diff --git a/pygmt/helpers/decorators.py b/pygmt/helpers/decorators.py index c6f19693781..47809a2787f 100644 --- a/pygmt/helpers/decorators.py +++ b/pygmt/helpers/decorators.py @@ -267,10 +267,12 @@ - ``file`` will save the result to the file specified by the ``outfile`` parameter.""", "outgrid": """ - outgrid : str or None - Name of the output netCDF grid file. 
For writing a specific grid - file format or applying basic data operations to the output grid, - see :gmt-docs:`gmt.html#grd-inout-full` for the available modifiers.""", + outgrid + Name of the output netCDF grid file. If not specified, will return an + :class:`xarray.DataArray` object. For writing a specific grid file format or + applying basic data operations to the output grid, see + :gmt-docs:`gmt.html#grd-inout-full` for the available modifiers. + """, "panel": r""" panel : bool, int, or list [*row,col*\|\ *index*]. diff --git a/pygmt/src/binstats.py b/pygmt/src/binstats.py index f34ad8ca911..028e79da1cc 100644 --- a/pygmt/src/binstats.py +++ b/pygmt/src/binstats.py @@ -3,21 +3,13 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @use_alias( C="statistic", E="empty", - G="outgrid", I="spacing", N="normalize", R="region", @@ -31,7 +23,7 @@ r="registration", ) @kwargs_to_strings(I="sequence", R="sequence", i="sequence_comma") -def binstats(data, **kwargs): +def binstats(data, outgrid: str | None = None, **kwargs): r""" Bin spatial data and determine statistics per bin. @@ -110,13 +102,13 @@ def binstats(data, **kwargs): - None if ``outgrid`` is set (grid output will be stored in file set by ``outgrid``) """ - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="binstats", args=build_arg_string(kwargs, infile=vintbl) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="vector", data=data) as vintbl, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="binstats", args=build_arg_string(kwargs, infile=vintbl) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/dimfilter.py b/pygmt/src/dimfilter.py index fc9355b7f43..f07c56f9171 100644 --- a/pygmt/src/dimfilter.py +++ b/pygmt/src/dimfilter.py @@ -4,14 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["dimfilter"] @@ -20,14 +13,13 @@ @use_alias( D="distance", F="filter", - G="outgrid", I="spacing", N="sectors", R="region", V="verbose", ) @kwargs_to_strings(I="sequence", R="sequence") -def dimfilter(grid, **kwargs): +def dimfilter(grid, outgrid: str | None = None, **kwargs): r""" Filter a grid by dividing the filter circle. 
@@ -149,13 +141,13 @@ def dimfilter(grid, **kwargs): distance, filters, or sectors.""" ) - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="dimfilter", args=build_arg_string(kwargs, infile=vingrd) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="dimfilter", args=build_arg_string(kwargs, infile=vingrd) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdclip.py b/pygmt/src/grdclip.py index 03a75421bf4..00c58a370e9 100644 --- a/pygmt/src/grdclip.py +++ b/pygmt/src/grdclip.py @@ -3,21 +3,13 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdclip"] @fmt_docstring @use_alias( - G="outgrid", R="region", Sa="above", Sb="below", @@ -32,7 +24,7 @@ Si="sequence", Sr="sequence", ) -def grdclip(grid, **kwargs): +def grdclip(grid, outgrid: str | None = None, **kwargs): r""" Set values in a grid that meet certain criteria to a new value. @@ -95,13 +87,13 @@ def grdclip(grid, **kwargs): >>> [new_grid.data.min(), new_grid.data.max()] [0.0, 10000.0] """ - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="grdclip", args=build_arg_string(kwargs, infile=vingrd) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="grdclip", args=build_arg_string(kwargs, infile=vingrd) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdfill.py b/pygmt/src/grdfill.py index 4eae3818257..1fe1dbf24ca 100644 --- a/pygmt/src/grdfill.py +++ b/pygmt/src/grdfill.py @@ -4,14 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdfill"] @@ -19,13 +12,12 @@ @fmt_docstring @use_alias( A="mode", - G="outgrid", N="no_data", R="region", V="verbose", ) @kwargs_to_strings(R="sequence") -def grdfill(grid, **kwargs): +def grdfill(grid, outgrid: str | None = None, **kwargs): r""" Fill blank areas from a grid file. 
@@ -77,13 +69,14 @@ def grdfill(grid, **kwargs): """ if kwargs.get("A") is None and kwargs.get("L") is None: raise GMTInvalidInput("At least parameter 'mode' or 'L' must be specified.") - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="grdfill", args=build_arg_string(kwargs, infile=vingrd) - ) - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="grdfill", args=build_arg_string(kwargs, infile=vingrd) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdfilter.py b/pygmt/src/grdfilter.py index a6e49c65001..f8d9915c231 100644 --- a/pygmt/src/grdfilter.py +++ b/pygmt/src/grdfilter.py @@ -3,21 +3,13 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @use_alias( D="distance", F="filter", - G="outgrid", I="spacing", N="nans", R="region", @@ -28,7 +20,7 @@ x="cores", ) @kwargs_to_strings(I="sequence", R="sequence") -def grdfilter(grid, **kwargs): +def grdfilter(grid, outgrid: str | None = None, **kwargs): r""" Filter a grid in the space (or time) domain. @@ -132,13 +124,13 @@ def grdfilter(grid, **kwargs): >>> grid = pygmt.datasets.load_earth_relief() >>> smooth_field = pygmt.grdfilter(grid=grid, filter="g600", distance="4") """ - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="grdfilter", args=build_arg_string(kwargs, infile=vingrd) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="grdfilter", args=build_arg_string(kwargs, infile=vingrd) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdgradient.py b/pygmt/src/grdgradient.py index 783ef7de48c..0a656d2e09d 100644 --- a/pygmt/src/grdgradient.py +++ b/pygmt/src/grdgradient.py @@ -5,14 +5,12 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - GMTTempFile, args_in_kwargs, build_arg_string, fmt_docstring, kwargs_to_strings, use_alias, ) -from pygmt.io import load_dataarray __doctest_skip__ = ["grdgradient"] @@ -22,7 +20,6 @@ A="azimuth", D="direction", E="radiance", - G="outgrid", N="normalize", Q="tiles", R="region", @@ -32,7 +29,7 @@ n="interpolation", ) @kwargs_to_strings(A="sequence", E="sequence", R="sequence") -def grdgradient(grid, **kwargs): +def grdgradient(grid, outgrid: str | None = None, **kwargs): r""" Compute the directional derivative of the vector gradient of the data. 
@@ -160,20 +157,20 @@ def grdgradient(grid, **kwargs): >>> # Create a new grid from an input grid, set the azimuth to 10 degrees, >>> new_grid = pygmt.grdgradient(grid=grid, azimuth=10) """ - with GMTTempFile(suffix=".nc") as tmpfile: - if kwargs.get("Q") is not None and kwargs.get("N") is None: - raise GMTInvalidInput("""Must specify normalize if tiles is specified.""") - if not args_in_kwargs(args=["A", "D", "E"], kwargs=kwargs): - raise GMTInvalidInput( - """At least one of the following parameters must be specified: - azimuth, direction, or radiance""" + if kwargs.get("Q") is not None and kwargs.get("N") is None: + raise GMTInvalidInput("""Must specify normalize if tiles is specified.""") + if not args_in_kwargs(args=["A", "D", "E"], kwargs=kwargs): + raise GMTInvalidInput( + "At least one of the following parameters must be specified: " + "azimuth, direction, or radiance." + ) + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="grdgradient", args=build_arg_string(kwargs, infile=vingrd) ) - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="grdgradient", args=build_arg_string(kwargs, infile=vingrd) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdhisteq.py b/pygmt/src/grdhisteq.py index 44d191a417e..7cf2e9f25a4 100644 --- a/pygmt/src/grdhisteq.py +++ b/pygmt/src/grdhisteq.py @@ -9,14 +9,12 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, use_alias, validate_output_table_type, ) -from pygmt.io import load_dataarray __doctest_skip__ = ["grdhisteq.*"] @@ -56,7 +54,6 @@ class grdhisteq: # noqa: N801 @fmt_docstring @use_alias( C="divisions", - G="outgrid", R="region", N="gaussian", Q="quadratic", @@ -64,7 +61,7 @@ class grdhisteq: # noqa: N801 h="header", ) @kwargs_to_strings(R="sequence") - def equalize_grid(grid, **kwargs): + def equalize_grid(grid, outgrid: str | None = None, **kwargs): r""" Perform histogram equalization for a grid. @@ -123,15 +120,16 @@ def equalize_grid(grid, **kwargs): This method does a weighted histogram equalization for geographic grids to account for node area varying with latitude. 
""" - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="grdhisteq", args=build_arg_string(kwargs, infile=vingrd) - ) - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="grdhisteq", args=build_arg_string(kwargs, infile=vingrd) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) @staticmethod @fmt_docstring diff --git a/pygmt/src/grdlandmask.py b/pygmt/src/grdlandmask.py index 29bfef73fd6..75d3327e121 100644 --- a/pygmt/src/grdlandmask.py +++ b/pygmt/src/grdlandmask.py @@ -4,14 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdlandmask"] @@ -21,7 +14,6 @@ A="area_thresh", D="resolution", E="bordervalues", - G="outgrid", I="spacing", N="maskvalues", R="region", @@ -30,7 +22,7 @@ x="cores", ) @kwargs_to_strings(I="sequence", R="sequence", N="sequence", E="sequence") -def grdlandmask(**kwargs): +def grdlandmask(outgrid: str | None = None, **kwargs): r""" Create a grid file with set values for land and water. @@ -105,10 +97,8 @@ def grdlandmask(**kwargs): if kwargs.get("I") is None or kwargs.get("R") is None: raise GMTInvalidInput("Both 'region' and 'spacing' must be specified.") - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile + with Session() as lib: + with lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd: + kwargs["G"] = voutgrd lib.call_module(module="grdlandmask", args=build_arg_string(kwargs)) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdproject.py b/pygmt/src/grdproject.py index 7903a090f67..9046ccbfa6a 100644 --- a/pygmt/src/grdproject.py +++ b/pygmt/src/grdproject.py @@ -4,14 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdproject"] @@ -22,7 +15,6 @@ D="spacing", E="dpi", F="scaling", - G="outgrid", J="projection", I="inverse", M="unit", @@ -32,7 +24,7 @@ r="registration", ) @kwargs_to_strings(C="sequence", D="sequence", R="sequence") -def grdproject(grid, **kwargs): +def grdproject(grid, outgrid: str | None = None, **kwargs): r""" Change projection of gridded data between geographical and rectangular. 
@@ -111,13 +103,14 @@ def grdproject(grid, **kwargs): """ if kwargs.get("J") is None: raise GMTInvalidInput("The projection must be specified.") - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="grdproject", args=build_arg_string(kwargs, infile=vingrd) - ) - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="grdproject", args=build_arg_string(kwargs, infile=vingrd) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdsample.py b/pygmt/src/grdsample.py index 350da05ff84..8c2c0f692a2 100644 --- a/pygmt/src/grdsample.py +++ b/pygmt/src/grdsample.py @@ -3,21 +3,13 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdsample"] @fmt_docstring @use_alias( - G="outgrid", I="spacing", R="region", T="translate", @@ -28,7 +20,7 @@ x="cores", ) @kwargs_to_strings(I="sequence", R="sequence") -def grdsample(grid, **kwargs): +def grdsample(grid, outgrid: str | None = None, **kwargs): r""" Change the registration, spacing, or nodes in a grid file. @@ -87,13 +79,13 @@ def grdsample(grid, **kwargs): >>> # and set both x- and y-spacing to 0.5 arc-degrees >>> new_grid = pygmt.grdsample(grid=grid, translate=True, spacing=[0.5, 0.5]) """ - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="grdsample", args=build_arg_string(kwargs, infile=vingrd) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="raster", data=grid) as vingrd, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="grdsample", args=build_arg_string(kwargs, infile=vingrd) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/nearneighbor.py b/pygmt/src/nearneighbor.py index a9c7a22ca14..81e0fd4d50f 100644 --- a/pygmt/src/nearneighbor.py +++ b/pygmt/src/nearneighbor.py @@ -3,14 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["nearneighbor"] @@ -18,7 +11,6 @@ @fmt_docstring @use_alias( E="empty", - G="outgrid", I="spacing", N="sectors", R="region", @@ -36,7 +28,9 @@ w="wrap", ) @kwargs_to_strings(I="sequence", R="sequence", i="sequence_comma") -def nearneighbor(data=None, x=None, y=None, z=None, **kwargs): +def nearneighbor( + data=None, x=None, y=None, z=None, outgrid: str | None = None, **kwargs +): r""" Grid table data using a "Nearest neighbor" 
algorithm. @@ -143,15 +137,15 @@ def nearneighbor(data=None, x=None, y=None, z=None, **kwargs): ... search_radius="10m", ... ) """ - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in( + with Session() as lib: + with ( + lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, z=z, required_z=True - ) as vintbl: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="nearneighbor", args=build_arg_string(kwargs, infile=vintbl) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + ) as vintbl, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="nearneighbor", args=build_arg_string(kwargs, infile=vintbl) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/sph2grd.py b/pygmt/src/sph2grd.py index 53e4f8be280..533b578caa0 100644 --- a/pygmt/src/sph2grd.py +++ b/pygmt/src/sph2grd.py @@ -3,21 +3,13 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["sph2grd"] @fmt_docstring @use_alias( - G="outgrid", I="spacing", R="region", V="verbose", @@ -28,7 +20,7 @@ x="cores", ) @kwargs_to_strings(I="sequence", R="sequence", i="sequence_comma") -def sph2grd(data, **kwargs): +def sph2grd(data, outgrid: str | None = None, **kwargs): r""" Create spherical grid files in tension of data. @@ -72,13 +64,13 @@ def sph2grd(data, **kwargs): >>> # set the grid spacing to 1 arc-degree, and the region to global ("g") >>> new_grid = pygmt.sph2grd(data="@EGM96_to_36.txt", spacing=1, region="g") """ - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="sph2grd", args=build_arg_string(kwargs, infile=vintbl) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="vector", data=data) as vintbl, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="sph2grd", args=build_arg_string(kwargs, infile=vintbl) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/sphdistance.py b/pygmt/src/sphdistance.py index e2dc839b0cc..2c426a54352 100644 --- a/pygmt/src/sphdistance.py +++ b/pygmt/src/sphdistance.py @@ -5,14 +5,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["sphdistance"] @@ -22,7 +15,6 @@ C="single_form", D="duplicate", E="quantity", - G="outgrid", I="spacing", L="unit", N="node_table", @@ -31,7 +23,7 @@ V="verbose", ) @kwargs_to_strings(I="sequence", R="sequence") -def sphdistance(data=None, x=None, y=None, **kwargs): +def sphdistance(data=None, x=None, y=None, outgrid: str | None = None, **kwargs): r""" Create Voronoi distance, node, or 
natural nearest-neighbor grid on a sphere. @@ -116,13 +108,13 @@ def sphdistance(data=None, x=None, y=None, **kwargs): """ if kwargs.get("I") is None or kwargs.get("R") is None: raise GMTInvalidInput("Both 'region' and 'spacing' must be specified.") - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="vector", data=data, x=x, y=y) as vintbl: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="sphdistance", args=build_arg_string(kwargs, infile=vintbl) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="vector", data=data, x=x, y=y) as vintbl, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="sphdistance", args=build_arg_string(kwargs, infile=vintbl) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/sphinterpolate.py b/pygmt/src/sphinterpolate.py index 82bae2eef3a..a8d57d20abe 100644 --- a/pygmt/src/sphinterpolate.py +++ b/pygmt/src/sphinterpolate.py @@ -3,27 +3,19 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["sphinterpolate"] @fmt_docstring @use_alias( - G="outgrid", I="spacing", R="region", V="verbose", ) @kwargs_to_strings(I="sequence", R="sequence") -def sphinterpolate(data, **kwargs): +def sphinterpolate(data, outgrid: str | None = None, **kwargs): r""" Create spherical grid files in tension of data. 
@@ -66,14 +58,13 @@ def sphinterpolate(data, **kwargs): >>> # to produce a grid with a 1 arc-degree spacing >>> grid = pygmt.sphinterpolate(data=mars_shape, spacing=1, region="g") """ - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="sphinterpolate", - args=build_arg_string(kwargs, infile=vintbl), - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + with Session() as lib: + with ( + lib.virtualfile_in(check_kind="vector", data=data) as vintbl, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="sphinterpolate", args=build_arg_string(kwargs, infile=vintbl) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/surface.py b/pygmt/src/surface.py index 019267a5078..d336034632a 100644 --- a/pygmt/src/surface.py +++ b/pygmt/src/surface.py @@ -4,14 +4,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["surface"] @@ -19,7 +12,6 @@ @fmt_docstring @use_alias( C="convergence", - G="outgrid", I="spacing", Ll="lower", Lu="upper", @@ -38,7 +30,7 @@ w="wrap", ) @kwargs_to_strings(I="sequence", R="sequence") -def surface(data=None, x=None, y=None, z=None, **kwargs): +def surface(data=None, x=None, y=None, z=None, outgrid: str | None = None, **kwargs): r""" Grid table data using adjustable tension continuous curvature splines. 
@@ -158,15 +150,15 @@ def surface(data=None, x=None, y=None, z=None, **kwargs): >>> # Perform gridding of topography data >>> grid = pygmt.surface(data=topography, spacing=1, region=[0, 4, 0, 8]) """ - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in( + with Session() as lib: + with ( + lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, z=z, required_z=True - ) as vintbl: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="surface", args=build_arg_string(kwargs, infile=vintbl) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + ) as vintbl, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="surface", args=build_arg_string(kwargs, infile=vintbl) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/triangulate.py b/pygmt/src/triangulate.py index 7a64178c99a..135928ca424 100644 --- a/pygmt/src/triangulate.py +++ b/pygmt/src/triangulate.py @@ -9,14 +9,12 @@ import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( - GMTTempFile, build_arg_string, fmt_docstring, kwargs_to_strings, use_alias, validate_output_table_type, ) -from pygmt.io import load_dataarray class triangulate: # noqa: N801 @@ -50,7 +48,6 @@ class triangulate: # noqa: N801 @staticmethod @fmt_docstring @use_alias( - G="outgrid", I="spacing", J="projection", R="region", @@ -66,7 +63,9 @@ class triangulate: # noqa: N801 w="wrap", ) @kwargs_to_strings(I="sequence", R="sequence", i="sequence_comma") - def regular_grid(data=None, x=None, y=None, z=None, **kwargs): + def regular_grid( + data=None, x=None, y=None, z=None, outgrid: str | None = None, **kwargs + ): """ Delaunay triangle based gridding of Cartesian data. @@ -136,20 +135,18 @@ def regular_grid(data=None, x=None, y=None, z=None, **kwargs): ``triangulate`` is a Cartesian or small-geographic area operator and is unaware of periodic or polar boundary conditions. 
""" - # Return an xarray.DataArray if ``outgrid`` is not set - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in( + with Session() as lib: + with ( + lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, z=z, required_z=False - ) as vintbl: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="triangulate", - args=build_arg_string(kwargs, infile=vintbl), - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + ) as vintbl, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="triangulate", args=build_arg_string(kwargs, infile=vintbl) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) @staticmethod @fmt_docstring diff --git a/pygmt/src/xyz2grd.py b/pygmt/src/xyz2grd.py index fcd7a2211b8..c3a5bd434ad 100644 --- a/pygmt/src/xyz2grd.py +++ b/pygmt/src/xyz2grd.py @@ -4,14 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import ( - GMTTempFile, - build_arg_string, - fmt_docstring, - kwargs_to_strings, - use_alias, -) -from pygmt.io import load_dataarray +from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["xyz2grd"] @@ -19,7 +12,6 @@ @fmt_docstring @use_alias( A="duplicate", - G="outgrid", I="spacing", J="projection", R="region", @@ -35,7 +27,7 @@ w="wrap", ) @kwargs_to_strings(I="sequence", R="sequence") -def xyz2grd(data=None, x=None, y=None, z=None, **kwargs): +def xyz2grd(data=None, x=None, y=None, z=None, outgrid: str | None = None, **kwargs): r""" Create a grid file from table data. @@ -150,15 +142,15 @@ def xyz2grd(data=None, x=None, y=None, z=None, **kwargs): if kwargs.get("I") is None or kwargs.get("R") is None: raise GMTInvalidInput("Both 'region' and 'spacing' must be specified.") - with GMTTempFile(suffix=".nc") as tmpfile: - with Session() as lib: - with lib.virtualfile_in( + with Session() as lib: + with ( + lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, z=z, required_z=True - ) as vintbl: - if (outgrid := kwargs.get("G")) is None: - kwargs["G"] = outgrid = tmpfile.name # output to tmpfile - lib.call_module( - module="xyz2grd", args=build_arg_string(kwargs, infile=vintbl) - ) - - return load_dataarray(outgrid) if outgrid == tmpfile.name else None + ) as vintbl, + lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd, + ): + kwargs["G"] = voutgrd + lib.call_module( + module="xyz2grd", args=build_arg_string(kwargs, infile=vintbl) + ) + return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/tests/test_datasets_load_remote_datasets.py b/pygmt/tests/test_datasets_load_remote_datasets.py index 6b6b472281e..0b9c3f55dde 100644 --- a/pygmt/tests/test_datasets_load_remote_datasets.py +++ b/pygmt/tests/test_datasets_load_remote_datasets.py @@ -3,8 +3,6 @@ """ import pytest -from packaging.version import Version -from pygmt.clib import __gmt_version__ from pygmt.datasets.load_remote_dataset import _load_remote_dataset from pygmt.exceptions import GMTInvalidInput @@ -35,8 +33,9 @@ def test_load_remote_dataset_benchmark_with_region(): assert data.gmt.registration == 0 assert data.shape == (11, 21) # The cpt attribute was added since GMT 6.4.0 - if Version(__gmt_version__) >= Version("6.4.0"): - assert data.attrs["cpt"] == "@earth_age.cpt" + # Can't access the cpt attribute using virtual files + # if 
Version(__gmt_version__) >= Version("6.4.0"): + # assert data.attrs["cpt"] == "@earth_age.cpt" def test_load_remote_dataset_invalid_resolutions(): diff --git a/pygmt/tests/test_sphinterpolate.py b/pygmt/tests/test_sphinterpolate.py index d7e24eba780..5e5485dc2cc 100644 --- a/pygmt/tests/test_sphinterpolate.py +++ b/pygmt/tests/test_sphinterpolate.py @@ -41,4 +41,4 @@ def test_sphinterpolate_no_outgrid(mars): npt.assert_allclose(temp_grid.max(), 14628.144) npt.assert_allclose(temp_grid.min(), -6908.1987) npt.assert_allclose(temp_grid.median(), 118.96849) - npt.assert_allclose(temp_grid.mean(), 272.60593) + npt.assert_allclose(temp_grid.mean(), 272.60578) From 7f77c7749a699c8c5a21b49683314e7b92c91e26 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 17 Apr 2024 07:41:04 +0800 Subject: [PATCH 066/218] CI: Use GH_TOKEN instead of GITHUB_TOKEN for GitHub CLI (#3175) --- .github/workflows/check-links.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index cb6881b4c5f..524bc6f078d 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -72,4 +72,4 @@ jobs: title="Link Checker Report on ${{ steps.date.outputs.date }}" gh issue create --title "$title" --body-file ./lychee/out.md env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} From 9da3ea3b74bcc1a166ef11f6e4c25314ebbf8a5c Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 18 Apr 2024 00:32:02 +0800 Subject: [PATCH 067/218] Reorganize the list of data files for caching (#3171) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/helpers/caching.py | 104 +++++++++++++++++---------------------- 1 file changed, 45 insertions(+), 59 deletions(-) diff --git a/pygmt/helpers/caching.py b/pygmt/helpers/caching.py index 44d9f6f05d1..7bf292a2003 100644 --- a/pygmt/helpers/caching.py +++ b/pygmt/helpers/caching.py @@ -9,89 +9,75 @@ def cache_data(): """ Download GMT remote data files used in PyGMT tests and docs to cache folder. """ - # List of datasets to download + # List of data files to download. datasets = [ - # Earth relief grids + # List of GMT remote datasets. + "@earth_age_01d_g", + "@earth_day_01d_p", + "@earth_faa_01d_g", "@earth_gebco_01d_g", "@earth_gebcosi_01d_g", "@earth_gebcosi_15m_p", - "@earth_relief_01d_p", - "@earth_relief_01d_g", - "@earth_relief_30m_p", - "@earth_relief_30m_g", - "@earth_relief_10m_p", - "@earth_relief_10m_g", - "@earth_relief_05m_p", - "@earth_relief_05m_g", - "@earth_synbath_01d_g", - # List of tiles of 03s srtm data. - # Names like @N35E135.earth_relief_03s_g.nc are for internal use only. - # The naming scheme may change. DO NOT USE IT IN YOUR SCRIPTS. 
- "@N30W120.earth_relief_15s_p.nc", - "@N35E135.earth_relief_03s_g.nc", - "@N37W120.earth_relief_03s_g.nc", - "@N00W090.earth_relief_03m_p.nc", - "@N00E135.earth_relief_30s_g.nc", - "@N00W010.earth_relief_15s_p.nc", # Specific grid for 15s test - "@N04W010.earth_relief_03s_g.nc", # Specific grid for 03s test - # Earth synbath relief grid - "@S15W105.earth_synbath_30s_p.nc", - # Earth seafloor age grids - "@earth_age_01d_g", - "@N00W030.earth_age_01m_g.nc", # Specific grid for 01m test - # Earth geoid grids "@earth_geoid_01d_g", - "@N00W030.earth_geoid_01m_g.nc", # Specific grid for 01m test - # Earth magnetic anomaly grids "@earth_mag_01d_g", - "@S30W060.earth_mag_02m_p.nc", # Specific grid for 02m test "@earth_mag4km_01d_g", - "@S30W120.earth_mag4km_02m_p.nc", # Specific grid for 02m test - # Earth mask grid "@earth_mask_01d_g", - # Earth free-air anomaly grids - "@earth_faa_01d_g", - "@N00W030.earth_faa_01m_p.nc", # Specific grid for 01m test - # Earth vertical gravity gradient grids + "@earth_relief_01d_g", + "@earth_relief_01d_p", + "@earth_relief_10m_g", + "@earth_relief_10m_p", + "@earth_relief_30m_g", + "@earth_relief_30m_p", + "@earth_relief_05m_g", + "@earth_relief_05m_p", + "@earth_synbath_01d_g", "@earth_vgg_01d_g", - "@N00W030.earth_vgg_01m_p.nc", # Specific grid for 01m test - # Earth WDMAM grids "@earth_wdmam_01d_g", - "@S90E000.earth_wdmam_03m_g.nc", # Specific grid for 03m test - # Earth day/night images - "@earth_day_01d_p", - # Mars relief grids "@mars_relief_01d_g", - "@N00W030.mars_relief_01m_g.nc", # Specific grid for 01m tes - # Mercury relief grids "@mercury_relief_01d_g", - "@N00W030.mercury_relief_01m_p.nc", # Specific grid for 01m test - # Moon relief grids "@moon_relief_01d_g", - "@N00W030.moon_relief_01m_p.nc", # Specific grid for 01m test - # Pluto relief grids "@pluto_relief_01d_g", - "@N00W030.pluto_relief_01m_p.nc", # Specific grid for 01m test - # Venus relief grids "@venus_relief_01d_g", - "@N00W030.venus_relief_01m_g.nc", # Specific grid for 01m test - # Other cache files - "@capitals.gmt", - "@circuit.png", - "@earth_relief_20m_holes.grd", + # List of tiled remote datasets. + # Names like @N35E135.earth_relief_03s_g.nc are for internal use only. + # The naming scheme may change. DO NOT USE IT IN YOUR SCRIPTS. + "@N00W030.earth_age_01m_g.nc", + "@N00W030.earth_faa_01m_p.nc", + "@N00W030.earth_geoid_01m_g.nc", + "@S30W060.earth_mag_02m_p.nc", + "@S30W120.earth_mag4km_02m_p.nc", + "@N00W090.earth_relief_03m_p.nc", + "@N00E135.earth_relief_30s_g.nc", + "@N00W010.earth_relief_15s_p.nc", + "@N30W120.earth_relief_15s_p.nc", + "@N04W010.earth_relief_03s_g.nc", + "@N35E135.earth_relief_03s_g.nc", + "@N37W120.earth_relief_03s_g.nc", + "@S15W105.earth_synbath_30s_p.nc", + "@N00W030.earth_vgg_01m_p.nc", + "@S90E000.earth_wdmam_03m_g.nc", + "@N00W030.mars_relief_01m_g.nc", + "@N00W030.mercury_relief_01m_p.nc", + "@N00W030.moon_relief_01m_p.nc", + "@N00W030.pluto_relief_01m_p.nc", + "@N00W030.venus_relief_01m_g.nc", + # List of cache files. "@EGM96_to_36.txt", "@MaunaLoa_CO2.txt", - "@RidgeTest.shp", - "@RidgeTest.shx", "@RidgeTest.dbf", "@RidgeTest.prj", + "@RidgeTest.shp", + "@RidgeTest.shx", "@Table_5_11.txt", "@Table_5_11_mean.xyz", + "@capitals.gmt", + "@circuit.png", + "@earth_relief_20m_holes.grd", "@fractures_06.txt", "@hotspots.txt", - "@ridge.txt", "@mars370d.txt", - "@srtm_tiles.nc", # needed for 03s and 01s relief data + "@ridge.txt", + "@srtm_tiles.nc", # Needed for earth relief 03s and 01s data. 
"@static_earth_relief.nc", "@ternary.txt", "@test.dat.nc", From b15a38a863a43313b4f9923bf9c6df2246a38bf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Thu, 18 Apr 2024 09:36:48 +0200 Subject: [PATCH 068/218] tests/test_datatypes_dataset: Fix typo in comment (#3177) --- pygmt/tests/test_datatypes_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygmt/tests/test_datatypes_dataset.py b/pygmt/tests/test_datatypes_dataset.py index e78782ada37..6481591b22a 100644 --- a/pygmt/tests/test_datatypes_dataset.py +++ b/pygmt/tests/test_datatypes_dataset.py @@ -86,7 +86,7 @@ def test_dataset_empty(): def test_dataset_to_strings_with_none_values(): """ - Test that None values in the trailing text doesn't raise an excetion. + Test that None values in the trailing text doesn't raise an exception. Due to a likely upstream bug, the trailing texts sometimes can be ``None`` when downloading tiled grids. The temporary workaround is to replace any None values with From fd286fbc86df7e5d1f042642ea4cecee35e0b110 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 18 Apr 2024 18:58:27 +0800 Subject: [PATCH 069/218] Refactor all wrappers to pass an argument list to Session.call_module (#3132) --- pygmt/figure.py | 15 +++++---------- pygmt/helpers/utils.py | 6 +++++- pygmt/session_management.py | 6 +++--- pygmt/src/basemap.py | 4 ++-- pygmt/src/binstats.py | 4 ++-- pygmt/src/blockm.py | 4 ++-- pygmt/src/coast.py | 4 ++-- pygmt/src/colorbar.py | 4 ++-- pygmt/src/config.py | 13 +++++++------ pygmt/src/contour.py | 4 ++-- pygmt/src/dimfilter.py | 4 ++-- pygmt/src/filter1d.py | 4 ++-- pygmt/src/grd2cpt.py | 6 +++--- pygmt/src/grd2xyz.py | 4 ++-- pygmt/src/grdclip.py | 4 ++-- pygmt/src/grdcontour.py | 4 ++-- pygmt/src/grdcut.py | 4 ++-- pygmt/src/grdfill.py | 4 ++-- pygmt/src/grdfilter.py | 4 ++-- pygmt/src/grdgradient.py | 4 ++-- pygmt/src/grdhisteq.py | 6 +++--- pygmt/src/grdimage.py | 4 ++-- pygmt/src/grdinfo.py | 4 ++-- pygmt/src/grdlandmask.py | 4 ++-- pygmt/src/grdproject.py | 4 ++-- pygmt/src/grdsample.py | 4 ++-- pygmt/src/grdtrack.py | 4 ++-- pygmt/src/grdview.py | 4 ++-- pygmt/src/grdvolume.py | 4 ++-- pygmt/src/histogram.py | 4 ++-- pygmt/src/image.py | 4 ++-- pygmt/src/info.py | 4 ++-- pygmt/src/inset.py | 9 +++++---- pygmt/src/legend.py | 8 +++++--- pygmt/src/logo.py | 4 ++-- pygmt/src/makecpt.py | 6 +++--- pygmt/src/meca.py | 4 ++-- pygmt/src/nearneighbor.py | 4 ++-- pygmt/src/plot.py | 4 ++-- pygmt/src/plot3d.py | 6 ++---- pygmt/src/project.py | 4 ++-- pygmt/src/rose.py | 4 ++-- pygmt/src/select.py | 4 ++-- pygmt/src/shift_origin.py | 2 +- pygmt/src/solar.py | 4 ++-- pygmt/src/sph2grd.py | 4 ++-- pygmt/src/sphdistance.py | 4 ++-- pygmt/src/sphinterpolate.py | 4 ++-- pygmt/src/subplot.py | 18 +++++++++++------- pygmt/src/surface.py | 4 ++-- pygmt/src/ternary.py | 4 ++-- pygmt/src/text.py | 11 +++-------- pygmt/src/tilemap.py | 4 ++-- pygmt/src/timestamp.py | 4 ++-- pygmt/src/triangulate.py | 6 +++--- pygmt/src/velo.py | 4 ++-- pygmt/src/which.py | 7 +++---- pygmt/src/wiggle.py | 6 ++---- pygmt/src/x2sys_cross.py | 6 ++---- pygmt/src/x2sys_init.py | 4 ++-- pygmt/src/xyz2grd.py | 4 ++-- pygmt/tests/test_geopandas.py | 2 +- 62 files changed, 155 insertions(+), 160 deletions(-) diff --git a/pygmt/figure.py b/pygmt/figure.py index ebeccc90287..5190c4acf77 100644 --- a/pygmt/figure.py +++ b/pygmt/figure.py @@ -18,7 +18,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTError, 
GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, launch_external_viewer, @@ -108,7 +108,7 @@ def _activate_figure(self): # Passing format '-' tells pygmt.end to not produce any files. fmt = "-" with Session() as lib: - lib.call_module(module="figure", args=f"{self._name} {fmt}") + lib.call_module(module="figure", args=[self._name, fmt]) def _preprocess(self, **kwargs): """ @@ -234,15 +234,12 @@ def psconvert(self, **kwargs): # Default cropping the figure to True if kwargs.get("A") is None: kwargs["A"] = "" - # Manually handle prefix -F argument so spaces aren't converted to \040 - # by build_arg_string function. For more information, see - # https://github.com/GenericMappingTools/pygmt/pull/1487 - prefix = kwargs.pop("F", None) + + prefix = kwargs.get("F") if prefix in ["", None, False, True]: raise GMTInvalidInput( "The 'prefix' parameter must be specified with a valid value." ) - prefix_arg = f'-F"{prefix}"' # check if the parent directory exists prefix_path = Path(prefix).parent @@ -252,9 +249,7 @@ def psconvert(self, **kwargs): ) with Session() as lib: - lib.call_module( - module="psconvert", args=f"{prefix_arg} {build_arg_string(kwargs)}" - ) + lib.call_module(module="psconvert", args=build_arg_list(kwargs)) def savefig( # noqa: PLR0912 self, diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index 204519c3cf2..0fcd61d6673 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -174,7 +174,11 @@ def data_kind(data=None, x=None, y=None, z=None, required_z=False, required_data 'image' """ # determine the data kind - if isinstance(data, str | pathlib.PurePath): + if isinstance(data, str | pathlib.PurePath) or ( + isinstance(data, list | tuple) + and all(isinstance(_file, str | pathlib.PurePath) for _file in data) + ): + # One or more files kind = "file" elif isinstance(data, bool | int | float) or (data is None and not required_data): kind = "arg" diff --git a/pygmt/session_management.py b/pygmt/session_management.py index 9b598c72d73..87055bb44e8 100644 --- a/pygmt/session_management.py +++ b/pygmt/session_management.py @@ -23,9 +23,9 @@ def begin(): prefix = "pygmt-session" with Session() as lib: - lib.call_module(module="begin", args=prefix) + lib.call_module(module="begin", args=[prefix]) # pygmt relies on GMT modern mode with GMT_COMPATIBILITY at version 6 - lib.call_module(module="set", args="GMT_COMPATIBILITY 6") + lib.call_module(module="set", args=["GMT_COMPATIBILITY=6"]) def end(): @@ -38,4 +38,4 @@ def end(): ``pygmt.begin``), and bring the figures to the working directory. 
""" with Session() as lib: - lib.call_module(module="end", args="") + lib.call_module(module="end", args=[]) diff --git a/pygmt/src/basemap.py b/pygmt/src/basemap.py index 25ea7cb408e..6355e2e02f7 100644 --- a/pygmt/src/basemap.py +++ b/pygmt/src/basemap.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -84,4 +84,4 @@ def basemap(self, **kwargs): """ kwargs = self._preprocess(**kwargs) with Session() as lib: - lib.call_module(module="basemap", args=build_arg_string(kwargs)) + lib.call_module(module="basemap", args=build_arg_list(kwargs)) diff --git a/pygmt/src/binstats.py b/pygmt/src/binstats.py index 028e79da1cc..d60a337d8b2 100644 --- a/pygmt/src/binstats.py +++ b/pygmt/src/binstats.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -109,6 +109,6 @@ def binstats(data, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="binstats", args=build_arg_string(kwargs, infile=vintbl) + module="binstats", args=build_arg_list(kwargs, infile=vintbl) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/blockm.py b/pygmt/src/blockm.py index 1798da62e9e..a8b35d6c942 100644 --- a/pygmt/src/blockm.py +++ b/pygmt/src/blockm.py @@ -8,7 +8,7 @@ import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -61,7 +61,7 @@ def _blockm( ): lib.call_module( module=block_method, - args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + args=build_arg_list(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset( vfname=vouttbl, output_type=output_type, column_names=column_names diff --git a/pygmt/src/coast.py b/pygmt/src/coast.py index 897f9ed5c03..aef178ea74c 100644 --- a/pygmt/src/coast.py +++ b/pygmt/src/coast.py @@ -6,7 +6,7 @@ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( args_in_kwargs, - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -227,4 +227,4 @@ def coast(self, **kwargs): lakes, land, water, rivers, borders, dcw, Q, or shorelines""" ) with Session() as lib: - lib.call_module(module="coast", args=build_arg_string(kwargs)) + lib.call_module(module="coast", args=build_arg_list(kwargs)) diff --git a/pygmt/src/colorbar.py b/pygmt/src/colorbar.py index d3c96486216..c5e6f3fb5a9 100644 --- a/pygmt/src/colorbar.py +++ b/pygmt/src/colorbar.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["colorbar"] @@ -146,4 +146,4 @@ def colorbar(self, **kwargs): """ kwargs = self._preprocess(**kwargs) with Session() as lib: - lib.call_module(module="colorbar", args=build_arg_string(kwargs)) + lib.call_module(module="colorbar", args=build_arg_list(kwargs)) diff --git a/pygmt/src/config.py b/pygmt/src/config.py index 9d647ff83d1..f38402e80dc 100644 --- a/pygmt/src/config.py +++ b/pygmt/src/config.py @@ -199,9 +199,10 @@ def __init__(self, **kwargs): self.old_defaults[key] = 
lib.get_default(key) # call gmt set to change GMT defaults - arg_str = " ".join([f'{key}="{value}"' for key, value in kwargs.items()]) with Session() as lib: - lib.call_module(module="set", args=arg_str) + lib.call_module( + module="set", args=[f"{key}={value}" for key, value in kwargs.items()] + ) def __enter__(self): """ @@ -213,8 +214,8 @@ def __exit__(self, exc_type, exc_value, traceback): """ Revert GMT configurations to initial values. """ - arg_str = " ".join( - [f'{key}="{value}"' for key, value in self.old_defaults.items()] - ) with Session() as lib: - lib.call_module(module="set", args=arg_str) + lib.call_module( + module="set", + args=[f"{key}={value}" for key, value in self.old_defaults.items()], + ) diff --git a/pygmt/src/contour.py b/pygmt/src/contour.py index 9a481a167fa..cf5b2007c66 100644 --- a/pygmt/src/contour.py +++ b/pygmt/src/contour.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -119,5 +119,5 @@ def contour(self, data=None, x=None, y=None, z=None, **kwargs): check_kind="vector", data=data, x=x, y=y, z=z, required_z=True ) as vintbl: lib.call_module( - module="contour", args=build_arg_string(kwargs, infile=vintbl) + module="contour", args=build_arg_list(kwargs, infile=vintbl) ) diff --git a/pygmt/src/dimfilter.py b/pygmt/src/dimfilter.py index f07c56f9171..0498fb02e7c 100644 --- a/pygmt/src/dimfilter.py +++ b/pygmt/src/dimfilter.py @@ -4,7 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["dimfilter"] @@ -148,6 +148,6 @@ def dimfilter(grid, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="dimfilter", args=build_arg_string(kwargs, infile=vingrd) + module="dimfilter", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/filter1d.py b/pygmt/src/filter1d.py index 32e046e2e59..0469385dcf9 100644 --- a/pygmt/src/filter1d.py +++ b/pygmt/src/filter1d.py @@ -9,7 +9,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, use_alias, validate_output_table_type, @@ -121,6 +121,6 @@ def filter1d( ): lib.call_module( module="filter1d", - args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + args=build_arg_list(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset(vfname=vouttbl, output_type=output_type) diff --git a/pygmt/src/grd2cpt.py b/pygmt/src/grd2cpt.py index 9ba6c5aa5bf..de4a9ab5248 100644 --- a/pygmt/src/grd2cpt.py +++ b/pygmt/src/grd2cpt.py @@ -4,7 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grd2cpt"] @@ -186,10 +186,10 @@ def grd2cpt(grid, **kwargs): with Session() as lib: with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: if kwargs.get("H") is None: # if no output is set - arg_str = build_arg_string(kwargs, infile=vingrd) + arg_str = 
build_arg_list(kwargs, infile=vingrd) else: # if output is set outfile, kwargs["H"] = kwargs["H"], True if not outfile or not isinstance(outfile, str): raise GMTInvalidInput("'output' should be a proper file name.") - arg_str = build_arg_string(kwargs, infile=vingrd, outfile=outfile) + arg_str = build_arg_list(kwargs, infile=vingrd, outfile=outfile) lib.call_module(module="grd2cpt", args=arg_str) diff --git a/pygmt/src/grd2xyz.py b/pygmt/src/grd2xyz.py index 4c6d3eb224f..04e4d21d848 100644 --- a/pygmt/src/grd2xyz.py +++ b/pygmt/src/grd2xyz.py @@ -10,7 +10,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -165,7 +165,7 @@ def grd2xyz( ): lib.call_module( module="grd2xyz", - args=build_arg_string(kwargs, infile=vingrd, outfile=vouttbl), + args=build_arg_list(kwargs, infile=vingrd, outfile=vouttbl), ) return lib.virtualfile_to_dataset( vfname=vouttbl, output_type=output_type, column_names=column_names diff --git a/pygmt/src/grdclip.py b/pygmt/src/grdclip.py index 00c58a370e9..27614ebd66d 100644 --- a/pygmt/src/grdclip.py +++ b/pygmt/src/grdclip.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdclip"] @@ -94,6 +94,6 @@ def grdclip(grid, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="grdclip", args=build_arg_string(kwargs, infile=vingrd) + module="grdclip", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdcontour.py b/pygmt/src/grdcontour.py index 89268d8fe51..53e6b32ced8 100644 --- a/pygmt/src/grdcontour.py +++ b/pygmt/src/grdcontour.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdcontour"] @@ -125,5 +125,5 @@ def grdcontour(self, grid, **kwargs): with Session() as lib: with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: lib.call_module( - module="grdcontour", args=build_arg_string(kwargs, infile=vingrd) + module="grdcontour", args=build_arg_list(kwargs, infile=vingrd) ) diff --git a/pygmt/src/grdcut.py b/pygmt/src/grdcut.py index 9ffcda213d6..6aed8dad680 100644 --- a/pygmt/src/grdcut.py +++ b/pygmt/src/grdcut.py @@ -5,7 +5,7 @@ from pygmt.clib import Session from pygmt.helpers import ( GMTTempFile, - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -105,7 +105,7 @@ def grdcut(grid, **kwargs): if (outgrid := kwargs.get("G")) is None: kwargs["G"] = outgrid = tmpfile.name # output to tmpfile lib.call_module( - module="grdcut", args=build_arg_string(kwargs, infile=vingrd) + module="grdcut", args=build_arg_list(kwargs, infile=vingrd) ) return load_dataarray(outgrid) if outgrid == tmpfile.name else None diff --git a/pygmt/src/grdfill.py b/pygmt/src/grdfill.py index 1fe1dbf24ca..b4a3b9739e9 100644 --- a/pygmt/src/grdfill.py +++ b/pygmt/src/grdfill.py @@ -4,7 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, 
fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdfill"] @@ -77,6 +77,6 @@ def grdfill(grid, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="grdfill", args=build_arg_string(kwargs, infile=vingrd) + module="grdfill", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdfilter.py b/pygmt/src/grdfilter.py index f8d9915c231..6944a4d0be3 100644 --- a/pygmt/src/grdfilter.py +++ b/pygmt/src/grdfilter.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -131,6 +131,6 @@ def grdfilter(grid, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="grdfilter", args=build_arg_string(kwargs, infile=vingrd) + module="grdfilter", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdgradient.py b/pygmt/src/grdgradient.py index 0a656d2e09d..cb947b9b78e 100644 --- a/pygmt/src/grdgradient.py +++ b/pygmt/src/grdgradient.py @@ -6,7 +6,7 @@ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( args_in_kwargs, - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -171,6 +171,6 @@ def grdgradient(grid, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="grdgradient", args=build_arg_string(kwargs, infile=vingrd) + module="grdgradient", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdhisteq.py b/pygmt/src/grdhisteq.py index 7cf2e9f25a4..0365f5238dc 100644 --- a/pygmt/src/grdhisteq.py +++ b/pygmt/src/grdhisteq.py @@ -9,7 +9,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -127,7 +127,7 @@ def equalize_grid(grid, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="grdhisteq", args=build_arg_string(kwargs, infile=vingrd) + module="grdhisteq", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) @@ -233,7 +233,7 @@ def compute_bins( ): kwargs["D"] = vouttbl # -D for output file name lib.call_module( - module="grdhisteq", args=build_arg_string(kwargs, infile=vingrd) + module="grdhisteq", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_dataset( diff --git a/pygmt/src/grdimage.py b/pygmt/src/grdimage.py index 281bc5a9016..a1ba783ab6c 100644 --- a/pygmt/src/grdimage.py +++ b/pygmt/src/grdimage.py @@ -5,7 +5,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -172,5 +172,5 @@ def grdimage(self, grid, **kwargs): ): kwargs["I"] = vshadegrid lib.call_module( - module="grdimage", args=build_arg_string(kwargs, infile=vingrd) + module="grdimage", args=build_arg_list(kwargs, infile=vingrd) ) diff --git a/pygmt/src/grdinfo.py b/pygmt/src/grdinfo.py index 02de4924775..7ebb8bfdd03 100644 --- a/pygmt/src/grdinfo.py +++ b/pygmt/src/grdinfo.py @@ -5,7 +5,7 @@ from pygmt.clib import Session from pygmt.helpers 
import ( GMTTempFile, - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -115,7 +115,7 @@ def grdinfo(grid, **kwargs): with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: lib.call_module( module="grdinfo", - args=build_arg_string(kwargs, infile=vingrd, outfile=outfile.name), + args=build_arg_list(kwargs, infile=vingrd, outfile=outfile.name), ) result = outfile.read() return result diff --git a/pygmt/src/grdlandmask.py b/pygmt/src/grdlandmask.py index 75d3327e121..6d4a57242f3 100644 --- a/pygmt/src/grdlandmask.py +++ b/pygmt/src/grdlandmask.py @@ -4,7 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdlandmask"] @@ -100,5 +100,5 @@ def grdlandmask(outgrid: str | None = None, **kwargs): with Session() as lib: with lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd: kwargs["G"] = voutgrd - lib.call_module(module="grdlandmask", args=build_arg_string(kwargs)) + lib.call_module(module="grdlandmask", args=build_arg_list(kwargs)) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdproject.py b/pygmt/src/grdproject.py index 9046ccbfa6a..1c80425da18 100644 --- a/pygmt/src/grdproject.py +++ b/pygmt/src/grdproject.py @@ -4,7 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdproject"] @@ -111,6 +111,6 @@ def grdproject(grid, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="grdproject", args=build_arg_string(kwargs, infile=vingrd) + module="grdproject", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdsample.py b/pygmt/src/grdsample.py index 8c2c0f692a2..27675ff3744 100644 --- a/pygmt/src/grdsample.py +++ b/pygmt/src/grdsample.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdsample"] @@ -86,6 +86,6 @@ def grdsample(grid, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="grdsample", args=build_arg_string(kwargs, infile=vingrd) + module="grdsample", args=build_arg_list(kwargs, infile=vingrd) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/grdtrack.py b/pygmt/src/grdtrack.py index 930cf00656f..c8693308edf 100644 --- a/pygmt/src/grdtrack.py +++ b/pygmt/src/grdtrack.py @@ -9,7 +9,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -315,7 +315,7 @@ def grdtrack( kwargs["G"] = vingrd lib.call_module( module="grdtrack", - args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + args=build_arg_list(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset( vfname=vouttbl, diff --git a/pygmt/src/grdview.py b/pygmt/src/grdview.py index 44a654b5690..5e6256a0b1f 100644 --- 
a/pygmt/src/grdview.py +++ b/pygmt/src/grdview.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["grdview"] @@ -153,5 +153,5 @@ def grdview(self, grid, **kwargs): ): kwargs["G"] = vdrapegrid lib.call_module( - module="grdview", args=build_arg_string(kwargs, infile=vingrd) + module="grdview", args=build_arg_list(kwargs, infile=vingrd) ) diff --git a/pygmt/src/grdvolume.py b/pygmt/src/grdvolume.py index c651163076c..3439bef69b5 100644 --- a/pygmt/src/grdvolume.py +++ b/pygmt/src/grdvolume.py @@ -8,7 +8,7 @@ import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -109,6 +109,6 @@ def grdvolume( ): lib.call_module( module="grdvolume", - args=build_arg_string(kwargs, infile=vingrd, outfile=vouttbl), + args=build_arg_list(kwargs, infile=vingrd, outfile=vouttbl), ) return lib.virtualfile_to_dataset(vfname=vouttbl, output_type=output_type) diff --git a/pygmt/src/histogram.py b/pygmt/src/histogram.py index 93b4fff2345..54cc94283c9 100644 --- a/pygmt/src/histogram.py +++ b/pygmt/src/histogram.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -137,5 +137,5 @@ def histogram(self, data, **kwargs): with Session() as lib: with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: lib.call_module( - module="histogram", args=build_arg_string(kwargs, infile=vintbl) + module="histogram", args=build_arg_list(kwargs, infile=vintbl) ) diff --git a/pygmt/src/image.py b/pygmt/src/image.py index 80bc7552b0c..601c09d7eff 100644 --- a/pygmt/src/image.py +++ b/pygmt/src/image.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -69,4 +69,4 @@ def image(self, imagefile, **kwargs): """ kwargs = self._preprocess(**kwargs) with Session() as lib: - lib.call_module(module="image", args=build_arg_string(kwargs, infile=imagefile)) + lib.call_module(module="image", args=build_arg_list(kwargs, infile=imagefile)) diff --git a/pygmt/src/info.py b/pygmt/src/info.py index e0497d3b162..f9fa73c62f7 100644 --- a/pygmt/src/info.py +++ b/pygmt/src/info.py @@ -6,7 +6,7 @@ from pygmt.clib import Session from pygmt.helpers import ( GMTTempFile, - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -85,7 +85,7 @@ def info(data, **kwargs): with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: lib.call_module( module="info", - args=build_arg_string(kwargs, infile=vintbl, outfile=tmpfile.name), + args=build_arg_list(kwargs, infile=vintbl, outfile=tmpfile.name), ) result = tmpfile.read() diff --git a/pygmt/src/inset.py b/pygmt/src/inset.py index 97b9825c1d1..e05b038c5ac 100644 --- a/pygmt/src/inset.py +++ b/pygmt/src/inset.py @@ -5,7 +5,7 @@ import contextlib from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["inset"] @@ -137,8 
+137,9 @@ def inset(self, **kwargs): kwargs = self._preprocess(**kwargs) with Session() as lib: try: - lib.call_module(module="inset", args=f"begin {build_arg_string(kwargs)}") + lib.call_module(module="inset", args=["begin", *build_arg_list(kwargs)]) yield finally: - v_arg = build_arg_string({"V": kwargs.get("V")}) - lib.call_module(module="inset", args=f"end {v_arg}".strip()) + lib.call_module( + module="inset", args=["end", *build_arg_list({"V": kwargs.get("V")})] + ) diff --git a/pygmt/src/legend.py b/pygmt/src/legend.py index 8f58112d6d7..e5a7ebefab0 100644 --- a/pygmt/src/legend.py +++ b/pygmt/src/legend.py @@ -5,9 +5,10 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, data_kind, fmt_docstring, + is_nonstr_iter, kwargs_to_strings, use_alias, ) @@ -77,8 +78,9 @@ def legend(self, spec=None, position="JTR+jTR+o0.2c", box="+gwhite+p1p", **kwarg with Session() as lib: if spec is None: specfile = "" - elif data_kind(spec) == "file": + elif data_kind(spec) == "file" and not is_nonstr_iter(spec): + # Is a file but not a list of files specfile = spec else: raise GMTInvalidInput(f"Unrecognized data type: {type(spec)}") - lib.call_module(module="legend", args=build_arg_string(kwargs, infile=specfile)) + lib.call_module(module="legend", args=build_arg_list(kwargs, infile=specfile)) diff --git a/pygmt/src/logo.py b/pygmt/src/logo.py index 8c73a8f69ce..bab9c5dcd36 100644 --- a/pygmt/src/logo.py +++ b/pygmt/src/logo.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -56,4 +56,4 @@ def logo(self, **kwargs): """ kwargs = self._preprocess(**kwargs) with Session() as lib: - lib.call_module(module="logo", args=build_arg_string(kwargs)) + lib.call_module(module="logo", args=build_arg_list(kwargs)) diff --git a/pygmt/src/makecpt.py b/pygmt/src/makecpt.py index 9e8a07e6f95..695ea4c5afa 100644 --- a/pygmt/src/makecpt.py +++ b/pygmt/src/makecpt.py @@ -4,7 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -157,10 +157,10 @@ def makecpt(**kwargs): if kwargs.get("W") is not None and kwargs.get("Ww") is not None: raise GMTInvalidInput("Set only categorical or cyclic to True, not both.") if kwargs.get("H") is None: # if no output is set - arg_str = build_arg_string(kwargs) + arg_str = build_arg_list(kwargs) else: # if output is set outfile, kwargs["H"] = kwargs.pop("H"), True if not outfile or not isinstance(outfile, str): raise GMTInvalidInput("'output' should be a proper file name.") - arg_str = build_arg_string(kwargs, outfile=outfile) + arg_str = build_arg_list(kwargs, outfile=outfile) lib.call_module(module="makecpt", args=arg_str) diff --git a/pygmt/src/meca.py b/pygmt/src/meca.py index 9161e0dcd0d..e44f3cb00a4 100644 --- a/pygmt/src/meca.py +++ b/pygmt/src/meca.py @@ -6,7 +6,7 @@ import pandas as pd from pygmt.clib import Session from pygmt.exceptions import GMTError, GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias def convention_code(convention, 
component="full"): @@ -490,4 +490,4 @@ def meca( # noqa: PLR0912, PLR0913, PLR0915 kwargs["S"] = f"{data_format}{scale}" with Session() as lib: with lib.virtualfile_in(check_kind="vector", data=spec) as vintbl: - lib.call_module(module="meca", args=build_arg_string(kwargs, infile=vintbl)) + lib.call_module(module="meca", args=build_arg_list(kwargs, infile=vintbl)) diff --git a/pygmt/src/nearneighbor.py b/pygmt/src/nearneighbor.py index 81e0fd4d50f..7027e04a358 100644 --- a/pygmt/src/nearneighbor.py +++ b/pygmt/src/nearneighbor.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["nearneighbor"] @@ -146,6 +146,6 @@ def nearneighbor( ): kwargs["G"] = voutgrd lib.call_module( - module="nearneighbor", args=build_arg_string(kwargs, infile=vintbl) + module="nearneighbor", args=build_arg_list(kwargs, infile=vintbl) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/plot.py b/pygmt/src/plot.py index 3206c5c053a..e660f370848 100644 --- a/pygmt/src/plot.py +++ b/pygmt/src/plot.py @@ -7,7 +7,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, data_kind, fmt_docstring, is_nonstr_iter, @@ -258,4 +258,4 @@ def plot( # noqa: PLR0912 with lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, extra_arrays=extra_arrays ) as vintbl: - lib.call_module(module="plot", args=build_arg_string(kwargs, infile=vintbl)) + lib.call_module(module="plot", args=build_arg_list(kwargs, infile=vintbl)) diff --git a/pygmt/src/plot3d.py b/pygmt/src/plot3d.py index 3e4e91b597d..929847337c5 100644 --- a/pygmt/src/plot3d.py +++ b/pygmt/src/plot3d.py @@ -7,7 +7,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, data_kind, fmt_docstring, is_nonstr_iter, @@ -240,6 +240,4 @@ def plot3d( # noqa: PLR0912 extra_arrays=extra_arrays, required_z=True, ) as vintbl: - lib.call_module( - module="plot3d", args=build_arg_string(kwargs, infile=vintbl) - ) + lib.call_module(module="plot3d", args=build_arg_list(kwargs, infile=vintbl)) diff --git a/pygmt/src/project.py b/pygmt/src/project.py index ab0e6ba7f8d..811a7d48158 100644 --- a/pygmt/src/project.py +++ b/pygmt/src/project.py @@ -9,7 +9,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -253,7 +253,7 @@ def project( ): lib.call_module( module="project", - args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + args=build_arg_list(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset( vfname=vouttbl, diff --git a/pygmt/src/rose.py b/pygmt/src/rose.py index 189e68546a2..c347db0e3f3 100644 --- a/pygmt/src/rose.py +++ b/pygmt/src/rose.py @@ -4,7 +4,7 @@ from pygmt.clib import Session from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -203,4 +203,4 @@ def rose(self, data=None, length=None, azimuth=None, **kwargs): with lib.virtualfile_in( check_kind="vector", data=data, x=length, y=azimuth ) as vintbl: - lib.call_module(module="rose", args=build_arg_string(kwargs, infile=vintbl)) + lib.call_module(module="rose", 
args=build_arg_list(kwargs, infile=vintbl)) diff --git a/pygmt/src/select.py b/pygmt/src/select.py index fde4f88b47f..75f4fa802ef 100644 --- a/pygmt/src/select.py +++ b/pygmt/src/select.py @@ -8,7 +8,7 @@ import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -218,7 +218,7 @@ def select( ): lib.call_module( module="select", - args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + args=build_arg_list(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset( vfname=vouttbl, diff --git a/pygmt/src/shift_origin.py b/pygmt/src/shift_origin.py index ed789231799..397168be198 100644 --- a/pygmt/src/shift_origin.py +++ b/pygmt/src/shift_origin.py @@ -65,4 +65,4 @@ def shift_origin( args.append(f"-Y{yshift}") with Session() as lib: - lib.call_module(module="plot", args=" ".join(args)) + lib.call_module(module="plot", args=args) diff --git a/pygmt/src/solar.py b/pygmt/src/solar.py index ffc086e8267..4237f0492dd 100644 --- a/pygmt/src/solar.py +++ b/pygmt/src/solar.py @@ -9,7 +9,7 @@ import pandas as pd from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["solar"] @@ -118,4 +118,4 @@ def solar( raise GMTInvalidInput("Unrecognized datetime format.") from verr kwargs["T"] += f"+d{datetime_string}" with Session() as lib: - lib.call_module(module="solar", args=build_arg_string(kwargs)) + lib.call_module(module="solar", args=build_arg_list(kwargs)) diff --git a/pygmt/src/sph2grd.py b/pygmt/src/sph2grd.py index 533b578caa0..fe959487a5c 100644 --- a/pygmt/src/sph2grd.py +++ b/pygmt/src/sph2grd.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["sph2grd"] @@ -71,6 +71,6 @@ def sph2grd(data, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="sph2grd", args=build_arg_string(kwargs, infile=vintbl) + module="sph2grd", args=build_arg_list(kwargs, infile=vintbl) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/sphdistance.py b/pygmt/src/sphdistance.py index 2c426a54352..e23f7999a8e 100644 --- a/pygmt/src/sphdistance.py +++ b/pygmt/src/sphdistance.py @@ -5,7 +5,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["sphdistance"] @@ -115,6 +115,6 @@ def sphdistance(data=None, x=None, y=None, outgrid: str | None = None, **kwargs) ): kwargs["G"] = voutgrd lib.call_module( - module="sphdistance", args=build_arg_string(kwargs, infile=vintbl) + module="sphdistance", args=build_arg_list(kwargs, infile=vintbl) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/sphinterpolate.py b/pygmt/src/sphinterpolate.py index a8d57d20abe..995579c7737 100644 --- a/pygmt/src/sphinterpolate.py +++ b/pygmt/src/sphinterpolate.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, 
use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["sphinterpolate"] @@ -65,6 +65,6 @@ def sphinterpolate(data, outgrid: str | None = None, **kwargs): ): kwargs["G"] = voutgrd lib.call_module( - module="sphinterpolate", args=build_arg_string(kwargs, infile=vintbl) + module="sphinterpolate", args=build_arg_list(kwargs, infile=vintbl) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/subplot.py b/pygmt/src/subplot.py index 0b913472022..d4d769f4c21 100644 --- a/pygmt/src/subplot.py +++ b/pygmt/src/subplot.py @@ -7,7 +7,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -160,13 +160,16 @@ def subplot(self, nrows=1, ncols=1, **kwargs): # See https://github.com/GenericMappingTools/pygmt/issues/2426. try: with Session() as lib: - arg_str = " ".join(["begin", f"{nrows}x{ncols}", build_arg_string(kwargs)]) - lib.call_module(module="subplot", args=arg_str) + lib.call_module( + module="subplot", + args=["begin", f"{nrows}x{ncols}", *build_arg_list(kwargs)], + ) yield finally: with Session() as lib: - v_arg = build_arg_string({"V": kwargs.get("V")}) - lib.call_module(module="subplot", args=f"end {v_arg}") + lib.call_module( + module="subplot", args=["end", *build_arg_list({"V": kwargs.get("V")})] + ) @fmt_docstring @@ -224,6 +227,7 @@ def set_panel(self, panel=None, **kwargs): kwargs = self._preprocess(**kwargs) with Session() as lib: - arg_str = " ".join(["set", f"{panel}", build_arg_string(kwargs)]) - lib.call_module(module="subplot", args=arg_str) + lib.call_module( + module="subplot", args=["set", str(panel), *build_arg_list(kwargs)] + ) yield diff --git a/pygmt/src/surface.py b/pygmt/src/surface.py index d336034632a..23fdbdb353d 100644 --- a/pygmt/src/surface.py +++ b/pygmt/src/surface.py @@ -4,7 +4,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["surface"] @@ -159,6 +159,6 @@ def surface(data=None, x=None, y=None, z=None, outgrid: str | None = None, **kwa ): kwargs["G"] = voutgrd lib.call_module( - module="surface", args=build_arg_string(kwargs, infile=vintbl) + module="surface", args=build_arg_list(kwargs, infile=vintbl) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/src/ternary.py b/pygmt/src/ternary.py index d3761c21b0d..8b1201e77c5 100644 --- a/pygmt/src/ternary.py +++ b/pygmt/src/ternary.py @@ -5,7 +5,7 @@ import pandas as pd from packaging.version import Version from pygmt.clib import Session, __gmt_version__ -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -90,5 +90,5 @@ def ternary(self, data, alabel=None, blabel=None, clabel=None, **kwargs): with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: lib.call_module( module="ternary", - args=build_arg_string(kwargs, infile=vintbl), + args=build_arg_list(kwargs, infile=vintbl), ) diff --git a/pygmt/src/text.py b/pygmt/src/text.py index 91432cf9d87..04abf12ea3b 100644 --- a/pygmt/src/text.py +++ b/pygmt/src/text.py @@ -6,7 +6,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from 
pygmt.helpers import ( - build_arg_string, + build_arg_list, data_kind, fmt_docstring, is_nonstr_iter, @@ -37,12 +37,7 @@ t="transparency", w="wrap", ) -@kwargs_to_strings( - R="sequence", - textfiles="sequence_space", - c="sequence_comma", - p="sequence", -) +@kwargs_to_strings(R="sequence", c="sequence_comma", p="sequence") def text_( # noqa: PLR0912 self, textfiles=None, @@ -240,4 +235,4 @@ def text_( # noqa: PLR0912 with lib.virtualfile_in( check_kind="vector", data=textfiles, x=x, y=y, extra_arrays=extra_arrays ) as vintbl: - lib.call_module(module="text", args=build_arg_string(kwargs, infile=vintbl)) + lib.call_module(module="text", args=build_arg_list(kwargs, infile=vintbl)) diff --git a/pygmt/src/tilemap.py b/pygmt/src/tilemap.py index 168b2a2131f..1fe98ea1f1b 100644 --- a/pygmt/src/tilemap.py +++ b/pygmt/src/tilemap.py @@ -6,7 +6,7 @@ from pygmt.clib import Session from pygmt.datasets.tile_map import load_tile_map -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias try: import rioxarray # noqa: F401 @@ -152,5 +152,5 @@ def tilemap( with Session() as lib: with lib.virtualfile_in(check_kind="raster", data=raster) as vingrd: lib.call_module( - module="grdimage", args=build_arg_string(kwargs, infile=vingrd) + module="grdimage", args=build_arg_list(kwargs, infile=vingrd) ) diff --git a/pygmt/src/timestamp.py b/pygmt/src/timestamp.py index c7cc18574cf..1dbd8a86c17 100644 --- a/pygmt/src/timestamp.py +++ b/pygmt/src/timestamp.py @@ -9,7 +9,7 @@ from packaging.version import Version from pygmt.clib import Session, __gmt_version__ -from pygmt.helpers import build_arg_string, deprecate_parameter, kwargs_to_strings +from pygmt.helpers import build_arg_list, deprecate_parameter, kwargs_to_strings if TYPE_CHECKING: from collections.abc import Sequence @@ -114,7 +114,7 @@ def timestamp( with Session() as lib: lib.call_module( module="plot", - args=build_arg_string( + args=build_arg_list( kwdict, confdict={"FONT_LOGO": font, "FORMAT_TIME_STAMP": timefmt} ), ) diff --git a/pygmt/src/triangulate.py b/pygmt/src/triangulate.py index 135928ca424..f1b64db38ec 100644 --- a/pygmt/src/triangulate.py +++ b/pygmt/src/triangulate.py @@ -9,7 +9,7 @@ import pandas as pd from pygmt.clib import Session from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -144,7 +144,7 @@ def regular_grid( ): kwargs["G"] = voutgrd lib.call_module( - module="triangulate", args=build_arg_string(kwargs, infile=vintbl) + module="triangulate", args=build_arg_list(kwargs, infile=vintbl) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) @@ -244,6 +244,6 @@ def delaunay_triples( ): lib.call_module( module="triangulate", - args=build_arg_string(kwargs, infile=vintbl, outfile=vouttbl), + args=build_arg_list(kwargs, infile=vintbl, outfile=vouttbl), ) return lib.virtualfile_to_dataset(vfname=vouttbl, output_type=output_type) diff --git a/pygmt/src/velo.py b/pygmt/src/velo.py index 2c349c6b265..4c536cb2ea0 100644 --- a/pygmt/src/velo.py +++ b/pygmt/src/velo.py @@ -7,7 +7,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - build_arg_string, + build_arg_list, fmt_docstring, kwargs_to_strings, use_alias, @@ -255,4 +255,4 @@ def velo(self, data=None, **kwargs): with Session() as lib: with lib.virtualfile_in(check_kind="vector", data=data) as vintbl: - lib.call_module(module="velo", 
args=build_arg_string(kwargs, infile=vintbl)) + lib.call_module(module="velo", args=build_arg_list(kwargs, infile=vintbl)) diff --git a/pygmt/src/which.py b/pygmt/src/which.py index c4afd1d006d..2871693c666 100644 --- a/pygmt/src/which.py +++ b/pygmt/src/which.py @@ -3,12 +3,11 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, is_nonstr_iter, use_alias @fmt_docstring @use_alias(G="download", V="verbose") -@kwargs_to_strings(fname="sequence_space") def which(fname, **kwargs) -> str | list[str]: r""" Find the full path to specified files. @@ -62,13 +61,13 @@ def which(fname, **kwargs) -> str | list[str]: with lib.virtualfile_out(kind="dataset") as vouttbl: lib.call_module( module="which", - args=build_arg_string(kwargs, infile=fname, outfile=vouttbl), + args=build_arg_list(kwargs, infile=fname, outfile=vouttbl), ) paths = lib.virtualfile_to_dataset(vfname=vouttbl, output_type="strings") match paths.size: case 0: - _fname = fname.replace(" ", "', '") + _fname = "', '".join(fname) if is_nonstr_iter(fname) else fname raise FileNotFoundError(f"File(s) '{_fname}' not found.") case 1: return paths[0] diff --git a/pygmt/src/wiggle.py b/pygmt/src/wiggle.py index 96bf3be0f6d..921c5317349 100644 --- a/pygmt/src/wiggle.py +++ b/pygmt/src/wiggle.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -110,6 +110,4 @@ def wiggle( with lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, z=z, required_z=True ) as vintbl: - lib.call_module( - module="wiggle", args=build_arg_string(kwargs, infile=vintbl) - ) + lib.call_module(module="wiggle", args=build_arg_list(kwargs, infile=vintbl)) diff --git a/pygmt/src/x2sys_cross.py b/pygmt/src/x2sys_cross.py index c530c964025..eadd20dcfb2 100644 --- a/pygmt/src/x2sys_cross.py +++ b/pygmt/src/x2sys_cross.py @@ -12,7 +12,7 @@ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( GMTTempFile, - build_arg_string, + build_arg_list, data_kind, fmt_docstring, kwargs_to_strings, @@ -223,9 +223,7 @@ def x2sys_cross(tracks=None, outfile=None, **kwargs): outfile = tmpfile.name lib.call_module( module="x2sys_cross", - args=build_arg_string( - kwargs, infile=" ".join(fnames), outfile=outfile - ), + args=build_arg_list(kwargs, infile=fnames, outfile=outfile), ) # Read temporary csv output to a pandas table diff --git a/pygmt/src/x2sys_init.py b/pygmt/src/x2sys_init.py index f4107e6f0b6..b6ce159abc7 100644 --- a/pygmt/src/x2sys_init.py +++ b/pygmt/src/x2sys_init.py @@ -3,7 +3,7 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias @fmt_docstring @@ -111,4 +111,4 @@ def x2sys_init(tag, **kwargs): {distcalc} """ with Session() as lib: - lib.call_module(module="x2sys_init", args=build_arg_string(kwargs, infile=tag)) + lib.call_module(module="x2sys_init", args=build_arg_list(kwargs, infile=tag)) diff --git a/pygmt/src/xyz2grd.py b/pygmt/src/xyz2grd.py index c3a5bd434ad..2eedfb62e83 100644 --- a/pygmt/src/xyz2grd.py +++ b/pygmt/src/xyz2grd.py @@ -4,7 +4,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import build_arg_string, 
fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias __doctest_skip__ = ["xyz2grd"] @@ -151,6 +151,6 @@ def xyz2grd(data=None, x=None, y=None, z=None, outgrid: str | None = None, **kwa ): kwargs["G"] = voutgrd lib.call_module( - module="xyz2grd", args=build_arg_string(kwargs, infile=vintbl) + module="xyz2grd", args=build_arg_list(kwargs, infile=vintbl) ) return lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) diff --git a/pygmt/tests/test_geopandas.py b/pygmt/tests/test_geopandas.py index af40cc8e6d4..77fa7a9f9d5 100644 --- a/pygmt/tests/test_geopandas.py +++ b/pygmt/tests/test_geopandas.py @@ -49,7 +49,7 @@ def fixture_gdf_ridge(): """ # Read shapefile into a geopandas.GeoDataFrame shapefile = which( - fname="@RidgeTest.shp @RidgeTest.shx @RidgeTest.dbf @RidgeTest.prj", + fname=["@RidgeTest.shp", "@RidgeTest.shx", "@RidgeTest.dbf", "@RidgeTest.prj"], download="c", ) gdf = gpd.read_file(shapefile[0]) From 1746c04d60036174e9c2b8353be1896783845c8d Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 18 Apr 2024 20:20:48 +0800 Subject: [PATCH 070/218] Session.virtualfile_to_dataset: Add 'header' parameter to parse column names from table header (#3117) --- pygmt/clib/session.py | 7 ++- pygmt/datatypes/dataset.py | 24 ++++++++--- pygmt/tests/test_datatypes_dataset.py | 61 ++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 8 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 8a8b52df8e5..ba3644f0e28 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1810,6 +1810,7 @@ def virtualfile_to_dataset( self, vfname: str, output_type: Literal["pandas", "numpy", "file", "strings"] = "pandas", + header: int | None = None, column_names: list[str] | None = None, dtype: type | dict[str, type] | None = None, index_col: str | int | None = None, @@ -1831,6 +1832,10 @@ def virtualfile_to_dataset( - ``"numpy"`` will return a :class:`numpy.ndarray` object. - ``"file"`` means the result was saved to a file and will return ``None``. - ``"strings"`` will return the trailing text only as an array of strings. + header + Row number containing column names for the :class:`pandas.DataFrame` output. + ``header=None`` means not to parse the column names from table header. + Ignored if the row number is larger than the number of headers in the table. column_names The column names for the :class:`pandas.DataFrame` output. dtype @@ -1945,7 +1950,7 @@ def virtualfile_to_dataset( return result.to_strings() result = result.to_dataframe( - column_names=column_names, dtype=dtype, index_col=index_col + header=header, column_names=column_names, dtype=dtype, index_col=index_col ) if output_type == "numpy": # numpy.ndarray output return result.to_numpy() diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index daf0073aefe..e5df4a2b4a0 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -27,6 +27,7 @@ class _GMT_DATASET(ctp.Structure): # noqa: N801 >>> with GMTTempFile(suffix=".txt") as tmpfile: ... # Prepare the sample data file ... with Path(tmpfile.name).open(mode="w") as fp: + ... print("# x y z name", file=fp) ... print(">", file=fp) ... print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) ... print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) @@ -43,7 +44,8 @@ class _GMT_DATASET(ctp.Structure): # noqa: N801 ... print(ds.min[: ds.n_columns], ds.max[: ds.n_columns]) ... # The table ... tbl = ds.table[0].contents - ... 
print(tbl.n_columns, tbl.n_segments, tbl.n_records) + ... print(tbl.n_columns, tbl.n_segments, tbl.n_records, tbl.n_headers) + ... print(tbl.header[: tbl.n_headers]) ... print(tbl.min[: tbl.n_columns], ds.max[: tbl.n_columns]) ... for i in range(tbl.n_segments): ... seg = tbl.segment[i].contents @@ -52,7 +54,8 @@ class _GMT_DATASET(ctp.Structure): # noqa: N801 ... print(seg.text[: seg.n_rows]) 1 3 2 [1.0, 2.0, 3.0] [10.0, 11.0, 12.0] - 3 2 4 + 3 2 4 1 + [b'x y z name'] [1.0, 2.0, 3.0] [10.0, 11.0, 12.0] [1.0, 4.0] [2.0, 5.0] @@ -169,6 +172,7 @@ def to_strings(self) -> np.ndarray[Any, np.dtype[np.str_]]: def to_dataframe( self, + header: int | None = None, column_names: pd.Index | None = None, dtype: type | Mapping[Any, type] | None = None, index_col: str | int | None = None, @@ -187,6 +191,10 @@ def to_dataframe( ---------- column_names A list of column names. + header + Row number containing column names. ``header=None`` means not to parse the + column names from table header. Ignored if the row number is larger than the + number of headers in the table. dtype Data type. Can be a single type for all columns or a dictionary mapping column names to types. @@ -207,6 +215,7 @@ def to_dataframe( >>> with GMTTempFile(suffix=".txt") as tmpfile: ... # prepare the sample data file ... with Path(tmpfile.name).open(mode="w") as fp: + ... print("# col1 col2 col3 colstr", file=fp) ... print(">", file=fp) ... print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) ... print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) @@ -218,12 +227,12 @@ def to_dataframe( ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") ... ds = lib.read_virtualfile(vouttbl, kind="dataset") ... text = ds.contents.to_strings() - ... df = ds.contents.to_dataframe() + ... df = ds.contents.to_dataframe(header=0) >>> text array(['TEXT1 TEXT23', 'TEXT4 TEXT567', 'TEXT8 TEXT90', 'TEXT123 TEXT456789'], dtype='>> df - 0 1 2 3 + col1 col2 col3 colstr 0 1.0 2.0 3.0 TEXT1 TEXT23 1 4.0 5.0 6.0 TEXT4 TEXT567 2 7.0 8.0 9.0 TEXT8 TEXT90 @@ -248,6 +257,11 @@ def to_dataframe( if len(textvector) != 0: vectors.append(pd.Series(data=textvector, dtype=pd.StringDtype())) + if header is not None: + tbl = self.table[0].contents # Use the first table! + if header < tbl.n_headers: + column_names = tbl.header[header].decode().split() + if len(vectors) == 0: # Return an empty DataFrame if no columns are found. df = pd.DataFrame(columns=column_names) @@ -255,7 +269,7 @@ def to_dataframe( # Create a DataFrame object by concatenating multiple columns df = pd.concat(objs=vectors, axis="columns") if column_names is not None: # Assign column names - df.columns = column_names + df.columns = column_names[: df.shape[1]] if dtype is not None: # Set dtype for the whole dataset or individual columns df = df.astype(dtype) if index_col is not None: # Use a specific column as index diff --git a/pygmt/tests/test_datatypes_dataset.py b/pygmt/tests/test_datatypes_dataset.py index 6481591b22a..9576595b6b2 100644 --- a/pygmt/tests/test_datatypes_dataset.py +++ b/pygmt/tests/test_datatypes_dataset.py @@ -40,14 +40,14 @@ def dataframe_from_pandas(filepath_or_buffer, sep=r"\s+", comment="#", header=No return df -def dataframe_from_gmt(fname): +def dataframe_from_gmt(fname, **kwargs): """ Read tabular data as pandas.DataFrame using GMT virtual file. 
""" with Session() as lib: with lib.virtualfile_out(kind="dataset") as vouttbl: lib.call_module("read", f"{fname} {vouttbl} -Td") - df = lib.virtualfile_to_dataset(vfname=vouttbl) + df = lib.virtualfile_to_dataset(vfname=vouttbl, **kwargs) return df @@ -84,6 +84,63 @@ def test_dataset_empty(): pd.testing.assert_frame_equal(df, expected_df) +def test_dataset_header(): + """ + Test parsing column names from dataset header. + """ + with GMTTempFile(suffix=".txt") as tmpfile: + with Path(tmpfile.name).open(mode="w") as fp: + print("# lon lat z text", file=fp) + print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) + print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) + + # Parse columne names from the first header line + df = dataframe_from_gmt(tmpfile.name, header=0) + assert df.columns.tolist() == ["lon", "lat", "z", "text"] + # pd.read_csv() can't parse the header line with a leading '#'. + # So, we need to skip the header line and manually set the column names. + expected_df = dataframe_from_pandas(tmpfile.name, header=None) + expected_df.columns = df.columns.tolist() + pd.testing.assert_frame_equal(df, expected_df) + + +def test_dataset_header_greater_than_nheaders(): + """ + Test passing a header line number that is greater than the number of header lines. + """ + with GMTTempFile(suffix=".txt") as tmpfile: + with Path(tmpfile.name).open(mode="w") as fp: + print("# lon lat z text", file=fp) + print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) + print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) + + # Parse column names from the second header line. + df = dataframe_from_gmt(tmpfile.name, header=1) + # There is only one header line, so the column names should be default. + assert df.columns.tolist() == [0, 1, 2, 3] + expected_df = dataframe_from_pandas(tmpfile.name, header=None) + pd.testing.assert_frame_equal(df, expected_df) + + +def test_dataset_header_too_many_names(): + """ + Test passing a header line with more column names than the number of columns. + """ + with GMTTempFile(suffix=".txt") as tmpfile: + with Path(tmpfile.name).open(mode="w") as fp: + print("# lon lat z text1 text2", file=fp) + print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) + print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) + + df = dataframe_from_gmt(tmpfile.name, header=0) + assert df.columns.tolist() == ["lon", "lat", "z", "text1"] + # pd.read_csv() can't parse the header line with a leading '#'. + # So, we need to skip the header line and manually set the column names. + expected_df = dataframe_from_pandas(tmpfile.name, header=None) + expected_df.columns = df.columns.tolist() + pd.testing.assert_frame_equal(df, expected_df) + + def test_dataset_to_strings_with_none_values(): """ Test that None values in the trailing text doesn't raise an exception. 
From 62872d3da73dfb69c6ec84d419f66cd8ae90bc11 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 18 Apr 2024 23:34:37 +0800 Subject: [PATCH 071/218] Add a test for typesetting quotation marks (#3179) --- .../baseline/test_text_quotation_marks.png.dvc | 5 +++++ pygmt/tests/test_text.py | 13 +++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 pygmt/tests/baseline/test_text_quotation_marks.png.dvc diff --git a/pygmt/tests/baseline/test_text_quotation_marks.png.dvc b/pygmt/tests/baseline/test_text_quotation_marks.png.dvc new file mode 100644 index 00000000000..2a1cf6296d3 --- /dev/null +++ b/pygmt/tests/baseline/test_text_quotation_marks.png.dvc @@ -0,0 +1,5 @@ +outs: +- md5: 90d08c5a11c606abed51b84eafcdea04 + size: 1662 + hash: md5 + path: test_text_quotation_marks.png diff --git a/pygmt/tests/test_text.py b/pygmt/tests/test_text.py index 1ef6a19bc11..9d52f2629af 100644 --- a/pygmt/tests/test_text.py +++ b/pygmt/tests/test_text.py @@ -419,3 +419,16 @@ def test_text_nonascii(): fig.text(x=1, y=1, text="xytext:°α") # noqa: RUF001 fig.text(x=[5, 5], y=[3, 5], text=["xytext1:αζΔ❡", "xytext2:∑π∇✉"]) return fig + + +@pytest.mark.mpl_image_compare +def test_text_quotation_marks(): + """ + Test typesetting quotation marks. + + See https://github.com/GenericMappingTools/pygmt/issues/3104. + """ + fig = Figure() + fig.basemap(projection="X4c/2c", region=[0, 4, 0, 2], frame=0) + fig.text(x=2, y=1, text="\\234 \\140 ' \" \\216 \\217", font="20p") + return fig From f339678a069905b69160ed56ca1837dc98e12764 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 21 Apr 2024 07:41:19 +0200 Subject: [PATCH 072/218] Tutorial "Plotting text": Rewrite to improve structure, add parameters, add list input (#2760) --- examples/tutorials/basics/text.py | 273 ++++++++++++++++++++---------- 1 file changed, 179 insertions(+), 94 deletions(-) diff --git a/examples/tutorials/basics/text.py b/examples/tutorials/basics/text.py index 43bd76ed799..35744456e40 100644 --- a/examples/tutorials/basics/text.py +++ b/examples/tutorials/basics/text.py @@ -2,8 +2,8 @@ Plotting text ============= -It is often useful to add annotations to a plot. This is handled by -:meth:`pygmt.Figure.text`. +It is often useful to add text annotations to a plot or map. This is handled by the +:meth:`pygmt.Figure.text` method of the :class:`pygmt.Figure` class. """ # %% @@ -12,136 +12,221 @@ import pygmt # %% -# Basic map annotation -# -------------------- +# Adding a single text label +# -------------------------- # -# Text annotations can be added to a map using the :meth:`pygmt.Figure.text` -# method of the :class:`pygmt.Figure` class. -# -# Here we create a simple map and add an annotation using the ``text``, ``x``, -# and ``y`` parameters to specify the annotation text and position in the -# projection frame. ``text`` accepts *str* types, while ``x`` and ``y`` -# accept either *int* or *float* numbers, or a list/array of numbers. +# To add a single text label to a plot, use the ``text`` and ``x`` and ``y`` parameters +# to specify the text and position. 
fig = pygmt.Figure() -with pygmt.config(MAP_FRAME_TYPE="plain"): - fig.basemap(region=[108, 120, -5, 8], projection="M20c", frame="a") -fig.coast(land="black", water="skyblue") - -# Plot text annotations using single arguments -fig.text(text="SOUTH CHINA SEA", x=112, y=6) - -# Plot text annotations using lists of arguments -fig.text(text=["CELEBES SEA", "JAVA SEA"], x=[119, 112], y=[3.25, -4.6]) - +fig.basemap(region=[-5, 5, -5, 5], projection="X5c", frame=True) +fig.text(x=0, y=0, text="My text") fig.show() # %% -# Changing font style -# ------------------- +# Adjusting the text label +# ------------------------ # -# The size, family/weight, and color of an annotation can be specified using -# the ``font`` parameter. +# There are several optional parameters to adjust the text label: # -# A list of all recognized fonts can be found at -# :gmt-docs:`PostScript Fonts Used by GMT `, -# including details of how to use non-default fonts. +# * ``font``: Sets the size, family/weight, and color of the font for the text. +# A list of all recognized fonts can be found at +# :gmt-docs:`PostScript Fonts Used by GMT `, +# including details of how to use non-default fonts. +# * ``angle``: Specifies the rotation of the text. It is measured counter-clockwise +# from the horizontal in degrees. +# * ``justify``: Defines the anchor point of the bounding box for the text. It is +# specified by a two-letter (order independent) code, chosen from: +# +# * Vertical: **T**\(op), **M**\(iddle), **B**\(ottom) +# * Horizontal: **L**\(eft), **C**\(entre), **R**\(ight) +# +# * ``offset``: Shifts the text relatively to the reference point. fig = pygmt.Figure() -with pygmt.config(MAP_FRAME_TYPE="plain"): - fig.basemap(region=[108, 120, -5, 8], projection="M20c", frame="a") -fig.coast(land="black", water="skyblue") -# Customize the font style -fig.text(text="BORNEO", x=114.0, y=0.5, font="22p,Helvetica-Bold,white") +# ----------------------------------------------------------------------------- +# Left: "font", "angle", and "offset" parameters +fig.basemap(region=[-5, 5, -5, 5], projection="X5c", frame="rtlb") + +# Change font size, family/weight, color of the text +fig.text(x=0, y=3, text="my text", font="12p,Helvetica-Bold,blue") + +# Rotate the text by 30 degrees counter-clockwise from the horizontal +fig.text(x=0, y=0, text="my text", angle=30) + +# Plot marker and text label for reference +fig.plot(x=0, y=-3, style="s0.2c", fill="darkorange", pen="0.7p,darkgray") +fig.text(x=0, y=-3, text="my text") +# Shift the text label relatively to the position given via the x and y parameters +# by 1 centimeter to the right (positive x direction) and 0.5 centimeters down +# (negative y direction) +fig.text(x=0, y=-3, text="my text", offset="1c/-0.5c") + +fig.shift_origin(xshift="w+0.5c") + +# ----------------------------------------------------------------------------- +# Right: "justify" parameter +fig.basemap(region=[-1, 1, -1, 1], projection="X5c", frame="rtlb") + +# Plot markers for reference +fig.plot( + x=[-0.5, 0, 0.5, -0.5, 0, 0.5, -0.5, 0, 0.5], + y=[0.5, 0.5, 0.5, 0, 0, 0, -0.5, -0.5, -0.5], + style="s0.2c", + fill="darkorange", + pen="0.7p,darkgray", +) + +# Plot text labels at the x and y positions of the markers while varying the anchor +# point via the justify parameter +fig.text(x=-0.5, y=0.5, text="TL", justify="TL") # TopLeft +fig.text(x=0, y=0.5, text="TM", justify="TC") # TopCenter +fig.text(x=0.5, y=0.5, text="TR", justify="TR") # TopRight +fig.text(x=-0.5, y=0, text="ML", justify="ML") # MiddleLeft 
+fig.text(x=0, y=0, text="MC", justify="MC") # MiddleCenter +fig.text(x=0.5, y=0, text="MR", justify="MR") # MiddleRight +fig.text(x=-0.5, y=-0.5, text="BL", justify="BL") # BottomLeft +fig.text(x=0, y=-0.5, text="BC", justify="BC") # BottomCenter +fig.text(x=0.5, y=-0.5, text="BR", justify="BR") # BottomRight fig.show() # %% -# Plotting from a text file -# ------------------------- +# Adding a text box +# ----------------- # -# It is also possible to add annotations from a file containing ``x``, ``y``, -# and ``text`` columns. Here we give a complete example. +# There are different optional parameters to add and customize a text box: +# +# * ``fill``: Fills the text box with a color. +# * ``pen``: Outlines the text box. +# * ``clearance``: Adds margins in x and y directions between the text and the outline +# of the text box. Can be used to get a text box with rounded edges. fig = pygmt.Figure() -with pygmt.config(MAP_FRAME_TYPE="plain"): - fig.basemap(region=[108, 120, -5, 8], projection="M20c", frame="a") -fig.coast(land="black", water="skyblue") - -# Create space-delimited file -with Path("examples.txt").open(mode="w") as f: - f.write("114 0.5 0 22p,Helvetica-Bold,white CM BORNEO\n") - f.write("119 3.25 0 12p,Helvetica-Bold,black CM CELEBES SEA\n") - f.write("112 -4.6 0 12p,Helvetica-Bold,black CM JAVA SEA\n") - f.write("112 6 40 12p,Helvetica-Bold,black CM SOUTH CHINA SEA\n") - f.write("119.12 7.25 -40 12p,Helvetica-Bold,black CM SULU SEA\n") - f.write("118.4 -1 65 12p,Helvetica-Bold,black CM MAKASSAR STRAIT\n") - -# Plot region names / sea names from a text file, where -# the longitude (x) and latitude (y) coordinates are in the first two columns. -# Setting angle/font/justify to True will indicate that those columns are -# present in the text file too (Note: must be in that order!). -# Finally, the text to be printed will be in the last column -fig.text(textfiles="examples.txt", angle=True, font=True, justify=True) -# Cleanups -Path("examples.txt").unlink() +fig.basemap(region=[-5, 5, -5, 5], projection="X5c", frame="rtlb") + +# Add a box with a fill in green color +fig.text(x=0, y=3, text="My text", fill="green") + +# Add box with a seagreen, 1-point thick, solid outline +fig.text(x=0, y=1, text="My text", pen="1p,seagreen,solid") + +# Add margins between the text and the outline of the text box of 0.1 +# centimeters in x direction and 0.2 centimeters in y direction +fig.text(x=0, y=-1, text="My text", pen="1p,seagreen,dashed", clearance="0.1c/0.2c") + +# Get rounded edges by passing "+tO" to the "clearance" parameter +fig.text(x=0, y=-3, text="My text", pen="1p,seagreen,solid", clearance="0.2c/0.2c+tO") fig.show() # %% -# ``justify`` parameter -# --------------------- -# -# ``justify`` is used to define the anchor point for the bounding box for text -# being added to a plot. The following code segment demonstrates the -# positioning of the anchor point relative to the text. +# Adding multiple text labels with individual configurations +# ---------------------------------------------------------- # -# The anchor point is specified with a two-letter (order independent) code, -# chosen from: -# -# * Vertical anchor: **T**\(op), **M**\(iddle), **B**\(ottom) -# * Horizontal anchor: **L**\(eft), **C**\(entre), **R**\(ight) +# To add multiple text labels with individual ``font``, ``angle``, and ``justify``, +# one can provide lists with the corresponding arguments. 
fig = pygmt.Figure() fig.basemap(region=[-5, 5, -5, 5], projection="X5c", frame=True) fig.text( x=[0, 0, 0], y=[3, 2, -2], font=["5p,Helvetica,black", "5p,Helvetica,blue", "6p,Courier-Bold,red"], angle=[0, 0, 30], justify=["CM", "LT", "CM"], text=[ "black text with justify='CM'", "blue text with justify='LT'", "red text with angle=30", ], ) fig.show() # %% # Using an external input file # ---------------------------- # # It is also possible to add text labels via an external input file containing ``x``, # ``y``, and ``text`` columns. Additionally, columns to set the ``angle``, ``font``, # and ``justify`` parameters can be provided. Here, we give a complete example. fig = pygmt.Figure() fig.basemap(region=[108, 121, -5, 8], projection="M10c", frame="a2f1") fig.coast(land="darkgray", water="steelblue", shorelines="1/0.1p,gray30") # Create space-delimited file with region / sea names: # - longitude (x) and latitude (y) coordinates are in the first two columns # - angle, font, and justify must be present in this order in the next three columns # - the text to be printed is given in the last column with Path("examples.txt").open("w") as f: f.write("114.00 0.50 0 15p,Helvetica-Bold,white CM BORNEO\n") f.write("119.00 3.25 0 8p,Helvetica-Bold,black CM CELEBES SEA\n") f.write("112.00 -4.60 0 8p,Helvetica-Bold,black CM JAVA SEA\n") f.write("112.00 6.00 40 8p,Helvetica-Bold,black CM SOUTH CHINA SEA\n") f.write("119.12 7.25 -40 8p,Helvetica-Bold,black CM SULU SEA\n") f.write("118.40 -1.00 65 8p,Helvetica-Bold,black CM MAKASSAR STRAIT\n") # Setting the angle, font, and justify parameters to True indicates that those columns # are present in the text file fig.text(textfiles="examples.txt", angle=True, font=True, justify=True) # Cleanups Path("examples.txt").unlink() fig.show() # %% # Using the position parameter # ---------------------------- # # Instead of using the ``x`` and ``y`` parameters, the ``position`` parameter can be # specified to set the reference point for the text on the plot. As for the ``justify`` # parameter, the ``position`` parameter is specified by a two-letter (order independent)
fig = pygmt.Figure() -fig.basemap(region=[0, 1, 0, 1], projection="X5c", frame="WSen") -fig.text(text="Green", x=0.5, y=0.5, fill="green") + +# ----------------------------------------------------------------------------- +# Left: Add a tag to a subplot +fig.basemap(region=[-5, 5, -5, 5], projection="X5c", frame=["WStr", "af"]) + +fig.text( + text="(a)", + position="TL", # Top Left + justify="TL", # Top Left + offset="0.1c/-0.1c", +) + +fig.shift_origin(xshift="w+1c") + +# ----------------------------------------------------------------------------- +# Right: Add a text label outside of the plot or map frame +fig.basemap(region=[-30, 30, 10, 60], projection="L0/35/23/47/5c", frame=["wSnE", "af"]) + +fig.text( + text="@@100 km", # "@@" gives "@" in GMT or PyGMT + position="TC", # Top Center + justify="MC", # Middle Center + offset="0c/0.2c", + no_clip=True, # Allow plotting outside of the map or plot frame +) + fig.show() @@ -149,9 +234,9 @@ # Advanced configuration # ---------------------- # -# For crafting more advanced styles, including using special symbols and -# other character sets, be sure to check out the GMT documentation -# at :gmt-docs:`text.html` and also the GMT Technical Reference at -# :gmt-docs:`reference/features.html#placement-of-text`. Good luck! +# For crafting more advanced styles, including using special symbols and other character +# sets, be sure to check out the GMT documentation at :gmt-docs:`text.html` and also the +# Technical References at :gmt-docs:`reference/features.html#placement-of-text`. Good +# luck! -# sphinx_gallery_thumbnail_number = 3 +# sphinx_gallery_thumbnail_number = 4 From bfbe4449a4e1e6b89d77dfb51322ea23b4e3ddaf Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 21 Apr 2024 15:28:39 +0800 Subject: [PATCH 073/218] CI: Remove the 'add reaction' step from the format workflow (#3185) --- .github/workflows/format-command.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 68813e2b13c..e2cffd7b2cd 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -48,11 +48,3 @@ jobs: git commit -am "[format-command] fixes" git push fi - - - name: Add reaction - uses: peter-evans/create-or-update-comment@v4.0.0 - with: - token: ${{ steps.generate-token.outputs.token }} - repository: ${{ github.event.client_payload.github.payload.repository.full_name }} - comment-id: ${{ github.event.client_payload.github.payload.comment.id }} - reactions: hooray From 86cf1e2a8a660a009584ee21ea6b5b8d7646c3be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 21 Apr 2024 16:04:42 +0200 Subject: [PATCH 074/218] Gallery example "Custom symbols": Mention own coustom symbols (#3186) Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> --- examples/gallery/symbols/custom_symbols.py | 38 ++++++++++++---------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/examples/gallery/symbols/custom_symbols.py b/examples/gallery/symbols/custom_symbols.py index 3949903f8e2..5e2b5a001b2 100644 --- a/examples/gallery/symbols/custom_symbols.py +++ b/examples/gallery/symbols/custom_symbols.py @@ -2,12 +2,21 @@ Custom symbols ============== -The :meth:`pygmt.Figure.plot` method can plot individual custom symbols -by passing the corresponding symbol name together with the **k** shortcut to -the ``style`` parameter. 
In total 41 custom symbols are already included of -which the following plot shows five exemplary ones. The symbols are shown -underneath their corresponding names. For the remaining symbols see the GMT -Technical Reference :gmt-docs:`reference/custom-symbols.html`. +The :meth:`pygmt.Figure.plot` method can plot individual custom symbols by +passing the corresponding symbol name together with the **k** shortcut to the +``style`` parameter. + +In total 41 custom symbols are already included of which the following plot shows +five exemplary ones. The symbols are shown underneath their corresponding names. +For the remaining symbols see the GMT Technical Reference +:gmt-docs:`reference/custom-symbols.html`. + +Beside these built-in custom symbols GMT allows users to define their own custom +symbols. For this, a specific macro language is used. A detailed introduction can +be found at :gmt-docs:`reference/custom-symbols.html#the-macro-language`. After +defining such a symbol it can be used in the same way as a built-in custom symbol. + +*Please note*: Custom symbols can not be used in auto-legends yet. """ # %% @@ -16,32 +25,27 @@ fig = pygmt.Figure() fig.basemap(region=[0, 8, 0, 3], projection="X12c/4c", frame=True) -# define pen and fontstyle for annotations +# Define pen and fontstyle for annotations pen = "1p,black" font = "15p,Helvetica-Bold" -# use the volcano symbol with a size of 1.5c, -# fill color is set to "seagreen" +# Use the volcano symbol with a size of 1.5c, fill color is set to "seagreen" fig.plot(x=1, y=1.25, style="kvolcano/1.5c", pen=pen, fill="seagreen") fig.text(x=1, y=2.5, text="volcano", font=font) -# use the astroid symbol with a size of 1.5c, -# fill color is set to "red3" +# Use the astroid symbol with a size of 1.5c, fill color is set to "red3" fig.plot(x=2.5, y=1.25, style="kastroid/1.5c", pen=pen, fill="red3") fig.text(x=2.5, y=2.5, text="astroid", font=font) -# use the flash symbol with a size of 1.5c, -# fill color is set to "darkorange" +# Use the flash symbol with a size of 1.5c, fill color is set to "darkorange" fig.plot(x=4, y=1.25, style="kflash/1.5c", pen=pen, fill="darkorange") fig.text(x=4, y=2.5, text="flash", font=font) -# use the star4 symbol with a size of 1.5c, -# fill color is set to "dodgerblue4" +# Use the star4 symbol with a size of 1.5c, fill color is set to "dodgerblue4" fig.plot(x=5.5, y=1.25, style="kstar4/1.5c", pen=pen, fill="dodgerblue4") fig.text(x=5.5, y=2.5, text="star4", font=font) -# use the hurricane symbol with a size of 1.5c, -# fill color is set to "magenta4" +# Use the hurricane symbol with a size of 1.5c, fill color is set to "magenta4" fig.plot(x=7, y=1.25, style="khurricane/1.5c", pen=pen, fill="magenta4") fig.text(x=7, y=2.5, text="hurricane", font=font) From d844fd164e42f992324d13a9a4ecd2843a673d01 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 22 Apr 2024 05:41:03 +0800 Subject: [PATCH 075/218] Remove sequence_space converter from kwargs_to_string (#3183) No longer needed after build_arg_list was used in #3132. 
--- pygmt/helpers/decorators.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/pygmt/helpers/decorators.py b/pygmt/helpers/decorators.py index 47809a2787f..c61f1acecbd 100644 --- a/pygmt/helpers/decorators.py +++ b/pygmt/helpers/decorators.py @@ -634,7 +634,6 @@ def kwargs_to_strings(**conversions): string * 'sequence_comma': transforms a sequence into a ``','`` separated string * 'sequence_plus': transforms a sequence into a ``'+'`` separated string - * 'sequence_space': transforms a sequence into a ``' '`` separated string Parameters ---------- @@ -645,7 +644,7 @@ def kwargs_to_strings(**conversions): Examples -------- - >>> @kwargs_to_strings(R="sequence", i="sequence_comma", files="sequence_space") + >>> @kwargs_to_strings(R="sequence", i="sequence_comma") ... def module(*args, **kwargs): ... "A module that prints the arguments it received" ... print("{", end="") @@ -670,7 +669,7 @@ def kwargs_to_strings(**conversions): >>> module(i=[1, 2]) {'i': '1,2'} >>> module(files=["data1.txt", "data2.txt"]) - {'files': 'data1.txt data2.txt'} + {'files': ['data1.txt', 'data2.txt']} >>> # Other non-boolean arguments are passed along as they are >>> module(123, bla=(1, 2, 3), foo=True, A=False, i=(5, 6)) {'A': False, 'bla': (1, 2, 3), 'foo': True, 'i': '5,6'} @@ -695,7 +694,6 @@ def kwargs_to_strings(**conversions): >>> # Here is a more realistic example >>> # See https://github.com/GenericMappingTools/pygmt/issues/2361 >>> @kwargs_to_strings( - ... files="sequence_space", ... offset="sequence", ... R="sequence", ... i="sequence_comma", @@ -711,21 +709,20 @@ def kwargs_to_strings(**conversions): ... ) ... print("}") >>> module(files=["data1.txt", "data2.txt"]) - data1.txt data2.txt -54p/-54p {} + ['data1.txt', 'data2.txt'] -54p/-54p {} >>> module(["data1.txt", "data2.txt"]) - data1.txt data2.txt -54p/-54p {} + ['data1.txt', 'data2.txt'] -54p/-54p {} >>> module(files=["data1.txt", "data2.txt"], offset=("20p", "20p")) - data1.txt data2.txt 20p/20p {} + ['data1.txt', 'data2.txt'] 20p/20p {} >>> module(["data1.txt", "data2.txt"], ("20p", "20p")) - data1.txt data2.txt 20p/20p {} + ['data1.txt', 'data2.txt'] 20p/20p {} >>> module(["data1.txt", "data2.txt"], ("20p", "20p"), R=[1, 2, 3, 4]) - data1.txt data2.txt 20p/20p {'R': '1/2/3/4'} + ['data1.txt', 'data2.txt'] 20p/20p {'R': '1/2/3/4'} """ separators = { "sequence": "/", "sequence_comma": ",", "sequence_plus": "+", - "sequence_space": " ", } for arg, fmt in conversions.items(): From 47861107bca45ea71c003133e282fc7c35bbd2b9 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 22 Apr 2024 05:45:22 +0800 Subject: [PATCH 076/218] CI: Use 'gh run download' to download GMT cache artifacts (#3188) Removes the need for the 'dawidd6/action-download-artifact' GitHub Action. 
--- .github/workflows/benchmarks.yml | 16 ++++++---------- .github/workflows/ci_docs.yml | 16 ++++++---------- .github/workflows/ci_doctests.yaml | 16 ++++++---------- .github/workflows/ci_tests.yaml | 16 ++++++---------- .github/workflows/ci_tests_dev.yaml | 16 ++++++---------- .github/workflows/ci_tests_legacy.yaml | 16 ++++++---------- 6 files changed, 36 insertions(+), 60 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 279adbb893c..af44301acfd 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -64,21 +64,17 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.4 - with: - workflow: cache_data.yaml - workflow_conclusion: success - name: gmt-cache - path: .gmt - - # Move downloaded files to ~/.gmt directory and list them - - name: Move and list downloaded remote files run: | + gh run download -n gmt-cache -D gmt-cache + # Move downloaded files to ~/.gmt directory and list them mkdir -p ~/.gmt - mv .gmt/* ~/.gmt + mv gmt-cache/* ~/.gmt + rmdir gmt-cache # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt + env: + GH_TOKEN: ${{ github.token }} # Install the package that we want to test - name: Install the package diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index ac5f33d96d5..cd92a4484c6 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -112,21 +112,17 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.4 - with: - workflow: cache_data.yaml - workflow_conclusion: success - name: gmt-cache - path: .gmt - - # Move downloaded files to ~/.gmt directory and list them - - name: Move and list downloaded remote files run: | + gh run download -n gmt-cache -D gmt-cache + # Move downloaded files to ~/.gmt directory and list them mkdir -p ~/.gmt - mv .gmt/* ~/.gmt + mv gmt-cache/* ~/.gmt + rmdir gmt-cache # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt + env: + GH_TOKEN: ${{ github.token }} # Install the package that we want to test - name: Install the package diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index 9524a81f950..0bea2a9a2f5 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -71,21 +71,17 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.4 - with: - workflow: cache_data.yaml - workflow_conclusion: success - name: gmt-cache - path: .gmt - - # Move downloaded files to ~/.gmt directory and list them - - name: Move and list downloaded remote files run: | + gh run download -n gmt-cache -D gmt-cache + # Move downloaded files to ~/.gmt directory and list them mkdir -p ~/.gmt - mv .gmt/* ~/.gmt + mv gmt-cache/* ~/.gmt + rmdir gmt-cache # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt + env: + GH_TOKEN: ${{ github.token }} # Install the package that we want to test - name: Install the package diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 70f3d5773cc..cf5d473963f 
100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -128,21 +128,17 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.4 - with: - workflow: cache_data.yaml - workflow_conclusion: success - name: gmt-cache - path: .gmt - - # Move downloaded files to ~/.gmt directory and list them - - name: Move and list downloaded remote files run: | + gh run download -n gmt-cache -D gmt-cache + # Move downloaded files to ~/.gmt directory and list them mkdir -p ~/.gmt - mv .gmt/* ~/.gmt + mv gmt-cache/* ~/.gmt + rmdir gmt-cache # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt + env: + GH_TOKEN: ${{ github.token }} # Pull baseline image data from dvc remote (DAGsHub) - name: Pull baseline image data from dvc remote diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 18aa30f3c86..e57786a4e9c 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -135,21 +135,17 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.4 - with: - workflow: cache_data.yaml - workflow_conclusion: success - name: gmt-cache - path: .gmt - - # Move downloaded files to ~/.gmt directory and list them - - name: Move and list downloaded remote files run: | + gh run download -n gmt-cache -D gmt-cache + # Move downloaded files to ~/.gmt directory and list them mkdir -p ~/.gmt - mv .gmt/* ~/.gmt + mv gmt-cache/* ~/.gmt + rmdir gmt-cache # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt + env: + GH_TOKEN: ${{ github.token }} # Install the package that we want to test - name: Install the package diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index fee0e29ec8b..94ed7eec2c9 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -83,24 +83,20 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub - uses: dawidd6/action-download-artifact@v3.1.4 - with: - workflow: cache_data.yaml - workflow_conclusion: success - name: gmt-cache - path: .gmt - - # Move downloaded files to ~/.gmt directory and list them - - name: Move and list downloaded remote files run: | + gh run download -n gmt-cache -D gmt-cache + # Move downloaded files to ~/.gmt directory and list them mkdir -p ~/.gmt - mv .gmt/* ~/.gmt + mv gmt-cache/* ~/.gmt + rmdir gmt-cache # Change modification times of the two files, so GMT won't refresh it # The two files are in the `~/.gmt/server` directory for GMT<=6.4, and # in the `~/.gmt` directory for GMT>=6.5. 
mv ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ~/.gmt/server/ touch ~/.gmt/server/gmt_data_server.txt ~/.gmt/server/gmt_hash_server.txt ls -lhR ~/.gmt + env: + GH_TOKEN: ${{ github.token }} # Install the package that we want to test - name: Install the package From da664bc111e7245bb1f7780f57258cc8dbcf4ebd Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 22 Apr 2024 08:07:52 +0800 Subject: [PATCH 077/218] Deprecate the build_arg_string function in v0.12.0 (will be removed in v0.14.0) and use build_arg_list instead (#3184) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- pygmt/helpers/utils.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index 0fcd61d6673..1732dfb4e3f 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -10,6 +10,7 @@ import subprocess import sys import time +import warnings import webbrowser from collections.abc import Iterable, Sequence from typing import Any @@ -439,6 +440,10 @@ def build_arg_string(kwdict, confdict=None, infile=None, outfile=None): strings (e.g. "+proj=longlat +datum=WGS84") will have their spaces removed. See https://github.com/GenericMappingTools/pygmt/pull/1487 for more info. + .. deprecated:: 0.12.0 + + Use :func:`build_arg_list` instead. + Parameters ---------- kwdict : dict @@ -513,8 +518,13 @@ def build_arg_string(kwdict, confdict=None, infile=None, outfile=None): ... ) input.txt -A0 -B -Crainbow --FORMAT_DATE_MAP="o dd" ->output.txt """ - gmt_args = [] + msg = ( + "Utility function 'build_arg_string()' is deprecated in v0.12.0 and will be " + "removed in v0.14.0. Use 'build_arg_list()' instead." 
+ ) + warnings.warn(msg, category=FutureWarning, stacklevel=2) + gmt_args = [] for key in kwdict: if len(key) > 2: # raise an exception for unrecognized options raise GMTInvalidInput(f"Unrecognized parameter '{key}'.") From 11a1bc15d348ffb3589f37555007df8166946a1e Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 22 Apr 2024 08:51:29 +0800 Subject: [PATCH 078/218] CI: Use 'gh release' to upload assets to release (#3187) --- .github/workflows/release-baseline-images.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release-baseline-images.yml b/.github/workflows/release-baseline-images.yml index 98b40e4c66b..b6f28b7cfc4 100644 --- a/.github/workflows/release-baseline-images.yml +++ b/.github/workflows/release-baseline-images.yml @@ -17,6 +17,10 @@ jobs: runs-on: ubuntu-latest if: github.repository == 'GenericMappingTools/pygmt' + permissions: + # To write assets to GitHub release + contents: write + steps: - name: Checkout uses: actions/checkout@v4.1.1 @@ -35,7 +39,6 @@ jobs: shasum -a 256 baseline-images.zip - name: Upload baseline image as a release asset - uses: shogo82148/actions-upload-release-asset@v1.7.5 - with: - upload_url: ${{ github.event.release.upload_url }} - asset_path: baseline-images.zip + run: gh release upload ${{ github.ref_name }} baseline-images.zip + env: + GH_TOKEN: ${{ github.token }} From 44f44d38034ce2ab0ede78ee4bfd8b8b09386393 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 22 Apr 2024 17:43:58 +0800 Subject: [PATCH 079/218] CI: Consistently use github.token instead of secrets.GITHUB_TOKEN (#3189) --- .github/workflows/check-links.yml | 2 +- .github/workflows/dvc-diff.yml | 2 +- .github/workflows/release-drafter.yml | 2 +- .github/workflows/slash-command-dispatch.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 524bc6f078d..8e9f58aeff8 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -72,4 +72,4 @@ jobs: title="Link Checker Report on ${{ steps.date.outputs.date }}" gh issue create --title "$title" --body-file ./lychee/out.md env: - GH_TOKEN: ${{secrets.GITHUB_TOKEN}} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/dvc-diff.yml b/.github/workflows/dvc-diff.yml index 443b03e7ab7..5792a5cd64f 100644 --- a/.github/workflows/dvc-diff.yml +++ b/.github/workflows/dvc-diff.yml @@ -52,7 +52,7 @@ jobs: # Report last updated at commit abcdef - name: Generate the image diff report env: - repo_token: ${{ secrets.GITHUB_TOKEN }} + repo_token: ${{ github.token }} run: | echo -e "## Summary of changed images\n" > report.md echo -e "This is an auto-generated report of images that have changed on the DVC remote\n" >> report.md diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index 33f571ecdbe..118778439e1 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -23,4 +23,4 @@ jobs: # (Optional) specify config name to use, relative to .github/. 
Default: release-drafter.yml config-name: release-drafter.yml env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ github.token }} diff --git a/.github/workflows/slash-command-dispatch.yml b/.github/workflows/slash-command-dispatch.yml index 9bc6f905ded..7f483528e0d 100644 --- a/.github/workflows/slash-command-dispatch.yml +++ b/.github/workflows/slash-command-dispatch.yml @@ -16,7 +16,7 @@ jobs: - name: Slash Command Dispatch uses: peter-evans/slash-command-dispatch@v4 with: - token: ${{ secrets.GITHUB_TOKEN }} + token: ${{ githubu.token }} commands: | format issue-type: pull-request From 8b2a74ca8e68c1d7e34e46b089bf522e7ba683f3 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 23 Apr 2024 07:44:51 +0800 Subject: [PATCH 080/218] Refactor the _load_remote_dataset function to load tiled and non-tiled grids in a consistent way (#3120) --- pygmt/datasets/load_remote_dataset.py | 66 ++++++++++++++++----------- pygmt/tests/test_accessor.py | 5 +- 2 files changed, 42 insertions(+), 29 deletions(-) diff --git a/pygmt/datasets/load_remote_dataset.py b/pygmt/datasets/load_remote_dataset.py index 0907d35dccf..5b66a14057d 100644 --- a/pygmt/datasets/load_remote_dataset.py +++ b/pygmt/datasets/load_remote_dataset.py @@ -4,12 +4,12 @@ from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar, NamedTuple +from typing import TYPE_CHECKING, ClassVar, Literal, NamedTuple +from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import kwargs_to_strings -from pygmt.io import load_dataarray -from pygmt.src import grdcut, which +from pygmt.helpers import build_arg_list, kwargs_to_strings +from pygmt.src import which if TYPE_CHECKING: import xarray as xr @@ -344,7 +344,7 @@ def _load_remote_dataset( dataset_prefix: str, resolution: str, region: str | list, - registration: str, + registration: Literal["gridline", "pixel", None], ) -> xr.DataArray: r""" Load GMT remote datasets. @@ -370,33 +370,39 @@ def _load_remote_dataset( Returns ------- - grid : :class:`xarray.DataArray` + grid The GMT remote dataset grid. Note ---- - The returned :class:`xarray.DataArray` doesn't support slice operation for tiled - grids. + The registration and coordinate system type of the returned + :class:`xarray.DataArray` grid can be accessed via the GMT accessors (i.e., + ``grid.gmt.registration`` and ``grid.gmt.gtype`` respectively). However, these + properties may be lost after specific grid operations (such as slicing) and will + need to be manually set before passing the grid to any PyGMT data processing or + plotting functions. Refer to :class:`pygmt.GMTDataArrayAccessor` for detailed + explanations and workarounds. """ dataset = datasets[dataset_name] + # Check resolution if resolution not in dataset.resolutions: raise GMTInvalidInput( f"Invalid resolution '{resolution}' for {dataset.title} dataset. " f"Available resolutions are: {', '.join(dataset.resolutions)}." 
) + resinfo = dataset.resolutions[resolution] - # check registration - valid_registrations = dataset.resolutions[resolution].registrations + # Check registration if registration is None: - # use gridline registration unless only pixel registration is available - registration = "gridline" if "gridline" in valid_registrations else "pixel" + # Use gridline registration unless only pixel registration is available + registration = "gridline" if "gridline" in resinfo.registrations else "pixel" elif registration in ("pixel", "gridline"): - if registration not in valid_registrations: + if registration not in resinfo.registrations: raise GMTInvalidInput( f"{registration} registration is not available for the " f"{resolution} {dataset.title} dataset. Only " - f"{valid_registrations[0]} registration is available." + f"{resinfo.registrations[0]} registration is available." ) else: raise GMTInvalidInput( @@ -404,20 +410,28 @@ def _load_remote_dataset( "'gridline' or None. Default is None, where a gridline-registered grid is " "returned unless only the pixel-registered grid is available." ) - reg = f"_{registration[0]}" - # different ways to load tiled and non-tiled grids. - # Known issue: tiled grids don't support slice operation - # See https://github.com/GenericMappingTools/pygmt/issues/524 - if region is None: - if dataset.resolutions[resolution].tiled: - raise GMTInvalidInput( - f"'region' is required for {dataset.title} resolution '{resolution}'." + fname = f"@{dataset_prefix}{resolution}_{registration[0]}" + if resinfo.tiled and region is None: + raise GMTInvalidInput( + f"'region' is required for {dataset.title} resolution '{resolution}'." + ) + + # Currently, only grids are supported. Will support images in the future. + kwdict = {"T": "g", "R": region} # region can be None + with Session() as lib: + with lib.virtualfile_out(kind="grid") as voutgrd: + lib.call_module( + module="read", + args=[fname, voutgrd, *build_arg_list(kwdict)], ) - fname = which(f"@{dataset_prefix}{resolution}{reg}", download="a") - grid = load_dataarray(fname, engine="netcdf4") - else: - grid = grdcut(f"@{dataset_prefix}{resolution}{reg}", region=region) + grid = lib.virtualfile_to_raster(outgrid=None, vfname=voutgrd) + + # Full path to the grid if not tiled grids. + source = which(fname, download="a") if not resinfo.tiled else None + # Manually add source to xarray.DataArray encoding to make the GMT accessors work. + if source: + grid.encoding["source"] = source # Add some metadata to the grid grid.name = dataset.name diff --git a/pygmt/tests/test_accessor.py b/pygmt/tests/test_accessor.py index ed047c2ec28..e5e4534e1c3 100644 --- a/pygmt/tests/test_accessor.py +++ b/pygmt/tests/test_accessor.py @@ -115,9 +115,8 @@ def test_accessor_grid_source_file_not_exist(): # Registration and gtype are correct assert grid.gmt.registration == 1 assert grid.gmt.gtype == 1 - # The source grid file is defined but doesn't exist - assert grid.encoding["source"].endswith(".nc") - assert not Path(grid.encoding["source"]).exists() + # The source grid file is undefined. + assert grid.encoding.get("source") is None # For a sliced grid, fallback to default registration and gtype, # because the source grid file doesn't exist. 
From c9c347d53454c2d51331ea7979c503c35c9ec5e8 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 23 Apr 2024 23:25:00 +0800 Subject: [PATCH 081/218] Update the list of cached data files (#3195) --- pygmt/helpers/caching.py | 6 +++--- pygmt/tests/test_datatypes_dataset.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pygmt/helpers/caching.py b/pygmt/helpers/caching.py index 7bf292a2003..b81e8384758 100644 --- a/pygmt/helpers/caching.py +++ b/pygmt/helpers/caching.py @@ -57,9 +57,9 @@ def cache_data(): "@N00W030.earth_vgg_01m_p.nc", "@S90E000.earth_wdmam_03m_g.nc", "@N00W030.mars_relief_01m_g.nc", - "@N00W030.mercury_relief_01m_p.nc", - "@N00W030.moon_relief_01m_p.nc", - "@N00W030.pluto_relief_01m_p.nc", + "@N00W030.mercury_relief_01m_g.nc", + "@N00W030.moon_relief_01m_g.nc", + "@N00W030.pluto_relief_01m_g.nc", "@N00W030.venus_relief_01m_g.nc", # List of cache files. "@EGM96_to_36.txt", diff --git a/pygmt/tests/test_datatypes_dataset.py b/pygmt/tests/test_datatypes_dataset.py index 9576595b6b2..21871f9f60a 100644 --- a/pygmt/tests/test_datatypes_dataset.py +++ b/pygmt/tests/test_datatypes_dataset.py @@ -151,7 +151,7 @@ def test_dataset_to_strings_with_none_values(): See the bug report at https://github.com/GenericMappingTools/pygmt/issues/3170. """ - tiles = ["@N30W120.earth_relief_15s_p.nc", "@N00E000.earth_relief_15s_p.nc"] + tiles = ["@N30W120.earth_relief_15s_p.nc", "@N00W010.earth_relief_15s_p.nc"] paths = which(fname=tiles, download="a") assert len(paths) == 2 # 'paths' may contain an empty string or not, depending on if the tiles are cached. From 16b89456eba100b67ad9d491b70f58932ce25054 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Wed, 24 Apr 2024 00:03:56 +0200 Subject: [PATCH 082/218] CI: slash-commands-dispatch: Fix typo (#3196) Fix typo githubu -> github --- .github/workflows/slash-command-dispatch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/slash-command-dispatch.yml b/.github/workflows/slash-command-dispatch.yml index 7f483528e0d..c5d10cf5be8 100644 --- a/.github/workflows/slash-command-dispatch.yml +++ b/.github/workflows/slash-command-dispatch.yml @@ -16,7 +16,7 @@ jobs: - name: Slash Command Dispatch uses: peter-evans/slash-command-dispatch@v4 with: - token: ${{ githubu.token }} + token: ${{ github.token }} commands: | format issue-type: pull-request From 99bc5d326ca0e0a8d058f41b72d74dfd85f94c1e Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 24 Apr 2024 09:28:10 +0800 Subject: [PATCH 083/218] Mark three x2sys_cross as xfail on macOS (#3198) --- pygmt/tests/test_x2sys_cross.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pygmt/tests/test_x2sys_cross.py b/pygmt/tests/test_x2sys_cross.py index c9209bd254a..bae686efe27 100644 --- a/pygmt/tests/test_x2sys_cross.py +++ b/pygmt/tests/test_x2sys_cross.py @@ -3,6 +3,7 @@ """ import copy +import sys from pathlib import Path from tempfile import TemporaryDirectory @@ -59,7 +60,7 @@ def test_x2sys_cross_input_file_output_file(): @pytest.mark.usefixtures("mock_x2sys_home") @pytest.mark.xfail( - condition=Version(__gmt_version__) < Version("6.5.0"), + condition=Version(__gmt_version__) < Version("6.5.0") or sys.platform == "darwin", reason="Upstream bug fixed in https://github.com/GenericMappingTools/gmt/pull/8188", ) def test_x2sys_cross_input_file_output_dataframe(): @@ -200,7 +201,7 @@ def test_x2sys_cross_invalid_tracks_input_type(tracks): 
@pytest.mark.usefixtures("mock_x2sys_home") @pytest.mark.xfail( - condition=Version(__gmt_version__) < Version("6.5.0"), + condition=Version(__gmt_version__) < Version("6.5.0") or sys.platform == "darwin", reason="Upstream bug fixed in https://github.com/GenericMappingTools/gmt/pull/8188", ) def test_x2sys_cross_region_interpolation_numpoints(): @@ -229,7 +230,7 @@ def test_x2sys_cross_region_interpolation_numpoints(): @pytest.mark.usefixtures("mock_x2sys_home") @pytest.mark.xfail( - condition=Version(__gmt_version__) < Version("6.5.0"), + condition=Version(__gmt_version__) < Version("6.5.0") or sys.platform == "darwin", reason="Upstream bug fixed in https://github.com/GenericMappingTools/gmt/pull/8188", ) def test_x2sys_cross_trackvalues(): From 9aa2780113ed3facf82ad181f6063e52cc14c5f2 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 24 Apr 2024 18:21:21 +0800 Subject: [PATCH 084/218] Use unique earth_age tiles in test_dataset_to_strings_with_none_values (#3200) --- pygmt/helpers/caching.py | 2 ++ pygmt/tests/test_datatypes_dataset.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pygmt/helpers/caching.py b/pygmt/helpers/caching.py index b81e8384758..714f12d890e 100644 --- a/pygmt/helpers/caching.py +++ b/pygmt/helpers/caching.py @@ -42,6 +42,8 @@ def cache_data(): # Names like @N35E135.earth_relief_03s_g.nc are for internal use only. # The naming scheme may change. DO NOT USE IT IN YOUR SCRIPTS. "@N00W030.earth_age_01m_g.nc", + "@N30E060.earth_age_01m_g.nc", + "@N30E090.earth_age_01m_g.nc", "@N00W030.earth_faa_01m_p.nc", "@N00W030.earth_geoid_01m_g.nc", "@S30W060.earth_mag_02m_p.nc", diff --git a/pygmt/tests/test_datatypes_dataset.py b/pygmt/tests/test_datatypes_dataset.py index 21871f9f60a..dd7e4073852 100644 --- a/pygmt/tests/test_datatypes_dataset.py +++ b/pygmt/tests/test_datatypes_dataset.py @@ -151,7 +151,7 @@ def test_dataset_to_strings_with_none_values(): See the bug report at https://github.com/GenericMappingTools/pygmt/issues/3170. """ - tiles = ["@N30W120.earth_relief_15s_p.nc", "@N00W010.earth_relief_15s_p.nc"] + tiles = ["@N30E060.earth_age_01m_g.nc", "@N30E090.earth_age_01m_g.nc"] paths = which(fname=tiles, download="a") assert len(paths) == 2 # 'paths' may contain an empty string or not, depending on if the tiles are cached. 
From 5234b52b3b924c22c6e834a73c6bcaaaaa722ae4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Wed, 24 Apr 2024 13:19:31 +0200 Subject: [PATCH 085/218] Remote Datasets: Adjust attributes - remove "title", use default of "name" and "long_name", introduce "description" (#3048) Co-authored-by: Dongdong Tian --- .../tutorials/advanced/grid_equalization.py | 2 +- pygmt/datasets/earth_free_air_anomaly.py | 8 +- pygmt/datasets/earth_geoid.py | 6 +- pygmt/datasets/earth_magnetic_anomaly.py | 2 +- pygmt/datasets/earth_mask.py | 4 +- pygmt/datasets/earth_relief.py | 9 +- .../earth_vertical_gravity_gradient.py | 6 +- pygmt/datasets/load_remote_dataset.py | 143 ++++++++---------- pygmt/src/grd2xyz.py | 2 +- pygmt/tests/test_datasets_earth_age.py | 5 +- .../test_datasets_earth_free_air_anomaly.py | 5 +- pygmt/tests/test_datasets_earth_geoid.py | 5 +- .../test_datasets_earth_magnetic_anomaly.py | 15 +- pygmt/tests/test_datasets_earth_mask.py | 4 +- pygmt/tests/test_datasets_earth_relief.py | 11 +- ...atasets_earth_vertical_gravity_gradient.py | 5 +- .../test_datasets_load_remote_datasets.py | 4 +- pygmt/tests/test_datasets_mars_relief.py | 5 +- pygmt/tests/test_datasets_mercury_relief.py | 5 +- pygmt/tests/test_datasets_moon_relief.py | 5 +- pygmt/tests/test_datasets_pluto_relief.py | 5 +- pygmt/tests/test_datasets_venus_relief.py | 5 +- 22 files changed, 135 insertions(+), 126 deletions(-) diff --git a/examples/tutorials/advanced/grid_equalization.py b/examples/tutorials/advanced/grid_equalization.py index b631fe49834..1370ff3ab3e 100644 --- a/examples/tutorials/advanced/grid_equalization.py +++ b/examples/tutorials/advanced/grid_equalization.py @@ -20,7 +20,7 @@ grid = pygmt.datasets.load_earth_relief( resolution="03s", region=[-119.825, -119.4, 37.6, 37.825] ) -grid_dist = pygmt.grd2xyz(grid=grid, output_type="pandas")["elevation"] +grid_dist = pygmt.grd2xyz(grid=grid, output_type="pandas")["z"] # %% diff --git a/pygmt/datasets/earth_free_air_anomaly.py b/pygmt/datasets/earth_free_air_anomaly.py index 00abae85122..9beae54ef1a 100644 --- a/pygmt/datasets/earth_free_air_anomaly.py +++ b/pygmt/datasets/earth_free_air_anomaly.py @@ -1,5 +1,5 @@ """ -Function to download the IGPP Earth Free-Air Anomaly dataset from the GMT data server, +Function to download the IGPP Earth free-air anomaly dataset from the GMT data server, and load as :class:`xarray.DataArray`. The grids are available in various resolutions. @@ -20,13 +20,13 @@ def load_earth_free_air_anomaly( registration: Literal["gridline", "pixel", None] = None, ): r""" - Load the IGPP Earth Free-Air Anomaly dataset in various resolutions. + Load the IGPP Earth free-air anomaly dataset in various resolutions. .. figure:: https://www.generic-mapping-tools.org/remote-datasets/_images/GMT_earth_faa.jpg :width: 80 % :align: center - IGPP Earth Free-Air Anomaly dataset. + IGPP Earth free-air anomaly dataset. The grids are downloaded to a user data directory (usually ``~/.gmt/server/earth/earth_faa/``) the first time you invoke @@ -102,7 +102,7 @@ def load_earth_free_air_anomaly( ... 
) """ grid = _load_remote_dataset( - dataset_name="earth_free_air_anomaly", + dataset_name="earth_faa", dataset_prefix="earth_faa_", resolution=resolution, region=region, diff --git a/pygmt/datasets/earth_geoid.py b/pygmt/datasets/earth_geoid.py index e62fb20bdf3..8965e06857d 100644 --- a/pygmt/datasets/earth_geoid.py +++ b/pygmt/datasets/earth_geoid.py @@ -1,5 +1,5 @@ """ -Function to download the EGM2008 Earth Geoid dataset from the GMT data server, and load +Function to download the EGM2008 Earth geoid dataset from the GMT data server, and load as :class:`xarray.DataArray`. The grids are available in various resolutions. @@ -20,13 +20,13 @@ def load_earth_geoid( registration: Literal["gridline", "pixel"] = "gridline", ): r""" - Load the EGM2008 Earth Geoid dataset in various resolutions. + Load the EGM2008 Earth geoid dataset in various resolutions. .. figure:: https://www.generic-mapping-tools.org/remote-datasets/_images/GMT_earth_geoid.jpg :width: 80 % :align: center - EGM2008 Earth Geoid dataset. + EGM2008 Earth geoid dataset. The grids are downloaded to a user data directory (usually ``~/.gmt/server/earth/earth_geoid/``) the first time you invoke diff --git a/pygmt/datasets/earth_magnetic_anomaly.py b/pygmt/datasets/earth_magnetic_anomaly.py index 7eb1552bbc8..e8d7afe1684 100644 --- a/pygmt/datasets/earth_magnetic_anomaly.py +++ b/pygmt/datasets/earth_magnetic_anomaly.py @@ -147,7 +147,7 @@ def load_earth_magnetic_anomaly( "Valid values are 'emag2', 'emag2_4km', and 'wdmam'." ) dataset_prefix = magnetic_anomaly_sources[data_source] - dataset_name = "earth_wdmam" if data_source == "wdmam" else "earth_magnetic_anomaly" + dataset_name = "earth_wdmam" if data_source == "wdmam" else "earth_mag" grid = _load_remote_dataset( dataset_name=dataset_name, dataset_prefix=dataset_prefix, diff --git a/pygmt/datasets/earth_mask.py b/pygmt/datasets/earth_mask.py index cd38b5bc5c2..1c439ec32d5 100644 --- a/pygmt/datasets/earth_mask.py +++ b/pygmt/datasets/earth_mask.py @@ -20,13 +20,13 @@ def load_earth_mask( registration: Literal["gridline", "pixel"] = "gridline", ): r""" - Load the GSHHG Earth Mask dataset in various resolutions. + Load the GSHHG Earth mask dataset in various resolutions. .. figure:: https://www.generic-mapping-tools.org/remote-datasets/_images/GMT_earth_mask.jpg :width: 80 % :align: center - GSHHG Earth Mask dataset. + GSHHG Earth mask dataset. 
The grids are downloaded to a user data directory (usually ``~/.gmt/server/earth/earth_mask/``) the first time you invoke diff --git a/pygmt/datasets/earth_relief.py b/pygmt/datasets/earth_relief.py index e87f2b403fe..a0ddcd6eabc 100644 --- a/pygmt/datasets/earth_relief.py +++ b/pygmt/datasets/earth_relief.py @@ -161,9 +161,14 @@ def load_earth_relief( ) else: dataset_prefix = earth_relief_sources[data_source] - + # Choose earth relief dataset + match data_source: + case "igpp" | "synbath": + dataset_name = "earth_igpp" + case "gebco" | "gebcosi": + dataset_name = "earth_gebco" grid = _load_remote_dataset( - dataset_name="earth_relief", + dataset_name=dataset_name, dataset_prefix=dataset_prefix, resolution=resolution, region=region, diff --git a/pygmt/datasets/earth_vertical_gravity_gradient.py b/pygmt/datasets/earth_vertical_gravity_gradient.py index da5fc880766..53e11e229c1 100644 --- a/pygmt/datasets/earth_vertical_gravity_gradient.py +++ b/pygmt/datasets/earth_vertical_gravity_gradient.py @@ -1,5 +1,5 @@ """ -Function to download the IGPP Earth Vertical Gravity Gradient dataset from the GMT data +Function to download the IGPP Earth vertical gravity gradient dataset from the GMT data server, and load as :class:`xarray.DataArray`. The grids are available in various resolutions. @@ -20,13 +20,13 @@ def load_earth_vertical_gravity_gradient( registration: Literal["gridline", "pixel", None] = None, ): r""" - Load the IGPP Earth Vertical Gravity Gradient dataset in various resolutions. + Load the IGPP Earth vertical gravity gradient dataset in various resolutions. .. figure:: https://www.generic-mapping-tools.org/remote-datasets/_images/GMT_earth_vgg.jpg :width: 80 % :align: center - IGPP Earth Vertical Gravity Gradient dataset. + IGPP Earth vertical gravity gradient dataset. The grids are downloaded to a user data directory (usually ``~/.gmt/server/earth/earth_vgg/``) the first time you invoke diff --git a/pygmt/datasets/load_remote_dataset.py b/pygmt/datasets/load_remote_dataset.py index 5b66a14057d..6f099952512 100644 --- a/pygmt/datasets/load_remote_dataset.py +++ b/pygmt/datasets/load_remote_dataset.py @@ -41,13 +41,9 @@ class GMTRemoteDataset(NamedTuple): Attributes ---------- - title : str - The title of the dataset, used in error messages. - name : str - The name assigned as an attribute to the DataArray. - long_name : str - The long name assigned as an attribute to the DataArray. - units : str + description : str + The name assigned as an attribute to the DataArray. + units : str, None The units of the values in the DataArray. resolutions : dict Dictionary of available resolution as keys and Resolution objects as values. @@ -55,9 +51,7 @@ class GMTRemoteDataset(NamedTuple): A dictionary of extra or unique attributes of the dataset. 
""" - title: str - name: str - long_name: str + description: str units: str | None resolutions: dict[str, Resolution] extra_attributes: dict @@ -65,9 +59,7 @@ class GMTRemoteDataset(NamedTuple): datasets = { "earth_age": GMTRemoteDataset( - title="seafloor age", - name="seafloor_age", - long_name="age of seafloor crust", + description="EarthByte Earth seafloor crustal age", units="Myr", extra_attributes={"horizontal_datum": "WGS84"}, resolutions={ @@ -84,10 +76,8 @@ class GMTRemoteDataset(NamedTuple): "01m": Resolution("01m", registrations=["gridline"], tiled=True), }, ), - "earth_free_air_anomaly": GMTRemoteDataset( - title="free air anomaly", - name="free_air_anomaly", - long_name="IGPP Earth Free-Air Anomaly", + "earth_faa": GMTRemoteDataset( + description="IGPP Earth free-air anomaly", units="mGal", extra_attributes={"horizontal_datum": "WGS84"}, resolutions={ @@ -104,10 +94,30 @@ class GMTRemoteDataset(NamedTuple): "01m": Resolution("01m", registrations=["pixel"], tiled=True), }, ), + "earth_gebco": GMTRemoteDataset( + description="GEBCO Earth relief", + units="meters", + extra_attributes={"vertical_datum": "EGM96", "horizontal_datum": "WGS84"}, + resolutions={ + "01d": Resolution("01d"), + "30m": Resolution("30m"), + "20m": Resolution("20m"), + "15m": Resolution("15m"), + "10m": Resolution("10m"), + "06m": Resolution("06m"), + "05m": Resolution("05m", tiled=True), + "04m": Resolution("04m", tiled=True), + "03m": Resolution("03m", tiled=True), + "02m": Resolution("02m", tiled=True), + "01m": Resolution("01m", tiled=True), + "30s": Resolution("30s", tiled=True), + "15s": Resolution("15s", registrations=["pixel"], tiled=True), + "03s": Resolution("03s", registrations=["gridline"], tiled=True), + "01s": Resolution("01s", registrations=["gridline"], tiled=True), + }, + ), "earth_geoid": GMTRemoteDataset( - title="Earth geoid", - name="earth_geoid", - long_name="EGM2008 Earth Geoid", + description="EGM2008 Earth geoid", units="m", extra_attributes={"horizontal_datum": "WGS84"}, resolutions={ @@ -124,10 +134,30 @@ class GMTRemoteDataset(NamedTuple): "01m": Resolution("01m", registrations=["gridline"], tiled=True), }, ), - "earth_magnetic_anomaly": GMTRemoteDataset( - title="Earth magnetic anomaly", - name="magnetic_anomaly", - long_name="Earth magnetic anomaly", + "earth_igpp": GMTRemoteDataset( + description="IGPP Earth relief", + units="meters", + extra_attributes={"vertical_datum": "EGM96", "horizontal_datum": "WGS84"}, + resolutions={ + "01d": Resolution("01d"), + "30m": Resolution("30m"), + "20m": Resolution("20m"), + "15m": Resolution("15m"), + "10m": Resolution("10m"), + "06m": Resolution("06m"), + "05m": Resolution("05m", tiled=True), + "04m": Resolution("04m", tiled=True), + "03m": Resolution("03m", tiled=True), + "02m": Resolution("02m", tiled=True), + "01m": Resolution("01m", tiled=True), + "30s": Resolution("30s", tiled=True), + "15s": Resolution("15s", registrations=["pixel"], tiled=True), + "03s": Resolution("03s", registrations=["gridline"], tiled=True), + "01s": Resolution("01s", registrations=["gridline"], tiled=True), + }, + ), + "earth_mag": GMTRemoteDataset( + description="EMAG2 Earth Magnetic Anomaly Model", units="nT", extra_attributes={"horizontal_datum": "WGS84"}, resolutions={ @@ -144,9 +174,7 @@ class GMTRemoteDataset(NamedTuple): }, ), "earth_mask": GMTRemoteDataset( - title="Earth mask", - name="earth_mask", - long_name="Mask of land and water features", + description="GSHHG Earth mask", units=None, extra_attributes={"horizontal_datum": "WGS84"}, resolutions={ @@ 
-165,34 +193,8 @@ class GMTRemoteDataset(NamedTuple): "15s": Resolution("15s"), }, ), - "earth_relief": GMTRemoteDataset( - title="Earth relief", - name="elevation", - long_name="Earth elevation relative to the geoid", - units="meters", - extra_attributes={"vertical_datum": "EGM96", "horizontal_datum": "WGS84"}, - resolutions={ - "01d": Resolution("01d"), - "30m": Resolution("30m"), - "20m": Resolution("20m"), - "15m": Resolution("15m"), - "10m": Resolution("10m"), - "06m": Resolution("06m"), - "05m": Resolution("05m", tiled=True), - "04m": Resolution("04m", tiled=True), - "03m": Resolution("03m", tiled=True), - "02m": Resolution("02m", tiled=True), - "01m": Resolution("01m", tiled=True), - "30s": Resolution("30s", tiled=True), - "15s": Resolution("15s", registrations=["pixel"], tiled=True), - "03s": Resolution("03s", registrations=["gridline"], tiled=True), - "01s": Resolution("01s", registrations=["gridline"], tiled=True), - }, - ), "earth_vgg": GMTRemoteDataset( - title="Earth vertical gravity gradient", - name="earth_vgg", - long_name="IGPP Earth Vertical Gravity Gradient", + description="IGPP Earth vertical gravity gradient", units="Eotvos", extra_attributes={"horizontal_datum": "WGS84"}, resolutions={ @@ -210,9 +212,7 @@ class GMTRemoteDataset(NamedTuple): }, ), "earth_wdmam": GMTRemoteDataset( - title="WDMAM magnetic anomaly", - name="wdmam", - long_name="World Digital Magnetic Anomaly Map", + description="WDMAM World Digital Magnetic Anomaly Map", units="nT", extra_attributes={"horizontal_datum": "WGS84"}, resolutions={ @@ -228,9 +228,7 @@ class GMTRemoteDataset(NamedTuple): }, ), "mars_relief": GMTRemoteDataset( - title="Mars relief", - name="mars_relief", - long_name="NASA Mars (MOLA) relief", + description="NASA Mars (MOLA) relief", units="meters", extra_attributes={}, resolutions={ @@ -251,9 +249,7 @@ class GMTRemoteDataset(NamedTuple): }, ), "moon_relief": GMTRemoteDataset( - title="Moon relief", - name="moon_relief", - long_name="USGS Moon (LOLA) relief", + description="USGS Moon (LOLA) relief", units="meters", extra_attributes={}, resolutions={ @@ -274,9 +270,7 @@ class GMTRemoteDataset(NamedTuple): }, ), "mercury_relief": GMTRemoteDataset( - title="Mercury relief", - name="mercury_relief", - long_name="USGS Mercury relief", + description="USGS Mercury relief", units="meters", extra_attributes={}, resolutions={ @@ -295,9 +289,7 @@ class GMTRemoteDataset(NamedTuple): }, ), "pluto_relief": GMTRemoteDataset( - title="Pluto relief", - name="pluto_relief", - long_name="USGS Pluto relief", + description="USGS Pluto relief", units="meters", extra_attributes={}, resolutions={ @@ -316,9 +308,7 @@ class GMTRemoteDataset(NamedTuple): }, ), "venus_relief": GMTRemoteDataset( - title="Venus relief", - name="venus_relief", - long_name="NASA Magellan Venus relief", + description="NASA Magellan Venus relief", units="meters", extra_attributes={}, resolutions={ @@ -388,7 +378,7 @@ def _load_remote_dataset( # Check resolution if resolution not in dataset.resolutions: raise GMTInvalidInput( - f"Invalid resolution '{resolution}' for {dataset.title} dataset. " + f"Invalid resolution '{resolution}' for {dataset.description} dataset. " f"Available resolutions are: {', '.join(dataset.resolutions)}." ) resinfo = dataset.resolutions[resolution] @@ -401,7 +391,7 @@ def _load_remote_dataset( if registration not in resinfo.registrations: raise GMTInvalidInput( f"{registration} registration is not available for the " - f"{resolution} {dataset.title} dataset. 
Only " + f"{resolution} {dataset.description} dataset. Only " f"{resinfo.registrations[0]} registration is available." ) else: @@ -414,7 +404,7 @@ def _load_remote_dataset( fname = f"@{dataset_prefix}{resolution}_{registration[0]}" if resinfo.tiled and region is None: raise GMTInvalidInput( - f"'region' is required for {dataset.title} resolution '{resolution}'." + f"'region' is required for {dataset.description} resolution '{resolution}'." ) # Currently, only grids are supported. Will support images in the future. @@ -434,8 +424,7 @@ def _load_remote_dataset( grid.encoding["source"] = source # Add some metadata to the grid - grid.name = dataset.name - grid.attrs["long_name"] = dataset.long_name + grid.attrs["description"] = dataset.description if dataset.units: grid.attrs["units"] = dataset.units for key, value in dataset.extra_attributes.items(): diff --git a/pygmt/src/grd2xyz.py b/pygmt/src/grd2xyz.py index 04e4d21d848..3fbe933bc04 100644 --- a/pygmt/src/grd2xyz.py +++ b/pygmt/src/grd2xyz.py @@ -139,7 +139,7 @@ def grd2xyz( >>> # Create a pandas DataFrame with the xyz data from an input grid >>> xyz_dataframe = pygmt.grd2xyz(grid=grid, output_type="pandas") >>> xyz_dataframe.head(n=2) - lon lat elevation + lon lat z 0 10.0 25.0 965.5 1 10.5 25.0 876.5 """ diff --git a/pygmt/tests/test_datasets_earth_age.py b/pygmt/tests/test_datasets_earth_age.py index 854821bad25..67ade3dd3d9 100644 --- a/pygmt/tests/test_datasets_earth_age.py +++ b/pygmt/tests/test_datasets_earth_age.py @@ -12,9 +12,10 @@ def test_earth_age_01d(): Test some properties of the earth age 01d data. """ data = load_earth_age(resolution="01d") - assert data.name == "seafloor_age" + assert data.name == "z" + assert data.attrs["long_name"] == "ages (Myr)" + assert data.attrs["description"] == "EarthByte Earth seafloor crustal age" assert data.attrs["units"] == "Myr" - assert data.attrs["long_name"] == "age of seafloor crust" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) assert data.gmt.registration == 0 diff --git a/pygmt/tests/test_datasets_earth_free_air_anomaly.py b/pygmt/tests/test_datasets_earth_free_air_anomaly.py index 3bf3b32fa47..517a1bb9b89 100644 --- a/pygmt/tests/test_datasets_earth_free_air_anomaly.py +++ b/pygmt/tests/test_datasets_earth_free_air_anomaly.py @@ -12,8 +12,9 @@ def test_earth_faa_01d(): Test some properties of the free air anomaly 01d data. """ data = load_earth_free_air_anomaly(resolution="01d") - assert data.name == "free_air_anomaly" - assert data.attrs["long_name"] == "IGPP Earth Free-Air Anomaly" + assert data.name == "z" + assert data.attrs["long_name"] == "faa (mGal)" + assert data.attrs["description"] == "IGPP Earth free-air anomaly" assert data.attrs["units"] == "mGal" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) diff --git a/pygmt/tests/test_datasets_earth_geoid.py b/pygmt/tests/test_datasets_earth_geoid.py index 3ea2916792c..84bfc5d7bf4 100644 --- a/pygmt/tests/test_datasets_earth_geoid.py +++ b/pygmt/tests/test_datasets_earth_geoid.py @@ -12,9 +12,10 @@ def test_earth_geoid_01d(): Test some properties of the earth geoid 01d data. 
""" data = load_earth_geoid(resolution="01d") - assert data.name == "earth_geoid" + assert data.name == "z" + assert data.attrs["long_name"] == "geoid (m)" + assert data.attrs["description"] == "EGM2008 Earth geoid" assert data.attrs["units"] == "m" - assert data.attrs["long_name"] == "EGM2008 Earth Geoid" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) assert data.gmt.registration == 0 diff --git a/pygmt/tests/test_datasets_earth_magnetic_anomaly.py b/pygmt/tests/test_datasets_earth_magnetic_anomaly.py index aa81ad5e1a4..65e1a57601c 100644 --- a/pygmt/tests/test_datasets_earth_magnetic_anomaly.py +++ b/pygmt/tests/test_datasets_earth_magnetic_anomaly.py @@ -14,8 +14,9 @@ def test_earth_mag_01d(): Test some properties of the magnetic anomaly 01d data. """ data = load_earth_magnetic_anomaly(resolution="01d") - assert data.name == "magnetic_anomaly" - assert data.attrs["long_name"] == "Earth magnetic anomaly" + assert data.name == "z" + assert data.attrs["long_name"] == "anomaly (nT)" + assert data.attrs["description"] == "EMAG2 Earth Magnetic Anomaly Model" assert data.attrs["units"] == "nT" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) @@ -60,8 +61,9 @@ def test_earth_mag4km_01d(): Test some properties of the magnetic anomaly 4km 01d data. """ data = load_earth_magnetic_anomaly(resolution="01d", data_source="emag2_4km") - assert data.name == "magnetic_anomaly" - assert data.attrs["long_name"] == "Earth magnetic anomaly" + assert data.name == "z" + assert data.attrs["long_name"] == "anomaly (nT)" + assert data.attrs["description"] == "EMAG2 Earth Magnetic Anomaly Model" assert data.attrs["units"] == "nT" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) @@ -116,8 +118,9 @@ def test_earth_mag_01d_wdmam(): data = load_earth_magnetic_anomaly( resolution="01d", registration="gridline", data_source="wdmam" ) - assert data.name == "wdmam" - assert data.attrs["long_name"] == "World Digital Magnetic Anomaly Map" + assert data.name == "z" + assert data.attrs["long_name"] == "anomaly (nT)" + assert data.attrs["description"] == "WDMAM World Digital Magnetic Anomaly Map" assert data.attrs["units"] == "nT" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) diff --git a/pygmt/tests/test_datasets_earth_mask.py b/pygmt/tests/test_datasets_earth_mask.py index d46bfd3353f..c449e1a79a7 100644 --- a/pygmt/tests/test_datasets_earth_mask.py +++ b/pygmt/tests/test_datasets_earth_mask.py @@ -12,8 +12,8 @@ def test_earth_mask_01d(): Test some properties of the Earth mask 01d data. """ data = load_earth_mask(resolution="01d") - assert data.name == "earth_mask" - assert data.attrs["long_name"] == "Mask of land and water features" + assert data.name == "z" + assert data.attrs["description"] == "GSHHG Earth mask" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) assert data.gmt.registration == 0 diff --git a/pygmt/tests/test_datasets_earth_relief.py b/pygmt/tests/test_datasets_earth_relief.py index 13339ab0bb8..48ce69996ba 100644 --- a/pygmt/tests/test_datasets_earth_relief.py +++ b/pygmt/tests/test_datasets_earth_relief.py @@ -18,13 +18,14 @@ def test_earth_relief_01d_igpp_synbath(data_source): Test some properties of the earth relief 01d data with IGPP and SYNBATH data. 
""" data = load_earth_relief(resolution="01d", data_source=data_source) - assert data.name == "elevation" + assert data.name == "z" + assert data.attrs["long_name"] == "elevation (m)" + assert data.attrs["description"] == "IGPP Earth relief" assert data.attrs["units"] == "meters" - assert data.attrs["long_name"] == "Earth elevation relative to the geoid" assert data.attrs["vertical_datum"] == "EGM96" assert data.attrs["horizontal_datum"] == "WGS84" - assert data.gmt.registration == 0 assert data.shape == (181, 361) + assert data.gmt.registration == 0 npt.assert_allclose(data.lat, np.arange(-90, 91, 1)) npt.assert_allclose(data.lon, np.arange(-180, 181, 1)) npt.assert_allclose(data.min(), -7174.0, atol=0.5) @@ -37,8 +38,10 @@ def test_earth_relief_01d_gebco(data_source): Test some properties of the earth relief 01d data with GEBCO and GEBOCSI data. """ data = load_earth_relief(resolution="01d", data_source=data_source) + assert data.name == "z" + assert data.attrs["long_name"] == "elevation (m)" + assert data.attrs["description"] == "GEBCO Earth relief" assert data.attrs["units"] == "meters" - assert data.attrs["long_name"] == "Earth elevation relative to the geoid" assert data.attrs["vertical_datum"] == "EGM96" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) diff --git a/pygmt/tests/test_datasets_earth_vertical_gravity_gradient.py b/pygmt/tests/test_datasets_earth_vertical_gravity_gradient.py index 7e1ab6a2c88..84fda47f8f7 100644 --- a/pygmt/tests/test_datasets_earth_vertical_gravity_gradient.py +++ b/pygmt/tests/test_datasets_earth_vertical_gravity_gradient.py @@ -12,9 +12,10 @@ def test_earth_vertical_gravity_gradient_01d(): Test some properties of the earth vgg 01d data. """ data = load_earth_vertical_gravity_gradient(resolution="01d") - assert data.name == "earth_vgg" + assert data.name == "z" + assert data.attrs["long_name"] == "vgg (Eotvos)" + assert data.attrs["description"] == "IGPP Earth vertical gravity gradient" assert data.attrs["units"] == "Eotvos" - assert data.attrs["long_name"] == "IGPP Earth Vertical Gravity Gradient" assert data.attrs["horizontal_datum"] == "WGS84" assert data.shape == (181, 361) assert data.gmt.registration == 0 diff --git a/pygmt/tests/test_datasets_load_remote_datasets.py b/pygmt/tests/test_datasets_load_remote_datasets.py index 0b9c3f55dde..ac28a594813 100644 --- a/pygmt/tests/test_datasets_load_remote_datasets.py +++ b/pygmt/tests/test_datasets_load_remote_datasets.py @@ -26,8 +26,8 @@ def test_load_remote_dataset_benchmark_with_region(): Benchmark loading a remote dataset with 'region'. """ data = load_remote_dataset_wrapper(resolution="01d", region=[-10, 10, -5, 5]) - assert data.name == "seafloor_age" - assert data.attrs["long_name"] == "age of seafloor crust" + assert data.name == "z" + assert data.attrs["long_name"] == "ages (Myr)" assert data.attrs["units"] == "Myr" assert data.attrs["horizontal_datum"] == "WGS84" assert data.gmt.registration == 0 diff --git a/pygmt/tests/test_datasets_mars_relief.py b/pygmt/tests/test_datasets_mars_relief.py index 7a8ed0993b1..88c437848f6 100644 --- a/pygmt/tests/test_datasets_mars_relief.py +++ b/pygmt/tests/test_datasets_mars_relief.py @@ -12,9 +12,10 @@ def test_mars_relief_01d(): Test some properties of the Mars relief 01d data. 
""" data = load_mars_relief(resolution="01d") - assert data.name == "mars_relief" + assert data.name == "z" + assert data.attrs["long_name"] == "elevation (m)" + assert data.attrs["description"] == "NASA Mars (MOLA) relief" assert data.attrs["units"] == "meters" - assert data.attrs["long_name"] == "NASA Mars (MOLA) relief" assert data.shape == (181, 361) assert data.gmt.registration == 0 npt.assert_allclose(data.lat, np.arange(-90, 91, 1)) diff --git a/pygmt/tests/test_datasets_mercury_relief.py b/pygmt/tests/test_datasets_mercury_relief.py index e51ef7c0dc1..65dbe62e0d1 100644 --- a/pygmt/tests/test_datasets_mercury_relief.py +++ b/pygmt/tests/test_datasets_mercury_relief.py @@ -12,9 +12,10 @@ def test_mercury_relief_01d(): Test some properties of the Mercury relief 01d data. """ data = load_mercury_relief(resolution="01d") - assert data.name == "mercury_relief" + assert data.name == "z" + assert data.attrs["long_name"] == "elevation (m)" + assert data.attrs["description"] == "USGS Mercury relief" assert data.attrs["units"] == "meters" - assert data.attrs["long_name"] == "USGS Mercury relief" assert data.shape == (181, 361) assert data.gmt.registration == 0 npt.assert_allclose(data.lat, np.arange(-90, 91, 1)) diff --git a/pygmt/tests/test_datasets_moon_relief.py b/pygmt/tests/test_datasets_moon_relief.py index 5aa801d792d..87f10ded098 100644 --- a/pygmt/tests/test_datasets_moon_relief.py +++ b/pygmt/tests/test_datasets_moon_relief.py @@ -12,9 +12,10 @@ def test_moon_relief_01d(): Test some properties of the Moon relief 01d data. """ data = load_moon_relief(resolution="01d") - assert data.name == "moon_relief" + assert data.name == "z" + assert data.attrs["long_name"] == "elevation (m)" + assert data.attrs["description"] == "USGS Moon (LOLA) relief" assert data.attrs["units"] == "meters" - assert data.attrs["long_name"] == "USGS Moon (LOLA) relief" assert data.shape == (181, 361) assert data.gmt.registration == 0 npt.assert_allclose(data.lat, np.arange(-90, 91, 1)) diff --git a/pygmt/tests/test_datasets_pluto_relief.py b/pygmt/tests/test_datasets_pluto_relief.py index ad5a046d528..9979f5915a5 100644 --- a/pygmt/tests/test_datasets_pluto_relief.py +++ b/pygmt/tests/test_datasets_pluto_relief.py @@ -12,9 +12,10 @@ def test_pluto_relief_01d(): Test some properties of the Pluto relief 01d data. """ data = load_pluto_relief(resolution="01d") - assert data.name == "pluto_relief" + assert data.name == "z" + assert data.attrs["long_name"] == "elevation (m)" + assert data.attrs["description"] == "USGS Pluto relief" assert data.attrs["units"] == "meters" - assert data.attrs["long_name"] == "USGS Pluto relief" assert data.shape == (181, 361) assert data.gmt.registration == 0 npt.assert_allclose(data.lat, np.arange(-90, 91, 1)) diff --git a/pygmt/tests/test_datasets_venus_relief.py b/pygmt/tests/test_datasets_venus_relief.py index f411ff2069c..f2dc9a9489f 100644 --- a/pygmt/tests/test_datasets_venus_relief.py +++ b/pygmt/tests/test_datasets_venus_relief.py @@ -12,9 +12,10 @@ def test_venus_relief_01d(): Test some properties of the Venus relief 01d data. 
""" data = load_venus_relief(resolution="01d") - assert data.name == "venus_relief" + assert data.name == "z" + assert data.attrs["long_name"] == "elevation (m)" + assert data.attrs["description"] == "NASA Magellan Venus relief" assert data.attrs["units"] == "meters" - assert data.attrs["long_name"] == "NASA Magellan Venus relief" assert data.shape == (181, 361) assert data.gmt.registration == 0 npt.assert_allclose(data.lat, np.arange(-90, 91, 1)) From 8592b547879b6edf36d186d49fc70bc36f20a365 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 25 Apr 2024 08:20:52 +0800 Subject: [PATCH 086/218] Support left/right single quotation marks in text and arguments (#3192) --- pygmt/helpers/utils.py | 3 ++- pygmt/tests/test_text.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index 1732dfb4e3f..66be427d937 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -305,10 +305,11 @@ def non_ascii_to_octal(argstr): c: "\\" + format(i, "o") for c, i in zip( "•…™—–fiž" # \03x. \030 is undefined + "’‘" # \047 and \140 "š" # \177 "Œ†‡Ł⁄‹Š›œŸŽł‰„“”" # \20x-\21x "ı`´ˆ˜¯˘˙¨‚˚¸'˝˛ˇ", # \22x-\23x - [*range(25, 32), *range(127, 160)], + [*range(25, 32), 39, 96, *range(127, 160)], strict=True, ) } diff --git a/pygmt/tests/test_text.py b/pygmt/tests/test_text.py index 9d52f2629af..ab07e964954 100644 --- a/pygmt/tests/test_text.py +++ b/pygmt/tests/test_text.py @@ -430,5 +430,5 @@ def test_text_quotation_marks(): """ fig = Figure() fig.basemap(projection="X4c/2c", region=[0, 4, 0, 2], frame=0) - fig.text(x=2, y=1, text="\\234 \\140 ' \" \\216 \\217", font="20p") + fig.text(x=2, y=1, text='\\234 ‘ ’ " “ ”', font="20p") # noqa: RUF001 return fig From bc673bc9433b7d9c7e669d45620dd9d36b5558d5 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 25 Apr 2024 08:21:38 +0800 Subject: [PATCH 087/218] non_ascii_to_octal: Return the input string if it only contains printable ASCII characters (#3199) --- pygmt/helpers/utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index 66be427d937..781e0e4533f 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -233,6 +233,10 @@ def non_ascii_to_octal(argstr): >>> non_ascii_to_octal("ABC ±120° DEF α ♥") 'ABC \\261120\\260 DEF @~\\141@~ @%34%\\252@%%' """ # noqa: RUF002 + # Return the string if it only contains printable ASCII characters from 32 to 126. 
+ if all(32 <= ord(c) <= 126 for c in argstr): + return argstr + # Dictionary mapping non-ASCII characters to octal codes mapping = {} From e4a42856eab6ec985a4ea6d5d8f48d1911ec806d Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 27 Apr 2024 11:15:05 +0800 Subject: [PATCH 088/218] _load_remote_dataset: Rename parameter names and remove trailing underscore from prefix (#3202) * Rename parameter names in _load_remote_datasets * Remove the trailing underscore from dataset prefix * Fix one test --- pygmt/datasets/earth_age.py | 4 ++-- pygmt/datasets/earth_free_air_anomaly.py | 4 ++-- pygmt/datasets/earth_geoid.py | 4 ++-- pygmt/datasets/earth_magnetic_anomaly.py | 10 +++++----- pygmt/datasets/earth_mask.py | 4 ++-- pygmt/datasets/earth_relief.py | 14 +++++++------- pygmt/datasets/earth_vertical_gravity_gradient.py | 4 ++-- pygmt/datasets/load_remote_dataset.py | 12 ++++++------ pygmt/datasets/mars_relief.py | 4 ++-- pygmt/datasets/mercury_relief.py | 4 ++-- pygmt/datasets/moon_relief.py | 4 ++-- pygmt/datasets/pluto_relief.py | 4 ++-- pygmt/datasets/venus_relief.py | 4 ++-- pygmt/tests/test_datasets_load_remote_datasets.py | 4 ++-- 14 files changed, 40 insertions(+), 40 deletions(-) diff --git a/pygmt/datasets/earth_age.py b/pygmt/datasets/earth_age.py index 702129826c8..03e81d703d9 100644 --- a/pygmt/datasets/earth_age.py +++ b/pygmt/datasets/earth_age.py @@ -100,8 +100,8 @@ def load_earth_age( ... ) """ grid = _load_remote_dataset( - dataset_name="earth_age", - dataset_prefix="earth_age_", + name="earth_age", + prefix="earth_age", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/earth_free_air_anomaly.py b/pygmt/datasets/earth_free_air_anomaly.py index 9beae54ef1a..350f54ba831 100644 --- a/pygmt/datasets/earth_free_air_anomaly.py +++ b/pygmt/datasets/earth_free_air_anomaly.py @@ -102,8 +102,8 @@ def load_earth_free_air_anomaly( ... ) """ grid = _load_remote_dataset( - dataset_name="earth_faa", - dataset_prefix="earth_faa_", + name="earth_faa", + prefix="earth_faa", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/earth_geoid.py b/pygmt/datasets/earth_geoid.py index 8965e06857d..5190eb01413 100644 --- a/pygmt/datasets/earth_geoid.py +++ b/pygmt/datasets/earth_geoid.py @@ -93,8 +93,8 @@ def load_earth_geoid( ... ) """ grid = _load_remote_dataset( - dataset_name="earth_geoid", - dataset_prefix="earth_geoid_", + name="earth_geoid", + prefix="earth_geoid", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/earth_magnetic_anomaly.py b/pygmt/datasets/earth_magnetic_anomaly.py index e8d7afe1684..39862efa0b8 100644 --- a/pygmt/datasets/earth_magnetic_anomaly.py +++ b/pygmt/datasets/earth_magnetic_anomaly.py @@ -137,9 +137,9 @@ def load_earth_magnetic_anomaly( ... 
) """ magnetic_anomaly_sources = { - "emag2": "earth_mag_", - "emag2_4km": "earth_mag4km_", - "wdmam": "earth_wdmam_", + "emag2": "earth_mag", + "emag2_4km": "earth_mag4km", + "wdmam": "earth_wdmam", } if data_source not in magnetic_anomaly_sources: raise GMTInvalidInput( @@ -149,8 +149,8 @@ def load_earth_magnetic_anomaly( dataset_prefix = magnetic_anomaly_sources[data_source] dataset_name = "earth_wdmam" if data_source == "wdmam" else "earth_mag" grid = _load_remote_dataset( - dataset_name=dataset_name, - dataset_prefix=dataset_prefix, + name=dataset_name, + prefix=dataset_prefix, resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/earth_mask.py b/pygmt/datasets/earth_mask.py index 1c439ec32d5..53ef5ecb29d 100644 --- a/pygmt/datasets/earth_mask.py +++ b/pygmt/datasets/earth_mask.py @@ -97,8 +97,8 @@ def load_earth_mask( array(0, dtype=int8) """ grid = _load_remote_dataset( - dataset_name="earth_mask", - dataset_prefix="earth_mask_", + name="earth_mask", + prefix="earth_mask", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/earth_relief.py b/pygmt/datasets/earth_relief.py index a0ddcd6eabc..0f6dae1f781 100644 --- a/pygmt/datasets/earth_relief.py +++ b/pygmt/datasets/earth_relief.py @@ -140,10 +140,10 @@ def load_earth_relief( land_only_srtm_resolutions = ["03s", "01s"] earth_relief_sources = { - "igpp": "earth_relief_", - "gebco": "earth_gebco_", - "gebcosi": "earth_gebcosi_", - "synbath": "earth_synbath_", + "igpp": "earth_relief", + "gebco": "earth_gebco", + "gebcosi": "earth_gebcosi", + "synbath": "earth_synbath", } if data_source not in earth_relief_sources: raise GMTInvalidInput( @@ -153,7 +153,7 @@ def load_earth_relief( # Choose earth relief data prefix if use_srtm and resolution in land_only_srtm_resolutions: if data_source == "igpp": - dataset_prefix = "srtm_relief_" + dataset_prefix = "srtm_relief" else: raise GMTInvalidInput( f"Option 'use_srtm=True' doesn't work with data source '{data_source}'." @@ -168,8 +168,8 @@ def load_earth_relief( case "gebco" | "gebcosi": dataset_name = "earth_gebco" grid = _load_remote_dataset( - dataset_name=dataset_name, - dataset_prefix=dataset_prefix, + name=dataset_name, + prefix=dataset_prefix, resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/earth_vertical_gravity_gradient.py b/pygmt/datasets/earth_vertical_gravity_gradient.py index 53e11e229c1..502f3eec913 100644 --- a/pygmt/datasets/earth_vertical_gravity_gradient.py +++ b/pygmt/datasets/earth_vertical_gravity_gradient.py @@ -104,8 +104,8 @@ def load_earth_vertical_gravity_gradient( ... ) """ grid = _load_remote_dataset( - dataset_name="earth_vgg", - dataset_prefix="earth_vgg_", + name="earth_vgg", + prefix="earth_vgg", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/load_remote_dataset.py b/pygmt/datasets/load_remote_dataset.py index 6f099952512..c97455bb36f 100644 --- a/pygmt/datasets/load_remote_dataset.py +++ b/pygmt/datasets/load_remote_dataset.py @@ -330,8 +330,8 @@ class GMTRemoteDataset(NamedTuple): @kwargs_to_strings(region="sequence") def _load_remote_dataset( - dataset_name: str, - dataset_prefix: str, + name: str, + prefix: str, resolution: str, region: str | list, registration: Literal["gridline", "pixel", None], @@ -341,9 +341,9 @@ def _load_remote_dataset( Parameters ---------- - dataset_name + name The name for the dataset in the 'datasets' dictionary. 
- dataset_prefix + prefix The prefix for the dataset that will be passed to the GMT C API. resolution The grid resolution. The suffix ``d``, ``m``, and ``s`` stand for arc-degrees, @@ -373,7 +373,7 @@ def _load_remote_dataset( plotting functions. Refer to :class:`pygmt.GMTDataArrayAccessor` for detailed explanations and workarounds. """ - dataset = datasets[dataset_name] + dataset = datasets[name] # Check resolution if resolution not in dataset.resolutions: @@ -401,7 +401,7 @@ def _load_remote_dataset( "returned unless only the pixel-registered grid is available." ) - fname = f"@{dataset_prefix}{resolution}_{registration[0]}" + fname = f"@{prefix}_{resolution}_{registration[0]}" if resinfo.tiled and region is None: raise GMTInvalidInput( f"'region' is required for {dataset.description} resolution '{resolution}'." diff --git a/pygmt/datasets/mars_relief.py b/pygmt/datasets/mars_relief.py index a8106513a79..750d8bdbaa5 100644 --- a/pygmt/datasets/mars_relief.py +++ b/pygmt/datasets/mars_relief.py @@ -96,8 +96,8 @@ def load_mars_relief( ... ) """ grid = _load_remote_dataset( - dataset_name="mars_relief", - dataset_prefix="mars_relief_", + name="mars_relief", + prefix="mars_relief", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/mercury_relief.py b/pygmt/datasets/mercury_relief.py index 956a41ed145..ee753f8a7a0 100644 --- a/pygmt/datasets/mercury_relief.py +++ b/pygmt/datasets/mercury_relief.py @@ -96,8 +96,8 @@ def load_mercury_relief( ... ) """ grid = _load_remote_dataset( - dataset_name="mercury_relief", - dataset_prefix="mercury_relief_", + name="mercury_relief", + prefix="mercury_relief", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/moon_relief.py b/pygmt/datasets/moon_relief.py index 5681fb7becb..522632476bb 100644 --- a/pygmt/datasets/moon_relief.py +++ b/pygmt/datasets/moon_relief.py @@ -96,8 +96,8 @@ def load_moon_relief( ... ) """ grid = _load_remote_dataset( - dataset_name="moon_relief", - dataset_prefix="moon_relief_", + name="moon_relief", + prefix="moon_relief", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/pluto_relief.py b/pygmt/datasets/pluto_relief.py index 72de0c9c593..b3cc7a428c9 100644 --- a/pygmt/datasets/pluto_relief.py +++ b/pygmt/datasets/pluto_relief.py @@ -96,8 +96,8 @@ def load_pluto_relief( ... ) """ grid = _load_remote_dataset( - dataset_name="pluto_relief", - dataset_prefix="pluto_relief_", + name="pluto_relief", + prefix="pluto_relief", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/venus_relief.py b/pygmt/datasets/venus_relief.py index 0bf52f7b611..172d9e2a588 100644 --- a/pygmt/datasets/venus_relief.py +++ b/pygmt/datasets/venus_relief.py @@ -93,8 +93,8 @@ def load_venus_relief( ... ) """ grid = _load_remote_dataset( - dataset_name="venus_relief", - dataset_prefix="venus_relief_", + name="venus_relief", + prefix="venus_relief", resolution=resolution, region=region, registration=registration, diff --git a/pygmt/tests/test_datasets_load_remote_datasets.py b/pygmt/tests/test_datasets_load_remote_datasets.py index ac28a594813..b46e21e2f94 100644 --- a/pygmt/tests/test_datasets_load_remote_datasets.py +++ b/pygmt/tests/test_datasets_load_remote_datasets.py @@ -12,8 +12,8 @@ def load_remote_dataset_wrapper(resolution="01d", region=None, registration=None Wrapper for _load_remote_dataset using the earth age dataset as an example. 
""" return _load_remote_dataset( - dataset_name="earth_age", - dataset_prefix="earth_age_", + name="earth_age", + prefix="earth_age", resolution=resolution, region=region, registration=registration, From 9d234fe0b62cedc9d1a04198cfa7f355f1281afd Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 27 Apr 2024 20:24:26 +0800 Subject: [PATCH 089/218] Rename issue template to set the order of issue templates (#3204) --- .github/ISSUE_TEMPLATE/{bug_report.yaml => 1-bug_report.yaml} | 0 .../{feature_request.yaml => 2-feature_request.yaml} | 0 .../ISSUE_TEMPLATE/{module_request.yaml => 3-module_request.yaml} | 0 .../{release_checklist.md => 4-release_checklist.md} | 0 .../{bump_gmt_checklist.md => 5-bump_gmt_checklist.md} | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename .github/ISSUE_TEMPLATE/{bug_report.yaml => 1-bug_report.yaml} (100%) rename .github/ISSUE_TEMPLATE/{feature_request.yaml => 2-feature_request.yaml} (100%) rename .github/ISSUE_TEMPLATE/{module_request.yaml => 3-module_request.yaml} (100%) rename .github/ISSUE_TEMPLATE/{release_checklist.md => 4-release_checklist.md} (100%) rename .github/ISSUE_TEMPLATE/{bump_gmt_checklist.md => 5-bump_gmt_checklist.md} (100%) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/1-bug_report.yaml similarity index 100% rename from .github/ISSUE_TEMPLATE/bug_report.yaml rename to .github/ISSUE_TEMPLATE/1-bug_report.yaml diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/2-feature_request.yaml similarity index 100% rename from .github/ISSUE_TEMPLATE/feature_request.yaml rename to .github/ISSUE_TEMPLATE/2-feature_request.yaml diff --git a/.github/ISSUE_TEMPLATE/module_request.yaml b/.github/ISSUE_TEMPLATE/3-module_request.yaml similarity index 100% rename from .github/ISSUE_TEMPLATE/module_request.yaml rename to .github/ISSUE_TEMPLATE/3-module_request.yaml diff --git a/.github/ISSUE_TEMPLATE/release_checklist.md b/.github/ISSUE_TEMPLATE/4-release_checklist.md similarity index 100% rename from .github/ISSUE_TEMPLATE/release_checklist.md rename to .github/ISSUE_TEMPLATE/4-release_checklist.md diff --git a/.github/ISSUE_TEMPLATE/bump_gmt_checklist.md b/.github/ISSUE_TEMPLATE/5-bump_gmt_checklist.md similarity index 100% rename from .github/ISSUE_TEMPLATE/bump_gmt_checklist.md rename to .github/ISSUE_TEMPLATE/5-bump_gmt_checklist.md From 72ce692dcc2b15653d93f386305e487a70924a9e Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 28 Apr 2024 06:50:04 +0800 Subject: [PATCH 090/218] Simplify the load_earth_magnetic_anomaly and load_earth_relief functions (#3203) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- pygmt/datasets/earth_magnetic_anomaly.py | 13 ++++++------ pygmt/datasets/earth_relief.py | 26 +++++++++++------------- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/pygmt/datasets/earth_magnetic_anomaly.py b/pygmt/datasets/earth_magnetic_anomaly.py index 39862efa0b8..b614edd1c81 100644 --- a/pygmt/datasets/earth_magnetic_anomaly.py +++ b/pygmt/datasets/earth_magnetic_anomaly.py @@ -136,21 +136,20 @@ def load_earth_magnetic_anomaly( ... resolution="20m", registration="gridline", data_source="wdmam" ... 
) """ - magnetic_anomaly_sources = { + # Map data source to prefix + prefix = { "emag2": "earth_mag", "emag2_4km": "earth_mag4km", "wdmam": "earth_wdmam", - } - if data_source not in magnetic_anomaly_sources: + }.get(data_source) + if prefix is None: raise GMTInvalidInput( f"Invalid earth magnetic anomaly data source '{data_source}'. " "Valid values are 'emag2', 'emag2_4km', and 'wdmam'." ) - dataset_prefix = magnetic_anomaly_sources[data_source] - dataset_name = "earth_wdmam" if data_source == "wdmam" else "earth_mag" grid = _load_remote_dataset( - name=dataset_name, - prefix=dataset_prefix, + name="earth_wdmam" if data_source == "wdmam" else "earth_mag", + prefix=prefix, resolution=resolution, region=region, registration=registration, diff --git a/pygmt/datasets/earth_relief.py b/pygmt/datasets/earth_relief.py index 0f6dae1f781..5ba34a24c4d 100644 --- a/pygmt/datasets/earth_relief.py +++ b/pygmt/datasets/earth_relief.py @@ -139,37 +139,35 @@ def load_earth_relief( # resolutions of original land-only SRTM tiles from NASA land_only_srtm_resolutions = ["03s", "01s"] - earth_relief_sources = { + # Map data source to prefix + prefix = { "igpp": "earth_relief", "gebco": "earth_gebco", "gebcosi": "earth_gebcosi", "synbath": "earth_synbath", - } - if data_source not in earth_relief_sources: + }.get(data_source) + if prefix is None: raise GMTInvalidInput( f"Invalid earth relief data source '{data_source}'. " - "Valid values are 'igpp', 'gebco', 'gebcosi' and 'synbath'." + "Valid values are 'igpp', 'gebco', 'gebcosi', and 'synbath'." ) - # Choose earth relief data prefix + # Use SRTM or not. if use_srtm and resolution in land_only_srtm_resolutions: - if data_source == "igpp": - dataset_prefix = "srtm_relief" - else: + if data_source != "igpp": raise GMTInvalidInput( f"Option 'use_srtm=True' doesn't work with data source '{data_source}'." " Please set 'data_source' to 'igpp'." ) - else: - dataset_prefix = earth_relief_sources[data_source] + prefix = "srtm_relief" # Choose earth relief dataset match data_source: case "igpp" | "synbath": - dataset_name = "earth_igpp" + name = "earth_igpp" case "gebco" | "gebcosi": - dataset_name = "earth_gebco" + name = "earth_gebco" grid = _load_remote_dataset( - name=dataset_name, - prefix=dataset_prefix, + name=name, + prefix=prefix, resolution=resolution, region=region, registration=registration, From 0e630e3440b51ab6ac50d860c249276886cda07b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 28 Apr 2024 01:46:13 +0200 Subject: [PATCH 091/218] Figure.grdcontour: Adjust processing of arguments passed to the "annotation" and "interval" parameters, deprecate "sequence_plus" (#3116) Co-authored-by: Dongdong Tian Co-authored-by: actions-bot <58130806+actions-bot@users.noreply.github.com> Co-authored-by: Yvonne --- pygmt/src/grdcontour.py | 90 +++++++++++++------ .../test_grdcontour_multiple_levels.png.dvc | 6 ++ .../test_grdcontour_one_level.png.dvc | 6 ++ pygmt/tests/test_grdcontour.py | 50 ++++++++++- 4 files changed, 122 insertions(+), 30 deletions(-) create mode 100644 pygmt/tests/baseline/test_grdcontour_multiple_levels.png.dvc create mode 100644 pygmt/tests/baseline/test_grdcontour_one_level.png.dvc diff --git a/pygmt/src/grdcontour.py b/pygmt/src/grdcontour.py index 53e6b32ced8..8a6db2715e7 100644 --- a/pygmt/src/grdcontour.py +++ b/pygmt/src/grdcontour.py @@ -2,8 +2,16 @@ grdcontour - Plot a contour figure. 
""" +import warnings + from pygmt.clib import Session -from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import ( + build_arg_list, + fmt_docstring, + is_nonstr_iter, + kwargs_to_strings, + use_alias, +) __doctest_skip__ = ["grdcontour"] @@ -27,9 +35,7 @@ p="perspective", t="transparency", ) -@kwargs_to_strings( - R="sequence", L="sequence", A="sequence_plus", c="sequence_comma", p="sequence" -) +@kwargs_to_strings(R="sequence", L="sequence", c="sequence_comma", p="sequence") def grdcontour(self, grid, **kwargs): r""" Convert grids or images to contours and plot them on maps. @@ -43,26 +49,26 @@ def grdcontour(self, grid, **kwargs): Parameters ---------- {grid} - interval : str or int + interval : float, list, or str Specify the contour lines to generate. - - The file name of a CPT file where the color boundaries will - be used as contour levels. - - The file name of a 2 (or 3) column file containing the contour - levels (col 1), (**C**)ontour or (**A**)nnotate (col 2), and optional - angle (col 3). - - A fixed contour interval *cont_int* or a single contour with - +\ *cont_int*. - annotation : str, int, or list + - The file name of a CPT file where the color boundaries will be used as + contour levels. + - The file name of a 2 (or 3) column file containing the contour levels (col 0), + (**C**)ontour or (**A**)nnotate (col 1), and optional angle (col 2). + - A fixed contour interval. + - A list of contour levels. + annotation : float, list, or str Specify or disable annotated contour levels, modifies annotated contours specified in ``interval``. - - Specify a fixed annotation interval *annot_int* or a - single annotation level +\ *annot_int*. - - Disable all annotation with **-**. - - Optional label modifiers can be specified as a single string - ``"[annot_int]+e"`` or with a list of arguments - ``([annot_int], "e", "f10p", "gred")``. + - Specify a fixed annotation interval. + - Specify a list of annotation levels. + - Disable all annotations by setting ``annotation="n"``. + - Adjust the appearance by appending different modifiers, e.g., + ``"annot_int+f10p+gred"`` gives annotations with a font size of 10 points + and a red filled box. For all available modifiers see + :gmt-docs:`grdcontour.html#a`. limit : str or list of 2 ints *low*/*high*. Do no draw contours below `low` or above `high`, specify as string @@ -96,32 +102,58 @@ def grdcontour(self, grid, **kwargs): Example ------- >>> import pygmt - >>> # load the 15 arc-minutes grid with "gridline" registration - >>> # in a specified region + >>> # Load the 15 arc-minutes grid with "gridline" registration in the + >>> # specified region >>> grid = pygmt.datasets.load_earth_relief( ... resolution="15m", ... region=[-92.5, -82.5, -3, 7], ... registration="gridline", ... ) - >>> # create a new plot with pygmt.Figure() + >>> # Create a new plot with pygmt.Figure() >>> fig = pygmt.Figure() - >>> # create the contour plot + >>> # Create the contour plot >>> fig.grdcontour( - ... # pass in the grid downloaded above + ... # Pass in the grid downloaded above ... grid=grid, - ... # set the interval for contour lines at 250 meters + ... # Set the interval for contour lines at 250 meters ... interval=250, - ... # set the interval for annotated contour lines at 1,000 meters + ... # Set the interval for annotated contour lines at 1,000 meters ... annotation=1000, - ... # add a frame for the plot + ... # Add a frame for the plot ... frame="a", - ... 
# set the projection to Mercator for the 10 cm figure + ... # Set the projection to Mercator for the 10 cm figure ... projection="M10c", ... ) - >>> # show the plot + >>> # Show the plot >>> fig.show() """ kwargs = self._preprocess(**kwargs) + + # Backward compatibility with the old syntax for the annotation parameter, e.g., + # [100, "e", "f10p", "gred"]. + if is_nonstr_iter(kwargs.get("A")) and any( + i[0] in "acdefgijlLnoprtuvwx=" for i in kwargs["A"] if isinstance(i, str) + ): + msg = ( + "Argument of the parameter 'annotation'/'A' is using the old, deprecated " + "syntax. Please refer to the PyGMT documentation for the new syntax. " + "The warning will be removed in v0.14.0 and the old syntax will no longer " + "be supported. " + ) + warnings.warn(msg, category=FutureWarning, stacklevel=2) + kwargs["A"] = "+".join(f"{item}" for item in kwargs["A"]) + + # Specify levels for the annotation and interval parameters. + # One level is converted to a string with a trailing comma to separate it from + # specifying an interval. + # Multiple levels are concatenated to a comma-separated string. + for arg in ["A", "C"]: + if is_nonstr_iter(kwargs.get(arg)): + if len(kwargs[arg]) == 1: # One level + kwargs[arg] = str(kwargs[arg][0]) + "," + else: # Multiple levels + kwargs[arg] = ",".join(f"{item}" for item in kwargs[arg]) + with Session() as lib: with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: lib.call_module( diff --git a/pygmt/tests/baseline/test_grdcontour_multiple_levels.png.dvc b/pygmt/tests/baseline/test_grdcontour_multiple_levels.png.dvc new file mode 100644 index 00000000000..e0558fff47d --- /dev/null +++ b/pygmt/tests/baseline/test_grdcontour_multiple_levels.png.dvc @@ -0,0 +1,6 @@ +outs: +- md5: 4d20cdb71af2e6568f64f0246ec860ea + size: 64008 + isexec: true + hash: md5 + path: test_grdcontour_multiple_levels.png diff --git a/pygmt/tests/baseline/test_grdcontour_one_level.png.dvc b/pygmt/tests/baseline/test_grdcontour_one_level.png.dvc new file mode 100644 index 00000000000..4072632ab00 --- /dev/null +++ b/pygmt/tests/baseline/test_grdcontour_one_level.png.dvc @@ -0,0 +1,6 @@ +outs: +- md5: fc624766f0b8eac8206735a05c7c9662 + size: 45023 + isexec: true + hash: md5 + path: test_grdcontour_one_level.png diff --git a/pygmt/tests/test_grdcontour.py b/pygmt/tests/test_grdcontour.py index 2cedd80dc14..33fd04bdd0c 100644 --- a/pygmt/tests/test_grdcontour.py +++ b/pygmt/tests/test_grdcontour.py @@ -24,7 +24,8 @@ def fixture_grid(): @pytest.mark.mpl_image_compare def test_grdcontour(grid): """ - Plot a contour image using an xarray grid with fixed contour interval. + Plot a contour image using an xarray grid with fixed (different) contour and + annotation intervals. """ fig = Figure() fig.grdcontour( @@ -33,6 +34,53 @@ def test_grdcontour(grid): return fig +@pytest.mark.mpl_image_compare +def test_grdcontour_one_level(grid): + """ + Plot a contour image using an xarray grid with one contour level and one + (different) annotation level. + """ + fig = Figure() + fig.grdcontour( + grid=grid, interval=[400], annotation=[570], projection="M10c", frame=True + ) + return fig + + +@pytest.mark.mpl_image_compare(filename="test_grdcontour_one_level.png") +def test_grdcontour_old_annotations(grid): + """ + Test the old syntax for the annotation parameter using "sequence_plus". + Modified from the "test_grdcontour_one_level()" test. Can be removed in v0.14.0. 
+ """ + fig = Figure() + fig.grdcontour( + grid=grid, + interval=[400], + annotation=["570,", "gwhite"], + projection="M10c", + frame=True, + ) + return fig + + +@pytest.mark.mpl_image_compare +def test_grdcontour_multiple_levels(grid): + """ + Plot a contour image using an xarray grid with multiple (different) contour + and annotation levels. + """ + fig = Figure() + fig.grdcontour( + grid=grid, + interval=[400, 450, 500], + annotation=[400, 570], + projection="M10c", + frame=True, + ) + return fig + + @pytest.mark.benchmark @pytest.mark.mpl_image_compare def test_grdcontour_labels(grid): From 38f91e17a8aa26857f6d2260d4a48beba686d480 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 28 Apr 2024 06:35:38 +0200 Subject: [PATCH 092/218] Figure.contour: Adjust processing of arguments passed to the "annotation" and "levels" parameters (#2706) --- pygmt/datasets/earth_relief.py | 2 +- pygmt/src/contour.py | 44 ++++++++++----- .../baseline/test_contour_interval.png.dvc | 6 +++ .../test_contour_multiple_levels.png.dvc | 6 +++ .../baseline/test_contour_one_level.png.dvc | 6 +++ pygmt/tests/test_contour.py | 54 +++++++++++++++++++ 6 files changed, 105 insertions(+), 13 deletions(-) create mode 100644 pygmt/tests/baseline/test_contour_interval.png.dvc create mode 100644 pygmt/tests/baseline/test_contour_multiple_levels.png.dvc create mode 100644 pygmt/tests/baseline/test_contour_one_level.png.dvc diff --git a/pygmt/datasets/earth_relief.py b/pygmt/datasets/earth_relief.py index 5ba34a24c4d..38461137e7c 100644 --- a/pygmt/datasets/earth_relief.py +++ b/pygmt/datasets/earth_relief.py @@ -140,7 +140,7 @@ def load_earth_relief( land_only_srtm_resolutions = ["03s", "01s"] # Map data source to prefix - prefix = { + prefix = { "igpp": "earth_relief", "gebco": "earth_gebco", "gebcosi": "earth_gebcosi", diff --git a/pygmt/src/contour.py b/pygmt/src/contour.py index cf5b2007c66..7c6dc8a3904 100644 --- a/pygmt/src/contour.py +++ b/pygmt/src/contour.py @@ -3,7 +3,13 @@ """ from pygmt.clib import Session -from pygmt.helpers import build_arg_list, fmt_docstring, kwargs_to_strings, use_alias +from pygmt.helpers import ( + build_arg_list, + fmt_docstring, + is_nonstr_iter, + kwargs_to_strings, + use_alias, +) @fmt_docstring @@ -54,23 +60,26 @@ def contour(self, data=None, x=None, y=None, z=None, **kwargs): Arrays of x and y coordinates and values z of the data points. {projection} {region} - annotation : str or int + annotation : float, list, or str Specify or disable annotated contour levels, modifies annotated contours specified in ``levels``. - - Specify a fixed annotation interval *annot_int* or a - single annotation level +\ *annot_int*. + - Specify a fixed annotation interval. + - Specify a list of annotation levels. + - Disable all annotations by setting ``annotation="n"``. + - Adjust the appearance by appending different modifiers, e.g., + ``"annot_int+f10p+gred"`` gives annotations with a font size of 10 points and + a red filled box. For all available modifiers see :gmt-docs:`contour.html#a`. {frame} - levels : str or int + levels : float, list, or str Specify the contour lines to generate. - - The file name of a CPT file where the color boundaries will - be used as contour levels. - - The file name of a 2 (or 3) column file containing the contour - levels (col 1), (**C**)ontour or (**A**)nnotate (col 2), and optional - angle (col 3). - - A fixed contour interval *cont_int* or a single contour with - +\ *cont_int*. 
+ - The file name of a CPT file where the color boundaries will be used as + contour levels. + - The file name of a 2 (or 3) column file containing the contour levels (col 0), + (**C**)ontour or (**A**)nnotate (col 1), and optional angle (col 2). + - A fixed contour interval. + - A list of contour levels. D : str Dump contour coordinates. E : str @@ -114,6 +123,17 @@ def contour(self, data=None, x=None, y=None, z=None, **kwargs): """ kwargs = self._preprocess(**kwargs) + # Specify levels for contours or annotations. + # One level is converted to a string with a trailing comma to separate it from + # specifying an interval. + # Multiple levels are concatenated to a comma-separated string. + for arg in ["A", "C"]: + if is_nonstr_iter(kwargs.get(arg)): + if len(kwargs[arg]) == 1: # One level + kwargs[arg] = str(kwargs[arg][0]) + "," + else: # Multiple levels + kwargs[arg] = ",".join(f"{item}" for item in kwargs[arg]) + with Session() as lib: with lib.virtualfile_in( check_kind="vector", data=data, x=x, y=y, z=z, required_z=True diff --git a/pygmt/tests/baseline/test_contour_interval.png.dvc b/pygmt/tests/baseline/test_contour_interval.png.dvc new file mode 100644 index 00000000000..32b57f89111 --- /dev/null +++ b/pygmt/tests/baseline/test_contour_interval.png.dvc @@ -0,0 +1,6 @@ +outs: +- md5: 44d70a0b17bc7c7939462184bf06e4da + size: 50998 + isexec: true + hash: md5 + path: test_contour_interval.png diff --git a/pygmt/tests/baseline/test_contour_multiple_levels.png.dvc b/pygmt/tests/baseline/test_contour_multiple_levels.png.dvc new file mode 100644 index 00000000000..1e5012d7460 --- /dev/null +++ b/pygmt/tests/baseline/test_contour_multiple_levels.png.dvc @@ -0,0 +1,6 @@ +outs: +- md5: 7bef85a616c46b9f05f4dbed07bd703d + size: 29247 + isexec: true + hash: md5 + path: test_contour_multiple_levels.png diff --git a/pygmt/tests/baseline/test_contour_one_level.png.dvc b/pygmt/tests/baseline/test_contour_one_level.png.dvc new file mode 100644 index 00000000000..02234514e0c --- /dev/null +++ b/pygmt/tests/baseline/test_contour_one_level.png.dvc @@ -0,0 +1,6 @@ +outs: +- md5: 8c1ed221788e3af76279a7765640ea43 + size: 28882 + isexec: true + hash: md5 + path: test_contour_one_level.png diff --git a/pygmt/tests/test_contour.py b/pygmt/tests/test_contour.py index 6cca6ecbf7c..c359c1d124c 100644 --- a/pygmt/tests/test_contour.py +++ b/pygmt/tests/test_contour.py @@ -75,6 +75,60 @@ def test_contour_from_file(region): return fig +@pytest.mark.mpl_image_compare +def test_contour_interval(region): + """ + Plot data with fixed (different) contour and annotation intervals. + """ + fig = Figure() + fig.contour( + data=POINTS_DATA, + projection="X10c", + region=region, + frame="af", + levels=0.1, + annotation=0.2, + pen=True, + ) + return fig + + +@pytest.mark.mpl_image_compare +def test_contour_one_level(region): + """ + Plot data with one contour level and one (different) annotation level. + """ + fig = Figure() + fig.contour( + data=POINTS_DATA, + projection="X10c", + region=region, + frame="af", + levels=[0.4], + annotation=[0.5], + pen=True, + ) + return fig + + +@pytest.mark.mpl_image_compare +def test_contour_multiple_levels(region): + """ + Plot data with multiple (different) contour and annotation levels. 
+ """ + fig = Figure() + fig.contour( + data=POINTS_DATA, + projection="X10c", + region=region, + frame="af", + levels=[0.2, 0.3], + annotation=[0.4, 0.45], + pen=True, + ) + return fig + + @pytest.mark.mpl_image_compare(filename="test_contour_vec.png") def test_contour_incols_transposed_data(region): """ From d58720a49d0154ba6b46d7c7d89436d0a65a5f93 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 28 Apr 2024 18:08:07 +0800 Subject: [PATCH 093/218] Remove sequence_plus converter from kwargs_to_string (#3207) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- pygmt/helpers/decorators.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/pygmt/helpers/decorators.py b/pygmt/helpers/decorators.py index c61f1acecbd..525cc611a54 100644 --- a/pygmt/helpers/decorators.py +++ b/pygmt/helpers/decorators.py @@ -630,10 +630,8 @@ def kwargs_to_strings(**conversions): Conversions available: - * 'sequence': transforms a sequence (list, tuple) into a ``'/'`` separated - string - * 'sequence_comma': transforms a sequence into a ``','`` separated string - * 'sequence_plus': transforms a sequence into a ``'+'`` separated string + * "sequence": transform a sequence (list, tuple) into a ``"/"`` separated string + * "sequence_comma": transform a sequence into a ``","`` separated string Parameters ---------- @@ -719,11 +717,7 @@ def kwargs_to_strings(**conversions): >>> module(["data1.txt", "data2.txt"], ("20p", "20p"), R=[1, 2, 3, 4]) ['data1.txt', 'data2.txt'] 20p/20p {'R': '1/2/3/4'} """ - separators = { - "sequence": "/", - "sequence_comma": ",", - "sequence_plus": "+", - } + separators = {"sequence": "/", "sequence_comma": ","} for arg, fmt in conversions.items(): if fmt not in separators: From b1f5cc7966ed18110c39b0bcb5361ea23d4b0c43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:44:11 +0200 Subject: [PATCH 094/218] Figure.grdcontour: Deprecate parameter "interval" to "levels" (FutureWarning since v0.12.0, will be removed in v0.16.0) (#3209) --- examples/get_started/02_contour_map.py | 6 +++--- examples/tutorials/advanced/contour_map.py | 14 +++++--------- pygmt/src/grdcontour.py | 12 +++++++----- pygmt/tests/test_grdcontour.py | 16 +++++++--------- 4 files changed, 22 insertions(+), 26 deletions(-) diff --git a/examples/get_started/02_contour_map.py b/examples/get_started/02_contour_map.py index fc7d1f7a1e2..30a1f6ffd98 100644 --- a/examples/get_started/02_contour_map.py +++ b/examples/get_started/02_contour_map.py @@ -82,7 +82,7 @@ # :meth:`pygmt.Figure.grdcontour` method is used. The ``frame`` and # ``projection`` are already set using :meth:`pygmt.Figure.grdimage` and are # not needed again. However, the same input for ``grid`` (in this case, the -# variable named "grid") must be input again. The ``interval`` parameter sets +# variable named "grid") must be input again. The ``levels`` parameter sets # the spacing between adjacent contour lines (in this case, 500 meters). 
The # ``annotation`` parameter annotates the contour lines corresponding to the # given interval (in this case, 1,000 meters) with the related values, here @@ -93,7 +93,7 @@ fig = pygmt.Figure() fig.grdimage(grid=grid, frame="a", projection="M10c", cmap="oleron") -fig.grdcontour(grid=grid, interval=500, annotation=1000) +fig.grdcontour(grid=grid, levels=500, annotation=1000) fig.colorbar(frame=["a1000", "x+lElevation", "y+lm"]) fig.show() @@ -109,7 +109,7 @@ fig = pygmt.Figure() fig.grdimage(grid=grid, frame="a", projection="M10c", cmap="oleron") -fig.grdcontour(grid=grid, interval=500, annotation=1000) +fig.grdcontour(grid=grid, levels=500, annotation=1000) fig.coast(shorelines="2p", land="lightgray") fig.colorbar(frame=["a1000", "x+lElevation", "y+lm"]) fig.show() diff --git a/examples/tutorials/advanced/contour_map.py b/examples/tutorials/advanced/contour_map.py index 2f58ec9564b..75ca65c2588 100644 --- a/examples/tutorials/advanced/contour_map.py +++ b/examples/tutorials/advanced/contour_map.py @@ -33,16 +33,12 @@ # Contour line settings # --------------------- # -# Use the ``annotation`` and ``interval`` parameters to adjust contour line +# Use the ``annotation`` and ``levels`` parameters to adjust contour line # intervals. In the example below, there are contour intervals every 250 meters # and annotated contour lines every 1,000 meters. fig = pygmt.Figure() -fig.grdcontour( - annotation=1000, - interval=250, - grid=grid, -) +fig.grdcontour(annotation=1000, levels=250, grid=grid) fig.show() @@ -57,7 +53,7 @@ fig = pygmt.Figure() fig.grdcontour( annotation=1000, - interval=250, + levels=250, grid=grid, limit=[-4000, -2000], ) @@ -74,7 +70,7 @@ fig = pygmt.Figure() fig.grdcontour( annotation=1000, - interval=250, + levels=250, grid=grid, limit=[-4000, -2000], projection="M10c", @@ -104,7 +100,7 @@ ) fig.grdcontour( annotation=1000, - interval=250, + levels=250, grid=grid, limit=[-4000, -2000], ) diff --git a/pygmt/src/grdcontour.py b/pygmt/src/grdcontour.py index 8a6db2715e7..2d3ba763722 100644 --- a/pygmt/src/grdcontour.py +++ b/pygmt/src/grdcontour.py @@ -7,6 +7,7 @@ from pygmt.clib import Session from pygmt.helpers import ( build_arg_list, + deprecate_parameter, fmt_docstring, is_nonstr_iter, kwargs_to_strings, @@ -17,10 +18,11 @@ @fmt_docstring +@deprecate_parameter("interval", "levels", "v0.12.0", remove_version="v0.16.0") @use_alias( A="annotation", B="frame", - C="interval", + C="levels", G="label_placement", J="projection", L="limit", @@ -49,7 +51,7 @@ def grdcontour(self, grid, **kwargs): Parameters ---------- {grid} - interval : float, list, or str + levels : float, list, or str Specify the contour lines to generate. - The file name of a CPT file where the color boundaries will be used as @@ -60,7 +62,7 @@ def grdcontour(self, grid, **kwargs): - A list of contour levels. annotation : float, list, or str Specify or disable annotated contour levels, modifies annotated - contours specified in ``interval``. + contours specified in ``levels``. - Specify a fixed annotation interval. - Specify a list of annotation levels. @@ -116,7 +118,7 @@ def grdcontour(self, grid, **kwargs): ... # Pass in the grid downloaded above ... grid=grid, ... # Set the interval for contour lines at 250 meters - ... interval=250, + ... levels=250, ... # Set the interval for annotated contour lines at 1,000 meters ... annotation=1000, ... 
# Add a frame for the plot @@ -143,7 +145,7 @@ def grdcontour(self, grid, **kwargs): warnings.warn(msg, category=FutureWarning, stacklevel=2) kwargs["A"] = "+".join(f"{item}" for item in kwargs["A"]) - # Specify levels for the annotation and interval parameters. + # Specify levels for the annotation and levels parameters. # One level is converted to a string with a trailing comma to separate it from # specifying an interval. # Multiple levels are concatenated to a comma-separated string. diff --git a/pygmt/tests/test_grdcontour.py b/pygmt/tests/test_grdcontour.py index 33fd04bdd0c..14d43e849cf 100644 --- a/pygmt/tests/test_grdcontour.py +++ b/pygmt/tests/test_grdcontour.py @@ -28,9 +28,7 @@ def test_grdcontour(grid): annotation intervals. """ fig = Figure() - fig.grdcontour( - grid=grid, interval=50, annotation=200, projection="M10c", frame=True - ) + fig.grdcontour(grid=grid, levels=50, annotation=200, projection="M10c", frame=True) return fig @@ -42,7 +40,7 @@ def test_grdcontour_one_level(grid): """ fig = Figure() fig.grdcontour( - grid=grid, interval=[400], annotation=[570], projection="M10c", frame=True + grid=grid, levels=[400], annotation=[570], projection="M10c", frame=True ) return fig @@ -56,7 +54,7 @@ def test_grdcontour_old_annotations(grid): fig = Figure() fig.grdcontour( grid=grid, - interval=[400], + levels=[400], annotation=["570,", "gwhite"], projection="M10c", frame=True, @@ -73,7 +71,7 @@ def test_grdcontour_multiple_levels(grid): fig = Figure() fig.grdcontour( grid=grid, - interval=[400, 450, 500], + levels=[400, 450, 500], annotation=[400, 570], projection="M10c", frame=True, @@ -90,7 +88,7 @@ def test_grdcontour_labels(grid): fig = Figure() fig.grdcontour( grid=grid, - interval=50, + levels=50, annotation=200, projection="M10c", pen=["a1p,red", "c0.5p,black"], @@ -108,7 +106,7 @@ def test_grdcontour_slice(grid): grid_ = grid.sel(lat=slice(-20, -10)) fig = Figure() - fig.grdcontour(grid=grid_, interval=100, projection="M10c", frame=True) + fig.grdcontour(grid=grid_, levels=100, projection="M10c", frame=True) return fig @@ -121,7 +119,7 @@ def test_grdcontour_interval_file_full_opts(grid): comargs = { "region": [-53, -49, -20, -17], - "interval": TEST_CONTOUR_FILE, + "levels": TEST_CONTOUR_FILE, "grid": grid, "resample": 100, "projection": "M10c", From 48f12cc241a34c2879fb89645b609a357c7f9314 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:48:09 +0200 Subject: [PATCH 095/218] Figure.contour & Figur.grdcontour: Improve docstring of pen (#3210) --- pygmt/src/contour.py | 11 ++++++++++- pygmt/src/grdcontour.py | 11 ++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/pygmt/src/contour.py b/pygmt/src/contour.py index 7c6dc8a3904..c5aa26a3b10 100644 --- a/pygmt/src/contour.py +++ b/pygmt/src/contour.py @@ -102,7 +102,16 @@ def contour(self, data=None, x=None, y=None, z=None, **kwargs): skip : bool or str [**p**\|\ **t**]. Skip input points outside region. - {pen} + pen : str or list + [*type*]\ *pen*\ [**+c**\ [**l**\|\ **f**]]. + *type*, if present, can be **a** for annotated contours or **c** for regular + contours [Default]. The pen sets the attributes for the particular line. + Default pen for annotated contours is ``"0.75p,black"`` and for regular + contours ``"0.25p,black"``. Normally, all contours are drawn with a fixed + color determined by the pen setting. 
If **+cl** is appended the colors of the + contour lines are taken from the CPT (see ``levels``). If **+cf** is + appended the colors from the CPT file are applied to the contour annotations. + Select **+c** for both effects. label : str Add a legend entry for the contour being plotted. Normally, the annotated contour is selected for the legend. You can select the diff --git a/pygmt/src/grdcontour.py b/pygmt/src/grdcontour.py index 2d3ba763722..0c461330acf 100644 --- a/pygmt/src/grdcontour.py +++ b/pygmt/src/grdcontour.py @@ -88,7 +88,16 @@ def grdcontour(self, grid, **kwargs): five controlling algorithms. See :gmt-docs:`grdcontour.html#g` for details. {verbose} - {pen} + pen : str or list + [*type*]\ *pen*\ [**+c**\ [**l**\|\ **f**]]. + *type*, if present, can be **a** for annotated contours or **c** for regular + contours [Default]. The pen sets the attributes for the particular line. + Default pen for annotated contours is ``"0.75p,black"`` and for regular + contours ``"0.25p,black"``. Normally, all contours are drawn with a fixed + color determined by the pen setting. If **+cl** is appended the colors of the + contour lines are taken from the CPT (see ``levels``). If **+cf** is + appended the colors from the CPT file are applied to the contour annotations. + Select **+c** for both effects. {panel} {coltypes} label : str From 3a5e8db86dbcecc6f4a223880cefd1af36f80c00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:22:56 +0200 Subject: [PATCH 096/218] Tutorial and Intro for Figure.grdcontour: Adjust line length and parameter order (#3212) --- examples/get_started/02_contour_map.py | 113 ++++++++++----------- examples/tutorials/advanced/contour_map.py | 64 +++++------- 2 files changed, 74 insertions(+), 103 deletions(-) diff --git a/examples/get_started/02_contour_map.py b/examples/get_started/02_contour_map.py index 30a1f6ffd98..4a9062916be 100644 --- a/examples/get_started/02_contour_map.py +++ b/examples/get_started/02_contour_map.py @@ -2,12 +2,10 @@ 2. Create a contour map ======================= -This tutorial page covers the basics of creating a figure of the Earth -relief, using a remote dataset hosted by GMT, using the method -:meth:`pygmt.datasets.load_earth_relief`. It will use the -:meth:`pygmt.Figure.grdimage`, :meth:`pygmt.Figure.grdcontour`, -:meth:`pygmt.Figure.colorbar`, and :meth:`pygmt.Figure.coast` methods for -plotting. +This tutorial page covers the basics of creating a figure of the Earth relief, using a +remote dataset hosted by GMT, using the method :meth:`pygmt.datasets.load_earth_relief`. +It will use the :meth:`pygmt.Figure.grdimage`, :meth:`pygmt.Figure.grdcontour`, +:meth:`pygmt.Figure.colorbar`, and :meth:`pygmt.Figure.coast` methods for plotting. """ # %% @@ -17,13 +15,12 @@ # Loading the Earth relief dataset # -------------------------------- # -# The first step is to use :meth:`pygmt.datasets.load_earth_relief`. -# The ``resolution`` parameter sets the resolution of the remote grid file, -# which will affect the resolution of the plot made later in the tutorial. -# The ``registration`` parameter determines the grid registration. +# The first step is to use :meth:`pygmt.datasets.load_earth_relief`. The ``resolution`` +# parameter sets the resolution of the remote grid file, which will affect the +# resolution of the plot made later in the tutorial. The ``registration`` parameter +# determines the grid registration. 
# -# This grid region covers the islands of Guam and Rota in the western Pacific -# Ocean. +# This grid region covers the islands of Guam and Rota in the western Pacific Ocean. grid = pygmt.datasets.load_earth_relief( resolution="30s", region=[144.5, 145.5, 13, 14.5], registration="gridline" @@ -34,18 +31,16 @@ # Plotting Earth relief # --------------------- # -# To plot Earth relief data, the method :meth:`pygmt.Figure.grdimage` can be -# used to plot a color-coded figure to display the topography and bathymetry -# in the grid file. The ``grid`` parameter accepts the input grid, which in -# this case is the remote file downloaded in the previous step. If the -# ``region`` parameter is not set, the region boundaries of the input grid are -# used. +# To plot Earth relief data, the method :meth:`pygmt.Figure.grdimage` can be used to +# plot a color-coded figure to display the topography and bathymetry in the grid file. +# The ``grid`` parameter accepts the input grid, which in this case is the remote file +# downloaded in the previous step. If the ``region`` parameter is not set, the region +# boundaries of the input grid are used. # -# The ``cmap`` parameter sets the color palette table (CPT) used for portraying -# the Earth relief. The :meth:`pygmt.Figure.grdimage` method uses the input -# grid to relate the Earth relief values to a specific color within the CPT. -# In this case, the CPT "oleron" is used; a full list of CPTs can be found -# at :gmt-docs:`reference/cpts.html`. +# The ``cmap`` parameter sets the color palette table (CPT) used for portraying the +# Earth relief. The :meth:`pygmt.Figure.grdimage` method uses the input grid to relate +# the Earth relief values to a specific color within the CPT. In this case, the CPT +# "oleron" is used; a full list of CPTs can be found at :gmt-docs:`reference/cpts.html`. fig = pygmt.Figure() fig.grdimage(grid=grid, frame="a", projection="M10c", cmap="oleron") @@ -56,17 +51,17 @@ # Adding a colorbar # ----------------- # -# To show how the plotted colors relate to the Earth relief, a colorbar can be -# added using the :meth:`pygmt.Figure.colorbar` method. +# To show how the plotted colors relate to the Earth relief, a colorbar can be added +# using the :meth:`pygmt.Figure.colorbar` method. # -# To control the annotation and labels on the colorbar, a list is passed to -# the ``frame`` parameter. The value beginning with ``"a"`` sets the interval -# for the annotation on the colorbar, in this case every 1,000 meters. To set -# the label for an axis on the colorbar, the argument begins with either -# ``"x+l"`` (x-axis) or ``"y+l"`` (y-axis), followed by the intended label. +# To control the annotation and labels on the colorbar, a list is passed to the +# ``frame`` parameter. The value beginning with ``"a"`` sets the interval for the +# annotation on the colorbar, in this case every 1,000 meters. To set the label for an +# axis on the colorbar, the argument begins with either ``"x+l"`` (x-axis) or ``"y+l"`` +# (y-axis), followed by the intended label. # -# By default, the CPT for the colorbar is the same as the one set -# in :meth:`pygmt.Figure.grdimage`. +# By default, the CPT for the colorbar is the same as the one set in +# :meth:`pygmt.Figure.grdimage`. fig = pygmt.Figure() fig.grdimage(grid=grid, frame="a", projection="M10c", cmap="oleron") @@ -78,18 +73,16 @@ # Adding contour lines # -------------------- # -# To add contour lines to the color-coded figure, the -# :meth:`pygmt.Figure.grdcontour` method is used. 
The ``frame`` and -# ``projection`` are already set using :meth:`pygmt.Figure.grdimage` and are -# not needed again. However, the same input for ``grid`` (in this case, the -# variable named "grid") must be input again. The ``levels`` parameter sets -# the spacing between adjacent contour lines (in this case, 500 meters). The -# ``annotation`` parameter annotates the contour lines corresponding to the -# given interval (in this case, 1,000 meters) with the related values, here -# elevation or bathymetry. By default, these contour lines are drawn thicker. -# Optionally, the appearance (thickness, color, style) of the annotated and -# the not-annotated contour lines can be adjusted (separately) by specifying -# the desired ``pen``. +# To add contour lines to the color-coded figure, the :meth:`pygmt.Figure.grdcontour` +# method is used. The ``frame`` and ``projection`` are already set using +# :meth:`pygmt.Figure.grdimage` and are not needed again. However, the same input for +# ``grid`` (in this case, the variable named "grid") must be input again. The ``levels`` +# parameter sets the spacing between adjacent contour lines (in this case, 500 meters). +# The ``annotation`` parameter annotates the contour lines corresponding to the given +# interval (in this case, 1,000 meters) with the related values, here elevation or +# bathymetry. By default, these contour lines are drawn thicker. Optionally, the +# appearance (thickness, color, style) of the annotated and the not-annotated contour +# lines can be adjusted (separately) by specifying the desired ``pen``. fig = pygmt.Figure() fig.grdimage(grid=grid, frame="a", projection="M10c", cmap="oleron") @@ -102,10 +95,9 @@ # Color in land # ------------- # -# To make it clear where the islands are located, the -# :meth:`pygmt.Figure.coast` method can be used to color in the landmasses. -# The ``land`` is colored in as "lightgray", and the ``shorelines`` parameter -# draws a border around the islands. +# To make it clear where the islands are located, the :meth:`pygmt.Figure.coast` method +# can be used to color in the landmasses. The ``land`` is colored in as "lightgray", and +# the ``shorelines`` parameter draws a border around the islands. fig = pygmt.Figure() fig.grdimage(grid=grid, frame="a", projection="M10c", cmap="oleron") @@ -119,25 +111,22 @@ # Additional exercises # -------------------- # -# This is the end of the second tutorial. Here are some additional exercises -# for the concepts that were discussed: +# This is the end of the second tutorial. Here are some additional exercises for the +# concepts that were discussed: # -# 1. Change the resolution of the grid file to either ``"01m"`` (1 arc-minute, -# a lower resolution) or ``"15s"`` (15 arc-seconds, a higher resolution). -# Note that higher resolution grids will have larger file sizes. Available -# resolutions can be found `here -# `_. +# 1. Change the resolution of the grid file to either ``"01m"`` (1 arc-minute, a lower +# resolution) or ``"15s"`` (15 arc-seconds, a higher resolution). Note that higher +# resolution grids will have larger file sizes. Available resolutions can be found +# at :meth:`pygmt.datasets.load_earth_relief`. # # 2. Create a contour map of the area around Mt. Rainier. A suggestion for the # ``region`` would be ``[-122, -121, 46.5, 47.5]``. Adjust the -# :meth:`pygmt.Figure.grdcontour` and :meth:`pygmt.Figure.colorbar` -# settings as needed to make the figure look good. 
+# :meth:`pygmt.Figure.grdcontour` and :meth:`pygmt.Figure.colorbar` settings as +# needed to make the figure look good. # -# 3. Create a contour map of São Miguel Island in the Azores; a suggested -# ``region`` is ``[-26, -25, 37.5, 38]``. Instead of coloring in ``land``, -# set ``water`` to "lightblue" to only display Earth relief information for -# the land. +# 3. Create a contour map of São Miguel Island in the Azores; a suggested ``region`` is +# ``[-26, -25, 37.5, 38]``. Instead of coloring in ``land``, set ``water`` to +# "lightblue" to only display Earth relief information for the land. # # 4. Try other CPTs, such as "SCM/fes" or "geo". diff --git a/examples/tutorials/advanced/contour_map.py b/examples/tutorials/advanced/contour_map.py index 75ca65c2588..7e6e6361c62 100644 --- a/examples/tutorials/advanced/contour_map.py +++ b/examples/tutorials/advanced/contour_map.py @@ -16,13 +16,11 @@ # Create contour plot # ------------------- # -# The :meth:`pygmt.Figure.grdcontour` method takes the grid input. -# It plots annotated contour lines, which are thicker and have the -# elevation/depth written on them, and unannotated contour lines. -# In the example below, the default contour line intervals are 500 meters, -# with an annotated contour line every 1,000 meters. -# By default, it plots the map with the -# equidistant cylindrical projection and with no frame. +# The :meth:`pygmt.Figure.grdcontour` method takes the grid input. It plots annotated +# contour lines, which are thicker and have the elevation/depth written on them, and +# unannotated contour lines. In the example below, the default contour line intervals +# are 500 meters, with an annotated contour line every 1,000 meters. By default, it +# plots the map with the equidistant cylindrical projection and with no frame. fig = pygmt.Figure() fig.grdcontour(grid=grid) @@ -33,12 +31,12 @@ # Contour line settings # --------------------- # -# Use the ``annotation`` and ``levels`` parameters to adjust contour line -# intervals. In the example below, there are contour intervals every 250 meters -# and annotated contour lines every 1,000 meters. +# Use the ``annotation`` and ``levels`` parameters to adjust contour line intervals. In +# the example below, there are contour intervals every 250 meters and annotated contour +# lines every 1,000 meters. fig = pygmt.Figure() -fig.grdcontour(annotation=1000, levels=250, grid=grid) +fig.grdcontour(grid=grid, annotation=1000, levels=250) fig.show() @@ -46,17 +44,12 @@ # Contour limits # -------------- # -# The ``limit`` parameter sets the minimum and maximum values for the contour -# lines. The parameter takes the low and high values, and is either a list (as -# below) or a string ``limit="-4000/-2000"``. +# The ``limit`` parameter sets the minimum and maximum values for the contour lines. The +# parameter takes the low and high values, and is either a list (as below) or a string +# ``limit="-4000/-2000"``. fig = pygmt.Figure() -fig.grdcontour( - annotation=1000, - levels=250, - grid=grid, - limit=[-4000, -2000], -) +fig.grdcontour(grid=grid, annotation=1000, levels=250, limit=[-4000, -2000]) fig.show() @@ -64,14 +57,14 @@ # Map settings # ------------ # -# The :meth:`pygmt.Figure.grdcontour` method accepts additional parameters, -# including setting the projection and frame. +# The :meth:`pygmt.Figure.grdcontour` method accepts additional parameters, including +# setting the projection and frame. 
fig = pygmt.Figure() fig.grdcontour( + grid=grid, annotation=1000, levels=250, - grid=grid, limit=[-4000, -2000], projection="M10c", frame=True, @@ -83,27 +76,16 @@ # Adding a colormap # ----------------- # -# The :meth:`pygmt.Figure.grdimage` method can be used to add a -# colormap to the contour map. It must be called prior to -# :meth:`pygmt.Figure.grdcontour` to keep the contour lines visible on the -# final map. If the ``projection`` parameter is specified in the +# The :meth:`pygmt.Figure.grdimage` method can be used to add a colormap to the contour +# map. It must be called prior to :meth:`pygmt.Figure.grdcontour` to keep the contour +# lines visible on the final map. If the ``projection`` parameter is specified in the # :meth:`pygmt.Figure.grdimage` method, it does not need to be repeated in the -# :meth:`pygmt.Figure.grdcontour` method. Finally, a colorbar is added using -# the :meth:`pygmt.Figure.colorbar` method. +# :meth:`pygmt.Figure.grdcontour` method. Finally, a colorbar is added using the +# :meth:`pygmt.Figure.colorbar` method. fig = pygmt.Figure() -fig.grdimage( - grid=grid, - cmap="haxby", - projection="M10c", - frame=True, -) -fig.grdcontour( - annotation=1000, - levels=250, - grid=grid, - limit=[-4000, -2000], -) +fig.grdimage(grid=grid, cmap="haxby", projection="M10c", frame=True) +fig.grdcontour(grid=grid, annotation=1000, levels=250, limit=[-4000, -2000]) fig.colorbar(frame=["x+lelevation", "y+lm"]) fig.show() From a43a8c760d2808d875867febe7ce8ef02fee2037 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Tue, 30 Apr 2024 17:11:57 +0200 Subject: [PATCH 097/218] External resources: Add repo "gmt-pygmt-plotting" (#3213) --- doc/external_resources.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/external_resources.md b/doc/external_resources.md index 87a69ec538e..0cd744810ac 100644 --- a/doc/external_resources.md +++ b/doc/external_resources.md @@ -128,4 +128,14 @@ Michael Grund Wei Ji Leong :::: +::::{grid-item-card} PyGMT plotting examples +:link: https://github.com/yvonnefroehlich/gmt-pygmt-plotting +:text-align: center +:margin: 0 3 0 0 + +![](https://github.com/yvonnefroehlich/gmt-pygmt-plotting/raw/main/_images/github_maps_readme_main.png) ++++ +Yvonne Fröhlich +:::: + ::::: From 3095700349992f2414011fad635c3c6dc3c355f6 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 30 Apr 2024 23:14:09 +0800 Subject: [PATCH 098/218] Document that a list of file names, pathlib.Path objects, URLs or remote files are supported (#3214) --- examples/get_started/04_table_inputs.py | 71 +++++++++++++------------ 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/examples/get_started/04_table_inputs.py b/examples/get_started/04_table_inputs.py index 8ea55d9b2c9..541a7057711 100644 --- a/examples/get_started/04_table_inputs.py +++ b/examples/get_started/04_table_inputs.py @@ -4,20 +4,19 @@ Generally, PyGMT accepts two different types of data inputs: tables and grids. -- A table is a 2-D array with rows and columns. Each column represents a - different variable (e.g., *x*, *y* and *z*) and each row represents a - different record. -- A grid is a 2-D array of data that is regularly spaced in the x and y - directions (or longitude and latitude). - -In this tutorial, we'll focus on working with table inputs, and cover grid -inputs in a separate tutorial. - -PyGMT supports a variety of table input types that allow you to work with data -in a format that suits your needs. 
In this tutorial, we'll explore the -different table input types available in PyGMT and provide examples for each. -By understanding the different table input types, you can choose the one that -best fits your data and analysis needs, and work more efficiently with PyGMT. +- A table is a 2-D array with rows and columns. Each column represents a different + variable (e.g., *x*, *y* and *z*) and each row represents a different record. +- A grid is a 2-D array of data that is regularly spaced in the x and y directions (or + longitude and latitude). + +In this tutorial, we'll focus on working with table inputs, and cover grid inputs in a +separate tutorial. + +PyGMT supports a variety of table input types that allow you to work with data in a +format that suits your needs. In this tutorial, we'll explore the different table input +types available in PyGMT and provide examples for each. By understanding the different +table input types, you can choose the one that best fits your data and analysis needs, +and work more efficiently with PyGMT. """ # %% @@ -32,10 +31,10 @@ # ASCII table file # ---------------- # -# Most PyGMT functions/methods that accept table input data have a ``data`` -# parameter. The easiest way to provide table input data to PyGMT is by -# specifying the file name of an ASCII table (e.g., ``data="input_data.dat"``). -# This is useful when your data is stored in a separate text file. +# Most PyGMT functions/methods that accept table input data have a ``data`` parameter. +# The easiest way to provide table input data to PyGMT is by specifying the file name of +# an ASCII table (e.g., ``data="input_data.dat"``). This is useful when your data is +# stored in a separate text file. # Create an example file with 3 rows and 2 columns data = np.array([[1.0, 2.0], [5.0, 4.0], [8.0, 3.0]]) @@ -51,14 +50,16 @@ Path("input_data.dat").unlink() # %% -# Besides a plain string to a table file, the following variants are also -# accepted: +# Besides a plain string to a table file, the following variants are also accepted: # # - A :class:`pathlib.Path` object. # - A full URL. PyGMT will download the file to the current directory first. -# - A file name prefixed with ``@`` (e.g., ``data="@input_data.dat"``), which -# is a special syntax in GMT to indicate that the file is a remote file -# hosted on the GMT data server. +# - A file name prefixed with ``@`` (e.g., ``data="@input_data.dat"``), which is a +# special syntax in GMT to indicate that the file is a remote file hosted on the GMT +# data server. +# +# Additionally, PyGMT also supports a list of file names, :class:`pathlib.Path` objects, +# URLs, or remote files, to provide more flexibility in specifying input files. # %% # 2-D array: `list`, `numpy.ndarray`, and `pandas.DataFrame` @@ -92,9 +93,9 @@ # ------------------------------- # # If you're working with geospatial data, you can read your data as a -# :class:`geopandas.GeoDataFrame` object and pass it to the ``data`` -# parameter. This is useful if your data is stored in a geospatial data format -# (e.g., GeoJSON, etc.) that GMT and PyGMT do not support natively. +# :class:`geopandas.GeoDataFrame` object and pass it to the ``data`` parameter. This is +# useful if your data is stored in a geospatial data format (e.g., GeoJSON, etc.) that +# GMT and PyGMT do not support natively. 
# Example GeoDataFrame gdf = gpd.GeoDataFrame( @@ -114,10 +115,10 @@ # Scalar values or 1-D arrays # --------------------------- # -# In addition to the ``data`` parameter, some PyGMT functions/methods also -# provide individual parameters (e.g., ``x`` and ``y`` for data coordinates) -# which allow you to specify the data. These parameters accept individual -# scalar values or 1-D arrays (lists or 1-D numpy arrays). +# In addition to the ``data`` parameter, some PyGMT functions/methods also provide +# individual parameters (e.g., ``x`` and ``y`` for data coordinates) which allow you to +# specify the data. These parameters accept individual scalar values or 1-D arrays +# (lists or 1-D numpy arrays). fig = pygmt.Figure() fig.basemap(region=[0, 10, 0, 5], projection="X10c/5c", frame=True) @@ -139,8 +140,8 @@ # Conclusion # ---------- # -# In PyGMT, you have the flexibility to provide data in various table input -# types, including file names, 2-D arrays (2-D :class:`list`, -# :class:`numpy.ndarray`, :class:`pandas.DataFrames`), scalar values or a -# series of 1-D arrays, and :class:`geopandas.GeoDataFrame`. Choose the input -# type that best suits your data source and analysis requirements. +# In PyGMT, you have the flexibility to provide data in various table input types, +# including file names, 2-D arrays (2-D :class:`list`, :class:`numpy.ndarray`, +# :class:`pandas.DataFrames`), scalar values or a series of 1-D arrays, and +# :class:`geopandas.GeoDataFrame`. Choose the input type that best suits your data +# source and analysis requirements. From 95fc4f662dde31d45491ff5a7ec56c80e82aecd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Tue, 30 Apr 2024 17:14:47 +0200 Subject: [PATCH 099/218] Fix some typos (#3211) --- .github/dependabot.yml | 2 +- .github/workflows/ci_docs.yml | 2 +- doc/changes.md | 2 +- doc/contributing.md | 2 +- pygmt/datatypes/dataset.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 639bdce3e44..94aa41f2fcb 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,7 +6,7 @@ updates: - package-ecosystem: "github-actions" directory: "/" schedule: - # Check for updates to GitHub Actions every weekday + # Check for updates to GitHub Actions on Tuesdays interval: "weekly" day: "tuesday" # Allow up to 2 open pull requests at a time diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index cd92a4484c6..5437ad95511 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -158,7 +158,7 @@ jobs: # to get the right commit hash. 
message="Deploy $version from $(git rev-parse --short HEAD)" cd deploy - # Need to have this file so that Github doesn't try to run Jekyll + # Need to have this file so that GitHub doesn't try to run Jekyll touch .nojekyll # Delete all the files and replace with our new set echo -e "\nRemoving old files from previous builds of ${version}:" diff --git a/doc/changes.md b/doc/changes.md index d3eea0bcccd..bcb815dd6cf 100644 --- a/doc/changes.md +++ b/doc/changes.md @@ -858,7 +858,7 @@ * Improve the DVC image diff workflow to support side-by-side comparison of modified images ([#1219](https://github.com/GenericMappingTools/pygmt/pull/1219)) * Document the deprecation policy and add the deprecate_parameter decorator to deprecate parameters ([#1160](https://github.com/GenericMappingTools/pygmt/pull/1160)) * Convert booleans arguments in build_arg_string, not in kwargs_to_strings ([#1125](https://github.com/GenericMappingTools/pygmt/pull/1125)) -* Create Github Action workflow for reporting DVC image diffs ([#1104](https://github.com/GenericMappingTools/pygmt/pull/1104)) +* Create GitHub Action workflow for reporting DVC image diffs ([#1104](https://github.com/GenericMappingTools/pygmt/pull/1104)) * Update "GMT Dev Tests" workflow to test macOS-11.0 and pre-release Python packages ([#1105](https://github.com/GenericMappingTools/pygmt/pull/1105)) * Initialize data version control for managing test images ([#1036](https://github.com/GenericMappingTools/pygmt/pull/1036)) * Separate workflows for running tests and building documentation ([#1033](https://github.com/GenericMappingTools/pygmt/pull/1033)) diff --git a/doc/contributing.md b/doc/contributing.md index 46ca360378b..8ee7468d1d8 100644 --- a/doc/contributing.md +++ b/doc/contributing.md @@ -664,7 +664,7 @@ summarized as follows: mv baseline/*.png pygmt/tests/baseline/ # Generate hash for baseline image and stage the *.dvc file in git - dvc status # check which files need to be added to dvc + dvc status # Check which files need to be added to dvc dvc add pygmt/tests/baseline/test_logo.png git add pygmt/tests/baseline/test_logo.png.dvc diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index e5df4a2b4a0..f9a2fa19803 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -161,7 +161,7 @@ def to_strings(self) -> np.ndarray[Any, np.dtype[np.str_]]: # Workaround for upstream GMT bug reported in # https://github.com/GenericMappingTools/pygmt/issues/3170. msg = ( - "The trailing text column contains `None' values and has been replaced" + "The trailing text column contains 'None' values and has been replaced " "with empty strings to avoid TypeError exceptions. " "It's likely caused by an upstream GMT API bug. " "Please consider reporting to us." From 82f11741a7b455af30f74e5e5483b1585bd30e5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 23:27:09 +0800 Subject: [PATCH 100/218] Bump actions/checkout from 4.1.1 to 4.1.4 (#3216) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.1 to 4.1.4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4.1.1...v4.1.4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- .github/workflows/cache_data.yaml | 2 +- .github/workflows/check-links.yml | 4 ++-- .github/workflows/ci_docs.yml | 4 ++-- .github/workflows/ci_doctests.yaml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- .github/workflows/ci_tests_legacy.yaml | 2 +- .github/workflows/dvc-diff.yml | 2 +- .github/workflows/format-command.yml | 2 +- .github/workflows/publish-to-pypi.yml | 2 +- .github/workflows/release-baseline-images.yml | 2 +- .github/workflows/style_checks.yaml | 2 +- .github/workflows/type_checks.yml | 2 +- 14 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index af44301acfd..2142ef70b24 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -37,7 +37,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/cache_data.yaml b/.github/workflows/cache_data.yaml index 0630262b483..06ee9a229c9 100644 --- a/.github/workflows/cache_data.yaml +++ b/.github/workflows/cache_data.yaml @@ -36,7 +36,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 8e9f58aeff8..71e1f82150e 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -23,12 +23,12 @@ jobs: steps: - name: Checkout the repository - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: path: repository - name: Checkout the documentation - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: ref: gh-pages path: documentation diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 5437ad95511..63c8a4d4967 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -69,7 +69,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that setuptools-scm works fetch-depth: 0 @@ -135,7 +135,7 @@ jobs: run: make -C doc clean all - name: Checkout the gh-pages branch - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: ref: gh-pages # Checkout to this folder instead of the current one diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index 0bea2a9a2f5..76197f3079e 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -35,7 +35,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index cf5d473963f..53bd6869623 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -92,7 +92,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff 
--git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index e57786a4e9c..0d86f32162f 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -47,7 +47,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index 94ed7eec2c9..70b4fcb2694 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -44,7 +44,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/dvc-diff.yml b/.github/workflows/dvc-diff.yml index 5792a5cd64f..b8bcbd28de0 100644 --- a/.github/workflows/dvc-diff.yml +++ b/.github/workflows/dvc-diff.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that dvc diff works fetch-depth: 0 diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index e2cffd7b2cd..4ef2837c67d 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -18,7 +18,7 @@ jobs: private-key: ${{ secrets.APP_PRIVATE_KEY }} # Checkout the pull request branch - - uses: actions/checkout@v4.1.1 + - uses: actions/checkout@v4.1.4 with: token: ${{ steps.generate-token.outputs.token }} repository: ${{ github.event.client_payload.pull_request.head.repo.full_name }} diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 1a2b06b5e7c..50a151a1457 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -45,7 +45,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/release-baseline-images.yml b/.github/workflows/release-baseline-images.yml index b6f28b7cfc4..2b58fa2898e 100644 --- a/.github/workflows/release-baseline-images.yml +++ b/.github/workflows/release-baseline-images.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 - name: Setup data version control (DVC) uses: iterative/setup-dvc@v1.1.2 diff --git a/.github/workflows/style_checks.yaml b/.github/workflows/style_checks.yaml index 036f642ec82..31b9d99a3b2 100644 --- a/.github/workflows/style_checks.yaml +++ b/.github/workflows/style_checks.yaml @@ -24,7 +24,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 # Setup Python - name: Set up Python diff --git a/.github/workflows/type_checks.yml b/.github/workflows/type_checks.yml index 4d0ec5c3494..b0229155395 100644 --- a/.github/workflows/type_checks.yml +++ b/.github/workflows/type_checks.yml @@ -33,7 +33,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.1.4 # Setup Python - name: Set up Python From f39d48c30748ecf5f43a347b01d435ce7d015910 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 23:42:23 +0800 Subject: [PATCH 
101/218] Bump conda-incubator/setup-miniconda from 3.0.3 to 3.0.4 (#3215) Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 3.0.3 to 3.0.4. - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/v3.0.3...v3.0.4) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 2142ef70b24..daac43a38c7 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -44,7 +44,7 @@ jobs: # Install Miniconda with conda-forge dependencies - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v3.0.3 + uses: conda-incubator/setup-miniconda@v3.0.4 with: auto-activate-base: true activate-environment: "" # base environment From e277315437297de79339e7123f6246db573d6527 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 09:38:41 +1200 Subject: [PATCH 102/218] Bump lycheeverse/lychee-action from 1.9.3 to 1.10.0 (#3217) Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 1.9.3 to 1.10.0. - [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v1.9.3...v1.10.0) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/check-links.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 71e1f82150e..b4bf93575a0 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -35,7 +35,7 @@ jobs: - name: Link Checker id: lychee - uses: lycheeverse/lychee-action@v1.9.3 + uses: lycheeverse/lychee-action@v1.10.0 with: # 429: Too many requests args: > From 317e22fee66b49403c57187b7dc99aef638d7c52 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 09:39:11 +1200 Subject: [PATCH 103/218] Bump CodSpeedHQ/action from 2.3.0 to 2.4.1 (#3218) Bumps [CodSpeedHQ/action](https://github.com/codspeedhq/action) from 2.3.0 to 2.4.1. - [Release notes](https://github.com/codspeedhq/action/releases) - [Changelog](https://github.com/CodSpeedHQ/action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codspeedhq/action/compare/v2.3.0...v2.4.1) --- updated-dependencies: - dependency-name: CodSpeedHQ/action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index daac43a38c7..67a5d71dbf1 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -82,7 +82,7 @@ jobs: # Run the benchmark tests - name: Run benchmarks - uses: CodSpeedHQ/action@v2.3.0 + uses: CodSpeedHQ/action@v2.4.1 with: run: | python -c "import pygmt; pygmt.show_versions()" From 3986056b1c3200ba3356e7179668d1d4524e2e31 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 1 May 2024 19:10:09 +0800 Subject: [PATCH 104/218] Changelog entry for v0.12.0 (#3201) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Twelfth minor release of PyGMT * Add v0.12.0 to doc/_static/version_switch.js * Copy draft release note to doc/changes.md * Update the order of contributors * Add v0.12.0 to doc/minversions.md * Update DOI badge * Update citation information * Sort entries * Update the author order of Yvonne --------- Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> --- CITATION.cff | 14 +++--- README.md | 12 ++--- doc/_static/version_switch.js | 1 + doc/changes.md | 89 +++++++++++++++++++++++++++++++++++ doc/minversions.md | 3 ++ 5 files changed, 106 insertions(+), 13 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index bc92a5fcccf..ee4d5ded691 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -16,14 +16,14 @@ authors: family-names: Leong affiliation: Development Seed, USA orcid: https://orcid.org/0000-0003-2354-1988 -- given-names: William - family-names: Schlitzer - affiliation: Unaffiliated - orcid: https://orcid.org/0000-0002-5843-2282 - given-names: Yvonne family-names: Fröhlich affiliation: Karlsruhe Institute of Technology, Germany orcid: https://orcid.org/0000-0002-8566-0619 +- given-names: William + family-names: Schlitzer + affiliation: Unaffiliated + orcid: https://orcid.org/0000-0002-5843-2282 - given-names: Michael family-names: Grund affiliation: SNP Innovation Lab GmbH, Germany @@ -76,9 +76,9 @@ authors: family-names: Wessel affiliation: University of Hawaiʻi at Mānoa, USA orcid: https://orcid.org/0000-0001-5708-7336 -date-released: 2024-02-01 -doi: 10.5281/zenodo.10578540 +date-released: 2024-05-01 +doi: 10.5281/zenodo.11062720 license: BSD-3-Clause repository-code: https://github.com/GenericMappingTools/pygmt type: software -version: 0.11.0 +version: 0.12.0 diff --git a/README.md b/README.md index de5a1fe7d61..9782b0f6517 100644 --- a/README.md +++ b/README.md @@ -129,12 +129,12 @@ research using the following BibTeX: ``` @software{ - pygmt_2024_10578540, + pygmt_2024_11062720, author = {Tian, Dongdong and Uieda, Leonardo and Leong, Wei Ji and - Schlitzer, William and Fröhlich, Yvonne and + Schlitzer, William and Grund, Michael and Jones, Max and Toney, Liam and @@ -149,12 +149,12 @@ research using the following BibTeX: Quinn, Jamie and Wessel, Paul}, title = {{PyGMT: A Python interface for the Generic Mapping Tools}}, - month = feb, + month = may, year = 2024, publisher = {Zenodo}, - version = {0.11.0}, - doi = {10.5281/zenodo.10578540}, - url = {https://doi.org/10.5281/zenodo.10578540} + version = {0.12.0}, + doi = 
{10.5281/zenodo.11062720}, + url = {https://doi.org/10.5281/zenodo.11062720} } ``` diff --git a/doc/_static/version_switch.js b/doc/_static/version_switch.js index f4545ebbe4e..f79874cb998 100644 --- a/doc/_static/version_switch.js +++ b/doc/_static/version_switch.js @@ -12,6 +12,7 @@ var all_versions = { 'latest': 'latest', 'dev': 'dev', + 'v0.12.0': 'v0.12.0', 'v0.11.0': 'v0.11.0', 'v0.10.0': 'v0.10.0', 'v0.9.0': 'v0.9.0', diff --git a/doc/changes.md b/doc/changes.md index bcb815dd6cf..e9a1a6ca17c 100644 --- a/doc/changes.md +++ b/doc/changes.md @@ -1,5 +1,94 @@ # Changelog +## Release v0.12.0 (2024/05/01) + +[![Digital Object Identifier for PyGMT v0.12.0](https://zenodo.org/badge/DOI/10.5281/zenodo.11062720.svg)](https://doi.org/10.5281/zenodo.11062720) + +### Highlights + +* 🎉 **Twelfth minor release of PyGMT** 🎉 +* 🚀 Almost all module wrappers (with a few exceptions) now use in-memory GMT *virtual files* instead of intermediate temporary files to improve performance ([#2730](https://github.com/GenericMappingTools/pygmt/issues/2730)) +* Almost all module wrappers (with a few exceptions) now have consistent behavior for table-like output ([#1318](https://github.com/GenericMappingTools/pygmt/issues/1318)) +* Adopt [SPEC 0](https://scientific-python.org/specs/spec-0000/) policy for minimum supported versions of GMT, Python, and other core dependencies + +### Enhancements + +* pygmt.project: Add 'output_type' parameter for output in pandas/numpy/file formats ([#3110](https://github.com/GenericMappingTools/pygmt/pull/3110)) +* pygmt.grdtrack: Add 'output_type' parameter for output in pandas/numpy/file formats ([#3106](https://github.com/GenericMappingTools/pygmt/pull/3106)) +* pygmt.blockm*: Add 'output_type' parameter for output in pandas/numpy/file formats ([#3103](https://github.com/GenericMappingTools/pygmt/pull/3103)) +* Figure.grdcontour: Adjust processing arguments passed to "annotation" and "interval" parameters ([#3116](https://github.com/GenericMappingTools/pygmt/pull/3116)) +* Figure.contour: Adjust processing arguments passed to "annotation" and "levels" parameters ([#2706](https://github.com/GenericMappingTools/pygmt/pull/2706)) +* clib: Wrap the GMT API function GMT_Read_VirtualFile ([#2993](https://github.com/GenericMappingTools/pygmt/pull/2993)) +* clib: Add virtualfile_to_dataset method for converting virtualfile to a dataset ([#3083](https://github.com/GenericMappingTools/pygmt/pull/3083), [#3140](https://github.com/GenericMappingTools/pygmt/pull/3140), [#3157](https://github.com/GenericMappingTools/pygmt/pull/3157), +[#3117](https://github.com/GenericMappingTools/pygmt/pull/3117)) +* clib: Add the virtualfile_out method for creating output virtualfile ([#3057](https://github.com/GenericMappingTools/pygmt/pull/3057)) +* Wrap GMT_Inquire_VirtualFile to get the family of virtualfiles ([#3152](https://github.com/GenericMappingTools/pygmt/pull/3152)) +* Wrap GMT's standard data type GMT_GRID for grids ([#2398](https://github.com/GenericMappingTools/pygmt/pull/2398)) +* Wrap GMT's standard data type GMT_DATASET for table inputs ([#2729](https://github.com/GenericMappingTools/pygmt/pull/2729), [#3131](https://github.com/GenericMappingTools/pygmt/pull/3131), [#3174](https://github.com/GenericMappingTools/pygmt/pull/3174)) +* Wrap GMT's data structure GMT_GRID_HEADER for grid/image/cube headers ([#3127](https://github.com/GenericMappingTools/pygmt/pull/3127), [#3134](https://github.com/GenericMappingTools/pygmt/pull/3134)) +* Session.call_module: Support passing a list of argument 
strings ([#3139](https://github.com/GenericMappingTools/pygmt/pull/3139)) +* Refactor the _load_remote_dataset function to load tiled and non-tiled grids in a consistent way ([#3120](https://github.com/GenericMappingTools/pygmt/pull/3120)) +* Refactor all wrappers to pass an argument list to Session.call_module ([#3132](https://github.com/GenericMappingTools/pygmt/pull/3132)) +* Add function build_arg_list for building arguments list from keyword dictionaries ([#3149](https://github.com/GenericMappingTools/pygmt/pull/3149)) +* Support left/right single quotation marks in text and arguments ([#3192](https://github.com/GenericMappingTools/pygmt/pull/3192)) +* non_ascii_to_octal: Return the input string if it only contains printable ASCII characters ([#3199](https://github.com/GenericMappingTools/pygmt/pull/3199)) + +### Deprecations + +* Figure.plot/plot3d/rose: Remove deprecated parameter "color", use "fill" instead (deprecated since v0.8.0) ([#3032](https://github.com/GenericMappingTools/pygmt/pull/3032)) +* Figure.velo: Remove deprecated parameters "color"/"uncertaintycolor", use "fill"/"uncertaintyfill" instead (deprecated since v0.8.0) ([#3034](https://github.com/GenericMappingTools/pygmt/pull/3034)) +* Figure.wiggle: Remove deprecated parameter "color", use "fillpositive"/"fillnegative" instead (deprecated since v0.8.0) ([#3035](https://github.com/GenericMappingTools/pygmt/pull/3035)) +* Figure.grdimage: Remove deprecated parameter "bit_color", use "bitcolor" instead (deprecated since v0.8.0) ([#3036](https://github.com/GenericMappingTools/pygmt/pull/3036)) +* Figure: Remove deprecated "xshift" ("X") and "yshift" ("Y") parameters, use "Figure.shift_origin" instead (deprecated since v0.8.0) ([#3044](https://github.com/GenericMappingTools/pygmt/pull/3044)) +* Figure: Remove deprecated "timestamp" ("U") parameter, use "Figure.timestamp" instead (deprecated since v0.9.0) ([#3045](https://github.com/GenericMappingTools/pygmt/pull/3045)) +* clib: Rename the "virtualfile_from_data" method to "virtualfile_in" ([#3068](https://github.com/GenericMappingTools/pygmt/pull/3068)) +* Deprecate the "build_arg_string" function, use build_arg_list instead (deprecated since v0.12.0, will be removed in v0.14.0) ([#3184](https://github.com/GenericMappingTools/pygmt/pull/3184)) +* Deprecate the "sequence_plus" converter, only used for the "annotation" parameter of Figure.grdcontour (deprecated since v0.12.0, will be removed in v0.14.0) ([#3207](https://github.com/GenericMappingTools/pygmt/pull/3207)) +* Figure.grdcontour: Deprecate parameter "interval" to "levels" (FutureWarning since v0.12.0, will be removed in v0.16.0) ([#3209](https://github.com/GenericMappingTools/pygmt/pull/3209)) + +### Documentation + +* External Resources: Add repository "gmt-pygmt-plotting" ([#3213](https://github.com/GenericMappingTools/pygmt/pull/3213)) +* Gallery example "Custom symbols": Mention own custom symbols ([#3186](https://github.com/GenericMappingTools/pygmt/pull/3186)) +* Intro "04 Table inputs": Document that a list of file names, pathlib.Path objects, URLs, or remote files is supported ([3214](https://github.com/GenericMappingTools/pygmt/pull/3214)) +* Tutorial "Plotting text": Rewrite to improve structure, explain more parameters, show list input ([#2760](https://github.com/GenericMappingTools/pygmt/pull/2760)) + +### Maintenance + +* pygmt.filter1d: Improve performance by storing output in virtual files ([#3085](https://github.com/GenericMappingTools/pygmt/pull/3085)) +* pygmt.grdvolume: Refactor to store output 
in virtual files instead of temporary files ([#3102](https://github.com/GenericMappingTools/pygmt/pull/3102)) +* pygmt.grdhisteq.compute_bins: Refactor to store output in virtual files instead of temporary files ([#3109](https://github.com/GenericMappingTools/pygmt/pull/3109)) +* pygmt.grd2xyz: Improve performance by storing output in virtual files ([#3097](https://github.com/GenericMappingTools/pygmt/pull/3097)) +* pygmt.select: Improve performance by storing output in virtual files ([#3108](https://github.com/GenericMappingTools/pygmt/pull/3108)) +* pygmt.triangulate.delaunay_triples: Improve performance by storing output in virtual files ([#3107](https://github.com/GenericMappingTools/pygmt/pull/3107)) +* pygmt.which: Refactor to get rid of temporary files ([#3148](https://github.com/GenericMappingTools/pygmt/pull/3148)) +* Use consistent names (vintbl and vingrd) for input virtual files ([#3082](https://github.com/GenericMappingTools/pygmt/pull/3082)) +* Add sequence_to_ctypes_array to convert a sequence to a ctypes array ([#3136](https://github.com/GenericMappingTools/pygmt/pull/3136)) +* Add strings_to_ctypes_array to convert a sequence of strings into a ctypes array ([#3137](https://github.com/GenericMappingTools/pygmt/pull/3137)) +* Figure.psconvert: Ignore the unrecognized "metadata" parameter added by pytest-mpl v0.17.0 ([#3054](https://github.com/GenericMappingTools/pygmt/pull/3054)) +* Remote Datasets: Adjust attributes - remove "title", use default of "name" and "long_name", introduce "description" ([#3048](https://github.com/GenericMappingTools/pygmt/pull/3048)) +* Adopt SPEC 0 policy and drop NEP 29 policy ([#3037](https://github.com/GenericMappingTools/pygmt/pull/3037)) +* SPEC 0: Set minimum supported versions to Python>=3.10, pandas>=1.5 and xarray>=2022.06 ([#3043](https://github.com/GenericMappingTools/pygmt/pull/3043), [#3039](https://github.com/GenericMappingTools/pygmt/pull/3039), [#3151](https://github.com/GenericMappingTools/pygmt/pull/3151)) +* Document the support policy for minimum required GMT versions ([#3070](https://github.com/GenericMappingTools/pygmt/pull/3070)) +* Bump to ghostscript 10.03.0 ([#3112](https://github.com/GenericMappingTools/pygmt/pull/3112)) +* Bump to ruff 0.3.0 ([#3081](https://github.com/GenericMappingTools/pygmt/pull/3081)) +* Enable ruff's PTH (flake8-use-pathlib) rules and fix violations ([#3129](https://github.com/GenericMappingTools/pygmt/pull/3129)) +* Change the dev dependency "matplotlib" to "matplotlib-base" to reduce environment size ([#3158](https://github.com/GenericMappingTools/pygmt/pull/3158)) +* Migrate from os.path to pathlib ([#3119](https://github.com/GenericMappingTools/pygmt/pull/3119)) +* CI: Use "gh release" to upload assets to release ([#3187](https://github.com/GenericMappingTools/pygmt/pull/3187)) +* CI: Consistently use github.token instead of secrets.GITHUB_TOKEN ([#3189](https://github.com/GenericMappingTools/pygmt/pull/3189)) +* CI: Configure workflows to run on "workflow_dispatch" event ([#3133](https://github.com/GenericMappingTools/pygmt/pull/3133)) +* Switch to official GitHub action for managing app tokens ([#3165](https://github.com/GenericMappingTools/pygmt/pull/3165)) + +**Full Changelog**: + +### Contributors + +* [Dongdong Tian](https://github.com/seisman) +* [Yvonne Fröhlich](https://github.com/yvonnefroehlich) +* [Michael Grund](https://github.com/michaelgrund) +* [Wei Ji Leong](https://github.com/weiji14) + ## Release v0.11.0 (2024/02/01) [![Digital Object Identifier for PyGMT 
v0.11.0](https://zenodo.org/badge/DOI/10.5281/zenodo.10578540.svg)](https://doi.org/10.5281/zenodo.10578540) diff --git a/doc/minversions.md b/doc/minversions.md index b2fa02cd9f4..50633d9776f 100644 --- a/doc/minversions.md +++ b/doc/minversions.md @@ -13,6 +13,7 @@ after their initial release. | PyGMT Version | GMT | Python | NumPy | Pandas | Xarray | |---|---|---|---|---|---| | [Dev][]* [[Docs][Docs Dev]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | +| [v0.12.0][]* [[Docs][Docs v0.12.0]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | | [v0.11.0][] [[Docs][Docs v0.11.0]] | >=6.3.0 | >=3.9 | >=1.23 | | | | [v0.10.0][] [[Docs][Docs v0.10.0]] | >=6.3.0 | >=3.9 | >=1.22 | | | | [v0.9.0][] [[Docs][Docs v0.9.0]] | >=6.3.0 | >=3.8 | >=1.21 | | | @@ -34,6 +35,7 @@ after their initial release. *Dev reflects the main branch and is for the upcoming release. [Dev]: https://github.com/GenericMappingTools/pygmt/milestones +[v0.12.0]: https://github.com/GenericMappingTools/pygmt/releases/tag/v0.12.0 [v0.11.0]: https://github.com/GenericMappingTools/pygmt/releases/tag/v0.11.0 [v0.10.0]: https://github.com/GenericMappingTools/pygmt/releases/tag/v0.10.0 [v0.9.0]: https://github.com/GenericMappingTools/pygmt/releases/tag/v0.9.0 @@ -53,6 +55,7 @@ after their initial release. [v0.1.0]: https://github.com/GenericMappingTools/pygmt/releases/tag/v0.1.0 [Docs Dev]: https://www.pygmt.org/dev +[Docs v0.12.0]: https://www.pygmt.org/v0.12.0 [Docs v0.11.0]: https://www.pygmt.org/v0.11.0 [Docs v0.10.0]: https://www.pygmt.org/v0.10.0 [Docs v0.9.0]: https://www.pygmt.org/v0.9.0 From 19edbd3c31e76d9bb4cbd51049b6f48bd8a6e087 Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Thu, 2 May 2024 11:58:14 +1200 Subject: [PATCH 105/218] Change NEP29 to SPEC0 in release checklist (#3221) Patches #3037 --- .github/ISSUE_TEMPLATE/4-release_checklist.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/4-release_checklist.md b/.github/ISSUE_TEMPLATE/4-release_checklist.md index 9d142dd9e3e..5af32bc1c9d 100644 --- a/.github/ISSUE_TEMPLATE/4-release_checklist.md +++ b/.github/ISSUE_TEMPLATE/4-release_checklist.md @@ -43,7 +43,7 @@ assignees: '' - [ ] Manually upload the pygmt-vX.Y.Z.zip and baseline-images.zip files to https://zenodo.org/deposit, ensure that it is filed under the correct reserved DOI **After release**: -- [ ] Update conda-forge [pygmt-feedstock](https://github.com/conda-forge/pygmt-feedstock) [Done automatically by conda-forge's bot, but remember to pin NEP29 versions] +- [ ] Update conda-forge [pygmt-feedstock](https://github.com/conda-forge/pygmt-feedstock) [Done automatically by conda-forge's bot, but remember to pin SPEC0 versions] - [ ] Bump PyGMT version on https://github.com/GenericMappingTools/try-gmt (after conda-forge update) - [ ] Announce the release on: - [ ] GMT [forum](https://forum.generic-mapping-tools.org/c/news/) (do this announcement first! draft on https://hackmd.io/@pygmt. 
requires moderator status) From 50daf276da86707ea8383b4044aded23ad27e720 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sat, 4 May 2024 10:05:03 +0200 Subject: [PATCH 106/218] Minimum supported version: Fix typo (#3223) --- doc/minversions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/minversions.md b/doc/minversions.md index 50633d9776f..98223a62125 100644 --- a/doc/minversions.md +++ b/doc/minversions.md @@ -13,7 +13,7 @@ after their initial release. | PyGMT Version | GMT | Python | NumPy | Pandas | Xarray | |---|---|---|---|---|---| | [Dev][]* [[Docs][Docs Dev]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | -| [v0.12.0][]* [[Docs][Docs v0.12.0]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | +| [v0.12.0][] [[Docs][Docs v0.12.0]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | | [v0.11.0][] [[Docs][Docs v0.11.0]] | >=6.3.0 | >=3.9 | >=1.23 | | | | [v0.10.0][] [[Docs][Docs v0.10.0]] | >=6.3.0 | >=3.9 | >=1.22 | | | | [v0.9.0][] [[Docs][Docs v0.9.0]] | >=6.3.0 | >=3.8 | >=1.21 | | | From b2eda1e818b672a58b7a9e67f8b378be696e9578 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 6 May 2024 06:36:23 +0800 Subject: [PATCH 107/218] CI: Disable the Benchmarks workflow on release events (#3224) --- .github/workflows/benchmarks.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 67a5d71dbf1..8d978093775 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -1,7 +1,7 @@ # Run performance benchmarks # -# Continuous benchmarking using pytest-codspeed. Measures the execution speed -# of tests marked with @pytest.mark.benchmark decorator. +# Continuous benchmarking using pytest-codspeed. Measures the execution speed of tests +# marked with @pytest.mark.benchmark decorator. name: Benchmarks @@ -15,12 +15,9 @@ on: # Run in PRs but only if the PR has the 'run/benchmark' label pull_request: types: [ opened, reopened, labeled, synchronize ] - # `workflow_dispatch` allows CodSpeed to trigger backtest - # performance analysis in order to generate initial data. + # 'workflow_dispatch' allows CodSpeed to trigger backtest performance analysis + # in order to generate initial data. 
workflow_dispatch: - release: - types: - - published concurrency: group: ${{ github.workflow }}-${{ github.ref }} From 8605895eea8530d418f84c6ba4a7fe9368a9a139 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 6 May 2024 06:41:00 +0800 Subject: [PATCH 108/218] CI: Temporarily pin to numpy<2 in the "GMT Dev Tests" workflow (#3208) --- .github/workflows/ci_tests_dev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 0d86f32162f..7e1fd7a7763 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -120,7 +120,7 @@ jobs: run: | python -m pip install --pre --prefer-binary \ --extra-index https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ - numpy pandas xarray netCDF4 packaging \ + 'numpy<2' pandas xarray netCDF4 packaging \ build contextily dvc geopandas ipython pyarrow rioxarray \ 'pytest>=6.0' pytest-cov pytest-doctestplus pytest-mpl \ sphinx-gallery From bfe033b0dc4916bf6fe031e9c4a4333ec408bbfe Mon Sep 17 00:00:00 2001 From: Michael Grund <23025878+michaelgrund@users.noreply.github.com> Date: Mon, 6 May 2024 02:48:16 +0200 Subject: [PATCH 109/218] Figure.timestamp: Remove deprecated parameter 'justification', use justify instead (deprecated since v0.11.0) (#3222) --- pygmt/src/timestamp.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pygmt/src/timestamp.py b/pygmt/src/timestamp.py index 1dbd8a86c17..d091b8e300c 100644 --- a/pygmt/src/timestamp.py +++ b/pygmt/src/timestamp.py @@ -9,7 +9,7 @@ from packaging.version import Version from pygmt.clib import Session, __gmt_version__ -from pygmt.helpers import build_arg_list, deprecate_parameter, kwargs_to_strings +from pygmt.helpers import build_arg_list, kwargs_to_strings if TYPE_CHECKING: from collections.abc import Sequence @@ -18,7 +18,6 @@ __doctest_skip__ = ["timestamp"] -@deprecate_parameter("justification", "justify", "v0.11.0", remove_version="v0.13.0") @kwargs_to_strings(offset="sequence") def timestamp( self, From 63129e576acef3780c0c41cc706ed74d41129e8e Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 6 May 2024 09:43:12 +0800 Subject: [PATCH 110/218] clib: Deprecate API function 'Session.virtualfile_from_data', use 'Session.virtualfile_in' instead (will be removed in v0.15.0) (#3225) --- pygmt/clib/session.py | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index ba3644f0e28..46bce82d692 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -1644,8 +1644,41 @@ def virtualfile_in( # noqa: PLR0912 return file_context - # virtualfile_from_data was renamed to virtualfile_in since v0.12.0. - virtualfile_from_data = virtualfile_in + def virtualfile_from_data( + self, + check_kind=None, + data=None, + x=None, + y=None, + z=None, + extra_arrays=None, + required_z=False, + required_data=True, + ): + """ + Store any data inside a virtual file. + + .. deprecated: 0.13.0 + + Will be removed in v0.15.0. Use :meth:`pygmt.clib.Session.virtualfile_in` + instead. + """ + msg = ( + "API function 'Session.virtualfile_from_datae()' has been deprecated since " + "v0.13.0 and will be removed in v0.15.0. Use 'Session.virtualfile_in()' " + "instead." 
+ ) + warnings.warn(msg, category=FutureWarning, stacklevel=2) + return self.virtualfile_in( + check_kind=check_kind, + data=data, + x=x, + y=y, + z=z, + extra_arrays=extra_arrays, + required_z=required_z, + required_data=required_data, + ) @contextlib.contextmanager def virtualfile_out( From a1bd3e6aa2874cc215c071aacb6e15f7fde5f6b1 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 6 May 2024 12:43:39 +0800 Subject: [PATCH 111/218] CI: Simplify the step for downloading GMT cached files (#3227) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 7 ++----- .github/workflows/ci_docs.yml | 7 ++----- .github/workflows/ci_doctests.yaml | 7 ++----- .github/workflows/ci_tests.yaml | 7 ++----- .github/workflows/ci_tests_dev.yaml | 7 ++----- .github/workflows/ci_tests_legacy.yaml | 8 +++----- 6 files changed, 13 insertions(+), 30 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 8d978093775..2230181542a 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -62,11 +62,8 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub run: | - gh run download -n gmt-cache -D gmt-cache - # Move downloaded files to ~/.gmt directory and list them - mkdir -p ~/.gmt - mv gmt-cache/* ~/.gmt - rmdir gmt-cache + # Download cached files to ~/.gmt directory and list them + gh run download --name gmt-cache --dir ~/.gmt/ # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 63c8a4d4967..fd2127e0627 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -113,11 +113,8 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub run: | - gh run download -n gmt-cache -D gmt-cache - # Move downloaded files to ~/.gmt directory and list them - mkdir -p ~/.gmt - mv gmt-cache/* ~/.gmt - rmdir gmt-cache + # Download cached files to ~/.gmt directory and list them + gh run download --name gmt-cache --dir ~/.gmt/ # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index 76197f3079e..bc54e8aebbc 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -72,11 +72,8 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub run: | - gh run download -n gmt-cache -D gmt-cache - # Move downloaded files to ~/.gmt directory and list them - mkdir -p ~/.gmt - mv gmt-cache/* ~/.gmt - rmdir gmt-cache + # Download cached files to ~/.gmt directory and list them + gh run download --name gmt-cache --dir ~/.gmt/ # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 53bd6869623..cae2be4062e 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -129,11 +129,8 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub run: | - gh run download -n gmt-cache -D gmt-cache - # Move downloaded files to ~/.gmt directory and list 
them - mkdir -p ~/.gmt - mv gmt-cache/* ~/.gmt - rmdir gmt-cache + # Download files to ~/.gmt directory and list them + gh run download --name gmt-cache --dir ~/.gmt/ # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 7e1fd7a7763..244099e7958 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -136,11 +136,8 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub run: | - gh run download -n gmt-cache -D gmt-cache - # Move downloaded files to ~/.gmt directory and list them - mkdir -p ~/.gmt - mv gmt-cache/* ~/.gmt - rmdir gmt-cache + # Download cached files to ~/.gmt directory and list them + gh run download --name gmt-cache --dir ~/.gmt/ # Change modification times of the two files, so GMT won't refresh it touch ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ls -lhR ~/.gmt diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index 70b4fcb2694..0b049732c5c 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -84,14 +84,12 @@ jobs: # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub run: | - gh run download -n gmt-cache -D gmt-cache - # Move downloaded files to ~/.gmt directory and list them - mkdir -p ~/.gmt - mv gmt-cache/* ~/.gmt - rmdir gmt-cache + # Download cached files to ~/.gmt directory and list them + gh run download --name gmt-cache --dir ~/.gmt/ # Change modification times of the two files, so GMT won't refresh it # The two files are in the `~/.gmt/server` directory for GMT<=6.4, and # in the `~/.gmt` directory for GMT>=6.5. 
+ mkdir -p ~/.gmt/server/ mv ~/.gmt/gmt_data_server.txt ~/.gmt/gmt_hash_server.txt ~/.gmt/server/ touch ~/.gmt/server/gmt_data_server.txt ~/.gmt/server/gmt_hash_server.txt ls -lhR ~/.gmt From c783a79ed55068a424eb913d271fa880fa44928d Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 6 May 2024 14:52:28 +0800 Subject: [PATCH 112/218] CI: Fix typos in .github/workflows/check-links.yml (#3228) Patches #3166 --- .github/workflows/check-links.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index b4bf93575a0..1bb7c4263ef 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -71,5 +71,5 @@ jobs: run: | title="Link Checker Report on ${{ steps.date.outputs.date }}" gh issue create --title "$title" --body-file ./lychee/out.md - env: - GH_TOKEN: ${{ github.token }} + env: + GH_TOKEN: ${{ github.token }} From a4d2b8e12cf1d10e31645768db0aae4253a058a3 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 7 May 2024 09:11:28 +0800 Subject: [PATCH 113/218] Allow validate_output_table_type to specify the supported output types (#3191) --- pygmt/helpers/validators.py | 28 +++++++++++++++++++++------- pygmt/src/triangulate.py | 2 +- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/pygmt/helpers/validators.py b/pygmt/helpers/validators.py index 94916eac1f5..eb040d189ec 100644 --- a/pygmt/helpers/validators.py +++ b/pygmt/helpers/validators.py @@ -3,13 +3,16 @@ """ import warnings +from collections.abc import Sequence from typing import Literal from pygmt.exceptions import GMTInvalidInput def validate_output_table_type( - output_type: Literal["pandas", "numpy", "file"], outfile: str | None = None + output_type: Literal["pandas", "numpy", "file"], + valid_types: Sequence[str] = ("pandas", "numpy", "file"), + outfile: str | None = None, ) -> Literal["pandas", "numpy", "file"]: """ Check if the ``output_type`` and ``outfile`` parameters are valid. @@ -17,8 +20,10 @@ def validate_output_table_type( Parameters ---------- output_type - Desired output type of tabular data. Valid values are ``"pandas"``, - ``"numpy"`` and ``"file"``. + Desired output type of tabular data. Default valid values are ``"pandas"``, + ``"numpy"`` and ``"file"``, but can be configured by parameter ``valid_types``. + valid_types + Tuple of valid desired output types. outfile File name for saving the result data. Required if ``output_type`` is ``"file"``. If specified, ``output_type`` will be forced to be ``"file"``. @@ -36,23 +41,32 @@ def validate_output_table_type( 'numpy' >>> validate_output_table_type(output_type="file", outfile="output-fname.txt") 'file' + >>> validate_output_table_type(output_type="pandas", valid_types=("pandas", "file")) + 'pandas' >>> validate_output_table_type(output_type="invalid-type") Traceback (most recent call last): ... - pygmt.exceptions.GMTInvalidInput: Must specify 'output_type' either as 'file', ... + pygmt.exceptions.GMTInvalidInput: Must specify 'output_type' as 'pandas', ... >>> validate_output_table_type("file", outfile=None) Traceback (most recent call last): ... pygmt.exceptions.GMTInvalidInput: Must specify 'outfile' for output_type='file'. + >>> validate_output_table_type(output_type="numpy", valid_types=("pandas", "file")) + Traceback (most recent call last): + ... + pygmt.exceptions.GMTInvalidInput: Must specify 'output_type' as 'pandas', or 'file'. >>> with warnings.catch_warnings(record=True) as w: ... 
validate_output_table_type("pandas", outfile="not-none.txt") ... assert len(w) == 1 'file' """ - if output_type not in ["file", "numpy", "pandas"]: - raise GMTInvalidInput( - "Must specify 'output_type' either as 'file', 'numpy', or 'pandas'." + if output_type not in valid_types: + msg = ( + "Must specify 'output_type' as " + + ", ".join(f"'{v}'" for v in valid_types[:-1]) + + f", or '{valid_types[-1]}'." ) + raise GMTInvalidInput(msg) if output_type == "file" and outfile is None: raise GMTInvalidInput("Must specify 'outfile' for output_type='file'.") if output_type != "file" and outfile is not None: diff --git a/pygmt/src/triangulate.py b/pygmt/src/triangulate.py index f1b64db38ec..1765bd1d28e 100644 --- a/pygmt/src/triangulate.py +++ b/pygmt/src/triangulate.py @@ -233,7 +233,7 @@ def delaunay_triples( ``triangulate`` is a Cartesian or small-geographic area operator and is unaware of periodic or polar boundary conditions. """ - output_type = validate_output_table_type(output_type, outfile) + output_type = validate_output_table_type(output_type, outfile=outfile) with Session() as lib: with ( From 127e6578a2cd6822088e3b6226a9b2bf3fa5436c Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 7 May 2024 12:03:11 +0800 Subject: [PATCH 114/218] Revert "Allow validate_output_table_type to specify the supported output types (#3191)" (#3233) This reverts commit a4d2b8e12cf1d10e31645768db0aae4253a058a3. --- pygmt/helpers/validators.py | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/pygmt/helpers/validators.py b/pygmt/helpers/validators.py index eb040d189ec..94916eac1f5 100644 --- a/pygmt/helpers/validators.py +++ b/pygmt/helpers/validators.py @@ -3,16 +3,13 @@ """ import warnings -from collections.abc import Sequence from typing import Literal from pygmt.exceptions import GMTInvalidInput def validate_output_table_type( - output_type: Literal["pandas", "numpy", "file"], - valid_types: Sequence[str] = ("pandas", "numpy", "file"), - outfile: str | None = None, + output_type: Literal["pandas", "numpy", "file"], outfile: str | None = None ) -> Literal["pandas", "numpy", "file"]: """ Check if the ``output_type`` and ``outfile`` parameters are valid. @@ -20,10 +17,8 @@ def validate_output_table_type( Parameters ---------- output_type - Desired output type of tabular data. Default valid values are ``"pandas"``, - ``"numpy"`` and ``"file"``, but can be configured by parameter ``valid_types``. - valid_types - Tuple of valid desired output types. + Desired output type of tabular data. Valid values are ``"pandas"``, + ``"numpy"`` and ``"file"``. outfile File name for saving the result data. Required if ``output_type`` is ``"file"``. If specified, ``output_type`` will be forced to be ``"file"``. @@ -41,32 +36,23 @@ def validate_output_table_type( 'numpy' >>> validate_output_table_type(output_type="file", outfile="output-fname.txt") 'file' - >>> validate_output_table_type(output_type="pandas", valid_types=("pandas", "file")) - 'pandas' >>> validate_output_table_type(output_type="invalid-type") Traceback (most recent call last): ... - pygmt.exceptions.GMTInvalidInput: Must specify 'output_type' as 'pandas', ... + pygmt.exceptions.GMTInvalidInput: Must specify 'output_type' either as 'file', ... >>> validate_output_table_type("file", outfile=None) Traceback (most recent call last): ... pygmt.exceptions.GMTInvalidInput: Must specify 'outfile' for output_type='file'. 
- >>> validate_output_table_type(output_type="numpy", valid_types=("pandas", "file")) - Traceback (most recent call last): - ... - pygmt.exceptions.GMTInvalidInput: Must specify 'output_type' as 'pandas', or 'file'. >>> with warnings.catch_warnings(record=True) as w: ... validate_output_table_type("pandas", outfile="not-none.txt") ... assert len(w) == 1 'file' """ - if output_type not in valid_types: - msg = ( - "Must specify 'output_type' as " - + ", ".join(f"'{v}'" for v in valid_types[:-1]) - + f", or '{valid_types[-1]}'." + if output_type not in ["file", "numpy", "pandas"]: + raise GMTInvalidInput( + "Must specify 'output_type' either as 'file', 'numpy', or 'pandas'." ) - raise GMTInvalidInput(msg) if output_type == "file" and outfile is None: raise GMTInvalidInput("Must specify 'outfile' for output_type='file'.") if output_type != "file" and outfile is not None: From 7fc5279c07aac7f3e4d85b025e71e374285ec920 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 May 2024 07:43:50 +0800 Subject: [PATCH 115/218] Bump actions/checkout from 4.1.4 to 4.1.5 (#3235) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.4 to 4.1.5. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4.1.4...v4.1.5) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- .github/workflows/cache_data.yaml | 2 +- .github/workflows/check-links.yml | 4 ++-- .github/workflows/ci_docs.yml | 4 ++-- .github/workflows/ci_doctests.yaml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- .github/workflows/ci_tests_legacy.yaml | 2 +- .github/workflows/dvc-diff.yml | 2 +- .github/workflows/format-command.yml | 2 +- .github/workflows/publish-to-pypi.yml | 2 +- .github/workflows/release-baseline-images.yml | 2 +- .github/workflows/style_checks.yaml | 2 +- .github/workflows/type_checks.yml | 2 +- 14 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 2230181542a..58e17524e3b 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -34,7 +34,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/cache_data.yaml b/.github/workflows/cache_data.yaml index 06ee9a229c9..9fd1758e5ee 100644 --- a/.github/workflows/cache_data.yaml +++ b/.github/workflows/cache_data.yaml @@ -36,7 +36,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 1bb7c4263ef..8714cfcc3e7 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -23,12 +23,12 @@ jobs: steps: - name: Checkout the repository - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: path: repository - name: Checkout the 
documentation - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: ref: gh-pages path: documentation diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index fd2127e0627..4f886a67b69 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -69,7 +69,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that setuptools-scm works fetch-depth: 0 @@ -132,7 +132,7 @@ jobs: run: make -C doc clean all - name: Checkout the gh-pages branch - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: ref: gh-pages # Checkout to this folder instead of the current one diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index bc54e8aebbc..12763753120 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -35,7 +35,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index cae2be4062e..4c814e0318f 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -92,7 +92,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 244099e7958..020e08ec958 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -47,7 +47,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index 0b049732c5c..a2d6357ef1c 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -44,7 +44,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/dvc-diff.yml b/.github/workflows/dvc-diff.yml index b8bcbd28de0..25c92fbe235 100644 --- a/.github/workflows/dvc-diff.yml +++ b/.github/workflows/dvc-diff.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that dvc diff works fetch-depth: 0 diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 4ef2837c67d..797378e5ee7 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -18,7 +18,7 @@ jobs: private-key: ${{ secrets.APP_PRIVATE_KEY }} # Checkout the pull request branch - - uses: actions/checkout@v4.1.4 + - uses: actions/checkout@v4.1.5 with: token: ${{ steps.generate-token.outputs.token }} repository: ${{ github.event.client_payload.pull_request.head.repo.full_name }} diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 50a151a1457..fbe8e554b50 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -45,7 +45,7 
@@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/release-baseline-images.yml b/.github/workflows/release-baseline-images.yml index 2b58fa2898e..89ca51f265d 100644 --- a/.github/workflows/release-baseline-images.yml +++ b/.github/workflows/release-baseline-images.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 - name: Setup data version control (DVC) uses: iterative/setup-dvc@v1.1.2 diff --git a/.github/workflows/style_checks.yaml b/.github/workflows/style_checks.yaml index 31b9d99a3b2..442b7cb8dcb 100644 --- a/.github/workflows/style_checks.yaml +++ b/.github/workflows/style_checks.yaml @@ -24,7 +24,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 # Setup Python - name: Set up Python diff --git a/.github/workflows/type_checks.yml b/.github/workflows/type_checks.yml index b0229155395..b5fa9c2f9b1 100644 --- a/.github/workflows/type_checks.yml +++ b/.github/workflows/type_checks.yml @@ -33,7 +33,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.5 # Setup Python - name: Set up Python From 118a873a2d2d426a4241572de78b94f136fb1b7e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 May 2024 08:35:14 +0800 Subject: [PATCH 116/218] Bump codecov/codecov-action from 4.3.0 to 4.3.1 (#3236) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Dongdong Tian --- .github/workflows/ci_tests.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 4c814e0318f..4d7e8b09e8a 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -159,10 +159,9 @@ jobs: # Upload coverage to Codecov - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4.3.0 + uses: codecov/codecov-action@v4.3.1 with: file: ./coverage.xml # optional env_vars: OS,PYTHON,NUMPY fail_ci_if_error: false - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + token: ${{ secrets.CODECOV_TOKEN }} From 4fbf32f31c9c1f0bf6a5731dc1e6bfa63cf4d7b6 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 8 May 2024 20:03:58 +0800 Subject: [PATCH 117/218] clib: Improve Session.get_default docstring to clarify that GMT configuration parameters are supported (#3232) Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> --- pygmt/clib/session.py | 26 ++++++++++++-------------- pygmt/tests/test_clib.py | 1 + 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 46bce82d692..854eebb997a 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -453,11 +453,12 @@ def destroy(self): self.session_pointer = None - def get_default(self, name): + def get_default(self, name: str) -> str: """ - Get the value of a GMT default parameter (library version, paths, etc). + Get the value of a GMT configuration parameter or a GMT API parameter. 
- Possible default parameter names include: + In addition to the long list of GMT configuration parameters, the following API + parameter names are also supported: * ``"API_VERSION"``: The GMT API version * ``"API_PAD"``: The grid padding setting @@ -473,13 +474,14 @@ def get_default(self, name): Parameters ---------- - name : str - The name of the default parameter (e.g., ``"API_VERSION"``) + name + The name of the GMT configuration parameter (e.g., ``"PROJ_LENGTH_UNIT"``) + or a GMT API parameter (e.g., ``"API_VERSION"``). Returns ------- - value : str - The default value for the parameter. + value + The current value for the parameter. Raises ------ @@ -493,15 +495,11 @@ def get_default(self, name): ) # Make a string buffer to get a return value - value = ctp.create_string_buffer(10000) - + value = ctp.create_string_buffer(4096) status = c_get_default(self.session_pointer, name.encode(), value) - if status != 0: - raise GMTCLibError( - f"Error getting default value for '{name}' (error code {status})." - ) - + msg = f"Error getting value for '{name}' (error code {status})." + raise GMTCLibError(msg) return value.value.decode() def get_common(self, option): diff --git a/pygmt/tests/test_clib.py b/pygmt/tests/test_clib.py index 59a9f745983..bc3d294b96b 100644 --- a/pygmt/tests/test_clib.py +++ b/pygmt/tests/test_clib.py @@ -534,6 +534,7 @@ def test_get_default(): assert lib.get_default("API_GRID_LAYOUT") in ["rows", "columns"] assert int(lib.get_default("API_CORES")) >= 1 assert Version(lib.get_default("API_VERSION")) >= Version("6.3.0") + assert lib.get_default("PROJ_LENGTH_UNIT") == "cm" def test_get_default_fails(): From 2a663f7b26c9d949b6fc371ed32680b8a7e22fb0 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 11 May 2024 14:31:44 +0800 Subject: [PATCH 118/218] CI: Use OIDC token for codecov uploading (#3163) --- .github/workflows/ci_tests.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 4d7e8b09e8a..5c6ec2c948e 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -49,6 +49,8 @@ jobs: test: name: ${{ matrix.os }} - Python ${{ matrix.python-version }} / NumPy ${{ matrix.numpy-version }} runs-on: ${{ matrix.os }} + permissions: + id-token: write # This is required for requesting OIDC token for codecov strategy: fail-fast: false matrix: @@ -161,7 +163,7 @@ jobs: - name: Upload coverage to Codecov uses: codecov/codecov-action@v4.3.1 with: + use_oidc: true file: ./coverage.xml # optional env_vars: OS,PYTHON,NUMPY fail_ci_if_error: false - token: ${{ secrets.CODECOV_TOKEN }} From a17f23fd3cba8ee18a82bb1b0a94258f57b848cd Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 11 May 2024 20:23:04 +0800 Subject: [PATCH 119/218] Fix IPython package name from ipython to IPython (#3243) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0b9daf9ab16..773bf07f5e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,7 @@ dynamic = ["version"] all = [ "contextily", "geopandas", - "ipython", + "IPython", # 'ipython' is not the correct module name. 
"rioxarray", ] From cbbbd19a87bd343622a9527859e2082a6753cc8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 12 May 2024 02:41:13 +0200 Subject: [PATCH 120/218] Team Gallery page: Restructure in three subsections (#3240) Co-authored-by: Dongdong Tian --- .github/ISSUE_TEMPLATE/4-release_checklist.md | 1 + doc/team.md | 143 +++++++++--------- 2 files changed, 72 insertions(+), 72 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/4-release_checklist.md b/.github/ISSUE_TEMPLATE/4-release_checklist.md index 5af32bc1c9d..de85db93215 100644 --- a/.github/ISSUE_TEMPLATE/4-release_checklist.md +++ b/.github/ISSUE_TEMPLATE/4-release_checklist.md @@ -24,6 +24,7 @@ assignees: '' - [ ] All tests pass in the ["Doctests" workflow](https://github.com/GenericMappingTools/pygmt/actions/workflows/ci_doctests.yaml) - [ ] Deprecations and related tests are removed for this version by running `grep --include="*.py" -r 'remove_version="vX.Y.Z"' pygmt` from the base of the repository - [ ] Reserve a DOI on [Zenodo](https://zenodo.org) by clicking on "New Version" +- [ ] Review the ["PyGMT Team" page](https://www.pygmt.org/dev/team.html) - [ ] Finish up 'Changelog entry for v0.x.x' Pull Request: - [ ] Add a new entry in `doc/_static/version_switch.js` for documentation switcher - [ ] Update `CITATION.cff` and BibTeX at https://github.com/GenericMappingTools/pygmt#citing-pygmt diff --git a/doc/team.md b/doc/team.md index fb37ab56fd9..08a512bc979 100644 --- a/doc/team.md +++ b/doc/team.md @@ -1,120 +1,119 @@ -# Team Gallery +# PyGMT Team -We are an international team dedicated to building a Pythonic API for the -Generic Mapping Tools (GMT). Our goal is to improve GMT's accessibility for -new and experienced users by creating user-friendly interfaces with the GMT -C API, supporting rich display in Jupyter notebooks, and integrating with -the PyData ecosystem. +We are an international team dedicated to building a Pythonic API for the Generic Mapping +Tools (GMT). -All are welcome to become involved with the PyGMT project! For more information -about how to get involved, see the {doc}`contributing`. +All are welcome to become involved with the PyGMT project! For more information about how +to get involved, see the {doc}`contributing`. -## Distinguished Contributors - -PyGMT Distinguished Contributors are recognized for their substantial -contributions to PyGMT, which may include code, documentation, pull request -review, triaging, forum responses, community building and engagement, -outreach, and inclusion and diversity. New Distinguished Contributors are -selected twice per year by those listed below. +Distinguished Contributors are recognized for their substantial contributions to PyGMT, +which may include code, documentation, pull request review, triaging, forum responses, +community building and engagement, outreach, and inclusion and diversity. Maintainers +are recognized for their responsibilities in maintaining the project, as detailed in +the {doc}`maintenance`. -Distinguished Contributors is not meant as a means of conveying -responsibilities. Distinguished Contributors who are also active maintainers of -the PyGMT project and have responsibilities detailed in the -{doc}`maintenance` have 'Maintainer' listed below their names. +New Distinguished Contributors and Active Maintainers are selected and voted by current +Active Maintainers before each release. 
Maintainers that are inactive for more than one +year will be moved to Distinguished Contributors. -:::::{grid} 2 3 3 4 -::::{grid-item-card} Dongdong Tian -:margin: 0 3 0 0 -:text-align: center -:img-top: https://avatars.githubusercontent.com/u/3974108?v=4 +## Founders -[@seisman](https://github.com/seisman) -+++ -{bdg-primary}`Maintainer` -:::: - -::::{grid-item-card} Jiayuan Yao -:margin: 0 3 0 0 -:text-align: center -:img-top: https://avatars.githubusercontent.com/u/50591376?v=4 - -[@core-man](https://github.com/core-man) -:::: +:::::{grid} 5 ::::{grid-item-card} Leonardo Uieda -:margin: 0 3 0 0 +:padding: 1 :text-align: center :img-top: https://avatars.githubusercontent.com/u/290082?v=4 [@leouieda](https://github.com/leouieda) -+++ -{bdg-success}`Founder` :::: -::::{grid-item-card} Liam Toney -:margin: 0 3 0 0 +::::{grid-item-card} Paul Wessel +:padding: 1 :text-align: center -:img-top: https://avatars.githubusercontent.com/u/38269494?v=4 +:img-top: https://avatars.githubusercontent.com/u/26473567?v=4 -[@liamtoney](https://github.com/liamtoney) +[@PaulWessel](https://github.com/PaulWessel) :::: -::::{grid-item-card} Max Jones -:margin: 0 3 0 0 -:text-align: center -:img-top: https://avatars.githubusercontent.com/u/14077947?v=4 +::::: -[@maxrjones](https://github.com/maxrjones) -:::: -::::{grid-item-card} Michael Grund -:margin: 0 3 0 0 -:text-align: center -:img-top: https://avatars.githubusercontent.com/u/23025878?v=4 +## Active Maintainers -[@michaelgrund](https://github.com/michaelgrund) -+++ -{bdg-primary}`Maintainer` -:::: +:::::{grid} 5 -::::{grid-item-card} Paul Wessel -:margin: 0 3 0 0 +::::{grid-item-card} Dongdong Tian +:padding: 1 :text-align: center -:img-top: https://avatars.githubusercontent.com/u/26473567?v=4 +:img-top: https://avatars.githubusercontent.com/u/3974108?v=4 -[@PaulWessel](https://github.com/PaulWessel) -+++ -{bdg-success}`Founder` +[@seisman](https://github.com/seisman) :::: ::::{grid-item-card} Wei Ji Leong -:margin: 0 3 0 0 +:padding: 1 :text-align: center :img-top: https://avatars.githubusercontent.com/u/23487320?v=4 [@weiji14](https://github.com/weiji14) -+++ -{bdg-primary}`Maintainer` +:::: + +::::{grid-item-card} Michael Grund +:padding: 1 +:text-align: center +:img-top: https://avatars.githubusercontent.com/u/23025878?v=4 + +[@michaelgrund](https://github.com/michaelgrund) :::: ::::{grid-item-card} Will Schlitzer -:margin: 0 3 0 0 +:padding: 1 :text-align: center :img-top: https://avatars.githubusercontent.com/u/29518865?v=4 [@willschlitzer](https://github.com/willschlitzer) -+++ -{bdg-primary}`Maintainer` :::: ::::{grid-item-card} Yvonne Fröhlich -:margin: 0 3 0 0 +:padding: 1 :text-align: center :img-top: https://avatars.githubusercontent.com/u/94163266?v=4 [@yvonnefroehlich](https://github.com/yvonnefroehlich) -+++ -{bdg-primary}`Maintainer` :::: + +::::: + + +## Distinguished Contributors + +:::::{grid} 5 + +::::{grid-item-card} Max Jones +:padding: 1 +:text-align: center +:img-top: https://avatars.githubusercontent.com/u/14077947?v=4 + +[@maxrjones](https://github.com/maxrjones) +:::: + +::::{grid-item-card} Jiayuan Yao +:padding: 1 +:text-align: center +:img-top: https://avatars.githubusercontent.com/u/50591376?v=4 + +[@core-man](https://github.com/core-man) +:::: + +::::{grid-item-card} Liam Toney +:padding: 1 +:text-align: center +:img-top: https://avatars.githubusercontent.com/u/38269494?v=4 + +[@liamtoney](https://github.com/liamtoney) +:::: + ::::: + From 4e364bb70c17fe11557961c254cc202ae2aa8ab9 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 12 May 2024 05:00:07 +0200 Subject: [PATCH 121/218] Gallery example "Velocity arrows and confidence ellipses": Improve argument passed to "spec" (#3245) --- .../gallery/seismology/velo_arrow_ellipse.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/examples/gallery/seismology/velo_arrow_ellipse.py b/examples/gallery/seismology/velo_arrow_ellipse.py index 076cb65127f..e6e7030ea2b 100644 --- a/examples/gallery/seismology/velo_arrow_ellipse.py +++ b/examples/gallery/seismology/velo_arrow_ellipse.py @@ -2,12 +2,12 @@ Velocity arrows and confidence ellipses ======================================= -The :meth:`pygmt.Figure.velo` method can be used to plot mean velocity arrows -and confidence ellipses. The example below plots red velocity arrows with -light-blue confidence ellipses outlined in red with the east_velocity x -north_velocity used for the station names. Note that the velocity arrows are -scaled by 0.2 and the 39% confidence limit will give an ellipse which fits -inside a rectangle of dimension east_sigma by north_sigma. +The :meth:`pygmt.Figure.velo` method can be used to plot mean velocity arrows and +confidence ellipses. The example below plots red velocity arrows with lightblue +confidence ellipses outlined in red with the east_velocity x north_velocity used for +the station names. Note that the velocity arrows are scaled by 0.2 and the 39% +confidence limit will give an ellipse which fits inside a rectangle of dimension +east_sigma by north_sigma. """ # %% @@ -30,12 +30,12 @@ fig.velo( data=df, region=[-10, 8, -10, 6], - pen="0.6p,red", + projection="x0.8c", + frame=["WSne", "2g2f"], + spec="e0.2/0.39+f18", uncertaintyfill="lightblue1", + pen="0.6p,red", line=True, - spec="e0.2/0.39/18", - frame=["WSne", "2g2f"], - projection="x0.8c", vector="0.3c+p1p+e+gred", ) From 5d1a8b4cfbb397210879e8932c5c0e51a72239e6 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 12 May 2024 19:13:18 +0800 Subject: [PATCH 122/218] CI: Set environment cache key based on current week of year to avoid outdated cache in the "Setup Micromamba" step (#3234) --- .github/workflows/ci_docs.yml | 6 ++++++ .github/workflows/ci_tests.yaml | 6 ++++++ .github/workflows/ci_tests_dev.yaml | 6 ++++++ .github/workflows/ci_tests_legacy.yaml | 2 -- 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 4f886a67b69..ae16cdad084 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -74,6 +74,10 @@ jobs: # fetch all history so that setuptools-scm works fetch-depth: 0 + - name: Get current week number of year + id: date + run: echo "date=$(date +%Y-W%W)" >> $GITHUB_OUTPUT # e.g., 2024-W19 + # Install Micromamba with conda-forge dependencies - name: Setup Micromamba uses: mamba-org/setup-micromamba@v1.8.1 @@ -85,6 +89,8 @@ jobs: - nodefaults cache-downloads: false cache-environment: true + # environment cache is persistent for one week. 
+ cache-environment-key: micromamba-environment-${{ steps.date.outputs.date }} create-args: >- python=3.12 gmt=6.5.0 diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 5c6ec2c948e..edced909a0f 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -99,6 +99,10 @@ jobs: # fetch all history so that setuptools-scm works fetch-depth: 0 + - name: Get current week number of year + id: date + run: echo "date=$(date +%Y-W%W)" >> $GITHUB_OUTPUT # e.g., 2024-W19 + # Install Micromamba with conda-forge dependencies - name: Setup Micromamba uses: mamba-org/setup-micromamba@v1.8.1 @@ -110,6 +114,8 @@ jobs: - nodefaults cache-downloads: false cache-environment: true + # environment cache is persistent for one week. + cache-environment-key: micromamba-environment-${{ steps.date.outputs.date }} create-args: >- python=${{ matrix.python-version }}${{ matrix.optional-packages }} gmt=6.5.0 diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 020e08ec958..f7505109348 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -52,6 +52,10 @@ jobs: # fetch all history so that setuptools-scm works fetch-depth: 0 + - name: Get current week number of year + id: date + run: echo "date=$(date +%Y-W%W)" >> $GITHUB_OUTPUT # e.g., 2024-W19 + # Install Micromamba with conda-forge dependencies - name: Setup Micromamba uses: mamba-org/setup-micromamba@v1.8.1 @@ -63,6 +67,8 @@ jobs: - nodefaults cache-downloads: false cache-environment: true + # environment cache is persistent for one week. + cache-environment-key: micromamba-environment-${{ steps.date.outputs.date }} create-args: >- python=3.12 cmake diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index a2d6357ef1c..8e857e24d24 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -58,8 +58,6 @@ jobs: channels: - conda-forge - nodefaults - cache-downloads: false - cache-environment: true create-args: >- python=3.10 gmt=${{ matrix.gmt_version }} From 9d0c02eecff76545e23ed1daf745733b8317bd56 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 13 May 2024 07:17:08 +0800 Subject: [PATCH 123/218] CI: Set PROJ_LIB in the Benchmarks workflow to fix the proj_create_from_database error (#3241) --- .github/workflows/benchmarks.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 58e17524e3b..927e5704724 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -83,3 +83,4 @@ jobs: PYGMT_USE_EXTERNAL_DISPLAY="false" python -m pytest -r P --pyargs pygmt --codspeed env: GMT_LIBRARY_PATH: /usr/share/miniconda/lib/ + PROJ_LIB: /usr/share/miniconda/share/proj From d2d79a37ba80c5d364ff4d80ae99511ca1d03344 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 14 May 2024 19:02:37 +0800 Subject: [PATCH 124/218] CI: Replace conda-incubator/setup-miniconda with mamba-org/setup-micromamba in the Benchmarks workflow (#3248) --- .github/workflows/benchmarks.yml | 56 ++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 927e5704724..9d223f6f7cd 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -39,25 +39,36 @@ jobs: # fetch all history so that setuptools-scm works fetch-depth: 0 - # Install Miniconda with 
conda-forge dependencies - - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v3.0.4 - with: - auto-activate-base: true - activate-environment: "" # base environment - channels: conda-forge,nodefaults - channel-priority: strict + - name: Get current week number of year + id: date + run: echo "date=$(date +%Y-W%W)" >> $GITHUB_OUTPUT # e.g., 2024-W19 - # Install GMT and dependencies from conda-forge - - name: Install dependencies - run: | - # $CONDA is an environment variable pointing to the root of the miniconda directory - # Preprend $CONDA/bin to $PATH so that conda's python is used over system python - echo $CONDA/bin >> $GITHUB_PATH - conda install --solver=libmamba gmt=6.5.0 python=3.12 \ - numpy pandas xarray netCDF4 packaging \ - geopandas pyarrow pytest pytest-mpl - python -m pip install -U pytest-codspeed setuptools + # Install Micromamba with conda-forge dependencies + - name: Setup Micromamba + uses: mamba-org/setup-micromamba@v1.8.1 + with: + environment-name: pygmt + condarc: | + channels: + - conda-forge + - nodefaults + cache-downloads: false + cache-environment: true + # environment cache is persistent for one week. + cache-environment-key: micromamba-environment-${{ steps.date.outputs.date }} + create-args: >- + gmt=6.5.0 + python=3.12 + numpy + pandas + xarray + netCDF4 + packaging + geopandas + pyarrow + pytest + pytest-codspeed + pytest-mpl # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub @@ -78,9 +89,6 @@ jobs: - name: Run benchmarks uses: CodSpeedHQ/action@v2.4.1 with: - run: | - python -c "import pygmt; pygmt.show_versions()" - PYGMT_USE_EXTERNAL_DISPLAY="false" python -m pytest -r P --pyargs pygmt --codspeed - env: - GMT_LIBRARY_PATH: /usr/share/miniconda/lib/ - PROJ_LIB: /usr/share/miniconda/share/proj + # 'bash -el -c' is needed to use the custom shell. + # See https://github.com/CodSpeedHQ/action/issues/65. + run: bash -el -c "python -c \"import pygmt; pygmt.show_versions()\"; PYGMT_USE_EXTERNAL_DISPLAY=false python -m pytest -r P --pyargs pygmt --codspeed" From d2d66946692275550415a91c82d085a3f68f2b09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 May 2024 06:21:12 +0800 Subject: [PATCH 125/218] Bump actions/create-github-app-token from 1.9.3 to 1.10.0 (#3251) Bumps [actions/create-github-app-token](https://github.com/actions/create-github-app-token) from 1.9.3 to 1.10.0. - [Release notes](https://github.com/actions/create-github-app-token/releases) - [Commits](https://github.com/actions/create-github-app-token/compare/v1.9.3...v1.10.0) --- updated-dependencies: - dependency-name: actions/create-github-app-token dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/format-command.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 797378e5ee7..70f88f407f2 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: # Generate token from GenericMappingTools bot - - uses: actions/create-github-app-token@v1.9.3 + - uses: actions/create-github-app-token@v1.10.0 id: generate-token with: app-id: ${{ secrets.APP_ID }} From d44e196d6dc27f6204db8f599ac226323f64af43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 May 2024 06:22:08 +0800 Subject: [PATCH 126/218] Bump codecov/codecov-action from 4.3.1 to 4.4.0 (#3250) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.3.1 to 4.4.0. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4.3.1...v4.4.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index edced909a0f..7daa46b1835 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -167,7 +167,7 @@ jobs: # Upload coverage to Codecov - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4.3.1 + uses: codecov/codecov-action@v4.4.0 with: use_oidc: true file: ./coverage.xml # optional From b3d75755b321af92df57fac6a952718f0997fd08 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 15 May 2024 23:12:16 +0800 Subject: [PATCH 127/218] CI: Always upload code coverage reports to codecov (#3246) --- .github/workflows/ci_tests.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 7daa46b1835..c0b2d79f606 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -160,7 +160,7 @@ jobs: # Upload diff images on test failure - name: Upload diff images if any test fails uses: actions/upload-artifact@v4 - if: ${{ failure() }} + if: failure() with: name: artifact-${{ runner.os }}-${{ matrix.python-version }} path: tmp-test-dir-with-unique-name @@ -168,6 +168,7 @@ jobs: # Upload coverage to Codecov - name: Upload coverage to Codecov uses: codecov/codecov-action@v4.4.0 + if: success() || failure() with: use_oidc: true file: ./coverage.xml # optional From 744aeda77e9010687cb2341198c3d6d159a5c984 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Thu, 16 May 2024 17:10:58 +0200 Subject: [PATCH 128/218] Tutorial "Plotting text": Fix typo (#3253) --- examples/tutorials/basics/text.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/tutorials/basics/text.py b/examples/tutorials/basics/text.py index 35744456e40..37b8cb4dc20 100644 --- a/examples/tutorials/basics/text.py +++ 
b/examples/tutorials/basics/text.py @@ -82,7 +82,7 @@ # Plot text labels at the x and y positions of the markers while varying the anchor # point via the justify parameter fig.text(x=-0.5, y=0.5, text="TL", justify="TL") # TopLeft -fig.text(x=0, y=0.5, text="TM", justify="TC") # TopCenter +fig.text(x=0, y=0.5, text="TC", justify="TC") # TopCenter fig.text(x=0.5, y=0.5, text="TR", justify="TR") # TopRight fig.text(x=-0.5, y=0, text="ML", justify="ML") # MiddleLeft fig.text(x=0, y=0, text="MC", justify="MC") # MiddleCenter From 8839e7bfa7eb7290fbf3004212b0c3e7bc696c29 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 17 May 2024 18:32:25 +0800 Subject: [PATCH 129/218] Refactor doctests to pass list of arguments to the Session.call_module method (#3255) --- pygmt/clib/session.py | 48 +++++++++++++++++++------------------- pygmt/datatypes/dataset.py | 4 ++-- pygmt/datatypes/grid.py | 4 ++-- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 854eebb997a..5648bf00c6e 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -34,7 +34,6 @@ ) from pygmt.helpers import ( data_kind, - fmt_docstring, tempfile_from_geojson, tempfile_from_image, ) @@ -146,7 +145,7 @@ class Session: ... with GMTTempFile() as fout: ... # Call the grdinfo module with the virtual file as input ... # and the temp file as output. - ... ses.call_module("grdinfo", f"{fin} -C ->{fout.name}") + ... ses.call_module("grdinfo", [fin, "-C", f"->{fout.name}"]) ... # Read the contents of the temp file before it's deleted. ... print(fout.read().strip()) -55 -47 -24 -10 190 981 1 1 8 14 1 1 @@ -543,18 +542,20 @@ def get_common(self, option): Examples -------- >>> with Session() as lib: - ... lib.call_module("basemap", "-R0/10/10/15 -JX5i/2.5i -Baf -Ve") + ... lib.call_module( + ... "basemap", ["-R0/10/10/15", "-JX5i/2.5i", "-Baf", "-Ve"] + ... ) ... region = lib.get_common("R") ... projection = lib.get_common("J") ... timestamp = lib.get_common("U") ... verbose = lib.get_common("V") - ... lib.call_module("plot", "-T -Xw+1i -Yh-1i") + ... lib.call_module("plot", ["-T", "-Xw+1i", "-Yh-1i"]) ... xshift = lib.get_common("X") # xshift/yshift are in inches ... yshift = lib.get_common("Y") >>> print(region, projection, timestamp, verbose, xshift, yshift) [ 0. 10. 10. 15.] True False 3 6.0 1.5 >>> with Session() as lib: - ... lib.call_module("basemap", "-R0/10/10/15 -JX5i/2.5i -Baf") + ... lib.call_module("basemap", ["-R0/10/10/15", "-JX5i/2.5i", "-Baf"]) ... lib.get_common("A") Traceback (most recent call last): ... @@ -1180,8 +1181,7 @@ def open_virtualfile(self, family, geometry, direction, data): ... with lib.open_virtualfile(*vfargs) as vfile: ... # Send the output to a temp file so that we can read it ... with GMTTempFile() as ofile: - ... args = f"{vfile} ->{ofile.name}" - ... lib.call_module("info", args) + ... lib.call_module("info", [vfile, f"->{ofile.name}"]) ... print(ofile.read().strip()) : N = 5 <0/4> <5/9> """ @@ -1288,7 +1288,7 @@ def virtualfile_from_vectors(self, *vectors): ... with ses.virtualfile_from_vectors(x, y, z) as fin: ... # Send the output to a file so that we can read it ... with GMTTempFile() as fout: - ... ses.call_module("info", f"{fin} ->{fout.name}") + ... ses.call_module("info", [fin, f"->{fout.name}"]) ... print(fout.read().strip()) : N = 3 <1/3> <4/6> <7/9> """ @@ -1398,7 +1398,7 @@ def virtualfile_from_matrix(self, matrix): ... with ses.virtualfile_from_matrix(data) as fin: ... 
# Send the output to a file so that we can read it ... with GMTTempFile() as fout: - ... ses.call_module("info", f"{fin} ->{fout.name}") + ... ses.call_module("info", [fin, f"->{fout.name}"]) ... print(fout.read().strip()) : N = 4 <0/9> <1/10> <2/11> """ @@ -1478,8 +1478,9 @@ def virtualfile_from_grid(self, grid): ... with ses.virtualfile_from_grid(data) as fin: ... # Send the output to a file so that we can read it ... with GMTTempFile() as fout: - ... args = f"{fin} -L0 -Cn ->{fout.name}" - ... ses.call_module("grdinfo", args) + ... ses.call_module( + ... "grdinfo", [fin, "-L0", "-Cn", f"->{fout.name}"] + ... ) ... print(fout.read().strip()) -55 -47 -24 -10 190 981 1 1 8 14 1 1 >>> # The output is: w e s n z0 z1 dx dy n_columns n_rows reg gtype @@ -1510,7 +1511,6 @@ def virtualfile_from_grid(self, grid): with self.open_virtualfile(*args) as vfile: yield vfile - @fmt_docstring def virtualfile_in( # noqa: PLR0912 self, check_kind=None, @@ -1571,7 +1571,7 @@ def virtualfile_in( # noqa: PLR0912 ... with ses.virtualfile_in(check_kind="vector", data=data) as fin: ... # Send the output to a file so that we can read it ... with GMTTempFile() as fout: - ... ses.call_module("info", fin + " ->" + fout.name) + ... ses.call_module("info", [fin, f"->{fout.name}"]) ... print(fout.read().strip()) : N = 3 <7/9> <4/6> <1/3> """ @@ -1718,7 +1718,7 @@ def virtualfile_out( ... # Create a virtual file for storing the output table. ... with Session() as lib: ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... ds = lib.read_virtualfile(vouttbl, kind="dataset") ... assert isinstance(ds.contents, _GMT_DATASET) ... @@ -1726,7 +1726,7 @@ def virtualfile_out( ... with Session() as lib: ... with lib.virtualfile_out(fname=tmpfile.name) as vouttbl: ... assert vouttbl == tmpfile.name - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... line = Path(vouttbl).read_text() ... assert line == "1\t2\t3\tTEXT\n" """ @@ -1798,7 +1798,7 @@ def read_virtualfile( ... with Path(tmpfile.name).open(mode="w") as fp: ... print("1.0 2.0 3.0 TEXT", file=fp) ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... # Read the virtual file as a void pointer ... void_pointer = lib.read_virtualfile(vouttbl) ... assert isinstance(void_pointer, int) # void pointer is an int @@ -1809,7 +1809,7 @@ def read_virtualfile( >>> # Read grid from a virtual file >>> with Session() as lib: ... with lib.virtualfile_out(kind="grid") as voutgrd: - ... lib.call_module("read", f"@earth_relief_01d_g {voutgrd} -Tg") + ... lib.call_module("read", ["@earth_relief_01d_g", voutgrd, "-Tg"]) ... # Read the virtual file as a void pointer ... void_pointer = lib.read_virtualfile(voutgrd) ... assert isinstance(void_pointer, int) # void pointer is an int @@ -1905,7 +1905,7 @@ def virtualfile_to_dataset( ... with lib.virtualfile_out( ... kind="dataset", fname=outtmp.name ... ) as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... result = lib.virtualfile_to_dataset( ... vfname=vouttbl, output_type="file" ... ) @@ -1915,7 +1915,7 @@ def virtualfile_to_dataset( ... # strings output ... with Session() as lib: ... 
with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... outstr = lib.virtualfile_to_dataset( ... vfname=vouttbl, output_type="strings" ... ) @@ -1925,7 +1925,7 @@ def virtualfile_to_dataset( ... # numpy output ... with Session() as lib: ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... outnp = lib.virtualfile_to_dataset( ... vfname=vouttbl, output_type="numpy" ... ) @@ -1934,7 +1934,7 @@ def virtualfile_to_dataset( ... # pandas output ... with Session() as lib: ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... outpd = lib.virtualfile_to_dataset( ... vfname=vouttbl, output_type="pandas" ... ) @@ -1943,7 +1943,7 @@ def virtualfile_to_dataset( ... # pandas output with specified column names ... with Session() as lib: ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... outpd2 = lib.virtualfile_to_dataset( ... vfname=vouttbl, ... output_type="pandas", @@ -2026,7 +2026,7 @@ def virtualfile_to_raster( ... with GMTTempFile(suffix=".nc") as tmpfile: ... outgrid = tmpfile.name ... with lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd: - ... lib.call_module("read", f"@earth_relief_01d_g {voutgrd} -Tg") + ... lib.call_module("read", ["@earth_relief_01d_g", voutgrd, "-Tg"]) ... result = lib.virtualfile_to_raster( ... vfname=voutgrd, outgrid=outgrid ... ) @@ -2036,7 +2036,7 @@ def virtualfile_to_raster( ... # xarray.DataArray output ... outgrid = None ... with lib.virtualfile_out(kind="grid", fname=outgrid) as voutgrd: - ... lib.call_module("read", f"@earth_relief_01d_g {voutgrd} -Tg") + ... lib.call_module("read", ["@earth_relief_01d_g", voutgrd, "-Tg"]) ... result = lib.virtualfile_to_raster(vfname=voutgrd, outgrid=outgrid) ... assert isinstance(result, xr.DataArray) """ diff --git a/pygmt/datatypes/dataset.py b/pygmt/datatypes/dataset.py index f9a2fa19803..3f2202052e0 100644 --- a/pygmt/datatypes/dataset.py +++ b/pygmt/datatypes/dataset.py @@ -37,7 +37,7 @@ class _GMT_DATASET(ctp.Structure): # noqa: N801 ... # Read the data file ... with Session() as lib: ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... # The dataset ... ds = lib.read_virtualfile(vouttbl, kind="dataset").contents ... print(ds.n_tables, ds.n_columns, ds.n_segments) @@ -224,7 +224,7 @@ def to_dataframe( ... print("10.0 11.0 12.0 TEXT123 TEXT456789", file=fp) ... with Session() as lib: ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", f"{tmpfile.name} {vouttbl} -Td") + ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) ... ds = lib.read_virtualfile(vouttbl, kind="dataset") ... text = ds.contents.to_strings() ... 
df = ds.contents.to_dataframe(header=0) diff --git a/pygmt/datatypes/grid.py b/pygmt/datatypes/grid.py index 1caa0bd0240..b420db6d7f1 100644 --- a/pygmt/datatypes/grid.py +++ b/pygmt/datatypes/grid.py @@ -22,7 +22,7 @@ class _GMT_GRID(ctp.Structure): # noqa: N801 >>> from pygmt.clib import Session >>> with Session() as lib: ... with lib.virtualfile_out(kind="grid") as voutgrd: - ... lib.call_module("read", f"@static_earth_relief.nc {voutgrd} -Tg") + ... lib.call_module("read", ["@static_earth_relief.nc", voutgrd, "-Tg"]) ... # Read the grid from the virtual file ... grid = lib.read_virtualfile(voutgrd, kind="grid").contents ... # The grid header @@ -106,7 +106,7 @@ def to_dataarray(self) -> xr.DataArray: >>> from pygmt.clib import Session >>> with Session() as lib: ... with lib.virtualfile_out(kind="grid") as voutgrd: - ... lib.call_module("read", f"@static_earth_relief.nc {voutgrd} -Tg") + ... lib.call_module("read", ["@static_earth_relief.nc", voutgrd, "-Tg"]) ... # Read the grid from the virtual file ... grid = lib.read_virtualfile(voutgrd, kind="grid") ... # Convert to xarray.DataArray and use it later From 5787a34ae4ccf978da4c83c8406fc9a576ab37a1 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 17 May 2024 20:23:09 +0800 Subject: [PATCH 130/218] Refactor tests to pass list of arguments to the Session.call_module method (#3256) --- pygmt/tests/test_clib.py | 12 ++++++------ pygmt/tests/test_clib_virtualfiles.py | 20 ++++++++++---------- pygmt/tests/test_datatypes_dataset.py | 2 +- pygmt/tests/test_session_management.py | 6 +++--- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/pygmt/tests/test_clib.py b/pygmt/tests/test_clib.py index bc3d294b96b..8f7c10c8728 100644 --- a/pygmt/tests/test_clib.py +++ b/pygmt/tests/test_clib.py @@ -181,7 +181,7 @@ def test_call_module_invalid_arguments(): """ with clib.Session() as lib: with pytest.raises(GMTCLibError): - lib.call_module("info", "bogus-data.bla") + lib.call_module("info", ["bogus-data.bla"]) def test_call_module_invalid_name(): @@ -190,7 +190,7 @@ def test_call_module_invalid_name(): """ with clib.Session() as lib: with pytest.raises(GMTCLibError): - lib.call_module("meh", "") + lib.call_module("meh", []) def test_call_module_error_message(): @@ -199,7 +199,7 @@ def test_call_module_error_message(): """ with clib.Session() as lib: with pytest.raises(GMTCLibError) as exc_info: - lib.call_module("info", "bogus-data.bla") + lib.call_module("info", ["bogus-data.bla"]) assert "Module 'info' failed with status code" in exc_info.value.args[0] assert ( "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in exc_info.value.args[0] @@ -213,7 +213,7 @@ def test_method_no_session(): # Create an instance of Session without "with" so no session is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): - lib.call_module("gmtdefaults", "") + lib.call_module("gmtdefaults", []) with pytest.raises(GMTCLibNoSessionError): _ = lib.session_pointer @@ -385,14 +385,14 @@ def test_extract_region_two_figures(): # Activate the first figure and extract the region from it # Use in a different session to avoid any memory problems. 
with clib.Session() as lib: - lib.call_module("figure", f"{fig1._name} -") + lib.call_module("figure", [fig1._name, "-"]) with clib.Session() as lib: wesn1 = lib.extract_region() npt.assert_allclose(wesn1, region1) # Now try it with the second one with clib.Session() as lib: - lib.call_module("figure", f"{fig2._name} -") + lib.call_module("figure", [fig2._name, "-"]) with clib.Session() as lib: wesn2 = lib.extract_region() npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0])) diff --git a/pygmt/tests/test_clib_virtualfiles.py b/pygmt/tests/test_clib_virtualfiles.py index 26ebfc5d379..b8b5ee0500d 100644 --- a/pygmt/tests/test_clib_virtualfiles.py +++ b/pygmt/tests/test_clib_virtualfiles.py @@ -69,7 +69,7 @@ def test_virtual_file(dtypes): vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset) with lib.open_virtualfile(*vfargs) as vfile: with GMTTempFile() as outfile: - lib.call_module("info", f"{vfile} ->{outfile.name}") + lib.call_module("info", [vfile, f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) bounds = "\t".join([f"<{col.min():.0f}/{col.max():.0f}>" for col in data.T]) expected = f": N = {shape[0]}\t{bounds}\n" @@ -144,7 +144,7 @@ def test_virtualfile_in_required_z_matrix(array_func, kind): data=data, required_z=True, check_kind="vector" ) as vfile: with GMTTempFile() as outfile: - lib.call_module("info", f"{vfile} ->{outfile.name}") + lib.call_module("info", [vfile, f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) bounds = "\t".join( [ @@ -217,7 +217,7 @@ def test_virtualfile_from_vectors(dtypes): with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: - lib.call_module("info", f"{vfile} ->{outfile.name}") + lib.call_module("info", [vfile, f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) bounds = "\t".join([f"<{i.min():.0f}/{i.max():.0f}>" for i in (x, y, z)]) expected = f": N = {size}\t{bounds}\n" @@ -237,7 +237,7 @@ def test_virtualfile_from_vectors_one_string_or_object_column(dtype): with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile() as outfile: - lib.call_module("convert", f"{vfile} ->{outfile.name}") + lib.call_module("convert", [vfile, f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) expected = "".join( f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings, strict=True) @@ -259,7 +259,7 @@ def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile: with GMTTempFile() as outfile: - lib.call_module("convert", f"{vfile} ->{outfile.name}") + lib.call_module("convert", [vfile, f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) expected = "".join( f"{h}\t{i}\t{j} {k}\n" @@ -278,7 +278,7 @@ def test_virtualfile_from_vectors_transpose(dtypes): with clib.Session() as lib: with lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile: - lib.call_module("info", f"{vfile} -C ->{outfile.name}") + lib.call_module("info", [vfile, "-C", f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) bounds = "\t".join([f"{col.min():.0f}\t{col.max():.0f}" for col in data.T]) expected = f"{bounds}\n" @@ -308,7 +308,7 @@ def test_virtualfile_from_matrix(dtypes): with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: - lib.call_module("info", f"{vfile} ->{outfile.name}") + lib.call_module("info", [vfile, f"->{outfile.name}"]) output = 
outfile.read(keep_tabs=True) bounds = "\t".join([f"<{col.min():.0f}/{col.max():.0f}>" for col in data.T]) expected = f": N = {shape[0]}\t{bounds}\n" @@ -328,7 +328,7 @@ def test_virtualfile_from_matrix_slice(dtypes): with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: - lib.call_module("info", f"{vfile} ->{outfile.name}") + lib.call_module("info", [vfile, f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) bounds = "\t".join([f"<{col.min():.0f}/{col.max():.0f}>" for col in data.T]) expected = f": N = {rows}\t{bounds}\n" @@ -354,7 +354,7 @@ def test_virtualfile_from_vectors_pandas(dtypes_pandas): with clib.Session() as lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile: with GMTTempFile() as outfile: - lib.call_module("info", f"{vfile} ->{outfile.name}") + lib.call_module("info", [vfile, f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) bounds = "\t".join( [f"<{i.min():.0f}/{i.max():.0f}>" for i in (data.x, data.y, data.z)] @@ -374,7 +374,7 @@ def test_virtualfile_from_vectors_arraylike(): with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: - lib.call_module("info", f"{vfile} ->{outfile.name}") + lib.call_module("info", [vfile, f"->{outfile.name}"]) output = outfile.read(keep_tabs=True) bounds = "\t".join([f"<{min(i):.0f}/{max(i):.0f}>" for i in (x, y, z)]) expected = f": N = {size}\t{bounds}\n" diff --git a/pygmt/tests/test_datatypes_dataset.py b/pygmt/tests/test_datatypes_dataset.py index dd7e4073852..aa261c74a62 100644 --- a/pygmt/tests/test_datatypes_dataset.py +++ b/pygmt/tests/test_datatypes_dataset.py @@ -46,7 +46,7 @@ def dataframe_from_gmt(fname, **kwargs): """ with Session() as lib: with lib.virtualfile_out(kind="dataset") as vouttbl: - lib.call_module("read", f"{fname} {vouttbl} -Td") + lib.call_module("read", [fname, vouttbl, "-Td"]) df = lib.virtualfile_to_dataset(vfname=vouttbl, **kwargs) return df diff --git a/pygmt/tests/test_session_management.py b/pygmt/tests/test_session_management.py index 77a3970787f..d949f1a51c0 100644 --- a/pygmt/tests/test_session_management.py +++ b/pygmt/tests/test_session_management.py @@ -21,7 +21,7 @@ def test_begin_end(): end() # Kill the global session begin() with Session() as lib: - lib.call_module("basemap", "-R10/70/-3/8 -JX4i/3i -Ba") + lib.call_module("basemap", ["-R10/70/-3/8", "-JX4i/3i", "-Ba"]) end() begin() # Restart the global session assert Path("pygmt-session.pdf").exists() @@ -39,10 +39,10 @@ def test_gmt_compat_6_is_applied(capsys): # Generate a gmt.conf file in the current directory # with GMT_COMPATIBILITY = 5 with Session() as lib: - lib.call_module("gmtset", "GMT_COMPATIBILITY 5") + lib.call_module("gmtset", ["GMT_COMPATIBILITY=5"]) begin() with Session() as lib: - lib.call_module("basemap", "-R10/70/-3/8 -JX4i/3i -Ba") + lib.call_module("basemap", ["-R10/70/-3/8", "-JX4i/3i", "-Ba"]) out, err = capsys.readouterr() # capture stdout and stderr assert out == "" assert err != ( From 8926fdefd4b2bdc2b4b05c7e5fc7a9b37cc504a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sat, 18 May 2024 11:38:36 +0200 Subject: [PATCH 131/218] GMT-Ghostscript incompatibility: Give recommendations and extend examples (#3249) Co-authored-by: Dongdong Tian Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/4-release_checklist.md | 2 ++ README.md | 7 ++++-- 
doc/install.md | 23 ++++++++++++++++---
 3 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/4-release_checklist.md b/.github/ISSUE_TEMPLATE/4-release_checklist.md
index de85db93215..12dde7459b6 100644
--- a/.github/ISSUE_TEMPLATE/4-release_checklist.md
+++ b/.github/ISSUE_TEMPLATE/4-release_checklist.md
@@ -23,6 +23,8 @@ assignees: ''
 - [ ] All tests pass in the ["GMT Dev Tests" workflow](https://github.com/GenericMappingTools/pygmt/actions/workflows/ci_tests_dev.yaml)
 - [ ] All tests pass in the ["Doctests" workflow](https://github.com/GenericMappingTools/pygmt/actions/workflows/ci_doctests.yaml)
 - [ ] Deprecations and related tests are removed for this version by running `grep --include="*.py" -r 'remove_version="vX.Y.Z"' pygmt` from the base of the repository
+- [ ] Update warnings in `pygmt.show_versions()` as well as notes in [Common installation issues](https://www.pygmt.org/dev/install.html#not-working-transparency)
+  and [Testing your install](https://www.pygmt.org/dev/install.html#testing-your-install) regarding GMT-Ghostscript incompatibility
 - [ ] Reserve a DOI on [Zenodo](https://zenodo.org) by clicking on "New Version"
 - [ ] Review the ["PyGMT Team" page](https://www.pygmt.org/dev/team.html)
 - [ ] Finish up 'Changelog entry for v0.x.x' Pull Request:
diff --git a/README.md b/README.md
index 9782b0f6517..c415f5052df 100644
--- a/README.md
+++ b/README.md
@@ -74,11 +74,14 @@ or a [Jupyter notebook](https://docs.jupyter.org/en/latest/running.html), and tr
 ``` python
 import pygmt
 fig = pygmt.Figure()
-fig.coast(projection="H10c", region="g", frame=True, land="gray")
+fig.coast(projection="N15c", region="g", frame=True, land="tan", water="lightblue")
+fig.text(position="MC", text="PyGMT", font="80p,Helvetica-Bold,red@75")
 fig.show()
 ```
 
-For more examples, please have a look at the [Gallery](https://www.pygmt.org/latest/gallery/index.html) and
+You should see a global map with land and water masses colored in tan and lightblue, respectively. On top,
+there should be the semi-transparent text "PyGMT". For more examples, please have a look at the
+[Gallery](https://www.pygmt.org/latest/gallery/index.html) and
 [Tutorials](https://www.pygmt.org/latest/tutorials/index.html).
 
 ## Contacting us
diff --git a/doc/install.md b/doc/install.md
index 887e971ce55..a17d6e237d4 100644
--- a/doc/install.md
+++ b/doc/install.md
@@ -105,7 +105,7 @@ If you have [PyArrow](https://arrow.apache.org/docs/python/index.html) installed
 does have some initial support for `pandas.Series` and `pandas.DataFrame` objects with
 Apache Arrow-backed arrays. Specifically, only uint/int/float and date32/date64 dtypes
 are supported for now. Support for string Arrow dtypes is still a work in progress.
-For more details, see [issue #2800](https://github.com/GenericMappingTools/pygmt/issues/2800). 
+For more details, see [issue #2800](https://github.com/GenericMappingTools/pygmt/issues/2800).
 :::
 
 ## Installing GMT and other dependencies
@@ -238,11 +238,17 @@ import pygmt
 pygmt.show_versions()
 
 fig = pygmt.Figure()
-fig.coast(region="g", frame=True, shorelines=1)
+fig.coast(projection="N15c", region="g", frame=True, land="tan", water="lightblue")
+fig.text(position="MC", text="PyGMT", font="80p,Helvetica-Bold,red@75")
 fig.show()
 ```
+![pygmt-get-started](https://github.com/GenericMappingTools/pygmt/assets/3974108/f7f51484-8640-4b58-ae5b-6c71e7150f7a){.align-center width="70%"}
 
-If you see a global map with shorelines, then you're all set.
+You should see a global map with land and water masses colored in tan and lightblue +respectively. On top, there should be the semi-transparent text "PyGMT". If the +semi-transparency does not show up, there is probably an incompatibility between your +GMT and Ghostscript versions. For details, please run `pygmt.show_versions()` and see +[Not working transparency](#not-working-transparency). ## Common installation issues @@ -284,3 +290,14 @@ jupyter kernelspec list --json After that, you need to restart Jupyter, open your notebook, select the `pygmt` kernel and then import pygmt. + + +### Not working transparency + +It is known that some combinations of GMT and Ghostscript versions cause issues, +especially regarding transparency. If the transparency doesn't work in your figures, +please check your GMT and Ghostscript versions (you can run `pygmt.show_versions()`). +We recommend: + +- Ghostscript 9.53-9.56 for GMT 6.3.0/6.4.0 +- Ghostscript 10.03 or later for GMT 6.5.0 From e02e501b8fbe7fa2f717861f1904ffbf6569ae9d Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 18 May 2024 18:53:51 +0800 Subject: [PATCH 132/218] pygmt.show_versions: Warn about incompatible ghostscript versions (#3244) --- pygmt/__init__.py | 92 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 68 insertions(+), 24 deletions(-) diff --git a/pygmt/__init__.py b/pygmt/__init__.py index 917aab0ac62..3eac6ee579d 100644 --- a/pygmt/__init__.py +++ b/pygmt/__init__.py @@ -102,6 +102,9 @@ def show_versions(file=sys.stdout): - System information (Python version, Operating System) - Core dependency versions (NumPy, Pandas, Xarray, etc) - GMT library information + + It also warns users if the installed Ghostscript version has serious bugs or is + incompatible with the installed GMT version. """ import importlib @@ -110,8 +113,18 @@ def show_versions(file=sys.stdout): import subprocess from packaging.requirements import Requirement + from packaging.version import Version + + def _get_clib_info() -> dict: + """ + Return information about the GMT shared library. + """ + from pygmt.clib import Session - def _get_module_version(modname): + with Session() as ses: + return ses.info + + def _get_module_version(modname: str) -> str | None: """ Get version information of a Python module. """ @@ -128,17 +141,19 @@ def _get_module_version(modname): except ImportError: return None - def _get_ghostscript_version(): + def _get_ghostscript_version() -> str | None: """ - Get ghostscript version. + Get Ghostscript version. """ - os_name = sys.platform - if os_name.startswith(("linux", "freebsd", "darwin")): - cmds = ["gs"] - elif os_name == "win32": - cmds = ["gswin64c.exe", "gswin32c.exe"] - else: - return None + match sys.platform: + case "linux" | "darwin": + cmds = ["gs"] + case os_name if os_name.startswith("freebsd"): + cmds = ["gs"] + case "win32": + cmds = ["gswin64c.exe", "gswin32c.exe"] + case _: + return None for gs_cmd in cmds: if (gsfullpath := shutil.which(gs_cmd)) is not None: @@ -147,24 +162,53 @@ def _get_ghostscript_version(): ).strip() return None + def _check_ghostscript_version(gs_version: str) -> str | None: + """ + Check if the Ghostscript version is compatible with GMT versions. + """ + match Version(gs_version): + case v if v < Version("9.53"): + return ( + f"Ghostscript v{gs_version} is too old and may have serious bugs. " + "Please consider upgrading your Ghostscript." + ) + case v if Version("10.00") <= v < Version("10.02"): + return ( + f"Ghostscript v{gs_version} has known bugs. 
" + "Please consider upgrading to version v10.02 or later." + ) + case v if v >= Version("10.02"): + from pygmt.clib import __gmt_version__ + + if Version(__gmt_version__) < Version("6.5.0"): + return ( + f"GMT v{__gmt_version__} doesn't support Ghostscript " + "v{gs_version}. Please consider upgrading to GMT>=6.5.0 or " + "downgrading to Ghostscript v9.56." + ) + return None + sys_info = { "python": sys.version.replace("\n", " "), "executable": sys.executable, "machine": platform.platform(), } - deps = [Requirement(v).name for v in importlib.metadata.requires("pygmt")] + gs_version = _get_ghostscript_version() + + lines = [] + lines.append("PyGMT information:") + lines.append(f" version: {__version__}") + lines.append("System information:") + lines.extend([f" {key}: {val}" for key, val in sys_info.items()]) + lines.append("Dependency information:") + lines.extend([f" {modname}: {_get_module_version(modname)}" for modname in deps]) + lines.append(f" ghostscript: {gs_version}") + lines.append("GMT library information:") + lines.extend([f" {key}: {val}" for key, val in _get_clib_info().items()]) + + if warnmsg := _check_ghostscript_version(gs_version): + lines.append("WARNING:") + lines.append(f" {warnmsg}") - print("PyGMT information:", file=file) - print(f" version: {__version__}", file=file) - - print("System information:", file=file) - for key, val in sys_info.items(): - print(f" {key}: {val}", file=file) - - print("Dependency information:", file=file) - for modname in deps: - print(f" {modname}: {_get_module_version(modname)}", file=file) - print(f" ghostscript: {_get_ghostscript_version()}", file=file) - - print_clib_info(file=file) + print("\n".join(lines), file=file) From ef9f650369801e88b1492c73f11da7b8023664b4 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 18 May 2024 20:49:46 +0800 Subject: [PATCH 133/218] CI: Use ubuntu-24.04 in the GMT Dev Tests workflow (#3258) --- .github/workflows/ci_tests_dev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index f7505109348..dca46781a50 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -37,7 +37,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-22.04, macos-14, windows-2022] + os: [ubuntu-24.04, macos-14, windows-2022] gmt_git_ref: [master] timeout-minutes: 30 defaults: From 861f454ef4be671dae421994accbd971cc0f607f Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 19 May 2024 09:40:29 +0800 Subject: [PATCH 134/218] CI: Add pytest plugins pytest-xdist and pytest-rerunfailures (#3193) --- .github/workflows/benchmarks.yml | 4 +++- .github/workflows/ci_tests.yaml | 2 ++ .github/workflows/ci_tests_dev.yaml | 2 +- environment.yml | 4 +++- pyproject.toml | 2 +- 5 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 9d223f6f7cd..93fc26fbc8c 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -69,7 +69,9 @@ jobs: pytest pytest-codspeed pytest-mpl - + pytest-rerunfailures + pytest-xdist + # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub run: | diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index c0b2d79f606..42046dcbd39 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -133,6 +133,8 @@ jobs: pytest-cov pytest-doctestplus pytest-mpl + pytest-rerunfailures + 
pytest-xdist # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index dca46781a50..e7c39c43e7f 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -128,7 +128,7 @@ jobs: --extra-index https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ 'numpy<2' pandas xarray netCDF4 packaging \ build contextily dvc geopandas ipython pyarrow rioxarray \ - 'pytest>=6.0' pytest-cov pytest-doctestplus pytest-mpl \ + pytest pytest-cov pytest-doctestplus pytest-mpl pytest-rerunfailures pytest-xdist\ sphinx-gallery # Show installed pkg information for postmortem diagnostic diff --git a/environment.yml b/environment.yml index 6791196c1e7..ad92914bda9 100644 --- a/environment.yml +++ b/environment.yml @@ -28,10 +28,12 @@ dependencies: - ruff>=0.3.0 # Dev dependencies (unit testing) - matplotlib-base + - pytest>=6.0 - pytest-cov - pytest-doctestplus - pytest-mpl - - pytest>=6.0 + - pytest-rerunfailures + - pytest-xdist # Dev dependencies (building documentation) - myst-parser - panel diff --git a/pyproject.toml b/pyproject.toml index 773bf07f5e6..aa09cd11e92 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -162,7 +162,7 @@ max-args=10 [tool.pytest.ini_options] minversion = "6.0" -addopts = "--verbose --durations=0 --durations-min=0.2 --doctest-modules --mpl --mpl-results-path=results" +addopts = "--verbose --durations=0 --durations-min=0.2 --doctest-modules --mpl --mpl-results-path=results -n auto --reruns 2" markers = [ "benchmark: mark a test with custom benchmark settings.", ] From 091bd3c1abdde4fb6a93c42acb01bb426b027acf Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 20 May 2024 12:21:00 +0800 Subject: [PATCH 135/218] geopandas: Use io.StringIO to read geojson data and handle compatibility with geopandas v0.x and v1.x (#3247) --- pygmt/helpers/tempfile.py | 40 ++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/pygmt/helpers/tempfile.py b/pygmt/helpers/tempfile.py index 3cbb88060df..17d90be6935 100644 --- a/pygmt/helpers/tempfile.py +++ b/pygmt/helpers/tempfile.py @@ -2,12 +2,14 @@ Utilities for dealing with temporary file management. """ +import io import uuid from contextlib import contextmanager from pathlib import Path from tempfile import NamedTemporaryFile import numpy as np +from packaging.version import Version def unique_name(): @@ -139,29 +141,33 @@ def tempfile_from_geojson(geojson): # 32-bit integer overflow issue. Related issues: # https://github.com/geopandas/geopandas/issues/967#issuecomment-842877704 # https://github.com/GenericMappingTools/pygmt/issues/2497 - if geojson.index.name is None: - geojson.index.name = "index" - geojson = geojson.reset_index(drop=False) - schema = gpd.io.file.infer_schema(geojson) - for col, dtype in schema["properties"].items(): - if dtype in ("int", "int64"): - overflow = geojson[col].abs().max() > 2**31 - 1 - schema["properties"][col] = "float" if overflow else "int32" - ogrgmt_kwargs["schema"] = schema + if Version(gpd.__version__).major < 1: # GeoPandas v0.x + # The default engine 'fiona' supports the 'schema' parameter. 
+ if geojson.index.name is None: + geojson.index.name = "index" + geojson = geojson.reset_index(drop=False) + schema = gpd.io.file.infer_schema(geojson) + for col, dtype in schema["properties"].items(): + if dtype in ("int", "int64"): + overflow = geojson[col].abs().max() > 2**31 - 1 + schema["properties"][col] = "float" if overflow else "int32" + ogrgmt_kwargs["schema"] = schema + else: # GeoPandas v1.x. + # The default engine "pyogrio" doesn't support the 'schema' parameter + # but we can change the dtype directly. + for col in geojson.columns: + if geojson[col].dtype in ("int", "int64", "Int64"): + overflow = geojson[col].abs().max() > 2**31 - 1 + dtype = "float" if overflow else "int32" + geojson[col] = geojson[col].astype(dtype) # Using geopandas.to_file to directly export to OGR_GMT format geojson.to_file(**ogrgmt_kwargs) except AttributeError: # Other 'geo' formats which implement __geo_interface__ import json - import fiona - - with fiona.Env(): - jsontext = json.dumps(geojson.__geo_interface__) - # Do Input/Output via Fiona virtual memory - with fiona.io.MemoryFile(file_or_bytes=jsontext.encode()) as memfile: - geoseries = gpd.GeoSeries.from_file(filename=memfile) - geoseries.to_file(**ogrgmt_kwargs) + jsontext = json.dumps(geojson.__geo_interface__) + gpd.read_file(filename=io.StringIO(jsontext)).to_file(**ogrgmt_kwargs) yield tmpfile.name From 8192ca056b9d3e3586c1669e15965c60b2091e41 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 21 May 2024 07:15:06 +0800 Subject: [PATCH 136/218] CI: Use macos-12 in the GMT Legacy Tests workflow (#3262) --- .github/workflows/ci_tests_legacy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index 8e857e24d24..00fea79f379 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -34,7 +34,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-20.04, macos-11, windows-2019] + os: [ubuntu-20.04, macos-12, windows-2019] gmt_version: ['6.3', '6.4'] timeout-minutes: 30 defaults: From eaca67df16ee48b26e8faf161fd10c0b2873018d Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 21 May 2024 13:58:24 +0800 Subject: [PATCH 137/218] Remove the unused pygmt.print_clib_info function (#3257) --- doc/api/index.rst | 1 - pygmt/__init__.py | 15 --------------- 2 files changed, 16 deletions(-) diff --git a/doc/api/index.rst b/doc/api/index.rst index 646fb49886a..5cf28bb3ebb 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -203,7 +203,6 @@ Miscellaneous :toctree: generated which - print_clib_info show_versions .. currentmodule:: pygmt diff --git a/pygmt/__init__.py b/pygmt/__init__.py index 3eac6ee579d..efc0ea9baa5 100644 --- a/pygmt/__init__.py +++ b/pygmt/__init__.py @@ -77,21 +77,6 @@ _atexit.register(_end) -def print_clib_info(file=sys.stdout): - """ - Print information about the GMT shared library that we can find. - - Includes the GMT version, default values for parameters, the path to the - ``libgmt`` shared library, and GMT directories. - """ - from pygmt.clib import Session - - print("GMT library information:", file=file) - with Session() as ses: - lines = [f" {key}: {ses.info[key]}" for key in sorted(ses.info)] - print("\n".join(lines), file=file) - - def show_versions(file=sys.stdout): """ Print various dependency versions which are useful when submitting bug reports. 
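The GeoPandas v1.x branch added in the tempfile patch above (#3247) has to downcast wide integer columns itself, since the default "pyogrio" engine no longer accepts a schema. A minimal, self-contained sketch of that overflow guard using plain pandas (toy column names, no geometry involved, not the actual PyGMT helper):

    import pandas as pd

    # Toy attribute table standing in for a GeoDataFrame's non-geometry columns.
    df = pd.DataFrame({"small_id": [1, 2, 3], "big_id": [1, 2, 2**40]})

    for col in df.columns:
        if df[col].dtype in ("int", "int64", "Int64"):
            # The OGR_GMT export path handles integers as 32-bit, so anything
            # outside the int32 range is written as float rather than overflowing.
            overflow = df[col].abs().max() > 2**31 - 1
            df[col] = df[col].astype("float" if overflow else "int32")

    print(df.dtypes)  # small_id -> int32, big_id -> float64
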
From e17a1bf3230ebea72157ff14bcd5e428577d1981 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 21 May 2024 15:00:19 +0800 Subject: [PATCH 138/218] CI: Build GMT dev source code with OpenMP enabled on Linux and GThreads enabled on Linux/macOS (#3011) --- .github/workflows/ci_tests_dev.yaml | 42 ++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index e7c39c43e7f..ef95f2c9f05 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -6,9 +6,8 @@ # installed by fetching the latest source codes from the GMT master branch and # compiling. # -# It is triggered when a pull request is marked as "ready as review", or labeled with -# 'run/test-gmt-dev'. It is also scheduled to run on Monday, Wednesday, and Friday on -# the main branch. +# It is triggered in a pull request if labeled with 'run/test-gmt-dev'. +# It is also scheduled to run on Monday, Wednesday, and Friday on the main branch. # name: GMT Dev Tests @@ -87,18 +86,42 @@ jobs: pcre zlib - # Build and install latest GMT from GitHub - - name: Install GMT ${{ matrix.gmt_git_ref }} branch (Linux/macOS) - run: curl https://raw.githubusercontent.com/GenericMappingTools/gmt/master/ci/build-gmt.sh | bash + # Checkout current GMT repository + - name: Checkout the GMT source from ${{ matrix.gmt_git_ref }} branch + uses: actions/checkout@v4.1.4 + with: + repository: 'GenericMappingTools/gmt' + ref: ${{ matrix.gmt_git_ref }} + path: 'gmt' + + # Build GMT from source on Linux/macOS, script is adapted from + # https://github.com/GenericMappingTools/gmt/blob/6.5.0/ci/build-gmt.sh + - name: Build GMT on Linux/macOS + run: | + if [ "$RUNNER_OS" == "macOS" ]; then + GMT_ENABLE_OPENMP=FALSE + else + GMT_ENABLE_OPENMP=TRUE + fi + cd gmt/ + mkdir build + cd build + cmake -G Ninja .. \ + -DCMAKE_INSTALL_PREFIX=${{ env.GMT_INSTALL_DIR }} \ + -DCMAKE_BUILD_TYPE=Release \ + -DGMT_ENABLE_OPENMP=${GMT_ENABLE_OPENMP} \ + -DGMT_USE_THREADS=TRUE + cmake --build . + cmake --build . --target install + cd .. + rm -rf gmt/ env: - GMT_GIT_REF: ${{ matrix.gmt_git_ref }} GMT_INSTALL_DIR: ${{ runner.temp }}/gmt-install-dir if: runner.os != 'Windows' - - name: Install GMT ${{ matrix.gmt_git_ref }} branch (Windows) + - name: Build GMT on Windows shell: cmd run: | - git clone --depth=1 --single-branch --branch ${{ env.GMT_GIT_REF }} https://github.com/GenericMappingTools/gmt cd gmt/ mkdir build cd build @@ -114,7 +137,6 @@ jobs: cd .. rm -rf gmt/ env: - GMT_GIT_REF: ${{ matrix.gmt_git_ref }} GMT_INSTALL_DIR: ${{ runner.temp }}/gmt-install-dir if: runner.os == 'Windows' From e0772a5817eef5f5fb60c733fdb8553296387108 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 09:00:38 +1200 Subject: [PATCH 139/218] Bump actions/checkout from 4.1.4 to 4.1.6 (#3263) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- .github/workflows/cache_data.yaml | 2 +- .github/workflows/check-links.yml | 4 ++-- .github/workflows/ci_docs.yml | 4 ++-- .github/workflows/ci_doctests.yaml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 4 ++-- .github/workflows/ci_tests_legacy.yaml | 2 +- .github/workflows/dvc-diff.yml | 2 +- .github/workflows/format-command.yml | 2 +- .github/workflows/publish-to-pypi.yml | 2 +- .github/workflows/release-baseline-images.yml | 2 +- .github/workflows/style_checks.yaml | 2 +- .github/workflows/type_checks.yml | 2 +- 14 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 93fc26fbc8c..5e09986f3ff 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -34,7 +34,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/cache_data.yaml b/.github/workflows/cache_data.yaml index 9fd1758e5ee..b90f9478586 100644 --- a/.github/workflows/cache_data.yaml +++ b/.github/workflows/cache_data.yaml @@ -36,7 +36,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 8714cfcc3e7..3f95532e415 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -23,12 +23,12 @@ jobs: steps: - name: Checkout the repository - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: path: repository - name: Checkout the documentation - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: ref: gh-pages path: documentation diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index ae16cdad084..dc00a33b034 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -69,7 +69,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that setuptools-scm works fetch-depth: 0 @@ -138,7 +138,7 @@ jobs: run: make -C doc clean all - name: Checkout the gh-pages branch - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: ref: gh-pages # Checkout to this folder instead of the current one diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index 12763753120..2187e06cade 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -35,7 +35,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 42046dcbd39..66ed3d96ff5 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -94,7 +94,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff 
--git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index ef95f2c9f05..34dd89fb8fd 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -46,7 +46,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that setuptools-scm works fetch-depth: 0 @@ -88,7 +88,7 @@ jobs: # Checkout current GMT repository - name: Checkout the GMT source from ${{ matrix.gmt_git_ref }} branch - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4.1.6 with: repository: 'GenericMappingTools/gmt' ref: ${{ matrix.gmt_git_ref }} diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index 00fea79f379..b52cf2dd4da 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -44,7 +44,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/dvc-diff.yml b/.github/workflows/dvc-diff.yml index 25c92fbe235..8cdd04bdf93 100644 --- a/.github/workflows/dvc-diff.yml +++ b/.github/workflows/dvc-diff.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that dvc diff works fetch-depth: 0 diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 70f88f407f2..24a9da1d578 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -18,7 +18,7 @@ jobs: private-key: ${{ secrets.APP_PRIVATE_KEY }} # Checkout the pull request branch - - uses: actions/checkout@v4.1.5 + - uses: actions/checkout@v4.1.6 with: token: ${{ steps.generate-token.outputs.token }} repository: ${{ github.event.client_payload.pull_request.head.repo.full_name }} diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index fbe8e554b50..266261d01e3 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -45,7 +45,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/release-baseline-images.yml b/.github/workflows/release-baseline-images.yml index 89ca51f265d..38f20accafb 100644 --- a/.github/workflows/release-baseline-images.yml +++ b/.github/workflows/release-baseline-images.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 - name: Setup data version control (DVC) uses: iterative/setup-dvc@v1.1.2 diff --git a/.github/workflows/style_checks.yaml b/.github/workflows/style_checks.yaml index 442b7cb8dcb..31914a4e9d4 100644 --- a/.github/workflows/style_checks.yaml +++ b/.github/workflows/style_checks.yaml @@ -24,7 +24,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + uses: actions/checkout@v4.1.6 # Setup Python - name: Set up Python diff --git a/.github/workflows/type_checks.yml b/.github/workflows/type_checks.yml index b5fa9c2f9b1..d423ba7719f 100644 --- a/.github/workflows/type_checks.yml +++ b/.github/workflows/type_checks.yml @@ -33,7 +33,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.5 + 
uses: actions/checkout@v4.1.6 # Setup Python - name: Set up Python From 6513ac5f2eb88b543f45c3e949c937bfd747c1f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 09:01:03 +1200 Subject: [PATCH 140/218] Bump codecov/codecov-action from 4.4.0 to 4.4.1 (#3264) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 66ed3d96ff5..b24173ed92e 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -169,7 +169,7 @@ jobs: # Upload coverage to Codecov - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4.4.0 + uses: codecov/codecov-action@v4.4.1 if: success() || failure() with: use_oidc: true From 4862bf6eb22ceafae71c936fa08e4f336f4ce377 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 22 May 2024 09:23:00 +0800 Subject: [PATCH 141/218] Update tests for earth relief v2.6 (#3265) --- pygmt/tests/test_datasets_earth_relief.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pygmt/tests/test_datasets_earth_relief.py b/pygmt/tests/test_datasets_earth_relief.py index 48ce69996ba..129e1a4629e 100644 --- a/pygmt/tests/test_datasets_earth_relief.py +++ b/pygmt/tests/test_datasets_earth_relief.py @@ -65,7 +65,7 @@ def test_earth_relief_01d_with_region_srtm(): assert data.gmt.registration == 0 npt.assert_allclose(data.lat, np.arange(-5, 6, 1)) npt.assert_allclose(data.lon, np.arange(-10, 11, 1)) - npt.assert_allclose(data.min(), -5136.0, atol=0.5) + npt.assert_allclose(data.min(), -5118.0, atol=0.5) npt.assert_allclose(data.max(), 680.5, atol=0.5) @@ -189,7 +189,7 @@ def test_earth_relief_15s_default_registration(): npt.assert_allclose(data.coords["lon"].data.min(), -9.997917) npt.assert_allclose(data.coords["lon"].data.max(), -9.502083) npt.assert_allclose(data.min(), -3897, atol=0.5) - npt.assert_allclose(data.max(), -71, atol=0.5) + npt.assert_allclose(data.max(), -76.5, atol=0.5) @pytest.mark.xfail( From 176693ca3fef722961681fb5a4b02a491f7bdd6c Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 22 May 2024 23:16:43 +0800 Subject: [PATCH 142/218] clib.Session: Refactor the __getitem__ special method to avoid calling API function GMT_Get_Enum repeatedly (#3261) --- pygmt/clib/session.py | 49 +++++++++++++++++++++++++++------------- pygmt/tests/test_clib.py | 14 +++++------- 2 files changed, 39 insertions(+), 24 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 5648bf00c6e..b0aaff44ec3 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -91,6 +91,8 @@ np.datetime64: "GMT_DATETIME", np.timedelta64: "GMT_LONG", } +# Dictionary for storing the values of GMT constants. +GMT_CONSTANTS = {} # Load the GMT library outside the Session class to avoid repeated loading. _libgmt = load_libgmt() @@ -239,23 +241,41 @@ def __exit__(self, exc_type, exc_value, traceback): """ self.destroy() - def __getitem__(self, name): + def __getitem__(self, name: str) -> int: + """ + Get the value of a GMT constant. + + Parameters + ---------- + name + The name of the constant (e.g., ``"GMT_SESSION_EXTERNAL"``). 
+ + Returns + ------- + value + Integer value of the constant. Do not rely on this value because it might + change. + """ + if name not in GMT_CONSTANTS: + GMT_CONSTANTS[name] = self.get_enum(name) + return GMT_CONSTANTS[name] + + def get_enum(self, name: str) -> int: """ Get the value of a GMT constant (C enum) from gmt_resources.h. - Used to set configuration values for other API calls. Wraps - ``GMT_Get_Enum``. + Used to set configuration values for other API calls. Wraps ``GMT_Get_Enum``. Parameters ---------- - name : str - The name of the constant (e.g., ``"GMT_SESSION_EXTERNAL"``) + name + The name of the constant (e.g., ``"GMT_SESSION_EXTERNAL"``). Returns ------- - constant : int - Integer value of the constant. Do not rely on this value because it - might change. + value + Integer value of the constant. Do not rely on this value because it might + change. Raises ------ @@ -266,18 +286,15 @@ def __getitem__(self, name): "GMT_Get_Enum", argtypes=[ctp.c_void_p, ctp.c_char_p], restype=ctp.c_int ) - # The C lib introduced the void API pointer to GMT_Get_Enum so that - # it's consistent with other functions. It doesn't use the pointer so - # we can pass in None (NULL pointer). We can't give it the actual - # pointer because we need to call GMT_Get_Enum when creating a new API - # session pointer (chicken-and-egg type of thing). + # The C library introduced the void API pointer to GMT_Get_Enum so that it's + # consistent with other functions. It doesn't use the pointer so we can pass + # in None (NULL pointer). We can't give it the actual pointer because we need + # to call GMT_Get_Enum when creating a new API session pointer (chicken-and-egg + # type of thing). session = None - value = c_get_enum(session, name.encode()) - if value is None or value == -99999: raise GMTCLibError(f"Constant '{name}' doesn't exist in libgmt.") - return value def get_libgmt_func(self, name, argtypes=None, restype=None): diff --git a/pygmt/tests/test_clib.py b/pygmt/tests/test_clib.py index 8f7c10c8728..c2732de91d7 100644 --- a/pygmt/tests/test_clib.py +++ b/pygmt/tests/test_clib.py @@ -65,15 +65,13 @@ def mock_get_libgmt_func(name, argtypes=None, restype=None): def test_getitem(): """ - Test that I can get correct constants from the C lib. + Test getting the GMT constants from the C library. 
""" - ses = clib.Session() - assert ses["GMT_SESSION_EXTERNAL"] != -99999 - assert ses["GMT_MODULE_CMD"] != -99999 - assert ses["GMT_PAD_DEFAULT"] != -99999 - assert ses["GMT_DOUBLE"] != -99999 - with pytest.raises(GMTCLibError): - ses["A_WHOLE_LOT_OF_JUNK"] + with clib.Session() as lib: + for name in ["GMT_SESSION_EXTERNAL", "GMT_MODULE_CMD", "GMT_DOUBLE"]: + assert lib[name] != -99999 + with pytest.raises(GMTCLibError): + lib["A_WHOLE_LOT_OF_JUNK"] def test_create_destroy_session(): From 5396e1e2664f8a372a50eccbcd517c89aab9ddbb Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 23 May 2024 06:39:16 +0800 Subject: [PATCH 143/218] CI: Set GMT_ENABLE_OPENMP to TRUE to enable OpenMP support on macOS (#3266) --- .github/workflows/ci_tests_dev.yaml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 34dd89fb8fd..08707583abb 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -98,18 +98,13 @@ jobs: # https://github.com/GenericMappingTools/gmt/blob/6.5.0/ci/build-gmt.sh - name: Build GMT on Linux/macOS run: | - if [ "$RUNNER_OS" == "macOS" ]; then - GMT_ENABLE_OPENMP=FALSE - else - GMT_ENABLE_OPENMP=TRUE - fi cd gmt/ mkdir build cd build cmake -G Ninja .. \ -DCMAKE_INSTALL_PREFIX=${{ env.GMT_INSTALL_DIR }} \ -DCMAKE_BUILD_TYPE=Release \ - -DGMT_ENABLE_OPENMP=${GMT_ENABLE_OPENMP} \ + -DGMT_ENABLE_OPENMP=TRUE \ -DGMT_USE_THREADS=TRUE cmake --build . cmake --build . --target install From 7bd57d57a5cf045536c9eedc3805c0359f18ed2e Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 23 May 2024 06:39:31 +0800 Subject: [PATCH 144/218] CI: Remove pytest-xdist and pytest-rerunfailures options from addopts and only add them in CI jobs (#3267) --- .github/workflows/benchmarks.yml | 6 +++--- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- environment.yml | 2 -- pyproject.toml | 2 +- 5 files changed, 6 insertions(+), 8 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 5e09986f3ff..06a2fa8b731 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -69,9 +69,9 @@ jobs: pytest pytest-codspeed pytest-mpl - pytest-rerunfailures + pytest-rerunfailures pytest-xdist - + # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub run: | @@ -93,4 +93,4 @@ jobs: with: # 'bash -el -c' is needed to use the custom shell. # See https://github.com/CodSpeedHQ/action/issues/65. 
- run: bash -el -c "python -c \"import pygmt; pygmt.show_versions()\"; PYGMT_USE_EXTERNAL_DISPLAY=false python -m pytest -r P --pyargs pygmt --codspeed" + run: bash -el -c "python -c \"import pygmt; pygmt.show_versions()\"; PYGMT_USE_EXTERNAL_DISPLAY=false python -m pytest -r P -n auto --reruns 2 --pyargs pygmt --codspeed" diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index b24173ed92e..ce55888c7d8 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -157,7 +157,7 @@ jobs: # Run the regular tests - name: Run tests - run: make test PYTEST_EXTRA="-r P" + run: make test PYTEST_EXTRA="-r P -n auto --reruns 2" # Upload diff images on test failure - name: Upload diff images if any test fails diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 08707583abb..1e783752dea 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -173,7 +173,7 @@ jobs: # Run the tests - name: Test with pytest - run: make test PYTEST_EXTRA="-r P" + run: make test PYTEST_EXTRA="-r P -n auto --reruns 2" env: GMT_LIBRARY_PATH: ${{ runner.temp }}/gmt-install-dir/lib diff --git a/environment.yml b/environment.yml index ad92914bda9..88342d2c64f 100644 --- a/environment.yml +++ b/environment.yml @@ -32,8 +32,6 @@ dependencies: - pytest-cov - pytest-doctestplus - pytest-mpl - - pytest-rerunfailures - - pytest-xdist # Dev dependencies (building documentation) - myst-parser - panel diff --git a/pyproject.toml b/pyproject.toml index aa09cd11e92..773bf07f5e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -162,7 +162,7 @@ max-args=10 [tool.pytest.ini_options] minversion = "6.0" -addopts = "--verbose --durations=0 --durations-min=0.2 --doctest-modules --mpl --mpl-results-path=results -n auto --reruns 2" +addopts = "--verbose --durations=0 --durations-min=0.2 --doctest-modules --mpl --mpl-results-path=results" markers = [ "benchmark: mark a test with custom benchmark settings.", ] From 5a9ac8edfb8aa3053640a27f5e52992d3bf42720 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 23 May 2024 13:43:55 +0800 Subject: [PATCH 145/218] CI: Bump to mambaforge-22.9 in ReadTheDocs (#3268) --- .readthedocs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 5b4448647d1..b3c3a1a3e06 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -8,7 +8,7 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "mambaforge-4.10" + python: "mambaforge-22.9" jobs: post_checkout: # Cancel building pull requests when there aren't changes related to docs. 
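The clib.Session.__getitem__ refactor a few patches back (#3261) avoids repeated GMT_Get_Enum calls by memoizing constants in a module-level dict. A rough sketch of that caching pattern, with made-up enum values standing in for the real ctypes lookup:

    # Hypothetical values standing in for what GMT_Get_Enum would return.
    _FAKE_ENUMS = {"GMT_SESSION_EXTERNAL": 2, "GMT_MODULE_CMD": 0, "GMT_DOUBLE": 8}

    GMT_CONSTANTS: dict[str, int] = {}  # module-level cache shared by all sessions


    class Session:
        def get_enum(self, name: str) -> int:
            """Pretend this call crosses into the C library every time."""
            print(f"querying the C API for {name}")
            return _FAKE_ENUMS[name]

        def __getitem__(self, name: str) -> int:
            if name not in GMT_CONSTANTS:  # only the first lookup hits the C API
                GMT_CONSTANTS[name] = self.get_enum(name)
            return GMT_CONSTANTS[name]


    lib = Session()
    lib["GMT_DOUBLE"]  # queries the C API once
    lib["GMT_DOUBLE"]  # served from the cache, no further C API call
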
From a13fc5e55a950bc8abe68a1984b7f2558a14baca Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 23 May 2024 15:19:15 +0800 Subject: [PATCH 146/218] CI: Bump to Ghostscript 10.03.1 (#3269) --- .github/workflows/ci_docs.yml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- ci/requirements/docs.yml | 2 +- environment.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index dc00a33b034..32e8b66ba4f 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -94,7 +94,7 @@ jobs: create-args: >- python=3.12 gmt=6.5.0 - ghostscript=10.03.0 + ghostscript=10.03.1 numpy pandas xarray diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index ce55888c7d8..df8f9df397b 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -119,7 +119,7 @@ jobs: create-args: >- python=${{ matrix.python-version }}${{ matrix.optional-packages }} gmt=6.5.0 - ghostscript=10.03.0 + ghostscript=10.03.1 numpy=${{ matrix.numpy-version }} pandas${{ matrix.pandas-version }} xarray${{ matrix.xarray-version }} diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 1e783752dea..38751afb768 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -75,7 +75,7 @@ jobs: ninja curl fftw - ghostscript=10.03.0 + ghostscript=10.03.1 glib hdf5 libblas diff --git a/ci/requirements/docs.yml b/ci/requirements/docs.yml index 7666f2aa09a..414f1419d44 100644 --- a/ci/requirements/docs.yml +++ b/ci/requirements/docs.yml @@ -6,7 +6,7 @@ dependencies: # Required dependencies - python=3.12 - gmt=6.5.0 - - ghostscript=10.03.0 + - ghostscript=10.03.1 - numpy - pandas - xarray diff --git a/environment.yml b/environment.yml index 88342d2c64f..57a4799442e 100644 --- a/environment.yml +++ b/environment.yml @@ -6,7 +6,7 @@ dependencies: - python=3.12 # Required dependencies - gmt=6.5.0 - - ghostscript=10.03.0 + - ghostscript=10.03.1 - numpy>=1.23 - pandas>=1.5 - xarray>=2022.06 From 897444a380042c0924a84f3c8195d0284c69cba8 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 25 May 2024 08:35:57 +0800 Subject: [PATCH 147/218] pygmt.datasets.load_*: Add autocompletion support for the 'resolution' parameter (#3260) --- pygmt/datasets/earth_age.py | 12 ++++----- pygmt/datasets/earth_free_air_anomaly.py | 12 ++++----- pygmt/datasets/earth_geoid.py | 12 ++++----- pygmt/datasets/earth_magnetic_anomaly.py | 12 ++++----- pygmt/datasets/earth_mask.py | 24 ++++++++++++----- pygmt/datasets/earth_relief.py | 27 ++++++++++++++----- .../earth_vertical_gravity_gradient.py | 12 ++++----- pygmt/datasets/mars_relief.py | 23 ++++++++++++---- pygmt/datasets/mercury_relief.py | 21 +++++++++++---- pygmt/datasets/moon_relief.py | 23 ++++++++++++---- pygmt/datasets/pluto_relief.py | 21 +++++++++++---- pygmt/datasets/venus_relief.py | 9 ++++--- 12 files changed, 141 insertions(+), 67 deletions(-) diff --git a/pygmt/datasets/earth_age.py b/pygmt/datasets/earth_age.py index 03e81d703d9..ef45eac1bec 100644 --- a/pygmt/datasets/earth_age.py +++ b/pygmt/datasets/earth_age.py @@ -15,7 +15,9 @@ @kwargs_to_strings(region="sequence") def load_earth_age( - resolution="01d", + resolution: Literal[ + "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" + ] = "01d", region=None, registration: Literal["gridline", "pixel"] = "gridline", ): @@ -51,11 +53,9 @@ def load_earth_age( Parameters 
---------- - resolution : str - The grid resolution. The suffix ``d`` and ``m`` stand for - arc-degrees and arc-minutes. It can be ``"01d"``, ``"30m"``, - ``"20m"``, ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, - ``"03m"``, ``"02m"``, or ``"01m"``. + resolution + The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and + arc-minutes. region : str or list The subregion of the grid to load, in the form of a list diff --git a/pygmt/datasets/earth_free_air_anomaly.py b/pygmt/datasets/earth_free_air_anomaly.py index 350f54ba831..56ac3fbe324 100644 --- a/pygmt/datasets/earth_free_air_anomaly.py +++ b/pygmt/datasets/earth_free_air_anomaly.py @@ -15,7 +15,9 @@ @kwargs_to_strings(region="sequence") def load_earth_free_air_anomaly( - resolution="01d", + resolution: Literal[ + "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" + ] = "01d", region=None, registration: Literal["gridline", "pixel", None] = None, ): @@ -51,11 +53,9 @@ def load_earth_free_air_anomaly( Parameters ---------- - resolution : str - The grid resolution. The suffix ``d`` and ``m`` stand for - arc-degrees and arc-minutes. It can be ``"01d"``, ``"30m"``, - ``"20m"``, ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, - ``"03m"``, ``"02m"``, or ``"01m"``. + resolution + The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and + arc-minutes. region : str or list The subregion of the grid to load, in the form of a list diff --git a/pygmt/datasets/earth_geoid.py b/pygmt/datasets/earth_geoid.py index 5190eb01413..acae0c527b3 100644 --- a/pygmt/datasets/earth_geoid.py +++ b/pygmt/datasets/earth_geoid.py @@ -15,7 +15,9 @@ @kwargs_to_strings(region="sequence") def load_earth_geoid( - resolution="01d", + resolution: Literal[ + "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" + ] = "01d", region=None, registration: Literal["gridline", "pixel"] = "gridline", ): @@ -44,11 +46,9 @@ def load_earth_geoid( Parameters ---------- - resolution : str - The grid resolution. The suffix ``d`` and ``m`` stand for - arc-degrees and arc-minutes. It can be ``"01d"``, ``"30m"``, - ``"20m"``, ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, - ``"03m"``, ``"02m"``, or ``"01m"``. + resolution + The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and + arc-minutes. region : str or list The subregion of the grid to load, in the form of a list diff --git a/pygmt/datasets/earth_magnetic_anomaly.py b/pygmt/datasets/earth_magnetic_anomaly.py index b614edd1c81..869d272736a 100644 --- a/pygmt/datasets/earth_magnetic_anomaly.py +++ b/pygmt/datasets/earth_magnetic_anomaly.py @@ -16,7 +16,9 @@ @kwargs_to_strings(region="sequence") def load_earth_magnetic_anomaly( - resolution="01d", + resolution: Literal[ + "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m" + ] = "01d", region=None, registration: Literal["gridline", "pixel", None] = None, data_source: Literal["emag2", "emag2_4km", "wdmam"] = "emag2", @@ -64,11 +66,9 @@ def load_earth_magnetic_anomaly( Parameters ---------- - resolution : str - The grid resolution. The suffix ``d`` and ``m`` stand for - arc-degrees and arc-minutes. It can be ``"01d"``, ``"30m"``, - ``"20m"``, ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, - ``"03m"``, or ``"02m"``. The ``"02m"`` resolution is not available for + resolution + The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and + arc-minutes. The resolution ``"02m"`` is not available for ``data_source="wdmam"``. 
region : str or list diff --git a/pygmt/datasets/earth_mask.py b/pygmt/datasets/earth_mask.py index 53ef5ecb29d..b2c9ef40b5c 100644 --- a/pygmt/datasets/earth_mask.py +++ b/pygmt/datasets/earth_mask.py @@ -15,7 +15,21 @@ @kwargs_to_strings(region="sequence") def load_earth_mask( - resolution="01d", + resolution: Literal[ + "01d", + "30m", + "20m", + "15m", + "10m", + "06m", + "05m", + "04m", + "03m", + "02m", + "01m", + "30s", + "15s", + ] = "01d", region=None, registration: Literal["gridline", "pixel"] = "gridline", ): @@ -44,11 +58,9 @@ def load_earth_mask( Parameters ---------- - resolution : str - The grid resolution. The suffix ``d``, ``m``, and ``s`` stand for - arc-degrees, arc-minutes, and arc-seconds. It can be ``"01d"``, - ``"30m"``, ``"20m"``, ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, - ``"04m"``, ``"03m"``, ``"02m"``, ``"01m"``, ``"30s"``, or ``"15s"``. + resolution + The grid resolution. The suffix ``d``, ``m``, and ``s`` stand for arc-degrees, + arc-minutes, and arc-seconds. region : str or list The subregion of the grid to load, in the form of a list diff --git a/pygmt/datasets/earth_relief.py b/pygmt/datasets/earth_relief.py index 38461137e7c..969e3d89b60 100644 --- a/pygmt/datasets/earth_relief.py +++ b/pygmt/datasets/earth_relief.py @@ -16,7 +16,23 @@ @kwargs_to_strings(region="sequence") def load_earth_relief( - resolution="01d", + resolution: Literal[ + "01d", + "30m", + "20m", + "15m", + "10m", + "06m", + "05m", + "04m", + "03m", + "02m", + "01m", + "30s", + "15s", + "03s", + "01s", + ] = "01d", region=None, registration: Literal["gridline", "pixel", None] = None, data_source: Literal["igpp", "gebco", "gebcosi", "synbath"] = "igpp", @@ -58,12 +74,9 @@ def load_earth_relief( Parameters ---------- - resolution : str - The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for - arc-degrees, arc-minutes, and arc-seconds. It can be ``"01d"``, - ``"30m"``, ``"20m"``, ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, - ``"04m"``, ``"03m"``, ``"02m"``, ``"01m"``, ``"30s"``, ``"15s"``, - ``"03s"``, or ``"01s"``. + resolution + The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, + arc-minutes, and arc-seconds. region : str or list The subregion of the grid to load, in the form of a list diff --git a/pygmt/datasets/earth_vertical_gravity_gradient.py b/pygmt/datasets/earth_vertical_gravity_gradient.py index 502f3eec913..86ef5be5550 100644 --- a/pygmt/datasets/earth_vertical_gravity_gradient.py +++ b/pygmt/datasets/earth_vertical_gravity_gradient.py @@ -15,7 +15,9 @@ @kwargs_to_strings(region="sequence") def load_earth_vertical_gravity_gradient( - resolution="01d", + resolution: Literal[ + "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" + ] = "01d", region=None, registration: Literal["gridline", "pixel", None] = None, ): @@ -51,11 +53,9 @@ def load_earth_vertical_gravity_gradient( Parameters ---------- - resolution : str - The grid resolution. The suffix ``d`` and ``m`` stand for - arc-degrees and arc-minutes. It can be ``"01d"``, ``"30m"``, - ``"20m"``, ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, - ``"03m"``, ``"02m"``, or ``"01m"``. + resolution + The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and + arc-minutes. 
region : str or list The subregion of the grid to load, in the form of a list diff --git a/pygmt/datasets/mars_relief.py b/pygmt/datasets/mars_relief.py index 750d8bdbaa5..b7ffb8971cb 100644 --- a/pygmt/datasets/mars_relief.py +++ b/pygmt/datasets/mars_relief.py @@ -15,7 +15,22 @@ @kwargs_to_strings(region="sequence") def load_mars_relief( - resolution="01d", + resolution: Literal[ + "01d", + "30m", + "20m", + "15m", + "10m", + "06m", + "05m", + "04m", + "03m", + "02m", + "01m", + "30s", + "15s", + "12s", + ] = "01d", region=None, registration: Literal["gridline", "pixel", None] = None, ): @@ -50,11 +65,9 @@ def load_mars_relief( Parameters ---------- - resolution : str + resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, - arc-minutes and arc-seconds. It can be ``"01d"``, ``"30m"``, ``"20m"``, - ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, ``"03m"``, ``"02m"``, - ``"01m"``, ``"30s"``, ``"15s"``, and ``"12s"``. + arc-minutes and arc-seconds. region : str or list The subregion of the grid to load, in the form of a list [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. Required for diff --git a/pygmt/datasets/mercury_relief.py b/pygmt/datasets/mercury_relief.py index ee753f8a7a0..4764408c7ef 100644 --- a/pygmt/datasets/mercury_relief.py +++ b/pygmt/datasets/mercury_relief.py @@ -15,7 +15,20 @@ @kwargs_to_strings(region="sequence") def load_mercury_relief( - resolution="01d", + resolution: Literal[ + "01d", + "30m", + "20m", + "15m", + "10m", + "06m", + "05m", + "04m", + "03m", + "02m", + "01m", + "56s", + ] = "01d", region=None, registration: Literal["gridline", "pixel", None] = None, ): @@ -50,11 +63,9 @@ def load_mercury_relief( Parameters ---------- - resolution : str + resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, - arc-minutes and arc-seconds. It can be ``"01d"``, ``"30m"``, ``"20m"``, - ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, ``"03m"``, ``"02m"``, - ``"01m"``, and ``"56s"``. + arc-minutes and arc-seconds. region : str or list The subregion of the grid to load, in the form of a list [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. Required for diff --git a/pygmt/datasets/moon_relief.py b/pygmt/datasets/moon_relief.py index 522632476bb..22809f6a281 100644 --- a/pygmt/datasets/moon_relief.py +++ b/pygmt/datasets/moon_relief.py @@ -15,7 +15,22 @@ @kwargs_to_strings(region="sequence") def load_moon_relief( - resolution="01d", + resolution: Literal[ + "01d", + "30m", + "20m", + "15m", + "10m", + "06m", + "05m", + "04m", + "03m", + "02m", + "01m", + "30s", + "15s", + "14s", + ] = "01d", region=None, registration: Literal["gridline", "pixel", None] = None, ): @@ -50,11 +65,9 @@ def load_moon_relief( Parameters ---------- - resolution : str + resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, - arc-minutes and arc-seconds. It can be ``"01d"``, ``"30m"``, ``"20m"``, - ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, ``"03m"``, ``"02m"``, - ``"01m"``, ``"30s"``, ``"15s"``, and ``"14s"``. + arc-minutes and arc-seconds. region : str or list The subregion of the grid to load, in the form of a list [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. 
Required for diff --git a/pygmt/datasets/pluto_relief.py b/pygmt/datasets/pluto_relief.py index b3cc7a428c9..1b25ac910ea 100644 --- a/pygmt/datasets/pluto_relief.py +++ b/pygmt/datasets/pluto_relief.py @@ -15,7 +15,20 @@ @kwargs_to_strings(region="sequence") def load_pluto_relief( - resolution="01d", + resolution: Literal[ + "01d", + "30m", + "20m", + "15m", + "10m", + "06m", + "05m", + "04m", + "03m", + "02m", + "01m", + "52s", + ] = "01d", region=None, registration: Literal["gridline", "pixel", None] = None, ): @@ -50,11 +63,9 @@ def load_pluto_relief( Parameters ---------- - resolution : str + resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, - arc-minutes and arc-seconds. It can be ``"01d"``, ``"30m"``, ``"20m"``, - ``"15m"``, ``"10m"``, ``"06m"``, ``"05m"``, ``"04m"``, ``"03m"``, ``"02m"``, - ``"01m"``, and ``"52s"``. + arc-minutes and arc-seconds. region : str or list The subregion of the grid to load, in the form of a list [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. Required for diff --git a/pygmt/datasets/venus_relief.py b/pygmt/datasets/venus_relief.py index 172d9e2a588..e86eed40d95 100644 --- a/pygmt/datasets/venus_relief.py +++ b/pygmt/datasets/venus_relief.py @@ -15,7 +15,9 @@ @kwargs_to_strings(region="sequence") def load_venus_relief( - resolution="01d", + resolution: Literal[ + "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" + ] = "01d", region=None, registration: Literal["gridline", "pixel"] = "gridline", ): @@ -50,10 +52,9 @@ def load_venus_relief( Parameters ---------- - resolution : str + resolution The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and - arc-minutes. It can be ``"01d"``, ``"30m"``, ``"20m"``, ``"15m"``, ``"10m"``, - ``"06m"``, ``"05m"``, ``"04m"``, ``"03m"``, ``"02m"``, and ``"01m"``. + arc-minutes. region : str or list The subregion of the grid to load, in the form of a list [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. 
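The Literal type hints added to the load_* functions above (#3260) are what give editors autocompletion for the resolution strings. A small illustration of the idea with a made-up loader; Resolution and load_grid are example names, not PyGMT API:

    from typing import Literal, get_args

    Resolution = Literal["01d", "30m", "20m", "15m", "10m", "06m", "05m"]


    def load_grid(resolution: Resolution = "01d") -> str:
        """Toy loader: IDEs offer the allowed strings, type checkers reject others."""
        if resolution not in get_args(Resolution):  # optional runtime guard
            raise ValueError(f"Invalid resolution: {resolution}")
        return f"loading grid at {resolution}"


    print(load_grid("05m"))  # OK
    # load_grid("99x")  # flagged by mypy/pyright and rejected at runtime
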
From 466c8b6b45d7d6aa202077d91764bd2ab8ed7324 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 27 May 2024 12:38:34 +0800 Subject: [PATCH 148/218] CI: Add the dvc '--no-run-cache' option to avoid RunCacheNotSupported error (#3273) --- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index df8f9df397b..7ebd6195886 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -149,7 +149,7 @@ jobs: # Pull baseline image data from dvc remote (DAGsHub) - name: Pull baseline image data from dvc remote - run: dvc pull --verbose && ls -lhR pygmt/tests/baseline/ + run: dvc pull --no-run-cache --verbose && ls -lhR pygmt/tests/baseline/ # Install the package that we want to test - name: Install the package diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 38751afb768..35651d96cfe 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -154,7 +154,7 @@ jobs: # Pull baseline image data from dvc remote (DAGsHub) - name: Pull baseline image data from dvc remote - run: dvc pull --verbose && ls -lhR pygmt/tests/baseline/ + run: dvc pull --no-run-cache --verbose && ls -lhR pygmt/tests/baseline/ # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub From 14082d8a8ffbcf1f793279969d8f0449ad41aeea Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 28 May 2024 07:50:06 +0800 Subject: [PATCH 149/218] pygmt.dataset.load_*: Add type hints for the 'region' parameter (#3272) --- pygmt/datasets/earth_age.py | 16 +++++--------- pygmt/datasets/earth_free_air_anomaly.py | 16 +++++--------- pygmt/datasets/earth_geoid.py | 16 +++++--------- pygmt/datasets/earth_magnetic_anomaly.py | 17 +++++--------- pygmt/datasets/earth_mask.py | 13 +++++------ pygmt/datasets/earth_relief.py | 22 +++++++------------ .../earth_vertical_gravity_gradient.py | 16 +++++--------- pygmt/datasets/mars_relief.py | 13 +++++------ pygmt/datasets/mercury_relief.py | 13 +++++------ pygmt/datasets/moon_relief.py | 13 +++++------ pygmt/datasets/pluto_relief.py | 13 +++++------ pygmt/datasets/venus_relief.py | 13 +++++------ 12 files changed, 73 insertions(+), 108 deletions(-) diff --git a/pygmt/datasets/earth_age.py b/pygmt/datasets/earth_age.py index ef45eac1bec..1ee66513622 100644 --- a/pygmt/datasets/earth_age.py +++ b/pygmt/datasets/earth_age.py @@ -5,20 +5,19 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_earth_age"] -@kwargs_to_strings(region="sequence") def load_earth_age( resolution: Literal[ "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel"] = "gridline", ): r""" @@ -56,13 +55,10 @@ def load_earth_age( resolution The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and arc-minutes. - - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. - Required for grids with resolutions higher than 5 - arc-minutes (i.e., ``"05m"``). 
- + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. diff --git a/pygmt/datasets/earth_free_air_anomaly.py b/pygmt/datasets/earth_free_air_anomaly.py index 56ac3fbe324..0630c7f8a38 100644 --- a/pygmt/datasets/earth_free_air_anomaly.py +++ b/pygmt/datasets/earth_free_air_anomaly.py @@ -5,20 +5,19 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_earth_free_air_anomaly"] -@kwargs_to_strings(region="sequence") def load_earth_free_air_anomaly( resolution: Literal[ "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, ): r""" @@ -56,13 +55,10 @@ def load_earth_free_air_anomaly( resolution The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and arc-minutes. - - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. - Required for grids with resolutions higher than 5 - arc-minutes (i.e., ``"05m"``). - + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. Default is ``None``, means diff --git a/pygmt/datasets/earth_geoid.py b/pygmt/datasets/earth_geoid.py index acae0c527b3..f2a747b8602 100644 --- a/pygmt/datasets/earth_geoid.py +++ b/pygmt/datasets/earth_geoid.py @@ -5,20 +5,19 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_earth_geoid"] -@kwargs_to_strings(region="sequence") def load_earth_geoid( resolution: Literal[ "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel"] = "gridline", ): r""" @@ -49,13 +48,10 @@ def load_earth_geoid( resolution The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and arc-minutes. - - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. - Required for grids with resolutions higher than 5 - arc-minutes (i.e., ``"05m"``). - + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. 
diff --git a/pygmt/datasets/earth_magnetic_anomaly.py b/pygmt/datasets/earth_magnetic_anomaly.py index 869d272736a..b49bb29e2c8 100644 --- a/pygmt/datasets/earth_magnetic_anomaly.py +++ b/pygmt/datasets/earth_magnetic_anomaly.py @@ -5,21 +5,20 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_earth_magnetic_anomaly"] -@kwargs_to_strings(region="sequence") def load_earth_magnetic_anomaly( resolution: Literal[ "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m" ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, data_source: Literal["emag2", "emag2_4km", "wdmam"] = "emag2", ): @@ -70,20 +69,16 @@ def load_earth_magnetic_anomaly( The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and arc-minutes. The resolution ``"02m"`` is not available for ``data_source="wdmam"``. - - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. - Required for grids with resolutions higher than 5 - arc-minutes (i.e., ``"05m"``). - + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. Default is ``None``, means ``"gridline"`` for all resolutions except ``"02m"`` for ``data_source="emag2"`` or ``data_source="emag2_4km"``, which are ``"pixel"`` only. - data_source Select the source of the magnetic anomaly data. Available options are: diff --git a/pygmt/datasets/earth_mask.py b/pygmt/datasets/earth_mask.py index b2c9ef40b5c..22400a7369b 100644 --- a/pygmt/datasets/earth_mask.py +++ b/pygmt/datasets/earth_mask.py @@ -5,15 +5,14 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_earth_mask"] -@kwargs_to_strings(region="sequence") def load_earth_mask( resolution: Literal[ "01d", @@ -30,7 +29,7 @@ def load_earth_mask( "30s", "15s", ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel"] = "gridline", ): r""" @@ -61,11 +60,9 @@ def load_earth_mask( resolution The grid resolution. The suffix ``d``, ``m``, and ``s`` stand for arc-degrees, arc-minutes, and arc-seconds. - - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. - + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. diff --git a/pygmt/datasets/earth_relief.py b/pygmt/datasets/earth_relief.py index 969e3d89b60..823aa9ce43a 100644 --- a/pygmt/datasets/earth_relief.py +++ b/pygmt/datasets/earth_relief.py @@ -5,16 +5,15 @@ The grids are available in various resolutions. 
""" +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset from pygmt.exceptions import GMTInvalidInput -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_earth_relief"] -@kwargs_to_strings(region="sequence") def load_earth_relief( resolution: Literal[ "01d", @@ -33,10 +32,10 @@ def load_earth_relief( "03s", "01s", ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, data_source: Literal["igpp", "gebco", "gebcosi", "synbath"] = "igpp", - use_srtm=False, + use_srtm: bool = False, ): r""" Load the Earth relief datasets (topography and bathymetry) in various resolutions. @@ -77,19 +76,15 @@ def load_earth_relief( resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, arc-minutes, and arc-seconds. - - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. - Required for Earth relief grids with resolutions higher than 5 - arc-minutes (i.e., ``"05m"``). - + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. Default is ``None``, means ``"gridline"`` for all resolutions except ``"15s"`` which is ``"pixel"`` only. - data_source Select the source for the Earth relief data. Available options are: @@ -102,8 +97,7 @@ def load_earth_relief( inferred relief via altimetric gravity. See :gmt-datasets:`earth-gebco.html`. - ``"gebcosi"``: GEBCO Earth Relief that gives sub-ice (si) elevations. - - use_srtm : bool + use_srtm By default, the land-only SRTM tiles from NASA are used to generate the ``"03s"`` and ``"01s"`` grids, and the missing ocean values are filled by up-sampling the SRTM15 tiles which have a resolution of 15 diff --git a/pygmt/datasets/earth_vertical_gravity_gradient.py b/pygmt/datasets/earth_vertical_gravity_gradient.py index 86ef5be5550..71a6a649340 100644 --- a/pygmt/datasets/earth_vertical_gravity_gradient.py +++ b/pygmt/datasets/earth_vertical_gravity_gradient.py @@ -5,20 +5,19 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_earth_vertical_gravity_gradient"] -@kwargs_to_strings(region="sequence") def load_earth_vertical_gravity_gradient( resolution: Literal[ "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, ): r""" @@ -56,13 +55,10 @@ def load_earth_vertical_gravity_gradient( resolution The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and arc-minutes. - - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. - Required for grids with resolutions higher than 5 - arc-minutes (i.e., ``"05m"``). - + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. 
Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. Default is ``None``, means diff --git a/pygmt/datasets/mars_relief.py b/pygmt/datasets/mars_relief.py index b7ffb8971cb..49b317db308 100644 --- a/pygmt/datasets/mars_relief.py +++ b/pygmt/datasets/mars_relief.py @@ -5,15 +5,14 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_mars_relief"] -@kwargs_to_strings(region="sequence") def load_mars_relief( resolution: Literal[ "01d", @@ -31,7 +30,7 @@ def load_mars_relief( "15s", "12s", ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, ): r""" @@ -68,10 +67,10 @@ def load_mars_relief( resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, arc-minutes and arc-seconds. - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. Required for - grids with resolutions higher than 5 arc-minutes (i.e., ``"05m"``). + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. Default is ``None``, means diff --git a/pygmt/datasets/mercury_relief.py b/pygmt/datasets/mercury_relief.py index 4764408c7ef..05b6024cb47 100644 --- a/pygmt/datasets/mercury_relief.py +++ b/pygmt/datasets/mercury_relief.py @@ -5,15 +5,14 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_mercury_relief"] -@kwargs_to_strings(region="sequence") def load_mercury_relief( resolution: Literal[ "01d", @@ -29,7 +28,7 @@ def load_mercury_relief( "01m", "56s", ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, ): r""" @@ -66,10 +65,10 @@ def load_mercury_relief( resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, arc-minutes and arc-seconds. - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. Required for - grids with resolutions higher than 5 arc-minutes (i.e., ``"05m"``). + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. Default is ``None``, means diff --git a/pygmt/datasets/moon_relief.py b/pygmt/datasets/moon_relief.py index 22809f6a281..6c8a68e599e 100644 --- a/pygmt/datasets/moon_relief.py +++ b/pygmt/datasets/moon_relief.py @@ -5,15 +5,14 @@ The grids are available in various resolutions. 
""" +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_moon_relief"] -@kwargs_to_strings(region="sequence") def load_moon_relief( resolution: Literal[ "01d", @@ -31,7 +30,7 @@ def load_moon_relief( "15s", "14s", ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, ): r""" @@ -68,10 +67,10 @@ def load_moon_relief( resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, arc-minutes and arc-seconds. - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. Required for - grids with resolutions higher than 5 arc-minutes (i.e., ``"05m"``). + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. Default is ``None``, means diff --git a/pygmt/datasets/pluto_relief.py b/pygmt/datasets/pluto_relief.py index 1b25ac910ea..feb04ee83d3 100644 --- a/pygmt/datasets/pluto_relief.py +++ b/pygmt/datasets/pluto_relief.py @@ -5,15 +5,14 @@ The grids are available in various resolutions. """ +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_pluto_relief"] -@kwargs_to_strings(region="sequence") def load_pluto_relief( resolution: Literal[ "01d", @@ -29,7 +28,7 @@ def load_pluto_relief( "01m", "52s", ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, ): r""" @@ -66,10 +65,10 @@ def load_pluto_relief( resolution The grid resolution. The suffix ``d``, ``m`` and ``s`` stand for arc-degrees, arc-minutes and arc-seconds. - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. Required for - grids with resolutions higher than 5 arc-minutes (i.e., ``"05m"``). + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. Default is ``None``, means diff --git a/pygmt/datasets/venus_relief.py b/pygmt/datasets/venus_relief.py index e86eed40d95..c83c49843ba 100644 --- a/pygmt/datasets/venus_relief.py +++ b/pygmt/datasets/venus_relief.py @@ -5,20 +5,19 @@ The grids are available in various resolutions. 
""" +from collections.abc import Sequence from typing import Literal from pygmt.datasets.load_remote_dataset import _load_remote_dataset -from pygmt.helpers import kwargs_to_strings __doctest_skip__ = ["load_venus_relief"] -@kwargs_to_strings(region="sequence") def load_venus_relief( resolution: Literal[ "01d", "30m", "20m", "15m", "10m", "06m", "05m", "04m", "03m", "02m", "01m" ] = "01d", - region=None, + region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel"] = "gridline", ): r""" @@ -55,10 +54,10 @@ def load_venus_relief( resolution The grid resolution. The suffix ``d`` and ``m`` stand for arc-degrees and arc-minutes. - region : str or list - The subregion of the grid to load, in the form of a list - [*xmin*, *xmax*, *ymin*, *ymax*] or a string *xmin/xmax/ymin/ymax*. - Required for grids with resolutions higher than 5 arc-minutes (i.e., ``"05m"``). + region + The subregion of the grid to load, in the form of a sequence [*xmin*, *xmax*, + *ymin*, *ymax*] or an ISO country code. Required for grids with resolutions + higher than 5 arc-minutes (i.e., ``"05m"``). registration Grid registration type. Either ``"pixel"`` for pixel registration or ``"gridline"`` for gridline registration. From 09e23522f99f80b724a572850198a01aa0d986e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 May 2024 07:45:55 +0800 Subject: [PATCH 150/218] Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 (#3274) Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 1.8.1 to 1.9.0. - [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/v1.8.1...v1.9.0) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- .github/workflows/cache_data.yaml | 2 +- .github/workflows/ci_docs.yml | 2 +- .github/workflows/ci_doctests.yaml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 2 +- .github/workflows/ci_tests_legacy.yaml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 06a2fa8b731..3855fec7b55 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -45,7 +45,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.1 + uses: mamba-org/setup-micromamba@v1.9.0 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/cache_data.yaml b/.github/workflows/cache_data.yaml index b90f9478586..8e6c75c09fe 100644 --- a/.github/workflows/cache_data.yaml +++ b/.github/workflows/cache_data.yaml @@ -43,7 +43,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.1 + uses: mamba-org/setup-micromamba@v1.9.0 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 32e8b66ba4f..90257a814ec 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -80,7 +80,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.1 + uses: mamba-org/setup-micromamba@v1.9.0 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index 2187e06cade..a752bf853a3 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -42,7 +42,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.1 + uses: mamba-org/setup-micromamba@v1.9.0 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 7ebd6195886..5ce46dd2e93 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -105,7 +105,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.1 + uses: mamba-org/setup-micromamba@v1.9.0 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 35651d96cfe..4c2e9864f93 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -57,7 +57,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.1 + uses: mamba-org/setup-micromamba@v1.9.0 with: environment-name: pygmt condarc: | diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index b52cf2dd4da..fdcaf227216 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -51,7 +51,7 @@ jobs: # Install Micromamba with conda-forge dependencies - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1.8.1 + uses: mamba-org/setup-micromamba@v1.9.0 with: environment-name: pygmt condarc: | From a42be05fceac7b7a8ea6ccbdd68d11768952f2cd Mon Sep 17 00:00:00 2001 From: Wei Ji 
<23487320+weiji14@users.noreply.github.com> Date: Wed, 29 May 2024 17:03:15 +1200 Subject: [PATCH 151/218] Move pygmt.show_versions function to _show_versions.py (#3277) --- pygmt/__init__.py | 129 +------------------------------------ pygmt/_show_versions.py | 139 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 140 insertions(+), 128 deletions(-) create mode 100644 pygmt/_show_versions.py diff --git a/pygmt/__init__.py b/pygmt/__init__.py index efc0ea9baa5..dbf292e4f1f 100644 --- a/pygmt/__init__.py +++ b/pygmt/__init__.py @@ -19,15 +19,10 @@ """ import atexit as _atexit -import sys -from importlib.metadata import version - -# Get semantic version through setuptools-scm -__version__ = f'v{version("pygmt")}' # e.g. v0.1.2.dev3+g0ab3cd78 -__commit__ = __version__.split("+g")[-1] if "+g" in __version__ else "" # 0ab3cd78 # Import modules to make the high-level GMT Python API from pygmt import datasets +from pygmt._show_versions import __commit__, __version__, show_versions from pygmt.accessors import GMTDataArrayAccessor from pygmt.figure import Figure, set_display from pygmt.io import load_dataarray @@ -75,125 +70,3 @@ _begin() # Tell Python to run _end when shutting down _atexit.register(_end) - - -def show_versions(file=sys.stdout): - """ - Print various dependency versions which are useful when submitting bug reports. - - This includes information about: - - - PyGMT itself - - System information (Python version, Operating System) - - Core dependency versions (NumPy, Pandas, Xarray, etc) - - GMT library information - - It also warns users if the installed Ghostscript version has serious bugs or is - incompatible with the installed GMT version. - """ - - import importlib - import platform - import shutil - import subprocess - - from packaging.requirements import Requirement - from packaging.version import Version - - def _get_clib_info() -> dict: - """ - Return information about the GMT shared library. - """ - from pygmt.clib import Session - - with Session() as ses: - return ses.info - - def _get_module_version(modname: str) -> str | None: - """ - Get version information of a Python module. - """ - try: - if modname in sys.modules: - module = sys.modules[modname] - else: - module = importlib.import_module(modname) - - try: - return module.__version__ - except AttributeError: - return module.version - except ImportError: - return None - - def _get_ghostscript_version() -> str | None: - """ - Get Ghostscript version. - """ - match sys.platform: - case "linux" | "darwin": - cmds = ["gs"] - case os_name if os_name.startswith("freebsd"): - cmds = ["gs"] - case "win32": - cmds = ["gswin64c.exe", "gswin32c.exe"] - case _: - return None - - for gs_cmd in cmds: - if (gsfullpath := shutil.which(gs_cmd)) is not None: - return subprocess.check_output( - [gsfullpath, "--version"], universal_newlines=True - ).strip() - return None - - def _check_ghostscript_version(gs_version: str) -> str | None: - """ - Check if the Ghostscript version is compatible with GMT versions. - """ - match Version(gs_version): - case v if v < Version("9.53"): - return ( - f"Ghostscript v{gs_version} is too old and may have serious bugs. " - "Please consider upgrading your Ghostscript." - ) - case v if Version("10.00") <= v < Version("10.02"): - return ( - f"Ghostscript v{gs_version} has known bugs. " - "Please consider upgrading to version v10.02 or later." 
- ) - case v if v >= Version("10.02"): - from pygmt.clib import __gmt_version__ - - if Version(__gmt_version__) < Version("6.5.0"): - return ( - f"GMT v{__gmt_version__} doesn't support Ghostscript " - "v{gs_version}. Please consider upgrading to GMT>=6.5.0 or " - "downgrading to Ghostscript v9.56." - ) - return None - - sys_info = { - "python": sys.version.replace("\n", " "), - "executable": sys.executable, - "machine": platform.platform(), - } - deps = [Requirement(v).name for v in importlib.metadata.requires("pygmt")] - gs_version = _get_ghostscript_version() - - lines = [] - lines.append("PyGMT information:") - lines.append(f" version: {__version__}") - lines.append("System information:") - lines.extend([f" {key}: {val}" for key, val in sys_info.items()]) - lines.append("Dependency information:") - lines.extend([f" {modname}: {_get_module_version(modname)}" for modname in deps]) - lines.append(f" ghostscript: {gs_version}") - lines.append("GMT library information:") - lines.extend([f" {key}: {val}" for key, val in _get_clib_info().items()]) - - if warnmsg := _check_ghostscript_version(gs_version): - lines.append("WARNING:") - lines.append(f" {warnmsg}") - - print("\n".join(lines), file=file) diff --git a/pygmt/_show_versions.py b/pygmt/_show_versions.py new file mode 100644 index 00000000000..8a3c9acf2c1 --- /dev/null +++ b/pygmt/_show_versions.py @@ -0,0 +1,139 @@ +""" +Utility methods to print system info for debugging. + +Adapted from :func:`rioxarray.show_versions` and :func:`pandas.show_versions`. +""" + +import importlib +import platform +import shutil +import sys +from importlib.metadata import version + +# Get semantic version through setuptools-scm +__version__ = f'v{version("pygmt")}' # e.g. v0.1.2.dev3+g0ab3cd78 +__commit__ = __version__.split("+g")[-1] if "+g" in __version__ else "" # 0ab3cd78 + + +def _get_clib_info() -> dict: + """ + Return information about the GMT shared library. + """ + from pygmt.clib import Session + + with Session() as ses: + return ses.info + + +def _get_module_version(modname: str) -> str | None: + """ + Get version information of a Python module. + """ + try: + if modname in sys.modules: + module = sys.modules[modname] + else: + module = importlib.import_module(modname) + + try: + return module.__version__ + except AttributeError: + return module.version + except ImportError: + return None + + +def _get_ghostscript_version() -> str | None: + """ + Get Ghostscript version. + """ + import subprocess + + match sys.platform: + case "linux" | "darwin": + cmds = ["gs"] + case os_name if os_name.startswith("freebsd"): + cmds = ["gs"] + case "win32": + cmds = ["gswin64c.exe", "gswin32c.exe"] + case _: + return None + + for gs_cmd in cmds: + if (gsfullpath := shutil.which(gs_cmd)) is not None: + return subprocess.check_output( + [gsfullpath, "--version"], universal_newlines=True + ).strip() + return None + + +def _check_ghostscript_version(gs_version: str) -> str | None: + """ + Check if the Ghostscript version is compatible with GMT versions. + """ + from packaging.version import Version + + match Version(gs_version): + case v if v < Version("9.53"): + return ( + f"Ghostscript v{gs_version} is too old and may have serious bugs. " + "Please consider upgrading your Ghostscript." + ) + case v if Version("10.00") <= v < Version("10.02"): + return ( + f"Ghostscript v{gs_version} has known bugs. " + "Please consider upgrading to version v10.02 or later." 
+ ) + case v if v >= Version("10.02"): + from pygmt.clib import __gmt_version__ + + if Version(__gmt_version__) < Version("6.5.0"): + return ( + f"GMT v{__gmt_version__} doesn't support Ghostscript " + "v{gs_version}. Please consider upgrading to GMT>=6.5.0 or " + "downgrading to Ghostscript v9.56." + ) + return None + + +def show_versions(file=sys.stdout): + """ + Print various dependency versions which are useful when submitting bug reports. + + This includes information about: + + - PyGMT itself + - System information (Python version, Operating System) + - Core dependency versions (NumPy, Pandas, Xarray, etc) + - GMT library information + + It also warns users if the installed Ghostscript version has serious bugs or is + incompatible with the installed GMT version. + """ + + from packaging.requirements import Requirement + + sys_info = { + "python": sys.version.replace("\n", " "), + "executable": sys.executable, + "machine": platform.platform(), + } + deps = [Requirement(v).name for v in importlib.metadata.requires("pygmt")] + gs_version = _get_ghostscript_version() + + lines = [] + lines.append("PyGMT information:") + lines.append(f" version: {__version__}") + lines.append("System information:") + lines.extend([f" {key}: {val}" for key, val in sys_info.items()]) + lines.append("Dependency information:") + lines.extend([f" {modname}: {_get_module_version(modname)}" for modname in deps]) + lines.append(f" ghostscript: {gs_version}") + lines.append("GMT library information:") + lines.extend([f" {key}: {val}" for key, val in _get_clib_info().items()]) + + if warnmsg := _check_ghostscript_version(gs_version): + lines.append("WARNING:") + lines.append(f" {warnmsg}") + + print("\n".join(lines), file=file) From d56503fa51bb74ef72a7548d2dcfbdcfb01300a8 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 31 May 2024 14:38:20 +0800 Subject: [PATCH 152/218] CI: Fix the Check Links workflow (#3281) --- .github/workflows/check-links.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 3f95532e415..70727b02a0c 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -69,6 +69,7 @@ jobs: - name: Create Issue From File if: env.lychee_exit_code != 0 run: | + cd repository/ title="Link Checker Report on ${{ steps.date.outputs.date }}" gh issue create --title "$title" --body-file ./lychee/out.md env: From 88eddc73f42e74c102b71c6095f58120f1dfdc92 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jun 2024 09:05:34 +0800 Subject: [PATCH 153/218] Build(deps): Bump actions/create-github-app-token from 1.10.0 to 1.10.1 (#3284) Bumps [actions/create-github-app-token](https://github.com/actions/create-github-app-token) from 1.10.0 to 1.10.1. - [Release notes](https://github.com/actions/create-github-app-token/releases) - [Commits](https://github.com/actions/create-github-app-token/compare/v1.10.0...v1.10.1) --- updated-dependencies: - dependency-name: actions/create-github-app-token dependency-type: direct:production update-type: version-update:semver-patch ... 
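A minimal sketch of calling the relocated ``pygmt.show_versions`` from the refactor above; writing the report into a ``StringIO`` buffer instead of ``sys.stdout`` is only an example of the ``file`` parameter:

    import io

    import pygmt

    buf = io.StringIO()
    pygmt.show_versions(file=buf)          # same report as printing to stdout
    print(buf.getvalue().splitlines()[0])  # "PyGMT information:"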
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/format-command.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 24a9da1d578..2c9553b80e2 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: # Generate token from GenericMappingTools bot - - uses: actions/create-github-app-token@v1.10.0 + - uses: actions/create-github-app-token@v1.10.1 id: generate-token with: app-id: ${{ secrets.APP_ID }} From 844594fe8083992afbf0262ae49113acfc254a8b Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 9 Jun 2024 22:03:50 +0800 Subject: [PATCH 154/218] pygmt.x2sys_cross: Refactor to use virtualfiles for output tables Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/src/x2sys_cross.py | 111 +++++++++++++++++--------------- pygmt/tests/test_x2sys_cross.py | 108 +++++++++++++++++++++++-------- 2 files changed, 139 insertions(+), 80 deletions(-) diff --git a/pygmt/src/x2sys_cross.py b/pygmt/src/x2sys_cross.py index eadd20dcfb2..af79cfee852 100644 --- a/pygmt/src/x2sys_cross.py +++ b/pygmt/src/x2sys_cross.py @@ -5,13 +5,12 @@ import contextlib import os from pathlib import Path +from typing import Any import pandas as pd -from packaging.version import Version from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( - GMTTempFile, build_arg_list, data_kind, fmt_docstring, @@ -71,7 +70,9 @@ def tempfile_from_dftrack(track, suffix): Z="trackvalues", ) @kwargs_to_strings(R="sequence") -def x2sys_cross(tracks=None, outfile=None, **kwargs): +def x2sys_cross( + tracks=None, outfile: str | None = None, **kwargs +) -> pd.DataFrame | None: r""" Calculate crossovers between track data files. @@ -103,10 +104,8 @@ def x2sys_cross(tracks=None, outfile=None, **kwargs): will also be looked for via $MGD77_HOME/mgd77_paths.txt and .gmt files will be searched for via $GMT_SHAREDIR/mgg/gmtfile_paths). - outfile : str - Optional. The file name for the output ASCII txt file to store the - table in. - + outfile + The file name for the output ASCII txt file to store the table in. tag : str Specify the x2sys TAG which identifies the attributes of this data type. @@ -183,68 +182,74 @@ def x2sys_cross(tracks=None, outfile=None, **kwargs): Returns ------- - crossover_errors : :class:`pandas.DataFrame` or None - Table containing crossover error information. - Return type depends on whether the ``outfile`` parameter is set: - - - :class:`pandas.DataFrame` with (x, y, ..., etc) if ``outfile`` is not - set - - None if ``outfile`` is set (track output will be stored in the set in - ``outfile``) + crossover_errors + Table containing crossover error information. A :class:`pandas.DataFrame` object + is returned if ``outfile`` is not set, otherwise ``None`` is returned and output + will be stored in file set by ``outfile``. 
""" - with Session() as lib: - file_contexts = [] - for track in tracks: - kind = data_kind(track) - if kind == "file": + # Determine output type based on 'outfile' parameter + output_type = "pandas" if outfile is None else "file" + + file_contexts: list[contextlib.AbstractContextManager[Any]] = [] + for track in tracks: + match data_kind(track): + case "file": file_contexts.append(contextlib.nullcontext(track)) - elif kind == "matrix": + case "matrix": # find suffix (-E) of trackfiles used (e.g. xyz, csv, etc) from # $X2SYS_HOME/TAGNAME/TAGNAME.tag file - lastline = ( - Path(os.environ["X2SYS_HOME"], kwargs["T"], f"{kwargs['T']}.tag") - .read_text(encoding="utf8") - .strip() - .split("\n")[-1] - ) # e.g. "-Dxyz -Etsv -I1/1" + tagfile = Path( + os.environ["X2SYS_HOME"], kwargs["T"], f"{kwargs['T']}.tag" + ) + # Last line is like "-Dxyz -Etsv -I1/1" + lastline = tagfile.read_text(encoding="utf8").splitlines()[-1] for item in sorted(lastline.split()): # sort list alphabetically if item.startswith(("-E", "-D")): # prefer -Etsv over -Dxyz suffix = item[2:] # e.g. tsv (1st choice) or xyz (2nd choice) # Save pandas.DataFrame track data to temporary file file_contexts.append(tempfile_from_dftrack(track=track, suffix=suffix)) - else: + case _: raise GMTInvalidInput(f"Unrecognized data type: {type(track)}") - with GMTTempFile(suffix=".txt") as tmpfile: + with Session() as lib: + with lib.virtualfile_out(kind="dataset", fname=outfile) as vouttbl: with contextlib.ExitStack() as stack: fnames = [stack.enter_context(c) for c in file_contexts] - if outfile is None: - outfile = tmpfile.name lib.call_module( module="x2sys_cross", - args=build_arg_list(kwargs, infile=fnames, outfile=outfile), - ) - - # Read temporary csv output to a pandas table - if outfile == tmpfile.name: # if outfile isn't set, return pd.DataFrame - # Read the tab-separated ASCII table - date_format_kwarg = ( - {"date_format": "ISO8601"} - if Version(pd.__version__) >= Version("2.0.0") - else {} + args=build_arg_list(kwargs, infile=fnames, outfile=vouttbl), ) - table = pd.read_csv( - tmpfile.name, - sep="\t", - header=2, # Column names are on 2nd row - comment=">", # Skip the 3rd row with a ">" - parse_dates=[2, 3], # Datetimes on 3rd and 4th column - **date_format_kwarg, # Parse dates in ISO8601 format on pandas>=2 + result = lib.virtualfile_to_dataset( + vfname=vouttbl, output_type=output_type, header=2 ) - # Remove the "# " from "# x" in the first column - table = table.rename(columns={table.columns[0]: table.columns[0][2:]}) - elif outfile != tmpfile.name: # if outfile is set, output in outfile only - table = None - return table + if output_type == "file": + return result + + # Convert 3rd and 4th columns to datetime/timedelta for pandas output. + # These two columns have names "t_1"/"t_2" or "i_1"/"i_2". + # "t_" means absolute datetimes and "i_" means dummy times. + # Internally, they are all represented as double-precision numbers in GMT, + # relative to TIME_EPOCH with the unit defined by TIME_UNIT. + # In GMT, TIME_UNIT can be 'y' (year), 'o' (month), 'w' (week), 'd' (day), + # 'h' (hour), 'm' (minute), 's' (second). Years are 365.2425 days and months + # are of equal length. + # pd.to_timedelta() supports unit of 'W'/'D'/'h'/'m'/'s'/'ms'/'us'/'ns'. 
+ match time_unit := lib.get_default("TIME_UNIT"): + case "y": + unit = "s" + scale = 365.2425 * 86400.0 + case "o": + unit = "s" + scale = 365.2425 / 12.0 * 86400.0 + case "w" | "d" | "h" | "m" | "s": + unit = time_unit.upper() if time_unit in "wd" else time_unit + scale = 1.0 + + columns = result.columns[2:4] + result[columns] *= scale + result[columns] = result[columns].apply(pd.to_timedelta, unit=unit) + if columns[0][0] == "t": # "t" or "i": + result[columns] += pd.Timestamp(lib.get_default("TIME_EPOCH")) + return result diff --git a/pygmt/tests/test_x2sys_cross.py b/pygmt/tests/test_x2sys_cross.py index bae686efe27..09f424d1a42 100644 --- a/pygmt/tests/test_x2sys_cross.py +++ b/pygmt/tests/test_x2sys_cross.py @@ -12,7 +12,7 @@ import pandas as pd import pytest from packaging.version import Version -from pygmt import x2sys_cross, x2sys_init +from pygmt import config, x2sys_cross, x2sys_init from pygmt.clib import __gmt_version__ from pygmt.datasets import load_sample_data from pygmt.exceptions import GMTInvalidInput @@ -52,15 +52,20 @@ def test_x2sys_cross_input_file_output_file(): output = x2sys_cross( tracks=["@tut_ship.xyz"], tag=tag, coe="i", outfile=outfile ) - assert output is None # check that output is None since outfile is set assert outfile.stat().st_size > 0 # check that outfile exists at path - _ = pd.read_csv(outfile, sep="\t", header=2) # ensure ASCII text file loads ok + result = pd.read_csv(outfile, sep="\t", comment=">", header=2) + assert result.shape == (14374, 12) if sys.platform == "darwin" else (14338, 12) + columns = list(result.columns) + assert columns[:6] == ["# x", "y", "i_1", "i_2", "dist_1", "dist_2"] + assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"] + npt.assert_allclose(result["i_1"].min(), 45.2099, rtol=1.0e-4) + npt.assert_allclose(result["i_1"].max(), 82945.9370, rtol=1.0e-4) @pytest.mark.usefixtures("mock_x2sys_home") @pytest.mark.xfail( - condition=Version(__gmt_version__) < Version("6.5.0") or sys.platform == "darwin", + condition=Version(__gmt_version__) < Version("6.5.0"), reason="Upstream bug fixed in https://github.com/GenericMappingTools/gmt/pull/8188", ) def test_x2sys_cross_input_file_output_dataframe(): @@ -74,39 +79,70 @@ def test_x2sys_cross_input_file_output_dataframe(): output = x2sys_cross(tracks=["@tut_ship.xyz"], tag=tag, coe="i") assert isinstance(output, pd.DataFrame) - assert output.shape == (14338, 12) + assert output.shape == (14374, 12) if sys.platform == "darwin" else (14338, 12) columns = list(output.columns) assert columns[:6] == ["x", "y", "i_1", "i_2", "dist_1", "dist_2"] assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"] + assert output["i_1"].dtype.type == np.timedelta64 + assert output["i_2"].dtype.type == np.timedelta64 + npt.assert_allclose(output["i_1"].min().total_seconds(), 45.2099, rtol=1.0e-4) + npt.assert_allclose(output["i_1"].max().total_seconds(), 82945.937, rtol=1.0e-4) @pytest.mark.benchmark @pytest.mark.usefixtures("mock_x2sys_home") -def test_x2sys_cross_input_dataframe_output_dataframe(tracks): +@pytest.mark.parametrize("unit", ["s", "o", "y"]) +def test_x2sys_cross_input_dataframe_output_dataframe(tracks, unit): """ Run x2sys_cross by passing in one dataframe, and output internal crossovers to a - pandas.DataFrame. + pandas.DataFrame, checking TIME_UNIT s (second), o (month), and y (year). 
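As a usage sketch tied to the TIME_UNIT tests above (not a verbatim excerpt): computing internal crossovers under a specific TIME_UNIT, assuming X2SYS_HOME is set and using the "@tut_ship.xyz" remote file; the tag name is a placeholder:

    from pygmt import config, x2sys_cross, x2sys_init

    x2sys_init(tag="MYSHIPS", fmtfile="xyz", force=True)   # "MYSHIPS" is a placeholder tag
    with config(TIME_UNIT="s"):
        coe = x2sys_cross(tracks=["@tut_ship.xyz"], tag="MYSHIPS", coe="i")
    # after this patch, coe["i_1"] and coe["i_2"] come back as pandas timedeltas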
""" with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: tag = Path(tmpdir).name x2sys_init(tag=tag, fmtfile="xyz", force=True) - output = x2sys_cross(tracks=tracks, tag=tag, coe="i") + with config(TIME_UNIT=unit): + output = x2sys_cross(tracks=tracks, tag=tag, coe="i") assert isinstance(output, pd.DataFrame) assert output.shape == (14, 12) columns = list(output.columns) assert columns[:6] == ["x", "y", "i_1", "i_2", "dist_1", "dist_2"] assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"] - assert output.dtypes["i_1"].type == np.object_ - assert output.dtypes["i_2"].type == np.object_ + assert output["i_1"].dtype.type == np.timedelta64 + assert output["i_2"].dtype.type == np.timedelta64 + + # Scale to convert a value to second + match unit: + case "y": + scale = 365.2425 * 86400.0 + case "o": + scale = 365.2425 / 12.0 * 86400.0 + case _: + scale = 1.0 + npt.assert_allclose( + output["i_1"].min().total_seconds(), 0.9175 * scale, rtol=1.0e-4 + ) + npt.assert_allclose( + output["i_1"].max().total_seconds(), 23.9996 * scale, rtol=1.0e-4 + ) @pytest.mark.usefixtures("mock_x2sys_home") -def test_x2sys_cross_input_two_dataframes(): +@pytest.mark.parametrize( + ("unit", "epoch"), + [ + ("s", "1970-01-01T00:00:00"), + ("o", "1970-01-01T00:00:00"), + ("y", "1970-01-01T00:00:00"), + ("s", "2012-03-04T05:06:07"), + ], +) +def test_x2sys_cross_input_two_dataframes(unit, epoch): """ Run x2sys_cross by passing in two pandas.DataFrame tables with a time column, and - output external crossovers to a pandas.DataFrame. + output external crossovers to a pandas.DataFrame, checking TIME_UNIT s (second), + o (month), and y (year), and TIME_EPOCH 1970 and 2012. """ with TemporaryDirectory(prefix="X2SYS", dir=Path.cwd()) as tmpdir: tmpdir_p = Path(tmpdir) @@ -127,15 +163,22 @@ def test_x2sys_cross_input_two_dataframes(): track["time"] = pd.date_range(start=f"2020-{i}1-01", periods=10, freq="min") tracks.append(track) - output = x2sys_cross(tracks=tracks, tag=tag, coe="e") + with config(TIME_UNIT=unit, TIME_EPOCH=epoch): + output = x2sys_cross(tracks=tracks, tag=tag, coe="e") assert isinstance(output, pd.DataFrame) assert output.shape == (26, 12) columns = list(output.columns) assert columns[:6] == ["x", "y", "t_1", "t_2", "dist_1", "dist_2"] assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"] - assert output.dtypes["t_1"].type == np.datetime64 - assert output.dtypes["t_2"].type == np.datetime64 + assert output["t_1"].dtype.type == np.datetime64 + assert output["t_2"].dtype.type == np.datetime64 + + tolerance = pd.Timedelta("1ms") + t1_min = pd.Timestamp("2020-01-01 00:00:10.6677") + t1_max = pd.Timestamp("2020-01-01 00:08:29.8067") + assert abs(output["t_1"].min() - t1_min) < tolerance + assert abs(output["t_1"].max() - t1_max) < tolerance @pytest.mark.usefixtures("mock_x2sys_home") @@ -159,8 +202,8 @@ def test_x2sys_cross_input_dataframe_with_nan(tracks): columns = list(output.columns) assert columns[:6] == ["x", "y", "i_1", "i_2", "dist_1", "dist_2"] assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"] - assert output.dtypes["i_1"].type == np.object_ - assert output.dtypes["i_2"].type == np.object_ + assert output.dtypes["i_1"].type == np.timedelta64 + assert output.dtypes["i_2"].type == np.timedelta64 @pytest.mark.usefixtures("mock_x2sys_home") @@ -201,7 +244,7 @@ def test_x2sys_cross_invalid_tracks_input_type(tracks): @pytest.mark.usefixtures("mock_x2sys_home") @pytest.mark.xfail( - condition=Version(__gmt_version__) < 
Version("6.5.0") or sys.platform == "darwin", + condition=Version(__gmt_version__) < Version("6.5.0"), reason="Upstream bug fixed in https://github.com/GenericMappingTools/gmt/pull/8188", ) def test_x2sys_cross_region_interpolation_numpoints(): @@ -222,15 +265,21 @@ def test_x2sys_cross_region_interpolation_numpoints(): ) assert isinstance(output, pd.DataFrame) - assert output.shape == (3882, 12) - # Check crossover errors (z_X) and mean value of observables (z_M) - npt.assert_allclose(output.z_X.mean(), -138.66, rtol=1e-4) - npt.assert_allclose(output.z_M.mean(), -2896.875915) + if sys.platform == "darwin": + assert output.shape == (3894, 12) + # Check crossover errors (z_X) and mean value of observables (z_M) + npt.assert_allclose(output.z_X.mean(), -138.23215, rtol=1e-4) + npt.assert_allclose(output.z_M.mean(), -2897.187545, rtol=1e-4) + else: + assert output.shape == (3882, 12) + # Check crossover errors (z_X) and mean value of observables (z_M) + npt.assert_allclose(output.z_X.mean(), -138.66, rtol=1e-4) + npt.assert_allclose(output.z_M.mean(), -2896.875915, rtol=1e-4) @pytest.mark.usefixtures("mock_x2sys_home") @pytest.mark.xfail( - condition=Version(__gmt_version__) < Version("6.5.0") or sys.platform == "darwin", + condition=Version(__gmt_version__) < Version("6.5.0"), reason="Upstream bug fixed in https://github.com/GenericMappingTools/gmt/pull/8188", ) def test_x2sys_cross_trackvalues(): @@ -243,7 +292,12 @@ def test_x2sys_cross_trackvalues(): output = x2sys_cross(tracks=["@tut_ship.xyz"], tag=tag, trackvalues=True) assert isinstance(output, pd.DataFrame) - assert output.shape == (14338, 12) - # Check mean of track 1 values (z_1) and track 2 values (z_2) - npt.assert_allclose(output.z_1.mean(), -2422.418556, rtol=1e-4) - npt.assert_allclose(output.z_2.mean(), -2402.268364, rtol=1e-4) + if sys.platform == "darwin": + assert output.shape == (14374, 12) + # Check mean of track 1 values (z_1) and track 2 values (z_2) + npt.assert_allclose(output.z_1.mean(), -2422.973372, rtol=1e-4) + npt.assert_allclose(output.z_2.mean(), -2402.87476, rtol=1e-4) + else: + assert output.shape == (14338, 12) + npt.assert_allclose(output.z_1.mean(), -2422.418556, rtol=1e-4) + npt.assert_allclose(output.z_2.mean(), -2402.268364, rtol=1e-4) From b5242be7cd84f4a1b34288a950faf64c85282988 Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Tue, 11 Jun 2024 16:25:48 +1200 Subject: [PATCH 155/218] Add pre-commit config with pre-commit-hooks (#3283) Adding a .pre-commit-config.yaml file with some pre-commit hooks (check-added-large-files, check-yaml, end-of-file-fixer, trailing-whitespace, forbid-crlf, remove-crlf, chmod). 
* Add .pre-commit-config.yaml to MAINIFEST.in * Add forbid-crlf, remove-crlf and chmod hooks, remove from GitHub Actions * Add `pre-commit run --all-files` to `make format` * Add pre-commit to Style Checks and format-command workflows * Update note on style_checks.yaml to mention ruff+pre-commit is ran * Mention that pre-commit is used to enforce LF ending and 644 permissions --------- Co-authored-by: Dongdong Tian --- .github/workflows/format-command.yml | 5 +---- .github/workflows/style_checks.yaml | 13 ++++--------- .pre-commit-config.yaml | 17 +++++++++++++++++ CODE_OF_CONDUCT.md | 2 +- MANIFEST.in | 1 + Makefile | 1 + doc/_templates/autosummary/class.rst | 1 - doc/_templates/autosummary/exception.rst | 1 - doc/_templates/autosummary/function.rst | 1 - doc/changes.md | 4 ++-- doc/contributing.md | 4 ++-- doc/team.md | 1 - environment.yml | 1 + examples/projections/README.txt | 2 +- pygmt/tests/data/contours.txt | 2 +- 15 files changed, 32 insertions(+), 24 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 2c9553b80e2..625856aaee2 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -32,16 +32,13 @@ jobs: # Install formatting tools - name: Install formatting tools run: | - python -m pip install ruff + python -m pip install ruff pre-commit python -m pip list - sudo apt-get install dos2unix # Run "make format" and commit the change to the PR branch - name: Commit to the PR branch if any changes run: | make format - git ls-files -z | xargs -0 dos2unix --quiet - git ls-files -z | xargs -0 chmod 644 if [[ $(git ls-files -m) ]]; then git config --global user.name 'actions-bot' git config --global user.email '58130806+actions-bot@users.noreply.github.com' diff --git a/.github/workflows/style_checks.yaml b/.github/workflows/style_checks.yaml index 31914a4e9d4..b7d57b5b532 100644 --- a/.github/workflows/style_checks.yaml +++ b/.github/workflows/style_checks.yaml @@ -34,18 +34,13 @@ jobs: - name: Install packages run: | - python -m pip install ruff + python -m pip install ruff pre-commit python -m pip list - sudo apt-get install dos2unix - - name: Formatting check (ruff) - run: make check - - - name: Ensure files use UNIX line breaks and have 644 permission + - name: Formatting check (ruff + pre-commit) run: | - git ls-files -z | xargs -0 dos2unix --quiet - git ls-files -z | xargs -0 chmod 644 - if [[ $(git ls-files -m) ]]; then git --no-pager diff HEAD; exit 1; fi + make check + pre-commit run --all-files - name: Ensure example scripts have at least one code block separator run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..d758c6b5187 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,17 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: + - id: check-added-large-files + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace +- repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.5.5 + hooks: + - id: forbid-crlf + - id: remove-crlf + - id: chmod + args: ['644'] diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 7ff1c459c25..7246f53e333 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,2 +1,2 @@ All participants in the PyGMT community must abide by -the [Generic Mapping Tools organization Code of 
Conduct](https://github.com/GenericMappingTools/.github/blob/main/CODE_OF_CONDUCT.md). \ No newline at end of file +the [Generic Mapping Tools organization Code of Conduct](https://github.com/GenericMappingTools/.github/blob/main/CODE_OF_CONDUCT.md). diff --git a/MANIFEST.in b/MANIFEST.in index da8835b2c12..6b926e1bc26 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -6,6 +6,7 @@ prune examples* prune pygmt/tests* exclude .dvcignore exclude .gitignore +exclude .pre-commit-config.yaml exclude .readthedocs.yaml exclude AUTHORSHIP.md exclude CODE_OF_CONDUCT.md diff --git a/Makefile b/Makefile index 271e5d9083c..4494f2996c3 100644 --- a/Makefile +++ b/Makefile @@ -61,6 +61,7 @@ test_no_images: _runtest format: ruff check --fix --exit-zero $(FORMAT_FILES) ruff format $(FORMAT_FILES) + pre-commit run --all-files check: ruff check $(FORMAT_FILES) diff --git a/doc/_templates/autosummary/class.rst b/doc/_templates/autosummary/class.rst index 5f1bd233edf..cda380f316a 100644 --- a/doc/_templates/autosummary/class.rst +++ b/doc/_templates/autosummary/class.rst @@ -30,4 +30,3 @@ .. raw:: html
- diff --git a/doc/_templates/autosummary/exception.rst b/doc/_templates/autosummary/exception.rst index 24f0ecbf66b..a3ee4ec8e25 100644 --- a/doc/_templates/autosummary/exception.rst +++ b/doc/_templates/autosummary/exception.rst @@ -8,4 +8,3 @@ .. raw:: html
- diff --git a/doc/_templates/autosummary/function.rst b/doc/_templates/autosummary/function.rst index 98c403ac6f3..ee551b4b1e4 100644 --- a/doc/_templates/autosummary/function.rst +++ b/doc/_templates/autosummary/function.rst @@ -10,4 +10,3 @@ .. raw:: html
- diff --git a/doc/changes.md b/doc/changes.md index e9a1a6ca17c..761ee93201f 100644 --- a/doc/changes.md +++ b/doc/changes.md @@ -7,9 +7,9 @@ ### Highlights * 🎉 **Twelfth minor release of PyGMT** 🎉 -* 🚀 Almost all module wrappers (with a few exceptions) now use in-memory GMT *virtual files* instead of intermediate temporary files to improve performance ([#2730](https://github.com/GenericMappingTools/pygmt/issues/2730)) +* 🚀 Almost all module wrappers (with a few exceptions) now use in-memory GMT *virtual files* instead of intermediate temporary files to improve performance ([#2730](https://github.com/GenericMappingTools/pygmt/issues/2730)) * Almost all module wrappers (with a few exceptions) now have consistent behavior for table-like output ([#1318](https://github.com/GenericMappingTools/pygmt/issues/1318)) -* Adopt [SPEC 0](https://scientific-python.org/specs/spec-0000/) policy for minimum supported versions of GMT, Python, and other core dependencies +* Adopt [SPEC 0](https://scientific-python.org/specs/spec-0000/) policy for minimum supported versions of GMT, Python, and other core dependencies ### Enhancements diff --git a/doc/contributing.md b/doc/contributing.md index 8ee7468d1d8..ec33c9a4daa 100644 --- a/doc/contributing.md +++ b/doc/contributing.md @@ -481,8 +481,8 @@ the code yourself. Before committing, run it to automatically format your code: make format ``` -For consistency, we also use UNIX-style line endings (`\n`) and file permission -644 (`-rw-r--r--`) throughout the whole project. +For consistency, we also use `pre-commit` hooks to enforce UNIX-style line endings +(`\n`) and file permission 644 (`-rw-r--r--`) throughout the whole project. Don't worry if you forget to do it. Our continuous integration systems will warn us and you can make a new commit with the formatted code. Even better, you can just write `/format` in the first line of any comment in a diff --git a/doc/team.md b/doc/team.md index 08a512bc979..d5af274ad82 100644 --- a/doc/team.md +++ b/doc/team.md @@ -116,4 +116,3 @@ year will be moved to Distinguished Contributors. :::: ::::: - diff --git a/environment.yml b/environment.yml index 57a4799442e..21961dd8b8f 100644 --- a/environment.yml +++ b/environment.yml @@ -25,6 +25,7 @@ dependencies: - pip # Dev dependencies (style checks) - codespell + - pre-commit - ruff>=0.3.0 # Dev dependencies (unit testing) - matplotlib-base diff --git a/examples/projections/README.txt b/examples/projections/README.txt index 5e43e4cc9af..594dc84a943 100644 --- a/examples/projections/README.txt +++ b/examples/projections/README.txt @@ -2,7 +2,7 @@ Projections =========== PyGMT supports many map projections. Use the ``projection`` parameter to specify which -one you want to use in all plotting methods. The projection is specified by a one-letter +one you want to use in all plotting methods. The projection is specified by a one-letter code along with (sometimes optional) reference longitude and latitude and the width of the map (for example, **A**\ *lon0/lat0*\ [*/horizon*\ ]\ */width*). The map height is determined based on the region and projection. 
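To accompany the projection description above, a minimal sketch of passing a one-letter projection code with its parameters; the Lambert azimuthal equal-area values below are arbitrary examples, not taken from this patch:

    import pygmt

    fig = pygmt.Figure()
    # "A" code: Alon0/lat0[/horizon]/width -> center (30, -20), 60-degree horizon, 12 cm wide
    fig.coast(region="g", projection="A30/-20/60/12c", land="gray", frame="g")
    fig.show()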
diff --git a/pygmt/tests/data/contours.txt b/pygmt/tests/data/contours.txt index 59513b26929..34278aca356 100644 --- a/pygmt/tests/data/contours.txt +++ b/pygmt/tests/data/contours.txt @@ -5,4 +5,4 @@ 600 A 700 C 800 A -900 C \ No newline at end of file +900 C From 6c9318bd2393e72372bd7ee8bac51d3ee0fe266b Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 13 Jun 2024 10:57:59 +0800 Subject: [PATCH 156/218] Remove CODE_OF_CONDUCT.md and use the organization-wide CODE OF CONDUCT file (#3285) --- CODE_OF_CONDUCT.md | 2 -- MANIFEST.in | 1 - README.md | 2 +- 3 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 CODE_OF_CONDUCT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 7246f53e333..00000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,2 +0,0 @@ -All participants in the PyGMT community must abide by -the [Generic Mapping Tools organization Code of Conduct](https://github.com/GenericMappingTools/.github/blob/main/CODE_OF_CONDUCT.md). diff --git a/MANIFEST.in b/MANIFEST.in index 6b926e1bc26..6ff94b2505e 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -9,7 +9,6 @@ exclude .gitignore exclude .pre-commit-config.yaml exclude .readthedocs.yaml exclude AUTHORSHIP.md -exclude CODE_OF_CONDUCT.md exclude CONTRIBUTING.md exclude Makefile exclude environment.yml diff --git a/README.md b/README.md index c415f5052df..7fd56461d74 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ [![Digital Object Identifier for the Zenodo archive](https://zenodo.org/badge/DOI/10.5281/3781524.svg)](https://doi.org/10.5281/zenodo.3781524) [![PyOpenSci](https://tinyurl.com/y22nb8up)](https://github.com/pyOpenSci/software-review/issues/43) [![GitHub license](https://img.shields.io/github/license/GenericMappingTools/pygmt?style=flat-square)](https://github.com/GenericMappingTools/pygmt/blob/main/LICENSE.txt) -[![Contributor Code of Conduct](https://img.shields.io/badge/Contributor%20Covenant-v2.1%20adopted-ff69b4.svg)](CODE_OF_CONDUCT.md) +[![Contributor Code of Conduct](https://img.shields.io/badge/Contributor%20Covenant-v2.1%20adopted-ff69b4.svg)](https://github.com/GenericMappingTools/.github/blob/main/CODE_OF_CONDUCT.md) From 7b09d880e1b83975a2334d3ab79244d690d93997 Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Mon, 17 Jun 2024 14:32:58 +1200 Subject: [PATCH 157/218] SPEC 0: Set minimum required version to NumPy 1.24+ (#3286) Following SPEC 0 policy. Bumps minimum supported NumPy version to 1.24 in the pyproject.toml, requirements.txt and environment.yml files. Also update doc/minversions.md to mention NumPy 1.24+ requirement. --- .github/workflows/ci_tests.yaml | 2 +- doc/minversions.md | 2 +- environment.yml | 2 +- pyproject.toml | 2 +- requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 5ce46dd2e93..220e1f1e03b 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -70,7 +70,7 @@ jobs: # Only install optional packages on Python 3.12 include: - python-version: '3.10' - numpy-version: '1.23' + numpy-version: '1.24' pandas-version: '=1.5' xarray-version: '=2022.06' optional-packages: '' diff --git a/doc/minversions.md b/doc/minversions.md index 98223a62125..1e5ac8f8e46 100644 --- a/doc/minversions.md +++ b/doc/minversions.md @@ -12,7 +12,7 @@ after their initial release. 
| PyGMT Version | GMT | Python | NumPy | Pandas | Xarray | |---|---|---|---|---|---| -| [Dev][]* [[Docs][Docs Dev]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | +| [Dev][]* [[Docs][Docs Dev]] | >=6.3.0 | >=3.10 | >=1.24 | >=1.5 | >=2022.06 | | [v0.12.0][] [[Docs][Docs v0.12.0]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | | [v0.11.0][] [[Docs][Docs v0.11.0]] | >=6.3.0 | >=3.9 | >=1.23 | | | | [v0.10.0][] [[Docs][Docs v0.10.0]] | >=6.3.0 | >=3.9 | >=1.22 | | | diff --git a/environment.yml b/environment.yml index 21961dd8b8f..4e828dc34ef 100644 --- a/environment.yml +++ b/environment.yml @@ -7,7 +7,7 @@ dependencies: # Required dependencies - gmt=6.5.0 - ghostscript=10.03.1 - - numpy>=1.23 + - numpy>=1.24 - pandas>=1.5 - xarray>=2022.06 - netCDF4 diff --git a/pyproject.toml b/pyproject.toml index 773bf07f5e6..0142001419d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ classifiers = [ "License :: OSI Approved :: BSD License", ] dependencies = [ - "numpy>=1.23", + "numpy>=1.24", "pandas>=1.5", "xarray>=2022.06", "netCDF4", diff --git a/requirements.txt b/requirements.txt index 000de0692eb..c48eb0f989f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # Required packages -numpy>=1.23 +numpy>=1.24 pandas>=1.5 xarray>=2022.06 netCDF4 From 6a6e9405f326bf9ab3f53b19b368cfee667d7f0d Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 17 Jun 2024 11:11:50 +0800 Subject: [PATCH 158/218] Fix a failing test for legacy GMT versions (#3287) --- pygmt/tests/test_datasets_earth_relief.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygmt/tests/test_datasets_earth_relief.py b/pygmt/tests/test_datasets_earth_relief.py index 129e1a4629e..f5dd8ecf512 100644 --- a/pygmt/tests/test_datasets_earth_relief.py +++ b/pygmt/tests/test_datasets_earth_relief.py @@ -209,4 +209,4 @@ def test_earth_relief_03s_default_registration(): npt.assert_allclose(data.coords["lon"].data.min(), -10) npt.assert_allclose(data.coords["lon"].data.max(), -9.8) npt.assert_allclose(data.min(), -2069.85, atol=0.5) - npt.assert_allclose(data.max(), -924.5, atol=0.5) + npt.assert_allclose(data.max(), -923.5, atol=0.5) From 686e56fcc6a23310db2667614c2ee0a950c23833 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 17 Jun 2024 15:35:22 +0800 Subject: [PATCH 159/218] Mark one x2sys_cross test as xfail in the GMT Legacy Tests workflow (#3288) --- pygmt/tests/test_x2sys_cross.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pygmt/tests/test_x2sys_cross.py b/pygmt/tests/test_x2sys_cross.py index 09f424d1a42..eba29aa6242 100644 --- a/pygmt/tests/test_x2sys_cross.py +++ b/pygmt/tests/test_x2sys_cross.py @@ -39,6 +39,10 @@ def fixture_tracks(): @pytest.mark.usefixtures("mock_x2sys_home") +@pytest.mark.xfail( + condition=Version(__gmt_version__) < Version("6.5.0"), + reason="Upstream bug fixed in https://github.com/GenericMappingTools/gmt/pull/8188", +) def test_x2sys_cross_input_file_output_file(): """ Run x2sys_cross by passing in a filename, and output internal crossovers to an ASCII From 3779eca27ddae530336dae68a4d7b13c77afea23 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 17 Jun 2024 15:35:31 +0800 Subject: [PATCH 160/218] Fix a typo in _show_versions.py (#3289) --- pygmt/_show_versions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygmt/_show_versions.py b/pygmt/_show_versions.py index 8a3c9acf2c1..6da2ac1c736 100644 --- a/pygmt/_show_versions.py +++ b/pygmt/_show_versions.py @@ -90,7 +90,7 @@ def 
_check_ghostscript_version(gs_version: str) -> str | None: if Version(__gmt_version__) < Version("6.5.0"): return ( f"GMT v{__gmt_version__} doesn't support Ghostscript " - "v{gs_version}. Please consider upgrading to GMT>=6.5.0 or " + f"v{gs_version}. Please consider upgrading to GMT>=6.5.0 or " "downgrading to Ghostscript v9.56." ) return None From 3719474499b1d233e372d6d21fdfd1ce88f2f383 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 17 Jun 2024 16:17:55 +0800 Subject: [PATCH 161/218] CI: Do not add the 'cores' parameters for GMT 6.3 (#3290) --- pygmt/tests/test_grdfilter.py | 7 ++++++- pygmt/tests/test_grdlandmask.py | 7 ++++++- pygmt/tests/test_grdsample.py | 7 ++++++- pygmt/tests/test_sph2grd.py | 7 ++++++- 4 files changed, 24 insertions(+), 4 deletions(-) diff --git a/pygmt/tests/test_grdfilter.py b/pygmt/tests/test_grdfilter.py index 5cbe3574767..a2604d8b00e 100644 --- a/pygmt/tests/test_grdfilter.py +++ b/pygmt/tests/test_grdfilter.py @@ -7,11 +7,16 @@ import numpy as np import pytest import xarray as xr +from packaging.version import Version from pygmt import grdfilter, load_dataarray +from pygmt.clib import __gmt_version__ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import GMTTempFile from pygmt.helpers.testing import load_static_earth_relief +# GMT 6.3 on conda-forge doesn't have OpenMP enabled. +cores = 2 if Version(__gmt_version__) > Version("6.3.0") else None + @pytest.fixture(scope="module", name="grid") def fixture_grid(): @@ -46,7 +51,7 @@ def test_grdfilter_dataarray_in_dataarray_out(grid, expected_grid): Test grdfilter with an input DataArray, and output as DataArray. """ result = grdfilter( - grid=grid, filter="g600", distance="4", region=[-53, -49, -20, -17], cores=2 + grid=grid, filter="g600", distance="4", region=[-53, -49, -20, -17], cores=cores ) # check information of the output grid assert isinstance(result, xr.DataArray) diff --git a/pygmt/tests/test_grdlandmask.py b/pygmt/tests/test_grdlandmask.py index ae51ba2eda4..d275da5cd4b 100644 --- a/pygmt/tests/test_grdlandmask.py +++ b/pygmt/tests/test_grdlandmask.py @@ -6,10 +6,15 @@ import pytest import xarray as xr +from packaging.version import Version from pygmt import grdlandmask, load_dataarray +from pygmt.clib import __gmt_version__ from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import GMTTempFile +# GMT 6.3 on conda-forge doesn't have OpenMP enabled. +cores = 2 if Version(__gmt_version__) > Version("6.3.0") else None + @pytest.fixture(scope="module", name="expected_grid") def fixture_expected_grid(): @@ -50,7 +55,7 @@ def test_grdlandmask_no_outgrid(expected_grid): """ Test grdlandmask with no set outgrid. """ - result = grdlandmask(spacing=1, region=[125, 130, 30, 35], cores=2) + result = grdlandmask(spacing=1, region=[125, 130, 30, 35], cores=cores) # check information of the output grid assert isinstance(result, xr.DataArray) assert result.gmt.gtype == 1 # Geographic grid diff --git a/pygmt/tests/test_grdsample.py b/pygmt/tests/test_grdsample.py index 4c9e64139c3..11812a1226a 100644 --- a/pygmt/tests/test_grdsample.py +++ b/pygmt/tests/test_grdsample.py @@ -6,10 +6,15 @@ import pytest import xarray as xr +from packaging.version import Version from pygmt import grdsample, load_dataarray +from pygmt.clib import __gmt_version__ from pygmt.helpers import GMTTempFile from pygmt.helpers.testing import load_static_earth_relief +# GMT 6.3 on conda-forge doesn't have OpenMP enabled. 
+cores = 2 if Version(__gmt_version__) > Version("6.3.0") else None + @pytest.fixture(scope="module", name="grid") def fixture_grid(): @@ -75,7 +80,7 @@ def test_grdsample_dataarray_out(grid, expected_grid, region, spacing): """ Test grdsample with no outgrid set and the spacing is changed. """ - result = grdsample(grid=grid, spacing=spacing, region=region, cores=2) + result = grdsample(grid=grid, spacing=spacing, region=region, cores=cores) # check information of the output grid assert isinstance(result, xr.DataArray) assert result.gmt.gtype == 1 # Geographic grid diff --git a/pygmt/tests/test_sph2grd.py b/pygmt/tests/test_sph2grd.py index 89b4abeff25..820b8923240 100644 --- a/pygmt/tests/test_sph2grd.py +++ b/pygmt/tests/test_sph2grd.py @@ -6,9 +6,14 @@ import numpy.testing as npt import pytest +from packaging.version import Version from pygmt import sph2grd +from pygmt.clib import __gmt_version__ from pygmt.helpers import GMTTempFile +# GMT 6.3 on conda-forge doesn't have OpenMP enabled. +cores = 2 if Version(__gmt_version__) > Version("6.3.0") else None + def test_sph2grd_outgrid(): """ @@ -27,7 +32,7 @@ def test_sph2grd_no_outgrid(): """ Test sph2grd with no set outgrid. """ - temp_grid = sph2grd(data="@EGM96_to_36.txt", spacing=1, region="g", cores=2) + temp_grid = sph2grd(data="@EGM96_to_36.txt", spacing=1, region="g", cores=cores) assert temp_grid.dims == ("y", "x") assert temp_grid.gmt.gtype == 0 # Cartesian grid assert temp_grid.gmt.registration == 0 # Gridline registration From 8076db4c8798aec4e1dd14ed4bc115de496b2f0f Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 18 Jun 2024 13:31:32 +0800 Subject: [PATCH 162/218] CI: Unpin NumPy in the GMT Dev Tests workflow and fix the dataarray_to_matrix doctest (#3208) (#3226) --- .github/workflows/ci_tests_dev.yaml | 2 +- pygmt/clib/conversion.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 4c2e9864f93..7cc17a559d7 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -143,7 +143,7 @@ jobs: run: | python -m pip install --pre --prefer-binary \ --extra-index https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ - 'numpy<2' pandas xarray netCDF4 packaging \ + numpy pandas xarray netCDF4 packaging \ build contextily dvc geopandas ipython pyarrow rioxarray \ pytest pytest-cov pytest-doctestplus pytest-mpl pytest-rerunfailures pytest-xdist\ sphinx-gallery diff --git a/pygmt/clib/conversion.py b/pygmt/clib/conversion.py index eb8daa61da1..95f2f08aa51 100644 --- a/pygmt/clib/conversion.py +++ b/pygmt/clib/conversion.py @@ -124,6 +124,8 @@ def dataarray_to_matrix(grid): grid = grid.sortby(variables=list(grid.dims), ascending=True) matrix = as_c_contiguous(grid[::-1].to_numpy()) + region = [float(i) for i in region] + inc = [float(i) for i in inc] return matrix, region, inc From ac447063e95ed70c68ca8cd2eebc20c8318c63bf Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 18 Jun 2024 15:50:32 +0800 Subject: [PATCH 163/218] CI: Test NumPy 2.0 in the GMT Tests workflow (#3292) --- .github/workflows/ci_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 220e1f1e03b..fbc15a92612 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -75,7 +75,7 @@ jobs: xarray-version: '=2022.06' optional-packages: '' - python-version: '3.12' - numpy-version: '1.26' + 
numpy-version: '2.0' pandas-version: '' xarray-version: '' optional-packages: ' contextily geopandas ipython pyarrow rioxarray sphinx-gallery' From 96f70f432388f8105ecf33ed5c7dc7481ffb9da6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Jun 2024 10:12:00 +1200 Subject: [PATCH 164/218] Build(deps): Bump codecov/codecov-action from 4.4.1 to 4.5.0 (#3293) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.4.1 to 4.5.0. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4.4.1...v4.5.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index fbc15a92612..753a2c7fbaa 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -169,7 +169,7 @@ jobs: # Upload coverage to Codecov - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4.4.1 + uses: codecov/codecov-action@v4.5.0 if: success() || failure() with: use_oidc: true From c70da3ebb04e033cca74f84f1cc678627f9d9b28 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 19 Jun 2024 19:09:14 +0800 Subject: [PATCH 165/218] Workaround for the pd.read_csv's index_col bug in pandas 3.0 dev version (#3295) --- pygmt/tests/test_grdhisteq.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pygmt/tests/test_grdhisteq.py b/pygmt/tests/test_grdhisteq.py index 76892905269..3c5a3df2d8d 100644 --- a/pygmt/tests/test_grdhisteq.py +++ b/pygmt/tests/test_grdhisteq.py @@ -125,10 +125,9 @@ def test_compute_bins_outfile(grid, expected_df, region): header=None, names=["start", "stop", "bin_id"], dtype={"start": np.float32, "stop": np.float32, "bin_id": np.uint32}, - index_col="bin_id", ) pd.testing.assert_frame_equal( - left=temp_df, right=expected_df.set_index("bin_id") + left=temp_df.set_index("bin_id"), right=expected_df.set_index("bin_id") ) From 30d1e0951e059b822a9e07e197419e0151d4adad Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Thu, 20 Jun 2024 15:09:51 +1200 Subject: [PATCH 166/218] CI: Bump to ubuntu-24.04 and mambaforge-23.11 in ReadTheDocs (#3296) Xref https://docs.readthedocs.io/en/stable/config-file/v2.html#build-os and https://docs.readthedocs.io/en/stable/config-file/v2.html#build-tools-python. --- .readthedocs.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index b3c3a1a3e06..05e89e5b1c3 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -6,9 +6,9 @@ version: 2 # Set the version of Python and other tools you might need build: - os: ubuntu-22.04 + os: ubuntu-24.04 tools: - python: "mambaforge-22.9" + python: "mambaforge-23.11" jobs: post_checkout: # Cancel building pull requests when there aren't changes related to docs. 
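A small sketch of the pandas workaround from the grdhisteq test change above: read the table without ``index_col`` and set the index afterwards; the inline CSV is a made-up stand-in for the real output file:

    import io

    import pandas as pd

    csv = io.StringIO("0.0\t90.0\t0\n90.0\t180.0\t1\n")
    df = pd.read_csv(csv, sep="\t", header=None, names=["start", "stop", "bin_id"])
    df = df.set_index("bin_id")  # instead of index_col="bin_id" in read_csv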
From ec578e40402ad32c3fd9fbbeccce9dcfbf14b522 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 20 Jun 2024 11:32:14 +0800 Subject: [PATCH 167/218] CI: Remove the deprecated lychee option and specify the report file name in the Check Links workflow (#3297) --- .github/workflows/check-links.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 70727b02a0c..7268d8a0af4 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -37,10 +37,10 @@ jobs: id: lychee uses: lycheeverse/lychee-action@v1.10.0 with: + output: /tmp/lychee-out.md # 429: Too many requests args: > --accept 429 - --exclude-mail --exclude "^https://doi.org/10.5281/zenodo$" --exclude "^https://zenodo.org/badge/DOI/$" --exclude "^https://zenodo.org/badge/DOI/10.5281/zenodo$" @@ -71,6 +71,6 @@ jobs: run: | cd repository/ title="Link Checker Report on ${{ steps.date.outputs.date }}" - gh issue create --title "$title" --body-file ./lychee/out.md + gh issue create --title "$title" --body-file /tmp/lychee-out.md env: GH_TOKEN: ${{ github.token }} From b47ba7aac6472a6ba1fc7fa549dac24f166da966 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 21 Jun 2024 11:17:24 +0800 Subject: [PATCH 168/218] CI: Exclude some GitHub links in the Check Links workflow (#3299) --- .github/workflows/check-links.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 7268d8a0af4..56ee1bb9f19 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -47,8 +47,11 @@ jobs: --exclude "^https://github.com/GenericMappingTools/pygmt/pull/[0-9]*$" --exclude "^https://github.com/GenericMappingTools/pygmt/issues/[0-9]*$" --exclude "^https://github.com/GenericMappingTools/gmt/releases/tag/X.Y.Z$" + --exclude "^https://github.com/GenericMappingTools/pygmt/edit" + --exclude "^https://github.com/GenericMappingTools/pygmt/issues/new" --exclude "^git" --exclude "^file://" + --exclude "https://www.adobe.com/jp/print/postscript/pdfs/PLRM.pdf" --exclude "^https://docs.generic-mapping-tools.org/6.5/%s$" --exclude "^https://docs.generic-mapping-tools.org/6.5/%3Cmodule-name%3E.html$" --exclude "^https://www.generic-mapping-tools.org/remote-datasets/%s$" From 632eadbabc64f761d1430b0a2b0353e7bf48da34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Jun 2024 07:44:43 +0800 Subject: [PATCH 169/218] Build(deps): Bump CodSpeedHQ/action from 2.4.1 to 2.4.2 (#3302) Bumps [CodSpeedHQ/action](https://github.com/codspeedhq/action) from 2.4.1 to 2.4.2. - [Release notes](https://github.com/codspeedhq/action/releases) - [Changelog](https://github.com/CodSpeedHQ/action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codspeedhq/action/compare/v2.4.1...v2.4.2) --- updated-dependencies: - dependency-name: CodSpeedHQ/action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 3855fec7b55..5d0deefe56e 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -89,7 +89,7 @@ jobs: # Run the benchmark tests - name: Run benchmarks - uses: CodSpeedHQ/action@v2.4.1 + uses: CodSpeedHQ/action@v2.4.2 with: # 'bash -el -c' is needed to use the custom shell. # See https://github.com/CodSpeedHQ/action/issues/65. From 2bccfe2332ea30fe5070e03efea9210433215f8c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Jun 2024 07:45:36 +0800 Subject: [PATCH 170/218] Build(deps): Bump actions/checkout from 4.1.6 to 4.1.7 (#3303) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.6 to 4.1.7. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4.1.6...v4.1.7) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- .github/workflows/cache_data.yaml | 2 +- .github/workflows/check-links.yml | 4 ++-- .github/workflows/ci_docs.yml | 4 ++-- .github/workflows/ci_doctests.yaml | 2 +- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/ci_tests_dev.yaml | 4 ++-- .github/workflows/ci_tests_legacy.yaml | 2 +- .github/workflows/dvc-diff.yml | 2 +- .github/workflows/format-command.yml | 2 +- .github/workflows/publish-to-pypi.yml | 2 +- .github/workflows/release-baseline-images.yml | 2 +- .github/workflows/style_checks.yaml | 2 +- .github/workflows/type_checks.yml | 2 +- 14 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 5d0deefe56e..f4c2153e73e 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -34,7 +34,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/cache_data.yaml b/.github/workflows/cache_data.yaml index 8e6c75c09fe..6e3d774a191 100644 --- a/.github/workflows/cache_data.yaml +++ b/.github/workflows/cache_data.yaml @@ -36,7 +36,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 56ee1bb9f19..a3a2abfda7d 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -23,12 +23,12 @@ jobs: steps: - name: Checkout the repository - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: path: repository - name: Checkout the documentation - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: ref: gh-pages path: documentation diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 
90257a814ec..47ebaa5f2bb 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -69,7 +69,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that setuptools-scm works fetch-depth: 0 @@ -138,7 +138,7 @@ jobs: run: make -C doc clean all - name: Checkout the gh-pages branch - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: ref: gh-pages # Checkout to this folder instead of the current one diff --git a/.github/workflows/ci_doctests.yaml b/.github/workflows/ci_doctests.yaml index a752bf853a3..45bdcb47311 100644 --- a/.github/workflows/ci_doctests.yaml +++ b/.github/workflows/ci_doctests.yaml @@ -35,7 +35,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 753a2c7fbaa..362c8c5df76 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -94,7 +94,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/ci_tests_dev.yaml b/.github/workflows/ci_tests_dev.yaml index 7cc17a559d7..eeeafe93118 100644 --- a/.github/workflows/ci_tests_dev.yaml +++ b/.github/workflows/ci_tests_dev.yaml @@ -46,7 +46,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that setuptools-scm works fetch-depth: 0 @@ -88,7 +88,7 @@ jobs: # Checkout current GMT repository - name: Checkout the GMT source from ${{ matrix.gmt_git_ref }} branch - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: repository: 'GenericMappingTools/gmt' ref: ${{ matrix.gmt_git_ref }} diff --git a/.github/workflows/ci_tests_legacy.yaml b/.github/workflows/ci_tests_legacy.yaml index fdcaf227216..ef8efc6e4fe 100644 --- a/.github/workflows/ci_tests_legacy.yaml +++ b/.github/workflows/ci_tests_legacy.yaml @@ -44,7 +44,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/dvc-diff.yml b/.github/workflows/dvc-diff.yml index 8cdd04bdf93..b6497c0b33f 100644 --- a/.github/workflows/dvc-diff.yml +++ b/.github/workflows/dvc-diff.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that dvc diff works fetch-depth: 0 diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 625856aaee2..0bea9b70f4c 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -18,7 +18,7 @@ jobs: private-key: ${{ secrets.APP_PRIVATE_KEY }} # Checkout the pull request branch - - uses: actions/checkout@v4.1.6 + - uses: actions/checkout@v4.1.7 with: token: ${{ steps.generate-token.outputs.token }} repository: ${{ github.event.client_payload.pull_request.head.repo.full_name }} diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 266261d01e3..f6216d3908c 100644 --- 
a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -45,7 +45,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 with: # fetch all history so that setuptools-scm works fetch-depth: 0 diff --git a/.github/workflows/release-baseline-images.yml b/.github/workflows/release-baseline-images.yml index 38f20accafb..fd88cc89045 100644 --- a/.github/workflows/release-baseline-images.yml +++ b/.github/workflows/release-baseline-images.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 - name: Setup data version control (DVC) uses: iterative/setup-dvc@v1.1.2 diff --git a/.github/workflows/style_checks.yaml b/.github/workflows/style_checks.yaml index b7d57b5b532..46d0205423f 100644 --- a/.github/workflows/style_checks.yaml +++ b/.github/workflows/style_checks.yaml @@ -24,7 +24,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 # Setup Python - name: Set up Python diff --git a/.github/workflows/type_checks.yml b/.github/workflows/type_checks.yml index d423ba7719f..1589a516819 100644 --- a/.github/workflows/type_checks.yml +++ b/.github/workflows/type_checks.yml @@ -33,7 +33,7 @@ jobs: steps: # Checkout current git repository - name: Checkout - uses: actions/checkout@v4.1.6 + uses: actions/checkout@v4.1.7 # Setup Python - name: Set up Python From cdf49029700651ab6bcc21ffaaf13f7c7e7f9db5 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 28 Jun 2024 14:47:15 +0800 Subject: [PATCH 171/218] CI: Temporarily pin geopandas<1.0 in the Docs workflow (#3304) --- .github/workflows/ci_docs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 47ebaa5f2bb..74951d365f2 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -101,7 +101,7 @@ jobs: netCDF4 packaging contextily - geopandas + geopandas<1.0 ipython rioxarray build From 3574603f27dcbfe46b0277d14fb40c2f1c095bd1 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 29 Jun 2024 00:43:33 +0800 Subject: [PATCH 172/218] CI: Temporarily pin geopandas<1.0 in ReadTheDocs --- ci/requirements/docs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/requirements/docs.yml b/ci/requirements/docs.yml index 414f1419d44..b7381844b92 100644 --- a/ci/requirements/docs.yml +++ b/ci/requirements/docs.yml @@ -14,7 +14,7 @@ dependencies: - packaging # Optional dependencies - contextily - - geopandas + - geopandas<1.0 - ipython - rioxarray # Development dependencies (general) From b907b4a6f1ceeb71a4a5ec064b4c6d41158ae82d Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 29 Jun 2024 09:41:55 +0800 Subject: [PATCH 173/218] Refactor to improve the user experience with non-ASCII characters (#3206) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- doc/index.md | 1 + doc/techref/encodings.md | 108 ++++++++++++++++++++++++++++++++ doc/techref/index.md | 12 ++++ pygmt/encodings.py | 131 +++++++++++++++++++++++++++++++++++++++ pygmt/helpers/utils.py | 105 +++++-------------------------- pygmt/tests/test_text.py | 2 +- 6 files changed, 269 insertions(+), 90 deletions(-) create mode 100644 doc/techref/encodings.md create mode 100644 
doc/techref/index.md create mode 100644 pygmt/encodings.py diff --git a/doc/index.md b/doc/index.md index cff9e9eea72..347224cc3d4 100644 --- a/doc/index.md +++ b/doc/index.md @@ -42,6 +42,7 @@ external_resources.md :caption: Reference documentation api/index.rst +techref/index.md changes.md minversions.md ``` diff --git a/doc/techref/encodings.md b/doc/techref/encodings.md new file mode 100644 index 00000000000..638370e9bd3 --- /dev/null +++ b/doc/techref/encodings.md @@ -0,0 +1,108 @@ +# Supported Encodings and Non-ASCII Characters + +GMT supports a number of encodings and each encoding contains a set of ASCII and non-ASCII +characters. Below are some of the most common encodings and characters that are supported. + +In PyGMT, you can use any of these ASCII and non-ASCII characters in arguments and text +strings. When using non-ASCII characters in PyGMT, the easiest way is to copy and paste +the character from the tables below. + +**Note**: The special character � (REPLACEMENT CHARACTER) is used to indicate that +the character is not defined in the encoding. + +## Adobe ISOLatin1+ Encoding + +| octal | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | +|---|---|---|---|---|---|---|---|---| +| **\03x** | � | • | … | ™ | — | – | fi | ž | +| **\04x** | | ! | " | # | $ | % | & | ’ | +| **\05x** | ( | ) | * | + | , | - | . | / | +| **\06x** | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | +| **\07x** | 8 | 9 | : | ; | < | = | > | ? | +| **\10x** | @ | A | B | C | D | E | F | G | +| **\11x** | H | I | J | K | L | M | N | O | +| **\12x** | P | Q | R | S | T | U | V | W | +| **\13x** | X | Y | Z | [ | \ | ] | ^ | _ | +| **\14x** | ‘ | a | b | c | d | e | f | g | +| **\15x** | h | i | j | k | l | m | n | o | +| **\16x** | p | q | r | s | t | u | v | w | +| **\17x** | x | y | z | { | | | } | ~ | š | +| **\20x** | Œ | † | ‡ | Ł | ⁄ | ‹ | Š | › | +| **\21x** | œ | Ÿ | Ž | ł | ‰ | „ | “ | ” | +| **\22x** | ı | ` | ´ | ^ | ˜ | ¯ | ˘ | ˙ | +| **\23x** | ¨ | ‚ | ˚ | ¸ | ' | ˝ | ˛ | ˇ | +| **\24x** | � | ¡ | ¢ | £ | ¤ | ¥ | ¦ | § | +| **\25x** | ¨ | © | ª | « | ¬ | ­ | ® | ¯ | +| **\26x** | ° | ± | ² | ³ | ´ | µ | ¶ | · | +| **\27x** | ¸ | ¹ | º | » | ¼ | ½ | ¾ | ¿ | +| **\30x** | À | Á |  | à | Ä | Å | Æ | Ç | +| **\31x** | È | É | Ê | Ë | Ì | Í | Î | Ï | +| **\32x** | Ð | Ñ | Ò | Ó | Ô | Õ | Ö | × | +| **\33x** | Ø | Ù | Ú | Û | Ü | Ý | Þ | ß | +| **\34x** | à | á | â | ã | ä | å | æ | ç | +| **\35x** | è | é | ê | ë | ì | í | î | ï | +| **\36x** | ð | ñ | ò | ó | ô | õ | ö | ÷ | +| **\37x** | ø | ù | ú | û | ü | ý | þ | ÿ | + +## Adobe Symbol Encoding + +| octal | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | +|---|---|---|---|---|---|---|---|---| +| **\04x** | | ! | ∀ | # | ∃ | % | & | ∋ | +| **\05x** | ( | ) | ∗ | + | , | − | . | / | +| **\06x** | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | +| **\07x** | 8 | 9 | : | ; | < | = | > | ? 
| +| **\10x** | ≅ | Α | Β | Χ | ∆ | Ε | Φ | Γ | +| **\11x** | Η | Ι | ϑ | Κ | Λ | Μ | Ν | Ο | +| **\12x** | Π | Θ | Ρ | Σ | Τ | Υ | ς | Ω | +| **\13x** | Ξ | Ψ | Ζ | [ | ∴ | ] | ⊥ | _ | +| **\14x** |  | α | β | χ | δ | ε | φ | γ | +| **\15x** | η | ι | ϕ | κ | λ | μ | ν | ο | +| **\16x** | π | θ | ρ | σ | τ | υ | ϖ | ω | +| **\17x** | ξ | ψ | ζ | { | | | } | ∼ | � | +| **\24x** | € | ϒ | ′ | ≤ | ∕ | ∞ | ƒ | ♣ | +| **\25x** | ♦ | ♥ | ♠ | ↔ | ← | ↑ | → | ↓ | +| **\26x** | ° | ± | ″ | ≥ | × | ∝ | ∂ | • | +| **\27x** | ÷ | ≠ | ≡ | ≈ | … | ⏐ | ⎯ | ↵ | +| **\30x** | ℵ | ℑ | ℜ | ℘ | ⊗ | ⊕ | ∅ | ∩ | +| **\31x** | ∪ | ⊃ | ⊇ | ⊄ | ⊂ | ⊆ | ∈ | ∉ | +| **\32x** | ∠ | ∇ | ® | © | ™ | ∏ | √ | ⋅ | +| **\33x** | ¬ | ∧ | ∨ | ⇔ | ⇐ | ⇑ | ⇒ | ⇓ | +| **\34x** | ◊ | 〈 | ® | © | ™ | ∑ | ⎛ | ⎜ | +| **\35x** | ⎝ | ⎡ | ⎢ | ⎣ | ⎧ | ⎨ | ⎩ | ⎪ | +| **\36x** | � | 〉 | ∫ | ⌠ | ⎮ | ⌡ | ⎞ | ⎟ | +| **\37x** | ⎠ | ⎤ | ⎥ | ⎦ | ⎫ | ⎬ | ⎭ | � | + +**Note**: The octal code `\140` represents the RADICAL EXTENDER character, which is not available in +the Unicode character set. + +## Adobe ZapfDingbats Encoding + +| octal | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | +|---|---|---|---|---|---|---|---|---| +| **\04x** | | ✁ | ✂ | ✃ | ✄ | ☎ | ✆ | ✇ | +| **\05x** | ✈ | ✉ | ☛ | ☞ | ✌ | ✍ | ✎ | ✏ | +| **\06x** | ✐ | ✑ | ✒ | ✓ | ✔ | ✕ | ✖ | ✗ | +| **\07x** | ✘ | ✙ | ✚ | ✛ | ✜ | ✝ | ✞ | ✟ | +| **\10x** | ✠ | ✡ | ✢ | ✣ | ✤ | ✥ | ✦ | ✧ | +| **\11x** | ★ | ✩ | ✪ | ✫ | ✬ | ✭ | ✮ | ✯ | +| **\12x** | ✰ | ✱ | ✲ | ✳ | ✴ | ✵ | ✶ | ✷ | +| **\13x** | ✸ | ✹ | ✺ | ✻ | ✼ | ✽ | ✾ | ✿ | +| **\14x** | ❀ | ❁ | ❂ | ❃ | ❄ | ❅ | ❆ | ❇ | +| **\15x** | ❈ | ❉ | ❊ | ❋ | ● | ❍ | ■ | ❏ | +| **\16x** | ❐ | ❑ | ❒ | ▲ | ▼ | ◆ | ❖ | ◗ | +| **\17x** | ❘ | ❙ | ❚ | ❛ | ❜ | ❝ | ❞ | � | +| **\20x** | ❨ | ❩ | ❪ | ❫ | ❬ | ❭ | ❮ | ❯ | +| **\21x** | ❰ | ❱ | ❲ | ❳ | ❴ | ❵ | � | � | +| **\24x** | � | ❡ | ❢ | ❣ | ❤ | ❥ | ❦ | ❧ | +| **\25x** | ♣ | ♦ | ♥ | ♠ | ① | ② | ③ | ④ | +| **\26x** | ⑤ | ⑥ | ⑦ | ⑧ | ⑨ | ⑩ | ❶ | ❷ | +| **\27x** | ❸ | ❹ | ❺ | ❻ | ❼ | ❽ | ❾ | ❿ | +| **\30x** | ➀ | ➁ | ➂ | ➃ | ➄ | ➅ | ➆ | ➇ | +| **\31x** | ➈ | ➉ | ➊ | ➋ | ➌ | ➍ | ➎ | ➏ | +| **\32x** | ➐ | ➑ | ➒ | ➓ | ➔ | → | ↔ | ↕ | +| **\33x** | ➘ | ➙ | ➚ | ➛ | ➜ | ➝ | ➞ | ➟ | +| **\34x** | ➠ | ➡ | ➢ | ➣ | ➤ | ➥ | ➦ | ➧ | +| **\35x** | ➨ | ➩ | ➪ | ➫ | ➬ | ➭ | ➮ | ➯ | +| **\36x** | � | ➱ | ➲ | ➳ | ➴ | ➵ | ➶ | ➷ | +| **\37x** | ➸ | ➹ | ➺ | ➻ | ➼ | ➽ | ➾ | � | diff --git a/doc/techref/index.md b/doc/techref/index.md new file mode 100644 index 00000000000..bf22ff1acc5 --- /dev/null +++ b/doc/techref/index.md @@ -0,0 +1,12 @@ +# Technical Reference + +The Technical Reference section provides detailed information on the technical aspects of +GMT and PyGMT, including supported encodings, fonts, bit and hachure patterns, and other +essential components for creating high-quality visualizations. For additional details, +visit the :gmt-docs:`GMT Technical Reference `. + +```{toctree} +:maxdepth: 1 + +encodings.md +``` diff --git a/pygmt/encodings.py b/pygmt/encodings.py new file mode 100644 index 00000000000..2cfda9b5728 --- /dev/null +++ b/pygmt/encodings.py @@ -0,0 +1,131 @@ +""" +Adobe character encodings supported by GMT. + +Currently, only Adobe Symbol, Adobe ZapfDingbats, and Adobe ISOLatin1+ encodings are +supported. + +The corresponding Unicode characters in each Adobe character encoding are generated +from the mapping table and conversion script in the GMT-octal-codes +(https://github.com/seisman/GMT-octal-codes) repository. Refer to that repository for +details. 
+ +Some code points are undefined and are assigned with the replacement character +(``\ufffd``). + +References +---------- + +- GMT-octal-codes: https://github.com/seisman/GMT-octal-codes +- GMT official documentation: https://docs.generic-mapping-tools.org/dev/reference/octal-codes.html +- Adobe Postscript Language Reference: https://www.adobe.com/jp/print/postscript/pdfs/PLRM.pdf +- ISOLatin1+: https://en.wikipedia.org/wiki/PostScript_Latin_1_Encoding +- Adobe Symbol: https://en.wikipedia.org/wiki/Symbol_(typeface) +- Zapf Dingbats: https://en.wikipedia.org/wiki/Zapf_Dingbats +- Adobe Glyph List: https://github.com/adobe-type-tools/agl-aglfn +""" + +# Dictionary of character mappings for different encodings. +charset: dict = {} + +# Adobe ISOLatin1+ charset. +charset["ISOLatin1+"] = dict( + zip( + range(0o030, 0o400), + "\ufffd\u2022\u2026\u2122\u2014\u2013\ufb01\u017e" + "\u0020\u0021\u0022\u0023\u0024\u0025\u0026\u2019" + "\u0028\u0029\u002a\u002b\u002c\u2212\u002e\u002f" + "\u0030\u0031\u0032\u0033\u0034\u0035\u0036\u0037" + "\u0038\u0039\u003a\u003b\u003c\u003d\u003e\u003f" + "\u0040\u0041\u0042\u0043\u0044\u0045\u0046\u0047" + "\u0048\u0049\u004a\u004b\u004c\u004d\u004e\u004f" + "\u0050\u0051\u0052\u0053\u0054\u0055\u0056\u0057" + "\u0058\u0059\u005a\u005b\u005c\u005d\u005e\u005f" + "\u2018\u0061\u0062\u0063\u0064\u0065\u0066\u0067" + "\u0068\u0069\u006a\u006b\u006c\u006d\u006e\u006f" + "\u0070\u0071\u0072\u0073\u0074\u0075\u0076\u0077" + "\u0078\u0079\u007a\u007b\u007c\u007d\u007e\u0161" + "\u0152\u2020\u2021\u0141\u2044\u2039\u0160\u203a" + "\u0153\u0178\u017d\u0142\u2030\u201e\u201c\u201d" + "\u0131\u0060\u00b4\u02c6\u02dc\u00af\u02d8\u02d9" + "\u00a8\u201a\u02da\u00b8\u0027\u02dd\u02db\u02c7" + "\u0020\u00a1\u00a2\u00a3\u00a4\u00a5\u00a6\u00a7" + "\u00a8\u00a9\u00aa\u00ab\u00ac\u002d\u00ae\u00af" + "\u00b0\u00b1\u00b2\u00b3\u00b4\u00b5\u00b6\u00b7" + "\u00b8\u00b9\u00ba\u00bb\u00bc\u00bd\u00be\u00bf" + "\u00c0\u00c1\u00c2\u00c3\u00c4\u00c5\u00c6\u00c7" + "\u00c8\u00c9\u00ca\u00cb\u00cc\u00cd\u00ce\u00cf" + "\u00d0\u00d1\u00d2\u00d3\u00d4\u00d5\u00d6\u00d7" + "\u00d8\u00d9\u00da\u00db\u00dc\u00dd\u00de\u00df" + "\u00e0\u00e1\u00e2\u00e3\u00e4\u00e5\u00e6\u00e7" + "\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef" + "\u00f0\u00f1\u00f2\u00f3\u00f4\u00f5\u00f6\u00f7" + "\u00f8\u00f9\u00fa\u00fb\u00fc\u00fd\u00fe\u00ff", + strict=False, + ) +) + +# Adobe Symbol charset. 
+charset["Symbol"] = dict( + zip( + [*range(0o040, 0o200), *range(0o240, 0o400)], + "\u0020\u0021\u2200\u0023\u2203\u0025\u0026\u220b" + "\u0028\u0029\u2217\u002b\u002c\u2212\u002e\u002f" + "\u0030\u0031\u0032\u0033\u0034\u0035\u0036\u0037" + "\u0038\u0039\u003a\u003b\u003c\u003d\u003e\u003f" + "\u2245\u0391\u0392\u03a7\u2206\u0395\u03a6\u0393" + "\u0397\u0399\u03d1\u039a\u039b\u039c\u039d\u039f" + "\u03a0\u0398\u03a1\u03a3\u03a4\u03a5\u03c2\u2126" + "\u039e\u03a8\u0396\u005b\u2234\u005d\u22a5\u005f" + "\uf8e5\u03b1\u03b2\u03c7\u03b4\u03b5\u03c6\u03b3" + "\u03b7\u03b9\u03d5\u03ba\u03bb\u03bc\u03bd\u03bf" + "\u03c0\u03b8\u03c1\u03c3\u03c4\u03c5\u03d6\u03c9" + "\u03be\u03c8\u03b6\u007b\u007c\u007d\u223c\ufffd" + "\u20ac\u03d2\u2032\u2264\u2215\u221e\u0192\u2663" + "\u2666\u2665\u2660\u2194\u2190\u2191\u2192\u2193" + "\u00b0\u00b1\u2033\u2265\u00d7\u221d\u2202\u2022" + "\u00f7\u2260\u2261\u2248\u2026\u23d0\u23af\u21b5" + "\u2135\u2111\u211c\u2118\u2297\u2295\u2205\u2229" + "\u222a\u2283\u2287\u2284\u2282\u2286\u2208\u2209" + "\u2220\u2207\u00ae\u00a9\u2122\u220f\u221a\u22c5" + "\u00ac\u2227\u2228\u21d4\u21d0\u21d1\u21d2\u21d3" + "\u25ca\u2329\u00ae\u00a9\u2122\u2211\u239b\u239c" + "\u239d\u23a1\u23a2\u23a3\u23a7\u23a8\u23a9\u23aa" + "\ufffd\u232a\u222b\u2320\u23ae\u2321\u239e\u239f" + "\u23a0\u23a4\u23a5\u23a6\u23ab\u23ac\u23ad\ufffd", + strict=False, + ) +) + +# Adobe ZapfDingbats charset. +charset["ZapfDingbats"] = dict( + zip( + [*range(0o040, 0o220), *range(0o240, 0o400)], + "\u0020\u2701\u2702\u2703\u2704\u260e\u2706\u2707" + "\u2708\u2709\u261b\u261e\u270c\u270d\u270e\u270f" + "\u2710\u2711\u2712\u2713\u2714\u2715\u2716\u2717" + "\u2718\u2719\u271a\u271b\u271c\u271d\u271e\u271f" + "\u2720\u2721\u2722\u2723\u2724\u2725\u2726\u2727" + "\u2605\u2729\u272a\u272b\u272c\u272d\u272e\u272f" + "\u2730\u2731\u2732\u2733\u2734\u2735\u2736\u2737" + "\u2738\u2739\u273a\u273b\u273c\u273d\u273e\u273f" + "\u2740\u2741\u2742\u2743\u2744\u2745\u2746\u2747" + "\u2748\u2749\u274a\u274b\u25cf\u274d\u25a0\u274f" + "\u2750\u2751\u2752\u25b2\u25bc\u25c6\u2756\u25d7" + "\u2758\u2759\u275a\u275b\u275c\u275d\u275e\ufffd" + "\u2768\u2769\u276a\u276b\u276c\u276d\u276e\u276f" + "\u2770\u2771\u2772\u2773\u2774\u2775\ufffd\ufffd" + "\ufffd\u2761\u2762\u2763\u2764\u2765\u2766\u2767" + "\u2663\u2666\u2665\u2660\u2460\u2461\u2462\u2463" + "\u2464\u2465\u2466\u2467\u2468\u2469\u2776\u2777" + "\u2778\u2779\u277a\u277b\u277c\u277d\u277e\u277f" + "\u2780\u2781\u2782\u2783\u2784\u2785\u2786\u2787" + "\u2788\u2789\u278a\u278b\u278c\u278d\u278e\u278f" + "\u2790\u2791\u2792\u2793\u2794\u2192\u2194\u2195" + "\u2798\u2799\u279a\u279b\u279c\u279d\u279e\u279f" + "\u27a0\u27a1\u27a2\u27a3\u27a4\u27a5\u27a6\u27a7" + "\u27a8\u27a9\u27aa\u27ab\u27ac\u27ad\u27ae\u27af" + "\ufffd\u27b1\u27b2\u27b3\u27b4\u27b5\u27b6\u27b7" + "\u27b8\u27b9\u27ba\u27bb\u27bc\u27bd\u27be\ufffd", + strict=False, + ) +) diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index 781e0e4533f..8997a2b0df1 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -2,7 +2,6 @@ Utilities and common tasks for wrapping the GMT modules. 
""" -# ruff: noqa: RUF001 import os import pathlib import shutil @@ -16,6 +15,7 @@ from typing import Any import xarray as xr +from pygmt.encodings import charset from pygmt.exceptions import GMTInvalidInput @@ -205,31 +205,31 @@ def data_kind(data=None, x=None, y=None, z=None, required_z=False, required_data return kind -def non_ascii_to_octal(argstr): +def non_ascii_to_octal(argstr: str) -> str: r""" Translate non-ASCII characters to their corresponding octal codes. - Currently, only characters in the ISOLatin1+ charset and - Symbol/ZapfDingbats fonts are supported. + Currently, only characters in the ISOLatin1+ charset and Symbol/ZapfDingbats fonts + are supported. Parameters ---------- - argstr : str + argstr The string to be translated. Returns ------- - translated_argstr : str + translated_argstr The translated string. Examples -------- >>> non_ascii_to_octal("•‰“”±°ÿ") - '\\31\\214\\216\\217\\261\\260\\377' - >>> non_ascii_to_octal("αζΔΩ∑π∇") + '\\031\\214\\216\\217\\261\\260\\377' + >>> non_ascii_to_octal("αζ∆Ω∑π∇") '@~\\141@~@~\\172@~@~\\104@~@~\\127@~@~\\345@~@~\\160@~@~\\321@~' >>> non_ascii_to_octal("✁❞❡➾") - '@%34%\\41@%%@%34%\\176@%%@%34%\\241@%%@%34%\\376@%%' + '@%34%\\041@%%@%34%\\176@%%@%34%\\241@%%@%34%\\376@%%' >>> non_ascii_to_octal("ABC ±120° DEF α ♥") 'ABC \\261120\\260 DEF @~\\141@~ @%34%\\252@%%' """ # noqa: RUF002 @@ -238,88 +238,15 @@ def non_ascii_to_octal(argstr): return argstr # Dictionary mapping non-ASCII characters to octal codes - mapping = {} - - # Adobe Symbol charset - # References: - # 1. https://en.wikipedia.org/wiki/Symbol_(typeface) - # 2. https://unicode.org/Public/MAPPINGS/VENDORS/ADOBE/symbol.txt - # Notes: - # 1. \322 and \342 are "REGISTERED SIGN SERIF" and - # "REGISTERED SIGN SANS SERIF" respectively, but only "REGISTERED SIGN" - # is available in the unicode table. So both are mapped to - # "REGISTERED SIGN". \323, \343, \324 and \344 also have the same - # problem. - # 2. Characters for \140, \275, \276 are incorrect. + mapping: dict = {} + # Adobe Symbol charset. + mapping.update({c: f"@~\\{i:03o}@~" for i, c in charset["Symbol"].items()}) + # Adobe ZapfDingbats charset. Font number is 34. mapping.update( - { - c: "@~\\" + format(i, "o") + "@~" - for c, i in zip( - " !∀#∃%&∋()∗+,−./" # \04x-05x - "0123456789:;<=>?" # \06x-07x - "≅ΑΒΧΔΕΦΓΗΙϑΚΛΜΝΟ" # \10x-11x - "ΠΘΡΣΤΥςΩΞΨΖ[∴]⊥_" # \12x-13x - "αβχδεφγηιϕκλμνο" # \14x-15x - "πθρστυϖωξψζ{|}∼" # \16x-17x. \177 is undefined - "€ϒ′≤⁄∞ƒ♣♦♥♠↔←↑→↓" # \24x-\25x - "°±″≥×∝∂•÷≠≡≈…↵" # \26x-27x - "ℵℑℜ℘⊗⊕∅∩∪⊃⊇⊄⊂⊆∈∉" # \30x-31x - "∠∇®©™∏√⋅¬∧∨⇔⇐⇑⇒⇓" # \32x-33x - "◊〈®©™∑" # \34x-35x - "〉∫⌠⌡", # \36x-37x. \360 and \377 are undefined - [*range(32, 127), *range(160, 240), *range(241, 255)], - strict=True, - ) - } + {c: f"@%34%\\{i:03o}@%%" for i, c in charset["ZapfDingbats"].items()} ) - - # Adobe ZapfDingbats charset - # References: - # 1. https://en.wikipedia.org/wiki/Zapf_Dingbats - # 2. https://unicode.org/Public/MAPPINGS/VENDORS/ADOBE/zdingbat.txt - mapping.update( - { - c: "@%34%\\" + format(i, "o") + "@%%" - for c, i in zip( - " ✁✂✃✄☎✆✇✈✉☛☞✌✍✎✏" # \04x-\05x - "✐✑✒✓✔✕✖✗✘✙✚✛✜✝✞✟" # \06x-\07x - "✠✡✢✣✤✥✦✧★✩✪✫✬✭✮✯" # \10x-\11x - "✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿" # \12x-\13x - "❀❁❂❃❄❅❆❇❈❉❊❋●❍■❏" # \14x-\15x - "❐❑❒▲▼◆❖◗❘❙❚❛❜❝❞" # \16x-\17x. \177 is undefined - "❡❢❣❤❥❦❧♣♦♥♠①②③④" # \24x-\25x. \240 is undefined - "⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿" # \26x-\27x - "➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏" # \30x-\31x - "➐➑➒➓➔→↔↕➘➙➚➛➜➝➞➟" # \32x-\33x - "➠➡➢➣➤➥➦➧➨➩➪➫➬➭➮➯" # \34x-\35x - "➱➲➳➴➵➶➷➸➹➺➻➼➽➾", # \36x-\37x. 
\360 and \377 are undefined - [*range(32, 127), *range(161, 240), *range(241, 255)], - strict=True, - ) - } - ) - - # Adobe ISOLatin1+ charset (i.e., ISO-8859-1 with extensions) - # References: - # 1. https://en.wikipedia.org/wiki/ISO/IEC_8859-1 - # 2. https://docs.generic-mapping-tools.org/dev/reference/octal-codes.html - # 3. https://www.adobe.com/jp/print/postscript/pdfs/PLRM.pdf - mapping.update( - { - c: "\\" + format(i, "o") - for c, i in zip( - "•…™—–fiž" # \03x. \030 is undefined - "’‘" # \047 and \140 - "š" # \177 - "Œ†‡Ł⁄‹Š›œŸŽł‰„“”" # \20x-\21x - "ı`´ˆ˜¯˘˙¨‚˚¸'˝˛ˇ", # \22x-\23x - [*range(25, 32), 39, 96, *range(127, 160)], - strict=True, - ) - } - ) - # \240-\377 - mapping.update({chr(i): "\\" + format(i, "o") for i in range(160, 256)}) + # Adobe ISOLatin1+ charset. Put at the end. + mapping.update({c: f"\\{i:03o}" for i, c in charset["ISOLatin1+"].items()}) # Remove any printable characters mapping = {k: v for k, v in mapping.items() if k not in string.printable} diff --git a/pygmt/tests/test_text.py b/pygmt/tests/test_text.py index ab07e964954..6bd2c61383e 100644 --- a/pygmt/tests/test_text.py +++ b/pygmt/tests/test_text.py @@ -417,7 +417,7 @@ def test_text_nonascii(): fig.basemap(region=[0, 10, 0, 10], projection="X10c", frame=True) fig.text(position="TL", text="position-text:°α") # noqa: RUF001 fig.text(x=1, y=1, text="xytext:°α") # noqa: RUF001 - fig.text(x=[5, 5], y=[3, 5], text=["xytext1:αζΔ❡", "xytext2:∑π∇✉"]) + fig.text(x=[5, 5], y=[3, 5], text=["xytext1:αζ∆❡", "xytext2:∑π∇✉"]) return fig From 8d16243dd248730063667caa195945de592ba976 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 30 Jun 2024 12:08:13 +0800 Subject: [PATCH 174/218] Fix a typo with Markdown syntax (#3306) --- doc/techref/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/techref/index.md b/doc/techref/index.md index bf22ff1acc5..b152966773a 100644 --- a/doc/techref/index.md +++ b/doc/techref/index.md @@ -3,7 +3,7 @@ The Technical Reference section provides detailed information on the technical aspects of GMT and PyGMT, including supported encodings, fonts, bit and hachure patterns, and other essential components for creating high-quality visualizations. For additional details, -visit the :gmt-docs:`GMT Technical Reference `. +visit the {gmt-docs}`GMT Technical Reference `. ```{toctree} :maxdepth: 1 From 215e4a1a541c53079e0c63002a7cb8bf5552034c Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 2 Jul 2024 17:04:31 +0800 Subject: [PATCH 175/218] Add pytest conftest.py file to suppress warnings with NumPy 2.0 (#3307) Co-authored-by: actions-bot <58130806+actions-bot@users.noreply.github.com> Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/conftest.py | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 pygmt/conftest.py diff --git a/pygmt/conftest.py b/pygmt/conftest.py new file mode 100644 index 00000000000..bc896d44732 --- /dev/null +++ b/pygmt/conftest.py @@ -0,0 +1,11 @@ +""" +conftest.py for pytest. +""" + +import numpy as np +from packaging.version import Version + +# Keep this until we require numpy to be >=2.0 +# Address https://github.com/GenericMappingTools/pygmt/issues/2628. 
+if Version(np.__version__) >= Version("2.0.0.dev0+git20230726"): + np.set_printoptions(legacy="1.25") # type: ignore[arg-type] From e8d1f71d9f481aa53972604d0a8f462af47095e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Jul 2024 10:51:10 +1200 Subject: [PATCH 176/218] Build(deps): Bump pypa/gh-action-pypi-publish from 1.8.14 to 1.9.0 (#3308) Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.8.14 to 1.9.0. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.8.14...v1.9.0) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish-to-pypi.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index f6216d3908c..6527fc3b6ac 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -75,10 +75,10 @@ jobs: ls -lh dist/ - name: Publish to Test PyPI - uses: pypa/gh-action-pypi-publish@v1.8.14 + uses: pypa/gh-action-pypi-publish@v1.9.0 with: repository-url: https://test.pypi.org/legacy/ - name: Publish to PyPI if: startsWith(github.ref, 'refs/tags') - uses: pypa/gh-action-pypi-publish@v1.8.14 + uses: pypa/gh-action-pypi-publish@v1.9.0 From be56ab4fe6ac5a86b13c99dbd454e13c42732400 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Jul 2024 10:51:34 +1200 Subject: [PATCH 177/218] Build(deps): Bump actions/create-github-app-token from 1.10.1 to 1.10.3 (#3309) Bumps [actions/create-github-app-token](https://github.com/actions/create-github-app-token) from 1.10.1 to 1.10.3. - [Release notes](https://github.com/actions/create-github-app-token/releases) - [Commits](https://github.com/actions/create-github-app-token/compare/v1.10.1...v1.10.3) --- updated-dependencies: - dependency-name: actions/create-github-app-token dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/format-command.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 0bea9b70f4c..159315c2a83 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: # Generate token from GenericMappingTools bot - - uses: actions/create-github-app-token@v1.10.1 + - uses: actions/create-github-app-token@v1.10.3 id: generate-token with: app-id: ${{ secrets.APP_ID }} From 815081765e6b26e5ba990fa45fa0d36e1c3ce9d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Thu, 4 Jul 2024 01:08:11 +0200 Subject: [PATCH 178/218] Gallery examples: Minor typo fixes (#3311) --- examples/gallery/basemaps/double_y_axes.py | 2 +- examples/gallery/basemaps/ternary.py | 2 +- examples/gallery/embellishments/inset.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/gallery/basemaps/double_y_axes.py b/examples/gallery/basemaps/double_y_axes.py index 84db8a90f06..6ccaf629bea 100644 --- a/examples/gallery/basemaps/double_y_axes.py +++ b/examples/gallery/basemaps/double_y_axes.py @@ -1,5 +1,5 @@ """ -Double Y axes graph +Double Y-axes graph =================== The ``frame`` parameter of the plotting methods of the :class:`pygmt.Figure` diff --git a/examples/gallery/basemaps/ternary.py b/examples/gallery/basemaps/ternary.py index cb122ec3ad2..47edad5ec56 100644 --- a/examples/gallery/basemaps/ternary.py +++ b/examples/gallery/basemaps/ternary.py @@ -42,5 +42,5 @@ # Add a colorbar indicating the values given in the fourth column of # the input dataset -fig.colorbar(position="JBC+o0c/1.5c", frame=["x+lPermittivity"]) +fig.colorbar(position="JBC+o0c/1.5c", frame="x+lPermittivity") fig.show() diff --git a/examples/gallery/embellishments/inset.py b/examples/gallery/embellishments/inset.py index 8fc4b7032a9..a0cda285d67 100644 --- a/examples/gallery/embellishments/inset.py +++ b/examples/gallery/embellishments/inset.py @@ -18,7 +18,7 @@ fig.coast(region="MG+r2", land="brown", water="lightblue", shorelines="thin", frame="a") # Create an inset, setting the position to top left, the width to 3.5 cm, and # the x- and y-offsets to 0.2 cm. The margin is set to 0, and the border is -# "gold" with a pen size of 1.5p. +# "gold" with a pen size of 1.5 points. with fig.inset(position="jTL+w3.5c+o0.2c", margin=0, box="+p1.5p,gold"): # Create a figure in the inset using coast. This example uses the azimuthal # orthogonal projection centered at 47E, 20S. The land color is set to From f9a7aca4a5421b36a84204b608c0c8ed9cb468cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Fri, 5 Jul 2024 09:53:24 +0200 Subject: [PATCH 179/218] Fix typo in AUTHORS.md (#3313) * Use adverb --- AUTHORS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/AUTHORS.md b/AUTHORS.md index 16a2495efb3..d6f3a7a58ae 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -6,7 +6,7 @@ with [Paul Wessel](https://www.soest.hawaii.edu/wessel) at the University of Hawaiʻi at Mānoa. 
The following people have contributed code and/or documentation to the project -(alphabetical by name) and are considered to be "PyGMT Developers": +(alphabetically by name) and are considered to be "PyGMT Developers": * [Abhishek Anant](https://twitter.com/itsabhianant) | [0000-0002-5751-2010](https://orcid.org/0000-0002-5751-2010) | Unaffiliated * [Andre L. Belem](https://github.com/andrebelem) | [0000-0002-8865-6180](https://orcid.org/0000-0002-8865-6180) | Fluminense Federal University, Brazil From 41a0fe0a6a79849565348203737360855d40add8 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 8 Jul 2024 06:43:25 +0800 Subject: [PATCH 180/218] CI: Remove the pytest-xdist and pytest-rerunfailures plugins from the Benchmarks workflow (#3314) --- .github/workflows/benchmarks.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index f4c2153e73e..3275b2f941d 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -69,8 +69,6 @@ jobs: pytest pytest-codspeed pytest-mpl - pytest-rerunfailures - pytest-xdist # Download cached remote files (artifacts) from GitHub - name: Download remote data from GitHub @@ -93,4 +91,4 @@ jobs: with: # 'bash -el -c' is needed to use the custom shell. # See https://github.com/CodSpeedHQ/action/issues/65. - run: bash -el -c "python -c \"import pygmt; pygmt.show_versions()\"; PYGMT_USE_EXTERNAL_DISPLAY=false python -m pytest -r P -n auto --reruns 2 --pyargs pygmt --codspeed" + run: bash -el -c "python -c \"import pygmt; pygmt.show_versions()\"; PYGMT_USE_EXTERNAL_DISPLAY=false python -m pytest -r P --pyargs pygmt --codspeed" From 0f31b7a9aa31ea7f0bef990275fc5c019d098bf3 Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Mon, 8 Jul 2024 19:41:34 +1200 Subject: [PATCH 181/218] Enable ruff's literal-membership (PLR6201) rule and fix violations (#3317) * Enable ruff's literal-membership (PLR6201) rule Xref https://docs.astral.sh/ruff/rules/literal-membership. * Fix PLR6201 violations * Fix PT001 violation due to preview mode setting Xref https://docs.astral.sh/ruff/settings/#lint_flake8-pytest-style_fixture-parentheses and https://github.com/astral-sh/ruff/pull/12106. * Check geodataframe column dtype's str name instead of class object --- pygmt/accessors.py | 4 ++-- pygmt/clib/session.py | 6 +++--- pygmt/datasets/load_remote_dataset.py | 2 +- pygmt/figure.py | 8 ++++---- pygmt/helpers/tempfile.py | 4 ++-- pygmt/helpers/validators.py | 2 +- pygmt/src/meca.py | 2 +- pygmt/src/tilemap.py | 2 +- pygmt/tests/test_clib.py | 2 +- pygmt/tests/test_clib_loading.py | 2 +- pyproject.toml | 7 +++++-- 11 files changed, 22 insertions(+), 19 deletions(-) diff --git a/pygmt/accessors.py b/pygmt/accessors.py index 6d661a6dbc4..ae9d51429dd 100644 --- a/pygmt/accessors.py +++ b/pygmt/accessors.py @@ -141,7 +141,7 @@ def registration(self): @registration.setter def registration(self, value): - if value not in (0, 1): + if value not in {0, 1}: raise GMTInvalidInput( f"Invalid grid registration value: {value}, should be either " "0 for Gridline registration or 1 for Pixel registration." @@ -157,7 +157,7 @@ def gtype(self): @gtype.setter def gtype(self, value): - if value not in (0, 1): + if value not in {0, 1}: raise GMTInvalidInput( f"Invalid coordinate system type: {value}, should be " "either 0 for Cartesian or 1 for Geographic." 
diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index b0aaff44ec3..7abe9b77e91 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -944,7 +944,7 @@ def put_vector(self, dataset, column, vector): ) gmt_type = self._check_dtype_and_dim(vector, ndim=1) - if gmt_type in (self["GMT_TEXT"], self["GMT_DATETIME"]): + if gmt_type in {self["GMT_TEXT"], self["GMT_DATETIME"]}: if gmt_type == self["GMT_DATETIME"]: vector = np.datetime_as_string(array_to_datetime(vector)) vector_pointer = strings_to_ctypes_array(vector) @@ -1622,7 +1622,7 @@ def virtualfile_in( # noqa: PLR0912 }[kind] # Ensure the data is an iterable (Python list or tuple) - if kind in ("geojson", "grid", "image", "file", "arg"): + if kind in {"geojson", "grid", "image", "file", "arg"}: if kind == "image" and data.dtype != "uint8": msg = ( f"Input image has dtype: {data.dtype} which is unsupported, " @@ -1849,7 +1849,7 @@ def read_virtualfile( # _GMT_DATASET). if kind is None: # Return the ctypes void pointer return pointer - if kind in ["image", "cube"]: + if kind in {"image", "cube"}: raise NotImplementedError(f"kind={kind} is not supported yet.") dtype = {"dataset": _GMT_DATASET, "grid": _GMT_GRID}[kind] return ctp.cast(pointer, ctp.POINTER(dtype)) diff --git a/pygmt/datasets/load_remote_dataset.py b/pygmt/datasets/load_remote_dataset.py index c97455bb36f..b53ab05b38e 100644 --- a/pygmt/datasets/load_remote_dataset.py +++ b/pygmt/datasets/load_remote_dataset.py @@ -387,7 +387,7 @@ def _load_remote_dataset( if registration is None: # Use gridline registration unless only pixel registration is available registration = "gridline" if "gridline" in resinfo.registrations else "pixel" - elif registration in ("pixel", "gridline"): + elif registration in {"pixel", "gridline"}: if registration not in resinfo.registrations: raise GMTInvalidInput( f"{registration} registration is not available for the " diff --git a/pygmt/figure.py b/pygmt/figure.py index 5190c4acf77..ce8693a43f3 100644 --- a/pygmt/figure.py +++ b/pygmt/figure.py @@ -236,7 +236,7 @@ def psconvert(self, **kwargs): kwargs["A"] = "" prefix = kwargs.get("F") - if prefix in ["", None, False, True]: + if prefix in {"", None, False, True}: raise GMTInvalidInput( "The 'prefix' parameter must be specified with a valid value." ) @@ -363,7 +363,7 @@ def savefig( # noqa: PLR0912 kwargs["Qg"] = 2 if worldfile: - if ext in ["eps", "kml", "pdf", "tiff"]: + if ext in {"eps", "kml", "pdf", "tiff"}: raise GMTInvalidInput( f"Saving a world file is not supported for '{ext}' format." ) @@ -444,7 +444,7 @@ def show(self, dpi=300, width=500, method=None, waiting=0.5, **kwargs): if method is None: method = SHOW_CONFIG["method"] - if method not in ["external", "notebook", "none"]: + if method not in {"external", "notebook", "none"}: raise GMTInvalidInput( f"Invalid display method '{method}', " "should be either 'notebook', 'external', or 'none'." 
@@ -583,7 +583,7 @@ def set_display(method=None): >>> pygmt.set_display(method=None) >>> fig.show() # again, will show a PNG image in the current notebook """ - if method in ["notebook", "external", "none"]: + if method in {"notebook", "external", "none"}: SHOW_CONFIG["method"] = method elif method is not None: raise GMTInvalidInput( diff --git a/pygmt/helpers/tempfile.py b/pygmt/helpers/tempfile.py index 17d90be6935..469f387fc5f 100644 --- a/pygmt/helpers/tempfile.py +++ b/pygmt/helpers/tempfile.py @@ -148,7 +148,7 @@ def tempfile_from_geojson(geojson): geojson = geojson.reset_index(drop=False) schema = gpd.io.file.infer_schema(geojson) for col, dtype in schema["properties"].items(): - if dtype in ("int", "int64"): + if dtype in {"int", "int64"}: overflow = geojson[col].abs().max() > 2**31 - 1 schema["properties"][col] = "float" if overflow else "int32" ogrgmt_kwargs["schema"] = schema @@ -156,7 +156,7 @@ def tempfile_from_geojson(geojson): # The default engine "pyogrio" doesn't support the 'schema' parameter # but we can change the dtype directly. for col in geojson.columns: - if geojson[col].dtype in ("int", "int64", "Int64"): + if geojson[col].dtype.name in {"int", "int64", "Int64"}: overflow = geojson[col].abs().max() > 2**31 - 1 dtype = "float" if overflow else "int32" geojson[col] = geojson[col].astype(dtype) diff --git a/pygmt/helpers/validators.py b/pygmt/helpers/validators.py index 94916eac1f5..879cc023b1f 100644 --- a/pygmt/helpers/validators.py +++ b/pygmt/helpers/validators.py @@ -49,7 +49,7 @@ def validate_output_table_type( ... assert len(w) == 1 'file' """ - if output_type not in ["file", "numpy", "pandas"]: + if output_type not in {"file", "numpy", "pandas"}: raise GMTInvalidInput( "Must specify 'output_type' either as 'file', 'numpy', or 'pandas'." ) diff --git a/pygmt/src/meca.py b/pygmt/src/meca.py index e44f3cb00a4..9822dae1f23 100644 --- a/pygmt/src/meca.py +++ b/pygmt/src/meca.py @@ -95,7 +95,7 @@ def convention_code(convention, component="full"): f"Invalid component '{component}' for convention '{convention}'." ) return codes2[convention][component] - if convention in ["a", "c", "m", "d", "z", "p", "x", "y", "t"]: + if convention in {"a", "c", "m", "d", "z", "p", "x", "y", "t"}: return convention raise GMTInvalidInput(f"Invalid convention '{convention}'.") diff --git a/pygmt/src/tilemap.py b/pygmt/src/tilemap.py index 1fe98ea1f1b..a13c8d9c740 100644 --- a/pygmt/src/tilemap.py +++ b/pygmt/src/tilemap.py @@ -146,7 +146,7 @@ def tilemap( # Only set region if no_clip is None or False, so that plot is clipped to exact # bounding box region - if kwargs.get("N") in [None, False]: + if kwargs.get("N") in {None, False}: kwargs["R"] = "/".join(str(coordinate) for coordinate in region) with Session() as lib: diff --git a/pygmt/tests/test_clib.py b/pygmt/tests/test_clib.py index c2732de91d7..f833e01a37b 100644 --- a/pygmt/tests/test_clib.py +++ b/pygmt/tests/test_clib.py @@ -529,7 +529,7 @@ def test_get_default(): Make sure get_default works without crashing and gives reasonable results. 
""" with clib.Session() as lib: - assert lib.get_default("API_GRID_LAYOUT") in ["rows", "columns"] + assert lib.get_default("API_GRID_LAYOUT") in {"rows", "columns"} assert int(lib.get_default("API_CORES")) >= 1 assert Version(lib.get_default("API_VERSION")) >= Version("6.3.0") assert lib.get_default("PROJ_LENGTH_UNIT") == "cm" diff --git a/pygmt/tests/test_clib_loading.py b/pygmt/tests/test_clib_loading.py index 8cb679e9285..df92a73fbd0 100644 --- a/pygmt/tests/test_clib_loading.py +++ b/pygmt/tests/test_clib_loading.py @@ -131,7 +131,7 @@ def _mock_ctypes_cdll_return(self, libname): # libname is a loaded GMT library return self.loaded_libgmt - @pytest.fixture() + @pytest.fixture def _mock_ctypes(self, monkeypatch): """ Patch the ctypes.CDLL function. diff --git a/pyproject.toml b/pyproject.toml index 0142001419d..98ef1f40125 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -121,8 +121,9 @@ select = [ "YTT", # flake8-2020 ] extend-select = [ - "D213", # Summary lines should be positioned on the second physical line of the docstring. - "D410", # A blank line after section headings. + "D213", # Summary lines should be positioned on the second physical line of the docstring. + "D410", # A blank line after section headings. + "PLR6201", # Use a set literal when testing for membership ] ignore = [ "D200", # One-line docstring should fit on one line @@ -140,6 +141,8 @@ ignore = [ "S603", # Allow method calls that initiate a subprocess without a shell "SIM117", # Allow nested `with` statements ] +preview = true +explicit-preview-rules = true [tool.ruff.lint.isort] known-third-party = ["pygmt"] From 9c13eb04b14baa82ecb740279b1e78e1ec20af18 Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Mon, 8 Jul 2024 22:21:07 +1200 Subject: [PATCH 182/218] Enable ruff's unspecified-encoding (PLW1514) rule and fix violations (#3319) * Enable ruff's unspecified-encoding (PLW1514) rule Xref https://docs.astral.sh/ruff/rules/unspecified-encoding * Fix PLR1514 violations by setting encoding="locale" Default unsafe-fix is to use `encoding="locale"` * Switch from encoding="locale" to encoding="utf-8" PEP0597 hints at UTF-8 becoming the default encoding in the future, so pre-emptively applying it here. 
Xref https://peps.python.org/pep-0597/#prepare-to-change-the-default-encoding-to-utf-8 --- pygmt/src/plot.py | 2 +- pygmt/src/plot3d.py | 2 +- pygmt/tests/test_datatypes_dataset.py | 10 +++++----- pygmt/tests/test_helpers.py | 2 +- pygmt/tests/test_legend.py | 2 +- pygmt/tests/test_meca.py | 2 +- pygmt/tests/test_plot.py | 4 ++-- pygmt/tests/test_plot3d.py | 4 ++-- pygmt/tests/test_text.py | 4 +++- pyproject.toml | 1 + 10 files changed, 18 insertions(+), 15 deletions(-) diff --git a/pygmt/src/plot.py b/pygmt/src/plot.py index e660f370848..43b26232871 100644 --- a/pygmt/src/plot.py +++ b/pygmt/src/plot.py @@ -247,7 +247,7 @@ def plot( # noqa: PLR0912 kwargs["S"] = "s0.2c" elif kind == "file" and str(data).endswith(".gmt"): # OGR_GMT file try: - with Path(which(data)).open() as file: + with Path(which(data)).open(encoding="utf-8") as file: line = file.readline() if "@GMULTIPOINT" in line or "@GPOINT" in line: kwargs["S"] = "s0.2c" diff --git a/pygmt/src/plot3d.py b/pygmt/src/plot3d.py index 929847337c5..65d87761d5c 100644 --- a/pygmt/src/plot3d.py +++ b/pygmt/src/plot3d.py @@ -223,7 +223,7 @@ def plot3d( # noqa: PLR0912 kwargs["S"] = "u0.2c" elif kind == "file" and str(data).endswith(".gmt"): # OGR_GMT file try: - with Path(which(data)).open() as file: + with Path(which(data)).open(encoding="utf-8") as file: line = file.readline() if "@GMULTIPOINT" in line or "@GPOINT" in line: kwargs["S"] = "u0.2c" diff --git a/pygmt/tests/test_datatypes_dataset.py b/pygmt/tests/test_datatypes_dataset.py index aa261c74a62..78c51e827e5 100644 --- a/pygmt/tests/test_datatypes_dataset.py +++ b/pygmt/tests/test_datatypes_dataset.py @@ -57,7 +57,7 @@ def test_dataset(): Test the basic functionality of GMT_DATASET. """ with GMTTempFile(suffix=".txt") as tmpfile: - with Path(tmpfile.name).open(mode="w") as fp: + with Path(tmpfile.name).open(mode="w", encoding="utf-8") as fp: print(">", file=fp) print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) @@ -75,7 +75,7 @@ def test_dataset_empty(): Make sure that an empty DataFrame is returned if a file contains no data. """ with GMTTempFile(suffix=".txt") as tmpfile: - with Path(tmpfile.name).open(mode="w") as fp: + with Path(tmpfile.name).open(mode="w", encoding="utf-8") as fp: print("# This is a comment line.", file=fp) df = dataframe_from_gmt(tmpfile.name) @@ -89,7 +89,7 @@ def test_dataset_header(): Test parsing column names from dataset header. """ with GMTTempFile(suffix=".txt") as tmpfile: - with Path(tmpfile.name).open(mode="w") as fp: + with Path(tmpfile.name).open(mode="w", encoding="utf-8") as fp: print("# lon lat z text", file=fp) print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) @@ -109,7 +109,7 @@ def test_dataset_header_greater_than_nheaders(): Test passing a header line number that is greater than the number of header lines. """ with GMTTempFile(suffix=".txt") as tmpfile: - with Path(tmpfile.name).open(mode="w") as fp: + with Path(tmpfile.name).open(mode="w", encoding="utf-8") as fp: print("# lon lat z text", file=fp) print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) @@ -127,7 +127,7 @@ def test_dataset_header_too_many_names(): Test passing a header line with more column names than the number of columns. 
""" with GMTTempFile(suffix=".txt") as tmpfile: - with Path(tmpfile.name).open(mode="w") as fp: + with Path(tmpfile.name).open(mode="w", encoding="utf-8") as fp: print("# lon lat z text1 text2", file=fp) print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) diff --git a/pygmt/tests/test_helpers.py b/pygmt/tests/test_helpers.py index b805523f75d..ea9e4c87225 100644 --- a/pygmt/tests/test_helpers.py +++ b/pygmt/tests/test_helpers.py @@ -132,7 +132,7 @@ def test_gmttempfile_read(): Make sure GMTTempFile.read() works. """ with GMTTempFile() as tmpfile: - Path(tmpfile.name).write_text("in.dat: N = 2\t<1/3>\t<2/4>\n") + Path(tmpfile.name).write_text("in.dat: N = 2\t<1/3>\t<2/4>\n", encoding="utf-8") assert tmpfile.read() == "in.dat: N = 2 <1/3> <2/4>\n" assert tmpfile.read(keep_tabs=True) == "in.dat: N = 2\t<1/3>\t<2/4>\n" diff --git a/pygmt/tests/test_legend.py b/pygmt/tests/test_legend.py index 8721bb66384..5280c131cda 100644 --- a/pygmt/tests/test_legend.py +++ b/pygmt/tests/test_legend.py @@ -97,7 +97,7 @@ def test_legend_specfile(): """ with GMTTempFile() as specfile: - Path(specfile.name).write_text(specfile_contents) + Path(specfile.name).write_text(specfile_contents, encoding="utf-8") fig = Figure() fig.basemap(projection="x6i", region=[0, 1, 0, 1], frame=True) fig.legend(specfile.name, position="JTM+jCM+w5i") diff --git a/pygmt/tests/test_meca.py b/pygmt/tests/test_meca.py index e54799711ad..2e71cc9665d 100644 --- a/pygmt/tests/test_meca.py +++ b/pygmt/tests/test_meca.py @@ -74,7 +74,7 @@ def test_meca_spec_single_focalmecha_file(): fig = Figure() fig.basemap(region=[-1, 1, 4, 6], projection="M8c", frame=2) with GMTTempFile() as temp: - Path(temp.name).write_text("0 5 0 0 90 0 5") + Path(temp.name).write_text("0 5 0 0 90 0 5", encoding="utf-8") fig.meca(spec=temp.name, convention="aki", scale="2.5c") return fig diff --git a/pygmt/tests/test_plot.py b/pygmt/tests/test_plot.py index ecae491c6fa..1de025c3c0e 100644 --- a/pygmt/tests/test_plot.py +++ b/pygmt/tests/test_plot.py @@ -487,7 +487,7 @@ def test_plot_ogrgmt_file_multipoint_default_style(func): # FEATURE_DATA 1 2 """ - Path(tmpfile.name).write_text(gmt_file) + Path(tmpfile.name).write_text(gmt_file, encoding="utf-8") fig = Figure() fig.plot( data=func(tmpfile.name), region=[0, 2, 1, 3], projection="X2c", frame=True @@ -506,7 +506,7 @@ def test_plot_ogrgmt_file_multipoint_non_default_style(): # FEATURE_DATA 1 2 """ - Path(tmpfile.name).write_text(gmt_file) + Path(tmpfile.name).write_text(gmt_file, encoding="utf-8") fig = Figure() fig.plot( data=tmpfile.name, diff --git a/pygmt/tests/test_plot3d.py b/pygmt/tests/test_plot3d.py index 33f3c94812f..60453b3fe63 100644 --- a/pygmt/tests/test_plot3d.py +++ b/pygmt/tests/test_plot3d.py @@ -444,7 +444,7 @@ def test_plot3d_ogrgmt_file_multipoint_default_style(func): > 1 1 2 1.5 1.5 1""" - Path(tmpfile.name).write_text(gmt_file) + Path(tmpfile.name).write_text(gmt_file, encoding="utf-8") fig = Figure() fig.plot3d( data=func(tmpfile.name), @@ -469,7 +469,7 @@ def test_plot3d_ogrgmt_file_multipoint_non_default_style(): > 1 1 2 1.5 1.5 1""" - Path(tmpfile.name).write_text(gmt_file) + Path(tmpfile.name).write_text(gmt_file, encoding="utf-8") fig = Figure() fig.plot3d( data=tmpfile.name, diff --git a/pygmt/tests/test_text.py b/pygmt/tests/test_text.py index 6bd2c61383e..8543734bb30 100644 --- a/pygmt/tests/test_text.py +++ b/pygmt/tests/test_text.py @@ -299,7 +299,9 @@ def test_text_angle_font_justify_from_textfile(): """ fig = Figure() with 
GMTTempFile(suffix=".txt") as tempfile: - Path(tempfile.name).write_text("114 0.5 30 22p,Helvetica-Bold,black LM BORNEO") + Path(tempfile.name).write_text( + "114 0.5 30 22p,Helvetica-Bold,black LM BORNEO", encoding="utf-8" + ) fig.text( region=[113, 117.5, -0.5, 3], projection="M5c", diff --git a/pyproject.toml b/pyproject.toml index 98ef1f40125..98637f1f8b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,6 +124,7 @@ extend-select = [ "D213", # Summary lines should be positioned on the second physical line of the docstring. "D410", # A blank line after section headings. "PLR6201", # Use a set literal when testing for membership + "PLW1514", # {function_name} in text mode without explicit encoding argument ] ignore = [ "D200", # One-line docstring should fit on one line From 3a32169402b992e013d5a28006cb3fa8135f7d80 Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Wed, 10 Jul 2024 08:13:32 +1200 Subject: [PATCH 183/218] Replace rio.set_crs with rio.write_crs in load_tile_map function (#3321) * Replace rio.set_crs with rio.write_crs in load_tile_map function Rioxarray 0.16.0 has deprecated the use of `set_crs` in favour of `write_crs`. Xref https://github.com/corteva/rioxarray/pull/793 * Add spatial_ref coordinate to load_tile_map's doctest output The `write_crs` command will write an extra grid_mapping attribute to the encoding that shows up in the coordinates. --- pygmt/datasets/tile_map.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pygmt/datasets/tile_map.py b/pygmt/datasets/tile_map.py index fec7465bdbf..6d3bedefc80 100644 --- a/pygmt/datasets/tile_map.py +++ b/pygmt/datasets/tile_map.py @@ -113,9 +113,10 @@ def load_tile_map( Frozen({'band': 3, 'y': 256, 'x': 512}) >>> raster.coords # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Coordinates: - * band (band) uint8 ... 0 1 2 - * y (y) float64 ... -7.081e-10 -7.858e+04 ... -1.996e+07 ... - * x (x) float64 ... -2.004e+07 -1.996e+07 ... 1.996e+07 2.004e+07 + * band (band) uint8 ... 0 1 2 + * y (y) float64 ... -7.081e-10 -7.858e+04 ... -1.996e+07 -2.004e+07 + * x (x) float64 ... -2.004e+07 -1.996e+07 ... 1.996e+07 2.004e+07 + spatial_ref int64 ... 0 """ if not _HAS_CONTEXTILY: raise ImportError( @@ -166,6 +167,6 @@ def load_tile_map( # If rioxarray is installed, set the coordinate reference system if hasattr(dataarray, "rio"): - dataarray = dataarray.rio.set_crs(input_crs="EPSG:3857") + dataarray = dataarray.rio.write_crs(input_crs="EPSG:3857") return dataarray From 480c6711f82dbdd61c51d3201a4248868541c227 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 10 Jul 2024 11:24:44 +0800 Subject: [PATCH 184/218] load_tile_map: Fix the raster band indexing, should start from 1 (#3322) --- pygmt/datasets/tile_map.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pygmt/datasets/tile_map.py b/pygmt/datasets/tile_map.py index 6d3bedefc80..e773e1dce38 100644 --- a/pygmt/datasets/tile_map.py +++ b/pygmt/datasets/tile_map.py @@ -113,7 +113,7 @@ def load_tile_map( Frozen({'band': 3, 'y': 256, 'x': 512}) >>> raster.coords # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Coordinates: - * band (band) uint8 ... 0 1 2 + * band (band) uint8 ... 1 2 3 * y (y) float64 ... -7.081e-10 -7.858e+04 ... -1.996e+07 -2.004e+07 * x (x) float64 ... -2.004e+07 -1.996e+07 ... 1.996e+07 2.004e+07 spatial_ref int64 ... 0 @@ -122,8 +122,7 @@ def load_tile_map( raise ImportError( "Package `contextily` is required to be installed to use this function. 
" "Please use `python -m pip install contextily` or " - "`mamba install -c conda-forge contextily` " - "to install the package." + "`mamba install -c conda-forge contextily` to install the package." ) contextily_kwargs = {} @@ -158,7 +157,7 @@ def load_tile_map( dataarray = xr.DataArray( data=rgb_image, coords={ - "band": np.array(object=[0, 1, 2], dtype=np.uint8), # Red, Green, Blue + "band": np.array(object=[1, 2, 3], dtype=np.uint8), # Red, Green, Blue "y": np.linspace(start=top, stop=bottom, num=rgb_image.shape[1]), "x": np.linspace(start=left, stop=right, num=rgb_image.shape[2]), }, From 97a6f30cf3a262d3f6028a40b7c61836af86c25f Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 10 Jul 2024 14:06:05 +0800 Subject: [PATCH 185/218] load_tile_map: Register the rio accessor by importing rioxarray so the returned raster has CRS (#3323) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/datasets/tile_map.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pygmt/datasets/tile_map.py b/pygmt/datasets/tile_map.py index e773e1dce38..ed9150b6d61 100644 --- a/pygmt/datasets/tile_map.py +++ b/pygmt/datasets/tile_map.py @@ -3,6 +3,7 @@ :class:`xarray.DataArray`. """ +import contextlib from typing import Literal from packaging.version import Version @@ -16,6 +17,10 @@ TileProvider = None _HAS_CONTEXTILY = False +with contextlib.suppress(ImportError): + # rioxarray is needed to register the rio accessor + import rioxarray # noqa: F401 + import numpy as np import xarray as xr @@ -117,6 +122,10 @@ def load_tile_map( * y (y) float64 ... -7.081e-10 -7.858e+04 ... -1.996e+07 -2.004e+07 * x (x) float64 ... -2.004e+07 -1.996e+07 ... 1.996e+07 2.004e+07 spatial_ref int64 ... 0 + >>> # CRS is set only if rioxarray is available + >>> if hasattr(raster, "rio"): + ... raster.rio.crs + CRS.from_epsg(3857) """ if not _HAS_CONTEXTILY: raise ImportError( From 1f4ce5e9eebae19b7754cd7426fb294cd3f84566 Mon Sep 17 00:00:00 2001 From: Wei Ji <23487320+weiji14@users.noreply.github.com> Date: Tue, 16 Jul 2024 17:15:16 +1200 Subject: [PATCH 186/218] Run pytest with `--color=yes` to force GitHub Actions logs to have color (#3330) GitHub Actions does not show colour outputs for pytest by default (https://github.com/pytest-dev/pytest/issues/7443), but it can be enabled using `--color=yes` flag. 
--- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 98637f1f8b6..e61ec4bd818 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -166,7 +166,7 @@ max-args=10 [tool.pytest.ini_options] minversion = "6.0" -addopts = "--verbose --durations=0 --durations-min=0.2 --doctest-modules --mpl --mpl-results-path=results" +addopts = "--verbose --color=yes --durations=0 --durations-min=0.2 --doctest-modules --mpl --mpl-results-path=results" markers = [ "benchmark: mark a test with custom benchmark settings.", ] From 69005cba7ceb1f96319bb17cab65ca6c4f4c1d06 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 16 Jul 2024 17:15:13 +0800 Subject: [PATCH 187/218] clib: Improve docstrings, comments, type hints and codes of some Session methods (#3327) --- pygmt/clib/session.py | 170 ++++++++++++++++++++---------------------- 1 file changed, 79 insertions(+), 91 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 7abe9b77e91..1e9489d4720 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -10,6 +10,7 @@ import pathlib import sys import warnings +from collections.abc import Generator from typing import Literal import numpy as np @@ -262,7 +263,7 @@ def __getitem__(self, name: str) -> int: def get_enum(self, name: str) -> int: """ - Get the value of a GMT constant (C enum) from gmt_resources.h. + Get the value of a GMT constant (C enum) from ``gmt_resources.h``. Used to set configuration values for other API calls. Wraps ``GMT_Get_Enum``. @@ -374,7 +375,7 @@ def create(self, name): # destroyed raise GMTCLibError( "Failed to create a GMT API session: There is a currently open session." - " Must destroy it fist." + " Must destroy it first." ) # If the exception is raised, this means that there is no open session # and we're free to create a new one. @@ -518,43 +519,41 @@ def get_default(self, name: str) -> str: raise GMTCLibError(msg) return value.value.decode() - def get_common(self, option): + def get_common(self, option: str) -> bool | int | float | np.ndarray: """ Inquire if a GMT common option has been set and return its current value if possible. Parameters ---------- - option : str - The GMT common option to check. Valid options are ``"B"``, ``"I"``, - ``"J"``, ``"R"``, ``"U"``, ``"V"``, ``"X"``, ``"Y"``, ``"a"``, - ``"b"``, ``"f"``, ``"g"``, ``"h"``, ``"i"``, ``"n"``, ``"o"``, - ``"p"``, ``"r"``, ``"s"``, ``"t"``, and ``":"``. + option + The GMT common option to check. Valid options are ``"B"``, ``"I"``, ``"J"``, + ``"R"``, ``"U"``, ``"V"``, ``"X"``, ``"Y"``, ``"a"``, ``"b"``, ``"f"``, + ``"g"``, ``"h"``, ``"i"``, ``"n"``, ``"o"``, ``"p"``, ``"r"``, ``"s"``, + ``"t"``, and ``":"``. Returns ------- - value : bool, int, float, or numpy.ndarray - Whether the option was set or its value. - - If the option was not set, return ``False``. Otherwise, - the return value depends on the choice of the option. + value + Whether the option was set or its value. If the option was not set, return + ``False``. Otherwise, the return value depends on the choice of the option. 
- - options ``"B"``, ``"J"``, ``"U"``, ``"g"``, ``"n"``, ``"p"``, - and ``"s"``: return ``True`` if set, else ``False`` (bool) + - options ``"B"``, ``"J"``, ``"U"``, ``"g"``, ``"n"``, ``"p"``, and ``"s"``: + return ``True`` if set, else ``False`` (bool) - ``"I"``: 2-element array for the increments (float) - ``"R"``: 4-element array for the region (float) - ``"V"``: the verbose level (int) - ``"X"``: the xshift (float) - ``"Y"``: the yshift (float) - ``"a"``: geometry of the dataset (int) - - ``"b"``: return 0 if `-bi` was set and 1 if `-bo` was set (int) - - ``"f"``: return 0 if `-fi` was set and 1 if `-fo` was set (int) + - ``"b"``: return 0 if ``-bi`` was set and 1 if ``-bo`` was set (int) + - ``"f"``: return 0 if ``-fi`` was set and 1 if ``-fo`` was set (int) - ``"h"``: whether to delete existing header records (int) - ``"i"``: number of input columns (int) - ``"o"``: number of output columns (int) - ``"r"``: registration type (int) - ``"t"``: 2-element array for the transparency (float) - - ``":"``: return 0 if `-:i` was set and 1 if `-:o` was set (int) + - ``":"``: return 0 if ``-:i`` was set and 1 if ``-:o`` was set (int) Examples -------- @@ -586,28 +585,28 @@ def get_common(self, option): argtypes=[ctp.c_void_p, ctp.c_uint, ctp.POINTER(ctp.c_double)], restype=ctp.c_int, ) - value = np.empty(6) # numpy array to store the value of the option + value = np.empty(6, np.float64) # numpy array to store the value of the option status = c_get_common( self.session_pointer, ord(option), value.ctypes.data_as(ctp.POINTER(ctp.c_double)), ) - # GMT_NOTSET (-1) means the option is not set - if status == self["GMT_NOTSET"]: + if status == self["GMT_NOTSET"]: # GMT_NOTSET (-1) means the option is not set return False - # option is set and no other value is returned - if status == 0: + if status == 0: # Option is set and no other value is returned. return True - # option is set and option values (in double type) are returned via the - # 'value' array. 'status' is number of valid values in the array. - if option in "IRt": - return value[:status] - if option in "XY": # only one valid element in the array - return value[0] - # option is set and the option value (in integer type) is returned via - # the function return value (i.e., 'status') - return status + + # Otherwise, option is set and values are returned. + match option: + case "I" | "R" | "t": + # Option values (in double type) are returned via the 'value' array. + # 'status' is number of valid values in the array. + return value[:status] + case "X" | "Y": # Only one valid element in the array. + return value[0] + case _: # 'status' is the option value (in integer type). + return status def call_module(self, module: str, args: str | list[str]): """ @@ -1698,7 +1697,7 @@ def virtualfile_from_data( @contextlib.contextmanager def virtualfile_out( self, kind: Literal["dataset", "grid"] = "dataset", fname: str | None = None - ): + ) -> Generator[str, None, None]: r""" Create a virtual file or an actual file for storing output data. @@ -1718,7 +1717,7 @@ def virtualfile_out( Yields ------ - vfile : str + vfile Name of the virtual file or the actual file. Examples @@ -1803,6 +1802,12 @@ def read_virtualfile( Cast the data into a GMT data container. Valid values are ``"dataset"``, ``"grid"`` and ``None``. If ``None``, will return a ctypes void pointer. + Returns + ------- + pointer + Pointer to the GMT data container. If ``kind`` is ``None``, returns a ctypes + void pointer instead. 
+ Examples -------- >>> from pathlib import Path @@ -1833,10 +1838,6 @@ def read_virtualfile( ... data_pointer = lib.read_virtualfile(voutgrd, kind="grid") ... assert isinstance(data_pointer, ctp.POINTER(_GMT_GRID)) - Returns - ------- - Pointer to the GMT data container. If ``kind`` is None, returns a ctypes void - pointer instead. """ c_read_virtualfile = self.get_libgmt_func( "GMT_Read_VirtualFile", @@ -1871,8 +1872,7 @@ def virtualfile_to_dataset( Parameters ---------- vfname - The virtual file name that stores the result data. Required for ``"pandas"`` - and ``"numpy"`` output type. + The virtual file name that stores the result data. output_type Desired output type of the result data. @@ -1929,44 +1929,37 @@ def virtualfile_to_dataset( ... assert result is None ... assert Path(outtmp.name).stat().st_size > 0 ... - ... # strings output + ... # strings, numpy and pandas outputs ... with Session() as lib: ... with lib.virtualfile_out(kind="dataset") as vouttbl: ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) + ... + ... # strings output ... outstr = lib.virtualfile_to_dataset( ... vfname=vouttbl, output_type="strings" ... ) - ... assert isinstance(outstr, np.ndarray) - ... assert outstr.dtype.kind in ("S", "U") + ... assert isinstance(outstr, np.ndarray) + ... assert outstr.dtype.kind in ("S", "U") ... - ... # numpy output - ... with Session() as lib: - ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) + ... # numpy output ... outnp = lib.virtualfile_to_dataset( ... vfname=vouttbl, output_type="numpy" ... ) - ... assert isinstance(outnp, np.ndarray) + ... assert isinstance(outnp, np.ndarray) ... - ... # pandas output - ... with Session() as lib: - ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) + ... # pandas output ... outpd = lib.virtualfile_to_dataset( ... vfname=vouttbl, output_type="pandas" ... ) - ... assert isinstance(outpd, pd.DataFrame) + ... assert isinstance(outpd, pd.DataFrame) ... - ... # pandas output with specified column names - ... with Session() as lib: - ... with lib.virtualfile_out(kind="dataset") as vouttbl: - ... lib.call_module("read", [tmpfile.name, vouttbl, "-Td"]) + ... # pandas output with specified column names ... outpd2 = lib.virtualfile_to_dataset( ... vfname=vouttbl, ... output_type="pandas", ... column_names=["col1", "col2", "col3", "coltext"], ... ) - ... assert isinstance(outpd2, pd.DataFrame) + ... assert isinstance(outpd2, pd.DataFrame) >>> outstr array(['TEXT1 TEXT23', 'TEXT4 TEXT567', 'TEXT8 TEXT90', 'TEXT123 TEXT456789'], dtype=' np.ndarray: """ - Extract the WESN bounding box of the currently active figure. + Extract the region of the currently active figure. - Retrieves the information from the PostScript file, so it works for - country codes as well. + Retrieves the information from the PostScript file, so it works for country + codes as well. Returns ------- - * wesn : 1-D array - A numpy 1-D array with the west, east, south, and north dimensions - of the current figure. + region + A numpy 1-D array with the west, east, south, and north dimensions of the + current figure. Examples -------- - >>> import pygmt >>> fig = pygmt.Figure() >>> fig.coast( - ... region=[0, 10, -20, -10], - ... projection="M6i", - ... frame=True, - ... land="black", + ... region=[0, 10, -20, -10], projection="M12c", frame=True, land="black" ... ) >>> with Session() as lib: - ... 
wesn = lib.extract_region() - >>> print(", ".join([f"{x:.2f}" for x in wesn])) + ... region = lib.extract_region() + >>> print(", ".join([f"{x:.2f}" for x in region])) 0.00, 10.00, -20.00, -10.00 - Using ISO country codes for the regions (for example ``"US.HI"`` for - Hawaiʻi): + Using ISO country codes for the regions (for example ``"US.HI"`` for Hawaiʻi): >>> fig = pygmt.Figure() - >>> fig.coast(region="US.HI", projection="M6i", frame=True, land="black") + >>> fig.coast(region="US.HI", projection="M12c", frame=True, land="black") >>> with Session() as lib: - ... wesn = lib.extract_region() - >>> print(", ".join([f"{x:.2f}" for x in wesn])) + ... region = lib.extract_region() + >>> print(", ".join([f"{x:.2f}" for x in region])) -164.71, -154.81, 18.91, 23.58 - The country codes can have an extra argument that rounds the region a - multiple of the argument (for example, ``"US.HI+r5"`` will round the - region to multiples of 5): + The country codes can have an extra argument that rounds the region to multiples + of the argument (for example, ``"US.HI+r5"`` will round the region to multiples + of 5): >>> fig = pygmt.Figure() - >>> fig.coast(region="US.HI+r5", projection="M6i", frame=True, land="black") + >>> fig.coast(region="US.HI+r5", projection="M12c", frame=True, land="black") >>> with Session() as lib: - ... wesn = lib.extract_region() - >>> print(", ".join([f"{x:.2f}" for x in wesn])) + ... region = lib.extract_region() + >>> print(", ".join([f"{x:.2f}" for x in region])) -165.00, -150.00, 15.00, 25.00 """ # noqa: RUF002 c_extract_region = self.get_libgmt_func( @@ -2124,12 +2112,12 @@ def extract_region(self): restype=ctp.c_int, ) - wesn = np.empty(4, dtype=np.float64) - wesn_pointer = wesn.ctypes.data_as(ctp.POINTER(ctp.c_double)) - # The second argument to GMT_Extract_Region is a file pointer to a - # PostScript file. It's only valid in classic mode. Use None to get a - # NULL pointer instead. - status = c_extract_region(self.session_pointer, None, wesn_pointer) + region = np.empty(4, dtype=np.float64) + status = c_extract_region( + self.session_pointer, + None, # File pointer to a PostScript file. Must be None in modern mode. + region.ctypes.data_as(ctp.POINTER(ctp.c_double)), + ) if status != 0: raise GMTCLibError("Failed to extract region from current figure.") - return wesn + return region From 1383de324a3451df23d927cc5430dc8860c81f4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Tue, 16 Jul 2024 16:08:28 +0200 Subject: [PATCH 188/218] Figure.coast: Improve documentation (#3325) --- pygmt/src/coast.py | 129 ++++++++++++++++++--------------------------- 1 file changed, 51 insertions(+), 78 deletions(-) diff --git a/pygmt/src/coast.py b/pygmt/src/coast.py index aef178ea74c..87a99692541 100644 --- a/pygmt/src/coast.py +++ b/pygmt/src/coast.py @@ -41,19 +41,17 @@ def coast(self, **kwargs): r""" Plot continents, shorelines, rivers, and borders on maps. - Plots grayshaded, colored, or textured land-masses [or water-masses] on + Plots grayshaded, colored, or textured land masses [or water masses] on maps and [optionally] draws coastlines, rivers, and political - boundaries. Alternatively, it can (1) issue clip paths that will - contain all land or all water areas, or (2) dump the data to an ASCII - table. The data files come in 5 different resolutions: (**f**)ull, + boundaries. The data files come in 5 different resolutions: (**f**)ull, (**h**)igh, (**i**)ntermediate, (**l**)ow, and (**c**)rude. 
The full resolution files amount to more than 55 Mb of data and provide great detail; for maps of larger geographical extent it is more economical to use one of the other resolutions. If the user selects to paint the - land-areas and does not specify fill of water-areas then the latter + land areas and does not specify fill of water areas then the latter will be transparent (i.e., earlier graphics drawn in those areas will - not be overwritten). Likewise, if the water-areas are painted and no - land fill is set then the land-areas will be transparent. + not be overwritten). Likewise, if the water areas are painted and no + land fill is set then the land areas will be transparent. A map projection must be supplied. @@ -71,63 +69,47 @@ def coast(self, **kwargs): lakes : str or list *fill*\ [**+l**\|\ **+r**]. Set the shade, color, or pattern for lakes and river-lakes. The - default is the fill chosen for wet areas set by the ``water`` + default is the fill chosen for "wet" areas set by the ``water`` parameter. Optionally, specify separate fills by appending **+l** for lakes or **+r** for river-lakes, and passing multiple strings in a list. resolution : str **f**\|\ **h**\|\ **i**\|\ **l**\|\ **c**. - Select the resolution of the data set to: (**f**\ )ull, - (**h**\ )igh, (**i**\ )ntermediate, (**l**\ )ow, - and (**c**\ )rude. + Select the resolution of the data set to: (**f**\ )ull, (**h**\ )igh, + (**i**\ )ntermediate, (**l**\ )ow, and (**c**\ )rude. land : str - Select filling or clipping of "dry" areas. + Select filling of "dry" areas. rivers : int, str, or list *river*\ [/*pen*]. Draw rivers. Specify the type of rivers and [optionally] append pen attributes [Default is ``"0.25p,black,solid"``]. - Choose from the list of river types below; pass a list to - ``rivers`` to use multiple arguments. - - 0 = Double-lined rivers (river-lakes) - - 1 = Permanent major rivers - - 2 = Additional major rivers - - 3 = Additional rivers - - 4 = Minor rivers - - 5 = Intermittent rivers - major - - 6 = Intermittent rivers - additional - - 7 = Intermittent rivers - minor - - 8 = Major canals - - 9 = Minor canals - - 10 = Irrigation canals + Choose from the list of river types below; pass a list to ``rivers`` + to use multiple arguments. + + - ``0``: Double-lined rivers (river-lakes) + - ``1``: Permanent major rivers + - ``2``: Additional major rivers + - ``3``: Additional rivers + - ``4``: Minor rivers + - ``5``: Intermittent rivers - major + - ``6``: Intermittent rivers - additional + - ``7``: Intermittent rivers - minor + - ``8``: Major canals + - ``9``: Minor canals + - ``10``: Irrigation canals You can also choose from several preconfigured river groups: - a = All rivers and canals (0-10) - - A = All rivers and canals except river-lakes (1-10) - - r = All permanent rivers (0-4) + - ``"a"``: All rivers and canals (0-10) + - ``"A"``: All rivers and canals except river-lakes (1-10) + - ``"r"``: All permanent rivers (0-4) + - ``"R"``: All permanent rivers except river-lakes (1-4) + - ``"i"``: All intermittent rivers (5-7) + - ``"c"``: All canals (8-10) - R = All permanent rivers except river-lakes (1-4) - - i = All intermittent rivers (5-7) - - c = All canals (8-10) map_scale : str - [**g**\|\ **j**\|\ **J**\|\ **n**\|\ **x**]\ *refpoint*\ - **+w**\ *length*. + [**g**\|\ **j**\|\ **J**\|\ **n**\|\ **x**]\ *refpoint*\ **+w**\ *length*. Draw a simple map scale centered on the reference point specified. 
box : bool or str [**+c**\ *clearances*][**+g**\ *fill*][**+i**\ [[*gap*/]\ *pen*]]\ @@ -150,48 +132,39 @@ def coast(self, **kwargs): borders : int, str, or list *border*\ [/*pen*]. Draw political boundaries. Specify the type of boundary and - [optionally] append pen attributes - [Default is ``"0.25p,black,solid"``]. - - Choose from the list of boundaries below. Pass a list to - ``borders`` to use multiple arguments. - - 1 = National boundaries + [optionally] append pen attributes [Default is ``"0.25p,black,solid"``]. - 2 = State boundaries within the Americas + Choose from the list of boundaries below. Pass a list to ``borders`` to + use multiple arguments. - 3 = Marine boundaries + - ``1``: National boundaries + - ``2``: State boundaries within the Americas + - ``3``: Marine boundaries + - ``"a"``: All boundaries (1-3) - a = All boundaries (1-3) water : str - Select filling or clipping of "wet" areas. + Select filling "wet" areas. shorelines : int, str, or list [*level*\ /]\ *pen*. Draw shorelines [Default is no shorelines]. Append pen attributes - [Default is ``"0.25p,black,solid"``] which - apply to all four levels. To set the pen for a single level, - pass a string with *level*\ /*pen*\ , where level is - 1-4 and represent coastline, lakeshore, island-in-lake shore, and - lake-in-island-in-lake shore. Pass a list of *level*\ /*pen* + [Default is ``"0.25p,black,solid"``] which apply to all four levels. + To set the pen for a single level, pass a string with *level*\ /*pen*\ , + where level is 1-4 and represent coastline, lakeshore, island-in-lake shore, + and lake-in-island-in-lake shore. Pass a list of *level*\ /*pen* strings to ``shorelines`` to set multiple levels. When specific level pens are set, those not listed will not be drawn. dcw : str or list - *code1,code2,…*\ [**+l**\|\ **L**\ ][**+g**\ *fill*\ ] - [**+p**\ *pen*\ ][**+z**]. - Select painting or dumping country polygons from the - `Digital Chart of the World + *code1,code2,…*\ [**+g**\ *fill*\ ][**+p**\ *pen*\ ][**+z**]. + Select painting country polygons from the `Digital Chart of the World `__. Append one or more comma-separated countries using the 2-character `ISO 3166-1 alpha-2 convention `__. - To select a state of a country (if available), append - .\ *state*, (e.g, US.TX for Texas). To specify a whole continent, - prepend **=** to any of the continent codes (e.g. =EU for Europe). - Append **+p**\ *pen* to draw polygon outlines - [Default is no outline] and **+g**\ *fill* to fill them - [Default is no fill]. Append **+l**\|\ **+L** to =\ *continent* to - only list countries in that continent; repeat if more than one - continent is requested. + To select a state of a country (if available), append .\ *state*, + (e.g, ``"US.TX"`` for Texas). To specify a whole continent, prepend **=** + to any of the continent codes (e.g. ``"=EU"`` for Europe). Append + **+p**\ *pen* to draw polygon outlines [Default is no outline] and + **+g**\ *fill* to fill them [Default is no fill]. {panel} {perspective} {transparency} @@ -204,11 +177,11 @@ def coast(self, **kwargs): >>> fig = pygmt.Figure() >>> # Call the coast method for the plot >>> fig.coast( - ... # Set the projection to Mercator, and plot size to 10 cm + ... # Set the projection to Mercator, and the plot width to 10 centimeters ... projection="M10c", ... # Set the region of the plot ... region=[-10, 30, 30, 60], - ... # Set the frame of the plot + ... # Set the frame of the plot, here annotations and major ticks ... frame="a", ... 
# Set the color of the land to "darkgreen" ... land="darkgreen", From 34be58a57248810faaf34bdb9d293ed4ddd938a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jul 2024 08:56:40 +1200 Subject: [PATCH 189/218] Build(deps): Bump CodSpeedHQ/action from 2.4.2 to 2.4.3 (#3332) Bumps [CodSpeedHQ/action](https://github.com/codspeedhq/action) from 2.4.2 to 2.4.3. - [Release notes](https://github.com/codspeedhq/action/releases) - [Changelog](https://github.com/CodSpeedHQ/action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codspeedhq/action/compare/v2.4.2...v2.4.3) --- updated-dependencies: - dependency-name: CodSpeedHQ/action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 3275b2f941d..e2230cb7a77 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -87,7 +87,7 @@ jobs: # Run the benchmark tests - name: Run benchmarks - uses: CodSpeedHQ/action@v2.4.2 + uses: CodSpeedHQ/action@v2.4.3 with: # 'bash -el -c' is needed to use the custom shell. # See https://github.com/CodSpeedHQ/action/issues/65. From a26b33b3d95e9e5c1e2a9f6ddf785ace5b7f33ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jul 2024 08:57:10 +1200 Subject: [PATCH 190/218] Build(deps): Bump actions/setup-python from 5.1.0 to 5.1.1 (#3333) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.1.0 to 5.1.1. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v5.1.0...v5.1.1) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/format-command.yml | 2 +- .github/workflows/publish-to-pypi.yml | 2 +- .github/workflows/style_checks.yaml | 2 +- .github/workflows/type_checks.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/format-command.yml b/.github/workflows/format-command.yml index 159315c2a83..a33d2eb2cdc 100644 --- a/.github/workflows/format-command.yml +++ b/.github/workflows/format-command.yml @@ -25,7 +25,7 @@ jobs: ref: ${{ github.event.client_payload.pull_request.head.ref }} # Setup Python environment - - uses: actions/setup-python@v5.1.0 + - uses: actions/setup-python@v5.1.1 with: python-version: '3.12' diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 6527fc3b6ac..995fd0ce13d 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -51,7 +51,7 @@ jobs: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v5.1.0 + uses: actions/setup-python@v5.1.1 with: python-version: '3.12' diff --git a/.github/workflows/style_checks.yaml b/.github/workflows/style_checks.yaml index 46d0205423f..119f1200066 100644 --- a/.github/workflows/style_checks.yaml +++ b/.github/workflows/style_checks.yaml @@ -28,7 +28,7 @@ jobs: # Setup Python - name: Set up Python - uses: actions/setup-python@v5.1.0 + uses: actions/setup-python@v5.1.1 with: python-version: '3.12' diff --git a/.github/workflows/type_checks.yml b/.github/workflows/type_checks.yml index 1589a516819..28d54993ee8 100644 --- a/.github/workflows/type_checks.yml +++ b/.github/workflows/type_checks.yml @@ -37,7 +37,7 @@ jobs: # Setup Python - name: Set up Python - uses: actions/setup-python@v5.1.0 + uses: actions/setup-python@v5.1.1 with: python-version: '3.12' From 318a8c4aff94bbb04b01f4ef4d11f14af43287a4 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Wed, 17 Jul 2024 09:44:16 +0800 Subject: [PATCH 191/218] Add typing hints for the returned value of load_xxx functions (#3329) --- pygmt/datasets/earth_age.py | 5 +++-- pygmt/datasets/earth_free_air_anomaly.py | 5 +++-- pygmt/datasets/earth_geoid.py | 5 +++-- pygmt/datasets/earth_magnetic_anomaly.py | 5 +++-- pygmt/datasets/earth_mask.py | 5 +++-- pygmt/datasets/earth_relief.py | 5 +++-- pygmt/datasets/earth_vertical_gravity_gradient.py | 5 +++-- pygmt/datasets/load_remote_dataset.py | 4 +++- pygmt/datasets/mars_relief.py | 5 +++-- pygmt/datasets/mercury_relief.py | 5 +++-- pygmt/datasets/moon_relief.py | 5 +++-- pygmt/datasets/pluto_relief.py | 5 +++-- pygmt/datasets/samples.py | 2 +- pygmt/datasets/tile_map.py | 3 ++- pygmt/datasets/venus_relief.py | 5 +++-- 15 files changed, 42 insertions(+), 27 deletions(-) diff --git a/pygmt/datasets/earth_age.py b/pygmt/datasets/earth_age.py index 1ee66513622..bec19158cd1 100644 --- a/pygmt/datasets/earth_age.py +++ b/pygmt/datasets/earth_age.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_earth_age"] @@ -19,7 +20,7 @@ def load_earth_age( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel"] = "gridline", -): +) -> xr.DataArray: r""" Load the Earth seafloor crustal age dataset in various resolutions. 
@@ -65,7 +66,7 @@ def load_earth_age( Returns ------- - grid : :class:`xarray.DataArray` + grid The Earth seafloor crustal age grid. Coordinates are latitude and longitude in degrees. Age is in millions of years (Myr). diff --git a/pygmt/datasets/earth_free_air_anomaly.py b/pygmt/datasets/earth_free_air_anomaly.py index 0630c7f8a38..da48977d688 100644 --- a/pygmt/datasets/earth_free_air_anomaly.py +++ b/pygmt/datasets/earth_free_air_anomaly.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_earth_free_air_anomaly"] @@ -19,7 +20,7 @@ def load_earth_free_air_anomaly( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, -): +) -> xr.DataArray: r""" Load the IGPP Earth free-air anomaly dataset in various resolutions. @@ -67,7 +68,7 @@ def load_earth_free_air_anomaly( Returns ------- - grid : :class:`xarray.DataArray` + grid The Earth free-air anomaly grid. Coordinates are latitude and longitude in degrees. Units are in mGal. diff --git a/pygmt/datasets/earth_geoid.py b/pygmt/datasets/earth_geoid.py index f2a747b8602..59b40562876 100644 --- a/pygmt/datasets/earth_geoid.py +++ b/pygmt/datasets/earth_geoid.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_earth_geoid"] @@ -19,7 +20,7 @@ def load_earth_geoid( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel"] = "gridline", -): +) -> xr.DataArray: r""" Load the EGM2008 Earth geoid dataset in various resolutions. @@ -58,7 +59,7 @@ def load_earth_geoid( Returns ------- - grid : :class:`xarray.DataArray` + grid The Earth geoid grid. Coordinates are latitude and longitude in degrees. Units are in meters. diff --git a/pygmt/datasets/earth_magnetic_anomaly.py b/pygmt/datasets/earth_magnetic_anomaly.py index b49bb29e2c8..61646a9743a 100644 --- a/pygmt/datasets/earth_magnetic_anomaly.py +++ b/pygmt/datasets/earth_magnetic_anomaly.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset from pygmt.exceptions import GMTInvalidInput @@ -21,7 +22,7 @@ def load_earth_magnetic_anomaly( region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, data_source: Literal["emag2", "emag2_4km", "wdmam"] = "emag2", -): +) -> xr.DataArray: r""" Load the Earth magnetic anomaly datasets in various resolutions. @@ -93,7 +94,7 @@ def load_earth_magnetic_anomaly( Returns ------- - grid : :class:`xarray.DataArray` + grid The Earth magnetic anomaly grid. Coordinates are latitude and longitude in degrees. Units are in nano Tesla (nT). 
diff --git a/pygmt/datasets/earth_mask.py b/pygmt/datasets/earth_mask.py index 22400a7369b..a44071b28b0 100644 --- a/pygmt/datasets/earth_mask.py +++ b/pygmt/datasets/earth_mask.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_earth_mask"] @@ -31,7 +32,7 @@ def load_earth_mask( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel"] = "gridline", -): +) -> xr.DataArray: r""" Load the GSHHG Earth mask dataset in various resolutions. @@ -69,7 +70,7 @@ def load_earth_mask( Returns ------- - grid : :class:`xarray.DataArray` + grid The Earth mask grid. Coordinates are latitude and longitude in degrees. The node values in the mask grids are all in the 0-4 range and reflect different surface types: diff --git a/pygmt/datasets/earth_relief.py b/pygmt/datasets/earth_relief.py index 823aa9ce43a..4a00b1e52dc 100644 --- a/pygmt/datasets/earth_relief.py +++ b/pygmt/datasets/earth_relief.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset from pygmt.exceptions import GMTInvalidInput @@ -36,7 +37,7 @@ def load_earth_relief( registration: Literal["gridline", "pixel", None] = None, data_source: Literal["igpp", "gebco", "gebcosi", "synbath"] = "igpp", use_srtm: bool = False, -): +) -> xr.DataArray: r""" Load the Earth relief datasets (topography and bathymetry) in various resolutions. @@ -106,7 +107,7 @@ def load_earth_relief( Returns ------- - grid : :class:`xarray.DataArray` + grid The Earth relief grid. Coordinates are latitude and longitude in degrees. Relief is in meters. diff --git a/pygmt/datasets/earth_vertical_gravity_gradient.py b/pygmt/datasets/earth_vertical_gravity_gradient.py index 71a6a649340..2ebba4563bc 100644 --- a/pygmt/datasets/earth_vertical_gravity_gradient.py +++ b/pygmt/datasets/earth_vertical_gravity_gradient.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_earth_vertical_gravity_gradient"] @@ -19,7 +20,7 @@ def load_earth_vertical_gravity_gradient( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, -): +) -> xr.DataArray: r""" Load the IGPP Earth vertical gravity gradient dataset in various resolutions. @@ -67,7 +68,7 @@ def load_earth_vertical_gravity_gradient( Returns ------- - grid : :class:`xarray.DataArray` + grid The Earth vertical gravity gradient grid. Coordinates are latitude and longitude in degrees. Units are in Eotvos. 
diff --git a/pygmt/datasets/load_remote_dataset.py b/pygmt/datasets/load_remote_dataset.py index b53ab05b38e..b03908414a5 100644 --- a/pygmt/datasets/load_remote_dataset.py +++ b/pygmt/datasets/load_remote_dataset.py @@ -12,6 +12,8 @@ from pygmt.src import which if TYPE_CHECKING: + from collections.abc import Sequence + import xarray as xr @@ -333,7 +335,7 @@ def _load_remote_dataset( name: str, prefix: str, resolution: str, - region: str | list, + region: Sequence[float] | str | None, registration: Literal["gridline", "pixel", None], ) -> xr.DataArray: r""" diff --git a/pygmt/datasets/mars_relief.py b/pygmt/datasets/mars_relief.py index 49b317db308..1d2cb631fd9 100644 --- a/pygmt/datasets/mars_relief.py +++ b/pygmt/datasets/mars_relief.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_mars_relief"] @@ -32,7 +33,7 @@ def load_mars_relief( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, -): +) -> xr.DataArray: r""" Load the Mars relief dataset in various resolutions. @@ -79,7 +80,7 @@ def load_mars_relief( Returns ------- - grid : :class:`xarray.DataArray` + grid The Mars relief grid. Coordinates are latitude and longitude in degrees. Relief is in meters. diff --git a/pygmt/datasets/mercury_relief.py b/pygmt/datasets/mercury_relief.py index 05b6024cb47..f3c360c356a 100644 --- a/pygmt/datasets/mercury_relief.py +++ b/pygmt/datasets/mercury_relief.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_mercury_relief"] @@ -30,7 +31,7 @@ def load_mercury_relief( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, -): +) -> xr.DataArray: r""" Load the Mercury relief dataset in various resolutions. @@ -77,7 +78,7 @@ def load_mercury_relief( Returns ------- - grid : :class:`xarray.DataArray` + grid The Mercury relief grid. Coordinates are latitude and longitude in degrees. Relief is in meters. diff --git a/pygmt/datasets/moon_relief.py b/pygmt/datasets/moon_relief.py index 6c8a68e599e..9daab0f47a5 100644 --- a/pygmt/datasets/moon_relief.py +++ b/pygmt/datasets/moon_relief.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_moon_relief"] @@ -32,7 +33,7 @@ def load_moon_relief( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, -): +) -> xr.DataArray: r""" Load the Moon relief dataset in various resolutions. @@ -79,7 +80,7 @@ def load_moon_relief( Returns ------- - grid : :class:`xarray.DataArray` + grid The Moon relief grid. Coordinates are latitude and longitude in degrees. Relief is in meters. 
diff --git a/pygmt/datasets/pluto_relief.py b/pygmt/datasets/pluto_relief.py index feb04ee83d3..620545899da 100644 --- a/pygmt/datasets/pluto_relief.py +++ b/pygmt/datasets/pluto_relief.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_pluto_relief"] @@ -30,7 +31,7 @@ def load_pluto_relief( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel", None] = None, -): +) -> xr.DataArray: r""" Load the Pluto relief dataset in various resolutions. @@ -77,7 +78,7 @@ def load_pluto_relief( Returns ------- - grid : :class:`xarray.DataArray` + grid The Pluto relief grid. Coordinates are latitude and longitude in degrees. Relief is in meters. diff --git a/pygmt/datasets/samples.py b/pygmt/datasets/samples.py index 297d1ce14fd..bf0864c14d5 100644 --- a/pygmt/datasets/samples.py +++ b/pygmt/datasets/samples.py @@ -273,7 +273,7 @@ class GMTSampleData(NamedTuple): } -def list_sample_data() -> dict: +def list_sample_data() -> dict[str, str]: """ Report datasets available for tests and documentation examples. diff --git a/pygmt/datasets/tile_map.py b/pygmt/datasets/tile_map.py index ed9150b6d61..94b2532bfd9 100644 --- a/pygmt/datasets/tile_map.py +++ b/pygmt/datasets/tile_map.py @@ -4,6 +4,7 @@ """ import contextlib +from collections.abc import Sequence from typing import Literal from packaging.version import Version @@ -28,7 +29,7 @@ def load_tile_map( - region: list, + region: Sequence[float], zoom: int | Literal["auto"] = "auto", source: TileProvider | str | None = None, lonlat: bool = True, diff --git a/pygmt/datasets/venus_relief.py b/pygmt/datasets/venus_relief.py index c83c49843ba..f8f682c461a 100644 --- a/pygmt/datasets/venus_relief.py +++ b/pygmt/datasets/venus_relief.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from typing import Literal +import xarray as xr from pygmt.datasets.load_remote_dataset import _load_remote_dataset __doctest_skip__ = ["load_venus_relief"] @@ -19,7 +20,7 @@ def load_venus_relief( ] = "01d", region: Sequence[float] | str | None = None, registration: Literal["gridline", "pixel"] = "gridline", -): +) -> xr.DataArray: r""" Load the Venus relief dataset in various resolutions. @@ -64,7 +65,7 @@ def load_venus_relief( Returns ------- - grid : :class:`xarray.DataArray` + grid The Venus relief grid. Coordinates are latitude and longitude in degrees. Relief is in meters. 
From 419e9319cd6cd12e181bf68bed5c3a93e696ae57 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 19 Jul 2024 09:30:16 +0800 Subject: [PATCH 192/218] build_arg_list: Raise an exception if an invalid output file name is given (#3336) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/helpers/utils.py | 8 +++++++- pygmt/tests/test_helpers.py | 13 +++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index 8997a2b0df1..dd202d2e840 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -350,7 +350,13 @@ def build_arg_list( gmt_args = [str(infile), *gmt_args] else: gmt_args = [str(_file) for _file in infile] + gmt_args - if outfile: + if outfile is not None: + if ( + not isinstance(outfile, str | pathlib.PurePath) + or str(outfile) in {"", ".", ".."} + or str(outfile).endswith(("/", "\\")) + ): + raise GMTInvalidInput(f"Invalid output file name '{outfile}'.") gmt_args.append(f"->{outfile}") return gmt_args diff --git a/pygmt/tests/test_helpers.py b/pygmt/tests/test_helpers.py index ea9e4c87225..ea966753535 100644 --- a/pygmt/tests/test_helpers.py +++ b/pygmt/tests/test_helpers.py @@ -12,6 +12,7 @@ from pygmt.helpers import ( GMTTempFile, args_in_kwargs, + build_arg_list, data_kind, kwargs_to_strings, unique_name, @@ -137,6 +138,18 @@ def test_gmttempfile_read(): assert tmpfile.read(keep_tabs=True) == "in.dat: N = 2\t<1/3>\t<2/4>\n" +@pytest.mark.parametrize( + "outfile", + [123, "", ".", "..", "path/to/dir/", "path\\to\\dir\\", Path(), Path("..")], +) +def test_build_arg_list_invalid_output(outfile): + """ + Test that build_arg_list raises an exception when output file name is invalid. + """ + with pytest.raises(GMTInvalidInput): + build_arg_list({}, outfile=outfile) + + def test_args_in_kwargs(): """ Test that args_in_kwargs function returns correct Boolean responses. From 6c436a3241371c01000e8edb8d5804cd2d75e33a Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 19 Jul 2024 10:30:23 +0800 Subject: [PATCH 193/218] pygmt.grd2cpt & pygmt.makecpt: Simplify the logic for dealing with CPT output (#3334) --- pygmt/src/grd2cpt.py | 16 ++++++++-------- pygmt/src/makecpt.py | 17 +++++++---------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/pygmt/src/grd2cpt.py b/pygmt/src/grd2cpt.py index de4a9ab5248..f6d954c3cad 100644 --- a/pygmt/src/grd2cpt.py +++ b/pygmt/src/grd2cpt.py @@ -183,13 +183,13 @@ def grd2cpt(grid, **kwargs): """ if kwargs.get("W") is not None and kwargs.get("Ww") is not None: raise GMTInvalidInput("Set only categorical or cyclic to True, not both.") + + if (output := kwargs.pop("H", None)) is not None: + kwargs["H"] = True + with Session() as lib: with lib.virtualfile_in(check_kind="raster", data=grid) as vingrd: - if kwargs.get("H") is None: # if no output is set - arg_str = build_arg_list(kwargs, infile=vingrd) - else: # if output is set - outfile, kwargs["H"] = kwargs["H"], True - if not outfile or not isinstance(outfile, str): - raise GMTInvalidInput("'output' should be a proper file name.") - arg_str = build_arg_list(kwargs, infile=vingrd, outfile=outfile) - lib.call_module(module="grd2cpt", args=arg_str) + lib.call_module( + module="grd2cpt", + args=build_arg_list(kwargs, infile=vingrd, outfile=output), + ) diff --git a/pygmt/src/makecpt.py b/pygmt/src/makecpt.py index 695ea4c5afa..e5ef6f5c556 100644 --- a/pygmt/src/makecpt.py +++ b/pygmt/src/makecpt.py @@ -153,14 +153,11 @@ def makecpt(**kwargs): range. 
Note that ``cyclic=True`` cannot be set together with ``categorical=True``. """ + if kwargs.get("W") is not None and kwargs.get("Ww") is not None: + raise GMTInvalidInput("Set only categorical or cyclic to True, not both.") + + if (output := kwargs.pop("H", None)) is not None: + kwargs["H"] = True + with Session() as lib: - if kwargs.get("W") is not None and kwargs.get("Ww") is not None: - raise GMTInvalidInput("Set only categorical or cyclic to True, not both.") - if kwargs.get("H") is None: # if no output is set - arg_str = build_arg_list(kwargs) - else: # if output is set - outfile, kwargs["H"] = kwargs.pop("H"), True - if not outfile or not isinstance(outfile, str): - raise GMTInvalidInput("'output' should be a proper file name.") - arg_str = build_arg_list(kwargs, outfile=outfile) - lib.call_module(module="makecpt", args=arg_str) + lib.call_module(module="makecpt", args=build_arg_list(kwargs, outfile=output)) From 917b3aa42c5987be0c12f95dd4433fd53da29649 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Fri, 19 Jul 2024 14:28:22 +0800 Subject: [PATCH 194/218] Wrap the GMT API function GMT_Read_Data to read data into GMT data containers (#3324) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- doc/api/index.rst | 1 + pygmt/clib/conversion.py | 4 +- pygmt/clib/session.py | 95 ++++++++++++++++++- pygmt/tests/test_clib_read_data.py | 141 +++++++++++++++++++++++++++++ 4 files changed, 239 insertions(+), 2 deletions(-) create mode 100644 pygmt/tests/test_clib_read_data.py diff --git a/doc/api/index.rst b/doc/api/index.rst index 5cf28bb3ebb..cff460ce2ff 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -309,6 +309,7 @@ Low level access (these are mostly used by the :mod:`pygmt.clib` package): clib.Session.put_matrix clib.Session.put_strings clib.Session.put_vector + clib.Session.read_data clib.Session.write_data clib.Session.open_virtualfile clib.Session.read_virtualfile diff --git a/pygmt/clib/conversion.py b/pygmt/clib/conversion.py index 95f2f08aa51..a296601e5a1 100644 --- a/pygmt/clib/conversion.py +++ b/pygmt/clib/conversion.py @@ -247,7 +247,9 @@ def as_c_contiguous(array): return array -def sequence_to_ctypes_array(sequence: Sequence, ctype, size: int) -> ctp.Array | None: +def sequence_to_ctypes_array( + sequence: Sequence | None, ctype, size: int +) -> ctp.Array | None: """ Convert a sequence of numbers into a ctypes array variable. diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 1e9489d4720..1b8b5483a28 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -10,7 +10,7 @@ import pathlib import sys import warnings -from collections.abc import Generator +from collections.abc import Generator, Sequence from typing import Literal import numpy as np @@ -1067,6 +1067,99 @@ def put_matrix(self, dataset, matrix, pad=0): if status != 0: raise GMTCLibError(f"Failed to put matrix of type {matrix.dtype}.") + def read_data( + self, + infile: str, + kind: Literal["dataset", "grid"], + family: str | None = None, + geometry: str | None = None, + mode: str = "GMT_READ_NORMAL", + region: Sequence[float] | None = None, + data=None, + ): + """ + Read a data file into a GMT data container. + + Wraps ``GMT_Read_Data`` but only allows reading from a file. The function + definition is different from the original C API function. + + Parameters + ---------- + infile + The input file name. + kind + The data kind of the input file. Valid values are ``"dataset"`` and + ``"grid"``. + family + A valid GMT data family name (e.g., ``"GMT_IS_DATASET"``). 
See the + ``FAMILIES`` attribute for valid names. If ``None``, will determine the data + family from the ``kind`` parameter. + geometry + A valid GMT data geometry name (e.g., ``"GMT_IS_POINT"``). See the + ``GEOMETRIES`` attribute for valid names. If ``None``, will determine the + data geometry from the ``kind`` parameter. + mode + How the data is to be read from the file. This option varies depending on + the given family. See the + :gmt-docs:`GMT API documentation ` + for details. Default is ``GMT_READ_NORMAL`` which corresponds to the default + read mode value of 0 in the ``GMT_enum_read`` enum. + region + Subregion of the data, in the form of [xmin, xmax, ymin, ymax, zmin, zmax]. + If ``None``, the whole data is read. + data + ``None`` or the pointer returned by this function after a first call. It's + useful when reading grids/images/cubes in two steps (get a grid/image/cube + structure with a header, then read the data). + + Returns + ------- + Pointer to the data container, or ``None`` if there were errors. + + Raises + ------ + GMTCLibError + If the GMT API function fails to read the data. + """ # noqa: W505 + c_read_data = self.get_libgmt_func( + "GMT_Read_Data", + argtypes=[ + ctp.c_void_p, # V_API + ctp.c_uint, # family + ctp.c_uint, # method + ctp.c_uint, # geometry + ctp.c_uint, # mode + ctp.POINTER(ctp.c_double), # wesn + ctp.c_char_p, # infile + ctp.c_void_p, # data + ], + restype=ctp.c_void_p, # data_ptr + ) + + # Determine the family, geometry and data container from kind + _family, _geometry, dtype = { + "dataset": ("GMT_IS_DATASET", "GMT_IS_PLP", _GMT_DATASET), + "grid": ("GMT_IS_GRID", "GMT_IS_SURFACE", _GMT_GRID), + }[kind] + if family is None: + family = _family + if geometry is None: + geometry = _geometry + + data_ptr = c_read_data( + self.session_pointer, + self[family], + self["GMT_IS_FILE"], # Reading from a file + self[geometry], + self[mode], + sequence_to_ctypes_array(region, ctp.c_double, 6), + infile.encode(), + data, + ) + if data_ptr is None: + raise GMTCLibError(f"Failed to read dataset from '{infile}'.") + return ctp.cast(data_ptr, ctp.POINTER(dtype)) + def write_data(self, family, geometry, mode, wesn, output, data): """ Write a GMT data container to a file. diff --git a/pygmt/tests/test_clib_read_data.py b/pygmt/tests/test_clib_read_data.py new file mode 100644 index 00000000000..43978b291c2 --- /dev/null +++ b/pygmt/tests/test_clib_read_data.py @@ -0,0 +1,141 @@ +""" +Test the Session.read_data method. +""" + +from pathlib import Path + +import pandas as pd +import pytest +import xarray as xr +from pygmt.clib import Session +from pygmt.exceptions import GMTCLibError +from pygmt.helpers import GMTTempFile +from pygmt.io import load_dataarray +from pygmt.src import which + +try: + import rioxarray # noqa: F401 + + _HAS_RIOXARRAY = True +except ImportError: + _HAS_RIOXARRAY = False + + +@pytest.fixture(scope="module", name="expected_xrgrid") +def fixture_expected_xrgrid(): + """ + The expected xr.DataArray object for the static_earth_relief.nc file. + """ + return load_dataarray(which("@static_earth_relief.nc")) + + +def test_clib_read_data_dataset(): + """ + Test the Session.read_data method for datasets. 
+ """ + with GMTTempFile(suffix=".txt") as tmpfile: + # Prepare the sample data file + with Path(tmpfile.name).open(mode="w", encoding="utf-8") as fp: + print("# x y z name", file=fp) + print(">", file=fp) + print("1.0 2.0 3.0 TEXT1 TEXT23", file=fp) + print("4.0 5.0 6.0 TEXT4 TEXT567", file=fp) + print(">", file=fp) + print("7.0 8.0 9.0 TEXT8 TEXT90", file=fp) + print("10.0 11.0 12.0 TEXT123 TEXT456789", file=fp) + + with Session() as lib: + ds = lib.read_data(tmpfile.name, kind="dataset").contents + df = ds.to_dataframe(header=0) + expected_df = pd.DataFrame( + data={ + "x": [1.0, 4.0, 7.0, 10.0], + "y": [2.0, 5.0, 8.0, 11.0], + "z": [3.0, 6.0, 9.0, 12.0], + "name": pd.Series( + [ + "TEXT1 TEXT23", + "TEXT4 TEXT567", + "TEXT8 TEXT90", + "TEXT123 TEXT456789", + ], + dtype=pd.StringDtype(), + ), + } + ) + pd.testing.assert_frame_equal(df, expected_df) + + +def test_clib_read_data_grid(expected_xrgrid): + """ + Test the Session.read_data method for grids. + """ + with Session() as lib: + grid = lib.read_data("@static_earth_relief.nc", kind="grid").contents + xrgrid = grid.to_dataarray() + xr.testing.assert_equal(xrgrid, expected_xrgrid) + assert grid.header.contents.n_bands == 1 # Explicitly check n_bands + + +def test_clib_read_data_grid_two_steps(expected_xrgrid): + """ + Test the Session.read_data method for grids in two steps, first reading the header + and then the data. + """ + infile = "@static_earth_relief.nc" + with Session() as lib: + # Read the header first + data_ptr = lib.read_data(infile, kind="grid", mode="GMT_CONTAINER_ONLY") + grid = data_ptr.contents + header = grid.header.contents + assert header.n_rows == 14 + assert header.n_columns == 8 + assert header.wesn[:] == [-55.0, -47.0, -24.0, -10.0] + assert header.z_min == 190.0 + assert header.z_max == 981.0 + assert header.n_bands == 1 # Explicitly check n_bands + assert not grid.data # The data is not read yet + + # Read the data + lib.read_data(infile, kind="grid", mode="GMT_DATA_ONLY", data=data_ptr) + xrgrid = data_ptr.contents.to_dataarray() + xr.testing.assert_equal(xrgrid, expected_xrgrid) + + +def test_clib_read_data_grid_actual_image(): + """ + Test the Session.read_data method for grid, but actually the file is an image. + """ + with Session() as lib: + data_ptr = lib.read_data( + "@earth_day_01d_p", kind="grid", mode="GMT_CONTAINER_AND_DATA" + ) + image = data_ptr.contents + header = image.header.contents + assert header.n_rows == 180 + assert header.n_columns == 360 + assert header.wesn[:] == [-180.0, 180.0, -90.0, 90.0] + # Explicitly check n_bands. Only one band is read for 3-band images. + assert header.n_bands == 1 + + if _HAS_RIOXARRAY: # Full check if rioxarray is installed. + xrimage = image.to_dataarray() + expected_xrimage = xr.open_dataarray( + which("@earth_day_01d_p"), engine="rasterio" + ) + assert expected_xrimage.band.size == 3 # 3-band image. + xr.testing.assert_equal( + xrimage, + expected_xrimage.isel(band=0) + .drop_vars(["band", "spatial_ref"]) + .sortby("y"), + ) + + +def test_clib_read_data_fails(): + """ + Test that the Session.read_data method raises an exception if there are errors. 
+ """ + with Session() as lib: + with pytest.raises(GMTCLibError): + lib.read_data("not-exsits.txt", kind="dataset") From d3101f3ffa21f4aa105ee2a1fdf705e8fe23b9f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sat, 20 Jul 2024 02:04:05 +0200 Subject: [PATCH 195/218] Gallery example "Color points by categories": Update "color" to "fill" in introduction text (#3340) --- examples/gallery/symbols/points_categorical.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/gallery/symbols/points_categorical.py b/examples/gallery/symbols/points_categorical.py index 08b848a5dfc..f638e0d63af 100644 --- a/examples/gallery/symbols/points_categorical.py +++ b/examples/gallery/symbols/points_categorical.py @@ -6,8 +6,8 @@ color-coded by categories. In the example below, we show how the `Palmer Penguins dataset `__ can be visualized. Here, we can pass the individual categories included in -the "species" column directly to the ``color`` parameter via -``color=df.species.cat.codes.astype(int)``. Additionally, we have to set +the "species" column directly to the ``fill`` parameter via +``fill=df.species.cat.codes.astype(int)``. Additionally, we have to set ``cmap=True``. A desired colormap can be selected via the :func:`pygmt.makecpt` function. """ From 9ec92be532ff3273121da5f9f5ab903e59e9ba74 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 20 Jul 2024 13:40:46 +0800 Subject: [PATCH 196/218] Refactor the data_kind and validate_data_input functions (#3335) --- pygmt/clib/session.py | 12 +++++-- pygmt/helpers/__init__.py | 1 + pygmt/helpers/utils.py | 71 +++++++++++++++---------------------- pygmt/src/plot.py | 2 +- pygmt/src/plot3d.py | 2 +- pygmt/src/text.py | 4 +-- pygmt/tests/test_helpers.py | 21 ----------- 7 files changed, 44 insertions(+), 69 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 1b8b5483a28..1a77659ac75 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -34,6 +34,7 @@ GMTVersionError, ) from pygmt.helpers import ( + _validate_data_input, data_kind, tempfile_from_geojson, tempfile_from_image, @@ -1684,8 +1685,15 @@ def virtualfile_in( # noqa: PLR0912 ... print(fout.read().strip()) : N = 3 <7/9> <4/6> <1/3> """ - kind = data_kind( - data, x, y, z, required_z=required_z, required_data=required_data + kind = data_kind(data, required=required_data) + _validate_data_input( + data=data, + x=x, + y=y, + z=z, + required_z=required_z, + required_data=required_data, + kind=kind, ) if check_kind: diff --git a/pygmt/helpers/__init__.py b/pygmt/helpers/__init__.py index 128b1e31a18..862abbbdd64 100644 --- a/pygmt/helpers/__init__.py +++ b/pygmt/helpers/__init__.py @@ -15,6 +15,7 @@ unique_name, ) from pygmt.helpers.utils import ( + _validate_data_input, args_in_kwargs, build_arg_list, build_arg_string, diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index dd202d2e840..2e981266575 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -12,7 +12,7 @@ import warnings import webbrowser from collections.abc import Iterable, Sequence -from typing import Any +from typing import Any, Literal import xarray as xr from pygmt.encodings import charset @@ -79,6 +79,10 @@ def _validate_data_input( Traceback (most recent call last): ... pygmt.exceptions.GMTInvalidInput: Too much data. Use either data or x/y/z. + >>> _validate_data_input(data="infile", x=[1, 2, 3], y=[4, 5, 6]) + Traceback (most recent call last): + ... 
+ pygmt.exceptions.GMTInvalidInput: Too much data. Use either data or x/y/z. >>> _validate_data_input(data="infile", z=[7, 8, 9]) Traceback (most recent call last): ... @@ -111,21 +115,21 @@ def _validate_data_input( raise GMTInvalidInput("data must provide x, y, and z columns.") -def data_kind(data=None, x=None, y=None, z=None, required_z=False, required_data=True): +def data_kind( + data: Any = None, required: bool = True +) -> Literal["arg", "file", "geojson", "grid", "image", "matrix", "vectors"]: """ - Check what kind of data is provided to a module. + Check the kind of data that is provided to a module. - Possible types: + The ``data`` argument can be in any type, but only following types are supported: - * a file name provided as 'data' - * a pathlib.PurePath object provided as 'data' - * an xarray.DataArray object provided as 'data' - * a 2-D matrix provided as 'data' - * 1-D arrays x and y (and z, optionally) - * an optional argument (None, bool, int or float) provided as 'data' - - Arguments should be ``None`` if not used. If doesn't fit any of these - categories (or fits more than one), will raise an exception. + - a string or a :class:`pathlib.PurePath` object or a sequence of them, representing + a file name or a list of file names + - a 2-D or 3-D :class:`xarray.DataArray` object + - a 2-D matrix + - None, bool, int or float type representing an optional arguments + - a geo-like Python object that implements ``__geo_interface__`` (e.g., + geopandas.GeoDataFrame or shapely.geometry) Parameters ---------- @@ -133,55 +137,47 @@ def data_kind(data=None, x=None, y=None, z=None, required_z=False, required_data Pass in either a file name or :class:`pathlib.Path` to an ASCII data table, an :class:`xarray.DataArray`, a 1-D/2-D {table-classes} or an option argument. - x/y : 1-D arrays or None - x and y columns as numpy arrays. - z : 1-D array or None - z column as numpy array. To be used optionally when x and y are given. - required_z : bool - State whether the 'z' column is required. - required_data : bool + required Set to True when 'data' is required, or False when dealing with optional virtual files. [Default is True]. Returns ------- - kind : str - One of ``'arg'``, ``'file'``, ``'grid'``, ``image``, ``'geojson'``, - ``'matrix'``, or ``'vectors'``. + kind + The data kind. 
Examples -------- - >>> import numpy as np >>> import xarray as xr >>> import pathlib - >>> data_kind(data=None, x=np.array([1, 2, 3]), y=np.array([4, 5, 6])) + >>> data_kind(data=None) 'vectors' - >>> data_kind(data=np.arange(10).reshape((5, 2)), x=None, y=None) + >>> data_kind(data=np.arange(10).reshape((5, 2))) 'matrix' - >>> data_kind(data="my-data-file.txt", x=None, y=None) + >>> data_kind(data="my-data-file.txt") 'file' - >>> data_kind(data=pathlib.Path("my-data-file.txt"), x=None, y=None) + >>> data_kind(data=pathlib.Path("my-data-file.txt")) 'file' - >>> data_kind(data=None, x=None, y=None, required_data=False) + >>> data_kind(data=None, required=False) 'arg' - >>> data_kind(data=2.0, x=None, y=None, required_data=False) + >>> data_kind(data=2.0, required=False) 'arg' - >>> data_kind(data=True, x=None, y=None, required_data=False) + >>> data_kind(data=True, required=False) 'arg' >>> data_kind(data=xr.DataArray(np.random.rand(4, 3))) 'grid' >>> data_kind(data=xr.DataArray(np.random.rand(3, 4, 5))) 'image' """ - # determine the data kind + kind: Literal["arg", "file", "geojson", "grid", "image", "matrix", "vectors"] if isinstance(data, str | pathlib.PurePath) or ( isinstance(data, list | tuple) and all(isinstance(_file, str | pathlib.PurePath) for _file in data) ): # One or more files kind = "file" - elif isinstance(data, bool | int | float) or (data is None and not required_data): + elif isinstance(data, bool | int | float) or (data is None and not required): kind = "arg" elif isinstance(data, xr.DataArray): kind = "image" if len(data.dims) == 3 else "grid" @@ -193,15 +189,6 @@ def data_kind(data=None, x=None, y=None, z=None, required_z=False, required_data kind = "matrix" else: kind = "vectors" - _validate_data_input( - data=data, - x=x, - y=y, - z=z, - required_z=required_z, - required_data=required_data, - kind=kind, - ) return kind diff --git a/pygmt/src/plot.py b/pygmt/src/plot.py index 43b26232871..e66f08438e5 100644 --- a/pygmt/src/plot.py +++ b/pygmt/src/plot.py @@ -208,7 +208,7 @@ def plot( # noqa: PLR0912 """ kwargs = self._preprocess(**kwargs) - kind = data_kind(data, x, y) + kind = data_kind(data) extra_arrays = [] if kind == "vectors": # Add more columns for vectors input # Parameters for vector styles diff --git a/pygmt/src/plot3d.py b/pygmt/src/plot3d.py index 65d87761d5c..c86e5e259f1 100644 --- a/pygmt/src/plot3d.py +++ b/pygmt/src/plot3d.py @@ -183,7 +183,7 @@ def plot3d( # noqa: PLR0912 """ kwargs = self._preprocess(**kwargs) - kind = data_kind(data, x, y, z) + kind = data_kind(data) extra_arrays = [] if kind == "vectors": # Add more columns for vectors input diff --git a/pygmt/src/text.py b/pygmt/src/text.py index 04abf12ea3b..484f885997a 100644 --- a/pygmt/src/text.py +++ b/pygmt/src/text.py @@ -180,11 +180,11 @@ def text_( # noqa: PLR0912 # Ensure inputs are either textfiles, x/y/text, or position/text if position is None: - if (x is not None or y is not None) and textfiles is not None: + if any(v is not None for v in (x, y, text)) and textfiles is not None: raise GMTInvalidInput( "Provide either position only, or x/y pairs, or textfiles." 
) - kind = data_kind(textfiles, x, y, text) + kind = data_kind(textfiles) if kind == "vectors" and text is None: raise GMTInvalidInput("Must provide text with x/y pairs") else: diff --git a/pygmt/tests/test_helpers.py b/pygmt/tests/test_helpers.py index ea966753535..98cc4c16d25 100644 --- a/pygmt/tests/test_helpers.py +++ b/pygmt/tests/test_helpers.py @@ -4,7 +4,6 @@ from pathlib import Path -import numpy as np import pytest import xarray as xr from pygmt import Figure @@ -13,7 +12,6 @@ GMTTempFile, args_in_kwargs, build_arg_list, - data_kind, kwargs_to_strings, unique_name, ) @@ -33,25 +31,6 @@ def test_load_static_earth_relief(): assert isinstance(data, xr.DataArray) -@pytest.mark.parametrize( - ("data", "x", "y"), - [ - (None, None, None), - ("data.txt", np.array([1, 2]), np.array([4, 5])), - ("data.txt", np.array([1, 2]), None), - ("data.txt", None, np.array([4, 5])), - (None, np.array([1, 2]), None), - (None, None, np.array([4, 5])), - ], -) -def test_data_kind_fails(data, x, y): - """ - Make sure data_kind raises exceptions when it should. - """ - with pytest.raises(GMTInvalidInput): - data_kind(data=data, x=x, y=y) - - def test_unique_name(): """ Make sure the names are really unique. From 1136be313b7307110981e0631fac5748de2b7979 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 21 Jul 2024 02:30:29 +0200 Subject: [PATCH 197/218] Gallery examples: Fix typos (#3341) --- examples/gallery/maps/country_polygons.py | 2 +- examples/gallery/symbols/points.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/gallery/maps/country_polygons.py b/examples/gallery/maps/country_polygons.py index b85c89add14..78064a00316 100644 --- a/examples/gallery/maps/country_polygons.py +++ b/examples/gallery/maps/country_polygons.py @@ -22,7 +22,7 @@ water="white", frame="afg", dcw=[ - # Great Britain (country code) with seagrean land + # Great Britain (country code) with seagreen land "GB+gseagreen", # Italy with a red border "IT+p0.5p,red3", diff --git a/examples/gallery/symbols/points.py b/examples/gallery/symbols/points.py index 58ab127d003..08042bcf089 100644 --- a/examples/gallery/symbols/points.py +++ b/examples/gallery/symbols/points.py @@ -2,8 +2,8 @@ Points ====== -The :meth:`pygmt.Figure.plot` method can plot points. The plot symbol and size -is set with the ``style`` parameter. +The :meth:`pygmt.Figure.plot` method can plot data points. The symbol and +size are set with the ``style`` parameter. """ # %% From 2740cd3f3d6e1c54cb2ddf56f264b0477c5a2abe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 21 Jul 2024 15:17:38 +0200 Subject: [PATCH 198/218] Update link to the cpt-city recourses (#3343) --- pygmt/src/grd2cpt.py | 2 +- pygmt/src/makecpt.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pygmt/src/grd2cpt.py b/pygmt/src/grd2cpt.py index f6d954c3cad..43f0db976e9 100644 --- a/pygmt/src/grd2cpt.py +++ b/pygmt/src/grd2cpt.py @@ -56,7 +56,7 @@ def grd2cpt(grid, **kwargs): *z*-value, the foreground color (F) assigned to values higher than the highest *z*-value, and the NaN color (N) painted wherever values are undefined. For color tables beyond the standard GMT offerings, visit - `cpt-city `_ and + `cpt-city `_ and `Scientific Colour-Maps `_. 
If the master CPT includes B, F, and N entries, these will be copied into diff --git a/pygmt/src/makecpt.py b/pygmt/src/makecpt.py index e5ef6f5c556..d241035da76 100644 --- a/pygmt/src/makecpt.py +++ b/pygmt/src/makecpt.py @@ -42,7 +42,7 @@ def makecpt(**kwargs): CPT based on an existing master (dynamic) CPT. The resulting CPT can be reversed relative to the master cpt, and can be made continuous or discrete. For color tables beyond the standard GMT offerings, visit - `cpt-city `_ and + `cpt-city `_ and `Scientific Colour-Maps `_. The CPT includes three additional colors beyond the range of z-values. From d8133b02abc269c8cd7995895c5ceb2cd3987332 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Sun, 21 Jul 2024 16:20:17 +0200 Subject: [PATCH 199/218] External resources: Update thumbnail image for "PyGMT HOWTO" (#3344) --- doc/external_resources.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/external_resources.md b/doc/external_resources.md index 0cd744810ac..2f9550ee2a7 100644 --- a/doc/external_resources.md +++ b/doc/external_resources.md @@ -97,7 +97,7 @@ Andre Belem :text-align: center :margin: 0 3 0 0 -![](https://github.com/tktmyd/pygmt-howto-jp/raw/main/docs/_images/inf_on_map_12_0.png) +![](https://github.com/tktmyd/pygmt-howto-jp/blob/main/docs/_images/01aed847a81445c7a002cce113d95886793f6236fbcccb233862486589f17ed1.png) +++ Takuto Maeda :::: From 7376e6ef52cde7f0795b72403975fb259616e24e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Mon, 22 Jul 2024 02:35:20 +0200 Subject: [PATCH 200/218] Examples: Make position codes consistent in comments (#3312) --- examples/gallery/3d_plots/grdview_surface.py | 2 +- examples/gallery/basemaps/double_y_axes.py | 3 ++- examples/gallery/embellishments/colorbar.py | 11 +++++------ examples/gallery/embellishments/inset.py | 6 +++--- .../gallery/embellishments/inset_rectangle_region.py | 4 ++-- examples/gallery/embellishments/legend.py | 8 ++++---- examples/gallery/embellishments/logo.py | 2 +- examples/gallery/embellishments/timestamp.py | 6 +++--- examples/gallery/images/cross_section.py | 4 ++-- examples/gallery/images/grdlandmask.py | 5 ++--- examples/gallery/lines/wiggle.py | 6 +++--- examples/gallery/maps/borders.py | 6 +++--- examples/tutorials/advanced/insets.py | 12 ++++++------ 13 files changed, 37 insertions(+), 38 deletions(-) diff --git a/examples/gallery/3d_plots/grdview_surface.py b/examples/gallery/3d_plots/grdview_surface.py index beec0eb6c4b..9b5bbdede1d 100644 --- a/examples/gallery/3d_plots/grdview_surface.py +++ b/examples/gallery/3d_plots/grdview_surface.py @@ -60,7 +60,7 @@ def ackley(x, y): # Add colorbar for gridded data fig.colorbar( frame="a2f1", # Set annotations in steps of two, tick marks in steps of one - position="JRM", # Place colorbar at position Right Middle + position="JMR", # Place colorbar in the Middle Right (MR) corner ) fig.show() diff --git a/examples/gallery/basemaps/double_y_axes.py b/examples/gallery/basemaps/double_y_axes.py index 6ccaf629bea..aa8ba8a6815 100644 --- a/examples/gallery/basemaps/double_y_axes.py +++ b/examples/gallery/basemaps/double_y_axes.py @@ -63,7 +63,8 @@ class can control which axes should be plotted and optionally show annotations, # Plot points for y2 data fig.plot(x=x, y=y2, style="s0.28c", fill="red", label="y2") -# Create a legend in the top-left corner of the plot +# Create a legend in the Top Left (TL) 
corner of the plot with an +# offset of 0.1 centimeters fig.legend(position="jTL+o0.1c", box=True) fig.show() diff --git a/examples/gallery/embellishments/colorbar.py b/examples/gallery/embellishments/colorbar.py index 2febbdea6d4..aad985911c0 100644 --- a/examples/gallery/embellishments/colorbar.py +++ b/examples/gallery/embellishments/colorbar.py @@ -16,14 +16,13 @@ horizontal (**L**\ eft, **C**\ enter, **R**\ ight) alignment codes, e.g. ``position="jTR"`` for Top Right. - **g**: using map coordinates, e.g. ``position="g170/-45"`` for longitude - 170E, latitude 45S. + 170° East, latitude 45° South. - **x**: using paper coordinates, e.g. ``position="x5c/7c"`` for 5 cm, 7 cm from anchor point. - **n**: using normalized (0-1) coordinates, e.g. ``position="n0.4/0.8"``. Note that the anchor point defaults to the bottom left (**BL**). Append ``+h`` -to ``position`` to get a horizontal colorbar instead of a vertical one -(``+v``). +to ``position`` to get a horizontal colorbar instead of a vertical one (``+v``). """ # %% @@ -34,7 +33,7 @@ # ============ # Create a colorbar designed for seismic tomography - roma -# Colorbar is placed at bottom center (BC) by default if no position is given +# Colorbar is placed at Bottom Center (BC) by default if no position is given # Add quantity and unit as labels ("+l") to the x and y axes # Add annotations ("+a") in steps of 0.5 and ticks ("+f") in steps of 0.1 fig.colorbar(cmap="roma", frame=["xa0.5f0.1+lVelocity", "y+lm/s"]) @@ -78,8 +77,8 @@ # Plot the colorbar fig.colorbar( cmap=True, # Use colormap set up above - # Colorbar placed inside the plot bounding box (j) at Bottom Left (BL), - # offset (+o) by 0.5 cm horizontally and 0.8 cm vertically from anchor + # Colorbar placed inside the plot bounding box (j) in the Bottom Left (BL) corner, + # with an offset (+o) by 0.5 cm horizontally and 0.8 cm vertically from the anchor # point, and plotted horizontally (+h) position="jBL+o0.5c/0.8c+h", box=True, diff --git a/examples/gallery/embellishments/inset.py b/examples/gallery/embellishments/inset.py index a0cda285d67..dd2ba4f7631 100644 --- a/examples/gallery/embellishments/inset.py +++ b/examples/gallery/embellishments/inset.py @@ -16,9 +16,9 @@ # to "brown", the water to "lightblue", the shorelines width to "thin", and # adding a frame fig.coast(region="MG+r2", land="brown", water="lightblue", shorelines="thin", frame="a") -# Create an inset, setting the position to top left, the width to 3.5 cm, and -# the x- and y-offsets to 0.2 cm. The margin is set to 0, and the border is -# "gold" with a pen size of 1.5 points. +# Create an inset, placing it in the Top Left (TL) corner with a width of 3.5 cm and +# x- and y-offsets of 0.2 cm. The margin is set to 0, and the border is "gold" with a +# pen size of 1.5 points. with fig.inset(position="jTL+w3.5c+o0.2c", margin=0, box="+p1.5p,gold"): # Create a figure in the inset using coast. This example uses the azimuthal # orthogonal projection centered at 47E, 20S. The land color is set to diff --git a/examples/gallery/embellishments/inset_rectangle_region.py b/examples/gallery/embellishments/inset_rectangle_region.py index 12f84dbe847..e646182e6b4 100644 --- a/examples/gallery/embellishments/inset_rectangle_region.py +++ b/examples/gallery/embellishments/inset_rectangle_region.py @@ -25,8 +25,8 @@ # figure fig.coast(land="lightbrown", water="azure1", shorelines="2p", area_thresh=1000) -# Create an inset map, setting the position to bottom right, and the x- and -# y-offsets to 0.1 cm, respectively. 
+# Create an inset map, placing it in the Bottom Right (BR) corner with x- and +# y-offsets of 0.1 cm, respectively. # The inset map contains the Japan main land. "U54S/3c" means UTM projection # with a map width of 3 cm. The inset width and height are automatically # calculated from the specified ``region`` and ``projection`` parameters. diff --git a/examples/gallery/embellishments/legend.py b/examples/gallery/embellishments/legend.py index 9da1f95e73f..76cf852b028 100644 --- a/examples/gallery/embellishments/legend.py +++ b/examples/gallery/embellishments/legend.py @@ -43,8 +43,8 @@ fig.plot(x=x, y=y2, style="c0.07c", fill="dodgerblue", label="cos(x)+1.1") # Add a legend to the plot; place it within the plot bounding box with both -# reference ("J") and anchor ("+j") points being TopRight and with an offset -# of 0.2 centimeters in x and y directions; surround the legend with a box +# reference ("J") and anchor ("+j") points being the Top Right (TR) corner and an +# offset of 0.2 centimeters in x and y directions; surround the legend with a box fig.legend(position="JTR+jTR+o0.2c", box=True) # ----------------------------------------------------------------------------- @@ -55,8 +55,8 @@ fig.plot(x=x, y=y4, style="s0.07c", fill="orange", label="cos(x/2)-1.1") -# For a multi-column legend, users have to provide the width via "+w", here it -# is set to 6 centimeters; reference and anchor points are set to BottomRight +# For a multi-column legend, users have to provide the width via "+w", here it is +# set to 6 centimeters; reference and anchor points are the Bottom Right (BR) corner fig.legend(position="JBR+jBR+o0.2c+w6c", box=True) fig.show() diff --git a/examples/gallery/embellishments/logo.py b/examples/gallery/embellishments/logo.py index 6ced6b6c88e..fdddb817463 100644 --- a/examples/gallery/embellishments/logo.py +++ b/examples/gallery/embellishments/logo.py @@ -11,7 +11,7 @@ fig = pygmt.Figure() fig.basemap(region=[0, 10, 0, 2], projection="X6c", frame=True) -# add the GMT logo in the Top Right corner of the current map, +# Add the GMT logo in the Top Right (TR) corner of the current map, # scaled up to be 3 cm wide and offset by 0.3 cm in x direction # and 0.6 cm in y direction. fig.logo(position="jTR+o0.3c/0.6c+w3c") diff --git a/examples/gallery/embellishments/timestamp.py b/examples/gallery/embellishments/timestamp.py index 41fa95a67db..f8a853b9b72 100644 --- a/examples/gallery/embellishments/timestamp.py +++ b/examples/gallery/embellishments/timestamp.py @@ -3,9 +3,9 @@ ========= The :meth:`pygmt.Figure.timestamp` method can draw the GMT timestamp logo on the plot. -The timestamp will always be shown relative to the bottom-left corner of the plot. By -default, the ``offset`` and ``justify`` parameters are set to ``("-54p", "-54p")`` -(x, y directions) and ``"BL"`` (bottom-left), respectively. +The timestamp will always be shown relative to the Bottom Left (BL) corner of the plot. +By default, the ``offset`` and ``justify`` parameters are set to ``("-54p", "-54p")`` +(x, y directions) and ``"BL"`` (Bottom Left), respectively. 
""" # %% diff --git a/examples/gallery/images/cross_section.py b/examples/gallery/images/cross_section.py index b39261b0f1f..690b6797ea3 100644 --- a/examples/gallery/images/cross_section.py +++ b/examples/gallery/images/cross_section.py @@ -46,8 +46,8 @@ # Add a colorbar for the elevation fig.colorbar( - # Place the colorbar inside the plot (lower-case "j") with justification - # Bottom Right and an offset ("+o") of 0.7 centimeters and + # Place the colorbar inside the plot (lower-case "j") in the Bottom + # Right (BR) corner with an offset ("+o") of 0.7 centimeters and # 0.3 centimeters in x or y directions, respectively # Move the x label above the horizontal colorbar ("+ml") position="jBR+o0.7c/0.8c+h+w5c/0.3c+ml", diff --git a/examples/gallery/images/grdlandmask.py b/examples/gallery/images/grdlandmask.py index ae0c54d01b7..7576028091e 100644 --- a/examples/gallery/images/grdlandmask.py +++ b/examples/gallery/images/grdlandmask.py @@ -2,9 +2,8 @@ Create 'wet-dry' mask grid ========================== -The :func:`pygmt.grdlandmask` function allows setting -all nodes on land or water to a specified value using -the ``maskvalues`` parameter. +The :func:`pygmt.grdlandmask` function allows setting all nodes on land +or water to a specified value using the ``maskvalues`` parameter. """ # %% diff --git a/examples/gallery/lines/wiggle.py b/examples/gallery/lines/wiggle.py index 64095f815d6..82f7f2c5ab8 100644 --- a/examples/gallery/lines/wiggle.py +++ b/examples/gallery/lines/wiggle.py @@ -35,8 +35,8 @@ pen="1.0p", # Draw a blue track with a width of 0.5 points track="0.5p,blue", - # Plot a vertical scale bar at the right middle. The bar length is 100 in - # data (z) units. Set the z unit label to "nT". - position="jRM+w100+lnT", + # Plot a vertical scale bar at Middle Right (MR). The bar length (+w) + # is 100 in data (z) units. Set the z unit label (+l) to "nT". + position="jMR+w100+lnT", ) fig.show() diff --git a/examples/gallery/maps/borders.py b/examples/gallery/maps/borders.py index e8c6029281c..52203f8cc81 100644 --- a/examples/gallery/maps/borders.py +++ b/examples/gallery/maps/borders.py @@ -11,9 +11,9 @@ * **3** = Marine boundaries * **a** = All boundaries (1-3) -For example, to draw national boundaries with a line thickness of 1p and black -line color use ``borders="1/1p,black"``. You can draw multiple boundaries by -passing in a list to ``borders``. +For example, to draw national boundaries with a line thickness of 1 point and +black line color use ``borders="1/1p,black"``. You can draw multiple boundaries +by passing in a list to ``borders``. """ # %% diff --git a/examples/tutorials/advanced/insets.py b/examples/tutorials/advanced/insets.py index 4f8a6af7292..5be807c78a9 100644 --- a/examples/tutorials/advanced/insets.py +++ b/examples/tutorials/advanced/insets.py @@ -31,12 +31,12 @@ # %% # The :meth:`pygmt.Figure.inset` method uses a context manager, and is called # using a ``with`` statement. The ``position`` parameter, including the inset -# width, is required to plot the inset. Using the **j** argument, the location -# of the inset is set to one of the 9 anchors (bottom-middle-top and -# left-center-right). In the example below, ``BL`` sets the inset to the bottom -# left. The ``box`` parameter can set the fill and border of the inset. In the -# example below, ``+pblack`` sets the border color to black and ``+glightred`` -# sets the fill to light red. +# width, is required to plot the inset. 
Using the **j** modifier, the location +# of the inset is set to one of the 9 anchors (Top - Middle - Bottom and Left - +# Center - Right). In the example below, ``BL`` places the inset at the Bottom +# Left corner. The ``box`` parameter can set the fill and border of the inset. +# In the example below, ``+pblack`` sets the border color to black and +# ``+glightred`` sets the fill to light red. fig = pygmt.Figure() fig.coast( From 3ef4503d50fc1b2bb42fcfa02a7f6aab765e4d3d Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 22 Jul 2024 18:32:34 +0800 Subject: [PATCH 201/218] Temporarily pin sphinx-gallery<0.17.0 (#3350) --- .github/workflows/ci_docs.yml | 2 +- ci/requirements/docs.yml | 2 +- environment.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index 74951d365f2..a07db2dfc1f 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -113,7 +113,7 @@ jobs: sphinx-autodoc-typehints sphinx-copybutton sphinx-design - sphinx-gallery + sphinx-gallery<0.17.0 sphinx_rtd_theme # Download cached remote files (artifacts) from GitHub diff --git a/ci/requirements/docs.yml b/ci/requirements/docs.yml index b7381844b92..ddbaa3e2e1e 100644 --- a/ci/requirements/docs.yml +++ b/ci/requirements/docs.yml @@ -28,5 +28,5 @@ dependencies: - sphinx-autodoc-typehints - sphinx-copybutton - sphinx-design - - sphinx-gallery + - sphinx-gallery<0.17.0 - sphinx_rtd_theme diff --git a/environment.yml b/environment.yml index 4e828dc34ef..51120f7f48c 100644 --- a/environment.yml +++ b/environment.yml @@ -40,7 +40,7 @@ dependencies: - sphinx-autodoc-typehints - sphinx-copybutton - sphinx-design - - sphinx-gallery + - sphinx-gallery<0.17.0 - sphinx_rtd_theme # Dev dependencies (type hints) - mypy From e74615644fcd318bb88e1af991377e7a4bb1ad8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:45:14 +0200 Subject: [PATCH 202/218] External resources: Update thumbnail image for "PyGMT HOWTO" - 02 (#3345) --- doc/external_resources.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/external_resources.md b/doc/external_resources.md index 2f9550ee2a7..bdf17695baf 100644 --- a/doc/external_resources.md +++ b/doc/external_resources.md @@ -97,7 +97,7 @@ Andre Belem :text-align: center :margin: 0 3 0 0 -![](https://github.com/tktmyd/pygmt-howto-jp/blob/main/docs/_images/01aed847a81445c7a002cce113d95886793f6236fbcccb233862486589f17ed1.png) +![](https://github.com/tktmyd/pygmt-howto-jp/raw/main/docs/_images/01aed847a81445c7a002cce113d95886793f6236fbcccb233862486589f17ed1.png) +++ Takuto Maeda :::: From 3502252cb0e8daaa7c85420737c5174ce2529659 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 23 Jul 2024 13:49:04 +0800 Subject: [PATCH 203/218] Support non-ASCII characters in ISO-8859-x charset encodings (#3310) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Michael Grund <23025878+michaelgrund@users.noreply.github.com> Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- doc/techref/encodings.md | 38 +++-- pygmt/encodings.py | 32 +++-- pygmt/helpers/__init__.py | 1 + pygmt/helpers/utils.py | 133 ++++++++++++++++-- pygmt/src/text.py | 31 ++-- .../test_text_nonascii_iso8859.png.dvc | 5 + pygmt/tests/test_text.py | 13 ++ 7 files changed, 215 insertions(+), 38 deletions(-) create mode 100644 
pygmt/tests/baseline/test_text_nonascii_iso8859.png.dvc diff --git a/doc/techref/encodings.md b/doc/techref/encodings.md index 638370e9bd3..faa866a8fcd 100644 --- a/doc/techref/encodings.md +++ b/doc/techref/encodings.md @@ -1,14 +1,12 @@ # Supported Encodings and Non-ASCII Characters -GMT supports a number of encodings and each encoding contains a set of ASCII and non-ASCII -characters. Below are some of the most common encodings and characters that are supported. +GMT supports a number of encodings and each encoding contains a set of ASCII and +non-ASCII characters. In PyGMT, you can use any of these ASCII and non-ASCII characters +in arguments and text strings. When using non-ASCII characters in PyGMT, the easiest way +is to copy and paste the character from the encoding tables below. -In PyGMT, you can use any of these ASCII and non-ASCII characters in arguments and text -strings. When using non-ASCII characters in PyGMT, the easiest way is to copy and paste -the character from the tables below. - -**Note**: The special character � (REPLACEMENT CHARACTER) is used to indicate that -the character is not defined in the encoding. +**Note**: The special character � (REPLACEMENT CHARACTER) is used to indicate +that the character is not defined in the encoding. ## Adobe ISOLatin1+ Encoding @@ -106,3 +104,27 @@ the Unicode character set. | **\35x** | ➨ | ➩ | ➪ | ➫ | ➬ | ➭ | ➮ | ➯ | | **\36x** | � | ➱ | ➲ | ➳ | ➴ | ➵ | ➶ | ➷ | | **\37x** | ➸ | ➹ | ➺ | ➻ | ➼ | ➽ | ➾ | � | + +## ISO/IEC 8859 + +GMT also supports the ISO/IEC 8859 standard for 8-bit character encodings. Refer to + for descriptions of the different parts of +the standard. + +For a list of the characters in each part of the standard, refer to the following links: + +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- diff --git a/pygmt/encodings.py b/pygmt/encodings.py index 2cfda9b5728..44ed3153e85 100644 --- a/pygmt/encodings.py +++ b/pygmt/encodings.py @@ -1,13 +1,13 @@ """ -Adobe character encodings supported by GMT. +Character encodings supported by GMT. -Currently, only Adobe Symbol, Adobe ZapfDingbats, and Adobe ISOLatin1+ encodings are -supported. +Currently, Adobe Symbol, Adobe ZapfDingbats, Adobe ISOLatin1+ and ISO-8859-x (x can be +1-11, 13-16) encodings are supported. Adobe Standard encoding is not supported. -The corresponding Unicode characters in each Adobe character encoding are generated -from the mapping table and conversion script in the GMT-octal-codes -(https://github.com/seisman/GMT-octal-codes) repository. Refer to that repository for -details. +The corresponding Unicode characters in each Adobe character encoding are generated from +the mapping tables and conversion scripts in the +`GMT-octal-codes repository `__. Refer to +that repository for details. Some code points are undefined and are assigned with the replacement character (``\ufffd``). 
@@ -16,14 +16,17 @@ ---------- - GMT-octal-codes: https://github.com/seisman/GMT-octal-codes -- GMT official documentation: https://docs.generic-mapping-tools.org/dev/reference/octal-codes.html +- GMT documentation: https://docs.generic-mapping-tools.org/dev/reference/octal-codes.html - Adobe Postscript Language Reference: https://www.adobe.com/jp/print/postscript/pdfs/PLRM.pdf -- ISOLatin1+: https://en.wikipedia.org/wiki/PostScript_Latin_1_Encoding +- Adobe ISOLatin1+: https://en.wikipedia.org/wiki/PostScript_Latin_1_Encoding - Adobe Symbol: https://en.wikipedia.org/wiki/Symbol_(typeface) -- Zapf Dingbats: https://en.wikipedia.org/wiki/Zapf_Dingbats +- Adobe ZapfDingbats: https://en.wikipedia.org/wiki/Zapf_Dingbats - Adobe Glyph List: https://github.com/adobe-type-tools/agl-aglfn +- ISO-8859: https://en.wikipedia.org/wiki/ISO/IEC_8859 """ +import codecs + # Dictionary of character mappings for different encodings. charset: dict = {} @@ -129,3 +132,12 @@ strict=False, ) ) + +# ISO-8859-x charsets and x can be 1-11, 13-16. +for i in range(1, 17): + if i == 12: # ISO-8859-12 was abandoned. + continue + charset[f"ISO-8859-{i}"] = { + code: codecs.decode(bytes([code]), f"iso8859_{i}", errors="replace") + for code in [*range(0o040, 0o200), *range(0o240, 0o400)] + } diff --git a/pygmt/helpers/__init__.py b/pygmt/helpers/__init__.py index 862abbbdd64..08583896b6c 100644 --- a/pygmt/helpers/__init__.py +++ b/pygmt/helpers/__init__.py @@ -15,6 +15,7 @@ unique_name, ) from pygmt.helpers.utils import ( + _check_encoding, _validate_data_input, args_in_kwargs, build_arg_list, diff --git a/pygmt/helpers/utils.py b/pygmt/helpers/utils.py index 2e981266575..cd54d6fc18e 100644 --- a/pygmt/helpers/utils.py +++ b/pygmt/helpers/utils.py @@ -115,6 +115,78 @@ def _validate_data_input( raise GMTInvalidInput("data must provide x, y, and z columns.") +def _check_encoding( + argstr: str, +) -> Literal[ + "ascii", + "ISOLatin1+", + "ISO-8859-1", + "ISO-8859-2", + "ISO-8859-3", + "ISO-8859-4", + "ISO-8859-5", + "ISO-8859-6", + "ISO-8859-7", + "ISO-8859-8", + "ISO-8859-9", + "ISO-8859-10", + "ISO-8859-11", + "ISO-8859-13", + "ISO-8859-14", + "ISO-8859-15", + "ISO-8859-16", +]: + """ + Check the charset encoding of a string. + + All characters in the string must be in the same charset encoding, otherwise the + default ``ISOLatin1+`` encoding is returned. Characters in the Adobe Symbol and + ZapfDingbats encodings are also checked because they're independent on the choice of + encodings. + + Parameters + ---------- + argstr + The string to be checked. + + Returns + ------- + encoding + The encoding of the string. + + Examples + -------- + >>> _check_encoding("123ABC+-?!") # ASCII characters only + 'ascii' + >>> _check_encoding("12AB±β①②") # Characters in ISOLatin1+ + 'ISOLatin1+' + >>> _check_encoding("12ABāáâãäåβ①②") # Characters in ISO-8859-4 + 'ISO-8859-4' + >>> _check_encoding("12ABŒā") # Mix characters in ISOLatin1+ (Œ) and ISO-8859-4 (ā) + 'ISOLatin1+' + >>> _check_encoding("123AB中文") # Characters not in any charset encoding + 'ISOLatin1+' + """ + # Return "ascii" if the string only contains ASCII characters. + if all(32 <= ord(c) <= 126 for c in argstr): + return "ascii" + # Loop through all supported encodings and check if all characters in the string + # are in the charset of the encoding. If all characters are in the charset, return + # the encoding. The ISOLatin1+ encoding is checked first because it is the default + # and most common encoding. 
+ adobe_chars = set(charset["Symbol"].values()) | set( + charset["ZapfDingbats"].values() + ) + for encoding in ["ISOLatin1+"] + [f"ISO-8859-{i}" for i in range(1, 17)]: + if encoding == "ISO-8859-12": # ISO-8859-12 was abandoned. Skip it. + continue + if all(c in (set(charset[encoding].values()) | adobe_chars) for c in argstr): + return encoding # type: ignore[return-value] + # Return the "ISOLatin1+" encoding if the string contains characters from multiple + # charset encodings or contains characters that are not in any charset encoding. + return "ISOLatin1+" + + def data_kind( data: Any = None, required: bool = True ) -> Literal["arg", "file", "geojson", "grid", "image", "matrix", "vectors"]: @@ -192,17 +264,41 @@ def data_kind( return kind -def non_ascii_to_octal(argstr: str) -> str: +def non_ascii_to_octal( + argstr: str, + encoding: Literal[ + "ascii", + "ISOLatin1+", + "ISO-8859-1", + "ISO-8859-2", + "ISO-8859-3", + "ISO-8859-4", + "ISO-8859-5", + "ISO-8859-6", + "ISO-8859-7", + "ISO-8859-8", + "ISO-8859-9", + "ISO-8859-10", + "ISO-8859-11", + "ISO-8859-13", + "ISO-8859-14", + "ISO-8859-15", + "ISO-8859-16", + ] = "ISOLatin1+", +) -> str: r""" Translate non-ASCII characters to their corresponding octal codes. - Currently, only characters in the ISOLatin1+ charset and Symbol/ZapfDingbats fonts - are supported. + Currently, only non-ASCII characters in the Adobe ISOLatin1+, Adobe Symbol, Adobe + ZapfDingbats, and ISO-8850-x (x can be in 1-11, 13-17) encodings are supported. + The Adobe Standard encoding is not supported yet. Parameters ---------- argstr The string to be translated. + encoding + The encoding of characters in the string. Returns ------- @@ -219,9 +315,11 @@ def non_ascii_to_octal(argstr: str) -> str: '@%34%\\041@%%@%34%\\176@%%@%34%\\241@%%@%34%\\376@%%' >>> non_ascii_to_octal("ABC ±120° DEF α ♥") 'ABC \\261120\\260 DEF @~\\141@~ @%34%\\252@%%' + >>> non_ascii_to_octal("12ABāáâãäåβ①②", encoding="ISO-8859-4") + '12AB\\340\\341\\342\\343\\344\\345@~\\142@~@%34%\\254@%%@%34%\\255@%%' """ # noqa: RUF002 - # Return the string if it only contains printable ASCII characters from 32 to 126. - if all(32 <= ord(c) <= 126 for c in argstr): + # Return the input string if it only contains ASCII characters. + if encoding == "ascii" or all(32 <= ord(c) <= 126 for c in argstr): return argstr # Dictionary mapping non-ASCII characters to octal codes @@ -232,15 +330,15 @@ def non_ascii_to_octal(argstr: str) -> str: mapping.update( {c: f"@%34%\\{i:03o}@%%" for i, c in charset["ZapfDingbats"].items()} ) - # Adobe ISOLatin1+ charset. Put at the end. - mapping.update({c: f"\\{i:03o}" for i, c in charset["ISOLatin1+"].items()}) + # ISOLatin1+ or ISO-8859-x charset. + mapping.update({c: f"\\{i:03o}" for i, c in charset[encoding].items()}) # Remove any printable characters mapping = {k: v for k, v in mapping.items() if k not in string.printable} return argstr.translate(str.maketrans(mapping)) -def build_arg_list( +def build_arg_list( # noqa: PLR0912 kwdict: dict[str, Any], confdict: dict[str, str] | None = None, infile: str | pathlib.PurePath | Sequence[str | pathlib.PurePath] | None = None, @@ -310,6 +408,10 @@ def build_arg_list( ... ) ... 
) ['f1.txt', 'f2.txt', '-A0', '-B', '--FORMAT_DATE_MAP=o dd', '->out.txt'] + >>> build_arg_list(dict(B="12ABāβ①②")) + ['-B12AB\\340@~\\142@~@%34%\\254@%%@%34%\\255@%%', '--PS_CHAR_ENCODING=ISO-8859-4'] + >>> build_arg_list(dict(B="12ABāβ①②"), confdict=dict(PS_CHAR_ENCODING="ISO-8859-5")) + ['-B12AB\\340@~\\142@~@%34%\\254@%%@%34%\\255@%%', '--PS_CHAR_ENCODING=ISO-8859-5'] >>> print(build_arg_list(dict(R="1/2/3/4", J="X4i", watre=True))) Traceback (most recent call last): ... @@ -324,11 +426,22 @@ def build_arg_list( elif value is True: gmt_args.append(f"-{key}") elif is_nonstr_iter(value): - gmt_args.extend(non_ascii_to_octal(f"-{key}{_value}") for _value in value) + gmt_args.extend(f"-{key}{_value}" for _value in value) else: - gmt_args.append(non_ascii_to_octal(f"-{key}{value}")) + gmt_args.append(f"-{key}{value}") + + # Convert non-ASCII characters (if any) in the arguments to octal codes + encoding = _check_encoding("".join(gmt_args)) + if encoding != "ascii": + gmt_args = [non_ascii_to_octal(arg, encoding=encoding) for arg in gmt_args] gmt_args = sorted(gmt_args) + # Set --PS_CHAR_ENCODING=encoding if necessary + if encoding not in {"ascii", "ISOLatin1+"} and not ( + confdict and "PS_CHAR_ENCODING" in confdict + ): + gmt_args.append(f"--PS_CHAR_ENCODING={encoding}") + if confdict: gmt_args.extend(f"--{key}={value}" for key, value in confdict.items()) diff --git a/pygmt/src/text.py b/pygmt/src/text.py index 484f885997a..3e72b30b328 100644 --- a/pygmt/src/text.py +++ b/pygmt/src/text.py @@ -6,6 +6,7 @@ from pygmt.clib import Session from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import ( + _check_encoding, build_arg_list, data_kind, fmt_docstring, @@ -59,13 +60,12 @@ def text_( # noqa: PLR0912 - ``x``/``y``, and ``text`` - ``position`` and ``text`` - The text strings passed via the ``text`` parameter can contain ASCII - characters and non-ASCII characters defined in the ISOLatin1+ encoding - (i.e., IEC_8859-1), and the Symbol and ZapfDingbats character sets. - See :gmt-docs:`reference/octal-codes.html` for the full list of supported - non-ASCII characters. + The text strings passed via the ``text`` parameter can contain ASCII characters and + non-ASCII characters defined in the Adobe ISOLatin1+, Adobe Symbol, Adobe + ZapfDingbats and ISO-8859-x (x can be 1-11, 13-16) encodings. Refer to + :doc:`techref/encodings` for the full list of supported non-ASCII characters. - Full option list at :gmt-docs:`text.html` + Full option list at :gmt-docs:`text.html`. {aliases} @@ -226,13 +226,24 @@ def text_( # noqa: PLR0912 kwargs["t"] = "" # Append text at last column. Text must be passed in as str type. 
+ confdict = {} if kind == "vectors": - extra_arrays.append( - np.vectorize(non_ascii_to_octal)(np.atleast_1d(text).astype(str)) - ) + text = np.atleast_1d(text).astype(str) + encoding = _check_encoding("".join(text)) + if encoding != "ascii": + text = np.vectorize(non_ascii_to_octal, excluded="encoding")( + text, encoding=encoding + ) + extra_arrays.append(text) + + if encoding not in {"ascii", "ISOLatin1+"}: + confdict = {"PS_CHAR_ENCODING": encoding} with Session() as lib: with lib.virtualfile_in( check_kind="vector", data=textfiles, x=x, y=y, extra_arrays=extra_arrays ) as vintbl: - lib.call_module(module="text", args=build_arg_list(kwargs, infile=vintbl)) + lib.call_module( + module="text", + args=build_arg_list(kwargs, infile=vintbl, confdict=confdict), + ) diff --git a/pygmt/tests/baseline/test_text_nonascii_iso8859.png.dvc b/pygmt/tests/baseline/test_text_nonascii_iso8859.png.dvc new file mode 100644 index 00000000000..ee0f41600e9 --- /dev/null +++ b/pygmt/tests/baseline/test_text_nonascii_iso8859.png.dvc @@ -0,0 +1,5 @@ +outs: +- md5: a0f35a1d58c95e6589c7397e7660e946 + size: 17089 + hash: md5 + path: test_text_nonascii_iso8859.png diff --git a/pygmt/tests/test_text.py b/pygmt/tests/test_text.py index 8543734bb30..64781c514bc 100644 --- a/pygmt/tests/test_text.py +++ b/pygmt/tests/test_text.py @@ -434,3 +434,16 @@ def test_text_quotation_marks(): fig.basemap(projection="X4c/2c", region=[0, 4, 0, 2], frame=0) fig.text(x=2, y=1, text='\\234 ‘ ’ " “ ”', font="20p") # noqa: RUF001 return fig + + +@pytest.mark.mpl_image_compare +def test_text_nonascii_iso8859(): + """ + Test passing text strings with non-ascii characters in ISO-8859-4 encoding. + """ + fig = Figure() + fig.basemap(region=[0, 10, 0, 10], projection="X10c", frame=["WSEN+tAāáâãäåB"]) + fig.text(position="TL", text="position-text:1ÉĘËĖ2") + fig.text(x=1, y=1, text="xytext:1éęëė2") + fig.text(x=[5, 5], y=[3, 5], text=["xytext1:ųúûüũūαζ∆❡", "xytext2:íîī∑π∇✉"]) + return fig From d7560fa097819c7717a75dac85db1953da6ce430 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Tue, 23 Jul 2024 08:40:46 +0200 Subject: [PATCH 204/218] Fix the grant numbers for the support of PyGMT (#3347) --- doc/overview.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/overview.md b/doc/overview.md index cbad46c5849..2b1c7cd40e4 100644 --- a/doc/overview.md +++ b/doc/overview.md @@ -22,7 +22,7 @@ The project was started in 2017 by [Leonardo Uieda](http://www.leouieda.com) and [Paul Wessel](http://www.soest.hawaii.edu/wessel) (the co-creator and main developer of GMT) at the University of Hawaiʻi at Mānoa. The development of PyGMT has been supported by NSF grants [OCE-1558403](https://www.nsf.gov/awardsearch/showAward?AWD_ID=1558403) -and [EAR-1948603](https://www.nsf.gov/awardsearch/showAward?AWD_ID=1948602). +and [EAR-1948602](https://www.nsf.gov/awardsearch/showAward?AWD_ID=1948602). We welcome any feedback and ideas! 
Let us know by submitting [issues on GitHub](https://github.com/GenericMappingTools/pygmt/issues) or by posting on From e67cd66956323dc4f5f6967389aa2171e18e414e Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 25 Jul 2024 10:57:11 +0800 Subject: [PATCH 205/218] Fix the tile_map doctest which has different output on Windows in the GMT Legacy Tests (#3353) --- pygmt/datasets/tile_map.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygmt/datasets/tile_map.py b/pygmt/datasets/tile_map.py index 94b2532bfd9..15331e256bb 100644 --- a/pygmt/datasets/tile_map.py +++ b/pygmt/datasets/tile_map.py @@ -122,7 +122,7 @@ def load_tile_map( * band (band) uint8 ... 1 2 3 * y (y) float64 ... -7.081e-10 -7.858e+04 ... -1.996e+07 -2.004e+07 * x (x) float64 ... -2.004e+07 -1.996e+07 ... 1.996e+07 2.004e+07 - spatial_ref int64 ... 0 + spatial_ref int... 0 >>> # CRS is set only if rioxarray is available >>> if hasattr(raster, "rio"): ... raster.rio.crs From ecaab8fa60e41aca0fd17fdbaf80bad109692b51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Thu, 25 Jul 2024 09:28:58 +0200 Subject: [PATCH 206/218] Figure.text: Fix link to "Technical Reference/Supported Encodings..." (#3354) --- pygmt/src/text.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygmt/src/text.py b/pygmt/src/text.py index 3e72b30b328..ba13b94a4e8 100644 --- a/pygmt/src/text.py +++ b/pygmt/src/text.py @@ -63,7 +63,7 @@ def text_( # noqa: PLR0912 The text strings passed via the ``text`` parameter can contain ASCII characters and non-ASCII characters defined in the Adobe ISOLatin1+, Adobe Symbol, Adobe ZapfDingbats and ISO-8859-x (x can be 1-11, 13-16) encodings. Refer to - :doc:`techref/encodings` for the full list of supported non-ASCII characters. + :doc:`/techref/encodings` for the full list of supported non-ASCII characters. Full option list at :gmt-docs:`text.html`. From ff246c6dc174a0663ab23561e19c0ecbdcaea26e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Fri, 26 Jul 2024 04:14:40 +0200 Subject: [PATCH 207/218] Move projection table to Technical References (#3356) --- .github/workflows/ci_docs.yml | 2 +- ci/requirements/docs.yml | 2 +- doc/techref/index.md | 1 + .../table/README.txt => doc/techref/projections.rst | 4 ++-- environment.yml | 2 +- examples/projections/README.txt | 11 ++++++----- 6 files changed, 12 insertions(+), 10 deletions(-) rename examples/projections/table/README.txt => doc/techref/projections.rst (99%) diff --git a/.github/workflows/ci_docs.yml b/.github/workflows/ci_docs.yml index a07db2dfc1f..74951d365f2 100644 --- a/.github/workflows/ci_docs.yml +++ b/.github/workflows/ci_docs.yml @@ -113,7 +113,7 @@ jobs: sphinx-autodoc-typehints sphinx-copybutton sphinx-design - sphinx-gallery<0.17.0 + sphinx-gallery sphinx_rtd_theme # Download cached remote files (artifacts) from GitHub diff --git a/ci/requirements/docs.yml b/ci/requirements/docs.yml index ddbaa3e2e1e..b7381844b92 100644 --- a/ci/requirements/docs.yml +++ b/ci/requirements/docs.yml @@ -28,5 +28,5 @@ dependencies: - sphinx-autodoc-typehints - sphinx-copybutton - sphinx-design - - sphinx-gallery<0.17.0 + - sphinx-gallery - sphinx_rtd_theme diff --git a/doc/techref/index.md b/doc/techref/index.md index b152966773a..91b1e387fd8 100644 --- a/doc/techref/index.md +++ b/doc/techref/index.md @@ -8,5 +8,6 @@ visit the {gmt-docs}`GMT Technical Reference `. 
```{toctree} :maxdepth: 1 +projections.rst encodings.md ``` diff --git a/examples/projections/table/README.txt b/doc/techref/projections.rst similarity index 99% rename from examples/projections/table/README.txt rename to doc/techref/projections.rst index 51bd837536c..0ee8eb2a8f1 100644 --- a/examples/projections/table/README.txt +++ b/doc/techref/projections.rst @@ -1,5 +1,5 @@ -Projection Table ----------------- +GMT Map Projections +------------------- The table below shows the projection codes for the 31 GMT projections: diff --git a/environment.yml b/environment.yml index 51120f7f48c..4e828dc34ef 100644 --- a/environment.yml +++ b/environment.yml @@ -40,7 +40,7 @@ dependencies: - sphinx-autodoc-typehints - sphinx-copybutton - sphinx-design - - sphinx-gallery<0.17.0 + - sphinx-gallery - sphinx_rtd_theme # Dev dependencies (type hints) - mypy diff --git a/examples/projections/README.txt b/examples/projections/README.txt index 594dc84a943..ceb7227f8e9 100644 --- a/examples/projections/README.txt +++ b/examples/projections/README.txt @@ -1,10 +1,11 @@ Projections =========== -PyGMT supports many map projections. Use the ``projection`` parameter to specify which -one you want to use in all plotting methods. The projection is specified by a one-letter -code along with (sometimes optional) reference longitude and latitude and the -width of the map (for example, **A**\ *lon0/lat0*\ [*/horizon*\ ]\ */width*). The map -height is determined based on the region and projection. +PyGMT supports many map projections; see :doc:`/techref/projections` for an overview. +Use the ``projection`` parameter to specify which one you want to use in all plotting +methods. The projection is specified by a one-letter code along with (sometimes optional) +reference longitude and latitude and the width of the map (for example, +**A**\ *lon0/lat0*\ [*/horizon*\ ]\ */width*). The map height is determined based on the +region and projection. These are all the available projections: From 537d684ca75237db73b0671a26debdf0b27ac1b8 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sat, 27 Jul 2024 14:45:25 +0800 Subject: [PATCH 208/218] Wrap GMT's standard data type GMT_IMAGE for images (#3338) Co-authored-by: Wei Ji <23487320+weiji14@users.noreply.github.com> --- pygmt/clib/session.py | 28 +++++---- pygmt/datatypes/__init__.py | 1 + pygmt/datatypes/image.py | 94 ++++++++++++++++++++++++++++++ pygmt/tests/test_clib_read_data.py | 37 ++++++++++++ 4 files changed, 149 insertions(+), 11 deletions(-) create mode 100644 pygmt/datatypes/image.py diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 1a77659ac75..ec5bdcf3be2 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -26,7 +26,7 @@ vectors_to_arrays, ) from pygmt.clib.loading import load_libgmt -from pygmt.datatypes import _GMT_DATASET, _GMT_GRID +from pygmt.datatypes import _GMT_DATASET, _GMT_GRID, _GMT_IMAGE from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, @@ -1071,7 +1071,7 @@ def put_matrix(self, dataset, matrix, pad=0): def read_data( self, infile: str, - kind: Literal["dataset", "grid"], + kind: Literal["dataset", "grid", "image"], family: str | None = None, geometry: str | None = None, mode: str = "GMT_READ_NORMAL", @@ -1089,8 +1089,8 @@ def read_data( infile The input file name. kind - The data kind of the input file. Valid values are ``"dataset"`` and - ``"grid"``. + The data kind of the input file. Valid values are ``"dataset"``, ``"grid"`` + and ``"image"``. 
family A valid GMT data family name (e.g., ``"GMT_IS_DATASET"``). See the ``FAMILIES`` attribute for valid names. If ``None``, will determine the data @@ -1141,6 +1141,7 @@ def read_data( _family, _geometry, dtype = { "dataset": ("GMT_IS_DATASET", "GMT_IS_PLP", _GMT_DATASET), "grid": ("GMT_IS_GRID", "GMT_IS_SURFACE", _GMT_GRID), + "image": ("GMT_IS_IMAGE", "GMT_IS_SURFACE", _GMT_IMAGE), }[kind] if family is None: family = _family @@ -1797,7 +1798,9 @@ def virtualfile_from_data( @contextlib.contextmanager def virtualfile_out( - self, kind: Literal["dataset", "grid"] = "dataset", fname: str | None = None + self, + kind: Literal["dataset", "grid", "image"] = "dataset", + fname: str | None = None, ) -> Generator[str, None, None]: r""" Create a virtual file or an actual file for storing output data. @@ -1810,8 +1813,8 @@ def virtualfile_out( Parameters ---------- kind - The data kind of the virtual file to create. Valid values are ``"dataset"`` - and ``"grid"``. Ignored if ``fname`` is specified. + The data kind of the virtual file to create. Valid values are ``"dataset"``, + ``"grid"``, and ``"image"``. Ignored if ``fname`` is specified. fname The name of the actual file to write the output data. No virtual file will be created. @@ -1854,8 +1857,10 @@ def virtualfile_out( family, geometry = { "dataset": ("GMT_IS_DATASET", "GMT_IS_PLP"), "grid": ("GMT_IS_GRID", "GMT_IS_SURFACE"), + "image": ("GMT_IS_IMAGE", "GMT_IS_SURFACE"), }[kind] - with self.open_virtualfile(family, geometry, "GMT_OUT", None) as vfile: + direction = "GMT_OUT|GMT_IS_REFERENCE" if kind == "image" else "GMT_OUT" + with self.open_virtualfile(family, geometry, direction, None) as vfile: yield vfile def inquire_virtualfile(self, vfname: str) -> int: @@ -1901,7 +1906,8 @@ def read_virtualfile( Name of the virtual file to read. kind Cast the data into a GMT data container. Valid values are ``"dataset"``, - ``"grid"`` and ``None``. If ``None``, will return a ctypes void pointer. + ``"grid"``, ``"image"`` and ``None``. If ``None``, will return a ctypes void + pointer. Returns ------- @@ -1951,9 +1957,9 @@ def read_virtualfile( # _GMT_DATASET). if kind is None: # Return the ctypes void pointer return pointer - if kind in {"image", "cube"}: + if kind == "cube": raise NotImplementedError(f"kind={kind} is not supported yet.") - dtype = {"dataset": _GMT_DATASET, "grid": _GMT_GRID}[kind] + dtype = {"dataset": _GMT_DATASET, "grid": _GMT_GRID, "image": _GMT_IMAGE}[kind] return ctp.cast(pointer, ctp.POINTER(dtype)) def virtualfile_to_dataset( diff --git a/pygmt/datatypes/__init__.py b/pygmt/datatypes/__init__.py index 237a050a9f7..3489dd19d10 100644 --- a/pygmt/datatypes/__init__.py +++ b/pygmt/datatypes/__init__.py @@ -4,3 +4,4 @@ from pygmt.datatypes.dataset import _GMT_DATASET from pygmt.datatypes.grid import _GMT_GRID +from pygmt.datatypes.image import _GMT_IMAGE diff --git a/pygmt/datatypes/image.py b/pygmt/datatypes/image.py new file mode 100644 index 00000000000..63c267b1ca0 --- /dev/null +++ b/pygmt/datatypes/image.py @@ -0,0 +1,94 @@ +""" +Wrapper for the GMT_IMAGE data type. +""" + +import ctypes as ctp +from typing import ClassVar + +from pygmt.datatypes.grid import _GMT_GRID_HEADER + + +class _GMT_IMAGE(ctp.Structure): # noqa: N801 + """ + GMT image data structure. + + Examples + -------- + >>> import numpy as np + >>> from pygmt.clib import Session + >>> with Session() as lib: + ... with lib.virtualfile_out(kind="image") as voutimg: + ... lib.call_module("read", ["@earth_day_01d", voutimg, "-Ti"]) + ... 
# Read the image from the virtual file + ... image = lib.read_virtualfile(vfname=voutimg, kind="image").contents + ... # The image header + ... header = image.header.contents + ... # Access the header properties + ... print(header.n_rows, header.n_columns, header.registration) + ... print(header.wesn[:], header.inc[:]) + ... print(header.z_scale_factor, header.z_add_offset) + ... print(header.x_units, header.y_units, header.z_units) + ... print(header.title) + ... print(header.command) + ... print(header.remark) + ... print(header.nm, header.size, header.complex_mode) + ... print(header.type, header.n_bands, header.mx, header.my) + ... print(header.pad[:]) + ... print(header.mem_layout, header.nan_value, header.xy_off) + ... # Image-specific attributes. + ... print(image.type, image.n_indexed_colors) + ... # The x and y coordinates + ... x = image.x[: header.n_columns] + ... y = image.y[: header.n_rows] + ... # The data array (with paddings) + ... data = np.reshape( + ... image.data[: header.n_bands * header.mx * header.my], + ... (header.my, header.mx, header.n_bands), + ... ) + ... # The data array (without paddings) + ... pad = header.pad[:] + ... data = data[pad[2] : header.my - pad[3], pad[0] : header.mx - pad[1], :] + 180 360 1 + [-180.0, 180.0, -90.0, 90.0] [1.0, 1.0] + 1.0 0.0 + b'x' b'y' b'z' + b'' + b'' + b'' + 64800 66976 0 + 0 3 364 184 + [2, 2, 2, 2] + b'BRPa' 0.0 0.5 + 1 0 + >>> x + [-179.5, -178.5, ..., 178.5, 179.5] + >>> y + [89.5, 88.5, ..., -88.5, -89.5] + >>> data.shape + (180, 360, 3) + >>> data.min(), data.max() + (10, 255) + """ + + _fields_: ClassVar = [ + # Data type, e.g. GMT_FLOAT + ("type", ctp.c_int), + # Array with color lookup values + ("colormap", ctp.POINTER(ctp.c_int)), + # Number of colors in a paletted image + ("n_indexed_colors", ctp.c_int), + # Pointer to full GMT header for the image + ("header", ctp.POINTER(_GMT_GRID_HEADER)), + # Pointer to actual image + ("data", ctp.POINTER(ctp.c_ubyte)), + # Pointer to an optional transparency layer stored in a separate variable + ("alpha", ctp.POINTER(ctp.c_ubyte)), + # Color interpolation + ("color_interp", ctp.c_char_p), + # Pointer to the x-coordinate vector + ("x", ctp.POINTER(ctp.c_double)), + # Pointer to the y-coordinate vector + ("y", ctp.POINTER(ctp.c_double)), + # Book-keeping variables "hidden" from the API + ("hidden", ctp.c_void_p), + ] diff --git a/pygmt/tests/test_clib_read_data.py b/pygmt/tests/test_clib_read_data.py index 43978b291c2..b2f92403df0 100644 --- a/pygmt/tests/test_clib_read_data.py +++ b/pygmt/tests/test_clib_read_data.py @@ -132,6 +132,43 @@ def test_clib_read_data_grid_actual_image(): ) +# Note: Simplify the tests for images after GMT_IMAGE.to_dataarray() is implemented. +def test_clib_read_data_image(): + """ + Test the Session.read_data method for images. + """ + with Session() as lib: + image = lib.read_data("@earth_day_01d_p", kind="image").contents + header = image.header.contents + assert header.n_rows == 180 + assert header.n_columns == 360 + assert header.n_bands == 3 + assert header.wesn[:] == [-180.0, 180.0, -90.0, 90.0] + assert image.data + + +def test_clib_read_data_image_two_steps(): + """ + Test the Session.read_data method for images in two steps, first reading the header + and then the data. 
+ """ + infile = "@earth_day_01d_p" + with Session() as lib: + # Read the header first + data_ptr = lib.read_data(infile, kind="image", mode="GMT_CONTAINER_ONLY") + image = data_ptr.contents + header = image.header.contents + assert header.n_rows == 180 + assert header.n_columns == 360 + assert header.wesn[:] == [-180.0, 180.0, -90.0, 90.0] + assert header.n_bands == 3 # Explicitly check n_bands + assert not image.data # The data is not read yet + + # Read the data + lib.read_data(infile, kind="image", mode="GMT_DATA_ONLY", data=data_ptr) + assert image.data + + def test_clib_read_data_fails(): """ Test that the Session.read_data method raises an exception if there are errors. From 1df8f19373d3591a91e2316226452b09431d6050 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 29 Jul 2024 10:14:19 +0800 Subject: [PATCH 209/218] Add a test to make sure PyGMT works with paths that contain non-ASCII characters (#3280) --- pygmt/tests/test_which.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/pygmt/tests/test_which.py b/pygmt/tests/test_which.py index 121d6cf4a27..642706fba2e 100644 --- a/pygmt/tests/test_which.py +++ b/pygmt/tests/test_which.py @@ -2,11 +2,15 @@ Test pygmt.which. """ +import os +import sys from pathlib import Path +from tempfile import TemporaryDirectory import pytest from pygmt import which from pygmt.helpers import unique_name +from pygmt.session_management import begin, end def test_which(): @@ -40,3 +44,35 @@ def test_which_fails(): which(bogus_file) with pytest.raises(FileNotFoundError): which(fname=[f"{bogus_file}.nc", f"{bogus_file}.txt"]) + + +@pytest.mark.skipif( + sys.platform == "win32", + reason="The Windows mkdir() function doesn't support multi-byte characters", +) +def test_which_nonascii_path(monkeypatch): + """ + Make sure PyGMT works with paths that contain non-ascii characters (e.g., Chinese). + """ + # Create a temporary directory with a Chinese suffix as a fake home directory. + with TemporaryDirectory(suffix="中文") as fakehome: + assert fakehome.endswith("中文") # Make sure fakename contains Chinese. + (Path(fakehome) / ".gmt").mkdir() # Create the ~/.gmt directory. + with monkeypatch.context() as mpatch: + # Set HOME to the fake home directory and GMT will use it. + mpatch.setenv("HOME", fakehome) + # Check if HOME is set correctly + assert os.getenv("HOME") == fakehome + assert os.environ["HOME"] == fakehome + + # Start a new session + begin() + # GMT should download the remote file under the new home directory. + fname = which(fname="@static_earth_relief.nc", download="c", verbose="d") + assert fname.startswith(fakehome) + assert fname.endswith("static_earth_relief.nc") + end() + + # Make sure HOME is reverted correctly. 
+ assert os.getenv("HOME") != fakehome + assert os.environ["HOME"] != fakehome From 3a589fa58bdf5a179ee46b61a1b2de3f3d9092a4 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 29 Jul 2024 10:25:02 +0800 Subject: [PATCH 210/218] Refactor clib to avoid checking GMT version repeatedly and only check once when loading the GMT library (#3254) --- .../ISSUE_TEMPLATE/5-bump_gmt_checklist.md | 2 +- doc/conf.py | 5 ++- pygmt/clib/__init__.py | 15 ++++++-- pygmt/clib/loading.py | 27 ++++++++++++++ pygmt/clib/session.py | 31 +++------------- pygmt/tests/test_clib.py | 35 +++++++++---------- pygmt/tests/test_clib_loading.py | 20 ++++++++++- 7 files changed, 81 insertions(+), 54 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/5-bump_gmt_checklist.md b/.github/ISSUE_TEMPLATE/5-bump_gmt_checklist.md index a4591f12847..9652f2150ee 100644 --- a/.github/ISSUE_TEMPLATE/5-bump_gmt_checklist.md +++ b/.github/ISSUE_TEMPLATE/5-bump_gmt_checklist.md @@ -35,7 +35,7 @@ using the following command: **To-Do for bumping the minimum required GMT version**: - [ ] Bump the minimum required GMT version (1 PR) - - [ ] Update `required_version` in `pygmt/clib/session.py` + - [ ] Update `required_gmt_version` in `pygmt/clib/__init__.py` - [ ] Update `test_get_default` in `pygmt/tests/test_clib.py` - [ ] Update minimum required versions in `doc/minversions.md` - [ ] Remove unsupported GMT version from `.github/workflows/ci_tests_legacy.yaml` diff --git a/doc/conf.py b/doc/conf.py index b8a6c6ce4fc..9348960f325 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -7,15 +7,14 @@ # ruff: isort: off from sphinx_gallery.sorting import ExplicitOrder, ExampleTitleSortKey -import pygmt +from pygmt.clib import required_gmt_version from pygmt import __commit__, __version__ from pygmt.sphinx_gallery import PyGMTScraper # ruff: isort: on requires_python = metadata("pygmt")["Requires-Python"] -with pygmt.clib.Session() as lib: - requires_gmt = f">={lib.required_version}" +requires_gmt = f">={required_gmt_version}" extensions = [ "myst_parser", diff --git a/pygmt/clib/__init__.py b/pygmt/clib/__init__.py index 868616f2345..9f145716f0e 100644 --- a/pygmt/clib/__init__.py +++ b/pygmt/clib/__init__.py @@ -5,7 +5,16 @@ interface. Access to the C library is done through ctypes. """ -from pygmt.clib.session import Session +from packaging.version import Version +from pygmt.clib.session import Session, __gmt_version__ +from pygmt.exceptions import GMTVersionError -with Session() as lib: - __gmt_version__ = lib.info["version"] +required_gmt_version = "6.3.0" + +# Check if the GMT version is older than the required version. +if Version(__gmt_version__) < Version(required_gmt_version): + msg = ( + f"Using an incompatible GMT version {__gmt_version__}. " + f"Must be equal or newer than {required_gmt_version}." + ) + raise GMTVersionError(msg) diff --git a/pygmt/clib/loading.py b/pygmt/clib/loading.py index 7bcf576b9b6..9b785fe826e 100644 --- a/pygmt/clib/loading.py +++ b/pygmt/clib/loading.py @@ -64,6 +64,33 @@ def load_libgmt(lib_fullnames: Iterator[str] | None = None) -> ctypes.CDLL: return libgmt +def get_gmt_version(libgmt: ctypes.CDLL) -> str: + """ + Get the GMT version string of the GMT shared library. + + Parameters + ---------- + libgmt + The GMT shared library. + + Returns + ------- + The GMT version string in *major.minor.patch* format. + """ + func = libgmt.GMT_Get_Version + func.argtypes = ( + ctypes.c_void_p, # Unused parameter, so it can be None. 
+ ctypes.POINTER(ctypes.c_uint), # major + ctypes.POINTER(ctypes.c_uint), # minor + ctypes.POINTER(ctypes.c_uint), # patch + ) + # The function return value is the current library version as a float, e.g., 6.5. + func.restype = ctypes.c_float + major, minor, patch = ctypes.c_uint(0), ctypes.c_uint(0), ctypes.c_uint(0) + func(None, major, minor, patch) + return f"{major.value}.{minor.value}.{patch.value}" + + def clib_names(os_name: str) -> list[str]: """ Return the name(s) of GMT's shared library for the current operating system. diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index ec5bdcf3be2..829381d339d 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -25,14 +25,9 @@ strings_to_ctypes_array, vectors_to_arrays, ) -from pygmt.clib.loading import load_libgmt +from pygmt.clib.loading import get_gmt_version, load_libgmt from pygmt.datatypes import _GMT_DATASET, _GMT_GRID, _GMT_IMAGE -from pygmt.exceptions import ( - GMTCLibError, - GMTCLibNoSessionError, - GMTInvalidInput, - GMTVersionError, -) +from pygmt.exceptions import GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput from pygmt.helpers import ( _validate_data_input, data_kind, @@ -98,6 +93,7 @@ # Load the GMT library outside the Session class to avoid repeated loading. _libgmt = load_libgmt() +__gmt_version__ = get_gmt_version(_libgmt) class Session: @@ -155,9 +151,6 @@ class Session: -55 -47 -24 -10 190 981 1 1 8 14 1 1 """ - # The minimum supported GMT version. - required_version = "6.3.0" - @property def session_pointer(self): """ @@ -212,27 +205,11 @@ def info(self): def __enter__(self): """ - Create a GMT API session and check the libgmt version. + Create a GMT API session. Calls :meth:`pygmt.clib.Session.create`. - - Raises - ------ - GMTVersionError - If the version reported by libgmt is less than - ``Session.required_version``. Will destroy the session before - raising the exception. """ self.create("pygmt-session") - # Need to store the version info because 'get_default' won't work after - # the session is destroyed. - version = self.info["version"] - if Version(version) < Version(self.required_version): - self.destroy() - raise GMTVersionError( - f"Using an incompatible GMT version {version}. " - f"Must be equal or newer than {self.required_version}." - ) return self def __exit__(self, exc_type, exc_value, traceback): diff --git a/pygmt/tests/test_clib.py b/pygmt/tests/test_clib.py index f833e01a37b..7591aff0729 100644 --- a/pygmt/tests/test_clib.py +++ b/pygmt/tests/test_clib.py @@ -577,27 +577,24 @@ def mock_defaults(api, name, value): # noqa: ARG001 ses.destroy() -def test_fails_for_wrong_version(): +def test_fails_for_wrong_version(monkeypatch): """ - Make sure the clib.Session raises an exception if GMT is too old. + Make sure that importing clib raise an exception if GMT is too old. """ + import importlib - # Mock GMT_Get_Default to return an old version - def mock_defaults(api, name, value): # noqa: ARG001 - """ - Return an old version. - """ - if name == b"API_VERSION": - value.value = b"5.4.3" - else: - value.value = b"bla" - return 0 + with monkeypatch.context() as mpatch: + # Make sure the current GMT major version is 6. + assert clib.__gmt_version__.split(".")[0] == "6" - lib = clib.Session() - with mock(lib, "GMT_Get_Default", mock_func=mock_defaults): + # Monkeypatch the version string returned by pygmt.clib.loading.get_gmt_version. 
+ mpatch.setattr(clib.loading, "get_gmt_version", lambda libgmt: "5.4.3") # noqa: ARG005 + + # Reload clib.session and check the __gmt_version__ string. + importlib.reload(clib.session) + assert clib.session.__gmt_version__ == "5.4.3" + + # Should raise an exception when pygmt.clib is loaded/reloaded. with pytest.raises(GMTVersionError): - with lib: - assert lib.info["version"] != "5.4.3" - # Make sure the session is closed when the exception is raised. - with pytest.raises(GMTCLibNoSessionError): - assert lib.session_pointer + importlib.reload(clib) + assert clib.__gmt_version__ == "5.4.3" # Make sure it's still the old version diff --git a/pygmt/tests/test_clib_loading.py b/pygmt/tests/test_clib_loading.py index df92a73fbd0..3c65c5caf46 100644 --- a/pygmt/tests/test_clib_loading.py +++ b/pygmt/tests/test_clib_loading.py @@ -11,7 +11,13 @@ from pathlib import PurePath import pytest -from pygmt.clib.loading import check_libgmt, clib_full_names, clib_names, load_libgmt +from pygmt.clib.loading import ( + check_libgmt, + clib_full_names, + clib_names, + get_gmt_version, + load_libgmt, +) from pygmt.clib.session import Session from pygmt.exceptions import GMTCLibError, GMTCLibNotFoundError, GMTOSError @@ -360,3 +366,15 @@ def test_clib_full_names_gmt_library_path_incorrect_path_included( # Windows: find_library() searches the library in PATH, so one more npath = 2 if sys.platform == "win32" else 1 assert list(lib_fullpaths) == [gmt_lib_realpath] * npath + gmt_lib_names + + +############################################################################### +# Test get_gmt_version +def test_get_gmt_version(): + """ + Test if get_gmt_version returns a version string in major.minor.patch format. + """ + version = get_gmt_version(load_libgmt()) + assert isinstance(version, str) + assert len(version.split(".")) == 3 # In major.minor.patch format + assert version.split(".")[0] == "6" # Is GMT 6.x.x From a5012d9550db6566a9debf528f490e1cc579a1af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 10:45:47 +1200 Subject: [PATCH 211/218] Build(deps): Bump CodSpeedHQ/action from 2.4.3 to 3.0.0 (#3362) Bumps [CodSpeedHQ/action](https://github.com/codspeedhq/action) from 2.4.3 to 3.0.0. - [Release notes](https://github.com/codspeedhq/action/releases) - [Changelog](https://github.com/CodSpeedHQ/action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codspeedhq/action/compare/v2.4.3...v3.0.0) --- updated-dependencies: - dependency-name: CodSpeedHQ/action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmarks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index e2230cb7a77..fe0f9c2ceb3 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -87,7 +87,7 @@ jobs: # Run the benchmark tests - name: Run benchmarks - uses: CodSpeedHQ/action@v2.4.3 + uses: CodSpeedHQ/action@v3.0.0 with: # 'bash -el -c' is needed to use the custom shell. # See https://github.com/CodSpeedHQ/action/issues/65. From 3ac074c0ade6263c8d7b8f4d09f4cea2d627c46f Mon Sep 17 00:00:00 2001 From: "Andre L. 
Belem" Date: Wed, 31 Jul 2024 05:43:11 -0300 Subject: [PATCH 212/218] Updating External Resources: Add tutorial in Portuguese and using PyGMT in Google Colab (#3360) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yvonne Fröhlich <94163266+yvonnefroehlich@users.noreply.github.com> --- doc/external_resources.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/external_resources.md b/doc/external_resources.md index bdf17695baf..7bc9fcf514a 100644 --- a/doc/external_resources.md +++ b/doc/external_resources.md @@ -12,6 +12,16 @@ to submit a pull request with your recommended addition to the :::::{grid} 1 2 2 3 +::::{grid-item-card} 2024 PyGMT Webinar using Google Colab (in Portuguese) +:link: https://github.com/andrebelem/Oficina_PyGMT +:text-align: center +:margin: 0 3 0 0 + +![](https://github.com/andrebelem/Oficina_PyGMT/raw/main/Datasets/3D_sample.png) ++++ +Andre Belem +:::: + ::::{grid-item-card} 2022 EGU SC5.2: Crafting beautiful maps with PyGMT :link: https://www.generic-mapping-tools.org/egu22pygmt/ :text-align: center From 221e8d912c4e7575e2d4ccfeb3ec163c5c2f44ed Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Thu, 1 Aug 2024 14:00:41 +0800 Subject: [PATCH 213/218] Let pygmt.show_versions show GDAL version (#3364) --- pygmt/_show_versions.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pygmt/_show_versions.py b/pygmt/_show_versions.py index 6da2ac1c736..b1d474864e9 100644 --- a/pygmt/_show_versions.py +++ b/pygmt/_show_versions.py @@ -96,6 +96,18 @@ def _check_ghostscript_version(gs_version: str) -> str | None: return None +def _get_gdal_version(): + """ + Get GDAL version. + """ + try: + from osgeo import gdal + + return gdal.__version__ + except ImportError: + return None + + def show_versions(file=sys.stdout): """ Print various dependency versions which are useful when submitting bug reports. @@ -120,6 +132,7 @@ def show_versions(file=sys.stdout): } deps = [Requirement(v).name for v in importlib.metadata.requires("pygmt")] gs_version = _get_ghostscript_version() + gdal_version = _get_gdal_version() lines = [] lines.append("PyGMT information:") @@ -128,6 +141,7 @@ def show_versions(file=sys.stdout): lines.extend([f" {key}: {val}" for key, val in sys_info.items()]) lines.append("Dependency information:") lines.extend([f" {modname}: {_get_module_version(modname)}" for modname in deps]) + lines.append(f" gdal: {gdal_version}") lines.append(f" ghostscript: {gs_version}") lines.append("GMT library information:") lines.extend([f" {key}: {val}" for key, val in _get_clib_info().items()]) From 053c5d8919d0b043baff590523d5d738ede805b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yvonne=20Fr=C3=B6hlich?= <94163266+yvonnefroehlich@users.noreply.github.com> Date: Fri, 2 Aug 2024 06:14:27 +0200 Subject: [PATCH 214/218] Convert projection table from reStructuredText to Markdown (#3363) --- doc/techref/index.md | 2 +- doc/techref/projections.md | 49 ++++++++++++++++++++ doc/techref/projections.rst | 90 ------------------------------------- 3 files changed, 50 insertions(+), 91 deletions(-) create mode 100644 doc/techref/projections.md delete mode 100644 doc/techref/projections.rst diff --git a/doc/techref/index.md b/doc/techref/index.md index 91b1e387fd8..8ffd22fd851 100644 --- a/doc/techref/index.md +++ b/doc/techref/index.md @@ -8,6 +8,6 @@ visit the {gmt-docs}`GMT Technical Reference `. 
```{toctree} :maxdepth: 1 -projections.rst +projections.md encodings.md ``` diff --git a/doc/techref/projections.md b/doc/techref/projections.md new file mode 100644 index 00000000000..4f41daf38f9 --- /dev/null +++ b/doc/techref/projections.md @@ -0,0 +1,49 @@ +--- +myst: + substitutions: + lon0: "lon{sub}`0`" + lat0: "lat{sub}`0`" + lon1: "lon{sub}`1`" + lat1: "lat{sub}`1`" + lat2: "lat{sub}`2`" + lonp: "lon{sub}`p`" + latp: "lat{sub}`p`" +--- + +# GMT Map Projections + +The table below shows the projection codes for the 31 GMT map projections: + +| PyGMT Projection Argument | Projection Name | +| --- | --- | +| **A**{{ lon0 }}/{{ lat0 }}[/*horizon*]/*width* | {doc}`Lambert azimuthal equal area ` | +| **B**{{ lon0 }}/{{ lat0 }}/{{ lat1 }}/{{ lat2 }}/*width* | {doc}`Albers conic equal area ` | +| **C**{{ lon0 }}/{{ lat0 }}/*width* | {doc}`Cassini cylindrical ` | +| **Cyl_stere**/[{{ lon0 }}/[{{ lat0 }}/]]*width* | {doc}`Cylindrical stereographic ` | +| **D**{{ lon0 }}/{{ lat0 }}/{{ lat1 }}/{{ lat2 }}/*width* | {doc}`Equidistant conic ` | +| **E**{{ lon0 }}/{{ lat0 }}[/*horizon*]/*width* | {doc}`Azimuthal equidistant ` | +| **F**{{ lon0 }}/{{ lat0 }}[/*horizon*]/*width* | {doc}`Azimuthal gnomonic ` | +| **G**{{ lon0 }}/{{ lat0 }}[/*horizon*]/*width* | {doc}`Azimuthal orthographic ` | +| **G**{{ lon0 }}/{{ lat0 }}/*width*[**+a***azimuth*][**+t***tilt*][**+v***vwidth*/*vheight*][**+w***twist*][**+z***altitude*] | {doc}`General perspective ` | +| **H**[{{ lon0 }}/]*width* | {doc}`Hammer equal area ` | +| **I**[{{ lon0 }}/]*width* | {doc}`Sinusoidal equal area ` | +| **J**[{{ lon0 }}/]*width* | {doc}`Miller cylindrical ` | +| **Kf**[{{ lon0 }}/]*width* | {doc}`Eckert IV equal area ` | +| **Ks**[{{ lon0 }}/]*width* | {doc}`Eckert VI equal area ` | +| **L**{{ lon0 }}/{{ lat0 }}/{{ lat1 }}/{{ lat2 }}/*width* | {doc}`Lambert conic conformal ` | +| **M**[{{ lon0 }}/[{{ lat0 }}/]]*width* | {doc}`Mercator cylindrical ` | +| **N**[{{ lon0 }}/]*width* | {doc}`Robinson ` | +| **Oa**{{ lon0 }}/{{ lat0 }}/*azimuth*/*width*[**+v**] | {doc}`Oblique Mercator, 1: origin and azimuth ` | +| **Ob**{{ lon0 }}/{{ lat0 }}/{{ lon1 }}/{{ lat1 }}/*width*[**+v**] | {doc}`Oblique Mercator, 2: two points ` | +| **Oc**{{ lon0 }}/{{ lat0 }}/{{ lonp }}/{{ latp }}/*width*[**+v**] | {doc}`Oblique Mercator, 3: origin and pole ` | +| **P***width*[**+a**][**+f**[**e**\|**p**\|*radius*]][**+r***offset*][**+t***origin*][**+z**[**p**\|*radius*]] | {doc}`Polar ` [azimuthal] ({math}`\theta, r`) (or cylindrical) | +| **Poly**/[{{ lon0 }}/[{{ lat0 }}/]]*width* | {doc}`Polyconic ` | +| **Q**[{{ lon0 }}/[{{ lat0 }}/]]*width* | {doc}`Equidistant cylindrica ` | +| **R**[{{ lon0 }}/]*width* | {doc}`Winkel Tripel ` | +| **S**{{ lon0 }}/{{ lat0 }}[/*horizon*]/*width* | {doc}`General stereographic ` | +| **T**{{ lon0 }}[/{{ lat0 }}]/*width* | {doc}`Transverse Mercator ` | +| **U***zone*/*width* | {doc}`Universal Transverse Mercator (UTM) ` | +| **V**[{{ lon0 }}/]*width* | {doc}`Van der Grinten ` | +| **W**[{{ lon0 }}/]*width* | {doc}`Mollweide ` | +| **X***width*[**l**\|**p***exp*\|**T**\|**t**][/*height*[**l**\|**p***exp*\|**T**\|**t**]][**d**] | {doc}`Linear `, {doc}`logarithmic `, {doc}`power `, and time | +| **Y**{{ lon0 }}/{{ lat0 }}/*width* | {doc}`Cylindrical equal area ` | diff --git a/doc/techref/projections.rst b/doc/techref/projections.rst deleted file mode 100644 index 0ee8eb2a8f1..00000000000 --- a/doc/techref/projections.rst +++ /dev/null @@ -1,90 +0,0 @@ -GMT Map Projections -------------------- - -The table below shows the 
projection codes for the 31 GMT projections: - -.. Substitution definitions: -.. |lon0| replace:: lon\ :sub:`0` -.. |lat0| replace:: lat\ :sub:`0` -.. |lon1| replace:: lon\ :sub:`1` -.. |lat1| replace:: lat\ :sub:`1` -.. |lat2| replace:: lat\ :sub:`2` -.. |lonp| replace:: lon\ :sub:`p` -.. |latp| replace:: lat\ :sub:`p` - -.. list-table:: - :widths: 20 28 - :header-rows: 1 - - * - PyGMT Projection Argument - - Projection Name - * - **A**\ |lon0|/|lat0|\ [/\ *horizon*]/\ *width* - - :doc:`Lambert azimuthal equal area ` - * - **B**\ |lon0|/|lat0|/|lat1|/|lat2|/\ *width* - - :doc:`Albers conic equal area ` - * - **C**\ |lon0|/|lat0|/\ *width* - - :doc:`Cassini cylindrical ` - * - **Cyl_stere**/\ [|lon0|/\ [|lat0|/]]\ *width* - - :doc:`Cylindrical stereographic ` - * - **D**\ |lon0|/|lat0|/|lat1|/|lat2|/\ *width* - - :doc:`Equidistant conic ` - * - **E**\ |lon0|/|lat0|\ [/\ *horizon*]/\ *width* - - :doc:`Azimuthal equidistant ` - * - **F**\ |lon0|/|lat0|\ [/\ *horizon*]/\ *width* - - :doc:`Azimuthal gnomonic ` - * - **G**\ |lon0|/|lat0|\ [/\ *horizon*]/\ *width* - - :doc:`Azimuthal orthographic ` - * - **G**\ |lon0|/|lat0|/\ *width*\ [**+a**\ *azimuth*]\ [**+t**\ *tilt*]\ - [**+v**\ *vwidth*/*vheight*]\ [**+w**\ *twist*]\ [**+z**\ *altitude*] - - :doc:`General perspective ` - * - **H**\ [|lon0|/]\ *width* - - :doc:`Hammer equal area ` - * - **I**\ [|lon0|/]\ *width* - - :doc:`Sinusoidal equal area ` - * - **J**\ [|lon0|/]\ *width* - - :doc:`Miller cylindrical ` - * - **Kf**\ [|lon0|/]\ *width* - - :doc:`Eckert IV equal area ` - * - **Ks**\ [|lon0|/]\ *width* - - :doc:`Eckert VI equal area ` - * - **L**\ |lon0|/|lat0|/|lat1|/|lat2|/\ *width* - - :doc:`Lambert conic conformal ` - * - **M**\ [|lon0|/\ [|lat0|/]]\ *width* - - :doc:`Mercator cylindrical ` - * - **N**\ [|lon0|/]\ *width* - - :doc:`Robinson ` - * - **Oa**\ |lon0|/|lat0|/\ *azimuth*/*width*\ [**+v**] - - :doc:`Oblique Mercator, 1: origin and azimuth ` - * - **Ob**\ |lon0|/|lat0|/|lon1|/|lat1|/\ *width*\ [**+v**] - - :doc:`Oblique Mercator, 2: two points ` - * - **Oc**\ |lon0|/|lat0|/|lonp|/|latp|/\ *width*\ [**+v**] - - :doc:`Oblique Mercator, 3: origin and pole ` - * - **P**\ *width*\ [**+a**]\ [**+f**\ [**e**\|\ **p**\|\ *radius*]]\ - [**+r**\ *offset*][**+t**\ *origin*][**+z**\ [**p**\|\ *radius*]] - - :doc:`Polar ` [azimuthal] - (:math:`\theta, r`) (or cylindrical) - * - **Poly**/\ [|lon0|/\ [|lat0|/]]\ *width* - - :doc:`Polyconic ` - * - **Q**\ [|lon0|/\ [|lat0|/]]\ *width* - - :doc:`Equidistant cylindrical ` - * - **R**\ [|lon0|/]\ *width* - - :doc:`Winkel Tripel ` - * - **S**\ |lon0|/|lat0|\ [/\ *horizon*]/\ *width* - - :doc:`General stereographic - ` - * - **T**\ |lon0|\ [/\ |lat0|]/\ *width* - - :doc:`Transverse Mercator ` - * - **U**\ *zone*/*width* - - :doc:`Universal Transverse Mercator (UTM) - ` - * - **V**\ [|lon0|/]\ *width* - - :doc:`Van der Grinten ` - * - **W**\ [|lon0|/]\ *width* - - :doc:`Mollweide ` - * - **X**\ *width*\ [**l**\|\ **p**\ *exp*\|\ **T**\|\ **t**][/\ *height*\ - [**l**\|\ **p**\ *exp*\|\ **T**\|\ **t**]][**d**] - - :doc:`Linear `, - :doc:`logarithmic `, - :doc:`power `, and time - * - **Y**\ |lon0|/|lat0|/\ *width* - - :doc:`Cylindrical equal area ` From 4cca4c1a6413c113bb9e5be257e67a64f28efcff Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Sun, 4 Aug 2024 13:40:39 +0800 Subject: [PATCH 215/218] Remove the verbose='d' setting in test_which_nonascii_path (#3368) --- pygmt/tests/test_which.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygmt/tests/test_which.py 
b/pygmt/tests/test_which.py index 642706fba2e..0bc453bc118 100644 --- a/pygmt/tests/test_which.py +++ b/pygmt/tests/test_which.py @@ -68,7 +68,7 @@ def test_which_nonascii_path(monkeypatch): # Start a new session begin() # GMT should download the remote file under the new home directory. - fname = which(fname="@static_earth_relief.nc", download="c", verbose="d") + fname = which(fname="@static_earth_relief.nc", download="c") assert fname.startswith(fakehome) assert fname.endswith("static_earth_relief.nc") end() From 53114b29444b02d9e4d62f0c478f9d642bbeb7b6 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 5 Aug 2024 09:12:19 +0800 Subject: [PATCH 216/218] SPEC 0: Bump minimum supported versions to xarray 2022.09 (#3372) --- .github/workflows/ci_tests.yaml | 2 +- doc/minversions.md | 2 +- environment.yml | 2 +- pyproject.toml | 2 +- requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 362c8c5df76..fe5220ba67b 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -72,7 +72,7 @@ jobs: - python-version: '3.10' numpy-version: '1.24' pandas-version: '=1.5' - xarray-version: '=2022.06' + xarray-version: '=2022.09' optional-packages: '' - python-version: '3.12' numpy-version: '2.0' diff --git a/doc/minversions.md b/doc/minversions.md index 1e5ac8f8e46..dc3e0f4c7e7 100644 --- a/doc/minversions.md +++ b/doc/minversions.md @@ -12,7 +12,7 @@ after their initial release. | PyGMT Version | GMT | Python | NumPy | Pandas | Xarray | |---|---|---|---|---|---| -| [Dev][]* [[Docs][Docs Dev]] | >=6.3.0 | >=3.10 | >=1.24 | >=1.5 | >=2022.06 | +| [Dev][]* [[Docs][Docs Dev]] | >=6.3.0 | >=3.10 | >=1.24 | >=1.5 | >=2022.09 | | [v0.12.0][] [[Docs][Docs v0.12.0]] | >=6.3.0 | >=3.10 | >=1.23 | >=1.5 | >=2022.06 | | [v0.11.0][] [[Docs][Docs v0.11.0]] | >=6.3.0 | >=3.9 | >=1.23 | | | | [v0.10.0][] [[Docs][Docs v0.10.0]] | >=6.3.0 | >=3.9 | >=1.22 | | | diff --git a/environment.yml b/environment.yml index 4e828dc34ef..3e76f1e23c6 100644 --- a/environment.yml +++ b/environment.yml @@ -9,7 +9,7 @@ dependencies: - ghostscript=10.03.1 - numpy>=1.24 - pandas>=1.5 - - xarray>=2022.06 + - xarray>=2022.09 - netCDF4 - packaging # Optional dependencies diff --git a/pyproject.toml b/pyproject.toml index e61ec4bd818..39571bf590e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ classifiers = [ dependencies = [ "numpy>=1.24", "pandas>=1.5", - "xarray>=2022.06", + "xarray>=2022.09", "netCDF4", "packaging", ] diff --git a/requirements.txt b/requirements.txt index c48eb0f989f..c718e89cfbb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # Required packages numpy>=1.24 pandas>=1.5 -xarray>=2022.06 +xarray>=2022.09 netCDF4 packaging From 8859ca368a33c83631a0f95f0d23b9cde99920a7 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Mon, 5 Aug 2024 10:05:45 +0800 Subject: [PATCH 217/218] Patch the callback print function to suppress the UnicodeDecodeError (#3367) --- pygmt/clib/session.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pygmt/clib/session.py b/pygmt/clib/session.py index 829381d339d..a0c11373d68 100644 --- a/pygmt/clib/session.py +++ b/pygmt/clib/session.py @@ -379,10 +379,14 @@ def print_func(file_pointer, message): # noqa: ARG001 We'll capture the messages and print them to stderr so that they will show up on the Jupyter notebook. 
""" - message = message.decode().strip() + # Have to use try..except due to upstream GMT bug in GMT <= 6.5.0. + # See https://github.com/GenericMappingTools/pygmt/issues/3205. + try: + message = message.decode().strip() + except UnicodeDecodeError: + return 0 self._error_log.append(message) - # flush to make sure the messages are printed even if we have a - # crash. + # Flush to make sure the messages are printed even if we have a crash. print(message, file=sys.stderr, flush=True) # noqa: T201 return 0 From dfedf16bc0c06b90ae2d95ec608a2b6b48dff5e0 Mon Sep 17 00:00:00 2001 From: Dongdong Tian Date: Tue, 6 Aug 2024 10:03:39 +0800 Subject: [PATCH 218/218] Format the author list into a table (#3373) --- AUTHORS.md | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/AUTHORS.md b/AUTHORS.md index d6f3a7a58ae..096895d2fe8 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -8,20 +8,22 @@ Hawaiʻi at Mānoa. The following people have contributed code and/or documentation to the project (alphabetically by name) and are considered to be "PyGMT Developers": -* [Abhishek Anant](https://twitter.com/itsabhianant) | [0000-0002-5751-2010](https://orcid.org/0000-0002-5751-2010) | Unaffiliated -* [Andre L. Belem](https://github.com/andrebelem) | [0000-0002-8865-6180](https://orcid.org/0000-0002-8865-6180) | Fluminense Federal University, Brazil -* [Dongdong Tian](https://seisman.info/) | [0000-0001-7967-1197](https://orcid.org/0000-0001-7967-1197) | China University of Geosciences, China -* [Jamie Quinn](http://jamiejquinn.com) | [0000-0002-0268-7032](https://orcid.org/0000-0002-0268-7032) | University College London, United Kingdom -* [Jiayuan Yao](https://github.com/core-man) | [0000-0001-7036-4238](https://orcid.org/0000-0001-7036-4238) | Nanyang Technological University, Singapore -* [Jing-Hui Tong](https://github.com/jhtong33) | [0009-0002-7195-3071](https://orcid.org/0009-0002-7195-3071) | National Taiwan Normal University, Taiwan -* [Kathryn Materna](https://github.com/kmaterna) | [0000-0002-6687-980X](https://orcid.org/0000-0002-6687-980X) | US Geological Survey, USA -* [Leonardo Uieda](http://www.leouieda.com/) | [0000-0001-6123-9515](https://orcid.org/0000-0001-6123-9515) | University of Liverpool, United Kingdom -* [Liam Toney](https://liam.earth/) | [0000-0003-0167-9433](https://orcid.org/0000-0003-0167-9433) | University of Alaska Fairbanks, USA -* [Malte Ziebarth](https://github.com/mjziebarth) | [0000-0002-5190-4478](https://orcid.org/0000-0002-5190-4478) | GFZ German Research Centre for Geosciences, Germany -* [Max Jones](https://github.com/maxrjones) | [0000-0003-0180-8928](https://orcid.org/0000-0003-0180-8928) | University of Hawaiʻi at Mānoa, USA -* [Michael Grund](https://github.com/michaelgrund) | [0000-0001-8759-2018](https://orcid.org/0000-0001-8759-2018) | SNP Innovation Lab GmbH, Germany -* [Tyler Newton](http://www.tnewton.com/) | [0000-0002-1560-6553](https://orcid.org/0000-0002-1560-6553) | University of Oregon, USA -* [Wei Ji Leong](https://github.com/weiji14) | [0000-0003-2354-1988](https://orcid.org/0000-0003-2354-1988) | Development Seed, USA -* [William Schlitzer](https://github.com/willschlitzer) | [0000-0002-5843-2282](https://orcid.org/0000-0002-5843-2282) | Unaffiliated -* [Yohai Magen](https://github.com/yohaimagen) | [0000-0002-4892-4013](https://orcid.org/0000-0002-4892-4013) | Tel Aviv University, Israel -* [Yvonne Fröhlich](https://github.com/yvonnefroehlich) | [0000-0002-8566-0619](https://orcid.org/0000-0002-8566-0619) | 
Karlsruhe Institute of Technology, Germany +| Name | ORCID | Affiliation | +|---|---|---| +| [Abhishek Anant](https://twitter.com/itsabhianant) | [0000-0002-5751-2010](https://orcid.org/0000-0002-5751-2010) | Unaffiliated | +| [Andre L. Belem](https://github.com/andrebelem) | [0000-0002-8865-6180](https://orcid.org/0000-0002-8865-6180) | Fluminense Federal University, Brazil | +| [Dongdong Tian](https://seisman.info/) | [0000-0001-7967-1197](https://orcid.org/0000-0001-7967-1197) | China University of Geosciences, China | +| [Jamie Quinn](http://jamiejquinn.com) | [0000-0002-0268-7032](https://orcid.org/0000-0002-0268-7032) | University College London, United Kingdom | +| [Jiayuan Yao](https://github.com/core-man) | [0000-0001-7036-4238](https://orcid.org/0000-0001-7036-4238) | Nanyang Technological University, Singapore | +| [Jing-Hui Tong](https://github.com/jhtong33) | [0009-0002-7195-3071](https://orcid.org/0009-0002-7195-3071) | National Taiwan Normal University, Taiwan | +| [Kathryn Materna](https://github.com/kmaterna) | [0000-0002-6687-980X](https://orcid.org/0000-0002-6687-980X) | US Geological Survey, USA | +| [Leonardo Uieda](http://www.leouieda.com/) | [0000-0001-6123-9515](https://orcid.org/0000-0001-6123-9515) | University of Liverpool, United Kingdom | +| [Liam Toney](https://liam.earth/) | [0000-0003-0167-9433](https://orcid.org/0000-0003-0167-9433) | University of Alaska Fairbanks, USA | +| [Malte Ziebarth](https://github.com/mjziebarth) | [0000-0002-5190-4478](https://orcid.org/0000-0002-5190-4478) | GFZ German Research Centre for Geosciences, Germany | +| [Max Jones](https://github.com/maxrjones) | [0000-0003-0180-8928](https://orcid.org/0000-0003-0180-8928) | University of Hawaiʻi at Mānoa, USA | +| [Michael Grund](https://github.com/michaelgrund) | [0000-0001-8759-2018](https://orcid.org/0000-0001-8759-2018) | SNP Innovation Lab GmbH, Germany | +| [Tyler Newton](http://www.tnewton.com/) | [0000-0002-1560-6553](https://orcid.org/0000-0002-1560-6553) | University of Oregon, USA | +| [Wei Ji Leong](https://github.com/weiji14) | [0000-0003-2354-1988](https://orcid.org/0000-0003-2354-1988) | Development Seed, USA | +| [William Schlitzer](https://github.com/willschlitzer) | [0000-0002-5843-2282](https://orcid.org/0000-0002-5843-2282) | Unaffiliated | +| [Yohai Magen](https://github.com/yohaimagen) | [0000-0002-4892-4013](https://orcid.org/0000-0002-4892-4013) | Tel Aviv University, Israel | +| [Yvonne Fröhlich](https://github.com/yvonnefroehlich) | [0000-0002-8566-0619](https://orcid.org/0000-0002-8566-0619) | Karlsruhe Institute of Technology, Germany |
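
The refactoring in PATCH 210/218 above moves the GMT version check from `Session.__enter__` to import time: `load_libgmt()` loads the shared library once, `get_gmt_version()` reads its version through `GMT_Get_Version`, and `pygmt/clib/__init__.py` compares that string against `required_gmt_version` ("6.3.0"). A minimal sketch of how these pieces compose is shown below; it is illustrative only and not part of the patch series, and it assumes a PyGMT checkout with these patches applied plus a GMT installation that `load_libgmt()` can find.

```python
# Illustrative sketch only -- not part of the patches above.
from packaging.version import Version

from pygmt.clib.loading import get_gmt_version, load_libgmt
from pygmt.exceptions import GMTVersionError

libgmt = load_libgmt()                 # tries the paths yielded by clib_full_names()
gmt_version = get_gmt_version(libgmt)  # version string, e.g. "6.5.0"

# Mirrors the import-time check added to pygmt/clib/__init__.py.
if Version(gmt_version) < Version("6.3.0"):
    raise GMTVersionError(
        f"Using an incompatible GMT version {gmt_version}. "
        "Must be equal or newer than 6.3.0."
    )
print(f"Loaded GMT {gmt_version}")
```

Doing this check once at library-load time avoids re-reading GMT defaults inside every `Session` context and lets the version string be exposed as `pygmt.clib.__gmt_version__`, which is what the reworked `test_fails_for_wrong_version` monkeypatches.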
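
The projection codes tabulated in PATCH 214/218 are passed to PyGMT plotting methods through the `projection` parameter, with the trailing *width* written as a plot dimension such as `15c`. A small usage sketch follows (assumes a working PyGMT/GMT installation; the region and width values are arbitrary examples, not taken from the patches):

```python
# Illustrative sketch only -- not part of the patches above.
import pygmt

fig = pygmt.Figure()
# "M15c": Mercator cylindrical projection (the "M" row of the table), 15 cm wide.
fig.basemap(region=[-180, 180, -60, 60], projection="M15c", frame=True)
fig.coast(shorelines=True)  # add coastlines for reference
fig.show()
```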