Skip to content

Commit

Permalink
Merge branch 'main' into python312
Browse files Browse the repository at this point in the history
  • Loading branch information
valeriupredoi committed Oct 31, 2023
2 parents 5e1225e + 56cc385 commit 79d6c2f
Show file tree
Hide file tree
Showing 9 changed files with 217 additions and 250 deletions.
77 changes: 0 additions & 77 deletions .github/workflows/run-tests-comment.yml

This file was deleted.

2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
[![codecov](https://codecov.io/gh/ESMValGroup/ESMValCore/branch/main/graph/badge.svg?token=wQnDzguwq6)](https://codecov.io/gh/ESMValGroup/ESMValCore)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/5d496dea9ef64ec68e448a6df5a65783)](https://www.codacy.com/gh/ESMValGroup/ESMValCore?utm_source=github.com&utm_medium=referral&utm_content=ESMValGroup/ESMValCore&utm_campaign=Badge_Grade)
[![Docker Build Status](https://img.shields.io/docker/cloud/build/esmvalgroup/esmvalcore)](https://hub.docker.com/r/esmvalgroup/esmvalcore/)
[![Anaconda-Server Badge](https://anaconda.org/conda-forge/esmvalcore/badges/version.svg)](https://anaconda.org/conda-forge/esmvalcore)
[![Anaconda-Server Badge](https://img.shields.io/badge/Anaconda.org-2.9.0-blue.svg)](https://anaconda.org/conda-forge/esmvalcore)
[![Github Actions Test](https://github.com/ESMValGroup/ESMValCore/actions/workflows/run-tests.yml/badge.svg)](https://github.com/ESMValGroup/ESMValCore/actions/workflows/run-tests.yml)

![esmvaltoollogo](https://raw.githubusercontent.com/ESMValGroup/ESMValCore/main/doc/figures/ESMValTool-logo-2.png)
Expand Down
129 changes: 66 additions & 63 deletions conda-linux-64.lock

Large diffs are not rendered by default.

14 changes: 13 additions & 1 deletion doc/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,19 @@
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
#
# Avoid the following warning issued by pydata_sphinx_theme:
#
# "WARNING: The default value for `navigation_with_keys` will change to `False`
# in the next release. If you wish to preserve the old behavior for your site,
# set `navigation_with_keys=True` in the `html_theme_options` dict in your
# `conf.py` file.Be aware that `navigation_with_keys = True` has negative
# accessibility implications:
# https://github.com/pydata/pydata-sphinx-theme/issues/1492"
# Short synopsis of said issue: as of now, left/right keys take one
# to the previous/next page instead of scrolling horizontally; this
# should be fixed upstream, then we can set again navigation with keys True
html_theme_options = {"navigation_with_keys": False}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
Expand Down
12 changes: 0 additions & 12 deletions doc/contributing.rst
Original file line number Diff line number Diff line change
Expand Up @@ -550,18 +550,6 @@ and the result of the tests ran by GitHub Actions can be viewed on the
of the repository (to learn more about the Github-hosted runners, please have a look
the `documentation <https://docs.github.com/en/actions/using-github-hosted-runners>`__).

When opening a pull request, if you wish to run the Github Actions `Test <https://github.com/ESMValGroup/ESMValCore/actions/workflows/run-tests.yml>`__ test,
you can activate it via a simple comment containing the @runGAtests tag
(e.g. "@runGAtests" or "@runGAtests please run" - in effect, tagging the runGAtests
bot that will start the test automatically). This is useful
for checking whether a feature included in the Pull Request, and that is covered
by the test suite, works across the supported Python versions, on both Linux and OSX.
The test is currently deactivated, so before triggering the test via comment, make sure you activate
the test in the main `Actions page <https://github.com/ESMValGroup/ESMValCore/actions>`__
(click on Test via PR Comment and activate it); also be sure to deactivate it afterwards
(the GitHub API still needs a bit more development, and currently it triggers
the test for **each comment** irrespective of PR, which is why this needs to be activated/deactivated).

The configuration of the tests run by CircleCI can be found in the directory
`.circleci <https://github.com/ESMValGroup/ESMValCore/blob/main/.circleci>`__,
while the configuration of the tests run by GitHub Actions can be found in the
Expand Down
2 changes: 1 addition & 1 deletion esmvalcore/_main.py
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ def __init__(self):
self.__setattr__(entry_point.name, entry_point.load()())

def version(self):
"""Show versions of all packages that conform ESMValTool.
"""Show versions of all packages that form ESMValTool.
In particular, this command will show the version ESMValCore and
any other package that adds a subcommand to 'esmvaltool'
Expand Down
26 changes: 5 additions & 21 deletions esmvalcore/_recipe/from_datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,23 +160,6 @@ def _group_identical_facets(variable: Mapping[str, Any]) -> Recipe:
return result


class _SortableDict(dict):
"""A `dict` class that can be sorted."""

def __lt__(self, other):
return tuple(self.items()) < tuple(other.items())


def _change_dict_type(item, dict_type):
"""Change the dict type in a nested structure."""
change_dict_type = partial(_change_dict_type, dict_type=dict_type)
if isinstance(item, dict):
return dict_type((k, change_dict_type(v)) for k, v in item.items())
if isinstance(item, (list, tuple, set)):
return type(item)(change_dict_type(elem) for elem in item)
return item


def _group_ensemble_members(dataset_facets: Iterable[Facets]) -> list[Facets]:
"""Group ensemble members.
Expand All @@ -185,21 +168,22 @@ def _group_ensemble_members(dataset_facets: Iterable[Facets]) -> list[Facets]:
"""

def grouper(facets):
return tuple((k, facets[k]) for k in sorted(facets) if k != 'ensemble')
return sorted(
(f, str(v)) for f, v in facets.items() if f != 'ensemble')

result = []
dataset_facets = _change_dict_type(dataset_facets, _SortableDict)
dataset_facets = sorted(dataset_facets, key=grouper)
for group_facets, group in itertools.groupby(dataset_facets, key=grouper):
for _, group_iter in itertools.groupby(dataset_facets, key=grouper):
group = list(group_iter)
ensembles = [f['ensemble'] for f in group if 'ensemble' in f]
group_facets = group[0]
if not ensembles:
result.append(dict(group_facets))
else:
for ensemble in _group_ensemble_names(ensembles):
facets = dict(group_facets)
facets['ensemble'] = ensemble
result.append(facets)
result = _change_dict_type(result, dict)
return result


Expand Down
159 changes: 92 additions & 67 deletions esmvalcore/preprocessor/_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,13 @@
import os
from itertools import groupby
from pathlib import Path
from typing import Optional
from typing import Optional, NamedTuple
from warnings import catch_warnings, filterwarnings

import cftime
import iris
import iris.aux_factory
import iris.exceptions
import isodate
import numpy as np
import yaml
from cf_units import suppress_errors
Expand All @@ -22,7 +22,6 @@
from esmvalcore.iris_helpers import merge_cube_attributes

from .._task import write_ncl_settings
from ._time import clip_timerange

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -206,70 +205,96 @@ def _concatenate_cubes(cubes, check_level):
return concatenated


def _check_time_overlaps(cubes):
    """Handle time overlaps between consecutive cubes.

    Walks over consecutive pairs of cubes and, whenever their time
    points intersect, either drops the cube whose range is fully
    covered by its neighbour or clips the earlier cube (via
    ``clip_timerange``) so the overlap is removed.

    Parameters
    ----------
    cubes : list
        Cubes belonging to a single timeseries, ordered by starting
        point, possibly overlapping in time.

    Returns
    -------
    list
        The input list with overlaps resolved; cubes may have been
        removed or replaced by time-sliced copies in place.
    """
    # Raw time coordinate points of every cube, compared pairwise below.
    times = [cube.coord('time').core_points() for cube in cubes]
    for index, _ in enumerate(times[:-1]):
        # Time points shared by cube `index` and its successor.
        overlap = np.intersect1d(times[index], times[index + 1])
        if overlap.size != 0:
            overlapping_cubes = cubes[index:index + 2]
            time_1 = overlapping_cubes[0].coord('time').core_points()
            time_2 = overlapping_cubes[1].coord('time').core_points()

            # case 1: both cubes start at the same time -> return longer cube
            if time_1[0] == time_2[0]:
                if time_1[-1] <= time_2[-1]:
                    cubes.pop(index)
                    discarded_cube_index = 0
                    used_cube_index = 1
                else:
                    cubes.pop(index + 1)
                    discarded_cube_index = 1
                    used_cube_index = 0
                logger.debug(
                    "Both cubes start at the same time but cube %s "
                    "ends before %s",
                    overlapping_cubes[discarded_cube_index],
                    overlapping_cubes[used_cube_index],
                )
                logger.debug(
                    "Cube %s contains all needed data so using it fully",
                    overlapping_cubes[used_cube_index],
                )

            # case 2: cube1 starts before cube2
            # case 2.1: cube1 ends after cube2 -> return cube1
            elif time_1[-1] > time_2[-1]:
                cubes.pop(index + 1)
                logger.debug("Using only data from %s", overlapping_cubes[0])

            # case 2.2: cube1 ends before cube2 -> use full cube2
            # and shorten cube1
            else:
                # Keep only cube1's time points that are NOT in the
                # overlap, then translate them back to dates for clipping.
                new_time = np.delete(
                    time_1,
                    np.argwhere(np.in1d(time_1, overlap)),
                )
                new_dates = overlapping_cubes[0].coord('time').units.num2date(
                    new_time)
                logger.debug(
                    "Extracting time slice between %s and %s from cube %s "
                    "to use it for concatenation with cube %s",
                    new_dates[0],
                    new_dates[-1],
                    overlapping_cubes[0],
                    overlapping_cubes[1],
                )

                # Basic (compact) ISO-8601 dates for clip_timerange's
                # `start/end` range syntax.
                start_point = isodate.date_isoformat(
                    new_dates[0], format=isodate.isostrf.DATE_BAS_COMPLETE)
                end_point = isodate.date_isoformat(
                    new_dates[-1], format=isodate.isostrf.DATE_BAS_COMPLETE)
                new_cube = clip_timerange(overlapping_cubes[0],
                                          f'{start_point}/{end_point}')

                cubes[index] = new_cube
    return cubes
class _TimesHelper:

def __init__(self, time):
self.times = time.core_points()
self.units = str(time.units)

def __getattr__(self, name):
return getattr(self.times, name)

def __len__(self):
return len(self.times)

def __getitem__(self, key):
return self.times[key]


def _check_time_overlaps(cubes: iris.cube.CubeList) -> iris.cube.CubeList:
    """Handle time overlaps.

    Parameters
    ----------
    cubes : iris.cube.CubeList
        A list of cubes belonging to a single timeseries,
        ordered by starting point with possible overlaps.

    Returns
    -------
    iris.cube.CubeList
        A list of cubes belonging to a single timeseries,
        ordered by starting point with no overlaps.
    """
    if len(cubes) < 2:
        return cubes

    class _TrackedCube(NamedTuple):
        # Bundles a cube with its time coordinate and first/last time
        # points so overlap checks don't recompute them per comparison.
        cube: iris.cube.Cube
        times: iris.coords.DimCoord
        start: float
        end: float

        @classmethod
        def from_cube(cls, cube):
            """Construct tracked cube."""
            times = cube.coord("time")
            start, end = times.core_points()[[0, -1]]
            return cls(cube, times, start, end)

    new_cubes = iris.cube.CubeList()
    current_cube = _TrackedCube.from_cube(cubes[0])
    for new_cube in map(_TrackedCube.from_cube, cubes[1:]):
        if new_cube.start > current_cube.end:
            # no overlap, use current cube and start again from new cube
            logger.debug("Using %s", current_cube.cube)
            new_cubes.append(current_cube.cube)
            current_cube = new_cube
            continue
        # overlap
        if current_cube.end > new_cube.end:
            # current cube ends after new one, just forget new cube
            logger.debug(
                "Discarding %s because the time range "
                "is already covered by %s", new_cube.cube, current_cube.cube)
            continue
        if new_cube.start == current_cube.start:
            # new cube completely covers current one
            # forget current cube
            current_cube = new_cube
            logger.debug(
                "Discarding %s because the time range is covered by %s",
                current_cube.cube, new_cube.cube)
            continue
        # new cube ends after current one,
        # use all of new cube, and shorten current cube to
        # eliminate overlap with new cube
        # cut_index: index just past the last time point of the current
        # cube that lies strictly before the new cube's start, so the
        # slice below keeps only the non-overlapping leading part.
        cut_index = cftime.time2index(
            new_cube.start,
            _TimesHelper(current_cube.times),
            current_cube.times.units.calendar,
            select="before",
        ) + 1
        logger.debug("Using %s shortened to %s due to overlap",
                     current_cube.cube,
                     current_cube.times.cell(cut_index).point)
        new_cubes.append(current_cube.cube[:cut_index])
        current_cube = new_cube

    # The final tracked cube is always used in full.
    logger.debug("Using %s", current_cube.cube)
    new_cubes.append(current_cube.cube)

    return new_cubes


def _fix_calendars(cubes):
Expand Down
Loading

0 comments on commit 79d6c2f

Please sign in to comment.