diff --git a/CHANGELOG.rst b/CHANGELOG.rst index abfc83ece..f476f6d7e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,7 +4,7 @@ Changelog v0.55.0 (unreleased) -------------------- -Contributors to this version: Juliette Lavoie (:user:`juliettelavoie`), Trevor James Smith (:user:`Zeitsperre`). +Contributors to this version: Juliette Lavoie (:user:`juliettelavoie`), Trevor James Smith (:user:`Zeitsperre`), Sascha Hofmann (:user:`saschahofmann`). New indicators ^^^^^^^^^^^^^^ @@ -16,6 +16,7 @@ New features and enhancements * New function ``ensemble.partition.general_partition`` (:pull:`2035`) * Added a new ``xclim.indices.generic.bivariate_count_occurrences`` function to count instances where operations and performed and validated for two variables. (:pull:`2030`). * `xclim` now tracks energy usage and carbon emissions ("last run", "average", and "total") during CI workflows using the `eco-ci-energy-estimation` GitHub Action. (:pull:`2046`). +* ``xclim.testing.helpers.test_timeseries`` now accepts a `calendar` argument that is forwarded to ``xr.cftime_range``. (:pull:`2019`). Internal changes ^^^^^^^^^^^^^^^^ @@ -23,6 +24,10 @@ Internal changes * Adjusted the ``TestOfficialYaml`` test to use a dynamic method for finding the installed location of `xclim`. (:pull:`2028`). * Adjusted two tests for better handling when running in Windows environments. (:pull:`2057`). +Bug fixes +^^^^^^^^^ +* Fixed a bug in ``xclim.sdba.Grouper.get_index`` that didn't correctly interpolate seasonal values (:issue:`2014`, :pull:`2019`). + v0.54.0 (2024-12-16) -------------------- Contributors to this version: Trevor James Smith (:user:`Zeitsperre`), Pascal Bourgault (:user:`aulemahal`), Éric Dupuis (:user:`coxipi`), Sascha Hofmann (:user:`saschahofmann`). diff --git a/docs/installation.rst b/docs/installation.rst index d83d5d7fd..d045b99fa 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -160,4 +160,4 @@ To create a conda environment including `xclim`'s dependencies and several optio conda env create -n my_xclim_env python=3.10 --file=environment.yml conda activate my_xclim_env - (my_xclim_env) python -m pip install -e --no-deps . + (my_xclim_env) python -m pip install --no-deps -e . diff --git a/docs/notebooks/sdba-advanced.ipynb b/docs/notebooks/sdba-advanced.ipynb index 1acd134dc..3e55fe3c8 100644 --- a/docs/notebooks/sdba-advanced.ipynb +++ b/docs/notebooks/sdba-advanced.ipynb @@ -16,20 +16,17 @@ "\n", "Some `xclim.sdba`-specific tips:\n", "\n", - "* Most adjustment method will need to perform operation on the whole `time` coordinate, so it is best to optimize chunking along the other dimensions. This is often different from how public data is shared, where more universal 3D chunks are used.\n", + "- Most adjustment method will need to perform operation on the whole `time` coordinate, so it is best to optimize chunking along the other dimensions. This is often different from how public data is shared, where more universal 3D chunks are used.\n", "\n", - " Chunking of outputs can be controlled in xarray's [to_netcdf](https://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_netcdf.html?highlight=to_netcdf#xarray.Dataset.to_netcdf). We also suggest using [Zarr](https://zarr.readthedocs.io/en/stable/) files. According to [its creators](https://ui.adsabs.harvard.edu/abs/2018AGUFMIN33A..06A/abstract), `zarr` stores should give better performances, especially because of their better ability for parallel I/O. 
See [Dataset.to_zarr](https://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_zarr.html?highlight=to_zarr#xarray.Dataset.to_zarr) and this useful [rechunking package](https://rechunker.readthedocs.io).\n", + " Chunking of outputs can be controlled in xarray's [to_netcdf](https://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_netcdf.html?highlight=to_netcdf#xarray.Dataset.to_netcdf). We also suggest using [Zarr](https://zarr.readthedocs.io/en/stable/) files. According to [its creators](https://ui.adsabs.harvard.edu/abs/2018AGUFMIN33A..06A/abstract), `zarr` stores should give better performances, especially because of their better ability for parallel I/O. See [Dataset.to_zarr](https://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_zarr.html?highlight=to_zarr#xarray.Dataset.to_zarr) and this useful [rechunking package](https://rechunker.readthedocs.io).\n", "\n", + "- One of the main bottleneck for adjustments with small groups is that dask needs to build and optimize an enormous task graph. This issue has been greatly reduced with xclim 0.27 and the use of `map_blocks` in the adjustment methods. However, not all adjustment methods use this optimized syntax.\n", "\n", - "* One of the main bottleneck for adjustments with small groups is that dask needs to build and optimize an enormous task graph. This issue has been greatly reduced with xclim 0.27 and the use of `map_blocks` in the adjustment methods. However, not all adjustment methods use this optimized syntax.\n", + " In order to help dask, one can split the processing in parts. For splitting training and adjustment, see [the section below](#Initializing-an-Adjustment-object-from-a-training-dataset).\n", "\n", - " In order to help dask, one can split the processing in parts. For splitting training and adjustment, see [the section below](#Initializing-an-Adjustment-object-from-a-training-dataset).\n", + "- Another massive bottleneck of parallelization of `xarray` is the thread-locking behaviour of some methods. It is quite difficult to isolate and avoid these locking instances, so one of the best workarounds is to use `dask` configurations with many _processes_ and few _threads_. The former do not share memory and thus are not impacted when a lock is activated from a thread in another worker. However, this adds many memory transfer operations and, by experience, reduces dask's ability to parallelize some pipelines. Such a dask Client is usually created with a large `n_workers` and a small `threads_per_worker`.\n", "\n", - "\n", - "* Another massive bottleneck of parallelization of `xarray` is the thread-locking behaviour of some methods. It is quite difficult to isolate and avoid these locking instances, so one of the best workarounds is to use `dask` configurations with many _processes_ and few _threads_. The former do not share memory and thus are not impacted when a lock is activated from a thread in another worker. However, this adds many memory transfer operations and, by experience, reduces dask's ability to parallelize some pipelines. Such a dask Client is usually created with a large `n_workers` and a small `threads_per_worker`.\n", - "\n", - "\n", - "* Sometimes, datasets have auxiliary coordinates (for example : lat / lon in a rotated pole dataset). Xarray handles these variables as data variables and will **not** load them if dask is used. However, in some operations, `xclim` or `xarray` will trigger access to those variables, triggering computations each time, since they are `dask`-based. 
To avoid this behaviour, one can load the coordinates, or simply remove them from the inputs." + "- Sometimes, datasets have auxiliary coordinates (for example : lat / lon in a rotated pole dataset). Xarray handles these variables as data variables and will **not** load them if dask is used. However, in some operations, `xclim` or `xarray` will trigger access to those variables, triggering computations each time, since they are `dask`-based. To avoid this behaviour, one can load the coordinates, or simply remove them from the inputs.\n" ] }, { @@ -42,7 +39,7 @@ "\n", "In xclim's implementation, the user can choose between local _constancy_ ($d=0$, local estimates are weighted averages) and local _linearity_ ($d=1$, local estimates are taken from linear regressions). Two weighting functions are currently implemented : \"tricube\" ($w(x) = (1 - x^3)^3$) and \"gaussian\" ($w(x) = e^{-x^2 / 2\\sigma^2}$). Finally, the number of Cleveland's _robustifying iterations_ is controllable through `niter`. After computing an estimate of $y(x)$, the weights are modulated by a function of the distance between the estimate and the points and the procedure is started over. These iterations are made to weaken the effect of outliers on the estimate.\n", "\n", - "The next example shows the application of the LOESS to daily temperature data. The black line and dot are the estimated $y$, outputs of the `sdba.loess.loess_smoothing` function, using local linear regression (passing $d = 1$), a window spanning 20% ($f = 0.2$) of the domain, the \"tricube\" weighting function and only one iteration. The red curve illustrates the weighting function on January 1st 2014, where the red circles are the nearest-neighbours used in the estimation." + "The next example shows the application of the LOESS to daily temperature data. The black line and dot are the estimated $y$, outputs of the `sdba.loess.loess_smoothing` function, using local linear regression (passing $d = 1$), a window spanning 20% ($f = 0.2$) of the domain, the \"tricube\" weighting function and only one iteration. The red curve illustrates the weighting function on January 1st 2014, where the red circles are the nearest-neighbours used in the estimation.\n" ] }, { @@ -114,12 +111,11 @@ "source": [ "LOESS smoothing can suffer from heavy boundary effects. On the previous graph, we can associate the strange bend on the left end of the line to them. The next example shows a stronger case. Usually, $\\frac{f}{2}N$ points on each side should be discarded. On the other hand, LOESS has the advantage of always staying within the bounds of the data.\n", "\n", - "\n", "### LOESS Detrending\n", "\n", "In climate science, it can be used in the detrending process. `xclim` provides `sdba.detrending.LoessDetrend` in order to compute trend with the LOESS smoothing and remove them from timeseries.\n", "\n", - "First we create some toy data with a sinusoidal annual cycle, random noise and a linear temperature increase." + "First we create some toy data with a sinusoidal annual cycle, random noise and a linear temperature increase.\n" ] }, { @@ -147,7 +143,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Then we compute the trend on the data. Here, we compute on the whole timeseries (`group='time'`) with the parameters suggested above." + "Then we compute the trend on the data. 
Here, we compute on the whole timeseries (`group='time'`) with the parameters suggested above.\n"
    ]
   },
   {
@@ -184,7 +180,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "As said earlier, this example shows how the Loess has strong boundary effects. It is recommended to remove the $\frac{f}{2}\cdot N$ outermost points on each side, as shown by the gray bars in the graph above."
+    "As said earlier, this example shows how the LOESS has strong boundary effects. It is recommended to remove the $\frac{f}{2}\cdot N$ outermost points on each side, as shown by the gray bars in the graph above.\n"
    ]
   },
   {
@@ -193,7 +189,7 @@
    "source": [
     "## Initializing an Adjustment object from a training dataset\n",
     "\n",
-    "For large scale uses, when the training step deserves its own computation and write to disk, or simply when there are multiples `sim` to be adjusted with the same training, it is helpful to be able to instantiate the Adjustment objects from the training dataset itself. This trick relies on a global attribute \"adj_params\" set on the training dataset."
+    "For large scale uses, when the training step deserves its own computation and a write to disk, or simply when there are multiple `sim` to be adjusted with the same training, it is helpful to be able to instantiate the Adjustment objects from the training dataset itself. This trick relies on a global attribute \"adj_params\" set on the training dataset.\n"
    ]
   },
   {
@@ -252,7 +248,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The trained `QDM` exposes the training data in the `ds` attribute, Here, we will write it to disk, read it back and initialize a new object from it. Notice the `adj_params` in the dataset, that has the same value as the repr string printed just above. Also, notice the `_xclim_adjustment` attribute that contains a JSON string, so we can rebuild the adjustment object later."
+    "The trained `QDM` exposes the training data in the `ds` attribute. Here, we will write it to disk, read it back and initialize a new object from it. Notice the `adj_params` in the dataset, which has the same value as the repr string printed just above. Also, notice the `_xclim_adjustment` attribute that contains a JSON string, so we can rebuild the adjustment object later.\n"
    ]
   },
   {
@@ -284,7 +280,7 @@
    "source": [
     "In the case above, creating a full object from the dataset doesn't make the most sense since we are in the same python session, with the \"old\" object still available. This method effective when we reload the training data in a different python session, say on another computer. **However, take note that there is no retro-compatibility insurance.** If the `QuantileDeltaMapping` class was to change in a new xclim version, one would not be able to create the new object from a dataset saved with the old one.\n",
     "\n",
-    "For the case where we stay in the same python session, it is still useful to trigger the dask computations. For small datasets, that could mean a simple `QDM.ds.load()`, but sometimes even the training data is too large to be fully loaded in memory.
In that case, we could also do:\n" ] }, { @@ -319,7 +315,7 @@ "\n", "For the moment, this feature is still under construction and only a few `Adjustment` actually provide these extra outputs. Please open issues on the GitHub repo if you have needs or ideas of interesting diagnostic variables.\n", "\n", - "For example, `QDM.adjust` adds `sim_q`, which gives the quantile of each element of `sim` within its group." + "For example, `QDM.adjust` adds `sim_q`, which gives the quantile of each element of `sim` within its group.\n" ] }, { @@ -364,7 +360,7 @@ "\n", "In the following example, `QDM` is configured with `group=\"time.dayofyear\"` which will perform the adjustment for each day of year (doy) separately. When using `stack_periods` the extracted windows are all concatenated along the new `period` axis, and they all share the same time coordinate. As such, for the `doy` information to make sense, we must use a calendar with uniform year lengths. Otherwise, the `doy` values would shift one day at each leap year.\n", "\n", - "" + "\n" ] }, { @@ -396,7 +392,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here, we retrieve the full timeseries (minus the lasy year that couldn't fit in any window)." + "Here, we retrieve the full timeseries (minus the lasy year that couldn't fit in any window).\n" ] }, { @@ -419,7 +415,7 @@ "\n", "We will transform the variables that need it to the additive space, adding some jitter in the process to avoid $log(0)$ situations. Then, we will stack the different variables into a single `DataArray`, allowing us to use `PrincipalComponents` in a multi-variate way. Following the PCA, a simple quantile-mapping method is used, both adjustment acting on the residuals, while the mean of the simulated trend is adjusted on its own. Each step will be explained.\n", "\n", - "First, open the data, convert the calendar and the units. Because we will perform adjustments on \"dayofyear\" groups (with a window), keeping standard calendars results in an extra \"dayofyear\" with only a quarter of the data. It's usual to transform to a \"noleap\" calendar, which drops the 29th of February, as it only has a small impact on the data." + "First, open the data, convert the calendar and the units. Because we will perform adjustments on \"dayofyear\" groups (with a window), keeping standard calendars results in an extra \"dayofyear\" with only a quarter of the data. It's usual to transform to a \"noleap\" calendar, which drops the 29th of February, as it only has a small impact on the data.\n" ] }, { @@ -452,13 +448,14 @@ "metadata": {}, "source": [ "### 1. Jitter, additive space transformation and variable stacking\n", + "\n", "Here, `tasmax` is already ready to be adjusted in an additive way, because all data points are far from the physical zero (0 K). This is not the case for `pr`, which is why we want to transform that variable to the additive space, to avoid splitting our workflow in two. For `pr` the \"log\" transformation is simply:\n", "\n", "$$ pr' = \\ln\\left(pr - b\\right) $$\n", "\n", "Where $b$ is the lower bound, here 0 mm/d. However, we could have exact zeros (0 mm/d) in the datasets, which will translate into $-\\infty$. To avoid this, we simply replace the smallest values by a random distribution of very small, but not problematic, values. 
In the following, all values below 0.1 mm/d are replaced by a uniform random distribution of values within the range (0, 0.1) mm/d (bounds excluded).\n", "\n", - "Finally, the variables are stacked together into a single `DataArray`." + "Finally, the variables are stacked together into a single `DataArray`.\n" ] }, { @@ -492,9 +489,10 @@ "metadata": {}, "source": [ "### 2. Get residuals and trends\n", - "The adjustment will be performed on residuals only. The adjusted timeseries `sim` will be detrended with the LOESS routine described above. Because of the short length of `ref` and `hist` and the potential boundary effects of using LOESS with them, we compute the 30-year mean. In other words, instead of _detrending_ inputs, we are _normalizing_ those inputs.\n", "\n", - "While the residuals are adjusted with `PrincipalComponents` and `EmpiricalQuantileMapping`, the trend of `sim` still needs to be offset according to the means of `ref` and `hist`. This is similar to what `DetrendedQuantileMapping` does. The offset step could have been done on the trend itself or at the end on `scen`, it doesn't really matter. We do it here because it keeps it close to where the `scaling` is computed." + "The adjustment will be performed on residuals only. The adjusted timeseries `sim` will be detrended with the LOESS routine described above. Because of the short length of `ref` and `hist` and the potential boundary effects of using LOESS with them, we compute the 30-year mean. In other words, instead of _detrending_ inputs, we are _normalizing_ those inputs.\n", + "\n", + "While the residuals are adjusted with `PrincipalComponents` and `EmpiricalQuantileMapping`, the trend of `sim` still needs to be offset according to the means of `ref` and `hist`. This is similar to what `DetrendedQuantileMapping` does. The offset step could have been done on the trend itself or at the end on `scen`, it doesn't really matter. We do it here because it keeps it close to where the `scaling` is computed.\n" ] }, { @@ -530,7 +528,8 @@ "metadata": {}, "source": [ "### 3. Adjustments\n", - "Following, Alavoine et Grenier (2022), we decided to perform the multivariate Principal Components adjustment first and then re-adjust with the simple Quantile-Mapping." + "\n", + "Following, Alavoine et Grenier (2022), we decided to perform the multivariate Principal Components adjustment first and then re-adjust with the simple Quantile-Mapping.\n" ] }, { @@ -560,7 +559,7 @@ " kind=\"+\",\n", ")\n", "\n", - "scen2_res = EQM.adjust(scen1_res, interp=\"linear\", extrapolation=\"constant\")" + "scen2_res = EQM.adjust(scen1_res, extrapolation=\"constant\")" ] }, { @@ -568,7 +567,8 @@ "metadata": {}, "source": [ "### 4. Re-trend and transform back to the physical space\n", - "Add back the trend (which includes the scaling), unstack the variables to a dataset and transform `pr` back to the physical space. All functions have conserved and handled the attributes, so we don't need to repeat the additive space bounds. The annual cycle of both variables on the reference period in Vancouver is plotted to confirm the adjustment adds a positive effect." + "\n", + "Add back the trend (which includes the scaling), unstack the variables to a dataset and transform `pr` back to the physical space. All functions have conserved and handled the attributes, so we don't need to repeat the additive space bounds. 
The annual cycle of both variables on the reference period in Vancouver is plotted to confirm the adjustment adds a positive effect.\n"
    ]
   },
   {
@@ -622,14 +622,14 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Frequency adaption with a rolling window"
+    "# Frequency adaptation with a rolling window\n"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "In the previous example, we performed bias adjustment with a rolling window. Here we show how to include frequency adaptation (see `sdba.ipynb` for the simple case `group=\"time\"`). We first generate the same precipitation dataset used in `sdba.ipynb`"
+    "In the previous example, we performed bias adjustment with a rolling window. Here we show how to include frequency adaptation (see `sdba.ipynb` for the simple case `group=\"time\"`). We first generate the same precipitation dataset used in `sdba.ipynb`.\n"
    ]
   },
   {
@@ -665,7 +665,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Bias adjustment on a rolling window can be performed in the same way as shown in `sdba.ipynb`, but instead of being a single string precising the time grouping (e.g. `time.month`), the `group` argument is built with `sdba.Grouper` function"
+    "Bias adjustment on a rolling window can be performed in the same way as shown in `sdba.ipynb`, but instead of being a single string specifying the time grouping (e.g. `time.month`), the `group` argument is built with the `sdba.Grouper` function.\n"
    ]
   },
   {
@@ -698,9 +698,9 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "In the figure above, `scen` occasionally has small peaks where `sim` is 0, indicating that there are more \"dry days\" (days with almost no precipitation) in `hist` than in `ref`. The frequency-adaptation [Themeßl et al. (2010)](https://doi.org/10.1007/s10584-011-0224-4) performed in the step above only worked partially. \n",
+    "In the figure above, `scen` occasionally has small peaks where `sim` is 0, indicating that there are more \"dry days\" (days with almost no precipitation) in `hist` than in `ref`. The frequency adaptation of [Themeßl et al. (2010)](https://doi.org/10.1007/s10584-011-0224-4) performed in the step above only worked partially.\n",
     "\n",
-    "The reason for this is the following. The first step above combines precipitations in 365 overlapping blocks of 31 days * Y years, one block for each day of the year. Each block is adapted, and the 16th day-of-year slice (at the center of the block) is assigned to the corresponding day-of-year in the adapted dataset `hist_ad`.
As we proceed to the training, we re-form those 31 days * Y years blocks, but this step does not invert the last one: There can still be more zeroes in the simulation than in the reference.\n",
     "\n",
     "To alleviate this issue, another way of proceeding is to perform a frequency adaptation on the blocks, and then use the same blocks in the training step, as we show below.\n"
    ]
   },
   {
@@ -738,9 +738,9 @@
     "\n",
     "It can be useful to perform diagnostic tests on adjusted simulations to assess if the bias correction method is working properly, or to compare two different bias correction techniques.\n",
     "\n",
-    "A diagnostic test includes calculations of a property (mean, 20-year return value, annual cycle amplitude, ...) on the simulation and on the scenario (adjusted simulation), then a measure (bias, relative bias, ratio, ...) of the difference. Usually, the property collapse the time dimension of the simulation/scenario and returns one value by grid point.\n",
+    "A diagnostic test includes calculations of a property (mean, 20-year return value, annual cycle amplitude, ...) on the simulation and on the scenario (adjusted simulation), then a measure (bias, relative bias, ratio, ...) of the difference. Usually, the property collapses the time dimension of the simulation/scenario and returns one value per grid point.\n",
     "\n",
-    "You'll find those in ``xclim.sdba.properties`` and ``xclim.sdba.measures``, where they are implemented as special subclasses of xclim's ``Indicator``, which means they can be worked with the same way as conventional indicators (used in YAML modules for example)."
+    "You'll find those in `xclim.sdba.properties` and `xclim.sdba.measures`, where they are implemented as special subclasses of xclim's `Indicator`, which means they can be worked with the same way as conventional indicators (used in YAML modules for example).\n"
    ]
   },
   {
@@ -820,7 +820,7 @@
    "metadata": {},
    "source": [
     "It is possible the change the 'group' of the property from 'time' to 'time.season' or 'time.month'.\n",
-    " This will return 4 or 12 values per grid point, respectively."
+    "This will return 4 or 12 values per grid point, respectively.\n"
    ]
   },
   {
diff --git a/src/xclim/sdba/_adjustment.py b/src/xclim/sdba/_adjustment.py
index 4398cec0e..291659e12 100644
--- a/src/xclim/sdba/_adjustment.py
+++ b/src/xclim/sdba/_adjustment.py
@@ -8,6 +8,7 @@
 from __future__ import annotations
 
+import warnings
 from collections.abc import Callable, Sequence
 
 import numpy as np
@@ -587,6 +588,13 @@ def qdm_adjust(ds: xr.Dataset, *, group, interp, extrapolation, kind) -> xr.Data
     sim : Data to adjust.
     """
     sim_q = group.apply(u.rank, ds.sim, main_only=True, pct=True)
+    if group.prop and interp != "nearest":
+
+        msg = (
+            f"Using a {interp} interpolation with QuantileDeltaMapping might create sudden jumps between different"
+            " groups. See discussion https://github.com/Ouranosinc/xclim/discussions/2048 for more information."
+        )
+        warnings.warn(msg)
     af = u.interp_on_quantiles(
         sim_q,
         ds.quantiles,
diff --git a/src/xclim/sdba/base.py b/src/xclim/sdba/base.py
index ca5f93d02..0c2d3f626 100644
--- a/src/xclim/sdba/base.py
+++ b/src/xclim/sdba/base.py
@@ -179,7 +179,9 @@ def prop_name(self):
         """Create a significant name for the grouping."""
         return "year" if self.prop == "group" else self.prop
 
-    def get_coordinate(self, ds: xr.Dataset | None = None) -> xr.DataArray:
+    def get_coordinate(
+        self, ds: xr.Dataset | xr.DataArray | None = None
+    ) -> xr.DataArray:
         """Return the coordinate as in the output of group.apply.
Currently, only implemented for groupings with prop == `month` or `dayofyear`. @@ -293,21 +295,39 @@ def get_index( return da[self.dim].rename("group") ind = da.indexes[self.dim] - if self.prop == "week": - i = da[self.dim].copy(data=ind.isocalendar().week).astype(int) - elif self.prop == "season": - i = da[self.dim].copy(data=ind.month % 12 // 3) - else: - i = getattr(ind, self.prop) - if not np.issubdtype(i.dtype, np.integer): - raise ValueError( - f"Index {self.name} is not of type int (rather {i.dtype}), " - f"but {self.__class__.__name__} requires integer indexes." - ) + if interp and self.dim == "time": + if self.prop == "month": + i = ind.month - 0.5 + ind.day / ind.days_in_month - if interp and self.dim == "time" and self.prop == "month": - i = ind.month - 0.5 + ind.day / ind.days_in_month + elif self.prop == "season": + calendar = ind.calendar if hasattr(ind, "calendar") else "standard" + length_year = ( + 360 + if calendar == "360_day" + else 365 + (0 if calendar == "noleap" else ind.is_leap_year) + ) + # This is assuming that seasons have the same length. The factor 1/6 comes from the fact that + # the first season is shifted by 1 month the but the middle of the season is shifted in the other direction + # by half a month so -(1/12-1/24)*4 = -1/6 + i = ind.dayofyear / length_year * 4 - 1 / 6 + else: + raise ValueError( + f"Interpolation is not supported for {self.dim}.{self.prop}." + ) + else: + if self.prop == "week": + i = da[self.dim].copy(data=ind.isocalendar().week).astype(int) + elif self.prop == "season": + i = da[self.dim].copy(data=ind.month % 12 // 3) + else: + i = getattr(ind, self.prop) + + if not np.issubdtype(i.dtype, np.integer): + raise ValueError( + f"Index {self.name} is not of type int (rather {i.dtype}), " + f"but {self.__class__.__name__} requires integer indexes." + ) xi = xr.DataArray( i, diff --git a/src/xclim/sdba/nbutils.py b/src/xclim/sdba/nbutils.py index 28fad5647..e96c6e5cd 100644 --- a/src/xclim/sdba/nbutils.py +++ b/src/xclim/sdba/nbutils.py @@ -393,7 +393,7 @@ def _extrapolate_on_quantiles( Arguments are the same as _interp_on_quantiles_2D. """ bnds = _first_and_last_nonnull(oldx) - xp = np.arange(bnds.shape[0]) + xp = oldg[:, 0] toolow = newx < np.interp(newg, xp, bnds[:, 0]) toohigh = newx > np.interp(newg, xp, bnds[:, 1]) if method == "constant": diff --git a/src/xclim/sdba/utils.py b/src/xclim/sdba/utils.py index f8c2a6d2f..45a1c000b 100644 --- a/src/xclim/sdba/utils.py +++ b/src/xclim/sdba/utils.py @@ -219,6 +219,8 @@ def broadcast( sel.update({group.prop: group.get_index(x, interp=interp != "nearest")}) if sel: + if group.prop == "season": + grouped = grouped.assign_coords(season=map_season_to_int(grouped.season)) # Extract the correct mean factor for each time step. if interp == "nearest": # Interpolate both the time group and the quantile. 
grouped = grouped.sel(sel, method="nearest") @@ -473,11 +475,11 @@ def interp_on_quantiles( output_dtypes=[yq.dtype], ) return out - + prop_coords = group.get_coordinate(newx) if prop not in xq.dims: - xq = xq.expand_dims({prop: group.get_coordinate()}) + xq = xq.expand_dims({prop: prop_coords}) if prop not in yq.dims: - yq = yq.expand_dims({prop: group.get_coordinate()}) + yq = yq.expand_dims({prop: prop_coords}) # Adding the cyclic bounds fails for string coordinates like seasons # That's why we map the seasons to integers diff --git a/src/xclim/testing/helpers.py b/src/xclim/testing/helpers.py index 6b21fba59..7b1df4a90 100644 --- a/src/xclim/testing/helpers.py +++ b/src/xclim/testing/helpers.py @@ -190,6 +190,7 @@ def test_timeseries( freq: str = "D", as_dataset: bool = False, cftime: bool = False, + calendar: str | None = None, ) -> xr.DataArray | xr.Dataset: """ Create a generic timeseries object based on pre-defined dictionaries of existing variables. @@ -210,14 +211,18 @@ def test_timeseries( Whether to return a Dataset or a DataArray. Default is False. cftime : bool Whether to use cftime or not. Default is False. + calendar : str or None + Whether to use a calendar. If a calendar is provided, cftime is used. Returns ------- xr.DataArray or xr.Dataset A DataArray or Dataset with time, lon and lat dimensions. """ - if cftime: - coords = xr.cftime_range(start, periods=len(values), freq=freq) + if calendar or cftime: + coords = xr.cftime_range( + start, periods=len(values), freq=freq, calendar=calendar or "standard" + ) else: coords = pd.date_range(start, periods=len(values), freq=freq) diff --git a/tests/test_bootstrapping.py b/tests/test_bootstrapping.py index 484f7f163..fb98eaf34 100644 --- a/tests/test_bootstrapping.py +++ b/tests/test_bootstrapping.py @@ -23,30 +23,30 @@ class Test_bootstrap: @pytest.mark.slow @pytest.mark.parametrize("use_dask", [True, False]) @pytest.mark.parametrize( - "var,p,index,freq, cftime", + "var,p,index,freq, calendar", ( - ["tas", 98, tg90p, "MS", False], - ["tasmin", 98, tn90p, "YS-JUL", False], - ["tasmax", 98, tx90p, "QS-APR", False], - ["tasmax", 98, tx90p, "QS-APR", True], - ["tasmin", 2, tn10p, "MS", False], - ["tasmax", 2, tx10p, "YS", False], - ["tasmax", 2, tx10p, "YS", True], - ["tas", 2, tg10p, "MS", False], - ["tasmax", 98, warm_spell_duration_index, "MS", False], - ["tasmin", 2, cold_spell_duration_index, "MS", False], - ["pr", 99, days_over_precip_thresh, "MS", False], - ["pr", 98, fraction_over_precip_thresh, "MS", False], - ["pr", 98, fraction_over_precip_thresh, "MS", True], + ["tas", 98, tg90p, "MS", None], + ["tasmin", 98, tn90p, "YS-JUL", None], + ["tasmax", 98, tx90p, "QS-APR", None], + ["tasmax", 98, tx90p, "QS-APR", "standard"], + ["tasmin", 2, tn10p, "MS", None], + ["tasmax", 2, tx10p, "YS", None], + ["tasmax", 2, tx10p, "YS", "standard"], + ["tas", 2, tg10p, "MS", None], + ["tasmax", 98, warm_spell_duration_index, "MS", None], + ["tasmin", 2, cold_spell_duration_index, "MS", None], + ["pr", 99, days_over_precip_thresh, "MS", None], + ["pr", 98, fraction_over_precip_thresh, "MS", None], + ["pr", 98, fraction_over_precip_thresh, "MS", "standard"], ), ) - def test_bootstrap(self, var, p, index, freq, cftime, use_dask, random): + def test_bootstrap(self, var, p, index, freq, calendar, use_dask, random): # -- GIVEN arr = self.ar1( alpha=0.8, n=int(4 * 365.25), random=random, positive_values=(var == "pr") ) climate_var = _test_timeseries( - arr, start="2000-01-01", variable=var, cftime=cftime + arr, start="2000-01-01", variable=var, 
calendar=calendar ) if use_dask: climate_var = climate_var.chunk(dict(time=50)) diff --git a/tests/test_calendar.py b/tests/test_calendar.py index 58b8d5857..7d40e75c9 100644 --- a/tests/test_calendar.py +++ b/tests/test_calendar.py @@ -482,13 +482,13 @@ def test_convert_doy(): ) -@pytest.mark.parametrize("cftime", [True, False]) +@pytest.mark.parametrize("calendar", ["standard", None]) @pytest.mark.parametrize( "w,s,m,f,ss", [(30, 10, None, "YS", 0), (3, 1, None, "QS-DEC", 60), (6, None, None, "MS", 0)], ) -def test_stack_periods(tas_series, cftime, w, s, m, f, ss): - da = tas_series(np.arange(365 * 50), start="2000-01-01", cftime=cftime) +def test_stack_periods(tas_series, calendar, w, s, m, f, ss): + da = tas_series(np.arange(365 * 50), start="2000-01-01", calendar=calendar) da_stck = stack_periods( da, window=w, stride=s, min_length=m, freq=f, align_days=False @@ -502,7 +502,7 @@ def test_stack_periods(tas_series, cftime, w, s, m, f, ss): def test_stack_periods_special(tas_series): - da = tas_series(np.arange(365 * 48 + 12), cftime=True, start="2004-01-01") + da = tas_series(np.arange(365 * 48 + 12), calendar="standard", start="2004-01-01") with pytest.raises(ValueError, match="unaligned day-of-year"): stack_periods(da) diff --git a/tests/test_helpers.py b/tests/test_helpers.py index 8ac4e262f..064227052 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -179,10 +179,10 @@ def test_resample_map_passthrough(tas_series): assert not uses_dask(out) -@pytest.mark.parametrize("cftime", [False, True]) -def test_make_hourly_temperature(tasmax_series, tasmin_series, cftime): - tasmax = tasmax_series(np.array([20]), units="degC", cftime=cftime) - tasmin = tasmin_series(np.array([0]), units="degC", cftime=cftime).expand_dims( +@pytest.mark.parametrize("calendar", [None, "standard"]) +def test_make_hourly_temperature(tasmax_series, tasmin_series, calendar): + tasmax = tasmax_series(np.array([20]), units="degC", calendar=calendar) + tasmin = tasmin_series(np.array([0]), units="degC", calendar=calendar).expand_dims( lat=[0] ) diff --git a/tests/test_sdba/test_adjustment.py b/tests/test_sdba/test_adjustment.py index d9a348ea7..670b24cd0 100644 --- a/tests/test_sdba/test_adjustment.py +++ b/tests/test_sdba/test_adjustment.py @@ -842,7 +842,7 @@ def test_real_data(self, open_dataset): ref, hist, group=Grouper("time.dayofyear", window=31), nquantiles=quantiles ) - scen = EQM.adjust(hist, interp="linear", extrapolation="constant") + scen = EQM.adjust(hist, extrapolation="constant") EX = ExtremeValues.train(ref, hist, cluster_thresh="1 mm/day", q_thresh=0.97) new_scen = EX.adjust(scen, hist, frac=0.000000001) diff --git a/tests/test_sdba/test_base.py b/tests/test_sdba/test_base.py index 255ea061e..5619b5d47 100644 --- a/tests/test_sdba/test_base.py +++ b/tests/test_sdba/test_base.py @@ -50,11 +50,21 @@ def test_grouper_group(tas_series, group, window, nvals): @pytest.mark.parametrize( - "group,interp,val90", - [("time", False, True), ("time.month", False, 3), ("time.month", True, 3.5)], + "group,interp,val90,calendar", + [ + ("time", False, True, None), + ("time.month", False, 3, None), + ("time.month", True, 3.5, None), + ("time.season", False, 1, None), + ("time.season", True, 0.8278688524590164, None), + ("time.month", True, 3.533333333333333, "360_day"), + ("time.month", True, 3.533333333333333, "noleap"), + ("time.season", True, 0.8444444444444444, "360_day"), + ("time.season", True, 0.8305936073059361, "noleap"), + ], ) -def test_grouper_get_index(tas_series, group, interp, val90): - 
-    tas = tas_series(np.ones(366), start="2000-01-01")
+def test_grouper_get_index(tas_series, group, interp, val90, calendar):
+    tas = tas_series(np.ones(366), start="2000-01-01", calendar=calendar)
     grouper = Grouper(group)
     indx = grouper.get_index(tas, interp=interp)
     # 90 is March 31st
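
A minimal usage sketch of the behaviour exercised by the test above, assuming the patched ``xclim.testing.helpers.test_timeseries`` (new ``calendar`` argument) and ``xclim.sdba.Grouper.get_index`` (seasonal interpolation); the series and values below are illustrative only, mirroring the test parametrization::

    import numpy as np

    from xclim.sdba.base import Grouper
    from xclim.testing.helpers import test_timeseries

    # Daily "tas" series on a "noleap" calendar; passing `calendar` makes the
    # helper build its time axis with xr.cftime_range.
    tas = test_timeseries(np.ones(366), variable="tas", start="2000-01-01", calendar="noleap")

    # With interp=True and a seasonal grouping, get_index now returns fractional
    # values derived from the day of year instead of integer season codes.
    idx = Grouper("time.season").get_index(tas, interp=True)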