From 9b52e654bf5b6821c75afc33bfaa1eace97a9c16 Mon Sep 17 00:00:00 2001 From: eschalk Date: Fri, 7 Jun 2024 07:05:33 +0200 Subject: [PATCH 01/10] Mass-replace '> Size:' --- xarray/backends/api.py | 2 +- xarray/coding/cftimeindex.py | 6 +- xarray/core/_aggregations.py | 480 ++++++++++++++--------------- xarray/core/accessor_dt.py | 18 +- xarray/core/accessor_str.py | 140 ++++----- xarray/core/alignment.py | 38 +-- xarray/core/combine.py | 32 +- xarray/core/common.py | 80 ++--- xarray/core/computation.py | 68 ++-- xarray/core/concat.py | 12 +- xarray/core/coordinates.py | 2 +- xarray/core/dataarray.py | 346 ++++++++++----------- xarray/core/dataset.py | 263 ++++++++-------- xarray/core/datatree.py | 4 +- xarray/core/formatting.py | 4 +- xarray/core/groupby.py | 8 +- xarray/core/merge.py | 22 +- xarray/core/options.py | 2 +- xarray/core/parallel.py | 7 +- xarray/core/rolling.py | 14 +- xarray/core/rolling_exp.py | 12 +- xarray/core/variable.py | 13 +- xarray/namedarray/_aggregations.py | 80 ++--- xarray/namedarray/_array_api.py | 18 +- xarray/plot/utils.py | 10 +- xarray/tests/test_dask.py | 6 +- xarray/tests/test_dataarray.py | 6 +- xarray/tests/test_dataset.py | 14 +- xarray/tests/test_formatting.py | 49 ++- xarray/tests/test_sparse.py | 8 +- xarray/tests/test_variable.py | 2 +- 31 files changed, 882 insertions(+), 884 deletions(-) diff --git a/xarray/backends/api.py b/xarray/backends/api.py index 76fcac62cd3..e8d022d5e86 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -1457,7 +1457,7 @@ def save_mfdataset( ... coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)}, ... ) >>> ds - Size: 768B + 768B Dimensions: (time: 48) Coordinates: * time (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 
2013-12-31 diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py index 6898809e3b0..a761c58bb8c 100644 --- a/xarray/coding/cftimeindex.py +++ b/xarray/coding/cftimeindex.py @@ -383,7 +383,7 @@ def _partial_date_slice(self, resolution, parsed): ... dims=["time"], ... ) >>> da.sel(time="2001-01-01") - Size: 8B + 8B array([1]) Coordinates: * time (time) object 8B 2001-01-01 00:00:00 @@ -393,7 +393,7 @@ def _partial_date_slice(self, resolution, parsed): ... dims=["time"], ... ) >>> da.sel(time="2001-01-01") - Size: 8B + 8B array(1) Coordinates: time datetime64[ns] 8B 2001-01-01 @@ -403,7 +403,7 @@ def _partial_date_slice(self, resolution, parsed): ... dims=["time"], ... ) >>> da.sel(time="2001-01-01") - Size: 8B + 8B array([1]) Coordinates: * time (time) datetime64[ns] 8B 2001-01-01T01:00:00 diff --git a/xarray/core/_aggregations.py b/xarray/core/_aggregations.py index 96f860b3209..fd8bf9baade 100644 --- a/xarray/core/_aggregations.py +++ b/xarray/core/_aggregations.py @@ -84,7 +84,7 @@ def count( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -93,7 +93,7 @@ def count( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.count() - Size: 8B + 8B Dimensions: () Data variables: da int64 8B 5 @@ -156,7 +156,7 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 78B + 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -165,7 +165,7 @@ def all( da (time) bool 6B True True True True True False >>> ds.all() - Size: 1B + 1B Dimensions: () Data variables: da bool 1B False @@ -228,7 +228,7 @@ def any( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 78B + 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 @@ -237,7 +237,7 @@ def any( da (time) bool 6B True True True True True False >>> ds.any() - Size: 1B + 1B Dimensions: () Data variables: da bool 1B True @@ -306,7 +306,7 @@ def max( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -315,7 +315,7 @@ def max( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.max() - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 3.0 @@ -323,7 +323,7 @@ def max( Use ``skipna`` to control whether NaNs are ignored. >>> ds.max(skipna=False) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B nan @@ -393,7 +393,7 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -402,7 +402,7 @@ def min( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.min() - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 0.0 @@ -410,7 +410,7 @@ def min( Use ``skipna`` to control whether NaNs are ignored. >>> ds.min(skipna=False) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B nan @@ -484,7 +484,7 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -493,7 +493,7 @@ def mean( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.mean() - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 1.6 @@ -501,7 +501,7 @@ def mean( Use ``skipna`` to control whether NaNs are ignored. >>> ds.mean(skipna=False) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B nan @@ -582,7 +582,7 @@ def prod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 @@ -591,7 +591,7 @@ def prod( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.prod() - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 0.0 @@ -599,7 +599,7 @@ def prod( Use ``skipna`` to control whether NaNs are ignored. >>> ds.prod(skipna=False) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B nan @@ -607,7 +607,7 @@ def prod( Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.prod(skipna=True, min_count=2) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 0.0 @@ -689,7 +689,7 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -698,7 +698,7 @@ def sum( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.sum() - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 8.0 @@ -706,7 +706,7 @@ def sum( Use ``skipna`` to control whether NaNs are ignored. >>> ds.sum(skipna=False) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B nan @@ -714,7 +714,7 @@ def sum( Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.sum(skipna=True, min_count=2) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 8.0 @@ -793,7 +793,7 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -802,7 +802,7 @@ def std( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.std() - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 1.02 @@ -810,7 +810,7 @@ def std( Use ``skipna`` to control whether NaNs are ignored. >>> ds.std(skipna=False) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B nan @@ -818,7 +818,7 @@ def std( Specify ``ddof=1`` for an unbiased estimate. >>> ds.std(skipna=True, ddof=1) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 1.14 @@ -897,7 +897,7 @@ def var( ... 
) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -906,7 +906,7 @@ def var( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.var() - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 1.04 @@ -914,7 +914,7 @@ def var( Use ``skipna`` to control whether NaNs are ignored. >>> ds.var(skipna=False) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B nan @@ -922,7 +922,7 @@ def var( Specify ``ddof=1`` for an unbiased estimate. >>> ds.var(skipna=True, ddof=1) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 1.3 @@ -997,7 +997,7 @@ def median( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -1006,7 +1006,7 @@ def median( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.median() - Size: 8B + 8B Dimensions: () Data variables: da float64 8B 2.0 @@ -1014,7 +1014,7 @@ def median( Use ``skipna`` to control whether NaNs are ignored. >>> ds.median(skipna=False) - Size: 8B + 8B Dimensions: () Data variables: da float64 8B nan @@ -1088,7 +1088,7 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -1097,7 +1097,7 @@ def cumsum( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.cumsum() - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -1106,7 +1106,7 @@ def cumsum( Use ``skipna`` to control whether NaNs are ignored. >>> ds.cumsum(skipna=False) - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -1181,7 +1181,7 @@ def cumprod( ... 
) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -1190,7 +1190,7 @@ def cumprod( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.cumprod() - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -1199,7 +1199,7 @@ def cumprod( Use ``skipna`` to control whether NaNs are ignored. >>> ds.cumprod(skipna=False) - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -1279,14 +1279,14 @@ def count( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.count() - Size: 8B + 8B array(5) """ return self.reduce( @@ -1345,14 +1345,14 @@ def all( ... ), ... ) >>> da - Size: 6B + 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.all() - Size: 1B + 1B array(False) """ return self.reduce( @@ -1411,14 +1411,14 @@ def any( ... ), ... ) >>> da - Size: 6B + 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.any() - Size: 1B + 1B array(True) """ return self.reduce( @@ -1483,20 +1483,20 @@ def max( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.max() - Size: 8B + 8B array(3.) Use ``skipna`` to control whether NaNs are ignored. >>> da.max(skipna=False) - Size: 8B + 8B array(nan) """ return self.reduce( @@ -1562,20 +1562,20 @@ def min( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.min() - Size: 8B + 8B array(0.) 
Use ``skipna`` to control whether NaNs are ignored. >>> da.min(skipna=False) - Size: 8B + 8B array(nan) """ return self.reduce( @@ -1645,20 +1645,20 @@ def mean( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.mean() - Size: 8B + 8B array(1.6) Use ``skipna`` to control whether NaNs are ignored. >>> da.mean(skipna=False) - Size: 8B + 8B array(nan) """ return self.reduce( @@ -1735,26 +1735,26 @@ def prod( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.prod() - Size: 8B + 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.prod(skipna=False) - Size: 8B + 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.prod(skipna=True, min_count=2) - Size: 8B + 8B array(0.) """ return self.reduce( @@ -1832,26 +1832,26 @@ def sum( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.sum() - Size: 8B + 8B array(8.) Use ``skipna`` to control whether NaNs are ignored. >>> da.sum(skipna=False) - Size: 8B + 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.sum(skipna=True, min_count=2) - Size: 8B + 8B array(8.) """ return self.reduce( @@ -1926,26 +1926,26 @@ def std( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.std() - Size: 8B + 8B array(1.0198039) Use ``skipna`` to control whether NaNs are ignored. >>> da.std(skipna=False) - Size: 8B + 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. 
>>> da.std(skipna=True, ddof=1) - Size: 8B + 8B array(1.14017543) """ return self.reduce( @@ -2020,26 +2020,26 @@ def var( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.var() - Size: 8B + 8B array(1.04) Use ``skipna`` to control whether NaNs are ignored. >>> da.var(skipna=False) - Size: 8B + 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.var(skipna=True, ddof=1) - Size: 8B + 8B array(1.3) """ return self.reduce( @@ -2110,20 +2110,20 @@ def median( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.median() - Size: 8B + 8B array(2.) Use ``skipna`` to control whether NaNs are ignored. >>> da.median(skipna=False) - Size: 8B + 8B array(nan) """ return self.reduce( @@ -2193,14 +2193,14 @@ def cumsum( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumsum() - Size: 48B + 48B array([1., 3., 6., 6., 8., 8.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2209,7 +2209,7 @@ def cumsum( Use ``skipna`` to control whether NaNs are ignored. >>> da.cumsum(skipna=False) - Size: 48B + 48B array([ 1., 3., 6., 6., 8., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2282,14 +2282,14 @@ def cumprod( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumprod() - Size: 48B + 48B array([1., 2., 6., 0., 0., 0.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2298,7 +2298,7 @@ def cumprod( Use ``skipna`` to control whether NaNs are ignored. 
>>> da.cumprod(skipna=False) - Size: 48B + 48B array([ 1., 2., 6., 0., 0., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2406,7 +2406,7 @@ def count( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2415,7 +2415,7 @@ def count( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").count() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -2502,7 +2502,7 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 78B + 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2511,7 +2511,7 @@ def all( da (time) bool 6B True True True True True False >>> ds.groupby("labels").all() - Size: 27B + 27B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -2598,7 +2598,7 @@ def any( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 78B + 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2607,7 +2607,7 @@ def any( da (time) bool 6B True True True True True False >>> ds.groupby("labels").any() - Size: 27B + 27B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -2700,7 +2700,7 @@ def max( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2709,7 +2709,7 @@ def max( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").max() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -2719,7 +2719,7 @@ def max( Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.groupby("labels").max(skipna=False) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -2814,7 +2814,7 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2823,7 +2823,7 @@ def min( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").min() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -2833,7 +2833,7 @@ def min( Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").min(skipna=False) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -2930,7 +2930,7 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -2939,7 +2939,7 @@ def mean( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").mean() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -2949,7 +2949,7 @@ def mean( Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").mean(skipna=False) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3053,7 +3053,7 @@ def prod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -3062,7 +3062,7 @@ def prod( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").prod() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3072,7 +3072,7 @@ def prod( Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.groupby("labels").prod(skipna=False) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3082,7 +3082,7 @@ def prod( Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").prod(skipna=True, min_count=2) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3188,7 +3188,7 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -3197,7 +3197,7 @@ def sum( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").sum() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3207,7 +3207,7 @@ def sum( Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").sum(skipna=False) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3217,7 +3217,7 @@ def sum( Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").sum(skipna=True, min_count=2) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3320,7 +3320,7 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -3329,7 +3329,7 @@ def std( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").std() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3339,7 +3339,7 @@ def std( Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").std(skipna=False) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3349,7 +3349,7 @@ def std( Specify ``ddof=1`` for an unbiased estimate. 
>>> ds.groupby("labels").std(skipna=True, ddof=1) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3452,7 +3452,7 @@ def var( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -3461,7 +3461,7 @@ def var( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").var() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3471,7 +3471,7 @@ def var( Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").var(skipna=False) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3481,7 +3481,7 @@ def var( Specify ``ddof=1`` for an unbiased estimate. >>> ds.groupby("labels").var(skipna=True, ddof=1) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3580,7 +3580,7 @@ def median( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -3589,7 +3589,7 @@ def median( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").median() - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3599,7 +3599,7 @@ def median( Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").median(skipna=False) - Size: 48B + 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -3681,7 +3681,7 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 @@ -3690,7 +3690,7 @@ def cumsum( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").cumsum() - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -3699,7 +3699,7 @@ def cumsum( Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").cumsum(skipna=False) - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -3780,7 +3780,7 @@ def cumprod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -3789,7 +3789,7 @@ def cumprod( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.groupby("labels").cumprod() - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -3798,7 +3798,7 @@ def cumprod( Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").cumprod(skipna=False) - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -3907,7 +3907,7 @@ def count( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -3916,7 +3916,7 @@ def count( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").count() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4003,7 +4003,7 @@ def all( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 78B + 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -4012,7 +4012,7 @@ def all( da (time) bool 6B True True True True True False >>> ds.resample(time="3ME").all() - Size: 27B + 27B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4099,7 +4099,7 @@ def any( ... 
) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 78B + 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -4108,7 +4108,7 @@ def any( da (time) bool 6B True True True True True False >>> ds.resample(time="3ME").any() - Size: 27B + 27B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4201,7 +4201,7 @@ def max( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -4210,7 +4210,7 @@ def max( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").max() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4220,7 +4220,7 @@ def max( Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").max(skipna=False) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4315,7 +4315,7 @@ def min( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -4324,7 +4324,7 @@ def min( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").min() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4334,7 +4334,7 @@ def min( Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").min(skipna=False) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4431,7 +4431,7 @@ def mean( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 @@ -4440,7 +4440,7 @@ def mean( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").mean() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4450,7 +4450,7 @@ def mean( Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").mean(skipna=False) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4554,7 +4554,7 @@ def prod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -4563,7 +4563,7 @@ def prod( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").prod() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4573,7 +4573,7 @@ def prod( Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").prod(skipna=False) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4583,7 +4583,7 @@ def prod( Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3ME").prod(skipna=True, min_count=2) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4689,7 +4689,7 @@ def sum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -4698,7 +4698,7 @@ def sum( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").sum() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4708,7 +4708,7 @@ def sum( Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").sum(skipna=False) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4718,7 +4718,7 @@ def sum( Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3ME").sum(skipna=True, min_count=2) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4821,7 +4821,7 @@ def std( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -4830,7 +4830,7 @@ def std( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").std() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4840,7 +4840,7 @@ def std( Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").std(skipna=False) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4850,7 +4850,7 @@ def std( Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3ME").std(skipna=True, ddof=1) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4953,7 +4953,7 @@ def var( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -4962,7 +4962,7 @@ def var( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").var() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4972,7 +4972,7 @@ def var( Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").var(skipna=False) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -4982,7 +4982,7 @@ def var( Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3ME").var(skipna=True, ddof=1) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -5081,7 +5081,7 @@ def median( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -5090,7 +5090,7 @@ def median( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").median() - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -5100,7 +5100,7 @@ def median( Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").median(skipna=False) - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -5182,7 +5182,7 @@ def cumsum( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -5191,7 +5191,7 @@ def cumsum( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").cumsum() - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -5200,7 +5200,7 @@ def cumsum( Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").cumsum(skipna=False) - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -5281,7 +5281,7 @@ def cumprod( ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds - Size: 120B + 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 @@ -5290,7 +5290,7 @@ def cumprod( da (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> ds.resample(time="3ME").cumprod() - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -5299,7 +5299,7 @@ def cumprod( Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").cumprod(skipna=False) - Size: 48B + 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: @@ -5407,14 +5407,14 @@ def count( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").count() - Size: 24B + 24B array([1, 2, 2]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -5496,14 +5496,14 @@ def all( ... ), ... ) >>> da - Size: 6B + 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").all() - Size: 3B + 3B array([False, True, True]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -5585,14 +5585,14 @@ def any( ... ), ... ) >>> da - Size: 6B + 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").any() - Size: 3B + 3B array([ True, True, True]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -5680,14 +5680,14 @@ def max( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").max() - Size: 24B + 24B array([1., 2., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -5695,7 +5695,7 @@ def max( Use ``skipna`` to control whether NaNs are ignored. 
>>> da.groupby("labels").max(skipna=False) - Size: 24B + 24B array([nan, 2., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -5785,14 +5785,14 @@ def min( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").min() - Size: 24B + 24B array([1., 2., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -5800,7 +5800,7 @@ def min( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").min(skipna=False) - Size: 24B + 24B array([nan, 2., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -5892,14 +5892,14 @@ def mean( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").mean() - Size: 24B + 24B array([1. , 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -5907,7 +5907,7 @@ def mean( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").mean(skipna=False) - Size: 24B + 24B array([nan, 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6006,14 +6006,14 @@ def prod( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").prod() - Size: 24B + 24B array([1., 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6021,7 +6021,7 @@ def prod( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").prod(skipna=False) - Size: 24B + 24B array([nan, 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6029,7 +6029,7 @@ def prod( Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> da.groupby("labels").prod(skipna=True, min_count=2) - Size: 24B + 24B array([nan, 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6130,14 +6130,14 @@ def sum( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").sum() - Size: 24B + 24B array([1., 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6145,7 +6145,7 @@ def sum( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").sum(skipna=False) - Size: 24B + 24B array([nan, 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6153,7 +6153,7 @@ def sum( Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.groupby("labels").sum(skipna=True, min_count=2) - Size: 24B + 24B array([nan, 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6251,14 +6251,14 @@ def std( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").std() - Size: 24B + 24B array([0. , 0. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6266,7 +6266,7 @@ def std( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").std(skipna=False) - Size: 24B + 24B array([nan, 0. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6274,7 +6274,7 @@ def std( Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").std(skipna=True, ddof=1) - Size: 24B + 24B array([ nan, 0. , 2.12132034]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6372,14 +6372,14 @@ def var( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").var() - Size: 24B + 24B array([0. , 0. 
, 2.25]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6387,7 +6387,7 @@ def var( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").var(skipna=False) - Size: 24B + 24B array([ nan, 0. , 2.25]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6395,7 +6395,7 @@ def var( Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").var(skipna=True, ddof=1) - Size: 24B + 24B array([nan, 0. , 4.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6489,14 +6489,14 @@ def median( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").median() - Size: 24B + 24B array([1. , 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6504,7 +6504,7 @@ def median( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").median(skipna=False) - Size: 24B + 24B array([nan, 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' @@ -6582,14 +6582,14 @@ def cumsum( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumsum() - Size: 48B + 48B array([1., 2., 3., 3., 4., 1.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -6598,7 +6598,7 @@ def cumsum( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").cumsum(skipna=False) - Size: 48B + 48B array([ 1., 2., 3., 3., 4., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -6677,14 +6677,14 @@ def cumprod( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.groupby("labels").cumprod() - Size: 48B + 48B array([1., 2., 3., 0., 4., 1.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -6693,7 +6693,7 @@ def cumprod( Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").cumprod(skipna=False) - Size: 48B + 48B array([ 1., 2., 3., 0., 4., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 @@ -6800,14 +6800,14 @@ def count( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").count() - Size: 24B + 24B array([1, 3, 1]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -6889,14 +6889,14 @@ def all( ... ), ... ) >>> da - Size: 6B + 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").all() - Size: 3B + 3B array([ True, True, False]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -6978,14 +6978,14 @@ def any( ... ), ... ) >>> da - Size: 6B + 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").any() - Size: 3B + 3B array([ True, True, True]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7073,14 +7073,14 @@ def max( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").max() - Size: 24B + 24B array([1., 3., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7088,7 +7088,7 @@ def max( Use ``skipna`` to control whether NaNs are ignored. 
>>> da.resample(time="3ME").max(skipna=False) - Size: 24B + 24B array([ 1., 3., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7178,14 +7178,14 @@ def min( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").min() - Size: 24B + 24B array([1., 0., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7193,7 +7193,7 @@ def min( Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").min(skipna=False) - Size: 24B + 24B array([ 1., 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7285,14 +7285,14 @@ def mean( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").mean() - Size: 24B + 24B array([1. , 1.66666667, 2. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7300,7 +7300,7 @@ def mean( Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").mean(skipna=False) - Size: 24B + 24B array([1. , 1.66666667, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7399,14 +7399,14 @@ def prod( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").prod() - Size: 24B + 24B array([1., 0., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7414,7 +7414,7 @@ def prod( Use ``skipna`` to control whether NaNs are ignored. 
>>> da.resample(time="3ME").prod(skipna=False) - Size: 24B + 24B array([ 1., 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7422,7 +7422,7 @@ def prod( Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3ME").prod(skipna=True, min_count=2) - Size: 24B + 24B array([nan, 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7523,14 +7523,14 @@ def sum( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").sum() - Size: 24B + 24B array([1., 5., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7538,7 +7538,7 @@ def sum( Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").sum(skipna=False) - Size: 24B + 24B array([ 1., 5., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7546,7 +7546,7 @@ def sum( Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3ME").sum(skipna=True, min_count=2) - Size: 24B + 24B array([nan, 5., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7644,14 +7644,14 @@ def std( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").std() - Size: 24B + 24B array([0. , 1.24721913, 0. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7659,7 +7659,7 @@ def std( Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").std(skipna=False) - Size: 24B + 24B array([0. 
, 1.24721913, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7667,7 +7667,7 @@ def std( Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3ME").std(skipna=True, ddof=1) - Size: 24B + 24B array([ nan, 1.52752523, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7765,14 +7765,14 @@ def var( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").var() - Size: 24B + 24B array([0. , 1.55555556, 0. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7780,7 +7780,7 @@ def var( Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").var(skipna=False) - Size: 24B + 24B array([0. , 1.55555556, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7788,7 +7788,7 @@ def var( Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3ME").var(skipna=True, ddof=1) - Size: 24B + 24B array([ nan, 2.33333333, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7882,14 +7882,14 @@ def median( ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").median() - Size: 24B + 24B array([1., 2., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7897,7 +7897,7 @@ def median( Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").median(skipna=False) - Size: 24B + 24B array([ 1., 2., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 @@ -7975,14 +7975,14 @@ def cumsum( ... ), ... 
) >>> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").cumsum() - Size: 48B + 48B array([1., 2., 5., 5., 2., 2.]) Coordinates: labels (time) >> da.resample(time="3ME").cumsum(skipna=False) - Size: 48B + 48B array([ 1., 2., 5., 5., 2., nan]) Coordinates: labels (time) >> da - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").cumprod() - Size: 48B + 48B array([1., 2., 6., 0., 2., 2.]) Coordinates: labels (time) >> da.resample(time="3ME").cumprod(skipna=False) - Size: 48B + 48B array([ 1., 2., 6., 0., 2., nan]) Coordinates: labels (time) >> dates = pd.date_range(start="2000/01/01", freq="D", periods=10) >>> ts = xr.DataArray(dates, dims=("time")) >>> ts - Size: 80B + 80B array(['2000-01-01T00:00:00.000000000', '2000-01-02T00:00:00.000000000', '2000-01-03T00:00:00.000000000', '2000-01-04T00:00:00.000000000', '2000-01-05T00:00:00.000000000', '2000-01-06T00:00:00.000000000', @@ -325,12 +325,12 @@ class DatetimeAccessor(TimeAccessor[T_DataArray]): >>> ts.dt # doctest: +ELLIPSIS >>> ts.dt.dayofyear - Size: 80B + 80B array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 >>> ts.dt.quarter - Size: 80B + 80B array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 
2000-01-10 @@ -359,7 +359,7 @@ def strftime(self, date_format: str) -> T_DataArray: >>> import datetime >>> rng = xr.Dataset({"time": datetime.datetime(2000, 1, 1)}) >>> rng["time"].dt.strftime("%B %d, %Y, %r") - Size: 8B + 8B array('January 01, 2000, 12:00:00 AM', dtype=object) """ obj_type = type(self._obj) @@ -544,7 +544,7 @@ class TimedeltaAccessor(TimeAccessor[T_DataArray]): >>> dates = pd.timedelta_range(start="1 day", freq="6h", periods=20) >>> ts = xr.DataArray(dates, dims=("time")) >>> ts - Size: 160B + 160B array([ 86400000000000, 108000000000000, 129600000000000, 151200000000000, 172800000000000, 194400000000000, 216000000000000, 237600000000000, 259200000000000, 280800000000000, 302400000000000, 324000000000000, @@ -556,24 +556,24 @@ class TimedeltaAccessor(TimeAccessor[T_DataArray]): >>> ts.dt # doctest: +ELLIPSIS >>> ts.dt.days - Size: 160B + 160B array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.microseconds - Size: 160B + 160B array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.seconds - Size: 160B + 160B array([ 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 
5 days 18:00:00 >>> ts.dt.total_seconds() - Size: 160B + 160B array([ 86400., 108000., 129600., 151200., 172800., 194400., 216000., 237600., 259200., 280800., 302400., 324000., 345600., 367200., 388800., 410400., 432000., 453600., 475200., 496800.]) diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index a48fbc91faf..d4308fad98c 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -148,7 +148,7 @@ class StringAccessor(Generic[T_DataArray]): >>> da = xr.DataArray(["some", "text", "in", "an", "array"]) >>> da.str.len() - Size: 40B + 40B array([4, 4, 2, 2, 5]) Dimensions without coordinates: dim_0 @@ -159,7 +159,7 @@ class StringAccessor(Generic[T_DataArray]): >>> da1 = xr.DataArray(["first", "second", "third"], dims=["X"]) >>> da2 = xr.DataArray([1, 2, 3], dims=["Y"]) >>> da1.str + da2 - Size: 252B + 252B array([['first1', 'first2', 'first3'], ['second1', 'second2', 'second3'], ['third1', 'third2', 'third3']], dtype='>> da1 = xr.DataArray(["a", "b", "c", "d"], dims=["X"]) >>> reps = xr.DataArray([3, 4], dims=["Y"]) >>> da1.str * reps - Size: 128B + 128B array([['aaa', 'aaaa'], ['bbb', 'bbbb'], ['ccc', 'cccc'], @@ -179,7 +179,7 @@ class StringAccessor(Generic[T_DataArray]): >>> da2 = xr.DataArray([1, 2], dims=["Y"]) >>> da3 = xr.DataArray([0.1, 0.2], dims=["Z"]) >>> da1.str % (da2, da3) - Size: 240B + 240B array([[['1_0.1', '1_0.2'], ['2_0.1', '2_0.2']], @@ -197,8 +197,8 @@ class StringAccessor(Generic[T_DataArray]): >>> da1 = xr.DataArray(["%(a)s"], dims=["X"]) >>> da2 = xr.DataArray([1, 2, 3], dims=["Y"]) >>> da1 % {"a": da2} - Size: 8B - array([' Size: 24B\narray([1, 2, 3])\nDimensions without coordinates: Y'], + 8B + array([' 24B\narray([1, 2, 3])\nDimensions without coordinates: Y'], dtype=object) Dimensions without coordinates: X """ @@ -483,7 +483,7 @@ def cat(self, *others, sep: str | bytes | Any = "") -> T_DataArray: Concatenate the arrays using the separator >>> myarray.str.cat(values_1, values_2, values_3, 
values_4, sep=seps) - Size: 1kB + 1kB array([[['11111 a 3.4 test', '11111, a, 3.4, , test'], ['11111 bb 3.4 test', '11111, bb, 3.4, , test'], ['11111 cccc 3.4 test', '11111, cccc, 3.4, , test']], @@ -556,7 +556,7 @@ def join( Join the strings along a given dimension >>> values.str.join(dim="Y", sep=seps) - Size: 192B + 192B array([['a-bab-abc', 'a_bab_abc'], ['abcd--abcdef', 'abcd__abcdef']], dtype='>> values.str.format(noun0, noun1, adj0=adj0, adj1=adj1) - Size: 1kB + 1kB array([[['spam is unexpected', 'spam is unexpected'], ['egg is unexpected', 'egg is unexpected']], @@ -680,13 +680,13 @@ def capitalize(self) -> T_DataArray: ... ["temperature", "PRESSURE", "PreCipiTation", "daily rainfall"], dims="x" ... ) >>> da - Size: 224B + 224B array(['temperature', 'PRESSURE', 'PreCipiTation', 'daily rainfall'], dtype='>> capitalized = da.str.capitalize() >>> capitalized - Size: 224B + 224B array(['Temperature', 'Pressure', 'Precipitation', 'Daily rainfall'], dtype=' T_DataArray: -------- >>> da = xr.DataArray(["Temperature", "PRESSURE"], dims="x") >>> da - Size: 88B + 88B array(['Temperature', 'PRESSURE'], dtype='>> lowered = da.str.lower() >>> lowered - Size: 88B + 88B array(['temperature', 'pressure'], dtype=' T_DataArray: >>> import xarray as xr >>> da = xr.DataArray(["temperature", "PRESSURE", "HuMiDiTy"], dims="x") >>> da - Size: 132B + 132B array(['temperature', 'PRESSURE', 'HuMiDiTy'], dtype='>> swapcased = da.str.swapcase() >>> swapcased - Size: 132B + 132B array(['TEMPERATURE', 'pressure', 'hUmIdItY'], dtype=' T_DataArray: -------- >>> da = xr.DataArray(["temperature", "PRESSURE", "HuMiDiTy"], dims="x") >>> da - Size: 132B + 132B array(['temperature', 'PRESSURE', 'HuMiDiTy'], dtype='>> titled = da.str.title() >>> titled - Size: 132B + 132B array(['Temperature', 'Pressure', 'Humidity'], dtype=' T_DataArray: -------- >>> da = xr.DataArray(["temperature", "HuMiDiTy"], dims="x") >>> da - Size: 88B + 88B array(['temperature', 'HuMiDiTy'], dtype='>> uppered = 
da.str.upper() >>> uppered - Size: 88B + 88B array(['TEMPERATURE', 'HUMIDITY'], dtype=' T_DataArray: -------- >>> da = xr.DataArray(["TEMPERATURE", "HuMiDiTy"], dims="x") >>> da - Size: 88B + 88B array(['TEMPERATURE', 'HuMiDiTy'], dtype='>> casefolded = da.str.casefold() >>> casefolded - Size: 88B + 88B array(['temperature', 'humidity'], dtype='>> da = xr.DataArray(["ß", "İ"], dims="x") >>> da - Size: 8B + 8B array(['ß', 'İ'], dtype='>> casefolded = da.str.casefold() >>> casefolded - Size: 16B + 16B array(['ss', 'i̇'], dtype=' T_DataArray: -------- >>> da = xr.DataArray(["H2O", "NaCl-"], dims="x") >>> da - Size: 40B + 40B array(['H2O', 'NaCl-'], dtype='>> isalnum = da.str.isalnum() >>> isalnum - Size: 2B + 2B array([ True, False]) Dimensions without coordinates: x """ @@ -886,12 +886,12 @@ def isalpha(self) -> T_DataArray: -------- >>> da = xr.DataArray(["Mn", "H2O", "NaCl-"], dims="x") >>> da - Size: 60B + 60B array(['Mn', 'H2O', 'NaCl-'], dtype='>> isalpha = da.str.isalpha() >>> isalpha - Size: 3B + 3B array([ True, False, False]) Dimensions without coordinates: x """ @@ -910,12 +910,12 @@ def isdecimal(self) -> T_DataArray: -------- >>> da = xr.DataArray(["2.3", "123", "0"], dims="x") >>> da - Size: 36B + 36B array(['2.3', '123', '0'], dtype='>> isdecimal = da.str.isdecimal() >>> isdecimal - Size: 3B + 3B array([False, True, True]) Dimensions without coordinates: x """ @@ -934,12 +934,12 @@ def isdigit(self) -> T_DataArray: -------- >>> da = xr.DataArray(["123", "1.2", "0", "CO2", "NaCl"], dims="x") >>> da - Size: 80B + 80B array(['123', '1.2', '0', 'CO2', 'NaCl'], dtype='>> isdigit = da.str.isdigit() >>> isdigit - Size: 5B + 5B array([ True, False, True, False, False]) Dimensions without coordinates: x """ @@ -959,12 +959,12 @@ def islower(self) -> T_DataArray: -------- >>> da = xr.DataArray(["temperature", "HUMIDITY", "pREciPiTaTioN"], dims="x") >>> da - Size: 156B + 156B array(['temperature', 'HUMIDITY', 'pREciPiTaTioN'], dtype='>> islower = da.str.islower() 
>>> islower - Size: 3B + 3B array([ True, False, False]) Dimensions without coordinates: x """ @@ -983,12 +983,12 @@ def isnumeric(self) -> T_DataArray: -------- >>> da = xr.DataArray(["123", "2.3", "H2O", "NaCl-", "Mn"], dims="x") >>> da - Size: 100B + 100B array(['123', '2.3', 'H2O', 'NaCl-', 'Mn'], dtype='>> isnumeric = da.str.isnumeric() >>> isnumeric - Size: 5B + 5B array([ True, False, False, False, False]) Dimensions without coordinates: x """ @@ -1007,12 +1007,12 @@ def isspace(self) -> T_DataArray: -------- >>> da = xr.DataArray(["", " ", "\\t", "\\n"], dims="x") >>> da - Size: 16B + 16B array(['', ' ', '\\t', '\\n'], dtype='>> isspace = da.str.isspace() >>> isspace - Size: 4B + 4B array([False, True, True, True]) Dimensions without coordinates: x """ @@ -1038,13 +1038,13 @@ def istitle(self) -> T_DataArray: ... dims="title", ... ) >>> da - Size: 360B + 360B array(['The Evolution Of Species', 'The Theory of relativity', 'the quantum mechanics of atoms'], dtype='>> istitle = da.str.istitle() >>> istitle - Size: 3B + 3B array([ True, False, False]) Dimensions without coordinates: title """ @@ -1063,12 +1063,12 @@ def isupper(self) -> T_DataArray: -------- >>> da = xr.DataArray(["TEMPERATURE", "humidity", "PreCIpiTAtioN"], dims="x") >>> da - Size: 156B + 156B array(['TEMPERATURE', 'humidity', 'PreCIpiTAtioN'], dtype='>> isupper = da.str.isupper() >>> isupper - Size: 3B + 3B array([ True, False, False]) Dimensions without coordinates: x """ @@ -1111,20 +1111,20 @@ def count( -------- >>> da = xr.DataArray(["jjklmn", "opjjqrs", "t-JJ99vwx"], dims="x") >>> da - Size: 108B + 108B array(['jjklmn', 'opjjqrs', 't-JJ99vwx'], dtype='>> da.str.count("jj") - Size: 24B + 24B array([1, 1, 0]) Dimensions without coordinates: x Enable case-insensitive matching by setting case to false: >>> counts = da.str.count("jj", case=False) >>> counts - Size: 24B + 24B array([1, 1, 1]) Dimensions without coordinates: x @@ -1132,7 +1132,7 @@ def count( >>> pat = "JJ[0-9]{2}[a-z]{3}" >>> 
counts = da.str.count(pat) >>> counts - Size: 24B + 24B array([0, 0, 1]) Dimensions without coordinates: x @@ -1141,7 +1141,7 @@ def count( >>> pat = xr.DataArray(["jj", "JJ"], dims="y") >>> counts = da.str.count(pat) >>> counts - Size: 48B + 48B array([[1, 0], [1, 0], [0, 1]]) @@ -1175,12 +1175,12 @@ def startswith(self, pat: str | bytes | Any) -> T_DataArray: -------- >>> da = xr.DataArray(["$100", "£23", "100"], dims="x") >>> da - Size: 48B + 48B array(['$100', '£23', '100'], dtype='>> startswith = da.str.startswith("$") >>> startswith - Size: 3B + 3B array([ True, False, False]) Dimensions without coordinates: x """ @@ -1211,12 +1211,12 @@ def endswith(self, pat: str | bytes | Any) -> T_DataArray: -------- >>> da = xr.DataArray(["10C", "10c", "100F"], dims="x") >>> da - Size: 48B + 48B array(['10C', '10c', '100F'], dtype='>> endswith = da.str.endswith("C") >>> endswith - Size: 3B + 3B array([ True, False, False]) Dimensions without coordinates: x """ @@ -1261,7 +1261,7 @@ def pad( >>> da = xr.DataArray(["PAR184", "TKO65", "NBO9139", "NZ39"], dims="x") >>> da - Size: 112B + 112B array(['PAR184', 'TKO65', 'NBO9139', 'NZ39'], dtype='>> filled = da.str.pad(8, side="left", fillchar="0") >>> filled - Size: 128B + 128B array(['00PAR184', '000TKO65', '0NBO9139', '0000NZ39'], dtype='>> filled = da.str.pad(8, side="right", fillchar="0") >>> filled - Size: 128B + 128B array(['PAR18400', 'TKO65000', 'NBO91390', 'NZ390000'], dtype='>> filled = da.str.pad(8, side="both", fillchar="0") >>> filled - Size: 128B + 128B array(['0PAR1840', '0TKO6500', 'NBO91390', '00NZ3900'], dtype='>> width = xr.DataArray([8, 10], dims="y") >>> filled = da.str.pad(width, side="left", fillchar="0") >>> filled - Size: 320B + 320B array([['00PAR184', '0000PAR184'], ['000TKO65', '00000TKO65'], ['0NBO9139', '000NBO9139'], @@ -1306,7 +1306,7 @@ def pad( >>> fillchar = xr.DataArray(["0", "-"], dims="y") >>> filled = da.str.pad(8, side="left", fillchar=fillchar) >>> filled - Size: 256B + 256B 
array([['00PAR184', '--PAR184'], ['000TKO65', '---TKO65'], ['0NBO9139', '-NBO9139'], @@ -2024,7 +2024,7 @@ def extract( Extract matches >>> value.str.extract(r"(\w+)_Xy_(\d*)", dim="match") - Size: 288B + 288B array([[['a', '0'], ['bab', '110'], ['abc', '01']], @@ -2178,7 +2178,7 @@ def extractall( >>> value.str.extractall( ... r"(\w+)_Xy_(\d*)", group_dim="group", match_dim="match" ... ) - Size: 1kB + 1kB array([[[['a', '0'], ['', ''], ['', '']], @@ -2342,7 +2342,7 @@ def findall( Extract matches >>> value.str.findall(r"(\w+)_Xy_(\d*)") - Size: 48B + 48B array([[list([('a', '0')]), list([('bab', '110'), ('baab', '1100')]), list([('abc', '01'), ('cbc', '2210')])], [list([('abcd', ''), ('dcd', '33210'), ('dccd', '332210')]), @@ -2577,7 +2577,7 @@ def split( Split once and put the results in a new dimension >>> values.str.split(dim="splitted", maxsplit=1) - Size: 864B + 864B array([[['abc', 'def'], ['spam', 'eggs\tswallow'], ['red_blue', '']], @@ -2590,7 +2590,7 @@ def split( Split as many times as needed and put the results in a new dimension >>> values.str.split(dim="splitted") - Size: 768B + 768B array([[['abc', 'def', '', ''], ['spam', 'eggs', 'swallow', ''], ['red_blue', '', '', '']], @@ -2603,7 +2603,7 @@ def split( Split once and put the results in lists >>> values.str.split(dim=None, maxsplit=1) - Size: 48B + 48B array([[list(['abc', 'def']), list(['spam', 'eggs\tswallow']), list(['red_blue'])], [list(['test0', 'test1\ntest2\n\ntest3']), list([]), @@ -2613,7 +2613,7 @@ def split( Split as many times as needed and put the results in a list >>> values.str.split(dim=None) - Size: 48B + 48B array([[list(['abc', 'def']), list(['spam', 'eggs', 'swallow']), list(['red_blue'])], [list(['test0', 'test1', 'test2', 'test3']), list([]), @@ -2623,7 +2623,7 @@ def split( Split only on spaces >>> values.str.split(dim="splitted", sep=" ") - Size: 2kB + 2kB array([[['abc', 'def', ''], ['spam\t\teggs\tswallow', '', ''], ['red_blue', '', '']], @@ -2695,7 +2695,7 @@ def rsplit( 
Split once and put the results in a new dimension >>> values.str.rsplit(dim="splitted", maxsplit=1) - Size: 816B + 816B array([[['abc', 'def'], ['spam\t\teggs', 'swallow'], ['', 'red_blue']], @@ -2708,7 +2708,7 @@ def rsplit( Split as many times as needed and put the results in a new dimension >>> values.str.rsplit(dim="splitted") - Size: 768B + 768B array([[['', '', 'abc', 'def'], ['', 'spam', 'eggs', 'swallow'], ['', '', '', 'red_blue']], @@ -2721,7 +2721,7 @@ def rsplit( Split once and put the results in lists >>> values.str.rsplit(dim=None, maxsplit=1) - Size: 48B + 48B array([[list(['abc', 'def']), list(['spam\t\teggs', 'swallow']), list(['red_blue'])], [list(['test0\ntest1\ntest2', 'test3']), list([]), @@ -2731,7 +2731,7 @@ def rsplit( Split as many times as needed and put the results in a list >>> values.str.rsplit(dim=None) - Size: 48B + 48B array([[list(['abc', 'def']), list(['spam', 'eggs', 'swallow']), list(['red_blue'])], [list(['test0', 'test1', 'test2', 'test3']), list([]), @@ -2741,7 +2741,7 @@ def rsplit( Split only on spaces >>> values.str.rsplit(dim="splitted", sep=" ") - Size: 2kB + 2kB array([[['', 'abc', 'def'], ['', '', 'spam\t\teggs\tswallow'], ['', '', 'red_blue']], @@ -2808,7 +2808,7 @@ def get_dummies( Extract dummy values >>> values.str.get_dummies(dim="dummies") - Size: 30B + 30B array([[[ True, False, True, False, True], [False, True, False, False, False], [ True, False, True, True, False]], diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py index 13e3400d170..26ae8ffcd6a 100644 --- a/xarray/core/alignment.py +++ b/xarray/core/alignment.py @@ -752,7 +752,7 @@ def align( ... 
) >>> x - Size: 32B + 32B array([[25, 35], [10, 24]]) Coordinates: @@ -760,7 +760,7 @@ def align( * lon (lon) float64 16B 100.0 120.0 >>> y - Size: 32B + 32B array([[20, 5], [ 7, 13]]) Coordinates: @@ -769,13 +769,13 @@ def align( >>> a, b = xr.align(x, y) >>> a - Size: 16B + 16B array([[25, 35]]) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 16B 100.0 120.0 >>> b - Size: 16B + 16B array([[20, 5]]) Coordinates: * lat (lat) float64 8B 35.0 @@ -783,7 +783,7 @@ def align( >>> a, b = xr.align(x, y, join="outer") >>> a - Size: 48B + 48B array([[25., 35.], [10., 24.], [nan, nan]]) @@ -791,7 +791,7 @@ def align( * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b - Size: 48B + 48B array([[20., 5.], [nan, nan], [ 7., 13.]]) @@ -801,7 +801,7 @@ def align( >>> a, b = xr.align(x, y, join="outer", fill_value=-999) >>> a - Size: 48B + 48B array([[ 25, 35], [ 10, 24], [-999, -999]]) @@ -809,7 +809,7 @@ def align( * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b - Size: 48B + 48B array([[ 20, 5], [-999, -999], [ 7, 13]]) @@ -819,14 +819,14 @@ def align( >>> a, b = xr.align(x, y, join="left") >>> a - Size: 32B + 32B array([[25, 35], [10, 24]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> b - Size: 32B + 32B array([[20., 5.], [nan, nan]]) Coordinates: @@ -835,14 +835,14 @@ def align( >>> a, b = xr.align(x, y, join="right") >>> a - Size: 32B + 32B array([[25., 35.], [nan, nan]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b - Size: 32B + 32B array([[20, 5], [ 7, 13]]) Coordinates: @@ -856,14 +856,14 @@ def align( >>> a, b = xr.align(x, y, join="override") >>> a - Size: 32B + 32B array([[25, 35], [10, 24]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> b - Size: 32B + 32B array([[20, 5], [ 7, 13]]) Coordinates: @@ -1173,22 +1173,22 @@ def broadcast( >>> a = xr.DataArray([1, 2, 3], 
dims="x") >>> b = xr.DataArray([5, 6], dims="y") >>> a - Size: 24B + 24B array([1, 2, 3]) Dimensions without coordinates: x >>> b - Size: 16B + 16B array([5, 6]) Dimensions without coordinates: y >>> a2, b2 = xr.broadcast(a, b) >>> a2 - Size: 48B + 48B array([[1, 1], [2, 2], [3, 3]]) Dimensions without coordinates: x, y >>> b2 - Size: 48B + 48B array([[5, 6], [5, 6], [5, 6]]) @@ -1199,7 +1199,7 @@ def broadcast( >>> ds = xr.Dataset({"a": a, "b": b}) >>> (ds2,) = xr.broadcast(ds) # use tuple unpacking to extract one dataset >>> ds2 - Size: 96B + 96B Dimensions: (x: 3, y: 2) Dimensions without coordinates: x, y Data variables: diff --git a/xarray/core/combine.py b/xarray/core/combine.py index 5cb0a3417fa..ba0dd0e00c5 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -484,7 +484,7 @@ def combine_nested( ... } ... ) >>> x1y1 - Size: 64B + 64B Dimensions: (x: 2, y: 2) Dimensions without coordinates: x, y Data variables: @@ -513,7 +513,7 @@ def combine_nested( >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]] >>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"]) >>> combined - Size: 256B + 256B Dimensions: (x: 4, y: 4) Dimensions without coordinates: x, y Data variables: @@ -528,7 +528,7 @@ def combine_nested( >>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))}) >>> t1temp - Size: 40B + 40B Dimensions: (t: 5) Dimensions without coordinates: t Data variables: @@ -536,7 +536,7 @@ def combine_nested( >>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))}) >>> t1precip - Size: 40B + 40B Dimensions: (t: 5) Dimensions without coordinates: t Data variables: @@ -549,7 +549,7 @@ def combine_nested( >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]] >>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None]) >>> combined - Size: 160B + 160B Dimensions: (t: 10) Dimensions without coordinates: t Data variables: @@ -797,7 +797,7 @@ def combine_by_coords( ... 
) >>> x1 - Size: 136B + 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 0 1 @@ -807,7 +807,7 @@ def combine_by_coords( precipitation (y, x) float64 48B 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289 >>> x2 - Size: 136B + 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 2 3 @@ -817,7 +817,7 @@ def combine_by_coords( precipitation (y, x) float64 48B 0.7782 0.87 0.9786 0.7992 0.4615 0.7805 >>> x3 - Size: 136B + 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 2 3 @@ -827,7 +827,7 @@ def combine_by_coords( precipitation (y, x) float64 48B 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176 >>> xr.combine_by_coords([x2, x1]) - Size: 248B + 248B Dimensions: (y: 4, x: 3) Coordinates: * y (y) int64 32B 0 1 2 3 @@ -837,7 +837,7 @@ def combine_by_coords( precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805 >>> xr.combine_by_coords([x3, x1]) - Size: 464B + 464B Dimensions: (y: 4, x: 6) Coordinates: * y (y) int64 32B 0 1 2 3 @@ -847,7 +847,7 @@ def combine_by_coords( precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 >>> xr.combine_by_coords([x3, x1], join="override") - Size: 256B + 256B Dimensions: (y: 2, x: 6) Coordinates: * y (y) int64 16B 0 1 @@ -857,7 +857,7 @@ def combine_by_coords( precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 >>> xr.combine_by_coords([x1, x2, x3]) - Size: 464B + 464B Dimensions: (y: 4, x: 6) Coordinates: * y (y) int64 32B 0 1 2 3 @@ -875,7 +875,7 @@ def combine_by_coords( ... name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x" ... ) >>> named_da1 - Size: 16B + 16B array([1., 2.]) Coordinates: * x (x) int64 16B 0 1 @@ -884,13 +884,13 @@ def combine_by_coords( ... name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x" ... 
) >>> named_da2 - Size: 16B + 16B array([3., 4.]) Coordinates: * x (x) int64 16B 2 3 >>> xr.combine_by_coords([named_da1, named_da2]) - Size: 64B + 64B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 @@ -902,7 +902,7 @@ def combine_by_coords( >>> unnamed_da1 = xr.DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") >>> unnamed_da2 = xr.DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") >>> xr.combine_by_coords([unnamed_da1, unnamed_da2]) - Size: 32B + 32B array([1., 2., 3., 4.]) Coordinates: * x (x) int64 32B 0 1 2 3 diff --git a/xarray/core/common.py b/xarray/core/common.py index 7b9a049c662..6730b61552c 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -526,12 +526,12 @@ def assign_coords( ... dims="lon", ... ) >>> da - Size: 32B + 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B 358 359 0 1 >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180)) - Size: 32B + 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B -2 -1 0 1 @@ -539,7 +539,7 @@ def assign_coords( The function also accepts dictionary arguments: >>> da.assign_coords({"lon": (((da.lon + 180) % 360) - 180)}) - Size: 32B + 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B -2 -1 0 1 @@ -548,7 +548,7 @@ def assign_coords( >>> lon_2 = np.array([300, 289, 0, 1]) >>> da.assign_coords(lon_2=("lon", lon_2)) - Size: 32B + 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B 358 359 0 1 @@ -578,7 +578,7 @@ def assign_coords( ... attrs=dict(description="Weather-related data"), ... 
) >>> ds - Size: 360B + 360B Dimensions: (x: 2, y: 2, time: 4) Coordinates: lon (x, y) float64 32B 260.2 260.7 260.2 260.8 @@ -592,7 +592,7 @@ def assign_coords( Attributes: description: Weather-related data >>> ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180)) - Size: 360B + 360B Dimensions: (x: 2, y: 2, time: 4) Coordinates: lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 @@ -642,7 +642,7 @@ def assign_attrs(self, *args: Any, **kwargs: Any) -> Self: -------- >>> dataset = xr.Dataset({"temperature": [25, 30, 27]}) >>> dataset - Size: 24B + 24B Dimensions: (temperature: 3) Coordinates: * temperature (temperature) int64 24B 25 30 27 @@ -653,7 +653,7 @@ def assign_attrs(self, *args: Any, **kwargs: Any) -> Self: ... units="Celsius", description="Temperature data" ... ) >>> new_dataset - Size: 24B + 24B Dimensions: (temperature: 3) Coordinates: * temperature (temperature) int64 24B 25 30 27 @@ -746,7 +746,7 @@ def pipe( ... coords={"lat": [10, 20], "lon": [150, 160]}, ... ) >>> x - Size: 96B + 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 @@ -765,7 +765,7 @@ def pipe( ... return (data * mult_arg) - sub_arg ... >>> x.pipe(adder, 2) - Size: 96B + 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 @@ -775,7 +775,7 @@ def pipe( precipitation (lat, lon) float64 32B 2.424 2.646 2.438 2.892 >>> x.pipe(adder, arg=2) - Size: 96B + 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 @@ -789,7 +789,7 @@ def pipe( ... .pipe(div, arg=2) ... .pipe(sub_mult, sub_arg=2, mult_arg=2) ... ) - Size: 96B + 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 @@ -946,12 +946,12 @@ def _resample( ... dims="time", ... ) >>> da - Size: 96B + 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 
2000-11-15 >>> da.resample(time="QS-DEC").mean() - Size: 32B + 32B array([ 1., 4., 7., 10.]) Coordinates: * time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 2000-09-01 @@ -959,7 +959,7 @@ def _resample( Upsample monthly time-series data to daily data: >>> da.resample(time="1D").interpolate("linear") # +doctest: ELLIPSIS - Size: 3kB + 3kB array([ 0. , 0.03225806, 0.06451613, 0.09677419, 0.12903226, 0.16129032, 0.19354839, 0.22580645, 0.25806452, 0.29032258, 0.32258065, 0.35483871, 0.38709677, 0.41935484, 0.4516129 , @@ -1007,7 +1007,7 @@ def _resample( Limit scope of upsampling method >>> da.resample(time="1D").nearest(tolerance="1D") - Size: 3kB + 3kB array([ 0., 0., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 1., 1., 1., nan, nan, nan, nan, nan, nan, @@ -1131,7 +1131,7 @@ def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self: -------- >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=("x", "y")) >>> a - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -1140,7 +1140,7 @@ def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self: Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 4) - Size: 200B + 200B array([[ 0., 1., 2., 3., nan], [ 5., 6., 7., nan, nan], [10., 11., nan, nan, nan], @@ -1149,7 +1149,7 @@ def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self: Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 5, -1) - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, -1], [10, 11, 12, -1, -1], @@ -1158,7 +1158,7 @@ def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self: Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 4, drop=True) - Size: 128B + 128B array([[ 0., 1., 2., 3.], [ 5., 6., 7., nan], [10., 11., nan, nan], @@ -1166,7 +1166,7 @@ def where(self, cond: Any, other: Any = dtypes.NA, 
drop: bool = False) -> Self: Dimensions without coordinates: x, y >>> a.where(lambda x: x.x + x.y < 4, lambda x: -x) - Size: 200B + 200B array([[ 0, 1, 2, 3, -4], [ 5, 6, 7, -8, -9], [ 10, 11, -12, -13, -14], @@ -1175,7 +1175,7 @@ def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self: Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 4, drop=True) - Size: 128B + 128B array([[ 0., 1., 2., 3.], [ 5., 6., 7., nan], [10., 11., nan, nan], @@ -1272,11 +1272,11 @@ def isnull(self, keep_attrs: bool | None = None) -> Self: -------- >>> array = xr.DataArray([1, np.nan, 3], dims="x") >>> array - Size: 24B + 24B array([ 1., nan, 3.]) Dimensions without coordinates: x >>> array.isnull() - Size: 3B + 3B array([False, True, False]) Dimensions without coordinates: x """ @@ -1315,11 +1315,11 @@ def notnull(self, keep_attrs: bool | None = None) -> Self: -------- >>> array = xr.DataArray([1, np.nan, 3], dims="x") >>> array - Size: 24B + 24B array([ 1., nan, 3.]) Dimensions without coordinates: x >>> array.notnull() - Size: 3B + 3B array([ True, False, True]) Dimensions without coordinates: x """ @@ -1354,7 +1354,7 @@ def isin(self, test_elements: Any) -> Self: -------- >>> array = xr.DataArray([1, 2, 3], dims="x") >>> array.isin([1, 3]) - Size: 3B + 3B array([ True, False, True]) Dimensions without coordinates: x @@ -1584,7 +1584,7 @@ def full_like( ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... 
) >>> x - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -1592,7 +1592,7 @@ def full_like( * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 1) - Size: 48B + 48B array([[1, 1, 1], [1, 1, 1]]) Coordinates: @@ -1600,7 +1600,7 @@ def full_like( * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 0.5) - Size: 48B + 48B array([[0, 0, 0], [0, 0, 0]]) Coordinates: @@ -1608,7 +1608,7 @@ def full_like( * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 0.5, dtype=np.double) - Size: 48B + 48B array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]) Coordinates: @@ -1616,7 +1616,7 @@ def full_like( * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, np.nan, dtype=np.double) - Size: 48B + 48B array([[nan, nan, nan], [nan, nan, nan]]) Coordinates: @@ -1627,7 +1627,7 @@ def full_like( ... {"a": ("x", [3, 5, 2]), "b": ("x", [9, 1, 0])}, coords={"x": [2, 4, 6]} ... ) >>> ds - Size: 72B + 72B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 @@ -1635,7 +1635,7 @@ def full_like( a (x) int64 24B 3 5 2 b (x) int64 24B 9 1 0 >>> xr.full_like(ds, fill_value={"a": 1, "b": 2}) - Size: 72B + 72B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 @@ -1643,7 +1643,7 @@ def full_like( a (x) int64 24B 1 1 1 b (x) int64 24B 2 2 2 >>> xr.full_like(ds, fill_value={"a": 1, "b": 2}, dtype={"a": bool, "b": float}) - Size: 51B + 51B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 @@ -1858,7 +1858,7 @@ def zeros_like( ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... ) >>> x - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -1866,7 +1866,7 @@ def zeros_like( * lon (lon) int64 24B 0 1 2 >>> xr.zeros_like(x) - Size: 48B + 48B array([[0, 0, 0], [0, 0, 0]]) Coordinates: @@ -1874,7 +1874,7 @@ def zeros_like( * lon (lon) int64 24B 0 1 2 >>> xr.zeros_like(x, dtype=float) - Size: 48B + 48B array([[0., 0., 0.], [0., 0., 0.]]) Coordinates: @@ -1995,7 +1995,7 @@ def ones_like( ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... 
) >>> x - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -2003,7 +2003,7 @@ def ones_like( * lon (lon) int64 24B 0 1 2 >>> xr.ones_like(x) - Size: 48B + 48B array([[1, 1, 1], [1, 1, 1]]) Coordinates: diff --git a/xarray/core/computation.py b/xarray/core/computation.py index f09b04b7765..079fbf94b8e 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -1055,7 +1055,7 @@ def apply_ufunc( >>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])]) >>> magnitude(array, -array) - Size: 24B + 24B array([1.41421356, 2.82842712, 4.24264069]) Coordinates: * x (x) float64 24B 0.1 0.2 0.3 @@ -1068,7 +1068,7 @@ def apply_ufunc( >>> magnitude(3, np.array([0, 4])) array([3., 5.]) >>> magnitude(array, 0) - Size: 24B + 24B array([1., 2., 3.]) Coordinates: * x (x) float64 24B 0.1 0.2 0.3 @@ -1324,7 +1324,7 @@ def cov( ... ], ... ) >>> da_a - Size: 72B + 72B array([[1. , 2. , 3. ], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]) @@ -1340,7 +1340,7 @@ def cov( ... ], ... ) >>> da_b - Size: 72B + 72B array([[ 0.2, 0.4, 0.6], [15. , 10. , 5. ], [ 3.2, 0.6, 1.8]]) @@ -1348,10 +1348,10 @@ def cov( * space (space) >> xr.cov(da_a, da_b) - Size: 8B + 8B array(-3.53055556) >>> xr.cov(da_a, da_b, dim="time") - Size: 24B + 24B array([ 0.2 , -0.5 , 1.69333333]) Coordinates: * space (space) >> weights - Size: 24B + 24B array([4, 2, 1]) Coordinates: * space (space) >> xr.cov(da_a, da_b, dim="space", weights=weights) - Size: 24B + 24B array([-4.69346939, -4.49632653, -3.37959184]) Coordinates: * time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03 @@ -1428,7 +1428,7 @@ def corr( ... ], ... ) >>> da_a - Size: 72B + 72B array([[1. , 2. , 3. ], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]) @@ -1444,7 +1444,7 @@ def corr( ... ], ... ) >>> da_b - Size: 72B + 72B array([[ 0.2, 0.4, 0.6], [15. , 10. , 5. 
], [ 3.2, 0.6, 1.8]]) @@ -1452,10 +1452,10 @@ def corr( * space (space) >> xr.corr(da_a, da_b) - Size: 8B + 8B array(-0.57087777) >>> xr.corr(da_a, da_b, dim="time") - Size: 24B + 24B array([ 1., -1., 1.]) Coordinates: * space (space) >> weights - Size: 24B + 24B array([4, 2, 1]) Coordinates: * space (space) >> xr.corr(da_a, da_b, dim="space", weights=weights) - Size: 24B + 24B array([-0.50240504, -0.83215028, -0.99057446]) Coordinates: * time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03 @@ -1581,7 +1581,7 @@ def cross( >>> a = xr.DataArray([1, 2, 3]) >>> b = xr.DataArray([4, 5, 6]) >>> xr.cross(a, b, dim="dim_0") - Size: 24B + 24B array([-3, 6, -3]) Dimensions without coordinates: dim_0 @@ -1591,7 +1591,7 @@ def cross( >>> a = xr.DataArray([1, 2]) >>> b = xr.DataArray([4, 5]) >>> xr.cross(a, b, dim="dim_0") - Size: 8B + 8B array(-3) Vector cross-product with 3 dimensions but zeros at the last axis @@ -1600,7 +1600,7 @@ def cross( >>> a = xr.DataArray([1, 2, 0]) >>> b = xr.DataArray([4, 5, 0]) >>> xr.cross(a, b, dim="dim_0") - Size: 24B + 24B array([ 0, 0, -3]) Dimensions without coordinates: dim_0 @@ -1617,7 +1617,7 @@ def cross( ... coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])), ... ) >>> xr.cross(a, b, dim="cartesian") - Size: 24B + 24B array([12, -6, -3]) Coordinates: * cartesian (cartesian) >> xr.cross(a, b, dim="cartesian") - Size: 24B + 24B array([-10, 2, 5]) Coordinates: * cartesian (cartesian) >> xr.cross(a, b, dim="cartesian") - Size: 48B + 48B array([[-3, 6, -3], [ 3, -6, 3]]) Coordinates: @@ -1678,7 +1678,7 @@ def cross( ... dim="cartesian", ... 
) >>> c.to_dataset(dim="cartesian") - Size: 24B + 24B Dimensions: (dim_0: 1) Dimensions without coordinates: dim_0 Data variables: @@ -1803,14 +1803,14 @@ def dot( >>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"]) >>> da_a - Size: 48B + 48B array([[0, 1], [2, 3], [4, 5]]) Dimensions without coordinates: a, b >>> da_b - Size: 96B + 96B array([[[ 0, 1], [ 2, 3]], @@ -1822,36 +1822,36 @@ def dot( Dimensions without coordinates: a, b, c >>> da_c - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Dimensions without coordinates: c, d >>> xr.dot(da_a, da_b, dim=["a", "b"]) - Size: 16B + 16B array([110, 125]) Dimensions without coordinates: c >>> xr.dot(da_a, da_b, dim=["a"]) - Size: 32B + 32B array([[40, 46], [70, 79]]) Dimensions without coordinates: b, c >>> xr.dot(da_a, da_b, da_c, dim=["b", "c"]) - Size: 72B + 72B array([[ 9, 14, 19], [ 93, 150, 207], [273, 446, 619]]) Dimensions without coordinates: a, d >>> xr.dot(da_a, da_b) - Size: 16B + 16B array([110, 125]) Dimensions without coordinates: c >>> xr.dot(da_a, da_b, dim=...) - Size: 8B + 8B array(235) """ from xarray.core.dataarray import DataArray @@ -1955,13 +1955,13 @@ def where(cond, x, y, keep_attrs=None): ... name="sst", ... ) >>> x - Size: 80B + 80B array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) Coordinates: * lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9 >>> xr.where(x < 0.5, x, x * 100) - Size: 80B + 80B array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ]) Coordinates: * lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9 @@ -1973,7 +1973,7 @@ def where(cond, x, y, keep_attrs=None): ... name="sst", ... ) >>> y - Size: 72B + 72B array([[0. , 0.1, 0.2], [0.3, 0.4, 0.5], [0.6, 0.7, 0.8]]) @@ -1982,7 +1982,7 @@ def where(cond, x, y, keep_attrs=None): * lon (lon) int64 24B 10 11 12 >>> xr.where(y.lat < 1, y, -1) - Size: 72B + 72B array([[ 0. , 0.1, 0.2], [-1. , -1. , -1. ], [-1. , -1. , -1. 
]]) @@ -1993,7 +1993,7 @@ def where(cond, x, y, keep_attrs=None): >>> cond = xr.DataArray([True, False], dims=["x"]) >>> x = xr.DataArray([1, 2], dims=["y"]) >>> xr.where(cond, x, 0) - Size: 32B + 32B array([[1, 2], [0, 0]]) Dimensions without coordinates: x, y diff --git a/xarray/core/concat.py b/xarray/core/concat.py index b1cca586992..49773a84923 100644 --- a/xarray/core/concat.py +++ b/xarray/core/concat.py @@ -183,7 +183,7 @@ def concat( ... np.arange(6).reshape(2, 3), [("x", ["a", "b"]), ("y", [10, 20, 30])] ... ) >>> da - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -191,7 +191,7 @@ def concat( * y (y) int64 24B 10 20 30 >>> xr.concat([da.isel(y=slice(0, 1)), da.isel(y=slice(1, None))], dim="y") - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -199,7 +199,7 @@ def concat( * y (y) int64 24B 10 20 30 >>> xr.concat([da.isel(x=0), da.isel(x=1)], "x") - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -207,7 +207,7 @@ def concat( * y (y) int64 24B 10 20 30 >>> xr.concat([da.isel(x=0), da.isel(x=1)], "new_dim") - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -216,7 +216,7 @@ def concat( Dimensions without coordinates: new_dim >>> xr.concat([da.isel(x=0), da.isel(x=1)], pd.Index([-90, -100], name="new_dim")) - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -228,7 +228,7 @@ def concat( >>> ds = xr.Dataset(coords={"x": 0}) >>> xr.concat([ds, ds], dim="x") - Size: 16B + 16B Dimensions: (x: 2) Coordinates: * x (x) int64 16B 0 0 diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index 251edd1fc6f..806eabc5070 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -253,7 +253,7 @@ class Coordinates(AbstractCoordinates): >>> midx_coords = xr.Coordinates.from_pandas_multiindex(midx, "x") >>> xr.Dataset(coords=midx_coords) - Size: 96B + 96B Dimensions: (x: 4) Coordinates: * x (x) object 32B MultiIndex diff --git a/xarray/core/dataarray.py 
b/xarray/core/dataarray.py index 4dc897c1878..6d02ee8cdd8 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -359,7 +359,7 @@ class DataArray( ... ), ... ) >>> da - Size: 96B + 96B array([[[29.11241877, 18.20125767, 22.82990387], [32.92714559, 29.94046392, 7.18177696]], @@ -378,7 +378,7 @@ class DataArray( Find out where the coldest temperature was: >>> da.isel(da.argmin(...)) - Size: 8B + 8B array(7.18177696) Coordinates: lon float64 8B -99.32 @@ -1034,7 +1034,7 @@ def reset_coords( ... name="Temperature", ... ) >>> da - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -1049,7 +1049,7 @@ def reset_coords( Return Dataset with target coordinate as a data variable rather than a coordinate variable: >>> da.reset_coords(names="Pressure") - Size: 480B + 480B Dimensions: (x: 5, y: 5) Coordinates: lon (x) int64 40B 10 11 12 13 14 @@ -1062,7 +1062,7 @@ def reset_coords( Return DataArray without targeted coordinate: >>> da.reset_coords(names="Pressure", drop=True) - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -1234,19 +1234,19 @@ def copy(self, deep: bool = True, data: Any = None) -> Self: >>> array = xr.DataArray([1, 2, 3], dims="x", coords={"x": ["a", "b", "c"]}) >>> array.copy() - Size: 24B + 24B array([1, 2, 3]) Coordinates: * x (x) >> array_0 = array.copy(deep=False) >>> array_0[0] = 7 >>> array_0 - Size: 24B + 24B array([7, 2, 3]) Coordinates: * x (x) >> array - Size: 24B + 24B array([7, 2, 3]) Coordinates: * x (x) Self: object is unaffected. 
>>> array.copy(data=[0.1, 0.2, 0.3]) - Size: 24B + 24B array([0.1, 0.2, 0.3]) Coordinates: * x (x) >> array - Size: 24B + 24B array([7, 2, 3]) Coordinates: * x (x) >> da = xr.DataArray(np.arange(25).reshape(5, 5), dims=("x", "y")) >>> da - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -1489,7 +1489,7 @@ def isel( >>> tgt_y = xr.DataArray(np.arange(0, 5), dims="points") >>> da = da.isel(x=tgt_x, y=tgt_y) >>> da - Size: 40B + 40B array([ 0, 6, 12, 18, 24]) Dimensions without coordinates: points """ @@ -1619,7 +1619,7 @@ def sel( ... dims=("x", "y"), ... ) >>> da - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -1633,7 +1633,7 @@ def sel( >>> tgt_y = xr.DataArray(np.linspace(0, 4, num=5), dims="points") >>> da = da.sel(x=tgt_x, y=tgt_y, method="nearest") >>> da - Size: 40B + 40B array([ 0, 6, 12, 18, 24]) Coordinates: x (points) int64 40B 0 1 2 3 4 @@ -1670,7 +1670,7 @@ def head( ... dims=("x", "y"), ... ) >>> da - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -1679,12 +1679,12 @@ def head( Dimensions without coordinates: x, y >>> da.head(x=1) - Size: 40B + 40B array([[0, 1, 2, 3, 4]]) Dimensions without coordinates: x, y >>> da.head({"x": 2, "y": 2}) - Size: 32B + 32B array([[0, 1], [5, 6]]) Dimensions without coordinates: x, y @@ -1713,7 +1713,7 @@ def tail( ... dims=("x", "y"), ... ) >>> da - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -1722,7 +1722,7 @@ def tail( Dimensions without coordinates: x, y >>> da.tail(y=1) - Size: 40B + 40B array([[ 4], [ 9], [14], @@ -1731,7 +1731,7 @@ def tail( Dimensions without coordinates: x, y >>> da.tail({"x": 2, "y": 2}) - Size: 32B + 32B array([[18, 19], [23, 24]]) Dimensions without coordinates: x, y @@ -1759,7 +1759,7 @@ def thin( ... coords={"x": [0, 1], "y": np.arange(0, 13)}, ... 
) >>> x - Size: 208B + 208B array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]]) Coordinates: @@ -1768,13 +1768,13 @@ def thin( >>> >>> x.thin(3) - Size: 40B + 40B array([[ 0, 3, 6, 9, 12]]) Coordinates: * x (x) int64 8B 0 * y (y) int64 40B 0 3 6 9 12 >>> x.thin({"x": 2, "y": 5}) - Size: 24B + 24B array([[ 0, 5, 10]]) Coordinates: * x (x) int64 8B 0 @@ -1834,14 +1834,14 @@ def broadcast_like( ... coords={"x": ["a", "b", "c"], "y": ["a", "b"]}, ... ) >>> arr1 - Size: 48B + 48B array([[ 1.76405235, 0.40015721, 0.97873798], [ 2.2408932 , 1.86755799, -0.97727788]]) Coordinates: * x (x) >> arr2 - Size: 48B + 48B array([[ 0.95008842, -0.15135721], [-0.10321885, 0.4105985 ], [ 0.14404357, 1.45427351]]) @@ -1849,7 +1849,7 @@ def broadcast_like( * x (x) >> arr1.broadcast_like(arr2) - Size: 72B + 72B array([[ 1.76405235, 0.40015721, 0.97873798], [ 2.2408932 , 1.86755799, -0.97727788], [ nan, nan, nan]]) @@ -1968,7 +1968,7 @@ def reindex_like( ... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]}, ... ) >>> da1 - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -1982,7 +1982,7 @@ def reindex_like( ... coords={"x": [40, 30, 20, 10], "y": [90, 80, 70]}, ... ) >>> da2 - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -1994,7 +1994,7 @@ def reindex_like( Reindexing with both DataArrays having the same coordinates set, but in different order: >>> da1.reindex_like(da2) - Size: 96B + 96B array([[11, 10, 9], [ 8, 7, 6], [ 5, 4, 3], @@ -2011,7 +2011,7 @@ def reindex_like( ... coords={"x": [20, 10, 29, 39], "y": [70, 80, 90]}, ... 
) >>> da1.reindex_like(da3) - Size: 96B + 96B array([[ 3., 4., 5.], [ 0., 1., 2.], [nan, nan, nan], @@ -2023,7 +2023,7 @@ def reindex_like( Filling missing values with the previous valid index with respect to the coordinates' value: >>> da1.reindex_like(da3, method="ffill") - Size: 96B + 96B array([[3, 4, 5], [0, 1, 2], [3, 4, 5], @@ -2035,7 +2035,7 @@ def reindex_like( Filling missing values while tolerating specified error for inexact matches: >>> da1.reindex_like(da3, method="ffill", tolerance=5) - Size: 96B + 96B array([[ 3., 4., 5.], [ 0., 1., 2.], [nan, nan, nan], @@ -2047,7 +2047,7 @@ def reindex_like( Filling missing values with manually specified values: >>> da1.reindex_like(da3, fill_value=19) - Size: 96B + 96B array([[ 3, 4, 5], [ 0, 1, 2], [19, 19, 19], @@ -2059,7 +2059,7 @@ def reindex_like( Note that unlike ``broadcast_like``, ``reindex_like`` doesn't create new dimensions: >>> da1.sel(x=20) - Size: 24B + 24B array([3, 4, 5]) Coordinates: x int64 8B 20 @@ -2068,7 +2068,7 @@ def reindex_like( ...so ``b`` in not added here: >>> da1.sel(x=20).reindex_like(da1) - Size: 24B + 24B array([3, 4, 5]) Coordinates: x int64 8B 20 @@ -2157,12 +2157,12 @@ def reindex( ... dims="lat", ... ) >>> da - Size: 32B + 32B array([0, 1, 2, 3]) Coordinates: * lat (lat) int64 32B 90 89 88 87 >>> da.reindex(lat=da.lat[::-1]) - Size: 32B + 32B array([3, 2, 1, 0]) Coordinates: * lat (lat) int64 32B 87 88 89 90 @@ -2255,7 +2255,7 @@ def interp( ... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]}, ... ) >>> da - Size: 96B + 96B array([[ 1., 4., 2., 9.], [ 2., 7., 6., nan], [ 6., nan, 5., 8.]]) @@ -2266,7 +2266,7 @@ def interp( 1D linear interpolation (the default): >>> da.interp(x=[0, 0.75, 1.25, 1.75]) - Size: 128B + 128B array([[1. , 4. , 2. , nan], [1.75, 6.25, 5. , nan], [3. 
, nan, 5.75, nan], @@ -2278,7 +2278,7 @@ def interp( 1D nearest interpolation: >>> da.interp(x=[0, 0.75, 1.25, 1.75], method="nearest") - Size: 128B + 128B array([[ 1., 4., 2., 9.], [ 2., 7., 6., nan], [ 2., 7., 6., nan], @@ -2294,7 +2294,7 @@ def interp( ... method="linear", ... kwargs={"fill_value": "extrapolate"}, ... ) - Size: 128B + 128B array([[ 2. , 7. , 6. , nan], [ 4. , nan, 5.5, nan], [ 8. , nan, 4.5, nan], @@ -2306,7 +2306,7 @@ def interp( 2D linear interpolation: >>> da.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method="linear") - Size: 96B + 96B array([[2.5 , 3. , nan], [4. , 5.625, nan], [ nan, nan, nan], @@ -2384,7 +2384,7 @@ def interp_like( ... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]}, ... ) >>> da1 - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -2398,7 +2398,7 @@ def interp_like( ... coords={"x": [10, 20, 29, 39], "y": [70, 80, 90]}, ... ) >>> da2 - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -2410,7 +2410,7 @@ def interp_like( Interpolate the values in the coordinates of the other DataArray with respect to the source's values: >>> da2.interp_like(da1) - Size: 96B + 96B array([[0. , 1. , 2. ], [3. , 4. , 5. ], [6.3, 7.3, 8.3], @@ -2422,7 +2422,7 @@ def interp_like( Could also extrapolate missing values: >>> da2.interp_like(da1, kwargs={"fill_value": "extrapolate"}) - Size: 96B + 96B array([[ 0. , 1. , 2. ], [ 3. , 4. , 5. ], [ 6.3, 7.3, 8.3], @@ -2524,21 +2524,21 @@ def swap_dims( ... coords={"x": ["a", "b"], "y": ("x", [0, 1])}, ... 
) >>> arr - Size: 16B + 16B array([0, 1]) Coordinates: * x (x) >> arr.swap_dims({"x": "y"}) - Size: 16B + 16B array([0, 1]) Coordinates: x (y) >> arr.swap_dims({"x": "z"}) - Size: 16B + 16B array([0, 1]) Coordinates: x (z) >> da = xr.DataArray(np.arange(5), dims=("x")) >>> da - Size: 40B + 40B array([0, 1, 2, 3, 4]) Dimensions without coordinates: x Add new dimension of length 2: >>> da.expand_dims(dim={"y": 2}) - Size: 80B + 80B array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) Dimensions without coordinates: y, x >>> da.expand_dims(dim={"y": 2}, axis=1) - Size: 80B + 80B array([[0, 0], [1, 1], [2, 2], @@ -2631,7 +2631,7 @@ def expand_dims( Add a new dimension with coordinates from array: >>> da.expand_dims(dim={"y": np.arange(5)}, axis=0) - Size: 200B + 200B array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], @@ -2696,7 +2696,7 @@ def set_index( ... coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])}, ... ) >>> arr - Size: 48B + 48B array([[1., 1., 1.], [1., 1., 1.]]) Coordinates: @@ -2704,7 +2704,7 @@ def set_index( * y (y) int64 24B 0 1 2 a (x) int64 16B 3 4 >>> arr.set_index(x="a") - Size: 48B + 48B array([[1., 1., 1.], [1., 1., 1.]]) Coordinates: @@ -2857,7 +2857,7 @@ def stack( ... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])], ... ) >>> arr - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -2924,7 +2924,7 @@ def unstack( ... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])], ... ) >>> arr - Size: 48B + 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: @@ -2976,7 +2976,7 @@ def to_unstacked_dataset(self, dim: Hashable, level: int | Hashable = 0) -> Data ... 
) >>> data = xr.Dataset({"a": arr, "b": arr.isel(y=0)}) >>> data - Size: 96B + 96B Dimensions: (x: 2, y: 3) Coordinates: * x (x) >> da - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -3114,7 +3114,7 @@ def drop_vars( Removing a single variable: >>> da.drop_vars("x") - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -3126,7 +3126,7 @@ def drop_vars( Removing a list of variables: >>> da.drop_vars(["x", "y"]) - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -3134,7 +3134,7 @@ def drop_vars( Dimensions without coordinates: x, y >>> da.drop_vars(lambda x: x.coords) - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -3224,7 +3224,7 @@ def drop_sel( ... dims=("x", "y"), ... ) >>> da - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -3235,7 +3235,7 @@ def drop_sel( * y (y) int64 40B 0 3 6 9 12 >>> da.drop_sel(x=[0, 2], y=9) - Size: 96B + 96B array([[10, 11, 12, 14], [15, 16, 17, 19], [20, 21, 22, 24]]) @@ -3244,7 +3244,7 @@ def drop_sel( * y (y) int64 32B 0 3 6 12 >>> da.drop_sel({"x": 6, "y": [0, 3]}) - Size: 96B + 96B array([[ 2, 3, 4], [ 7, 8, 9], [12, 13, 14], @@ -3283,7 +3283,7 @@ def drop_isel( -------- >>> da = xr.DataArray(np.arange(25).reshape(5, 5), dims=("X", "Y")) >>> da - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -3292,14 +3292,14 @@ def drop_isel( Dimensions without coordinates: X, Y >>> da.drop_isel(X=[0, 4], Y=2) - Size: 96B + 96B array([[ 5, 6, 8, 9], [10, 11, 13, 14], [15, 16, 18, 19]]) Dimensions without coordinates: X, Y >>> da.drop_isel({"X": 3, "Y": 3}) - Size: 128B + 128B array([[ 0, 1, 2, 4], [ 5, 6, 7, 9], [10, 11, 12, 14], @@ -3354,7 +3354,7 @@ def dropna( ... ), ... 
) >>> da - Size: 128B + 128B array([[ 0., 4., 2., 9.], [nan, nan, nan, nan], [nan, 4., 2., 0.], @@ -3365,7 +3365,7 @@ def dropna( Dimensions without coordinates: Y, X >>> da.dropna(dim="Y", how="any") - Size: 64B + 64B array([[0., 4., 2., 9.], [3., 1., 0., 0.]]) Coordinates: @@ -3376,7 +3376,7 @@ def dropna( Drop values only if all values along the dimension are NaN: >>> da.dropna(dim="Y", how="all") - Size: 96B + 96B array([[ 0., 4., 2., 9.], [nan, 4., 2., 0.], [ 3., 1., 0., 0.]]) @@ -3418,7 +3418,7 @@ def fillna(self, value: Any) -> Self: ... ), ... ) >>> da - Size: 48B + 48B array([ 1., 4., nan, 0., 3., nan]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 @@ -3427,7 +3427,7 @@ def fillna(self, value: Any) -> Self: Fill all NaN values with 0: >>> da.fillna(0) - Size: 48B + 48B array([1., 4., 0., 0., 3., 0.]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 @@ -3436,7 +3436,7 @@ def fillna(self, value: Any) -> Self: Fill NaN values with corresponding values in array: >>> da.fillna(np.array([2, 9, 4, 2, 8, 9])) - Size: 48B + 48B array([1., 4., 4., 0., 3., 9.]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 @@ -3544,19 +3544,19 @@ def interpolate_na( ... [np.nan, 2, 3, np.nan, 0], dims="x", coords={"x": [0, 1, 2, 3, 4]} ... ) >>> da - Size: 40B + 40B array([nan, 2., 3., nan, 0.]) Coordinates: * x (x) int64 40B 0 1 2 3 4 >>> da.interpolate_na(dim="x", method="linear") - Size: 40B + 40B array([nan, 2. , 3. , 1.5, 0. ]) Coordinates: * x (x) int64 40B 0 1 2 3 4 >>> da.interpolate_na(dim="x", method="linear", fill_value="extrapolate") - Size: 40B + 40B array([1. , 2. , 3. , 1.5, 0. ]) Coordinates: * x (x) int64 40B 0 1 2 3 4 @@ -3615,7 +3615,7 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: ... ), ... 
) >>> da - Size: 120B + 120B array([[nan, 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], @@ -3629,7 +3629,7 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: Fill all NaN values: >>> da.ffill(dim="Y", limit=None) - Size: 120B + 120B array([[nan, 1., 3.], [ 0., 1., 5.], [ 5., 1., 5.], @@ -3643,7 +3643,7 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: Fill only the first of consecutive NaN values: >>> da.ffill(dim="Y", limit=1) - Size: 120B + 120B array([[nan, 1., 3.], [ 0., 1., 5.], [ 5., nan, 5.], @@ -3699,7 +3699,7 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: ... ), ... ) >>> da - Size: 120B + 120B array([[ 0., 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], @@ -3713,7 +3713,7 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: Fill all NaN values: >>> da.bfill(dim="Y", limit=None) - Size: 120B + 120B array([[ 0., 1., 3.], [ 0., 2., 5.], [ 5., 2., 0.], @@ -3727,7 +3727,7 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: Fill only the first of consecutive NaN values: >>> da.bfill(dim="Y", limit=1) - Size: 120B + 120B array([[ 0., 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], @@ -4406,7 +4406,7 @@ def from_dict(cls, d: Mapping[str, Any]) -> Self: >>> d = {"dims": "t", "data": [1, 2, 3]} >>> da = xr.DataArray.from_dict(d) >>> da - Size: 24B + 24B array([1, 2, 3]) Dimensions without coordinates: t @@ -4421,7 +4421,7 @@ def from_dict(cls, d: Mapping[str, Any]) -> Self: ... 
} >>> da = xr.DataArray.from_dict(d) >>> da - Size: 24B + 24B array([10, 20, 30]) Coordinates: * t (t) int64 24B 0 1 2 @@ -4528,11 +4528,11 @@ def broadcast_equals(self, other: Self) -> bool: >>> a = xr.DataArray([1, 2], dims="X") >>> b = xr.DataArray([[1, 1], [2, 2]], dims=["X", "Y"]) >>> a - Size: 16B + 16B array([1, 2]) Dimensions without coordinates: X >>> b - Size: 32B + 32B array([[1, 1], [2, 2]]) Dimensions without coordinates: X, Y @@ -4584,21 +4584,21 @@ def equals(self, other: Self) -> bool: >>> c = xr.DataArray([1, 2, 3], dims="Y") >>> d = xr.DataArray([3, 2, 1], dims="X") >>> a - Size: 24B + 24B array([1, 2, 3]) Dimensions without coordinates: X >>> b - Size: 24B + 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> c - Size: 24B + 24B array([1, 2, 3]) Dimensions without coordinates: Y >>> d - Size: 24B + 24B array([3, 2, 1]) Dimensions without coordinates: X @@ -4639,19 +4639,19 @@ def identical(self, other: Self) -> bool: >>> b = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m"), name="Width") >>> c = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="ft"), name="Width") >>> a - Size: 24B + 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> b - Size: 24B + 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> c - Size: 24B + 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: @@ -4825,12 +4825,12 @@ def diff( -------- >>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ["x"]) >>> arr.diff("x") - Size: 24B + 24B array([0, 1, 0]) Coordinates: * x (x) int64 24B 2 3 4 >>> arr.diff("x", 2) - Size: 16B + 16B array([ 1, -1]) Coordinates: * x (x) int64 16B 3 4 @@ -4883,7 +4883,7 @@ def shift( -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.shift(x=1) - Size: 24B + 24B array([nan, 5., 6.]) Dimensions without coordinates: x """ @@ -4932,7 +4932,7 @@ def roll( -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.roll(x=1) - Size: 24B + 
24B array([7, 5, 6]) Dimensions without coordinates: x """ @@ -5074,19 +5074,19 @@ def sortby( ... dims="time", ... ) >>> da - Size: 40B + 40B array([5, 4, 3, 2, 1]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-01 2000-01-02 ... 2000-01-05 >>> da.sortby(da) - Size: 40B + 40B array([1, 2, 3, 4, 5]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01 >>> da.sortby(lambda x: x) - Size: 40B + 40B array([1, 2, 3, 4, 5]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01 @@ -5178,23 +5178,23 @@ def quantile( ... dims=("x", "y"), ... ) >>> da.quantile(0) # or da.quantile(0, dim=...) - Size: 8B + 8B array(0.7) Coordinates: quantile float64 8B 0.0 >>> da.quantile(0, dim="x") - Size: 32B + 32B array([0.7, 4.2, 2.6, 1.5]) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 quantile float64 8B 0.0 >>> da.quantile([0, 0.5, 1]) - Size: 24B + 24B array([0.7, 3.4, 9.4]) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 >>> da.quantile([0, 0.5, 1], dim="x") - Size: 96B + 96B array([[0.7 , 4.2 , 2.6 , 1.5 ], [3.6 , 5.75, 6. , 1.7 ], [6.5 , 7.3 , 9.4 , 1.9 ]]) @@ -5257,7 +5257,7 @@ def rank( -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.rank("x") - Size: 24B + 24B array([1., 2., 3.]) Dimensions without coordinates: x """ @@ -5306,7 +5306,7 @@ def differentiate( ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... ) >>> da - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -5316,7 +5316,7 @@ def differentiate( Dimensions without coordinates: y >>> >>> da.differentiate("x") - Size: 96B + 96B array([[30. , 30. , 30. ], [27.54545455, 27.54545455, 27.54545455], [27.54545455, 27.54545455, 27.54545455], @@ -5365,7 +5365,7 @@ def integrate( ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... 
) >>> da - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -5375,7 +5375,7 @@ def integrate( Dimensions without coordinates: y >>> >>> da.integrate("x") - Size: 24B + 24B array([5.4, 6.6, 7.8]) Dimensions without coordinates: y """ @@ -5422,7 +5422,7 @@ def cumulative_integrate( ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... ) >>> da - Size: 96B + 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], @@ -5432,7 +5432,7 @@ def cumulative_integrate( Dimensions without coordinates: y >>> >>> da.cumulative_integrate("x") - Size: 96B + 96B array([[0. , 0. , 0. ], [0.15, 0.25, 0.35], [4.65, 5.75, 6.85], @@ -5539,7 +5539,7 @@ def map_blocks( ... coords={"time": time, "month": month}, ... ).chunk() >>> array.map_blocks(calculate_anomaly, template=array).compute() - Size: 192B + 192B array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862, 0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714, -0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 , @@ -5555,7 +5555,7 @@ def map_blocks( >>> array.map_blocks( ... calculate_anomaly, kwargs={"groupby_type": "time.year"}, template=array ... ) # doctest: +ELLIPSIS - Size: 192B + 192B dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 @@ -5750,7 +5750,7 @@ def pad( -------- >>> arr = xr.DataArray([5, 6, 7], coords=[("x", [0, 1, 2])]) >>> arr.pad(x=(1, 2), constant_values=0) - Size: 48B + 48B array([0, 5, 6, 7, 0, 0]) Coordinates: * x (x) float64 48B nan 0.0 1.0 2.0 nan nan @@ -5761,7 +5761,7 @@ def pad( ... coords={"x": [0, 1], "y": [10, 20, 30, 40], "z": ("x", [100, 200])}, ... 
) >>> da.pad(x=1) - Size: 128B + 128B array([[nan, nan, nan, nan], [ 0., 1., 2., 3.], [10., 11., 12., 13.], @@ -5775,7 +5775,7 @@ def pad( lead to a loss of precision: >>> da.pad(x=1, constant_values=1.23456789) - Size: 128B + 128B array([[ 1, 1, 1, 1], [ 0, 1, 2, 3], [10, 11, 12, 13], @@ -5852,13 +5852,13 @@ def idxmin( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array.min() - Size: 8B + 8B array(-2) >>> array.argmin(...) - {'x': Size: 8B + {'x': 8B array(4)} >>> array.idxmin() - Size: 4B + 4B array('e', dtype='>> array = xr.DataArray( @@ -5871,17 +5871,17 @@ def idxmin( ... coords={"y": [-1, 0, 1], "x": np.arange(5.0) ** 2}, ... ) >>> array.min(dim="x") - Size: 24B + 24B array([-2., -4., 1.]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.argmin(dim="x") - Size: 24B + 24B array([4, 0, 2]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.idxmin(dim="x") - Size: 24B + 24B array([16., 0., 4.]) Coordinates: * y (y) int64 24B -1 0 1 @@ -5950,13 +5950,13 @@ def idxmax( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array.max() - Size: 8B + 8B array(2) >>> array.argmax(...) - {'x': Size: 8B + {'x': 8B array(1)} >>> array.idxmax() - Size: 4B + 4B array('b', dtype='>> array = xr.DataArray( @@ -5969,17 +5969,17 @@ def idxmax( ... coords={"y": [-1, 0, 1], "x": np.arange(5.0) ** 2}, ... ) >>> array.max(dim="x") - Size: 24B + 24B array([2., 2., 1.]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.argmax(dim="x") - Size: 24B + 24B array([0, 2, 2]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.idxmax(dim="x") - Size: 24B + 24B array([0., 4., 4.]) Coordinates: * y (y) int64 24B -1 0 1 @@ -6043,13 +6043,13 @@ def argmin( -------- >>> array = xr.DataArray([0, 2, -1, 3], dims="x") >>> array.min() - Size: 8B + 8B array(-1) >>> array.argmin(...) - {'x': Size: 8B + {'x': 8B array(2)} >>> array.isel(array.argmin(...)) - Size: 8B + 8B array(-1) >>> array = xr.DataArray( @@ -6057,35 +6057,35 @@ def argmin( ... 
dims=("x", "y", "z"), ... ) >>> array.min(dim="x") - Size: 72B + 72B array([[ 1, 2, 1], [ 2, -5, 1], [ 2, 1, 1]]) Dimensions without coordinates: y, z >>> array.argmin(dim="x") - Size: 72B + 72B array([[1, 0, 0], [1, 1, 1], [0, 0, 1]]) Dimensions without coordinates: y, z >>> array.argmin(dim=["x"]) - {'x': Size: 72B + {'x': 72B array([[1, 0, 0], [1, 1, 1], [0, 0, 1]]) Dimensions without coordinates: y, z} >>> array.min(dim=("x", "z")) - Size: 24B + 24B array([ 1, -5, 1]) Dimensions without coordinates: y >>> array.argmin(dim=["x", "z"]) - {'x': Size: 24B + {'x': 24B array([0, 1, 0]) - Dimensions without coordinates: y, 'z': Size: 24B + Dimensions without coordinates: y, 'z': 24B array([2, 1, 1]) Dimensions without coordinates: y} >>> array.isel(array.argmin(dim=["x", "z"])) - Size: 24B + 24B array([ 1, -5, 1]) Dimensions without coordinates: y """ @@ -6145,13 +6145,13 @@ def argmax( -------- >>> array = xr.DataArray([0, 2, -1, 3], dims="x") >>> array.max() - Size: 8B + 8B array(3) >>> array.argmax(...) - {'x': Size: 8B + {'x': 8B array(3)} >>> array.isel(array.argmax(...)) - Size: 8B + 8B array(3) >>> array = xr.DataArray( @@ -6159,35 +6159,35 @@ def argmax( ... dims=("x", "y", "z"), ... 
) >>> array.max(dim="x") - Size: 72B + 72B array([[3, 3, 2], [3, 5, 2], [2, 3, 3]]) Dimensions without coordinates: y, z >>> array.argmax(dim="x") - Size: 72B + 72B array([[0, 1, 1], [0, 1, 0], [0, 1, 0]]) Dimensions without coordinates: y, z >>> array.argmax(dim=["x"]) - {'x': Size: 72B + {'x': 72B array([[0, 1, 1], [0, 1, 0], [0, 1, 0]]) Dimensions without coordinates: y, z} >>> array.max(dim=("x", "z")) - Size: 24B + 24B array([3, 5, 3]) Dimensions without coordinates: y >>> array.argmax(dim=["x", "z"]) - {'x': Size: 24B + {'x': 24B array([0, 1, 0]) - Dimensions without coordinates: y, 'z': Size: 24B + Dimensions without coordinates: y, 'z': 24B array([0, 1, 2]) Dimensions without coordinates: y} >>> array.isel(array.argmax(dim=["x", "z"])) - Size: 24B + 24B array([3, 5, 3]) Dimensions without coordinates: y """ @@ -6257,11 +6257,11 @@ def query( -------- >>> da = xr.DataArray(np.arange(0, 5, 1), dims="x", name="a") >>> da - Size: 40B + 40B array([0, 1, 2, 3, 4]) Dimensions without coordinates: x >>> da.query(x="a > 2") - Size: 16B + 16B array([3, 4]) Dimensions without coordinates: x """ @@ -6369,7 +6369,7 @@ def curvefit( ... coords={"x": [0, 1, 2], "time": t}, ... ) >>> da - Size: 264B + 264B array([[ 0.1012573 , 0.0354669 , 0.01993775, 0.00602771, -0.00352513, 0.00428975, 0.01328788, 0.009562 , -0.00700381, -0.01264187, -0.0062282 ], @@ -6389,13 +6389,13 @@ def curvefit( >>> fit_result["curvefit_coefficients"].sel( ... param="time_constant" ... ) # doctest: +NUMBER - Size: 24B + 24B array([1.05692036, 1.73549638, 2.94215771]) Coordinates: * x (x) int64 24B 0 1 2 param >> fit_result["curvefit_coefficients"].sel(param="amplitude") - Size: 24B + 24B array([0.1005489 , 0.19631423, 0.30003579]) Coordinates: * x (x) int64 24B 0 1 2 @@ -6415,13 +6415,13 @@ def curvefit( ... }, ... 
) >>> fit_result["curvefit_coefficients"].sel(param="time_constant") - Size: 24B + 24B array([1.0569213 , 1.73550052, 2.94215733]) Coordinates: * x (x) int64 24B 0 1 2 param >> fit_result["curvefit_coefficients"].sel(param="amplitude") - Size: 24B + 24B array([0.10054889, 0.1963141 , 0.3000358 ]) Coordinates: * x (x) int64 24B 0 1 2 @@ -6480,7 +6480,7 @@ def drop_duplicates( ... coords={"x": np.array([0, 0, 1, 2, 3]), "y": np.array([0, 1, 2, 3, 3])}, ... ) >>> da - Size: 200B + 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], @@ -6491,7 +6491,7 @@ def drop_duplicates( * y (y) int64 40B 0 1 2 3 3 >>> da.drop_duplicates(dim="x") - Size: 160B + 160B array([[ 0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], @@ -6501,7 +6501,7 @@ def drop_duplicates( * y (y) int64 40B 0 1 2 3 3 >>> da.drop_duplicates(dim="x", keep="last") - Size: 160B + 160B array([[ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], @@ -6513,7 +6513,7 @@ def drop_duplicates( Drop all duplicate dimension values: >>> da.drop_duplicates(dim=...) - Size: 128B + 128B array([[ 0, 1, 2, 3], [10, 11, 12, 13], [15, 16, 17, 18], @@ -6716,13 +6716,13 @@ def groupby( ... dims="time", ... ) >>> da - Size: 15kB + 15kB array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03]) Coordinates: * time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31 >>> da.groupby("time.dayofyear") - da.groupby("time.dayofyear").mean("time") - Size: 15kB + 15kB array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5]) Coordinates: * time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31 @@ -6937,12 +6937,12 @@ def rolling( ... dims="time", ... ) >>> da - Size: 96B + 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 
2000-11-15 >>> da.rolling(time=3, center=True).mean() - Size: 96B + 96B array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 @@ -6950,7 +6950,7 @@ def rolling( Remove the NaNs using ``dropna()``: >>> da.rolling(time=3, center=True).mean().dropna("time") - Size: 80B + 80B array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-15 2000-02-15 ... 2000-10-15 @@ -7004,13 +7004,13 @@ def cumulative( ... ) >>> da - Size: 96B + 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.cumulative("time").sum() - Size: 96B + 96B array([ 0., 1., 3., 6., 10., 15., 21., 28., 36., 45., 55., 66.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 @@ -7079,7 +7079,7 @@ def coarsen( ... coords={"time": pd.date_range("1999-12-15", periods=364)}, ... ) >>> da # +doctest: ELLIPSIS - Size: 3kB + 3kB array([ 0. , 1.00275482, 2.00550964, 3.00826446, 4.01101928, 5.0137741 , 6.01652893, 7.01928375, 8.02203857, 9.02479339, 10.02754821, 11.03030303, @@ -7124,7 +7124,7 @@ def coarsen( Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-12-12 >>> da.coarsen(time=3, boundary="trim").mean() # +doctest: ELLIPSIS - Size: 968B + 968B array([ 1.00275482, 4.01101928, 7.01928375, 10.02754821, 13.03581267, 16.04407713, 19.0523416 , 22.06060606, 25.06887052, 28.07713499, 31.08539945, 34.09366391, @@ -7270,12 +7270,12 @@ def resample( ... dims="time", ... ) >>> da - Size: 96B + 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.resample(time="QS-DEC").mean() - Size: 32B + 32B array([ 1., 4., 7., 10.]) Coordinates: * time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 
2000-09-01 @@ -7283,7 +7283,7 @@ def resample( Upsample monthly time-series data to daily data: >>> da.resample(time="1D").interpolate("linear") # +doctest: ELLIPSIS - Size: 3kB + 3kB array([ 0. , 0.03225806, 0.06451613, 0.09677419, 0.12903226, 0.16129032, 0.19354839, 0.22580645, 0.25806452, 0.29032258, 0.32258065, 0.35483871, 0.38709677, 0.41935484, 0.4516129 , @@ -7331,7 +7331,7 @@ def resample( Limit scope of upsampling method >>> da.resample(time="1D").nearest(tolerance="1D") - Size: 3kB + 3kB array([ 0., 0., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 1., 1., 1., nan, nan, nan, nan, nan, nan, diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 09597670573..284a8bcb14d 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -291,7 +291,6 @@ def _maybe_chunk( chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, ): - from xarray.namedarray.daskmanager import DaskManager if chunks is not None: @@ -629,7 +628,7 @@ class Dataset( ... attrs=dict(description="Weather related data."), ... ) >>> ds - Size: 552B + 552B Dimensions: (loc: 2, instrument: 3, time: 4) Coordinates: lon (loc) float64 16B -99.83 -99.32 @@ -648,7 +647,7 @@ class Dataset( other variables had: >>> ds.isel(ds.temperature.argmin(...)) - Size: 80B + 80B Dimensions: () Coordinates: lon float64 8B -99.32 @@ -1301,7 +1300,7 @@ def copy(self, deep: bool = False, data: DataVars | None = None) -> Self: ... coords={"x": ["one", "two"]}, ... ) >>> ds.copy() - Size: 88B + 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) Self: >>> ds_0 = ds.copy(deep=False) >>> ds_0["foo"][0, 0] = 7 >>> ds_0 - Size: 88B + 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) Self: bar (x) int64 16B -1 2 >>> ds - Size: 88B + 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) Self: object is unaffected. 
>>> ds.copy(data={"foo": np.arange(6).reshape(2, 3), "bar": ["a", "b"]}) - Size: 80B + 80B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) Self: bar (x) >> ds - Size: 88B + 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) bool: ... coords={"space": [0], "time": [0, 1, 2]}, ... ) >>> a - Size: 56B + 56B Dimensions: (space: 1, time: 3) Coordinates: * space (space) int64 8B 0 @@ -1778,7 +1777,7 @@ def broadcast_equals(self, other: Self) -> bool: ... coords={"time": [0, 1, 2], "space": [0]}, ... ) >>> b - Size: 56B + 56B Dimensions: (time: 3, space: 1) Coordinates: * time (time) int64 24B 0 1 2 @@ -1831,7 +1830,7 @@ def equals(self, other: Self) -> bool: ... coords={"space": [0], "time": [0, 1, 2]}, ... ) >>> dataset1 - Size: 56B + 56B Dimensions: (space: 1, time: 3) Coordinates: * space (space) int64 8B 0 @@ -1847,7 +1846,7 @@ def equals(self, other: Self) -> bool: ... coords={"time": [0, 1, 2], "space": [0]}, ... ) >>> dataset2 - Size: 56B + 56B Dimensions: (time: 3, space: 1) Coordinates: * time (time) int64 24B 0 1 2 @@ -1914,7 +1913,7 @@ def identical(self, other: Self) -> bool: ... attrs={"units": "ft"}, ... ) >>> a - Size: 48B + 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 @@ -1924,7 +1923,7 @@ def identical(self, other: Self) -> bool: units: m >>> b - Size: 48B + 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 @@ -1934,7 +1933,7 @@ def identical(self, other: Self) -> bool: units: m >>> c - Size: 48B + 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 @@ -2021,7 +2020,7 @@ def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self: ... } ... 
) >>> dataset - Size: 48B + 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03 @@ -2029,7 +2028,7 @@ def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self: pressure (time) float64 24B 1.013 1.2 3.5 >>> dataset.set_coords("pressure") - Size: 48B + 48B Dimensions: (time: 3) Coordinates: pressure (time) float64 24B 1.013 1.2 3.5 @@ -2101,7 +2100,7 @@ def reset_coords( # Dataset before resetting coordinates >>> dataset - Size: 184B + 184B Dimensions: (time: 2, lat: 2, lon: 2) Coordinates: * time (time) datetime64[ns] 16B 2023-01-01 2023-01-02 @@ -2119,7 +2118,7 @@ def reset_coords( # Dataset after resetting coordinates >>> dataset_reset - Size: 184B + 184B Dimensions: (time: 2, lat: 2, lon: 2) Coordinates: * time (time) datetime64[ns] 16B 2023-01-01 2023-01-02 @@ -2921,7 +2920,7 @@ def isel( # A specific element from the dataset is selected >>> dataset.isel(student=1, test=0) - Size: 68B + 68B Dimensions: () Coordinates: student >> slice_of_data = dataset.isel(student=slice(0, 2), test=slice(0, 2)) >>> slice_of_data - Size: 168B + 168B Dimensions: (student: 2, test: 2) Coordinates: * student (student) >> index_array = xr.DataArray([0, 2], dims="student") >>> indexed_data = dataset.isel(student=index_array) >>> indexed_data - Size: 224B + 224B Dimensions: (student: 2, test: 3) Coordinates: * student (student) >> busiest_days = dataset.sortby("pageviews", ascending=False) >>> busiest_days.head() - Size: 120B + 120B Dimensions: (date: 5) Coordinates: * date (date) datetime64[ns] 40B 2023-01-05 2023-01-04 ... 
2023-01-03 @@ -3182,7 +3181,7 @@ def head( # Retrieve the 3 most busiest days in terms of pageviews >>> busiest_days.head(3) - Size: 72B + 72B Dimensions: (date: 3) Coordinates: * date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02 @@ -3193,7 +3192,7 @@ def head( # Using a dictionary to specify the number of elements for specific dimensions >>> busiest_days.head({"date": 3}) - Size: 72B + 72B Dimensions: (date: 3) Coordinates: * date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02 @@ -3261,7 +3260,7 @@ def tail( ... ) >>> sorted_dataset = dataset.sortby("energy_expenditure", ascending=False) >>> sorted_dataset - Size: 240B + 240B Dimensions: (activity: 5) Coordinates: * activity (activity) >> sorted_dataset.tail(3) - Size: 144B + 144B Dimensions: (activity: 3) Coordinates: * activity (activity) >> sorted_dataset.tail({"activity": 3}) - Size: 144B + 144B Dimensions: (activity: 3) Coordinates: * activity (activity) >> x_ds = xr.Dataset({"foo": x}) >>> x_ds - Size: 328B + 328B Dimensions: (x: 2, y: 13) Coordinates: * x (x) int64 16B 0 1 @@ -3360,7 +3359,7 @@ def thin( foo (x, y) int64 208B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 25 >>> x_ds.thin(3) - Size: 88B + 88B Dimensions: (x: 1, y: 5) Coordinates: * x (x) int64 8B 0 @@ -3368,7 +3367,7 @@ def thin( Data variables: foo (x, y) int64 40B 0 3 6 9 12 >>> x.thin({"x": 2, "y": 5}) - Size: 24B + 24B array([[ 0, 5, 10]]) Coordinates: * x (x) int64 8B 0 @@ -3636,7 +3635,7 @@ def reindex( ... coords={"station": ["boston", "nyc", "seattle", "denver"]}, ... ) >>> x - Size: 176B + 176B Dimensions: (station: 4) Coordinates: * station (station) >> new_index = ["boston", "austin", "seattle", "lincoln"] >>> x.reindex({"station": new_index}) - Size: 176B + 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.reindex({"station": new_index}, fill_value=0) - Size: 176B + 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.reindex( ... 
{"station": new_index}, fill_value={"temperature": 0, "pressure": 100} ... ) - Size: 176B + 176B Dimensions: (station: 4) Coordinates: * station (station) >> x2 - Size: 144B + 144B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2019-01-01 2019-01-02 ... 2019-01-06 @@ -3719,7 +3718,7 @@ def reindex( >>> time_index2 = pd.date_range("12/29/2018", periods=10, freq="D") >>> x2.reindex({"time": time_index2}) - Size: 240B + 240B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07 @@ -3735,7 +3734,7 @@ def reindex( >>> x3 = x2.reindex({"time": time_index2}, method="bfill") >>> x3 - Size: 240B + 240B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07 @@ -3747,7 +3746,7 @@ def reindex( will not be filled by any of the value propagation schemes. >>> x2.where(x2.temperature.isnull(), drop=True) - Size: 24B + 24B Dimensions: (time: 1) Coordinates: * time (time) datetime64[ns] 8B 2019-01-03 @@ -3755,7 +3754,7 @@ def reindex( temperature (time) float64 8B nan pressure (time) float64 8B 395.9 >>> x3.where(x3.temperature.isnull(), drop=True) - Size: 48B + 48B Dimensions: (time: 2) Coordinates: * time (time) datetime64[ns] 16B 2019-01-03 2019-01-07 @@ -3888,7 +3887,7 @@ def interp( ... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]}, ... ) >>> ds - Size: 176B + 176B Dimensions: (x: 3, y: 4) Coordinates: * x (x) int64 24B 0 1 2 @@ -3900,7 +3899,7 @@ def interp( 1D interpolation with the default method (linear): >>> ds.interp(x=[0, 0.75, 1.25, 1.75]) - Size: 224B + 224B Dimensions: (x: 4, y: 4) Coordinates: * y (y) int64 32B 10 12 14 16 @@ -3912,7 +3911,7 @@ def interp( 1D interpolation with a different method: >>> ds.interp(x=[0, 0.75, 1.25, 1.75], method="nearest") - Size: 224B + 224B Dimensions: (x: 4, y: 4) Coordinates: * y (y) int64 32B 10 12 14 16 @@ -3928,7 +3927,7 @@ def interp( ... method="linear", ... kwargs={"fill_value": "extrapolate"}, ... 
) - Size: 224B + 224B Dimensions: (x: 4, y: 4) Coordinates: * y (y) int64 32B 10 12 14 16 @@ -3940,7 +3939,7 @@ def interp( 2D interpolation: >>> ds.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method="linear") - Size: 184B + 184B Dimensions: (x: 4, y: 3) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 @@ -4428,7 +4427,7 @@ def swap_dims( ... coords={"x": ["a", "b"], "y": ("x", [0, 1])}, ... ) >>> ds - Size: 56B + 56B Dimensions: (x: 2) Coordinates: * x (x) >> ds.swap_dims({"x": "y"}) - Size: 56B + 56B Dimensions: (y: 2) Coordinates: x (y) >> ds.swap_dims({"x": "z"}) - Size: 56B + 56B Dimensions: (z: 2) Coordinates: x (z) >> dataset = xr.Dataset({"temperature": ([], 25.0)}) >>> dataset - Size: 8B + 8B Dimensions: () Data variables: temperature float64 8B 25.0 @@ -4566,7 +4565,7 @@ def expand_dims( # Expand the dataset with a new dimension called "time" >>> dataset.expand_dims(dim="time") - Size: 8B + 8B Dimensions: (time: 1) Dimensions without coordinates: time Data variables: @@ -4577,7 +4576,7 @@ def expand_dims( >>> temperature_1d = xr.DataArray([25.0, 26.5, 24.8], dims="x") >>> dataset_1d = xr.Dataset({"temperature": temperature_1d}) >>> dataset_1d - Size: 24B + 24B Dimensions: (x: 3) Dimensions without coordinates: x Data variables: @@ -4586,7 +4585,7 @@ def expand_dims( # Expand the dataset with a new dimension called "time" using axis argument >>> dataset_1d.expand_dims(dim="time", axis=0) - Size: 24B + 24B Dimensions: (time: 1, x: 3) Dimensions without coordinates: time, x Data variables: @@ -4597,7 +4596,7 @@ def expand_dims( >>> temperature_2d = xr.DataArray(np.random.rand(3, 4), dims=("y", "x")) >>> dataset_2d = xr.Dataset({"temperature": temperature_2d}) >>> dataset_2d - Size: 96B + 96B Dimensions: (y: 3, x: 4) Dimensions without coordinates: y, x Data variables: @@ -4606,7 +4605,7 @@ def expand_dims( # Expand the dataset with a new dimension called "time" using axis argument >>> dataset_2d.expand_dims(dim="time", axis=2) - Size: 96B + 96B 
Dimensions: (y: 3, x: 4, time: 1) Dimensions without coordinates: y, x, time Data variables: @@ -4616,7 +4615,7 @@ def expand_dims( >>> ds = xr.Dataset(coords={"x": 0}) >>> ds - Size: 8B + 8B Dimensions: () Coordinates: x int64 8B 0 @@ -4624,7 +4623,7 @@ def expand_dims( *empty* >>> ds.expand_dims("x") - Size: 8B + 8B Dimensions: (x: 1) Coordinates: * x (x) int64 8B 0 @@ -4795,7 +4794,7 @@ def set_index( ... ) >>> ds = xr.Dataset({"v": arr}) >>> ds - Size: 104B + 104B Dimensions: (x: 2, y: 3) Coordinates: * x (x) int64 16B 0 1 @@ -4804,7 +4803,7 @@ def set_index( Data variables: v (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0 >>> ds.set_index(x="a") - Size: 88B + 88B Dimensions: (x: 2, y: 3) Coordinates: * x (x) int64 16B 3 4 @@ -5412,7 +5411,7 @@ def to_stacked_array( ... ) >>> data - Size: 76B + 76B Dimensions: (x: 2, y: 3) Coordinates: * y (y) >> data.to_stacked_array("z", sample_dims=["x"]) - Size: 64B + 64B array([[0, 1, 2, 6], [3, 4, 5, 7]]) Coordinates: @@ -5856,7 +5855,7 @@ def drop_vars( ... }, ... 
) >>> dataset - Size: 136B + 136B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 @@ -5870,7 +5869,7 @@ def drop_vars( Drop the 'humidity' variable >>> dataset.drop_vars(["humidity"]) - Size: 104B + 104B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 @@ -5883,7 +5882,7 @@ def drop_vars( Drop the 'humidity', 'temperature' variables >>> dataset.drop_vars(["humidity", "temperature"]) - Size: 72B + 72B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 @@ -5895,7 +5894,7 @@ def drop_vars( Drop all indexes >>> dataset.drop_vars(lambda x: x.indexes) - Size: 96B + 96B Dimensions: (time: 1, latitude: 2, longitude: 2) Dimensions without coordinates: time, latitude, longitude Data variables: @@ -5906,7 +5905,7 @@ def drop_vars( Attempt to drop non-existent variable with errors="ignore" >>> dataset.drop_vars(["pressure"], errors="ignore") - Size: 136B + 136B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 @@ -6112,7 +6111,7 @@ def drop_sel( >>> labels = ["a", "b", "c"] >>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels}) >>> ds - Size: 60B + 60B Dimensions: (x: 2, y: 3) Coordinates: * y (y) >> ds.drop_sel(y=["a", "c"]) - Size: 20B + 20B Dimensions: (x: 2, y: 1) Coordinates: * y (y) >> ds.drop_sel(y="b") - Size: 40B + 40B Dimensions: (x: 2, y: 2) Coordinates: * y (y) Self: >>> labels = ["a", "b", "c"] >>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels}) >>> ds - Size: 60B + 60B Dimensions: (x: 2, y: 3) Coordinates: * y (y) Self: Data variables: A (x, y) int64 48B 0 1 2 3 4 5 >>> ds.drop_isel(y=[0, 2]) - Size: 20B + 20B Dimensions: (x: 2, y: 1) Coordinates: * y (y) Self: Data variables: A (x, y) int64 16B 1 4 >>> ds.drop_isel(y=1) - Size: 40B + 40B Dimensions: (x: 2, y: 2) Coordinates: * y (y) >> dataset - Size: 104B + 104B Dimensions: (time: 
4, location: 2) Coordinates: * time (time) int64 32B 1 2 3 4 @@ -6369,7 +6368,7 @@ def dropna( # Drop NaN values from the dataset >>> dataset.dropna(dim="time") - Size: 80B + 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 @@ -6380,7 +6379,7 @@ def dropna( # Drop labels with any NAN values >>> dataset.dropna(dim="time", how="any") - Size: 80B + 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 @@ -6391,7 +6390,7 @@ def dropna( # Drop labels with all NAN values >>> dataset.dropna(dim="time", how="all") - Size: 104B + 104B Dimensions: (time: 4, location: 2) Coordinates: * time (time) int64 32B 1 2 3 4 @@ -6402,7 +6401,7 @@ def dropna( # Drop labels with less than 2 non-NA values >>> dataset.dropna(dim="time", thresh=2) - Size: 80B + 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 @@ -6482,7 +6481,7 @@ def fillna(self, value: Any) -> Self: ... coords={"x": [0, 1, 2, 3]}, ... ) >>> ds - Size: 160B + 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 @@ -6495,7 +6494,7 @@ def fillna(self, value: Any) -> Self: Replace all `NaN` values with 0s. >>> ds.fillna(0) - Size: 160B + 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 @@ -6509,7 +6508,7 @@ def fillna(self, value: Any) -> Self: >>> values = {"A": 0, "B": 1, "C": 2, "D": 3} >>> ds.fillna(value=values) - Size: 160B + 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 @@ -6623,7 +6622,7 @@ def interpolate_na( ... coords={"x": [0, 1, 2, 3, 4]}, ... 
) >>> ds - Size: 200B + 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 @@ -6634,7 +6633,7 @@ def interpolate_na( D (x) float64 40B nan 3.0 nan -1.0 4.0 >>> ds.interpolate_na(dim="x", method="linear") - Size: 200B + 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 @@ -6645,7 +6644,7 @@ def interpolate_na( D (x) float64 40B nan 3.0 1.0 -1.0 4.0 >>> ds.interpolate_na(dim="x", method="linear", fill_value="extrapolate") - Size: 200B + 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 @@ -6693,7 +6692,7 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: ... ) >>> dataset = xr.Dataset({"data": (("time",), data)}, coords={"time": time}) >>> dataset - Size: 160B + 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 @@ -6703,7 +6702,7 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: # Perform forward fill (ffill) on the dataset >>> dataset.ffill(dim="time") - Size: 160B + 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 @@ -6713,7 +6712,7 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: # Limit the forward filling to a maximum of 2 consecutive NaN values >>> dataset.ffill(dim="time", limit=2) - Size: 160B + 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 @@ -6758,7 +6757,7 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: ... ) >>> dataset = xr.Dataset({"data": (("time",), data)}, coords={"time": time}) >>> dataset - Size: 160B + 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 
2023-01-10 @@ -6768,7 +6767,7 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: # filled dataset, fills NaN values by propagating values backward >>> dataset.bfill(dim="time") - Size: 160B + 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 @@ -6778,7 +6777,7 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: # Limit the backward filling to a maximum of 2 consecutive NaN values >>> dataset.bfill(dim="time", limit=2) - Size: 160B + 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 @@ -6881,7 +6880,7 @@ def reduce( >>> percentile_scores = dataset.reduce(np.percentile, q=75, dim="test") >>> percentile_scores - Size: 132B + 132B Dimensions: (student: 3) Coordinates: * student (student) >> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])}) >>> ds - Size: 64B + 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773 bar (x) int64 16B -1 2 >>> ds.map(np.fabs) - Size: 64B + 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: @@ -7088,7 +7087,7 @@ def assign( ... coords={"lat": [10, 20], "lon": [150, 160]}, ... 
) >>> x - Size: 96B + 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 @@ -7100,7 +7099,7 @@ def assign( Where the value is a callable, evaluated on dataset: >>> x.assign(temperature_f=lambda x: x.temperature_c * 9 / 5 + 32) - Size: 128B + 128B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 @@ -7113,7 +7112,7 @@ def assign( Alternatively, the same behavior can be achieved by directly referencing an existing dataarray: >>> x.assign(temperature_f=x["temperature_c"] * 9 / 5 + 32) - Size: 128B + 128B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 @@ -7622,7 +7621,7 @@ def from_dict(cls, d: Mapping[Any, Any]) -> Self: ... } >>> ds = xr.Dataset.from_dict(d) >>> ds - Size: 60B + 60B Dimensions: (t: 3) Coordinates: * t (t) int64 24B 0 1 2 @@ -7643,7 +7642,7 @@ def from_dict(cls, d: Mapping[Any, Any]) -> Self: ... } >>> ds = xr.Dataset.from_dict(d) >>> ds - Size: 60B + 60B Dimensions: (t: 3) Coordinates: * t (t) int64 24B 0 1 2 @@ -7822,13 +7821,13 @@ def diff( -------- >>> ds = xr.Dataset({"foo": ("x", [5, 5, 6, 6])}) >>> ds.diff("x") - Size: 24B + 24B Dimensions: (x: 3) Dimensions without coordinates: x Data variables: foo (x) int64 24B 0 1 0 >>> ds.diff("x", 2) - Size: 16B + 16B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: @@ -7918,7 +7917,7 @@ def shift( -------- >>> ds = xr.Dataset({"foo": ("x", list("abcde"))}) >>> ds.shift(x=2) - Size: 40B + 40B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: @@ -7987,7 +7986,7 @@ def roll( -------- >>> ds = xr.Dataset({"foo": ("x", list("abcde"))}, coords={"x": np.arange(5)}) >>> ds.roll(x=2) - Size: 60B + 60B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 @@ -7995,7 +7994,7 @@ def roll( foo (x) >> ds.roll(x=2, roll_coords=True) - Size: 60B + 60B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 3 4 0 1 2 @@ -8092,7 +8091,7 @@ def sortby( ... coords={"x": ["b", "a"], "y": [1, 0]}, ... 
) >>> ds.sortby("x") - Size: 88B + 88B Dimensions: (x: 2, y: 2) Coordinates: * x (x) >> ds.sortby(lambda x: -x["y"]) - Size: 88B + 88B Dimensions: (x: 2, y: 2) Coordinates: * x (x) >> ds.quantile(0) # or ds.quantile(0, dim=...) - Size: 16B + 16B Dimensions: () Coordinates: quantile float64 8B 0.0 Data variables: a float64 8B 0.7 >>> ds.quantile(0, dim="x") - Size: 72B + 72B Dimensions: (y: 4) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 @@ -8232,14 +8231,14 @@ def quantile( Data variables: a (y) float64 32B 0.7 4.2 2.6 1.5 >>> ds.quantile([0, 0.5, 1]) - Size: 48B + 48B Dimensions: (quantile: 3) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 Data variables: a (quantile) float64 24B 0.7 3.4 9.4 >>> ds.quantile([0, 0.5, 1], dim="x") - Size: 152B + 152B Dimensions: (quantile: 3, y: 4) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 @@ -8482,7 +8481,7 @@ def integrate( ... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])}, ... ) >>> ds - Size: 128B + 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 @@ -8491,13 +8490,13 @@ def integrate( a (x) int64 32B 5 5 6 6 b (x) int64 32B 1 2 1 0 >>> ds.integrate("x") - Size: 16B + 16B Dimensions: () Data variables: a float64 8B 16.5 b float64 8B 3.5 >>> ds.integrate("y") - Size: 16B + 16B Dimensions: () Data variables: a float64 8B 20.0 @@ -8605,7 +8604,7 @@ def cumulative_integrate( ... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])}, ... 
) >>> ds - Size: 128B + 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 @@ -8614,7 +8613,7 @@ def cumulative_integrate( a (x) int64 32B 5 5 6 6 b (x) int64 32B 1 2 1 0 >>> ds.cumulative_integrate("x") - Size: 128B + 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 @@ -8623,7 +8622,7 @@ def cumulative_integrate( a (x) float64 32B 0.0 5.0 10.5 16.5 b (x) float64 32B 0.0 1.5 3.0 3.5 >>> ds.cumulative_integrate("y") - Size: 128B + 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 @@ -8718,7 +8717,7 @@ def filter_by_attrs(self, **kwargs) -> Self: Get variables matching a specific standard_name: >>> ds.filter_by_attrs(standard_name="convective_precipitation_flux") - Size: 192B + 192B Dimensions: (x: 2, y: 2, time: 3) Coordinates: lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 @@ -8733,7 +8732,7 @@ def filter_by_attrs(self, **kwargs) -> Self: >>> standard_name = lambda v: v is not None >>> ds.filter_by_attrs(standard_name=standard_name) - Size: 288B + 288B Dimensions: (x: 2, y: 2, time: 3) Coordinates: lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 @@ -8857,7 +8856,7 @@ def map_blocks( ... ).chunk() >>> ds = xr.Dataset({"a": array}) >>> ds.map_blocks(calculate_anomaly, template=ds).compute() - Size: 576B + 576B Dimensions: (time: 24) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 @@ -8873,7 +8872,7 @@ def map_blocks( ... kwargs={"groupby_type": "time.year"}, ... template=ds, ... ) - Size: 576B + 576B Dimensions: (time: 24) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 @@ -9199,7 +9198,7 @@ def pad( -------- >>> ds = xr.Dataset({"foo": ("x", range(5))}) >>> ds.pad(x=(1, 2)) - Size: 64B + 64B Dimensions: (x: 8) Dimensions without coordinates: x Data variables: @@ -9329,7 +9328,7 @@ def idxmin( ... 
) >>> ds = xr.Dataset({"int": array1, "float": array2}) >>> ds.min(dim="x") - Size: 56B + 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 @@ -9337,7 +9336,7 @@ def idxmin( int int64 8B -2 float (y) float64 24B -2.0 -4.0 1.0 >>> ds.argmin(dim="x") - Size: 56B + 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 @@ -9345,7 +9344,7 @@ def idxmin( int int64 8B 4 float (y) int64 24B 4 0 2 >>> ds.idxmin(dim="x") - Size: 52B + 52B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 @@ -9428,7 +9427,7 @@ def idxmax( ... ) >>> ds = xr.Dataset({"int": array1, "float": array2}) >>> ds.max(dim="x") - Size: 56B + 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 @@ -9436,7 +9435,7 @@ def idxmax( int int64 8B 2 float (y) float64 24B 2.0 2.0 1.0 >>> ds.argmax(dim="x") - Size: 56B + 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 @@ -9444,7 +9443,7 @@ def idxmax( int int64 8B 1 float (y) int64 24B 0 2 2 >>> ds.idxmax(dim="x") - Size: 52B + 52B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 @@ -9517,7 +9516,7 @@ def argmin(self, dim: Hashable | None = None, **kwargs) -> Self: ... student=argmin_indices["math_scores"] ... ) >>> min_score_in_math - Size: 84B + 84B array(['Bob', 'Bob', 'Alice'], dtype=' Self: ... student=argmin_indices["english_scores"] ... ) >>> min_score_in_english - Size: 84B + 84B array(['Charlie', 'Bob', 'Charlie'], dtype=' Self: >>> argmax_indices = dataset.argmax(dim="test") >>> argmax_indices - Size: 132B + 132B Dimensions: (student: 3) Coordinates: * student (student) >> ds - Size: 80B + 80B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: @@ -9696,12 +9695,12 @@ def eval( b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 >>> ds.eval("a + b") - Size: 40B + 40B array([0. , 1.25, 2.5 , 3.75, 5. 
]) Dimensions without coordinates: x >>> ds.eval("c = a + b") - Size: 120B + 120B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: @@ -9783,14 +9782,14 @@ def query( >>> b = np.linspace(0, 1, 5) >>> ds = xr.Dataset({"a": ("x", a), "b": ("x", b)}) >>> ds - Size: 80B + 80B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: a (x) int64 40B 0 1 2 3 4 b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 >>> ds.query(x="a > 2") - Size: 32B + 32B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: diff --git a/xarray/core/datatree.py b/xarray/core/datatree.py index 5737cdcb686..5a0b05e2cf5 100644 --- a/xarray/core/datatree.py +++ b/xarray/core/datatree.py @@ -275,14 +275,14 @@ def map( # type: ignore[override] >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])}) >>> ds - Size: 64B + 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773 bar (x) int64 16B -1 2 >>> ds.map(np.fabs) - Size: 64B + 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py index ad65a44d7d5..fc0c2324792 100644 --- a/xarray/core/formatting.py +++ b/xarray/core/formatting.py @@ -679,7 +679,7 @@ def array_repr(arr): dims = dim_summary_limited(arr, col_width=len(start) + 1, max_rows=max_rows) nbytes_str = render_human_readable_nbytes(arr.nbytes) summary = [ - f"{start}({dims})> Size: {nbytes_str}", + f"{start}({dims})> {nbytes_str}", data_repr, ] if hasattr(arr, "coords"): @@ -715,7 +715,7 @@ def array_repr(arr): @recursive_repr("") def dataset_repr(ds): nbytes_str = render_human_readable_nbytes(ds.nbytes) - summary = [f" Size: {nbytes_str}"] + summary = [f" {nbytes_str}"] col_width = _calculate_col_width(ds.variables) max_rows = OPTIONS["display_max_rows"] diff --git 
a/xarray/core/groupby.py b/xarray/core/groupby.py index 5966c32df92..39549ba5bae 100644 --- a/xarray/core/groupby.py +++ b/xarray/core/groupby.py @@ -1328,7 +1328,7 @@ def quantile( ... ) >>> ds = xr.Dataset({"a": da}) >>> da.groupby("x").quantile(0) - Size: 64B + 64B array([[0.7, 4.2, 0.7, 1.5], [6.5, 7.3, 2.6, 1.9]]) Coordinates: @@ -1336,7 +1336,7 @@ def quantile( quantile float64 8B 0.0 * x (x) int64 16B 0 1 >>> ds.groupby("y").quantile(0, dim=...) - Size: 40B + 40B Dimensions: (y: 2) Coordinates: quantile float64 8B 0.0 @@ -1344,7 +1344,7 @@ def quantile( Data variables: a (y) float64 16B 0.7 0.7 >>> da.groupby("x").quantile([0, 0.5, 1]) - Size: 192B + 192B array([[[0.7 , 1. , 1.3 ], [4.2 , 6.3 , 8.4 ], [0.7 , 5.05, 9.4 ], @@ -1359,7 +1359,7 @@ def quantile( * quantile (quantile) float64 24B 0.0 0.5 1.0 * x (x) int64 16B 0 1 >>> ds.groupby("y").quantile([0, 0.5, 1], dim=...) - Size: 88B + 88B Dimensions: (y: 2, quantile: 3) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 diff --git a/xarray/core/merge.py b/xarray/core/merge.py index a90e59e7c0b..5382596a2ac 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -819,7 +819,7 @@ def merge( ... 
) >>> x - Size: 32B + 32B array([[1., 2.], [3., 5.]]) Coordinates: @@ -827,7 +827,7 @@ def merge( * lon (lon) float64 16B 100.0 120.0 >>> y - Size: 32B + 32B array([[5., 6.], [7., 8.]]) Coordinates: @@ -835,7 +835,7 @@ def merge( * lon (lon) float64 16B 100.0 150.0 >>> z - Size: 32B + 32B array([[0., 3.], [4., 9.]]) Coordinates: @@ -843,7 +843,7 @@ def merge( * lon (lon) float64 16B 100.0 150.0 >>> xr.merge([x, y, z]) - Size: 256B + 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 @@ -855,7 +855,7 @@ def merge( var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="identical") - Size: 256B + 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 @@ -867,7 +867,7 @@ def merge( var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="equals") - Size: 256B + 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 @@ -879,7 +879,7 @@ def merge( var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="equals", fill_value=-999.0) - Size: 256B + 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 @@ -891,7 +891,7 @@ def merge( var3 (time, lon) float64 48B 0.0 -999.0 3.0 4.0 -999.0 9.0 >>> xr.merge([x, y, z], join="override") - Size: 144B + 144B Dimensions: (lat: 2, lon: 2, time: 2) Coordinates: * lat (lat) float64 16B 35.0 40.0 @@ -903,7 +903,7 @@ def merge( var3 (time, lon) float64 32B 0.0 3.0 4.0 9.0 >>> xr.merge([x, y, z], join="inner") - Size: 64B + 64B Dimensions: (lat: 1, lon: 1, time: 2) Coordinates: * lat (lat) float64 8B 35.0 @@ -915,7 +915,7 @@ def merge( var3 (time, lon) float64 16B 0.0 4.0 >>> xr.merge([x, y, z], compat="identical", join="inner") - Size: 64B + 64B Dimensions: (lat: 1, lon: 1, time: 2) Coordinates: * lat (lat) float64 8B 35.0 @@ -927,7 +927,7 @@ def merge( var3 (time, lon) float64 16B 0.0 4.0 
>>> xr.merge([x, y, z], compat="broadcast_equals", join="outer") - Size: 256B + 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 diff --git a/xarray/core/options.py b/xarray/core/options.py index f5614104357..f3ab5729ea0 100644 --- a/xarray/core/options.py +++ b/xarray/core/options.py @@ -261,7 +261,7 @@ class set_options: >>> with xr.set_options(display_width=40): ... print(ds) ... - Size: 8kB + 8kB Dimensions: (x: 1000) Coordinates: * x (x) int64 8kB 0 1 ... 999 diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py index 41311497f8b..51e772892e5 100644 --- a/xarray/core/parallel.py +++ b/xarray/core/parallel.py @@ -306,7 +306,7 @@ def map_blocks( ... coords={"time": time, "month": month}, ... ).chunk() >>> array.map_blocks(calculate_anomaly, template=array).compute() - Size: 192B + 192B array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862, 0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714, -0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 , @@ -324,7 +324,7 @@ def map_blocks( ... kwargs={"groupby_type": "time.year"}, ... template=array, ... ) # doctest: +ELLIPSIS - Size: 192B + 192B dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 
1991-12-31 00:00:00 @@ -372,7 +372,8 @@ def _wrapper( # ChainMap wants MutableMapping, but xindexes is Mapping merged_indexes = collections.ChainMap( - expected["indexes"], merged_coordinates.xindexes # type: ignore[arg-type] + expected["indexes"], + merged_coordinates.xindexes, # type: ignore[arg-type] ) expected_index = merged_indexes.get(name, None) if expected_index is not None and not index.equals(expected_index): diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index 6cf49fc995b..b5643cd4072 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -349,7 +349,7 @@ def construct( >>> rolling = da.rolling(b=3) >>> rolling.construct("window_dim") - Size: 192B + 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], @@ -363,7 +363,7 @@ def construct( >>> rolling = da.rolling(b=3, center=True) >>> rolling.construct("window_dim") - Size: 192B + 192B array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], @@ -455,7 +455,7 @@ def reduce( >>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b")) >>> rolling = da.rolling(b=3) >>> rolling.construct("window_dim") - Size: 192B + 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], @@ -468,14 +468,14 @@ def reduce( Dimensions without coordinates: a, b, window_dim >>> rolling.reduce(np.sum) - Size: 64B + 64B array([[nan, nan, 3., 6.], [nan, nan, 15., 18.]]) Dimensions without coordinates: a, b >>> rolling = da.rolling(b=3, min_periods=1) >>> rolling.reduce(np.nansum) - Size: 64B + 64B array([[ 0., 1., 3., 6.], [ 4., 9., 15., 18.]]) Dimensions without coordinates: a, b @@ -1018,7 +1018,7 @@ def construct( -------- >>> da = xr.DataArray(np.arange(24), dims="time") >>> da.coarsen(time=12).construct(time=("year", "month")) - Size: 192B + 192B array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]]) Dimensions without coordinates: year, month @@ -1174,7 +1174,7 @@ def reduce( >>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b")) >>> 
coarsen = da.coarsen(b=2) >>> coarsen.reduce(np.sum) - Size: 32B + 32B array([[ 1, 5], [ 9, 13]]) Dimensions without coordinates: a, b diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py index 4e085a0a7eb..7ffd787967d 100644 --- a/xarray/core/rolling_exp.py +++ b/xarray/core/rolling_exp.py @@ -116,7 +116,7 @@ def mean(self, keep_attrs: bool | None = None) -> T_DataWithCoords: -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").mean() - Size: 40B + 40B array([1. , 1. , 1.69230769, 1.9 , 1.96694215]) Dimensions without coordinates: x """ @@ -154,7 +154,7 @@ def sum(self, keep_attrs: bool | None = None) -> T_DataWithCoords: -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").sum() - Size: 40B + 40B array([1. , 1.33333333, 2.44444444, 2.81481481, 2.9382716 ]) Dimensions without coordinates: x """ @@ -187,7 +187,7 @@ def std(self) -> T_DataWithCoords: -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").std() - Size: 40B + 40B array([ nan, 0. , 0.67936622, 0.42966892, 0.25389527]) Dimensions without coordinates: x """ @@ -221,7 +221,7 @@ def var(self) -> T_DataWithCoords: -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").var() - Size: 40B + 40B array([ nan, 0. , 0.46153846, 0.18461538, 0.06446281]) Dimensions without coordinates: x """ @@ -253,7 +253,7 @@ def cov(self, other: T_DataWithCoords) -> T_DataWithCoords: -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").cov(da**2) - Size: 40B + 40B array([ nan, 0. 
, 1.38461538, 0.55384615, 0.19338843]) Dimensions without coordinates: x """ @@ -287,7 +287,7 @@ def corr(self, other: T_DataWithCoords) -> T_DataWithCoords: -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").corr(da.shift(x=1)) - Size: 40B + 40B array([ nan, nan, nan, 0.4330127 , 0.48038446]) Dimensions without coordinates: x """ diff --git a/xarray/core/variable.py b/xarray/core/variable.py index f0685882595..451b5b9597c 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -823,7 +823,6 @@ def _getitem_with_mask(self, key, fill_value=dtypes.NA): dims, indexer, new_order = self._broadcast_indexes(key) if self.size: - if is_duck_dask_array(self._data): # dask's indexing is faster this way; also vindex does not # support negative indices yet: @@ -2027,7 +2026,7 @@ def rolling_window( -------- >>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4))) >>> v.rolling_window("b", 3, "window_dim") - Size: 192B + 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], @@ -2039,7 +2038,7 @@ def rolling_window( [ 5., 6., 7.]]]) >>> v.rolling_window("b", 3, "window_dim", center=True) - Size: 192B + 192B array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], @@ -2216,10 +2215,10 @@ def isnull(self, keep_attrs: bool | None = None): -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var - Size: 24B + 24B array([ 1., nan, 3.]) >>> var.isnull() - Size: 3B + 3B array([False, True, False]) """ from xarray.core.computation import apply_ufunc @@ -2250,10 +2249,10 @@ def notnull(self, keep_attrs: bool | None = None): -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var - Size: 24B + 24B array([ 1., nan, 3.]) >>> var.notnull() - Size: 3B + 3B array([ True, False, True]) """ from xarray.core.computation import apply_ufunc diff --git a/xarray/namedarray/_aggregations.py b/xarray/namedarray/_aggregations.py index 9f58aeb791d..43a23606f25 100644 --- a/xarray/namedarray/_aggregations.py +++ 
b/xarray/namedarray/_aggregations.py @@ -66,11 +66,11 @@ def count( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.count() - Size: 8B + 8B array(5) """ return self.reduce( @@ -120,11 +120,11 @@ def all( ... np.array([True, True, True, True, True, False], dtype=bool), ... ) >>> na - Size: 6B + 6B array([ True, True, True, True, True, False]) >>> na.all() - Size: 1B + 1B array(False) """ return self.reduce( @@ -174,11 +174,11 @@ def any( ... np.array([True, True, True, True, True, False], dtype=bool), ... ) >>> na - Size: 6B + 6B array([ True, True, True, True, True, False]) >>> na.any() - Size: 1B + 1B array(True) """ return self.reduce( @@ -235,17 +235,17 @@ def max( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.max() - Size: 8B + 8B array(3.) Use ``skipna`` to control whether NaNs are ignored. >>> na.max(skipna=False) - Size: 8B + 8B array(nan) """ return self.reduce( @@ -303,17 +303,17 @@ def min( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.min() - Size: 8B + 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> na.min(skipna=False) - Size: 8B + 8B array(nan) """ return self.reduce( @@ -375,17 +375,17 @@ def mean( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.mean() - Size: 8B + 8B array(1.6) Use ``skipna`` to control whether NaNs are ignored. >>> na.mean(skipna=False) - Size: 8B + 8B array(nan) """ return self.reduce( @@ -454,23 +454,23 @@ def prod( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.prod() - Size: 8B + 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> na.prod(skipna=False) - Size: 8B + 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> na.prod(skipna=True, min_count=2) - Size: 8B + 8B array(0.) """ return self.reduce( @@ -540,23 +540,23 @@ def sum( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.sum() - Size: 8B + 8B array(8.) Use ``skipna`` to control whether NaNs are ignored. >>> na.sum(skipna=False) - Size: 8B + 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> na.sum(skipna=True, min_count=2) - Size: 8B + 8B array(8.) """ return self.reduce( @@ -623,23 +623,23 @@ def std( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.std() - Size: 8B + 8B array(1.0198039) Use ``skipna`` to control whether NaNs are ignored. >>> na.std(skipna=False) - Size: 8B + 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> na.std(skipna=True, ddof=1) - Size: 8B + 8B array(1.14017543) """ return self.reduce( @@ -706,23 +706,23 @@ def var( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.var() - Size: 8B + 8B array(1.04) Use ``skipna`` to control whether NaNs are ignored. >>> na.var(skipna=False) - Size: 8B + 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> na.var(skipna=True, ddof=1) - Size: 8B + 8B array(1.3) """ return self.reduce( @@ -785,17 +785,17 @@ def median( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.median() - Size: 8B + 8B array(2.) Use ``skipna`` to control whether NaNs are ignored. >>> na.median(skipna=False) - Size: 8B + 8B array(nan) """ return self.reduce( @@ -857,17 +857,17 @@ def cumsum( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.cumsum() - Size: 48B + 48B array([1., 3., 6., 6., 8., 8.]) Use ``skipna`` to control whether NaNs are ignored. 
>>> na.cumsum(skipna=False) - Size: 48B + 48B array([ 1., 3., 6., 6., 8., nan]) """ return self.reduce( @@ -929,17 +929,17 @@ def cumprod( ... np.array([1, 2, 3, 0, 2, np.nan]), ... ) >>> na - Size: 48B + 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.cumprod() - Size: 48B + 48B array([1., 2., 6., 0., 0., 0.]) Use ``skipna`` to control whether NaNs are ignored. >>> na.cumprod(skipna=False) - Size: 48B + 48B array([ 1., 2., 6., 0., 0., nan]) """ return self.reduce( diff --git a/xarray/namedarray/_array_api.py b/xarray/namedarray/_array_api.py index acbfc8af4f1..7c2cc36b292 100644 --- a/xarray/namedarray/_array_api.py +++ b/xarray/namedarray/_array_api.py @@ -61,10 +61,10 @@ def astype( -------- >>> narr = NamedArray(("x",), np.asarray([1.5, 2.5])) >>> narr - Size: 16B + 16B array([1.5, 2.5]) >>> astype(narr, np.dtype(np.int32)) - Size: 8B + 8B array([1, 2], dtype=int32) """ if isinstance(x._data, _arrayapi): @@ -79,7 +79,8 @@ def astype( def imag( - x: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], / # type: ignore[type-var] + x: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], + /, # type: ignore[type-var] ) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]: """ Returns the imaginary component of a complex number for each element x_i of the @@ -102,7 +103,7 @@ def imag( -------- >>> narr = NamedArray(("x",), np.asarray([1.0 + 2j, 2 + 4j])) >>> imag(narr) - Size: 16B + 16B array([2., 4.]) """ xp = _get_data_namespace(x) @@ -111,7 +112,8 @@ def imag( def real( - x: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], / # type: ignore[type-var] + x: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], + /, # type: ignore[type-var] ) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]: """ Returns the real component of a complex number for each element x_i of the @@ -134,7 +136,7 @@ def real( -------- >>> narr = NamedArray(("x",), np.asarray([1.0 + 2j, 2 + 4j])) >>> real(narr) - Size: 16B + 16B array([1., 2.]) """ xp = 
_get_data_namespace(x) @@ -172,11 +174,11 @@ def expand_dims( -------- >>> x = NamedArray(("x", "y"), np.asarray([[1.0, 2.0], [3.0, 4.0]])) >>> expand_dims(x) - Size: 32B + 32B array([[[1., 2.], [3., 4.]]]) >>> expand_dims(x, dim="z") - Size: 32B + 32B array([[[1., 2.], [3., 4.]]]) """ diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py index 8789bc2f9c2..06d929d60a7 100644 --- a/xarray/plot/utils.py +++ b/xarray/plot/utils.py @@ -1489,28 +1489,28 @@ def values(self) -> DataArray | None: -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).values - Size: 40B + 40B array([3, 1, 1, 3, 5]) Dimensions without coordinates: dim_0 >>> _Normalize(a, width=(18, 36, 72)).values - Size: 40B + 40B array([45., 18., 18., 45., 72.]) Dimensions without coordinates: dim_0 >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> _Normalize(a).values - Size: 48B + 48B array([0.5, 0. , 0. , 0.5, 2. , 3. ]) Dimensions without coordinates: dim_0 >>> _Normalize(a, width=(18, 36, 72)).values - Size: 48B + 48B array([27., 18., 18., 27., 54., 72.]) Dimensions without coordinates: dim_0 >>> _Normalize(a * 0, width=(18, 36, 72)).values - Size: 48B + 48B array([36., 36., 36., 36., 36., 36.]) Dimensions without coordinates: dim_0 diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py index 517fc0c2d62..95e613a09f8 100644 --- a/xarray/tests/test_dask.py +++ b/xarray/tests/test_dask.py @@ -194,7 +194,7 @@ def test_binary_op_bitshift(self) -> None: def test_repr(self): expected = dedent( f"""\ - Size: 192B + 192B {self.lazy_var.data!r}""" ) assert expected == repr(self.lazy_var) @@ -655,7 +655,7 @@ def test_dataarray_repr(self): a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) expected = dedent( f"""\ - Size: 8B + 8B {data!r} Coordinates: y (x) int64 8B dask.array @@ -670,7 +670,7 @@ def test_dataset_repr(self): ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) expected = dedent( """\ - Size: 16B + 16B Dimensions: (x: 
1) Coordinates: y (x) int64 8B dask.array diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index 4e916d62155..12cb4e4d86c 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -97,7 +97,7 @@ def test_repr(self) -> None: data_array = DataArray(v, coords, name="my_variable") expected = dedent( """\ - Size: 48B + 48B array([[1, 2, 3], [4, 5, 6]], dtype=uint64) Coordinates: @@ -112,7 +112,7 @@ def test_repr(self) -> None: def test_repr_multiindex(self) -> None: expected = dedent( """\ - Size: 32B + 32B array([0, 1, 2, 3], dtype=uint64) Coordinates: * x (x) object 32B MultiIndex @@ -131,7 +131,7 @@ def test_repr_multiindex_long(self) -> None: ).astype(np.uint64) expected = dedent( """\ - Size: 256B + 256B array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], dtype=uint64) diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index 584776197e3..e8af9373981 100644 --- a/xarray/tests/test_dataset.py +++ b/xarray/tests/test_dataset.py @@ -280,7 +280,7 @@ def test_repr(self) -> None: # need to insert str dtype at runtime to handle different endianness expected = dedent( """\ - Size: 2kB + 2kB Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8) Coordinates: * dim2 (dim2) float64 72B 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 @@ -307,7 +307,7 @@ def test_repr(self) -> None: expected = dedent( """\ - Size: 0B + 0B Dimensions: () Data variables: *empty*""" @@ -320,7 +320,7 @@ def test_repr(self) -> None: data = Dataset({"foo": ("x", np.ones(10))}).mean() expected = dedent( """\ - Size: 8B + 8B Dimensions: () Data variables: foo float64 8B 1.0""" @@ -337,7 +337,7 @@ def test_repr_multiindex(self) -> None: data = create_test_multiindex() expected = dedent( """\ - Size: 96B + 96B Dimensions: (x: 4) Coordinates: * x (x) object 32B MultiIndex @@ -358,7 +358,7 @@ def test_repr_multiindex(self) -> None: data = Dataset({}, midx_coords) expected = 
dedent( """\ - Size: 96B + 96B Dimensions: (x: 4) Coordinates: * x (x) object 32B MultiIndex @@ -386,7 +386,7 @@ def test_unicode_data(self) -> None: byteorder = "<" if sys.byteorder == "little" else ">" expected = dedent( """\ - Size: 12B + 12B Dimensions: (foø: 1) Coordinates: * foø (foø) %cU3 12B %r @@ -418,7 +418,7 @@ def __repr__(self): dataset = Dataset({"foo": ("x", Array())}) expected = dedent( """\ - Size: 16B + 16B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py index 2c40ac88f98..08551d9823a 100644 --- a/xarray/tests/test_formatting.py +++ b/xarray/tests/test_formatting.py @@ -480,7 +480,7 @@ def test_array_repr(self) -> None: expected = dedent( """\ - Size: 8B + 8B array([0], dtype=uint64) Dimensions without coordinates: test""" ) @@ -499,7 +499,7 @@ def test_array_repr(self) -> None: actual = formatting.array_repr(ds[(1, 2)]) expected = dedent( """\ - Size: 8B + 8B 0 Dimensions without coordinates: test""" ) @@ -744,7 +744,7 @@ def test_repr_file_collapsed(tmp_path) -> None: actual = repr(arr) expected = dedent( """\ - Size: 2kB + 2kB [300 values with dtype=int64] Dimensions without coordinates: test""" ) @@ -755,7 +755,7 @@ def test_repr_file_collapsed(tmp_path) -> None: actual = arr_loaded.__repr__() expected = dedent( """\ - Size: 2kB + 2kB 0 1 2 3 4 5 6 7 8 9 10 11 12 ... 288 289 290 291 292 293 294 295 296 297 298 299 Dimensions without coordinates: test""" ) @@ -821,7 +821,7 @@ def test__mapping_repr(display_max_rows, n_vars, n_attr) -> None: ) expected_size = "1kB" expected = f"""\ - Size: {expected_size} + {expected_size} {dims_start}({dims_values}) Coordinates: ({n_vars}) Data variables: ({n_vars}) @@ -946,7 +946,7 @@ def test_display_nbytes() -> None: actual = repr(xds) expected = """ - Size: 3kB + 3kB Dimensions: (foo: 1200, bar: 111) Coordinates: * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 
1194 1195 1196 1197 1198 1199 @@ -958,7 +958,7 @@ def test_display_nbytes() -> None: actual = repr(xds["foo"]) expected = """ - Size: 2kB + 2kB array([ 0, 1, 2, ..., 1197, 1198, 1199], dtype=int16) Coordinates: * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 @@ -967,7 +967,6 @@ def test_display_nbytes() -> None: def test_array_repr_dtypes(): - # These dtypes are expected to be represented similarly # on Ubuntu, macOS and Windows environments of the CI. # Unsigned integer could be used as easy replacements @@ -980,7 +979,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="int8"), dims="x") actual = repr(ds) expected = """ - Size: 1B + 1B array([0], dtype=int8) Dimensions without coordinates: x """.strip() @@ -989,7 +988,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="int16"), dims="x") actual = repr(ds) expected = """ - Size: 2B + 2B array([0], dtype=int16) Dimensions without coordinates: x """.strip() @@ -1000,7 +999,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="uint8"), dims="x") actual = repr(ds) expected = """ - Size: 1B + 1B array([0], dtype=uint8) Dimensions without coordinates: x """.strip() @@ -1009,7 +1008,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="uint16"), dims="x") actual = repr(ds) expected = """ - Size: 2B + 2B array([0], dtype=uint16) Dimensions without coordinates: x """.strip() @@ -1018,7 +1017,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="uint32"), dims="x") actual = repr(ds) expected = """ - Size: 4B + 4B array([0], dtype=uint32) Dimensions without coordinates: x """.strip() @@ -1027,7 +1026,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="uint64"), dims="x") actual = repr(ds) expected = """ - Size: 8B + 8B array([0], dtype=uint64) Dimensions without coordinates: x """.strip() @@ -1038,7 +1037,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0.0]), 
dims="x") actual = repr(ds) expected = """ - Size: 8B + 8B array([0.]) Dimensions without coordinates: x """.strip() @@ -1047,7 +1046,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="float16"), dims="x") actual = repr(ds) expected = """ - Size: 2B + 2B array([0.], dtype=float16) Dimensions without coordinates: x """.strip() @@ -1056,7 +1055,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="float32"), dims="x") actual = repr(ds) expected = """ - Size: 4B + 4B array([0.], dtype=float32) Dimensions without coordinates: x """.strip() @@ -1065,7 +1064,7 @@ def test_array_repr_dtypes(): ds = xr.DataArray(np.array([0], dtype="float64"), dims="x") actual = repr(ds) expected = """ - Size: 8B + 8B array([0.]) Dimensions without coordinates: x """.strip() @@ -1077,13 +1076,12 @@ def test_array_repr_dtypes(): reason="Default numpy's dtypes vary according to OS", ) def test_array_repr_dtypes_unix() -> None: - # Signed integer dtypes ds = xr.DataArray(np.array([0]), dims="x") actual = repr(ds) expected = """ - Size: 8B + 8B array([0]) Dimensions without coordinates: x """.strip() @@ -1092,7 +1090,7 @@ def test_array_repr_dtypes_unix() -> None: ds = xr.DataArray(np.array([0], dtype="int32"), dims="x") actual = repr(ds) expected = """ - Size: 4B + 4B array([0], dtype=int32) Dimensions without coordinates: x """.strip() @@ -1101,7 +1099,7 @@ def test_array_repr_dtypes_unix() -> None: ds = xr.DataArray(np.array([0], dtype="int64"), dims="x") actual = repr(ds) expected = """ - Size: 8B + 8B array([0]) Dimensions without coordinates: x """.strip() @@ -1113,13 +1111,12 @@ def test_array_repr_dtypes_unix() -> None: reason="Default numpy's dtypes vary according to OS", ) def test_array_repr_dtypes_on_windows() -> None: - # Integer dtypes ds = xr.DataArray(np.array([0]), dims="x") actual = repr(ds) expected = """ - Size: 4B + 4B array([0]) Dimensions without coordinates: x """.strip() @@ -1128,7 +1125,7 @@ def 
test_array_repr_dtypes_on_windows() -> None: ds = xr.DataArray(np.array([0], dtype="int32"), dims="x") actual = repr(ds) expected = """ - Size: 4B + 4B array([0]) Dimensions without coordinates: x """.strip() @@ -1137,7 +1134,7 @@ def test_array_repr_dtypes_on_windows() -> None: ds = xr.DataArray(np.array([0], dtype="int64"), dims="x") actual = repr(ds) expected = """ - Size: 8B + 8B array([0], dtype=int64) Dimensions without coordinates: x """.strip() diff --git a/xarray/tests/test_sparse.py b/xarray/tests/test_sparse.py index f0a97fc7e69..77b8e4ab0a0 100644 --- a/xarray/tests/test_sparse.py +++ b/xarray/tests/test_sparse.py @@ -297,7 +297,7 @@ def test_bivariate_ufunc(self): def test_repr(self): expected = dedent( """\ - Size: 288B + 288B """ ) assert expected == repr(self.var) @@ -681,7 +681,7 @@ def test_dataarray_repr(self): ) expected = dedent( """\ - Size: 64B + 64B Coordinates: y (x) int64 48B @@ -696,7 +696,7 @@ def test_dataset_repr(self): ) expected = dedent( """\ - Size: 112B + 112B Dimensions: (x: 4) Coordinates: y (x) int64 48B @@ -713,7 +713,7 @@ def test_sparse_dask_dataset_repr(self): ).chunk() expected = dedent( """\ - Size: 32B + 32B Dimensions: (x: 4) Dimensions without coordinates: x Data variables: diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py index 3167de2e2f0..fbe9d188ef8 100644 --- a/xarray/tests/test_variable.py +++ b/xarray/tests/test_variable.py @@ -1247,7 +1247,7 @@ def test_repr(self): v = v.astype(np.uint64) expected = dedent( """ - Size: 48B + 48B array([[1, 2, 3], [4, 5, 6]], dtype=uint64) Attributes: From 64e857bbb6fbcd72795fed4486b6ddda97568d80 Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 13:37:55 +0200 Subject: [PATCH 02/10] display_variables_nbytes --- doc/user-guide/options.rst | 1 + xarray/core/formatting.py | 12 ++++- xarray/core/options.py | 12 ++++- xarray/tests/test_formatting.py | 89 +++++++++++++++++++++++++++++++++ 4 files changed, 112 insertions(+), 2 deletions(-) diff --git 
a/doc/user-guide/options.rst b/doc/user-guide/options.rst index 12844eccbe4..849d76d7b44 100644 --- a/doc/user-guide/options.rst +++ b/doc/user-guide/options.rst @@ -15,6 +15,7 @@ Xarray offers a small number of configuration options through :py:func:`set_opti - ``display_expand_data_vars`` - ``display_max_rows`` - ``display_style`` + - ``display_variables_nbytes`` 2. Control behaviour during operations: ``arithmetic_join``, ``keep_attrs``, ``use_bottleneck``. 3. Control colormaps for plots:``cmap_divergent``, ``cmap_sequential``. diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py index fc0c2324792..6bd8132c017 100644 --- a/xarray/core/formatting.py +++ b/xarray/core/formatting.py @@ -341,7 +341,17 @@ def summarize_variable( else: dims_str = "" - nbytes_str = f" {render_human_readable_nbytes(variable.nbytes)}" + display_variables_nbytes = OPTIONS["display_variables_nbytes"] + if display_variables_nbytes == "default": + show_nbytes = variable.chunks is not None + else: + show_nbytes = display_variables_nbytes + + if show_nbytes: + nbytes_str = f" {render_human_readable_nbytes(variable.nbytes)}" + else: + nbytes_str = "" + front_str = f"{first_col}{dims_str}{variable.dtype}{nbytes_str} " values_width = max_width - len(front_str) diff --git a/xarray/core/options.py b/xarray/core/options.py index f3ab5729ea0..a5ad856e064 100644 --- a/xarray/core/options.py +++ b/xarray/core/options.py @@ -23,6 +23,7 @@ "display_expand_groups", "display_expand_indexes", "display_default_indexes", + "display_variables_nbytes", "enable_cftimeindex", "file_cache_maxsize", "keep_attrs", @@ -49,6 +50,7 @@ class T_Options(TypedDict): display_expand_groups: Literal["default", True, False] display_expand_indexes: Literal["default", True, False] display_default_indexes: Literal["default", True, False] + display_variables_nbytes: Literal["default", True, False] enable_cftimeindex: bool file_cache_maxsize: int keep_attrs: Literal["default", True, False] @@ -75,6 +77,7 @@ class 
T_Options(TypedDict): "display_expand_groups": "default", "display_expand_indexes": "default", "display_default_indexes": False, + "display_variables_nbytes": "default", "enable_cftimeindex": True, "file_cache_maxsize": 128, "keep_attrs": "default", @@ -106,6 +109,7 @@ def _positive_integer(value: int) -> bool: "display_expand_data": lambda choice: choice in [True, False, "default"], "display_expand_indexes": lambda choice: choice in [True, False, "default"], "display_default_indexes": lambda choice: choice in [True, False, "default"], + "display_variables_nbytes": lambda choice: choice in [True, False, "default"], "enable_cftimeindex": lambda value: isinstance(value, bool), "file_cache_maxsize": _positive_integer, "keep_attrs": lambda choice: choice in [True, False, "default"], @@ -216,7 +220,13 @@ class set_options: * ``True`` : to always expand indexes * ``False`` : to always collapse indexes * ``default`` : to expand unless over a pre-defined limit (always collapse for html style) - display_max_rows : int, default: 12 + display_variables_nbytes : {"default", True, False} + Whether to show the nbytes of individual variables for the representation of + ``DataArray`` or ``Dataset``. Can be + + * ``True`` : to always show the nbytes for variables + * ``False`` : to always hide the nbytes for variables + * ``default`` : to only show the nbytes for lazy variables (e.g. dask arrays) Maximum display rows. 
display_values_threshold : int, default: 200 Total number of array elements which trigger summarization rather diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py index 08551d9823a..e1b97cc8adc 100644 --- a/xarray/tests/test_formatting.py +++ b/xarray/tests/test_formatting.py @@ -532,6 +532,95 @@ def test_array_repr_recursive(self) -> None: formatting.array_repr(var) formatting.array_repr(da) + def test_display_variables_nbytes(self) -> None: + data = np.array([11, 22, 33], dtype=np.uint8) + xda = xr.DataArray( + data=data, coords=dict(x=np.array([10, 20, 30], dtype=np.float64)) + ) + xds = xr.Dataset({"myvar": xda}) + + with xr.set_options(display_variables_nbytes=True): + printout = xds.__str__() + assert printout == dedent( + """\ + 27B + Dimensions: (x: 3) + Coordinates: + * x (x) float64 24B 10.0 20.0 30.0 + Data variables: + myvar (x) uint8 3B 11 22 33""" + ) + + with xr.set_options(display_variables_nbytes=False): + printout = xds.__str__() + assert printout == dedent( + """\ + 27B + Dimensions: (x: 3) + Coordinates: + * x (x) float64 10.0 20.0 30.0 + Data variables: + myvar (x) uint8 11 22 33""" + ) + + with xr.set_options(display_variables_nbytes="default"): + printout = xds.__str__() + assert printout == dedent( + """\ + 27B + Dimensions: (x: 3) + Coordinates: + * x (x) float64 10.0 20.0 30.0 + Data variables: + myvar (x) uint8 11 22 33""" + ) + + @requires_dask + def test_display_variables_nbytes_lazy(self) -> None: + import dask.array as da + + data = da.array([11, 22, 33], dtype=np.uint8) + xda = xr.DataArray( + data=data, coords=dict(x=np.array([10, 20, 30], dtype=np.float64)) + ) + xds = xr.Dataset({"myvar": xda}) + + with xr.set_options(display_variables_nbytes=True): + printout = xds.__str__() + assert printout == dedent( + """\ + 27B + Dimensions: (x: 3) + Coordinates: + * x (x) float64 24B 10.0 20.0 30.0 + Data variables: + myvar (x) uint8 3B dask.array""" + ) + + with xr.set_options(display_variables_nbytes=False): + 
printout = xds.__str__() + assert printout == dedent( + """\ + 27B + Dimensions: (x: 3) + Coordinates: + * x (x) float64 10.0 20.0 30.0 + Data variables: + myvar (x) uint8 dask.array""" + ) + + with xr.set_options(display_variables_nbytes="default"): + printout = xds.__str__() + assert printout == dedent( + """\ + 27B + Dimensions: (x: 3) + Coordinates: + * x (x) float64 10.0 20.0 30.0 + Data variables: + myvar (x) uint8 3B dask.array""" + ) + @requires_dask def test_array_scalar_format(self) -> None: # Test numpy scalars: From 27756dac2fcb3fee38dcb40cbf08eb75150ab33a Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 13:42:39 +0200 Subject: [PATCH 03/10] Initial whats-new without PR --- doc/whats-new.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 6a97ceaff00..43e16f25bb6 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -23,6 +23,15 @@ v2024.05.1 (unreleased) New Features ~~~~~~~~~~~~ +- Removed the "Size: " prefix before the ``nbytes`` in the ``DataArray`` and ``Dataset`` representations, + and added a ``display_variables_nbytes`` option to show or hide the ``nbytes`` of individual variables + in the ``DataArray`` and ``Dataset`` representations. The option can take one of these values: + * ``True`` : to always show the nbytes for variables + * ``False`` : to always hide the nbytes for variables + * ``default`` : to only show the nbytes for lazy variables (e.g. dask arrays) + (:issue:`8690`, :pull:`TODO`). + By `Etienne Schalk <https://github.com/etienneschalk>`_.
+ Performance ~~~~~~~~~~~ From c02db4065e6860f9ba58817725b783512d217d63 Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 13:53:36 +0200 Subject: [PATCH 04/10] Added PR ref --- doc/whats-new.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 43e16f25bb6..ef78286abcb 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -29,7 +29,7 @@ New Features * ``True`` : to always show the nbytes for variables * ``False`` : to always hide the nbytes for variables * ``default`` : to only show the nbytes for lazy variables (e.g. dask arrays) - (:issue:`8690`, :pull:`TODO`). + (:issue:`8690`, :pull:`9078`). By `Etienne Schalk `_. Performance From 447211d5e3f80d4adbde63d00d9b63ebd55d46a0 Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 19:23:08 +0200 Subject: [PATCH 05/10] Regexp to remove nbytes at variable level --- xarray/backends/api.py | 4 +- xarray/coding/cftimeindex.py | 6 +- xarray/core/_aggregations.py | 776 +++++++++++++++---------------- xarray/core/accessor_dt.py | 16 +- xarray/core/accessor_str.py | 2 +- xarray/core/alignment.py | 60 +-- xarray/core/combine.py | 82 ++-- xarray/core/common.py | 132 +++--- xarray/core/computation.py | 58 +-- xarray/core/concat.py | 24 +- xarray/core/coordinates.py | 26 +- xarray/core/dataarray.py | 384 +++++++-------- xarray/core/dataset.py | 796 ++++++++++++++++---------------- xarray/core/datatree.py | 8 +- xarray/core/groupby.py | 24 +- xarray/core/merge.py | 108 ++--- xarray/core/parallel.py | 8 +- xarray/tests/test_dask.py | 6 +- xarray/tests/test_dataarray.py | 20 +- xarray/tests/test_dataset.py | 44 +- xarray/tests/test_formatting.py | 40 +- xarray/tests/test_sparse.py | 8 +- 22 files changed, 1316 insertions(+), 1316 deletions(-) diff --git a/xarray/backends/api.py b/xarray/backends/api.py index e8d022d5e86..bdb883e8e27 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -1460,9 +1460,9 @@ def save_mfdataset( 768B 
Dimensions: (time: 48) Coordinates: - * time (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31 + * time (time) datetime64[ns] 2010-01-31 2010-02-28 ... 2013-12-31 Data variables: - a (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0 + a (time) float64 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0 >>> years, datasets = zip(*ds.groupby("time.year")) >>> paths = [f"{y}.nc" for y in years] >>> xr.save_mfdataset(datasets, paths) diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py index a761c58bb8c..f386c7a941e 100644 --- a/xarray/coding/cftimeindex.py +++ b/xarray/coding/cftimeindex.py @@ -386,7 +386,7 @@ def _partial_date_slice(self, resolution, parsed): 8B array([1]) Coordinates: - * time (time) object 8B 2001-01-01 00:00:00 + * time (time) object 2001-01-01 00:00:00 >>> da = xr.DataArray( ... [1, 2], ... coords=[[pd.Timestamp(2001, 1, 1), pd.Timestamp(2001, 2, 1)]], @@ -396,7 +396,7 @@ def _partial_date_slice(self, resolution, parsed): 8B array(1) Coordinates: - time datetime64[ns] 8B 2001-01-01 + time datetime64[ns] 2001-01-01 >>> da = xr.DataArray( ... [1, 2], ... coords=[[pd.Timestamp(2001, 1, 1, 1), pd.Timestamp(2001, 2, 1)]], @@ -406,7 +406,7 @@ def _partial_date_slice(self, resolution, parsed): 8B array([1]) Coordinates: - * time (time) datetime64[ns] 8B 2001-01-01T01:00:00 + * time (time) datetime64[ns] 2001-01-01T01:00:00 """ start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed) diff --git a/xarray/core/_aggregations.py b/xarray/core/_aggregations.py index fd8bf9baade..10eb9fc7d59 100644 --- a/xarray/core/_aggregations.py +++ b/xarray/core/_aggregations.py @@ -87,16 +87,16 @@ def count( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.count() 8B Dimensions: () Data variables: - da int64 8B 5 + da int64 5 """ return self.reduce( duck_array_ops.count, @@ -159,16 +159,16 @@ def all( 78B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.all() 1B Dimensions: () Data variables: - da bool 1B False + da bool False """ return self.reduce( duck_array_ops.array_all, @@ -231,16 +231,16 @@ def any( 78B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.any() 1B Dimensions: () Data variables: - da bool 1B True + da bool True """ return self.reduce( duck_array_ops.array_any, @@ -309,16 +309,16 @@ def max( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.max() 8B Dimensions: () Data variables: - da float64 8B 3.0 + da float64 3.0 Use ``skipna`` to control whether NaNs are ignored. @@ -326,7 +326,7 @@ def max( 8B Dimensions: () Data variables: - da float64 8B nan + da float64 nan """ return self.reduce( duck_array_ops.max, @@ -396,16 +396,16 @@ def min( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.min() 8B Dimensions: () Data variables: - da float64 8B 0.0 + da float64 0.0 Use ``skipna`` to control whether NaNs are ignored. @@ -413,7 +413,7 @@ def min( 8B Dimensions: () Data variables: - da float64 8B nan + da float64 nan """ return self.reduce( duck_array_ops.min, @@ -487,16 +487,16 @@ def mean( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.mean() 8B Dimensions: () Data variables: - da float64 8B 1.6 + da float64 1.6 Use ``skipna`` to control whether NaNs are ignored. 
@@ -504,7 +504,7 @@ def mean( 8B Dimensions: () Data variables: - da float64 8B nan + da float64 nan """ return self.reduce( duck_array_ops.mean, @@ -585,16 +585,16 @@ def prod( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.prod() 8B Dimensions: () Data variables: - da float64 8B 0.0 + da float64 0.0 Use ``skipna`` to control whether NaNs are ignored. @@ -602,7 +602,7 @@ def prod( 8B Dimensions: () Data variables: - da float64 8B nan + da float64 nan Specify ``min_count`` for finer control over when NaNs are ignored. @@ -610,7 +610,7 @@ def prod( 8B Dimensions: () Data variables: - da float64 8B 0.0 + da float64 0.0 """ return self.reduce( duck_array_ops.prod, @@ -692,16 +692,16 @@ def sum( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.sum() 8B Dimensions: () Data variables: - da float64 8B 8.0 + da float64 8.0 Use ``skipna`` to control whether NaNs are ignored. @@ -709,7 +709,7 @@ def sum( 8B Dimensions: () Data variables: - da float64 8B nan + da float64 nan Specify ``min_count`` for finer control over when NaNs are ignored. @@ -717,7 +717,7 @@ def sum( 8B Dimensions: () Data variables: - da float64 8B 8.0 + da float64 8.0 """ return self.reduce( duck_array_ops.sum, @@ -796,16 +796,16 @@ def std( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.std() 8B Dimensions: () Data variables: - da float64 8B 1.02 + da float64 1.02 Use ``skipna`` to control whether NaNs are ignored. @@ -813,7 +813,7 @@ def std( 8B Dimensions: () Data variables: - da float64 8B nan + da float64 nan Specify ``ddof=1`` for an unbiased estimate. 
@@ -821,7 +821,7 @@ def std( 8B Dimensions: () Data variables: - da float64 8B 1.14 + da float64 1.14 """ return self.reduce( duck_array_ops.std, @@ -900,16 +900,16 @@ def var( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.var() 8B Dimensions: () Data variables: - da float64 8B 1.04 + da float64 1.04 Use ``skipna`` to control whether NaNs are ignored. @@ -917,7 +917,7 @@ def var( 8B Dimensions: () Data variables: - da float64 8B nan + da float64 nan Specify ``ddof=1`` for an unbiased estimate. @@ -925,7 +925,7 @@ def var( 8B Dimensions: () Data variables: - da float64 8B 1.3 + da float64 1.3 """ return self.reduce( duck_array_ops.var, @@ -1000,16 +1000,16 @@ def median( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.median() 8B Dimensions: () Data variables: - da float64 8B 2.0 + da float64 2.0 Use ``skipna`` to control whether NaNs are ignored. @@ -1017,7 +1017,7 @@ def median( 8B Dimensions: () Data variables: - da float64 8B nan + da float64 nan """ return self.reduce( duck_array_ops.median, @@ -1091,17 +1091,17 @@ def cumsum( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.cumsum() 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 3.0 6.0 6.0 8.0 8.0 + da (time) float64 1.0 3.0 6.0 6.0 8.0 8.0 Use ``skipna`` to control whether NaNs are ignored. @@ -1110,7 +1110,7 @@ def cumsum( Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 3.0 6.0 6.0 8.0 nan + da (time) float64 1.0 3.0 6.0 6.0 8.0 nan """ return self.reduce( duck_array_ops.cumsum, @@ -1184,17 +1184,17 @@ def cumprod( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.cumprod() 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 6.0 0.0 0.0 0.0 + da (time) float64 1.0 2.0 6.0 0.0 0.0 0.0 Use ``skipna`` to control whether NaNs are ignored. @@ -1203,7 +1203,7 @@ def cumprod( Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 6.0 0.0 0.0 nan + da (time) float64 1.0 2.0 6.0 0.0 0.0 nan """ return self.reduce( duck_array_ops.cumprod, @@ -1282,8 +1282,8 @@ def count( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.count() 8B @@ -1348,8 +1348,8 @@ def all( 6B array([ True, True, True, True, True, False]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.all() 1B @@ -1414,8 +1414,8 @@ def any( 6B array([ True, True, True, True, True, False]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.any() 1B @@ -1486,8 +1486,8 @@ def max( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.max() 8B @@ -1565,8 +1565,8 @@ def min( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.min() 8B @@ -1648,8 +1648,8 @@ def mean( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.mean() 8B @@ -1738,8 +1738,8 @@ def prod( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.prod() 8B @@ -1835,8 +1835,8 @@ def sum( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> da.sum() 8B @@ -1929,8 +1929,8 @@ def std( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.std() 8B @@ -2023,8 +2023,8 @@ def var( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.var() 8B @@ -2113,8 +2113,8 @@ def median( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.median() 8B @@ -2196,15 +2196,15 @@ def cumsum( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.cumsum() 48B array([1., 3., 6., 6., 8., 8.]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) 48B array([ 1., 3., 6., 6., 8., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.cumprod() 48B array([1., 2., 6., 0., 0., 0.]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) 48B array([ 1., 2., 6., 0., 0., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.groupby("labels").count() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) int64 24B 1 2 2 + da (labels) int64 1 2 2 """ if ( flox_available @@ -2505,18 +2505,18 @@ def all( 78B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").all() 27B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) bool 3B False True True + da (labels) bool False True True """ if ( flox_available @@ -2601,18 +2601,18 @@ def any( 78B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").any() 27B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) bool 3B True True True + da (labels) bool True True True """ if ( flox_available @@ -2703,18 +2703,18 @@ def max( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").max() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B 1.0 2.0 3.0 + da (labels) float64 1.0 2.0 3.0 Use ``skipna`` to control whether NaNs are ignored. @@ -2722,9 +2722,9 @@ def max( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 2.0 3.0 + da (labels) float64 nan 2.0 3.0 """ if ( flox_available @@ -2817,18 +2817,18 @@ def min( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.groupby("labels").min() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B 1.0 2.0 0.0 + da (labels) float64 1.0 2.0 0.0 Use ``skipna`` to control whether NaNs are ignored. @@ -2836,9 +2836,9 @@ def min( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 2.0 0.0 + da (labels) float64 nan 2.0 0.0 """ if ( flox_available @@ -2933,18 +2933,18 @@ def mean( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").mean() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B 1.0 2.0 1.5 + da (labels) float64 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. @@ -2952,9 +2952,9 @@ def mean( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 2.0 1.5 + da (labels) float64 nan 2.0 1.5 """ if ( flox_available @@ -3056,18 +3056,18 @@ def prod( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").prod() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B 1.0 4.0 0.0 + da (labels) float64 1.0 4.0 0.0 Use ``skipna`` to control whether NaNs are ignored. 
@@ -3075,9 +3075,9 @@ def prod( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 4.0 0.0 + da (labels) float64 nan 4.0 0.0 Specify ``min_count`` for finer control over when NaNs are ignored. @@ -3085,9 +3085,9 @@ def prod( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 4.0 0.0 + da (labels) float64 nan 4.0 0.0 """ if ( flox_available @@ -3191,18 +3191,18 @@ def sum( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").sum() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B 1.0 4.0 3.0 + da (labels) float64 1.0 4.0 3.0 Use ``skipna`` to control whether NaNs are ignored. @@ -3210,9 +3210,9 @@ def sum( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 4.0 3.0 + da (labels) float64 nan 4.0 3.0 Specify ``min_count`` for finer control over when NaNs are ignored. @@ -3220,9 +3220,9 @@ def sum( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 4.0 3.0 + da (labels) float64 nan 4.0 3.0 """ if ( flox_available @@ -3323,18 +3323,18 @@ def std( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.groupby("labels").std() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B 0.0 0.0 1.5 + da (labels) float64 0.0 0.0 1.5 Use ``skipna`` to control whether NaNs are ignored. @@ -3342,9 +3342,9 @@ def std( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 0.0 1.5 + da (labels) float64 nan 0.0 1.5 Specify ``ddof=1`` for an unbiased estimate. @@ -3352,9 +3352,9 @@ def std( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 0.0 2.121 + da (labels) float64 nan 0.0 2.121 """ if ( flox_available @@ -3455,18 +3455,18 @@ def var( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").var() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B 0.0 0.0 2.25 + da (labels) float64 0.0 0.0 2.25 Use ``skipna`` to control whether NaNs are ignored. @@ -3474,9 +3474,9 @@ def var( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 0.0 2.25 + da (labels) float64 nan 0.0 2.25 Specify ``ddof=1`` for an unbiased estimate. 
@@ -3484,9 +3484,9 @@ def var( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 0.0 4.5 + da (labels) float64 nan 0.0 4.5 """ if ( flox_available @@ -3583,18 +3583,18 @@ def median( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").median() 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B 1.0 2.0 1.5 + da (labels) float64 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. @@ -3602,9 +3602,9 @@ def median( 48B Dimensions: (labels: 3) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Data variables: - da (labels) float64 24B nan 2.0 1.5 + da (labels) float64 nan 2.0 1.5 """ return self._reduce_without_squeeze_warn( duck_array_ops.median, @@ -3684,17 +3684,17 @@ def cumsum( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.groupby("labels").cumsum() 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 3.0 3.0 4.0 1.0 + da (time) float64 1.0 2.0 3.0 3.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. @@ -3703,7 +3703,7 @@ def cumsum( Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 3.0 3.0 4.0 nan + da (time) float64 1.0 2.0 3.0 3.0 4.0 nan """ return self._reduce_without_squeeze_warn( duck_array_ops.cumsum, @@ -3783,17 +3783,17 @@ def cumprod( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.groupby("labels").cumprod() 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 3.0 0.0 4.0 1.0 + da (time) float64 1.0 2.0 3.0 0.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. @@ -3802,7 +3802,7 @@ def cumprod( Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 3.0 0.0 4.0 nan + da (time) float64 1.0 2.0 3.0 0.0 4.0 nan """ return self._reduce_without_squeeze_warn( duck_array_ops.cumprod, @@ -3910,18 +3910,18 @@ def count( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").count() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) int64 24B 1 3 1 + da (time) int64 1 3 1 """ if ( flox_available @@ -4006,18 +4006,18 @@ def all( 78B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").all() 27B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) bool 3B True True False + da (time) bool True True False """ if ( flox_available @@ -4102,18 +4102,18 @@ def any( 78B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.resample(time="3ME").any() 27B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) bool 3B True True True + da (time) bool True True True """ if ( flox_available @@ -4204,18 +4204,18 @@ def max( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").max() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 3.0 2.0 + da (time) float64 1.0 3.0 2.0 Use ``skipna`` to control whether NaNs are ignored. @@ -4223,9 +4223,9 @@ def max( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 3.0 nan + da (time) float64 1.0 3.0 nan """ if ( flox_available @@ -4318,18 +4318,18 @@ def min( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").min() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 0.0 2.0 + da (time) float64 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. 
@@ -4337,9 +4337,9 @@ def min( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 0.0 nan + da (time) float64 1.0 0.0 nan """ if ( flox_available @@ -4434,18 +4434,18 @@ def mean( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").mean() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 1.667 2.0 + da (time) float64 1.0 1.667 2.0 Use ``skipna`` to control whether NaNs are ignored. @@ -4453,9 +4453,9 @@ def mean( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 1.667 nan + da (time) float64 1.0 1.667 nan """ if ( flox_available @@ -4557,18 +4557,18 @@ def prod( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").prod() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 0.0 2.0 + da (time) float64 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. @@ -4576,9 +4576,9 @@ def prod( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 0.0 nan + da (time) float64 1.0 0.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. 
@@ -4586,9 +4586,9 @@ def prod( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B nan 0.0 nan + da (time) float64 nan 0.0 nan """ if ( flox_available @@ -4692,18 +4692,18 @@ def sum( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").sum() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 5.0 2.0 + da (time) float64 1.0 5.0 2.0 Use ``skipna`` to control whether NaNs are ignored. @@ -4711,9 +4711,9 @@ def sum( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 5.0 nan + da (time) float64 1.0 5.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. @@ -4721,9 +4721,9 @@ def sum( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B nan 5.0 nan + da (time) float64 nan 5.0 nan """ if ( flox_available @@ -4824,18 +4824,18 @@ def std( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").std() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 0.0 1.247 0.0 + da (time) float64 0.0 1.247 0.0 Use ``skipna`` to control whether NaNs are ignored. 
@@ -4843,9 +4843,9 @@ def std( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 0.0 1.247 nan + da (time) float64 0.0 1.247 nan Specify ``ddof=1`` for an unbiased estimate. @@ -4853,9 +4853,9 @@ def std( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B nan 1.528 nan + da (time) float64 nan 1.528 nan """ if ( flox_available @@ -4956,18 +4956,18 @@ def var( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").var() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 0.0 1.556 0.0 + da (time) float64 0.0 1.556 0.0 Use ``skipna`` to control whether NaNs are ignored. @@ -4975,9 +4975,9 @@ def var( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 0.0 1.556 nan + da (time) float64 0.0 1.556 nan Specify ``ddof=1`` for an unbiased estimate. @@ -4985,9 +4985,9 @@ def var( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B nan 2.333 nan + da (time) float64 nan 2.333 nan """ if ( flox_available @@ -5084,18 +5084,18 @@ def median( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) >> ds.resample(time="3ME").median() 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 2.0 2.0 + da (time) float64 1.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. @@ -5103,9 +5103,9 @@ def median( 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Data variables: - da (time) float64 24B 1.0 2.0 nan + da (time) float64 1.0 2.0 nan """ return self._reduce_without_squeeze_warn( duck_array_ops.median, @@ -5185,17 +5185,17 @@ def cumsum( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").cumsum() 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 5.0 5.0 2.0 2.0 + da (time) float64 1.0 2.0 5.0 5.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. @@ -5204,7 +5204,7 @@ def cumsum( Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 5.0 5.0 2.0 nan + da (time) float64 1.0 2.0 5.0 5.0 2.0 nan """ return self._reduce_without_squeeze_warn( duck_array_ops.cumsum, @@ -5284,17 +5284,17 @@ def cumprod( 120B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> ds.resample(time="3ME").cumprod() 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 6.0 0.0 2.0 2.0 + da (time) float64 1.0 2.0 6.0 0.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. 
@@ -5303,7 +5303,7 @@ def cumprod( Dimensions: (time: 6) Dimensions without coordinates: time Data variables: - da (time) float64 48B 1.0 2.0 6.0 0.0 2.0 nan + da (time) float64 1.0 2.0 6.0 0.0 2.0 nan """ return self._reduce_without_squeeze_warn( duck_array_ops.cumprod, @@ -5410,14 +5410,14 @@ def count( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").count() 24B array([1, 2, 2]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5499,14 +5499,14 @@ def all( 6B array([ True, True, True, True, True, False]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").all() 3B array([False, True, True]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5588,14 +5588,14 @@ def any( 6B array([ True, True, True, True, True, False]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").any() 3B array([ True, True, True]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5683,14 +5683,14 @@ def max( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").max() 24B array([1., 2., 3.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. 
@@ -5698,7 +5698,7 @@ def max( 24B array([nan, 2., 3.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5788,14 +5788,14 @@ def min( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").min() 24B array([1., 2., 0.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. @@ -5803,7 +5803,7 @@ def min( 24B array([nan, 2., 0.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -5895,14 +5895,14 @@ def mean( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").mean() 24B array([1. , 2. , 1.5]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. @@ -5910,7 +5910,7 @@ def mean( 24B array([nan, 2. , 1.5]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6009,14 +6009,14 @@ def prod( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").prod() 24B array([1., 4., 0.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. @@ -6024,7 +6024,7 @@ def prod( 24B array([nan, 4., 0.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. 
@@ -6032,7 +6032,7 @@ def prod( 24B array([nan, 4., 0.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6133,14 +6133,14 @@ def sum( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").sum() 24B array([1., 4., 3.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. @@ -6148,7 +6148,7 @@ def sum( 24B array([nan, 4., 3.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. @@ -6156,7 +6156,7 @@ def sum( 24B array([nan, 4., 3.]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6254,14 +6254,14 @@ def std( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").std() 24B array([0. , 0. , 1.5]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. @@ -6269,7 +6269,7 @@ def std( 24B array([nan, 0. , 1.5]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. @@ -6277,7 +6277,7 @@ def std( 24B array([ nan, 0. , 2.12132034]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6375,14 +6375,14 @@ def var( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").var() 24B array([0. , 0. 
, 2.25]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. @@ -6390,7 +6390,7 @@ def var( 24B array([ nan, 0. , 2.25]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. @@ -6398,7 +6398,7 @@ def var( 24B array([nan, 0. , 4.5]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ if ( flox_available @@ -6492,14 +6492,14 @@ def median( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").median() 24B array([1. , 2. , 1.5]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. @@ -6507,7 +6507,7 @@ def median( 24B array([nan, 2. , 1.5]) Coordinates: - * labels (labels) object 24B 'a' 'b' 'c' + * labels (labels) object 'a' 'b' 'c' """ return self._reduce_without_squeeze_warn( duck_array_ops.median, @@ -6585,15 +6585,15 @@ def cumsum( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").cumsum() 48B array([1., 2., 3., 3., 4., 1.]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) 48B array([ 1., 2., 3., 3., 4., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.groupby("labels").cumprod() 48B array([1., 2., 3., 0., 4., 1.]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 - labels (time) 48B array([ 1., 2., 3., 0., 4., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").count() 24B array([1, 3, 1]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6892,14 +6892,14 @@ def all( 6B array([ True, True, True, True, True, False]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").all() 3B array([ True, True, False]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -6981,14 +6981,14 @@ def any( 6B array([ True, True, True, True, True, False]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").any() 3B array([ True, True, True]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7076,14 +7076,14 @@ def max( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").max() 24B array([1., 3., 2.]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. 
@@ -7091,7 +7091,7 @@ def max( 24B array([ 1., 3., nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7181,14 +7181,14 @@ def min( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").min() 24B array([1., 0., 2.]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. @@ -7196,7 +7196,7 @@ def min( 24B array([ 1., 0., nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7288,14 +7288,14 @@ def mean( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").mean() 24B array([1. , 1.66666667, 2. ]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. @@ -7303,7 +7303,7 @@ def mean( 24B array([1. , 1.66666667, nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7402,14 +7402,14 @@ def prod( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").prod() 24B array([1., 0., 2.]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. 
@@ -7417,7 +7417,7 @@ def prod( 24B array([ 1., 0., nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. @@ -7425,7 +7425,7 @@ def prod( 24B array([nan, 0., nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7526,14 +7526,14 @@ def sum( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").sum() 24B array([1., 5., 2.]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. @@ -7541,7 +7541,7 @@ def sum( 24B array([ 1., 5., nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. @@ -7549,7 +7549,7 @@ def sum( 24B array([nan, 5., nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7647,14 +7647,14 @@ def std( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").std() 24B array([0. , 1.24721913, 0. ]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. @@ -7662,7 +7662,7 @@ def std( 24B array([0. 
, 1.24721913, nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. @@ -7670,7 +7670,7 @@ def std( 24B array([ nan, 1.52752523, nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7768,14 +7768,14 @@ def var( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").var() 24B array([0. , 1.55555556, 0. ]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. @@ -7783,7 +7783,7 @@ def var( 24B array([0. , 1.55555556, nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. @@ -7791,7 +7791,7 @@ def var( 24B array([ nan, 2.33333333, nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available @@ -7885,14 +7885,14 @@ def median( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").median() 24B array([1., 2., 2.]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. 
@@ -7900,7 +7900,7 @@ def median( 24B array([ 1., 2., nan]) Coordinates: - * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 + * time (time) datetime64[ns] 2001-01-31 2001-04-30 2001-07-31 """ return self._reduce_without_squeeze_warn( duck_array_ops.median, @@ -7978,14 +7978,14 @@ def cumsum( 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").cumsum() 48B array([1., 2., 5., 5., 2., 2.]) Coordinates: - labels (time) 48B array([ 1., 2., 5., 5., 2., nan]) Coordinates: - labels (time) 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: - * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 - labels (time) >> da.resample(time="3ME").cumprod() 48B array([1., 2., 6., 0., 2., 2.]) Coordinates: - labels (time) 48B array([ 1., 2., 6., 0., 2., nan]) Coordinates: - labels (time) >> ts.dt # doctest: +ELLIPSIS >>> ts.dt.dayofyear 80B array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) Coordinates: - * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 + * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10 >>> ts.dt.quarter 80B array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) Coordinates: - * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 + * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-10 """ @@ -552,33 +552,33 @@ class TimedeltaAccessor(TimeAccessor[T_DataArray]): 432000000000000, 453600000000000, 475200000000000, 496800000000000], dtype='timedelta64[ns]') Coordinates: - * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 + * time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt # doctest: +ELLIPSIS >>> ts.dt.days 160B array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]) Coordinates: - * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 + * time (time) timedelta64[ns] 1 days 00:00:00 ... 
5 days 18:00:00 >>> ts.dt.microseconds 160B array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) Coordinates: - * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 + * time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.seconds 160B array([ 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800]) Coordinates: - * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 + * time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.total_seconds() 160B array([ 86400., 108000., 129600., 151200., 172800., 194400., 216000., 237600., 259200., 280800., 302400., 324000., 345600., 367200., 388800., 410400., 432000., 453600., 475200., 496800.]) Coordinates: - * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 + * time (time) timedelta64[ns] 1 days 00:00:00 ... 5 days 18:00:00 """ @property diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index d4308fad98c..3c7eb84bca1 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -2817,7 +2817,7 @@ def get_dummies( [False, False, True, False, True], [ True, False, False, False, False]]]) Coordinates: - * dummies (dummies) >> y 32B array([[20, 5], [ 7, 13]]) Coordinates: - * lat (lat) float64 16B 35.0 42.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 42.0 + * lon (lon) float64 100.0 120.0 >>> a, b = xr.align(x, y) >>> a 16B array([[25, 35]]) Coordinates: - * lat (lat) float64 8B 35.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 + * lon (lon) float64 100.0 120.0 >>> b 16B array([[20, 5]]) Coordinates: - * lat (lat) float64 8B 35.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 + * lon (lon) float64 100.0 120.0 >>> a, b = xr.align(x, y, join="outer") >>> a @@ -788,16 +788,16 @@ def align( [10., 24.], [nan, nan]]) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * 
lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 >>> b 48B array([[20., 5.], [nan, nan], [ 7., 13.]]) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 >>> a, b = xr.align(x, y, join="outer", fill_value=-999) >>> a @@ -806,16 +806,16 @@ def align( [ 10, 24], [-999, -999]]) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 >>> b 48B array([[ 20, 5], [-999, -999], [ 7, 13]]) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 >>> a, b = xr.align(x, y, join="left") >>> a @@ -823,15 +823,15 @@ def align( array([[25, 35], [10, 24]]) Coordinates: - * lat (lat) float64 16B 35.0 40.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 + * lon (lon) float64 100.0 120.0 >>> b 32B array([[20., 5.], [nan, nan]]) Coordinates: - * lat (lat) float64 16B 35.0 40.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 + * lon (lon) float64 100.0 120.0 >>> a, b = xr.align(x, y, join="right") >>> a @@ -839,15 +839,15 @@ def align( array([[25., 35.], [nan, nan]]) Coordinates: - * lat (lat) float64 16B 35.0 42.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 42.0 + * lon (lon) float64 100.0 120.0 >>> b 32B array([[20, 5], [ 7, 13]]) Coordinates: - * lat (lat) float64 16B 35.0 42.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 42.0 + * lon (lon) float64 100.0 120.0 >>> a, b = xr.align(x, y, join="exact") Traceback (most recent call last): @@ -860,15 +860,15 @@ def align( array([[25, 35], [10, 24]]) Coordinates: - * lat (lat) float64 16B 35.0 40.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 + * lon (lon) 
float64 100.0 120.0 >>> b 32B array([[20, 5], [ 7, 13]]) Coordinates: - * lat (lat) float64 16B 35.0 40.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 + * lon (lon) float64 100.0 120.0 """ aligner = Aligner( @@ -1203,8 +1203,8 @@ def broadcast( Dimensions: (x: 3, y: 2) Dimensions without coordinates: x, y Data variables: - a (x, y) int64 48B 1 1 2 2 3 3 - b (x, y) int64 48B 5 6 5 6 5 6 + a (x, y) int64 1 1 2 2 3 3 + b (x, y) int64 5 6 5 6 5 6 """ if exclude is None: diff --git a/xarray/core/combine.py b/xarray/core/combine.py index ba0dd0e00c5..d9551ab983c 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -488,8 +488,8 @@ def combine_nested( Dimensions: (x: 2, y: 2) Dimensions without coordinates: x, y Data variables: - temperature (x, y) float64 32B 1.764 0.4002 0.9787 2.241 - precipitation (x, y) float64 32B 1.868 -0.9773 0.9501 -0.1514 + temperature (x, y) float64 1.764 0.4002 0.9787 2.241 + precipitation (x, y) float64 1.868 -0.9773 0.9501 -0.1514 >>> x1y2 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), @@ -517,8 +517,8 @@ def combine_nested( Dimensions: (x: 4, y: 4) Dimensions without coordinates: x, y Data variables: - temperature (x, y) float64 128B 1.764 0.4002 -0.1032 ... 0.04576 -0.1872 - precipitation (x, y) float64 128B 1.868 -0.9773 0.761 ... 0.1549 0.3782 + temperature (x, y) float64 1.764 0.4002 -0.1032 ... 0.04576 -0.1872 + precipitation (x, y) float64 1.868 -0.9773 0.761 ... 0.1549 0.3782 ``combine_nested`` can also be used to explicitly merge datasets with different variables. 
For example if we have 4 datasets, which are divided @@ -532,7 +532,7 @@ def combine_nested( Dimensions: (t: 5) Dimensions without coordinates: t Data variables: - temperature (t) float64 40B -0.8878 -1.981 -0.3479 0.1563 1.23 + temperature (t) float64 -0.8878 -1.981 -0.3479 0.1563 1.23 >>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))}) >>> t1precip @@ -540,7 +540,7 @@ def combine_nested( Dimensions: (t: 5) Dimensions without coordinates: t Data variables: - precipitation (t) float64 40B 1.202 -0.3873 -0.3023 -1.049 -1.42 + precipitation (t) float64 1.202 -0.3873 -0.3023 -1.049 -1.42 >>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))}) >>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))}) @@ -553,8 +553,8 @@ def combine_nested( Dimensions: (t: 10) Dimensions without coordinates: t Data variables: - temperature (t) float64 80B -0.8878 -1.981 -0.3479 ... -0.4381 -1.253 - precipitation (t) float64 80B 1.202 -0.3873 -0.3023 ... -0.8955 0.3869 + temperature (t) float64 -0.8878 -1.981 -0.3479 ... -0.4381 -1.253 + precipitation (t) float64 1.202 -0.3873 -0.3023 ... 
-0.8955 0.3869 See also -------- @@ -800,71 +800,71 @@ def combine_by_coords( 136B Dimensions: (y: 2, x: 3) Coordinates: - * y (y) int64 16B 0 1 - * x (x) int64 24B 10 20 30 + * y (y) int64 0 1 + * x (x) int64 10 20 30 Data variables: - temperature (y, x) float64 48B 10.98 14.3 12.06 10.9 8.473 12.92 - precipitation (y, x) float64 48B 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289 + temperature (y, x) float64 10.98 14.3 12.06 10.9 8.473 12.92 + precipitation (y, x) float64 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289 >>> x2 136B Dimensions: (y: 2, x: 3) Coordinates: - * y (y) int64 16B 2 3 - * x (x) int64 24B 10 20 30 + * y (y) int64 2 3 + * x (x) int64 10 20 30 Data variables: - temperature (y, x) float64 48B 11.36 18.51 1.421 1.743 0.4044 16.65 - precipitation (y, x) float64 48B 0.7782 0.87 0.9786 0.7992 0.4615 0.7805 + temperature (y, x) float64 11.36 18.51 1.421 1.743 0.4044 16.65 + precipitation (y, x) float64 0.7782 0.87 0.9786 0.7992 0.4615 0.7805 >>> x3 136B Dimensions: (y: 2, x: 3) Coordinates: - * y (y) int64 16B 2 3 - * x (x) int64 24B 40 50 60 + * y (y) int64 2 3 + * x (x) int64 40 50 60 Data variables: - temperature (y, x) float64 48B 2.365 12.8 2.867 18.89 10.44 8.293 - precipitation (y, x) float64 48B 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176 + temperature (y, x) float64 2.365 12.8 2.867 18.89 10.44 8.293 + precipitation (y, x) float64 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176 >>> xr.combine_by_coords([x2, x1]) 248B Dimensions: (y: 4, x: 3) Coordinates: - * y (y) int64 32B 0 1 2 3 - * x (x) int64 24B 10 20 30 + * y (y) int64 0 1 2 3 + * x (x) int64 10 20 30 Data variables: - temperature (y, x) float64 96B 10.98 14.3 12.06 ... 1.743 0.4044 16.65 - precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805 + temperature (y, x) float64 10.98 14.3 12.06 ... 1.743 0.4044 16.65 + precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 
0.4615 0.7805 >>> xr.combine_by_coords([x3, x1]) 464B Dimensions: (y: 4, x: 6) Coordinates: - * y (y) int64 32B 0 1 2 3 - * x (x) int64 48B 10 20 30 40 50 60 + * y (y) int64 0 1 2 3 + * x (x) int64 10 20 30 40 50 60 Data variables: - temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293 - precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 + temperature (y, x) float64 10.98 14.3 12.06 ... 18.89 10.44 8.293 + precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.01879 0.6176 >>> xr.combine_by_coords([x3, x1], join="override") 256B Dimensions: (y: 2, x: 6) Coordinates: - * y (y) int64 16B 0 1 - * x (x) int64 48B 10 20 30 40 50 60 + * y (y) int64 0 1 + * x (x) int64 10 20 30 40 50 60 Data variables: - temperature (y, x) float64 96B 10.98 14.3 12.06 ... 18.89 10.44 8.293 - precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 + temperature (y, x) float64 10.98 14.3 12.06 ... 18.89 10.44 8.293 + precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.01879 0.6176 >>> xr.combine_by_coords([x1, x2, x3]) 464B Dimensions: (y: 4, x: 6) Coordinates: - * y (y) int64 32B 0 1 2 3 - * x (x) int64 48B 10 20 30 40 50 60 + * y (y) int64 0 1 2 3 + * x (x) int64 10 20 30 40 50 60 Data variables: - temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293 - precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 + temperature (y, x) float64 10.98 14.3 12.06 ... 18.89 10.44 8.293 + precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.01879 0.6176 You can also combine DataArray objects, but the behaviour will differ depending on whether or not the DataArrays are named. If all DataArrays are named then they will @@ -878,7 +878,7 @@ def combine_by_coords( 16B array([1., 2.]) Coordinates: - * x (x) int64 16B 0 1 + * x (x) int64 0 1 >>> named_da2 = xr.DataArray( ... 
name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x" @@ -887,15 +887,15 @@ def combine_by_coords( 16B array([3., 4.]) Coordinates: - * x (x) int64 16B 2 3 + * x (x) int64 2 3 >>> xr.combine_by_coords([named_da1, named_da2]) 64B Dimensions: (x: 4) Coordinates: - * x (x) int64 32B 0 1 2 3 + * x (x) int64 0 1 2 3 Data variables: - a (x) float64 32B 1.0 2.0 3.0 4.0 + a (x) float64 1.0 2.0 3.0 4.0 If all the DataArrays are unnamed, a single DataArray will be returned, e.g. @@ -905,7 +905,7 @@ def combine_by_coords( 32B array([1., 2., 3., 4.]) Coordinates: - * x (x) int64 32B 0 1 2 3 + * x (x) int64 0 1 2 3 Finally, if you attempt to combine a mix of unnamed DataArrays with either named DataArrays or Datasets, a ValueError will be raised (as this is an ambiguous operation). diff --git a/xarray/core/common.py b/xarray/core/common.py index 6730b61552c..83a2d191b69 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -529,12 +529,12 @@ def assign_coords( 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: - * lon (lon) int64 32B 358 359 0 1 + * lon (lon) int64 358 359 0 1 >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180)) 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: - * lon (lon) int64 32B -2 -1 0 1 + * lon (lon) int64 -2 -1 0 1 The function also accepts dictionary arguments: @@ -542,7 +542,7 @@ def assign_coords( 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: - * lon (lon) int64 32B -2 -1 0 1 + * lon (lon) int64 -2 -1 0 1 New coordinate can also be attached to an existing dimension: @@ -551,8 +551,8 @@ def assign_coords( 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: - * lon (lon) int64 32B 358 359 0 1 - lon_2 (lon) int64 32B 300 289 0 1 + * lon (lon) int64 358 359 0 1 + lon_2 (lon) int64 300 289 0 1 Note that the same result can also be obtained with a dict e.g. 
@@ -581,28 +581,28 @@ def assign_coords( 360B Dimensions: (x: 2, y: 2, time: 4) Coordinates: - lon (x, y) float64 32B 260.2 260.7 260.2 260.8 - lat (x, y) float64 32B 42.25 42.21 42.63 42.59 - * time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09 - reference_time datetime64[ns] 8B 2014-09-05 + lon (x, y) float64 260.2 260.7 260.2 260.8 + lat (x, y) float64 42.25 42.21 42.63 42.59 + * time (time) datetime64[ns] 2014-09-06 ... 2014-09-09 + reference_time datetime64[ns] 2014-09-05 Dimensions without coordinates: x, y Data variables: - temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0 - precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0 + temperature (x, y, time) float64 20.0 20.8 21.6 ... 30.4 31.2 32.0 + precipitation (x, y, time) float64 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0 Attributes: description: Weather-related data >>> ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180)) 360B Dimensions: (x: 2, y: 2, time: 4) Coordinates: - lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 - lat (x, y) float64 32B 42.25 42.21 42.63 42.59 - * time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09 - reference_time datetime64[ns] 8B 2014-09-05 + lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 + lat (x, y) float64 42.25 42.21 42.63 42.59 + * time (time) datetime64[ns] 2014-09-06 ... 2014-09-09 + reference_time datetime64[ns] 2014-09-05 Dimensions without coordinates: x, y Data variables: - temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0 - precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0 + temperature (x, y, time) float64 20.0 20.8 21.6 ... 30.4 31.2 32.0 + precipitation (x, y, time) float64 2.0 0.0 0.0 0.0 ... 
0.0 0.0 2.0 Attributes: description: Weather-related data @@ -645,7 +645,7 @@ def assign_attrs(self, *args: Any, **kwargs: Any) -> Self: 24B Dimensions: (temperature: 3) Coordinates: - * temperature (temperature) int64 24B 25 30 27 + * temperature (temperature) int64 25 30 27 Data variables: *empty* @@ -656,7 +656,7 @@ def assign_attrs(self, *args: Any, **kwargs: Any) -> Self: 24B Dimensions: (temperature: 3) Coordinates: - * temperature (temperature) int64 24B 25 30 27 + * temperature (temperature) int64 25 30 27 Data variables: *empty* Attributes: @@ -749,11 +749,11 @@ def pipe( 96B Dimensions: (lat: 2, lon: 2) Coordinates: - * lat (lat) int64 16B 10 20 - * lon (lon) int64 16B 150 160 + * lat (lat) int64 10 20 + * lon (lon) int64 150 160 Data variables: - temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 - precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 + temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9 + precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918 >>> def adder(data, arg): ... return data + arg @@ -768,21 +768,21 @@ def pipe( 96B Dimensions: (lat: 2, lon: 2) Coordinates: - * lat (lat) int64 16B 10 20 - * lon (lon) int64 16B 150 160 + * lat (lat) int64 10 20 + * lon (lon) int64 150 160 Data variables: - temperature_c (lat, lon) float64 32B 12.98 16.3 14.06 12.9 - precipitation (lat, lon) float64 32B 2.424 2.646 2.438 2.892 + temperature_c (lat, lon) float64 12.98 16.3 14.06 12.9 + precipitation (lat, lon) float64 2.424 2.646 2.438 2.892 >>> x.pipe(adder, arg=2) 96B Dimensions: (lat: 2, lon: 2) Coordinates: - * lat (lat) int64 16B 10 20 - * lon (lon) int64 16B 150 160 + * lat (lat) int64 10 20 + * lon (lon) int64 150 160 Data variables: - temperature_c (lat, lon) float64 32B 12.98 16.3 14.06 12.9 - precipitation (lat, lon) float64 32B 2.424 2.646 2.438 2.892 + temperature_c (lat, lon) float64 12.98 16.3 14.06 12.9 + precipitation (lat, lon) float64 2.424 2.646 2.438 2.892 >>> ( ... 
x.pipe(adder, arg=2) @@ -792,11 +792,11 @@ def pipe( 96B Dimensions: (lat: 2, lon: 2) Coordinates: - * lat (lat) int64 16B 10 20 - * lon (lon) int64 16B 150 160 + * lat (lat) int64 10 20 + * lon (lon) int64 150 160 Data variables: - temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 - precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 + temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9 + precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918 See Also -------- @@ -949,12 +949,12 @@ def _resample( 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: - * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.resample(time="QS-DEC").mean() 32B array([ 1., 4., 7., 10.]) Coordinates: - * time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 2000-09-01 + * time (time) datetime64[ns] 1999-12-01 2000-03-01 ... 2000-09-01 Upsample monthly time-series data to daily data: @@ -1588,40 +1588,40 @@ def full_like( array([[0, 1, 2], [3, 4, 5]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 >>> xr.full_like(x, 1) 48B array([[1, 1, 1], [1, 1, 1]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 >>> xr.full_like(x, 0.5) 48B array([[0, 0, 0], [0, 0, 0]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 >>> xr.full_like(x, 0.5, dtype=np.double) 48B array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 >>> xr.full_like(x, np.nan, dtype=np.double) 48B array([[nan, nan, nan], [nan, nan, nan]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 >>> ds = 
xr.Dataset( ... {"a": ("x", [3, 5, 2]), "b": ("x", [9, 1, 0])}, coords={"x": [2, 4, 6]} @@ -1630,26 +1630,26 @@ def full_like( 72B Dimensions: (x: 3) Coordinates: - * x (x) int64 24B 2 4 6 + * x (x) int64 2 4 6 Data variables: - a (x) int64 24B 3 5 2 - b (x) int64 24B 9 1 0 + a (x) int64 3 5 2 + b (x) int64 9 1 0 >>> xr.full_like(ds, fill_value={"a": 1, "b": 2}) 72B Dimensions: (x: 3) Coordinates: - * x (x) int64 24B 2 4 6 + * x (x) int64 2 4 6 Data variables: - a (x) int64 24B 1 1 1 - b (x) int64 24B 2 2 2 + a (x) int64 1 1 1 + b (x) int64 2 2 2 >>> xr.full_like(ds, fill_value={"a": 1, "b": 2}, dtype={"a": bool, "b": float}) 51B Dimensions: (x: 3) Coordinates: - * x (x) int64 24B 2 4 6 + * x (x) int64 2 4 6 Data variables: - a (x) bool 3B True True True - b (x) float64 24B 2.0 2.0 2.0 + a (x) bool True True True + b (x) float64 2.0 2.0 2.0 See Also -------- @@ -1862,24 +1862,24 @@ def zeros_like( array([[0, 1, 2], [3, 4, 5]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 >>> xr.zeros_like(x) 48B array([[0, 0, 0], [0, 0, 0]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 >>> xr.zeros_like(x, dtype=float) 48B array([[0., 0., 0.], [0., 0., 0.]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 See Also -------- @@ -1999,16 +1999,16 @@ def ones_like( array([[0, 1, 2], [3, 4, 5]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 >>> xr.ones_like(x) 48B array([[1, 1, 1], [1, 1, 1]]) Coordinates: - * lat (lat) int64 16B 1 2 - * lon (lon) int64 24B 0 1 2 + * lat (lat) int64 1 2 + * lon (lon) int64 0 1 2 See Also -------- diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 079fbf94b8e..498beaa3291 100644 --- a/xarray/core/computation.py +++ 
b/xarray/core/computation.py @@ -1058,7 +1058,7 @@ def apply_ufunc( 24B array([1.41421356, 2.82842712, 4.24264069]) Coordinates: - * x (x) float64 24B 0.1 0.2 0.3 + * x (x) float64 0.1 0.2 0.3 Plain scalars, numpy arrays and a mix of these with xarray objects is also supported: @@ -1071,7 +1071,7 @@ def apply_ufunc( 24B array([1., 2., 3.]) Coordinates: - * x (x) float64 24B 0.1 0.2 0.3 + * x (x) float64 0.1 0.2 0.3 Other examples of how you could use ``apply_ufunc`` to write functions to (very nearly) replicate existing xarray functionality: @@ -1329,8 +1329,8 @@ def cov( [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]) Coordinates: - * space (space) >> da_b = DataArray( ... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), @@ -1345,8 +1345,8 @@ def cov( [15. , 10. , 5. ], [ 3.2, 0.6, 1.8]]) Coordinates: - * space (space) >> xr.cov(da_a, da_b) 8B array(-3.53055556) @@ -1354,7 +1354,7 @@ def cov( 24B array([ 0.2 , -0.5 , 1.69333333]) Coordinates: - * space (space) >> weights = DataArray( ... [4, 2, 1], ... dims=("space"), @@ -1366,12 +1366,12 @@ def cov( 24B array([4, 2, 1]) Coordinates: - * space (space) >> xr.cov(da_a, da_b, dim="space", weights=weights) 24B array([-4.69346939, -4.49632653, -3.37959184]) Coordinates: - * time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03 + * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 """ from xarray.core.dataarray import DataArray @@ -1433,8 +1433,8 @@ def corr( [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]) Coordinates: - * space (space) >> da_b = DataArray( ... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), @@ -1449,8 +1449,8 @@ def corr( [15. , 10. , 5. ], [ 3.2, 0.6, 1.8]]) Coordinates: - * space (space) >> xr.corr(da_a, da_b) 8B array(-0.57087777) @@ -1458,7 +1458,7 @@ def corr( 24B array([ 1., -1., 1.]) Coordinates: - * space (space) >> weights = DataArray( ... [4, 2, 1], ... 
dims=("space"), @@ -1470,12 +1470,12 @@ def corr( 24B array([4, 2, 1]) Coordinates: - * space (space) >> xr.corr(da_a, da_b, dim="space", weights=weights) 24B array([-0.50240504, -0.83215028, -0.99057446]) Coordinates: - * time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03 + * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 """ from xarray.core.dataarray import DataArray @@ -1620,7 +1620,7 @@ def cross( 24B array([12, -6, -3]) Coordinates: - * cartesian (cartesian) 24B array([-10, 2, 5]) Coordinates: - * cartesian (cartesian) 80B array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) Coordinates: - * lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9 + * lat (lat) int64 0 1 2 3 4 5 6 7 8 9 >>> xr.where(x < 0.5, x, x * 100) 80B array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ]) Coordinates: - * lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9 + * lat (lat) int64 0 1 2 3 4 5 6 7 8 9 >>> y = xr.DataArray( ... 0.1 * np.arange(9).reshape(3, 3), @@ -1978,8 +1978,8 @@ def where(cond, x, y, keep_attrs=None): [0.3, 0.4, 0.5], [0.6, 0.7, 0.8]]) Coordinates: - * lat (lat) int64 24B 0 1 2 - * lon (lon) int64 24B 10 11 12 + * lat (lat) int64 0 1 2 + * lon (lon) int64 10 11 12 >>> xr.where(y.lat < 1, y, -1) 72B @@ -1987,8 +1987,8 @@ def where(cond, x, y, keep_attrs=None): [-1. , -1. , -1. ], [-1. , -1. , -1. 
]]) Coordinates: - * lat (lat) int64 24B 0 1 2 - * lon (lon) int64 24B 10 11 12 + * lat (lat) int64 0 1 2 + * lon (lon) int64 10 11 12 >>> cond = xr.DataArray([True, False], dims=["x"]) >>> x = xr.DataArray([1, 2], dims=["y"]) diff --git a/xarray/core/concat.py b/xarray/core/concat.py index 49773a84923..17ea186e413 100644 --- a/xarray/core/concat.py +++ b/xarray/core/concat.py @@ -187,32 +187,32 @@ def concat( array([[0, 1, 2], [3, 4, 5]]) Coordinates: - * x (x) >> xr.concat([da.isel(y=slice(0, 1)), da.isel(y=slice(1, None))], dim="y") 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: - * x (x) >> xr.concat([da.isel(x=0), da.isel(x=1)], "x") 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: - * x (x) >> xr.concat([da.isel(x=0), da.isel(x=1)], "new_dim") 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: - x (new_dim) >> xr.concat([da.isel(x=0), da.isel(x=1)], pd.Index([-90, -100], name="new_dim")) @@ -220,9 +220,9 @@ def concat( array([[0, 1, 2], [3, 4, 5]]) Coordinates: - x (new_dim) 16B Dimensions: (x: 2) Coordinates: - * x (x) int64 16B 0 0 + * x (x) int64 0 0 Data variables: *empty* diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index 806eabc5070..d915d623209 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -224,13 +224,13 @@ class Coordinates(AbstractCoordinates): >>> xr.Coordinates({"x": [1, 2]}) Coordinates: - * x (x) int64 16B 1 2 + * x (x) int64 1 2 Create a dimension coordinate with no index: >>> xr.Coordinates(coords={"x": [1, 2]}, indexes={}) Coordinates: - x (x) int64 16B 1 2 + x (x) int64 1 2 Create a new Coordinates object from existing dataset coordinates (indexes are passed): @@ -238,16 +238,16 @@ class Coordinates(AbstractCoordinates): >>> ds = xr.Dataset(coords={"x": [1, 2]}) >>> xr.Coordinates(ds.coords) Coordinates: - * x (x) int64 16B 1 2 + * x (x) int64 1 2 Create indexed coordinates from a ``pandas.MultiIndex`` object: >>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]]) >>> 
xr.Coordinates.from_pandas_multiindex(midx, "x") Coordinates: - * x (x) object 32B MultiIndex - * x_level_0 (x) object 32B 'a' 'a' 'b' 'b' - * x_level_1 (x) int64 32B 0 1 0 1 + * x (x) object MultiIndex + * x_level_0 (x) object 'a' 'a' 'b' 'b' + * x_level_1 (x) int64 0 1 0 1 Create a new Dataset object by passing a Coordinates object: @@ -256,9 +256,9 @@ class Coordinates(AbstractCoordinates): 96B Dimensions: (x: 4) Coordinates: - * x (x) object 32B MultiIndex - * x_level_0 (x) object 32B 'a' 'a' 'b' 'b' - * x_level_1 (x) int64 32B 0 1 0 1 + * x (x) object MultiIndex + * x_level_0 (x) object 'a' 'a' 'b' 'b' + * x_level_1 (x) int64 0 1 0 1 Data variables: *empty* @@ -602,14 +602,14 @@ def assign(self, coords: Mapping | None = None, **coords_kwargs: Any) -> Self: >>> coords.assign(x=[1, 2]) Coordinates: - * x (x) int64 16B 1 2 + * x (x) int64 1 2 >>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]]) >>> coords.assign(xr.Coordinates.from_pandas_multiindex(midx, "y")) Coordinates: - * y (y) object 32B MultiIndex - * y_level_0 (y) object 32B 'a' 'a' 'b' 'b' - * y_level_1 (y) int64 32B 0 1 0 1 + * y (y) object MultiIndex + * y_level_0 (y) object 'a' 'a' 'b' 'b' + * y_level_1 (y) int64 0 1 0 1 """ # TODO: this doesn't support a callable, which is inconsistent with `DataArray.assign_coords` diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 6d02ee8cdd8..57f3120ac4b 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -366,10 +366,10 @@ class DataArray( [[22.60070734, 13.78914233, 14.17424919], [18.28478802, 16.15234857, 26.63418806]]]) Coordinates: - lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 - lat (x, y) float64 32B 42.25 42.21 42.63 42.59 - * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 - reference_time datetime64[ns] 8B 2014-09-05 + lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 + lat (x, y) float64 42.25 42.21 42.63 42.59 + * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 + 
reference_time datetime64[ns] 2014-09-05 Dimensions without coordinates: x, y Attributes: description: Ambient temperature. @@ -381,10 +381,10 @@ class DataArray( 8B array(7.18177696) Coordinates: - lon float64 8B -99.32 - lat float64 8B 42.21 - time datetime64[ns] 8B 2014-09-08 - reference_time datetime64[ns] 8B 2014-09-05 + lon float64 -99.32 + lat float64 42.21 + time datetime64[ns] 2014-09-08 + reference_time datetime64[ns] 2014-09-05 Attributes: description: Ambient temperature. units: degC @@ -1041,9 +1041,9 @@ def reset_coords( [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: - lon (x) int64 40B 10 11 12 13 14 - lat (y) int64 40B 20 21 22 23 24 - Pressure (x, y) int64 200B 50 51 52 53 54 55 56 57 ... 68 69 70 71 72 73 74 + lon (x) int64 10 11 12 13 14 + lat (y) int64 20 21 22 23 24 + Pressure (x, y) int64 50 51 52 53 54 55 56 57 ... 68 69 70 71 72 73 74 Dimensions without coordinates: x, y Return Dataset with target coordinate as a data variable rather than a coordinate variable: @@ -1052,12 +1052,12 @@ def reset_coords( 480B Dimensions: (x: 5, y: 5) Coordinates: - lon (x) int64 40B 10 11 12 13 14 - lat (y) int64 40B 20 21 22 23 24 + lon (x) int64 10 11 12 13 14 + lat (y) int64 20 21 22 23 24 Dimensions without coordinates: x, y Data variables: - Pressure (x, y) int64 200B 50 51 52 53 54 55 56 ... 68 69 70 71 72 73 74 - Temperature (x, y) int64 200B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 + Pressure (x, y) int64 50 51 52 53 54 55 56 ... 68 69 70 71 72 73 74 + Temperature (x, y) int64 0 1 2 3 4 5 6 7 8 ... 
17 18 19 20 21 22 23 24 Return DataArray without targeted coordinate: @@ -1069,8 +1069,8 @@ def reset_coords( [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: - lon (x) int64 40B 10 11 12 13 14 - lat (y) int64 40B 20 21 22 23 24 + lon (x) int64 10 11 12 13 14 + lat (y) int64 20 21 22 23 24 Dimensions without coordinates: x, y """ if names is None: @@ -1237,19 +1237,19 @@ def copy(self, deep: bool = True, data: Any = None) -> Self: 24B array([1, 2, 3]) Coordinates: - * x (x) >> array_0 = array.copy(deep=False) >>> array_0[0] = 7 >>> array_0 24B array([7, 2, 3]) Coordinates: - * x (x) >> array 24B array([7, 2, 3]) Coordinates: - * x (x) Self: 24B array([0.1, 0.2, 0.3]) Coordinates: - * x (x) >> array 24B array([7, 2, 3]) Coordinates: - * x (x) >> tgt_x = xr.DataArray(np.linspace(0, 4, num=5), dims="points") >>> tgt_y = xr.DataArray(np.linspace(0, 4, num=5), dims="points") @@ -1636,8 +1636,8 @@ def sel( 40B array([ 0, 6, 12, 18, 24]) Coordinates: - x (points) int64 40B 0 1 2 3 4 - y (points) int64 40B 0 1 2 3 4 + x (points) int64 0 1 2 3 4 + y (points) int64 0 1 2 3 4 Dimensions without coordinates: points """ ds = self._to_temp_dataset().sel( @@ -1763,22 +1763,22 @@ def thin( array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]]) Coordinates: - * x (x) int64 16B 0 1 - * y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12 + * x (x) int64 0 1 + * y (y) int64 0 1 2 3 4 5 6 7 8 9 10 11 12 >>> >>> x.thin(3) 40B array([[ 0, 3, 6, 9, 12]]) Coordinates: - * x (x) int64 8B 0 - * y (y) int64 40B 0 3 6 9 12 + * x (x) int64 0 + * y (y) int64 0 3 6 9 12 >>> x.thin({"x": 2, "y": 5}) 24B array([[ 0, 5, 10]]) Coordinates: - * x (x) int64 8B 0 - * y (y) int64 24B 0 5 10 + * x (x) int64 0 + * y (y) int64 0 5 10 See Also -------- @@ -1838,24 +1838,24 @@ def broadcast_like( array([[ 1.76405235, 0.40015721, 0.97873798], [ 2.2408932 , 1.86755799, -0.97727788]]) Coordinates: - * x (x) >> arr2 48B array([[ 0.95008842, -0.15135721], 
[-0.10321885, 0.4105985 ], [ 0.14404357, 1.45427351]]) Coordinates: - * x (x) >> arr1.broadcast_like(arr2) 72B array([[ 1.76405235, 0.40015721, 0.97873798], [ 2.2408932 , 1.86755799, -0.97727788], [ nan, nan, nan]]) Coordinates: - * x (x) >> da2 = xr.DataArray( ... data=data, ... dims=["x", "y"], @@ -1988,8 +1988,8 @@ def reindex_like( [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: - * x (x) int64 32B 40 30 20 10 - * y (y) int64 24B 90 80 70 + * x (x) int64 40 30 20 10 + * y (y) int64 90 80 70 Reindexing with both DataArrays having the same coordinates set, but in different order: @@ -2000,8 +2000,8 @@ def reindex_like( [ 5, 4, 3], [ 2, 1, 0]]) Coordinates: - * x (x) int64 32B 40 30 20 10 - * y (y) int64 24B 90 80 70 + * x (x) int64 40 30 20 10 + * y (y) int64 90 80 70 Reindexing with the other array having additional coordinates: @@ -2017,8 +2017,8 @@ def reindex_like( [nan, nan, nan], [nan, nan, nan]]) Coordinates: - * x (x) int64 32B 20 10 29 39 - * y (y) int64 24B 70 80 90 + * x (x) int64 20 10 29 39 + * y (y) int64 70 80 90 Filling missing values with the previous valid index with respect to the coordinates' value: @@ -2029,8 +2029,8 @@ def reindex_like( [3, 4, 5], [6, 7, 8]]) Coordinates: - * x (x) int64 32B 20 10 29 39 - * y (y) int64 24B 70 80 90 + * x (x) int64 20 10 29 39 + * y (y) int64 70 80 90 Filling missing values while tolerating specified error for inexact matches: @@ -2041,8 +2041,8 @@ def reindex_like( [nan, nan, nan], [nan, nan, nan]]) Coordinates: - * x (x) int64 32B 20 10 29 39 - * y (y) int64 24B 70 80 90 + * x (x) int64 20 10 29 39 + * y (y) int64 70 80 90 Filling missing values with manually specified values: @@ -2053,8 +2053,8 @@ def reindex_like( [19, 19, 19], [19, 19, 19]]) Coordinates: - * x (x) int64 32B 20 10 29 39 - * y (y) int64 24B 70 80 90 + * x (x) int64 20 10 29 39 + * y (y) int64 70 80 90 Note that unlike ``broadcast_like``, ``reindex_like`` doesn't create new dimensions: @@ -2062,8 +2062,8 @@ def reindex_like( 24B array([3, 4, 5]) 
Coordinates: - x int64 8B 20 - * y (y) int64 24B 70 80 90 + x int64 20 + * y (y) int64 70 80 90 ...so ``b`` in not added here: @@ -2071,8 +2071,8 @@ def reindex_like( 24B array([3, 4, 5]) Coordinates: - x int64 8B 20 - * y (y) int64 24B 70 80 90 + x int64 20 + * y (y) int64 70 80 90 See Also -------- @@ -2160,12 +2160,12 @@ def reindex( 32B array([0, 1, 2, 3]) Coordinates: - * lat (lat) int64 32B 90 89 88 87 + * lat (lat) int64 90 89 88 87 >>> da.reindex(lat=da.lat[::-1]) 32B array([3, 2, 1, 0]) Coordinates: - * lat (lat) int64 32B 87 88 89 90 + * lat (lat) int64 87 88 89 90 See Also -------- @@ -2260,8 +2260,8 @@ def interp( [ 2., 7., 6., nan], [ 6., nan, 5., 8.]]) Coordinates: - * x (x) int64 24B 0 1 2 - * y (y) int64 32B 10 12 14 16 + * x (x) int64 0 1 2 + * y (y) int64 10 12 14 16 1D linear interpolation (the default): @@ -2272,8 +2272,8 @@ def interp( [3. , nan, 5.75, nan], [5. , nan, 5.25, nan]]) Coordinates: - * y (y) int64 32B 10 12 14 16 - * x (x) float64 32B 0.0 0.75 1.25 1.75 + * y (y) int64 10 12 14 16 + * x (x) float64 0.0 0.75 1.25 1.75 1D nearest interpolation: @@ -2284,8 +2284,8 @@ def interp( [ 2., 7., 6., nan], [ 6., nan, 5., 8.]]) Coordinates: - * y (y) int64 32B 10 12 14 16 - * x (x) float64 32B 0.0 0.75 1.25 1.75 + * y (y) int64 10 12 14 16 + * x (x) float64 0.0 0.75 1.25 1.75 1D linear extrapolation: @@ -2300,8 +2300,8 @@ def interp( [ 8. , nan, 4.5, nan], [12. 
, nan, 3.5, nan]]) Coordinates: - * y (y) int64 32B 10 12 14 16 - * x (x) float64 32B 1.0 1.5 2.5 3.5 + * y (y) int64 10 12 14 16 + * x (x) float64 1.0 1.5 2.5 3.5 2D linear interpolation: @@ -2312,8 +2312,8 @@ def interp( [ nan, nan, nan], [ nan, nan, nan]]) Coordinates: - * x (x) float64 32B 0.0 0.75 1.25 1.75 - * y (y) int64 24B 11 13 15 + * x (x) float64 0.0 0.75 1.25 1.75 + * y (y) int64 11 13 15 """ if self.dtype.kind not in "uifc": raise TypeError( @@ -2390,8 +2390,8 @@ def interp_like( [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: - * x (x) int64 32B 10 20 30 40 - * y (y) int64 24B 70 80 90 + * x (x) int64 10 20 30 40 + * y (y) int64 70 80 90 >>> da2 = xr.DataArray( ... data=data, ... dims=["x", "y"], @@ -2404,8 +2404,8 @@ def interp_like( [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: - * x (x) int64 32B 10 20 29 39 - * y (y) int64 24B 70 80 90 + * x (x) int64 10 20 29 39 + * y (y) int64 70 80 90 Interpolate the values in the coordinates of the other DataArray with respect to the source's values: @@ -2416,8 +2416,8 @@ def interp_like( [6.3, 7.3, 8.3], [nan, nan, nan]]) Coordinates: - * x (x) int64 32B 10 20 30 40 - * y (y) int64 24B 70 80 90 + * x (x) int64 10 20 30 40 + * y (y) int64 70 80 90 Could also extrapolate missing values: @@ -2428,8 +2428,8 @@ def interp_like( [ 6.3, 7.3, 8.3], [ 9.3, 10.3, 11.3]]) Coordinates: - * x (x) int64 32B 10 20 30 40 - * y (y) int64 24B 70 80 90 + * x (x) int64 10 20 30 40 + * y (y) int64 70 80 90 Notes ----- @@ -2527,22 +2527,22 @@ def swap_dims( 16B array([0, 1]) Coordinates: - * x (x) >> arr.swap_dims({"x": "y"}) 16B array([0, 1]) Coordinates: - x (y) >> arr.swap_dims({"x": "z"}) 16B array([0, 1]) Coordinates: - x (z) >> arr.set_index(x="a") 48B array([[1., 1., 1.], [1., 1., 1.]]) Coordinates: - * x (x) int64 16B 3 4 - * y (y) int64 24B 0 1 2 + * x (x) int64 3 4 + * y (y) int64 0 1 2 See Also -------- @@ -2861,8 +2861,8 @@ def stack( array([[0, 1, 2], [3, 4, 5]]) Coordinates: - * x (x) >> stacked = arr.stack(z=("x", "y")) >>> 
stacked.indexes["z"] MultiIndex([('a', 0), @@ -2928,8 +2928,8 @@ def unstack( array([[0, 1, 2], [3, 4, 5]]) Coordinates: - * x (x) >> stacked = arr.stack(z=("x", "y")) >>> stacked.indexes["z"] MultiIndex([('a', 0), @@ -2979,11 +2979,11 @@ def to_unstacked_dataset(self, dim: Hashable, level: int | Hashable = 0) -> Data 96B Dimensions: (x: 2, y: 3) Coordinates: - * x (x) >> stacked = data.to_stacked_array("z", ["x"]) >>> stacked.indexes["z"] MultiIndex([('a', 0), @@ -3108,8 +3108,8 @@ def drop_vars( [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: - * x (x) int64 32B 10 20 30 40 - * y (y) int64 24B 70 80 90 + * x (x) int64 10 20 30 40 + * y (y) int64 70 80 90 Removing a single variable: @@ -3120,7 +3120,7 @@ def drop_vars( [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: - * y (y) int64 24B 70 80 90 + * y (y) int64 70 80 90 Dimensions without coordinates: x Removing a list of variables: @@ -3231,8 +3231,8 @@ def drop_sel( [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: - * x (x) int64 40B 0 2 4 6 8 - * y (y) int64 40B 0 3 6 9 12 + * x (x) int64 0 2 4 6 8 + * y (y) int64 0 3 6 9 12 >>> da.drop_sel(x=[0, 2], y=9) 96B @@ -3240,8 +3240,8 @@ def drop_sel( [15, 16, 17, 19], [20, 21, 22, 24]]) Coordinates: - * x (x) int64 24B 4 6 8 - * y (y) int64 32B 0 3 6 12 + * x (x) int64 4 6 8 + * y (y) int64 0 3 6 12 >>> da.drop_sel({"x": 6, "y": [0, 3]}) 96B @@ -3250,8 +3250,8 @@ def drop_sel( [12, 13, 14], [22, 23, 24]]) Coordinates: - * x (x) int64 32B 0 2 4 8 - * y (y) int64 24B 6 9 12 + * x (x) int64 0 2 4 8 + * y (y) int64 6 9 12 """ if labels_kwargs or isinstance(labels, dict): labels = either_dict_or_kwargs(labels, labels_kwargs, "drop") @@ -3360,8 +3360,8 @@ def dropna( [nan, 4., 2., 0.], [ 3., 1., 0., 0.]]) Coordinates: - lat (Y) float64 32B -20.0 -20.25 -20.5 -20.75 - lon (X) float64 32B 10.0 10.25 10.5 10.75 + lat (Y) float64 -20.0 -20.25 -20.5 -20.75 + lon (X) float64 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X >>> da.dropna(dim="Y", how="any") @@ -3369,8 +3369,8 
@@ def dropna( array([[0., 4., 2., 9.], [3., 1., 0., 0.]]) Coordinates: - lat (Y) float64 16B -20.0 -20.75 - lon (X) float64 32B 10.0 10.25 10.5 10.75 + lat (Y) float64 -20.0 -20.75 + lon (X) float64 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X Drop values only if all values along the dimension are NaN: @@ -3381,8 +3381,8 @@ def dropna( [nan, 4., 2., 0.], [ 3., 1., 0., 0.]]) Coordinates: - lat (Y) float64 24B -20.0 -20.5 -20.75 - lon (X) float64 32B 10.0 10.25 10.5 10.75 + lat (Y) float64 -20.0 -20.5 -20.75 + lon (X) float64 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X """ ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh) @@ -3421,8 +3421,8 @@ def fillna(self, value: Any) -> Self: 48B array([ 1., 4., nan, 0., 3., nan]) Coordinates: - * Z (Z) int64 48B 0 1 2 3 4 5 - height (Z) int64 48B 0 10 20 30 40 50 + * Z (Z) int64 0 1 2 3 4 5 + height (Z) int64 0 10 20 30 40 50 Fill all NaN values with 0: @@ -3430,8 +3430,8 @@ def fillna(self, value: Any) -> Self: 48B array([1., 4., 0., 0., 3., 0.]) Coordinates: - * Z (Z) int64 48B 0 1 2 3 4 5 - height (Z) int64 48B 0 10 20 30 40 50 + * Z (Z) int64 0 1 2 3 4 5 + height (Z) int64 0 10 20 30 40 50 Fill NaN values with corresponding values in array: @@ -3439,8 +3439,8 @@ def fillna(self, value: Any) -> Self: 48B array([1., 4., 4., 0., 3., 9.]) Coordinates: - * Z (Z) int64 48B 0 1 2 3 4 5 - height (Z) int64 48B 0 10 20 30 40 50 + * Z (Z) int64 0 1 2 3 4 5 + height (Z) int64 0 10 20 30 40 50 """ if utils.is_dict_like(value): raise TypeError( @@ -3547,19 +3547,19 @@ def interpolate_na( 40B array([nan, 2., 3., nan, 0.]) Coordinates: - * x (x) int64 40B 0 1 2 3 4 + * x (x) int64 0 1 2 3 4 >>> da.interpolate_na(dim="x", method="linear") 40B array([nan, 2. , 3. , 1.5, 0. ]) Coordinates: - * x (x) int64 40B 0 1 2 3 4 + * x (x) int64 0 1 2 3 4 >>> da.interpolate_na(dim="x", method="linear", fill_value="extrapolate") 40B array([1. , 2. , 3. , 1.5, 0. 
]) Coordinates: - * x (x) int64 40B 0 1 2 3 4 + * x (x) int64 0 1 2 3 4 """ from xarray.core.missing import interp_na @@ -3622,8 +3622,8 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: [ 3., nan, nan], [ 0., 2., 0.]]) Coordinates: - lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 - lon (X) float64 24B 10.0 10.25 10.5 + lat (Y) float64 -20.0 -20.25 -20.5 -20.75 -21.0 + lon (X) float64 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill all NaN values: @@ -3636,8 +3636,8 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: [ 3., 1., 5.], [ 0., 2., 0.]]) Coordinates: - lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 - lon (X) float64 24B 10.0 10.25 10.5 + lat (Y) float64 -20.0 -20.25 -20.5 -20.75 -21.0 + lon (X) float64 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill only the first of consecutive NaN values: @@ -3650,8 +3650,8 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: [ 3., nan, nan], [ 0., 2., 0.]]) Coordinates: - lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 - lon (X) float64 24B 10.0 10.25 10.5 + lat (Y) float64 -20.0 -20.25 -20.5 -20.75 -21.0 + lon (X) float64 10.0 10.25 10.5 Dimensions without coordinates: Y, X """ from xarray.core.missing import ffill @@ -3706,8 +3706,8 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: [ 3., nan, nan], [nan, 2., 0.]]) Coordinates: - lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 - lon (X) float64 24B 10.0 10.25 10.5 + lat (Y) float64 -20.0 -20.25 -20.5 -20.75 -21.0 + lon (X) float64 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill all NaN values: @@ -3720,8 +3720,8 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: [ 3., 2., 0.], [nan, 2., 0.]]) Coordinates: - lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 - lon (X) float64 24B 10.0 10.25 10.5 + lat (Y) float64 -20.0 -20.25 -20.5 -20.75 -21.0 + lon (X) float64 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill only the first 
of consecutive NaN values: @@ -3734,8 +3734,8 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: [ 3., 2., 0.], [nan, 2., 0.]]) Coordinates: - lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 - lon (X) float64 24B 10.0 10.25 10.5 + lat (Y) float64 -20.0 -20.25 -20.5 -20.75 -21.0 + lon (X) float64 10.0 10.25 10.5 Dimensions without coordinates: Y, X """ from xarray.core.missing import bfill @@ -4424,7 +4424,7 @@ def from_dict(cls, d: Mapping[str, Any]) -> Self: 24B array([10, 20, 30]) Coordinates: - * t (t) int64 24B 0 1 2 + * t (t) int64 0 1 2 Attributes: title: air temperature """ @@ -4828,12 +4828,12 @@ def diff( 24B array([0, 1, 0]) Coordinates: - * x (x) int64 24B 2 3 4 + * x (x) int64 2 3 4 >>> arr.diff("x", 2) 16B array([ 1, -1]) Coordinates: - * x (x) int64 16B 3 4 + * x (x) int64 3 4 See Also -------- @@ -5077,19 +5077,19 @@ def sortby( 40B array([5, 4, 3, 2, 1]) Coordinates: - * time (time) datetime64[ns] 40B 2000-01-01 2000-01-02 ... 2000-01-05 + * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-05 >>> da.sortby(da) 40B array([1, 2, 3, 4, 5]) Coordinates: - * time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01 + * time (time) datetime64[ns] 2000-01-05 2000-01-04 ... 2000-01-01 >>> da.sortby(lambda x: x) 40B array([1, 2, 3, 4, 5]) Coordinates: - * time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01 + * time (time) datetime64[ns] 2000-01-05 2000-01-04 ... 
2000-01-01 """ # We need to convert the callable here rather than pass it through to the # dataset method, since otherwise the dataset method would try to call the @@ -5181,26 +5181,26 @@ def quantile( 8B array(0.7) Coordinates: - quantile float64 8B 0.0 + quantile float64 0.0 >>> da.quantile(0, dim="x") 32B array([0.7, 4.2, 2.6, 1.5]) Coordinates: - * y (y) float64 32B 1.0 1.5 2.0 2.5 - quantile float64 8B 0.0 + * y (y) float64 1.0 1.5 2.0 2.5 + quantile float64 0.0 >>> da.quantile([0, 0.5, 1]) 24B array([0.7, 3.4, 9.4]) Coordinates: - * quantile (quantile) float64 24B 0.0 0.5 1.0 + * quantile (quantile) float64 0.0 0.5 1.0 >>> da.quantile([0, 0.5, 1], dim="x") 96B array([[0.7 , 4.2 , 2.6 , 1.5 ], [3.6 , 5.75, 6. , 1.7 ], [6.5 , 7.3 , 9.4 , 1.9 ]]) Coordinates: - * y (y) float64 32B 1.0 1.5 2.0 2.5 - * quantile (quantile) float64 24B 0.0 0.5 1.0 + * y (y) float64 1.0 1.5 2.0 2.5 + * quantile (quantile) float64 0.0 0.5 1.0 References ---------- @@ -5312,7 +5312,7 @@ def differentiate( [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: - * x (x) float64 32B 0.0 0.1 1.1 1.2 + * x (x) float64 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.differentiate("x") @@ -5322,7 +5322,7 @@ def differentiate( [27.54545455, 27.54545455, 27.54545455], [30. , 30. , 30. 
]]) Coordinates: - * x (x) float64 32B 0.0 0.1 1.1 1.2 + * x (x) float64 0.0 0.1 1.1 1.2 Dimensions without coordinates: y """ ds = self._to_temp_dataset().differentiate(coord, edge_order, datetime_unit) @@ -5371,7 +5371,7 @@ def integrate( [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: - * x (x) float64 32B 0.0 0.1 1.1 1.2 + * x (x) float64 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.integrate("x") @@ -5428,7 +5428,7 @@ def cumulative_integrate( [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: - * x (x) float64 32B 0.0 0.1 1.1 1.2 + * x (x) float64 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.cumulative_integrate("x") @@ -5438,7 +5438,7 @@ def cumulative_integrate( [4.65, 5.75, 6.85], [5.4 , 6.6 , 7.8 ]]) Coordinates: - * x (x) float64 32B 0.0 0.1 1.1 1.2 + * x (x) float64 0.0 0.1 1.1 1.2 Dimensions without coordinates: y """ ds = self._to_temp_dataset().cumulative_integrate(coord, datetime_unit) @@ -5546,8 +5546,8 @@ def map_blocks( 0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108, 0.07673453, 0.22865714, 0.19063865, -0.0590131 ]) Coordinates: - * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 - month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 + * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 + month (time) int64 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: @@ -5558,8 +5558,8 @@ def map_blocks( 192B dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: - * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 - month (time) int64 192B dask.array + * time (time) object 1990-01-31 00:00:00 ... 
1991-12-31 00:00:00 + month (time) int64 dask.array """ from xarray.core.parallel import map_blocks @@ -5753,7 +5753,7 @@ def pad( 48B array([0, 5, 6, 7, 0, 0]) Coordinates: - * x (x) float64 48B nan 0.0 1.0 2.0 nan nan + * x (x) float64 nan 0.0 1.0 2.0 nan nan >>> da = xr.DataArray( ... [[0, 1, 2, 3], [10, 11, 12, 13]], @@ -5767,9 +5767,9 @@ def pad( [10., 11., 12., 13.], [nan, nan, nan, nan]]) Coordinates: - * x (x) float64 32B nan 0.0 1.0 nan - * y (y) int64 32B 10 20 30 40 - z (x) float64 32B nan 100.0 200.0 nan + * x (x) float64 nan 0.0 1.0 nan + * y (y) int64 10 20 30 40 + z (x) float64 nan 100.0 200.0 nan Careful, ``constant_values`` are coerced to the data type of the array which may lead to a loss of precision: @@ -5781,9 +5781,9 @@ def pad( [10, 11, 12, 13], [ 1, 1, 1, 1]]) Coordinates: - * x (x) float64 32B nan 0.0 1.0 nan - * y (y) int64 32B 10 20 30 40 - z (x) float64 32B nan 100.0 200.0 nan + * x (x) float64 nan 0.0 1.0 nan + * y (y) int64 10 20 30 40 + z (x) float64 nan 100.0 200.0 nan """ ds = self._to_temp_dataset().pad( pad_width=pad_width, @@ -5874,17 +5874,17 @@ def idxmin( 24B array([-2., -4., 1.]) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 >>> array.argmin(dim="x") 24B array([4, 0, 2]) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 >>> array.idxmin(dim="x") 24B array([16., 0., 4.]) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 """ return computation._calc_idxminmax( array=self, @@ -5972,17 +5972,17 @@ def idxmax( 24B array([2., 2., 1.]) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 >>> array.argmax(dim="x") 24B array([0, 2, 2]) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 >>> array.idxmax(dim="x") 24B array([0., 4., 4.]) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 """ return computation._calc_idxminmax( array=self, @@ -6380,8 +6380,8 @@ def curvefit( 0.04744543, 0.03602333, 0.03129354, 0.01074885, 0.01284436, 0.00910995]]) Coordinates: - 
* x (x) int64 24B 0 1 2 - * time (time) int64 88B 0 1 2 3 4 5 6 7 8 9 10 + * x (x) int64 0 1 2 + * time (time) int64 0 1 2 3 4 5 6 7 8 9 10 Fit the exponential decay function to the data along the ``time`` dimension: @@ -6392,14 +6392,14 @@ def curvefit( 24B array([1.05692036, 1.73549638, 2.94215771]) Coordinates: - * x (x) int64 24B 0 1 2 - param >> fit_result["curvefit_coefficients"].sel(param="amplitude") 24B array([0.1005489 , 0.19631423, 0.30003579]) Coordinates: - * x (x) int64 24B 0 1 2 - param 24B array([1.0569213 , 1.73550052, 2.94215733]) Coordinates: - * x (x) int64 24B 0 1 2 - param >> fit_result["curvefit_coefficients"].sel(param="amplitude") 24B array([0.10054889, 0.1963141 , 0.3000358 ]) Coordinates: - * x (x) int64 24B 0 1 2 - param >> da.drop_duplicates(dim="x") 160B @@ -6497,8 +6497,8 @@ def drop_duplicates( [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: - * x (x) int64 32B 0 1 2 3 - * y (y) int64 40B 0 1 2 3 3 + * x (x) int64 0 1 2 3 + * y (y) int64 0 1 2 3 3 >>> da.drop_duplicates(dim="x", keep="last") 160B @@ -6507,8 +6507,8 @@ def drop_duplicates( [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: - * x (x) int64 32B 0 1 2 3 - * y (y) int64 40B 0 1 2 3 3 + * x (x) int64 0 1 2 3 + * y (y) int64 0 1 2 3 3 Drop all duplicate dimension values: @@ -6519,8 +6519,8 @@ def drop_duplicates( [15, 16, 17, 18], [20, 21, 22, 23]]) Coordinates: - * x (x) int64 32B 0 1 2 3 - * y (y) int64 32B 0 1 2 3 + * x (x) int64 0 1 2 3 + * y (y) int64 0 1 2 3 """ deduplicated = self._to_temp_dataset().drop_duplicates(dim, keep=keep) return self._from_temp_dataset(deduplicated) @@ -6940,12 +6940,12 @@ def rolling( 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: - * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 
2000-11-15 >>> da.rolling(time=3, center=True).mean() 96B array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan]) Coordinates: - * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 2000-11-15 Remove the NaNs using ``dropna()``: @@ -6953,7 +6953,7 @@ def rolling( 80B array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]) Coordinates: - * time (time) datetime64[ns] 80B 2000-01-15 2000-02-15 ... 2000-10-15 + * time (time) datetime64[ns] 2000-01-15 2000-02-15 ... 2000-10-15 See Also -------- @@ -7007,13 +7007,13 @@ def cumulative( 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: - * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.cumulative("time").sum() 96B array([ 0., 1., 3., 6., 10., 15., 21., 28., 36., 45., 55., 66.]) Coordinates: - * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 2000-11-15 See Also -------- @@ -7157,7 +7157,7 @@ def coarsen( 349.96143251, 352.96969697, 355.97796143, 358.9862259 , 361.99449036]) Coordinates: - * time (time) datetime64[ns] 968B 1999-12-16 1999-12-19 ... 2000-12-10 + * time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10 >>> See Also @@ -7273,12 +7273,12 @@ def resample( 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: - * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.resample(time="QS-DEC").mean() 32B array([ 1., 4., 7., 10.]) Coordinates: - * time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 2000-09-01 + * time (time) datetime64[ns] 1999-12-01 2000-03-01 ... 
2000-09-01 Upsample monthly time-series data to daily data: diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 284a8bcb14d..ecdd4637ce5 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -631,15 +631,15 @@ class Dataset( 552B Dimensions: (loc: 2, instrument: 3, time: 4) Coordinates: - lon (loc) float64 16B -99.83 -99.32 - lat (loc) float64 16B 42.25 42.21 - * instrument (instrument) 80B Dimensions: () Coordinates: - lon float64 8B -99.32 - lat float64 8B 42.21 - instrument Self: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: - * x (x) >> ds_0 = ds.copy(deep=False) >>> ds_0["foo"][0, 0] = 7 @@ -1315,21 +1315,21 @@ def copy(self, deep: bool = False, data: DataVars | None = None) -> Self: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: - * x (x) >> ds 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: - * x (x) Self: 80B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: - * x (x) >> ds 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: - * x (x) bool: 56B Dimensions: (space: 1, time: 3) Coordinates: - * space (space) int64 8B 0 - * time (time) int64 24B 0 1 2 + * space (space) int64 0 + * time (time) int64 0 1 2 Data variables: - variable_name (space, time) int64 24B 1 2 3 + variable_name (space, time) int64 1 2 3 # 2D array with shape (3, 1) @@ -1780,10 +1780,10 @@ def broadcast_equals(self, other: Self) -> bool: 56B Dimensions: (time: 3, space: 1) Coordinates: - * time (time) int64 24B 0 1 2 - * space (space) int64 8B 0 + * time (time) int64 0 1 2 + * space (space) int64 0 Data variables: - variable_name (time, space) int64 24B 1 2 3 + variable_name (time, space) int64 1 2 3 .equals returns True if two Datasets have the same values, dimensions, and coordinates. .broadcast_equals returns True if the results of broadcasting two Datasets against each other have the same values, dimensions, and coordinates. 
@@ -1833,10 +1833,10 @@ def equals(self, other: Self) -> bool: 56B Dimensions: (space: 1, time: 3) Coordinates: - * space (space) int64 8B 0 - * time (time) int64 24B 0 1 2 + * space (space) int64 0 + * time (time) int64 0 1 2 Data variables: - variable_name (space, time) int64 24B 1 2 3 + variable_name (space, time) int64 1 2 3 # 2D array with shape (3, 1) @@ -1849,10 +1849,10 @@ def equals(self, other: Self) -> bool: 56B Dimensions: (time: 3, space: 1) Coordinates: - * time (time) int64 24B 0 1 2 - * space (space) int64 8B 0 + * time (time) int64 0 1 2 + * space (space) int64 0 Data variables: - variable_name (time, space) int64 24B 1 2 3 + variable_name (time, space) int64 1 2 3 >>> dataset1.equals(dataset2) False @@ -1916,9 +1916,9 @@ def identical(self, other: Self) -> bool: 48B Dimensions: (X: 3) Coordinates: - * X (X) int64 24B 1 2 3 + * X (X) int64 1 2 3 Data variables: - Width (X) int64 24B 1 2 3 + Width (X) int64 1 2 3 Attributes: units: m @@ -1926,9 +1926,9 @@ def identical(self, other: Self) -> bool: 48B Dimensions: (X: 3) Coordinates: - * X (X) int64 24B 1 2 3 + * X (X) int64 1 2 3 Data variables: - Width (X) int64 24B 1 2 3 + Width (X) int64 1 2 3 Attributes: units: m @@ -1936,9 +1936,9 @@ def identical(self, other: Self) -> bool: 48B Dimensions: (X: 3) Coordinates: - * X (X) int64 24B 1 2 3 + * X (X) int64 1 2 3 Data variables: - Width (X) int64 24B 1 2 3 + Width (X) int64 1 2 3 Attributes: units: ft @@ -2023,16 +2023,16 @@ def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self: 48B Dimensions: (time: 3) Coordinates: - * time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 2023-01-03 Data variables: - pressure (time) float64 24B 1.013 1.2 3.5 + pressure (time) float64 1.013 1.2 3.5 >>> dataset.set_coords("pressure") 48B Dimensions: (time: 3) Coordinates: - pressure (time) float64 24B 1.013 1.2 3.5 - * time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03 + pressure 
(time) float64 1.013 1.2 3.5 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 2023-01-03 Data variables: *empty* @@ -2103,13 +2103,13 @@ def reset_coords( 184B Dimensions: (time: 2, lat: 2, lon: 2) Coordinates: - * time (time) datetime64[ns] 16B 2023-01-01 2023-01-02 - * lat (lat) int64 16B 40 41 - * lon (lon) int64 16B -80 -79 - altitude int64 8B 1000 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 + * lat (lat) int64 40 41 + * lon (lon) int64 -80 -79 + altitude int64 1000 Data variables: - temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32 - precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9 + temperature (time, lat, lon) int64 25 26 27 28 29 30 31 32 + precipitation (time, lat, lon) float64 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9 # Reset the 'altitude' coordinate @@ -2121,13 +2121,13 @@ def reset_coords( 184B Dimensions: (time: 2, lat: 2, lon: 2) Coordinates: - * time (time) datetime64[ns] 16B 2023-01-01 2023-01-02 - * lat (lat) int64 16B 40 41 - * lon (lon) int64 16B -80 -79 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 + * lat (lat) int64 40 41 + * lon (lon) int64 -80 -79 Data variables: - temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32 - precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9 - altitude int64 8B 1000 + temperature (time, lat, lon) int64 25 26 27 28 29 30 31 32 + precipitation (time, lat, lon) float64 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9 + altitude int64 1000 Returns ------- @@ -2923,11 +2923,11 @@ def isel( 68B Dimensions: () Coordinates: - student 168B Dimensions: (student: 2, test: 2) Coordinates: - * student (student) >> index_array = xr.DataArray([0, 2], dims="student") >>> indexed_data = dataset.isel(student=index_array) @@ -2948,11 +2948,11 @@ def isel( 224B Dimensions: (student: 2, test: 3) Coordinates: - * student (student) 120B Dimensions: (date: 5) Coordinates: - * date (date) datetime64[ns] 40B 2023-01-05 2023-01-04 ... 
2023-01-03 + * date (date) datetime64[ns] 2023-01-05 2023-01-04 ... 2023-01-03 Data variables: - pageviews (date) int64 40B 2000 1800 1500 1200 900 - visitors (date) int64 40B 1500 1200 1000 800 600 + pageviews (date) int64 2000 1800 1500 1200 900 + visitors (date) int64 1500 1200 1000 800 600 # Retrieve the 3 most busiest days in terms of pageviews @@ -3184,10 +3184,10 @@ def head( 72B Dimensions: (date: 3) Coordinates: - * date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02 + * date (date) datetime64[ns] 2023-01-05 2023-01-04 2023-01-02 Data variables: - pageviews (date) int64 24B 2000 1800 1500 - visitors (date) int64 24B 1500 1200 1000 + pageviews (date) int64 2000 1800 1500 + visitors (date) int64 1500 1200 1000 # Using a dictionary to specify the number of elements for specific dimensions @@ -3195,10 +3195,10 @@ def head( 72B Dimensions: (date: 3) Coordinates: - * date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02 + * date (date) datetime64[ns] 2023-01-05 2023-01-04 2023-01-02 Data variables: - pageviews (date) int64 24B 2000 1800 1500 - visitors (date) int64 24B 1500 1200 1000 + pageviews (date) int64 2000 1800 1500 + visitors (date) int64 1500 1200 1000 See Also -------- @@ -3263,10 +3263,10 @@ def tail( 240B Dimensions: (activity: 5) Coordinates: - * activity (activity) 144B Dimensions: (activity: 3) Coordinates: - * activity (activity) >> sorted_dataset.tail({"activity": 3}) 144B Dimensions: (activity: 3) Coordinates: - * activity (activity) 328B Dimensions: (x: 2, y: 13) Coordinates: - * x (x) int64 16B 0 1 - * y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12 + * x (x) int64 0 1 + * y (y) int64 0 1 2 3 4 5 6 7 8 9 10 11 12 Data variables: - foo (x, y) int64 208B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 25 + foo (x, y) int64 0 1 2 3 4 5 6 7 8 ... 
17 18 19 20 21 22 23 24 25 >>> x_ds.thin(3) 88B Dimensions: (x: 1, y: 5) Coordinates: - * x (x) int64 8B 0 - * y (y) int64 40B 0 3 6 9 12 + * x (x) int64 0 + * y (y) int64 0 3 6 9 12 Data variables: - foo (x, y) int64 40B 0 3 6 9 12 + foo (x, y) int64 0 3 6 9 12 >>> x.thin({"x": 2, "y": 5}) 24B array([[ 0, 5, 10]]) Coordinates: - * x (x) int64 8B 0 - * y (y) int64 24B 0 5 10 + * x (x) int64 0 + * y (y) int64 0 5 10 See Also -------- @@ -3638,10 +3638,10 @@ def reindex( 176B Dimensions: (station: 4) Coordinates: - * station (station) >> x.indexes Indexes: station Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station') @@ -3654,10 +3654,10 @@ def reindex( 176B Dimensions: (station: 4) Coordinates: - * station (station) 176B Dimensions: (station: 4) Coordinates: - * station (station) 176B Dimensions: (station: 4) Coordinates: - * station (station) 144B Dimensions: (time: 6) Coordinates: - * time (time) datetime64[ns] 48B 2019-01-01 2019-01-02 ... 2019-01-06 + * time (time) datetime64[ns] 2019-01-01 2019-01-02 ... 2019-01-06 Data variables: - temperature (time) float64 48B 15.57 12.77 nan 0.3081 16.59 15.12 - pressure (time) float64 48B 481.8 191.7 395.9 264.4 284.0 462.8 + temperature (time) float64 15.57 12.77 nan 0.3081 16.59 15.12 + pressure (time) float64 481.8 191.7 395.9 264.4 284.0 462.8 Suppose we decide to expand the dataset to cover a wider date range. @@ -3721,10 +3721,10 @@ def reindex( 240B Dimensions: (time: 10) Coordinates: - * time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07 + * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07 Data variables: - temperature (time) float64 80B nan nan nan 15.57 ... 0.3081 16.59 15.12 nan - pressure (time) float64 80B nan nan nan 481.8 ... 264.4 284.0 462.8 nan + temperature (time) float64 nan nan nan 15.57 ... 0.3081 16.59 15.12 nan + pressure (time) float64 nan nan nan 481.8 ... 
264.4 284.0 462.8 nan The index entries that did not have a value in the original data frame (for example, `2018-12-29`) are by default filled with NaN. If desired, we can fill in the missing values using one of several options. @@ -3737,10 +3737,10 @@ def reindex( 240B Dimensions: (time: 10) Coordinates: - * time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07 + * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07 Data variables: - temperature (time) float64 80B 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan - pressure (time) float64 80B 481.8 481.8 481.8 481.8 ... 284.0 462.8 nan + temperature (time) float64 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan + pressure (time) float64 481.8 481.8 481.8 481.8 ... 284.0 462.8 nan Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`) will not be filled by any of the value propagation schemes. @@ -3749,18 +3749,18 @@ def reindex( 24B Dimensions: (time: 1) Coordinates: - * time (time) datetime64[ns] 8B 2019-01-03 + * time (time) datetime64[ns] 2019-01-03 Data variables: - temperature (time) float64 8B nan - pressure (time) float64 8B 395.9 + temperature (time) float64 nan + pressure (time) float64 395.9 >>> x3.where(x3.temperature.isnull(), drop=True) 48B Dimensions: (time: 2) Coordinates: - * time (time) datetime64[ns] 16B 2019-01-03 2019-01-07 + * time (time) datetime64[ns] 2019-01-03 2019-01-07 Data variables: - temperature (time) float64 16B nan nan - pressure (time) float64 16B 395.9 nan + temperature (time) float64 nan nan + pressure (time) float64 395.9 nan This is because filling while reindexing does not look at dataset values, but only compares the original and desired indexes. 
If you do want to fill in the `NaN` values present in the @@ -3890,11 +3890,11 @@ def interp( 176B Dimensions: (x: 3, y: 4) Coordinates: - * x (x) int64 24B 0 1 2 - * y (y) int64 32B 10 12 14 16 + * x (x) int64 0 1 2 + * y (y) int64 10 12 14 16 Data variables: - a (x) int64 24B 5 7 4 - b (x, y) float64 96B 1.0 4.0 2.0 9.0 2.0 7.0 6.0 nan 6.0 nan 5.0 8.0 + a (x) int64 5 7 4 + b (x, y) float64 1.0 4.0 2.0 9.0 2.0 7.0 6.0 nan 6.0 nan 5.0 8.0 1D interpolation with the default method (linear): @@ -3902,11 +3902,11 @@ def interp( 224B Dimensions: (x: 4, y: 4) Coordinates: - * y (y) int64 32B 10 12 14 16 - * x (x) float64 32B 0.0 0.75 1.25 1.75 + * y (y) int64 10 12 14 16 + * x (x) float64 0.0 0.75 1.25 1.75 Data variables: - a (x) float64 32B 5.0 6.5 6.25 4.75 - b (x, y) float64 128B 1.0 4.0 2.0 nan 1.75 ... nan 5.0 nan 5.25 nan + a (x) float64 5.0 6.5 6.25 4.75 + b (x, y) float64 1.0 4.0 2.0 nan 1.75 ... nan 5.0 nan 5.25 nan 1D interpolation with a different method: @@ -3914,11 +3914,11 @@ def interp( 224B Dimensions: (x: 4, y: 4) Coordinates: - * y (y) int64 32B 10 12 14 16 - * x (x) float64 32B 0.0 0.75 1.25 1.75 + * y (y) int64 10 12 14 16 + * x (x) float64 0.0 0.75 1.25 1.75 Data variables: - a (x) float64 32B 5.0 7.0 7.0 4.0 - b (x, y) float64 128B 1.0 4.0 2.0 9.0 2.0 7.0 ... nan 6.0 nan 5.0 8.0 + a (x) float64 5.0 7.0 7.0 4.0 + b (x, y) float64 1.0 4.0 2.0 9.0 2.0 7.0 ... nan 6.0 nan 5.0 8.0 1D extrapolation: @@ -3930,11 +3930,11 @@ def interp( 224B Dimensions: (x: 4, y: 4) Coordinates: - * y (y) int64 32B 10 12 14 16 - * x (x) float64 32B 1.0 1.5 2.5 3.5 + * y (y) int64 10 12 14 16 + * x (x) float64 1.0 1.5 2.5 3.5 Data variables: - a (x) float64 32B 7.0 5.5 2.5 -0.5 - b (x, y) float64 128B 2.0 7.0 6.0 nan 4.0 ... nan 12.0 nan 3.5 nan + a (x) float64 7.0 5.5 2.5 -0.5 + b (x, y) float64 2.0 7.0 6.0 nan 4.0 ... 
nan 12.0 nan 3.5 nan 2D interpolation: @@ -3942,11 +3942,11 @@ def interp( 184B Dimensions: (x: 4, y: 3) Coordinates: - * x (x) float64 32B 0.0 0.75 1.25 1.75 - * y (y) int64 24B 11 13 15 + * x (x) float64 0.0 0.75 1.25 1.75 + * y (y) int64 11 13 15 Data variables: - a (x) float64 32B 5.0 6.5 6.25 4.75 - b (x, y) float64 96B 2.5 3.0 nan 4.0 5.625 ... nan nan nan nan nan + a (x) float64 5.0 6.5 6.25 4.75 + b (x, y) float64 2.5 3.0 nan 4.0 5.625 ... nan nan nan nan nan """ from xarray.core import missing @@ -4430,32 +4430,32 @@ def swap_dims( 56B Dimensions: (x: 2) Coordinates: - * x (x) >> ds.swap_dims({"x": "y"}) 56B Dimensions: (y: 2) Coordinates: - x (y) >> ds.swap_dims({"x": "z"}) 56B Dimensions: (z: 2) Coordinates: - x (z) 8B Dimensions: () Data variables: - temperature float64 8B 25.0 + temperature float64 25.0 # Expand the dataset with a new dimension called "time" @@ -4569,7 +4569,7 @@ def expand_dims( Dimensions: (time: 1) Dimensions without coordinates: time Data variables: - temperature (time) float64 8B 25.0 + temperature (time) float64 25.0 # 1D data @@ -4580,7 +4580,7 @@ def expand_dims( Dimensions: (x: 3) Dimensions without coordinates: x Data variables: - temperature (x) float64 24B 25.0 26.5 24.8 + temperature (x) float64 25.0 26.5 24.8 # Expand the dataset with a new dimension called "time" using axis argument @@ -4589,7 +4589,7 @@ def expand_dims( Dimensions: (time: 1, x: 3) Dimensions without coordinates: time, x Data variables: - temperature (time, x) float64 24B 25.0 26.5 24.8 + temperature (time, x) float64 25.0 26.5 24.8 # 2D data @@ -4600,7 +4600,7 @@ def expand_dims( Dimensions: (y: 3, x: 4) Dimensions without coordinates: y, x Data variables: - temperature (y, x) float64 96B 0.5488 0.7152 0.6028 ... 0.7917 0.5289 + temperature (y, x) float64 0.5488 0.7152 0.6028 ... 
0.7917 0.5289 # Expand the dataset with a new dimension called "time" using axis argument @@ -4609,7 +4609,7 @@ def expand_dims( Dimensions: (y: 3, x: 4, time: 1) Dimensions without coordinates: y, x, time Data variables: - temperature (y, x, time) float64 96B 0.5488 0.7152 0.6028 ... 0.7917 0.5289 + temperature (y, x, time) float64 0.5488 0.7152 0.6028 ... 0.7917 0.5289 # Expand a scalar variable along a new dimension of the same name with and without creating a new index @@ -4618,7 +4618,7 @@ def expand_dims( 8B Dimensions: () Coordinates: - x int64 8B 0 + x int64 0 Data variables: *empty* @@ -4626,7 +4626,7 @@ def expand_dims( 8B Dimensions: (x: 1) Coordinates: - * x (x) int64 8B 0 + * x (x) int64 0 Data variables: *empty* @@ -4797,19 +4797,19 @@ def set_index( 104B Dimensions: (x: 2, y: 3) Coordinates: - * x (x) int64 16B 0 1 - * y (y) int64 24B 0 1 2 - a (x) int64 16B 3 4 + * x (x) int64 0 1 + * y (y) int64 0 1 2 + a (x) int64 3 4 Data variables: - v (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0 + v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0 >>> ds.set_index(x="a") 88B Dimensions: (x: 2, y: 3) Coordinates: - * x (x) int64 16B 3 4 - * y (y) int64 24B 0 1 2 + * x (x) int64 3 4 + * y (y) int64 0 1 2 Data variables: - v (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0 + v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0 See Also -------- @@ -5414,20 +5414,20 @@ def to_stacked_array( 76B Dimensions: (x: 2, y: 3) Coordinates: - * y (y) >> data.to_stacked_array("z", sample_dims=["x"]) 64B array([[0, 1, 2, 6], [3, 4, 5, 7]]) Coordinates: - * z (z) object 32B MultiIndex - * variable (z) 136B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: - * time (time) datetime64[ns] 8B 2023-07-01 - * latitude (latitude) float64 16B 40.0 40.2 - * longitude (longitude) float64 16B -75.0 -74.8 + * time (time) datetime64[ns] 2023-07-01 + * latitude (latitude) float64 40.0 40.2 + * longitude (longitude) float64 -75.0 -74.8 Data variables: - temperature (time, latitude, longitude) float64 32B 25.5 
26.3 27.1 28.0 - humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 - wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 + temperature (time, latitude, longitude) float64 25.5 26.3 27.1 28.0 + humidity (time, latitude, longitude) float64 65.0 63.8 58.2 59.6 + wind_speed (time, latitude, longitude) float64 10.2 8.5 12.1 9.8 Drop the 'humidity' variable @@ -5872,12 +5872,12 @@ def drop_vars( 104B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: - * time (time) datetime64[ns] 8B 2023-07-01 - * latitude (latitude) float64 16B 40.0 40.2 - * longitude (longitude) float64 16B -75.0 -74.8 + * time (time) datetime64[ns] 2023-07-01 + * latitude (latitude) float64 40.0 40.2 + * longitude (longitude) float64 -75.0 -74.8 Data variables: - temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 - wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 + temperature (time, latitude, longitude) float64 25.5 26.3 27.1 28.0 + wind_speed (time, latitude, longitude) float64 10.2 8.5 12.1 9.8 Drop the 'humidity', 'temperature' variables @@ -5885,11 +5885,11 @@ def drop_vars( 72B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: - * time (time) datetime64[ns] 8B 2023-07-01 - * latitude (latitude) float64 16B 40.0 40.2 - * longitude (longitude) float64 16B -75.0 -74.8 + * time (time) datetime64[ns] 2023-07-01 + * latitude (latitude) float64 40.0 40.2 + * longitude (longitude) float64 -75.0 -74.8 Data variables: - wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 + wind_speed (time, latitude, longitude) float64 10.2 8.5 12.1 9.8 Drop all indexes @@ -5898,9 +5898,9 @@ def drop_vars( Dimensions: (time: 1, latitude: 2, longitude: 2) Dimensions without coordinates: time, latitude, longitude Data variables: - temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 - humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 - wind_speed (time, latitude, 
longitude) float64 32B 10.2 8.5 12.1 9.8 + temperature (time, latitude, longitude) float64 25.5 26.3 27.1 28.0 + humidity (time, latitude, longitude) float64 65.0 63.8 58.2 59.6 + wind_speed (time, latitude, longitude) float64 10.2 8.5 12.1 9.8 Attempt to drop non-existent variable with errors="ignore" @@ -5908,13 +5908,13 @@ def drop_vars( 136B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: - * time (time) datetime64[ns] 8B 2023-07-01 - * latitude (latitude) float64 16B 40.0 40.2 - * longitude (longitude) float64 16B -75.0 -74.8 + * time (time) datetime64[ns] 2023-07-01 + * latitude (latitude) float64 40.0 40.2 + * longitude (longitude) float64 -75.0 -74.8 Data variables: - temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 - humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 - wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 + temperature (time, latitude, longitude) float64 25.5 26.3 27.1 28.0 + humidity (time, latitude, longitude) float64 65.0 63.8 58.2 59.6 + wind_speed (time, latitude, longitude) float64 10.2 8.5 12.1 9.8 Attempt to drop non-existent variable with errors="raise" @@ -6114,26 +6114,26 @@ def drop_sel( 60B Dimensions: (x: 2, y: 3) Coordinates: - * y (y) >> ds.drop_sel(y=["a", "c"]) 20B Dimensions: (x: 2, y: 1) Coordinates: - * y (y) >> ds.drop_sel(y="b") 40B Dimensions: (x: 2, y: 2) Coordinates: - * y (y) Self: 60B Dimensions: (x: 2, y: 3) Coordinates: - * y (y) >> ds.drop_isel(y=[0, 2]) 20B Dimensions: (x: 2, y: 1) Coordinates: - * y (y) >> ds.drop_isel(y=1) 40B Dimensions: (x: 2, y: 2) Coordinates: - * y (y) 104B Dimensions: (time: 4, location: 2) Coordinates: - * time (time) int64 32B 1 2 3 4 - * location (location) 80B Dimensions: (time: 3, location: 2) Coordinates: - * time (time) int64 24B 1 3 4 - * location (location) 80B Dimensions: (time: 3, location: 2) Coordinates: - * time (time) int64 24B 1 3 4 - * location (location) 104B Dimensions: (time: 4, location: 2) 
Coordinates: - * time (time) int64 32B 1 2 3 4 - * location (location) 80B Dimensions: (time: 3, location: 2) Coordinates: - * time (time) int64 24B 1 3 4 - * location (location) Self: 160B Dimensions: (x: 4) Coordinates: - * x (x) int64 32B 0 1 2 3 + * x (x) int64 0 1 2 3 Data variables: - A (x) float64 32B nan 2.0 nan 0.0 - B (x) float64 32B 3.0 4.0 nan 1.0 - C (x) float64 32B nan nan nan 5.0 - D (x) float64 32B nan 3.0 nan 4.0 + A (x) float64 nan 2.0 nan 0.0 + B (x) float64 3.0 4.0 nan 1.0 + C (x) float64 nan nan nan 5.0 + D (x) float64 nan 3.0 nan 4.0 Replace all `NaN` values with 0s. @@ -6497,12 +6497,12 @@ def fillna(self, value: Any) -> Self: 160B Dimensions: (x: 4) Coordinates: - * x (x) int64 32B 0 1 2 3 + * x (x) int64 0 1 2 3 Data variables: - A (x) float64 32B 0.0 2.0 0.0 0.0 - B (x) float64 32B 3.0 4.0 0.0 1.0 - C (x) float64 32B 0.0 0.0 0.0 5.0 - D (x) float64 32B 0.0 3.0 0.0 4.0 + A (x) float64 0.0 2.0 0.0 0.0 + B (x) float64 3.0 4.0 0.0 1.0 + C (x) float64 0.0 0.0 0.0 5.0 + D (x) float64 0.0 3.0 0.0 4.0 Replace all `NaN` elements in column ‘A’, ‘B’, ‘C’, and ‘D’, with 0, 1, 2, and 3 respectively. 
@@ -6511,12 +6511,12 @@ def fillna(self, value: Any) -> Self: 160B Dimensions: (x: 4) Coordinates: - * x (x) int64 32B 0 1 2 3 + * x (x) int64 0 1 2 3 Data variables: - A (x) float64 32B 0.0 2.0 0.0 0.0 - B (x) float64 32B 3.0 4.0 1.0 1.0 - C (x) float64 32B 2.0 2.0 2.0 5.0 - D (x) float64 32B 3.0 3.0 3.0 4.0 + A (x) float64 0.0 2.0 0.0 0.0 + B (x) float64 3.0 4.0 1.0 1.0 + C (x) float64 2.0 2.0 2.0 5.0 + D (x) float64 3.0 3.0 3.0 4.0 """ if utils.is_dict_like(value): value_keys = getattr(value, "data_vars", value).keys() @@ -6625,34 +6625,34 @@ def interpolate_na( 200B Dimensions: (x: 5) Coordinates: - * x (x) int64 40B 0 1 2 3 4 + * x (x) int64 0 1 2 3 4 Data variables: - A (x) float64 40B nan 2.0 3.0 nan 0.0 - B (x) float64 40B 3.0 4.0 nan 1.0 7.0 - C (x) float64 40B nan nan nan 5.0 0.0 - D (x) float64 40B nan 3.0 nan -1.0 4.0 + A (x) float64 nan 2.0 3.0 nan 0.0 + B (x) float64 3.0 4.0 nan 1.0 7.0 + C (x) float64 nan nan nan 5.0 0.0 + D (x) float64 nan 3.0 nan -1.0 4.0 >>> ds.interpolate_na(dim="x", method="linear") 200B Dimensions: (x: 5) Coordinates: - * x (x) int64 40B 0 1 2 3 4 + * x (x) int64 0 1 2 3 4 Data variables: - A (x) float64 40B nan 2.0 3.0 1.5 0.0 - B (x) float64 40B 3.0 4.0 2.5 1.0 7.0 - C (x) float64 40B nan nan nan 5.0 0.0 - D (x) float64 40B nan 3.0 1.0 -1.0 4.0 + A (x) float64 nan 2.0 3.0 1.5 0.0 + B (x) float64 3.0 4.0 2.5 1.0 7.0 + C (x) float64 nan nan nan 5.0 0.0 + D (x) float64 nan 3.0 1.0 -1.0 4.0 >>> ds.interpolate_na(dim="x", method="linear", fill_value="extrapolate") 200B Dimensions: (x: 5) Coordinates: - * x (x) int64 40B 0 1 2 3 4 + * x (x) int64 0 1 2 3 4 Data variables: - A (x) float64 40B 1.0 2.0 3.0 1.5 0.0 - B (x) float64 40B 3.0 4.0 2.5 1.0 7.0 - C (x) float64 40B 20.0 15.0 10.0 5.0 0.0 - D (x) float64 40B 5.0 3.0 1.0 -1.0 4.0 + A (x) float64 1.0 2.0 3.0 1.5 0.0 + B (x) float64 3.0 4.0 2.5 1.0 7.0 + C (x) float64 20.0 15.0 10.0 5.0 0.0 + D (x) float64 5.0 3.0 1.0 -1.0 4.0 """ from xarray.core.missing import 
_apply_over_vars_with_dim, interp_na @@ -6695,9 +6695,9 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: 160B Dimensions: (time: 10) Coordinates: - * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: - data (time) float64 80B 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0 + data (time) float64 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0 # Perform forward fill (ffill) on the dataset @@ -6705,9 +6705,9 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: 160B Dimensions: (time: 10) Coordinates: - * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: - data (time) float64 80B 1.0 1.0 1.0 1.0 5.0 5.0 5.0 8.0 8.0 10.0 + data (time) float64 1.0 1.0 1.0 1.0 5.0 5.0 5.0 8.0 8.0 10.0 # Limit the forward filling to a maximum of 2 consecutive NaN values @@ -6715,9 +6715,9 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Self: 160B Dimensions: (time: 10) Coordinates: - * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: - data (time) float64 80B 1.0 1.0 1.0 nan 5.0 5.0 5.0 8.0 8.0 10.0 + data (time) float64 1.0 1.0 1.0 nan 5.0 5.0 5.0 8.0 8.0 10.0 Returns ------- @@ -6760,9 +6760,9 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: 160B Dimensions: (time: 10) Coordinates: - * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 ... 
2023-01-10 Data variables: - data (time) float64 80B 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0 + data (time) float64 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0 # filled dataset, fills NaN values by propagating values backward @@ -6770,9 +6770,9 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: 160B Dimensions: (time: 10) Coordinates: - * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: - data (time) float64 80B 1.0 5.0 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0 + data (time) float64 1.0 5.0 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0 # Limit the backward filling to a maximum of 2 consecutive NaN values @@ -6780,9 +6780,9 @@ def bfill(self, dim: Hashable, limit: int | None = None) -> Self: 160B Dimensions: (time: 10) Coordinates: - * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 + * time (time) datetime64[ns] 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: - data (time) float64 80B 1.0 nan 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0 + data (time) float64 1.0 nan 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0 Returns ------- @@ -6883,10 +6883,10 @@ def reduce( 132B Dimensions: (student: 3) Coordinates: - * student (student) >> ds.map(np.fabs) 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: - foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773 - bar (x) float64 16B 1.0 2.0 + foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 0.9773 + bar (x) float64 1.0 2.0 """ if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) @@ -7090,11 +7090,11 @@ def assign( 96B Dimensions: (lat: 2, lon: 2) Coordinates: - * lat (lat) int64 16B 10 20 - * lon (lon) int64 16B 150 160 + * lat (lat) int64 10 20 + * lon (lon) int64 150 160 Data variables: - temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 - precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 + temperature_c 
(lat, lon) float64 10.98 14.3 12.06 10.9 + precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918 Where the value is a callable, evaluated on dataset: @@ -7102,12 +7102,12 @@ def assign( 128B Dimensions: (lat: 2, lon: 2) Coordinates: - * lat (lat) int64 16B 10 20 - * lon (lon) int64 16B 150 160 + * lat (lat) int64 10 20 + * lon (lon) int64 150 160 Data variables: - temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 - precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 - temperature_f (lat, lon) float64 32B 51.76 57.75 53.7 51.62 + temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9 + precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918 + temperature_f (lat, lon) float64 51.76 57.75 53.7 51.62 Alternatively, the same behavior can be achieved by directly referencing an existing dataarray: @@ -7115,12 +7115,12 @@ def assign( 128B Dimensions: (lat: 2, lon: 2) Coordinates: - * lat (lat) int64 16B 10 20 - * lon (lon) int64 16B 150 160 + * lat (lat) int64 10 20 + * lon (lon) int64 150 160 Data variables: - temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 - precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 - temperature_f (lat, lon) float64 32B 51.76 57.75 53.7 51.62 + temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9 + precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918 + temperature_f (lat, lon) float64 51.76 57.75 53.7 51.62 """ variables = either_dict_or_kwargs(variables, variables_kwargs, "assign") @@ -7624,10 +7624,10 @@ def from_dict(cls, d: Mapping[Any, Any]) -> Self: 60B Dimensions: (t: 3) Coordinates: - * t (t) int64 24B 0 1 2 + * t (t) int64 0 1 2 Data variables: - a (t) >> d = { ... 
"coords": { @@ -7645,10 +7645,10 @@ def from_dict(cls, d: Mapping[Any, Any]) -> Self: 60B Dimensions: (t: 3) Coordinates: - * t (t) int64 24B 0 1 2 + * t (t) int64 0 1 2 Data variables: - a (t) int64 24B 10 20 30 - b (t) >> ds.diff("x", 2) 16B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: - foo (x) int64 16B 1 -1 + foo (x) int64 1 -1 See Also -------- @@ -7921,7 +7921,7 @@ def shift( Dimensions: (x: 5) Dimensions without coordinates: x Data variables: - foo (x) object 40B nan nan 'a' 'b' 'c' + foo (x) object nan nan 'a' 'b' 'c' """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift") invalid = tuple(k for k in shifts if k not in self.dims) @@ -7989,17 +7989,17 @@ def roll( 60B Dimensions: (x: 5) Coordinates: - * x (x) int64 40B 0 1 2 3 4 + * x (x) int64 0 1 2 3 4 Data variables: - foo (x) >> ds.roll(x=2, roll_coords=True) 60B Dimensions: (x: 5) Coordinates: - * x (x) int64 40B 3 4 0 1 2 + * x (x) int64 3 4 0 1 2 Data variables: - foo (x) 88B Dimensions: (x: 2, y: 2) Coordinates: - * x (x) >> ds.sortby(lambda x: -x["y"]) 88B Dimensions: (x: 2, y: 2) Coordinates: - * x (x) 16B Dimensions: () Coordinates: - quantile float64 8B 0.0 + quantile float64 0.0 Data variables: - a float64 8B 0.7 + a float64 0.7 >>> ds.quantile(0, dim="x") 72B Dimensions: (y: 4) Coordinates: - * y (y) float64 32B 1.0 1.5 2.0 2.5 - quantile float64 8B 0.0 + * y (y) float64 1.0 1.5 2.0 2.5 + quantile float64 0.0 Data variables: - a (y) float64 32B 0.7 4.2 2.6 1.5 + a (y) float64 0.7 4.2 2.6 1.5 >>> ds.quantile([0, 0.5, 1]) 48B Dimensions: (quantile: 3) Coordinates: - * quantile (quantile) float64 24B 0.0 0.5 1.0 + * quantile (quantile) float64 0.0 0.5 1.0 Data variables: - a (quantile) float64 24B 0.7 3.4 9.4 + a (quantile) float64 0.7 3.4 9.4 >>> ds.quantile([0, 0.5, 1], dim="x") 152B Dimensions: (quantile: 3, y: 4) Coordinates: - * y (y) float64 32B 1.0 1.5 2.0 2.5 - * quantile (quantile) float64 24B 0.0 0.5 1.0 + * y (y) float64 1.0 1.5 2.0 2.5 + * quantile 
(quantile) float64 0.0 0.5 1.0 Data variables: - a (quantile, y) float64 96B 0.7 4.2 2.6 1.5 3.6 ... 6.5 7.3 9.4 1.9 + a (quantile, y) float64 0.7 4.2 2.6 1.5 3.6 ... 6.5 7.3 9.4 1.9 References ---------- @@ -8484,23 +8484,23 @@ def integrate( 128B Dimensions: (x: 4) Coordinates: - * x (x) int64 32B 0 1 2 3 - y (x) int64 32B 1 7 3 5 + * x (x) int64 0 1 2 3 + y (x) int64 1 7 3 5 Data variables: - a (x) int64 32B 5 5 6 6 - b (x) int64 32B 1 2 1 0 + a (x) int64 5 5 6 6 + b (x) int64 1 2 1 0 >>> ds.integrate("x") 16B Dimensions: () Data variables: - a float64 8B 16.5 - b float64 8B 3.5 + a float64 16.5 + b float64 3.5 >>> ds.integrate("y") 16B Dimensions: () Data variables: - a float64 8B 20.0 - b float64 8B 4.0 + a float64 20.0 + b float64 4.0 """ if not isinstance(coord, (list, tuple)): coord = (coord,) @@ -8607,29 +8607,29 @@ def cumulative_integrate( 128B Dimensions: (x: 4) Coordinates: - * x (x) int64 32B 0 1 2 3 - y (x) int64 32B 1 7 3 5 + * x (x) int64 0 1 2 3 + y (x) int64 1 7 3 5 Data variables: - a (x) int64 32B 5 5 6 6 - b (x) int64 32B 1 2 1 0 + a (x) int64 5 5 6 6 + b (x) int64 1 2 1 0 >>> ds.cumulative_integrate("x") 128B Dimensions: (x: 4) Coordinates: - * x (x) int64 32B 0 1 2 3 - y (x) int64 32B 1 7 3 5 + * x (x) int64 0 1 2 3 + y (x) int64 1 7 3 5 Data variables: - a (x) float64 32B 0.0 5.0 10.5 16.5 - b (x) float64 32B 0.0 1.5 3.0 3.5 + a (x) float64 0.0 5.0 10.5 16.5 + b (x) float64 0.0 1.5 3.0 3.5 >>> ds.cumulative_integrate("y") 128B Dimensions: (x: 4) Coordinates: - * x (x) int64 32B 0 1 2 3 - y (x) int64 32B 1 7 3 5 + * x (x) int64 0 1 2 3 + y (x) int64 1 7 3 5 Data variables: - a (x) float64 32B 0.0 30.0 8.0 20.0 - b (x) float64 32B 0.0 9.0 3.0 4.0 + a (x) float64 0.0 30.0 8.0 20.0 + b (x) float64 0.0 9.0 3.0 4.0 """ if not isinstance(coord, (list, tuple)): coord = (coord,) @@ -8720,13 +8720,13 @@ def filter_by_attrs(self, **kwargs) -> Self: 192B Dimensions: (x: 2, y: 2, time: 3) Coordinates: - lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 
- lat (x, y) float64 32B 42.25 42.21 42.63 42.59 - * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 - reference_time datetime64[ns] 8B 2014-09-05 + lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 + lat (x, y) float64 42.25 42.21 42.63 42.59 + * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 + reference_time datetime64[ns] 2014-09-05 Dimensions without coordinates: x, y Data variables: - precipitation (x, y, time) float64 96B 5.68 9.256 0.7104 ... 4.615 7.805 + precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 4.615 7.805 Get all variables that have a standard_name attribute: @@ -8735,14 +8735,14 @@ def filter_by_attrs(self, **kwargs) -> Self: 288B Dimensions: (x: 2, y: 2, time: 3) Coordinates: - lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 - lat (x, y) float64 32B 42.25 42.21 42.63 42.59 - * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 - reference_time datetime64[ns] 8B 2014-09-05 + lon (x, y) float64 -99.83 -99.32 -99.79 -99.23 + lat (x, y) float64 42.25 42.21 42.63 42.59 + * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08 + reference_time datetime64[ns] 2014-09-05 Dimensions without coordinates: x, y Data variables: - temperature (x, y, time) float64 96B 29.11 18.2 22.83 ... 16.15 26.63 - precipitation (x, y, time) float64 96B 5.68 9.256 0.7104 ... 4.615 7.805 + temperature (x, y, time) float64 29.11 18.2 22.83 ... 16.15 26.63 + precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 4.615 7.805 """ selection = [] @@ -8859,10 +8859,10 @@ def map_blocks( 576B Dimensions: (time: 24) Coordinates: - * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 - month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 + * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 + month (time) int64 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 Data variables: - a (time) float64 192B 0.1289 0.1132 -0.0856 ... 
0.1906 -0.05901 + a (time) float64 0.1289 0.1132 -0.0856 ... 0.1906 -0.05901 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: @@ -8875,10 +8875,10 @@ def map_blocks( 576B Dimensions: (time: 24) Coordinates: - * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 - month (time) int64 192B dask.array + * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 + month (time) int64 dask.array Data variables: - a (time) float64 192B dask.array + a (time) float64 dask.array """ from xarray.core.parallel import map_blocks @@ -9202,7 +9202,7 @@ def pad( Dimensions: (x: 8) Dimensions without coordinates: x Data variables: - foo (x) float64 64B nan 0.0 1.0 2.0 3.0 4.0 nan nan + foo (x) float64 nan 0.0 1.0 2.0 3.0 4.0 nan nan """ pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad") @@ -9331,26 +9331,26 @@ def idxmin( 56B Dimensions: (y: 3) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 Data variables: - int int64 8B -2 - float (y) float64 24B -2.0 -4.0 1.0 + int int64 -2 + float (y) float64 -2.0 -4.0 1.0 >>> ds.argmin(dim="x") 56B Dimensions: (y: 3) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 Data variables: - int int64 8B 4 - float (y) int64 24B 4 0 2 + int int64 4 + float (y) int64 4 0 2 >>> ds.idxmin(dim="x") 52B Dimensions: (y: 3) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 Data variables: - int 56B Dimensions: (y: 3) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 Data variables: - int int64 8B 2 - float (y) float64 24B 2.0 2.0 1.0 + int int64 2 + float (y) float64 2.0 2.0 1.0 >>> ds.argmax(dim="x") 56B Dimensions: (y: 3) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) int64 -1 0 1 Data variables: - int int64 8B 1 - float (y) int64 24B 0 2 2 + int int64 1 + float (y) int64 0 2 2 >>> ds.idxmax(dim="x") 52B Dimensions: (y: 3) Coordinates: - * y (y) int64 24B -1 0 1 + * y (y) 
int64 -1 0 1 Data variables: - int Self: 84B array(['Bob', 'Bob', 'Alice'], dtype='>> min_score_in_english = dataset["student"].isel( ... student=argmin_indices["english_scores"] @@ -9529,8 +9529,8 @@ def argmin(self, dim: Hashable | None = None, **kwargs) -> Self: 84B array(['Charlie', 'Bob', 'Charlie'], dtype=' Self: 132B Dimensions: (student: 3) Coordinates: - * student (student) >> ds.eval("a + b") 40B @@ -9704,9 +9704,9 @@ def eval( Dimensions: (x: 5) Dimensions without coordinates: x Data variables: - a (x) int64 40B 0 1 2 3 4 - b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 - c (x) float64 40B 0.0 1.25 2.5 3.75 5.0 + a (x) int64 0 1 2 3 4 + b (x) float64 0.0 0.25 0.5 0.75 1.0 + c (x) float64 0.0 1.25 2.5 3.75 5.0 """ return pd.eval( @@ -9786,15 +9786,15 @@ def query( Dimensions: (x: 5) Dimensions without coordinates: x Data variables: - a (x) int64 40B 0 1 2 3 4 - b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 + a (x) int64 0 1 2 3 4 + b (x) float64 0.0 0.25 0.5 0.75 1.0 >>> ds.query(x="a > 2") 32B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: - a (x) int64 16B 3 4 - b (x) float64 16B 0.75 1.0 + a (x) int64 3 4 + b (x) float64 0.75 1.0 """ # allow queries to be given either as a dict or as kwargs diff --git a/xarray/core/datatree.py b/xarray/core/datatree.py index 5a0b05e2cf5..90418cabd61 100644 --- a/xarray/core/datatree.py +++ b/xarray/core/datatree.py @@ -279,15 +279,15 @@ def map( # type: ignore[override] Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: - foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773 - bar (x) int64 16B -1 2 + foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 -0.9773 + bar (x) int64 -1 2 >>> ds.map(np.fabs) 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: - foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773 - bar (x) float64 16B 1.0 2.0 + foo (dim_0, dim_1) float64 
1.764 0.4002 0.9787 2.241 1.868 0.9773 + bar (x) float64 1.0 2.0 """ # Copied from xarray.Dataset so as not to call type(self), which causes problems (see https://github.com/xarray-contrib/datatree/issues/188). diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py index 39549ba5bae..18137b4a1ba 100644 --- a/xarray/core/groupby.py +++ b/xarray/core/groupby.py @@ -1332,17 +1332,17 @@ def quantile( array([[0.7, 4.2, 0.7, 1.5], [6.5, 7.3, 2.6, 1.9]]) Coordinates: - * y (y) int64 32B 1 1 2 2 - quantile float64 8B 0.0 - * x (x) int64 16B 0 1 + * y (y) int64 1 1 2 2 + quantile float64 0.0 + * x (x) int64 0 1 >>> ds.groupby("y").quantile(0, dim=...) 40B Dimensions: (y: 2) Coordinates: - quantile float64 8B 0.0 - * y (y) int64 16B 1 2 + quantile float64 0.0 + * y (y) int64 1 2 Data variables: - a (y) float64 16B 0.7 0.7 + a (y) float64 0.7 0.7 >>> da.groupby("x").quantile([0, 0.5, 1]) 192B array([[[0.7 , 1. , 1.3 ], @@ -1355,17 +1355,17 @@ def quantile( [2.6 , 2.6 , 2.6 ], [1.9 , 1.9 , 1.9 ]]]) Coordinates: - * y (y) int64 32B 1 1 2 2 - * quantile (quantile) float64 24B 0.0 0.5 1.0 - * x (x) int64 16B 0 1 + * y (y) int64 1 1 2 2 + * quantile (quantile) float64 0.0 0.5 1.0 + * x (x) int64 0 1 >>> ds.groupby("y").quantile([0, 0.5, 1], dim=...) 
88B Dimensions: (y: 2, quantile: 3) Coordinates: - * quantile (quantile) float64 24B 0.0 0.5 1.0 - * y (y) int64 16B 1 2 + * quantile (quantile) float64 0.0 0.5 1.0 + * y (y) int64 1 2 Data variables: - a (y, quantile) float64 48B 0.7 5.35 8.4 0.7 2.25 9.4 + a (y, quantile) float64 0.7 5.35 8.4 0.7 2.25 9.4 References ---------- diff --git a/xarray/core/merge.py b/xarray/core/merge.py index 5382596a2ac..f7ec32bddfd 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -823,120 +823,120 @@ def merge( array([[1., 2.], [3., 5.]]) Coordinates: - * lat (lat) float64 16B 35.0 40.0 - * lon (lon) float64 16B 100.0 120.0 + * lat (lat) float64 35.0 40.0 + * lon (lon) float64 100.0 120.0 >>> y 32B array([[5., 6.], [7., 8.]]) Coordinates: - * lat (lat) float64 16B 35.0 42.0 - * lon (lon) float64 16B 100.0 150.0 + * lat (lat) float64 35.0 42.0 + * lon (lon) float64 100.0 150.0 >>> z 32B array([[0., 3.], [4., 9.]]) Coordinates: - * time (time) float64 16B 30.0 60.0 - * lon (lon) float64 16B 100.0 150.0 + * time (time) float64 30.0 60.0 + * lon (lon) float64 100.0 150.0 >>> xr.merge([x, y, z]) 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * lon (lon) float64 24B 100.0 120.0 150.0 - * time (time) float64 16B 30.0 60.0 + * lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 150.0 + * time (time) float64 30.0 60.0 Data variables: - var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan - var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 - var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 + var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan + var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 + var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="identical") 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * lon (lon) float64 24B 100.0 120.0 150.0 - * time (time) float64 16B 30.0 60.0 + 
* lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 150.0 + * time (time) float64 30.0 60.0 Data variables: - var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan - var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 - var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 + var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan + var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 + var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="equals") 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * lon (lon) float64 24B 100.0 120.0 150.0 - * time (time) float64 16B 30.0 60.0 + * lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 150.0 + * time (time) float64 30.0 60.0 Data variables: - var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan - var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 - var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 + var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan + var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 + var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="equals", fill_value=-999.0) 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * lon (lon) float64 24B 100.0 120.0 150.0 - * time (time) float64 16B 30.0 60.0 + * lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 150.0 + * time (time) float64 30.0 60.0 Data variables: - var1 (lat, lon) float64 72B 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0 - var2 (lat, lon) float64 72B 5.0 -999.0 6.0 -999.0 ... 7.0 -999.0 8.0 - var3 (time, lon) float64 48B 0.0 -999.0 3.0 4.0 -999.0 9.0 + var1 (lat, lon) float64 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0 + var2 (lat, lon) float64 5.0 -999.0 6.0 -999.0 ... 
7.0 -999.0 8.0 + var3 (time, lon) float64 0.0 -999.0 3.0 4.0 -999.0 9.0 >>> xr.merge([x, y, z], join="override") 144B Dimensions: (lat: 2, lon: 2, time: 2) Coordinates: - * lat (lat) float64 16B 35.0 40.0 - * lon (lon) float64 16B 100.0 120.0 - * time (time) float64 16B 30.0 60.0 + * lat (lat) float64 35.0 40.0 + * lon (lon) float64 100.0 120.0 + * time (time) float64 30.0 60.0 Data variables: - var1 (lat, lon) float64 32B 1.0 2.0 3.0 5.0 - var2 (lat, lon) float64 32B 5.0 6.0 7.0 8.0 - var3 (time, lon) float64 32B 0.0 3.0 4.0 9.0 + var1 (lat, lon) float64 1.0 2.0 3.0 5.0 + var2 (lat, lon) float64 5.0 6.0 7.0 8.0 + var3 (time, lon) float64 0.0 3.0 4.0 9.0 >>> xr.merge([x, y, z], join="inner") 64B Dimensions: (lat: 1, lon: 1, time: 2) Coordinates: - * lat (lat) float64 8B 35.0 - * lon (lon) float64 8B 100.0 - * time (time) float64 16B 30.0 60.0 + * lat (lat) float64 35.0 + * lon (lon) float64 100.0 + * time (time) float64 30.0 60.0 Data variables: - var1 (lat, lon) float64 8B 1.0 - var2 (lat, lon) float64 8B 5.0 - var3 (time, lon) float64 16B 0.0 4.0 + var1 (lat, lon) float64 1.0 + var2 (lat, lon) float64 5.0 + var3 (time, lon) float64 0.0 4.0 >>> xr.merge([x, y, z], compat="identical", join="inner") 64B Dimensions: (lat: 1, lon: 1, time: 2) Coordinates: - * lat (lat) float64 8B 35.0 - * lon (lon) float64 8B 100.0 - * time (time) float64 16B 30.0 60.0 + * lat (lat) float64 35.0 + * lon (lon) float64 100.0 + * time (time) float64 30.0 60.0 Data variables: - var1 (lat, lon) float64 8B 1.0 - var2 (lat, lon) float64 8B 5.0 - var3 (time, lon) float64 16B 0.0 4.0 + var1 (lat, lon) float64 1.0 + var2 (lat, lon) float64 5.0 + var3 (time, lon) float64 0.0 4.0 >>> xr.merge([x, y, z], compat="broadcast_equals", join="outer") 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: - * lat (lat) float64 24B 35.0 40.0 42.0 - * lon (lon) float64 24B 100.0 120.0 150.0 - * time (time) float64 16B 30.0 60.0 + * lat (lat) float64 35.0 40.0 42.0 + * lon (lon) float64 100.0 120.0 150.0 + 
* time (time) float64 30.0 60.0 Data variables: - var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan - var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 - var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 + var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan + var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 + var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], join="exact") Traceback (most recent call last): diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py index 51e772892e5..02f5be37fe1 100644 --- a/xarray/core/parallel.py +++ b/xarray/core/parallel.py @@ -313,8 +313,8 @@ def map_blocks( 0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108, 0.07673453, 0.22865714, 0.19063865, -0.0590131 ]) Coordinates: - * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 - month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 + * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 + month (time) int64 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: @@ -327,8 +327,8 @@ def map_blocks( 192B dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: - * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 - month (time) int64 192B dask.array + * time (time) object 1990-01-31 00:00:00 ... 
1991-12-31 00:00:00 + month (time) int64 dask.array """ def _wrapper( diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py index 95e613a09f8..e08e65a2924 100644 --- a/xarray/tests/test_dask.py +++ b/xarray/tests/test_dask.py @@ -658,7 +658,7 @@ def test_dataarray_repr(self): 8B {data!r} Coordinates: - y (x) int64 8B dask.array + y (x) int64 dask.array Dimensions without coordinates: x""" ) assert expected == repr(a) @@ -673,10 +673,10 @@ def test_dataset_repr(self): 16B Dimensions: (x: 1) Coordinates: - y (x) int64 8B dask.array + y (x) int64 dask.array Dimensions without coordinates: x Data variables: - a (x) int64 8B dask.array""" + a (x) int64 dask.array""" ) assert expected == repr(ds) assert kernel_call_count == 0 # should not evaluate dask array diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index 12cb4e4d86c..fe287891e92 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -101,8 +101,8 @@ def test_repr(self) -> None: array([[1, 2, 3], [4, 5, 6]], dtype=uint64) Coordinates: - * x (x) uint64 24B 0 1 2 - other uint64 8B 0 + * x (x) uint64 0 1 2 + other uint64 0 Dimensions without coordinates: time Attributes: foo: bar""" @@ -115,9 +115,9 @@ def test_repr_multiindex(self) -> None: 32B array([0, 1, 2, 3], dtype=uint64) Coordinates: - * x (x) object 32B MultiIndex - * level_1 (x) object 32B 'a' 'a' 'b' 'b' - * level_2 (x) int64 32B 1 2 1 2""" + * x (x) object MultiIndex + * level_1 (x) object 'a' 'a' 'b' 'b' + * level_2 (x) int64 1 2 1 2""" ) assert expected == repr(self.mda) @@ -136,9 +136,9 @@ def test_repr_multiindex_long(self) -> None: 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], dtype=uint64) Coordinates: - * x (x) object 256B MultiIndex - * level_1 (x) object 256B 'a' 'a' 'a' 'a' 'a' 'a' ... 'd' 'd' 'd' 'd' 'd' 'd' - * level_2 (x) int64 256B 1 2 3 4 5 6 7 8 1 2 3 4 ... 
5 6 7 8 1 2 3 4 5 6 7 8""" + * x (x) object MultiIndex + * level_1 (x) object 'a' 'a' 'a' 'a' 'a' 'a' ... 'd' 'd' 'd' 'd' 'd' 'd' + * level_2 (x) int64 1 2 3 4 5 6 7 8 1 2 3 4 ... 5 6 7 8 1 2 3 4 5 6 7 8""" ) assert expected == repr(mda_long) @@ -1451,8 +1451,8 @@ def test_coords(self) -> None: expected_repr = dedent( """\ Coordinates: - * x (x) int64 16B -1 -2 - * y (y) int64 24B 0 1 2""" + * x (x) int64 -1 -2 + * y (y) int64 0 1 2""" ) actual = repr(da.coords) assert expected_repr == actual diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index e8af9373981..5d8e2a3d286 100644 --- a/xarray/tests/test_dataset.py +++ b/xarray/tests/test_dataset.py @@ -283,15 +283,15 @@ def test_repr(self) -> None: 2kB Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8) Coordinates: - * dim2 (dim2) float64 72B 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 - * dim3 (dim3) {} 40B 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' - * time (time) datetime64[ns] 160B 2000-01-01 2000-01-02 ... 2000-01-20 - numbers (dim3) int64 80B 0 1 2 0 0 1 1 2 2 3 + * dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 + * dim3 (dim3) {} 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' + * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-20 + numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3 Dimensions without coordinates: dim1 Data variables: - var1 (dim1, dim2) float64 576B -1.086 0.9973 0.283 ... 0.4684 -0.8312 - var2 (dim1, dim2) float64 576B 1.162 -1.097 -2.123 ... 1.267 0.3328 - var3 (dim3, dim1) float64 640B 0.5565 -0.2121 0.4563 ... -0.2452 -0.3616 + var1 (dim1, dim2) float64 -1.086 0.9973 0.283 ... 0.4684 -0.8312 + var2 (dim1, dim2) float64 1.162 -1.097 -2.123 ... 1.267 0.3328 + var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 ... 
-0.2452 -0.3616 Attributes: foo: bar""".format( data["dim3"].dtype @@ -323,7 +323,7 @@ def test_repr(self) -> None: 8B Dimensions: () Data variables: - foo float64 8B 1.0""" + foo float64 1.0""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) print(actual) @@ -340,9 +340,9 @@ def test_repr_multiindex(self) -> None: 96B Dimensions: (x: 4) Coordinates: - * x (x) object 32B MultiIndex - * level_1 (x) object 32B 'a' 'a' 'b' 'b' - * level_2 (x) int64 32B 1 2 1 2 + * x (x) object MultiIndex + * level_1 (x) object 'a' 'a' 'b' 'b' + * level_2 (x) int64 1 2 1 2 Data variables: *empty*""" ) @@ -361,9 +361,9 @@ def test_repr_multiindex(self) -> None: 96B Dimensions: (x: 4) Coordinates: - * x (x) object 32B MultiIndex - * a_quite_long_level_name (x) object 32B 'a' 'a' 'b' 'b' - * level_2 (x) int64 32B 1 2 1 2 + * x (x) object MultiIndex + * a_quite_long_level_name (x) object 'a' 'a' 'b' 'b' + * level_2 (x) int64 1 2 1 2 Data variables: *empty*""" ) @@ -389,7 +389,7 @@ def test_unicode_data(self) -> None: 12B Dimensions: (foø: 1) Coordinates: - * foø (foø) %cU3 12B %r + * foø (foø) %cU3 %r Data variables: *empty* Attributes: @@ -422,7 +422,7 @@ def __repr__(self): Dimensions: (x: 2) Dimensions without coordinates: x Data variables: - foo (x) float64 16B Custom Array""" + foo (x) float64 Custom Array""" ) assert expected == repr(dataset) @@ -866,10 +866,10 @@ def test_coords_properties(self) -> None: expected = dedent( """\ Coordinates: - * x (x) int64 16B -1 -2 - * y (y) int64 24B 0 1 2 - a (x) int64 16B 4 5 - b int64 8B -10""" + * x (x) int64 -1 -2 + * y (y) int64 0 1 2 + a (x) int64 4 5 + b int64 -10""" ) actual = repr(coords) assert expected == actual @@ -1059,8 +1059,8 @@ def test_data_vars_properties(self) -> None: expected = dedent( """\ Data variables: - foo (x) float64 8B 1.0 - bar float64 8B 2.0""" + foo (x) float64 1.0 + bar float64 2.0""" ) actual = repr(ds.data_vars) assert expected == actual diff --git a/xarray/tests/test_formatting.py 
b/xarray/tests/test_formatting.py index e1b97cc8adc..9b871db198d 100644 --- a/xarray/tests/test_formatting.py +++ b/xarray/tests/test_formatting.py @@ -318,12 +318,12 @@ def test_diff_array_repr(self) -> None: R array([1, 2], dtype=int64) Differing coordinates: - L * x (x) %cU1 8B 'a' 'b' - R * x (x) %cU1 8B 'a' 'c' + L * x (x) %cU1 'a' 'b' + R * x (x) %cU1 'a' 'c' Coordinates only on the left object: - * y (y) int64 24B 1 2 3 + * y (y) int64 1 2 3 Coordinates only on the right object: - label (x) int64 16B 1 2 + label (x) int64 1 2 Differing attributes: L units: m R units: kg @@ -438,22 +438,22 @@ def test_diff_dataset_repr(self) -> None: Differing dimensions: (x: 2, y: 3) != (x: 2) Differing coordinates: - L * x (x) %cU1 8B 'a' 'b' + L * x (x) %cU1 'a' 'b' Differing variable attributes: foo: bar - R * x (x) %cU1 8B 'a' 'c' + R * x (x) %cU1 'a' 'c' Differing variable attributes: source: 0 foo: baz Coordinates only on the left object: - * y (y) int64 24B 1 2 3 + * y (y) int64 1 2 3 Coordinates only on the right object: - label (x) int64 16B 1 2 + label (x) int64 1 2 Differing data variables: - L var1 (x, y) int64 48B 1 2 3 4 5 6 - R var1 (x) int64 16B 1 2 + L var1 (x, y) int64 1 2 3 4 5 6 + R var1 (x) int64 1 2 Data variables only on the left object: - var2 (x) int64 16B 3 4 + var2 (x) int64 3 4 Differing attributes: L title: mytitle R title: newtitle @@ -546,9 +546,9 @@ def test_display_variables_nbytes(self) -> None: 27B Dimensions: (x: 3) Coordinates: - * x (x) float64 24B 10.0 20.0 30.0 + * x (x) float64 10.0 20.0 30.0 Data variables: - myvar (x) uint8 3B 11 22 33""" + myvar (x) uint8 11 22 33""" ) with xr.set_options(display_variables_nbytes=False): @@ -592,9 +592,9 @@ def test_display_variables_nbytes_lazy(self) -> None: 27B Dimensions: (x: 3) Coordinates: - * x (x) float64 24B 10.0 20.0 30.0 + * x (x) float64 10.0 20.0 30.0 Data variables: - myvar (x) uint8 3B dask.array""" + myvar (x) uint8 dask.array""" ) with 
xr.set_options(display_variables_nbytes=False): @@ -618,7 +618,7 @@ def test_display_variables_nbytes_lazy(self) -> None: Coordinates: * x (x) float64 10.0 20.0 30.0 Data variables: - myvar (x) uint8 3B dask.array""" + myvar (x) uint8 dask.array""" ) @requires_dask @@ -735,13 +735,13 @@ def test_diff_datatree_repr_node_data(self): Data in nodes at position '/a' do not match: Data variables only on the left object: - v int64 8B 1 + v int64 1 Data in nodes at position '/a/b' do not match: Differing data variables: - L w int64 8B 5 - R w int64 8B 6""" + L w int64 5 + R w int64 6""" ) actual = formatting.diff_datatree_repr(dt_1, dt_2, "equals") assert actual == expected @@ -1039,7 +1039,7 @@ def test_display_nbytes() -> None: Dimensions: (foo: 1200, bar: 111) Coordinates: * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 - * bar (bar) int16 222B 0 1 2 3 4 5 6 7 ... 104 105 106 107 108 109 110 + * bar (bar) int16 0 1 2 3 4 5 6 7 ... 104 105 106 107 108 109 110 Data variables: *empty* """.strip() diff --git a/xarray/tests/test_sparse.py b/xarray/tests/test_sparse.py index 77b8e4ab0a0..316a1209252 100644 --- a/xarray/tests/test_sparse.py +++ b/xarray/tests/test_sparse.py @@ -684,7 +684,7 @@ def test_dataarray_repr(self): 64B Coordinates: - y (x) int64 48B + y (x) int64 Dimensions without coordinates: x""" ) assert expected == repr(a) @@ -699,10 +699,10 @@ def test_dataset_repr(self): 112B Dimensions: (x: 4) Coordinates: - y (x) int64 48B + y (x) int64 Dimensions without coordinates: x Data variables: - a (x) float64 64B """ + a (x) float64 """ ) assert expected == repr(ds) @@ -717,7 +717,7 @@ def test_sparse_dask_dataset_repr(self): Dimensions: (x: 4) Dimensions without coordinates: x Data variables: - a (x) float64 32B dask.array""" + a (x) float64 dask.array""" ) assert expected == repr(ds) From 57f516ba1efbe1e63f515df450e4741128a79527 Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 19:30:07 +0200 Subject: [PATCH 06/10] Remote kilobytes 
--- xarray/core/common.py | 4 ++-- xarray/core/dataarray.py | 12 ++++++------ xarray/core/options.py | 2 +- xarray/tests/test_formatting.py | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/xarray/core/common.py b/xarray/core/common.py index 83a2d191b69..e18804347ff 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -1002,7 +1002,7 @@ def _resample( 10.80645161, 10.83870968, 10.87096774, 10.90322581, 10.93548387, 10.96774194, 11. ]) Coordinates: - * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15 Limit scope of upsampling method @@ -1035,7 +1035,7 @@ def _resample( nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 11., 11.]) Coordinates: - * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15 See Also -------- diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 57f3120ac4b..37578b93726 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -6720,13 +6720,13 @@ def groupby( array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03]) Coordinates: - * time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31 + * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2004-12-31 >>> da.groupby("time.dayofyear") - da.groupby("time.dayofyear").mean("time") 15kB array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5]) Coordinates: - * time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31 - dayofyear (time) int64 15kB 1 2 3 4 5 6 7 8 ... 360 361 362 363 364 365 366 + * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2004-12-31 + dayofyear (time) int64 1 2 3 4 5 6 7 8 ... 
360 361 362 363 364 365 366 See Also -------- @@ -7122,7 +7122,7 @@ def coarsen( 356.98071625, 357.98347107, 358.9862259 , 359.98898072, 360.99173554, 361.99449036, 362.99724518, 364. ]) Coordinates: - * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-12-12 + * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12 >>> da.coarsen(time=3, boundary="trim").mean() # +doctest: ELLIPSIS 968B array([ 1.00275482, 4.01101928, 7.01928375, 10.02754821, @@ -7326,7 +7326,7 @@ def resample( 10.80645161, 10.83870968, 10.87096774, 10.90322581, 10.93548387, 10.96774194, 11. ]) Coordinates: - * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15 Limit scope of upsampling method @@ -7359,7 +7359,7 @@ def resample( nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 11., 11.]) Coordinates: - * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15 + * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15 See Also -------- diff --git a/xarray/core/options.py b/xarray/core/options.py index a5ad856e064..9f1a1519471 100644 --- a/xarray/core/options.py +++ b/xarray/core/options.py @@ -274,7 +274,7 @@ class set_options: 8kB Dimensions: (x: 1000) Coordinates: - * x (x) int64 8kB 0 1 ... 999 + * x (x) int64 0 1 ... 999 Data variables: *empty* diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py index 9b871db198d..1736394c4ad 100644 --- a/xarray/tests/test_formatting.py +++ b/xarray/tests/test_formatting.py @@ -1038,7 +1038,7 @@ def test_display_nbytes() -> None: 3kB Dimensions: (foo: 1200, bar: 111) Coordinates: - * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 + * foo (foo) int16 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 * bar (bar) int16 0 1 2 3 4 5 6 7 ... 
104 105 106 107 108 109 110 Data variables: *empty* @@ -1050,7 +1050,7 @@ def test_display_nbytes() -> None: 2kB array([ 0, 1, 2, ..., 1197, 1198, 1199], dtype=int16) Coordinates: - * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 + * foo (foo) int16 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 """.strip() assert actual == expected From 278ff3d4abcfd0ae8eca24ba48ed98752174c66f Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 19:34:38 +0200 Subject: [PATCH 07/10] Add back bytes on dask arrays --- xarray/core/dataarray.py | 2 +- xarray/core/dataset.py | 4 ++-- xarray/core/parallel.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 37578b93726..c469a2c27b0 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -5559,7 +5559,7 @@ def map_blocks( dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 - month (time) int64 dask.array + month (time) int64 192B dask.array """ from xarray.core.parallel import map_blocks diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index ecdd4637ce5..843a768f452 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -8876,9 +8876,9 @@ def map_blocks( Dimensions: (time: 24) Coordinates: * time (time) object 1990-01-31 00:00:00 ... 
1991-12-31 00:00:00 - month (time) int64 dask.array + month (time) int64 192B dask.array Data variables: - a (time) float64 dask.array + a (time) float64 192B dask.array """ from xarray.core.parallel import map_blocks diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py index 02f5be37fe1..6e499c380d7 100644 --- a/xarray/core/parallel.py +++ b/xarray/core/parallel.py @@ -328,7 +328,7 @@ def map_blocks( dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 - month (time) int64 dask.array + month (time) int64 192B dask.array """ def _wrapper( From 89e8b28f3ad825decdc1508f39d574652d9d647c Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 19:59:05 +0200 Subject: [PATCH 08/10] Fix remaining tests, mypyt --- xarray/namedarray/_array_api.py | 8 ++++---- xarray/tests/test_dataarray.py | 7 +++---- xarray/tests/test_dataset.py | 10 ++++------ xarray/tests/test_formatting.py | 24 ++++++++++++------------ 4 files changed, 23 insertions(+), 26 deletions(-) diff --git a/xarray/namedarray/_array_api.py b/xarray/namedarray/_array_api.py index 7c2cc36b292..ffb988c2f5e 100644 --- a/xarray/namedarray/_array_api.py +++ b/xarray/namedarray/_array_api.py @@ -79,8 +79,8 @@ def astype( def imag( - x: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], - /, # type: ignore[type-var] + x: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] + /, ) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]: """ Returns the imaginary component of a complex number for each element x_i of the @@ -112,8 +112,8 @@ def imag( def real( - x: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], - /, # type: ignore[type-var] + x: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] + /, ) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]: """ Returns the real component of a complex 
number for each element x_i of the diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index fe287891e92..fab2200362d 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -133,12 +133,11 @@ def test_repr_multiindex_long(self) -> None: """\ 256B array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], - dtype=uint64) + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], dtype=uint64) Coordinates: * x (x) object MultiIndex - * level_1 (x) object 'a' 'a' 'a' 'a' 'a' 'a' ... 'd' 'd' 'd' 'd' 'd' 'd' - * level_2 (x) int64 1 2 3 4 5 6 7 8 1 2 3 4 ... 5 6 7 8 1 2 3 4 5 6 7 8""" + * level_1 (x) object 'a' 'a' 'a' 'a' 'a' 'a' 'a' ... 'd' 'd' 'd' 'd' 'd' 'd' + * level_2 (x) int64 1 2 3 4 5 6 7 8 1 2 3 4 5 6 ... 4 5 6 7 8 1 2 3 4 5 6 7 8""" ) assert expected == repr(mda_long) diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index 5d8e2a3d286..de57c0fee91 100644 --- a/xarray/tests/test_dataset.py +++ b/xarray/tests/test_dataset.py @@ -284,18 +284,16 @@ def test_repr(self) -> None: Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8) Coordinates: * dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 - * dim3 (dim3) {} 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' + * dim3 (dim3) None: 27B Dimensions: (x: 3) Coordinates: - * x (x) float64 10.0 20.0 30.0 + * x (x) float64 24B 10.0 20.0 30.0 Data variables: - myvar (x) uint8 11 22 33""" + myvar (x) uint8 3B 11 22 33""" ) with xr.set_options(display_variables_nbytes=False): @@ -558,7 +558,7 @@ def test_display_variables_nbytes(self) -> None: 27B Dimensions: (x: 3) Coordinates: - * x (x) float64 10.0 20.0 30.0 + * x (x) float64 10.0 20.0 30.0 Data variables: myvar (x) uint8 11 22 33""" ) @@ -570,7 +570,7 @@ def test_display_variables_nbytes(self) -> None: 27B Dimensions: (x: 3) Coordinates: - * x (x) float64 10.0 20.0 30.0 + * x (x) float64 10.0 20.0 30.0 Data variables: myvar 
(x) uint8 11 22 33""" ) @@ -592,9 +592,9 @@ def test_display_variables_nbytes_lazy(self) -> None: 27B Dimensions: (x: 3) Coordinates: - * x (x) float64 10.0 20.0 30.0 + * x (x) float64 24B 10.0 20.0 30.0 Data variables: - myvar (x) uint8 dask.array""" + myvar (x) uint8 3B dask.array""" ) with xr.set_options(display_variables_nbytes=False): @@ -604,7 +604,7 @@ def test_display_variables_nbytes_lazy(self) -> None: 27B Dimensions: (x: 3) Coordinates: - * x (x) float64 10.0 20.0 30.0 + * x (x) float64 10.0 20.0 30.0 Data variables: myvar (x) uint8 dask.array""" ) @@ -616,9 +616,9 @@ def test_display_variables_nbytes_lazy(self) -> None: 27B Dimensions: (x: 3) Coordinates: - * x (x) float64 10.0 20.0 30.0 + * x (x) float64 10.0 20.0 30.0 Data variables: - myvar (x) uint8 dask.array""" + myvar (x) uint8 3B dask.array""" ) @requires_dask @@ -1038,8 +1038,8 @@ def test_display_nbytes() -> None: 3kB Dimensions: (foo: 1200, bar: 111) Coordinates: - * foo (foo) int16 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 - * bar (bar) int16 0 1 2 3 4 5 6 7 ... 104 105 106 107 108 109 110 + * foo (foo) int16 0 1 2 3 4 5 6 7 ... 1193 1194 1195 1196 1197 1198 1199 + * bar (bar) int16 0 1 2 3 4 5 6 7 8 ... 103 104 105 106 107 108 109 110 Data variables: *empty* """.strip() @@ -1050,7 +1050,7 @@ def test_display_nbytes() -> None: 2kB array([ 0, 1, 2, ..., 1197, 1198, 1199], dtype=int16) Coordinates: - * foo (foo) int16 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 + * foo (foo) int16 0 1 2 3 4 5 6 7 ... 
1193 1194 1195 1196 1197 1198 1199 """.strip() assert actual == expected From 4192afb73701233096281706f6c333c5a6588ecd Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 20:05:34 +0200 Subject: [PATCH 09/10] fix test_repr_multiindex_long --- xarray/tests/test_dataarray.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index fab2200362d..044e9e0fcf3 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -133,7 +133,8 @@ def test_repr_multiindex_long(self) -> None: """\ 256B array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], dtype=uint64) + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], + dtype=uint64) Coordinates: * x (x) object MultiIndex * level_1 (x) object 'a' 'a' 'a' 'a' 'a' 'a' 'a' ... 'd' 'd' 'd' 'd' 'd' 'd' From 2d3ba87ce39b64388dfab3581d6525f337f42700 Mon Sep 17 00:00:00 2001 From: eschalk Date: Sat, 8 Jun 2024 20:17:55 +0200 Subject: [PATCH 10/10] Give life back to lazy array nbytes individual variable display --- xarray/tests/test_dask.py | 6 +++--- xarray/tests/test_sparse.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py index e08e65a2924..95e613a09f8 100644 --- a/xarray/tests/test_dask.py +++ b/xarray/tests/test_dask.py @@ -658,7 +658,7 @@ def test_dataarray_repr(self): 8B {data!r} Coordinates: - y (x) int64 dask.array + y (x) int64 8B dask.array Dimensions without coordinates: x""" ) assert expected == repr(a) @@ -673,10 +673,10 @@ def test_dataset_repr(self): 16B Dimensions: (x: 1) Coordinates: - y (x) int64 dask.array + y (x) int64 8B dask.array Dimensions without coordinates: x Data variables: - a (x) int64 dask.array""" + a (x) int64 8B dask.array""" ) assert expected == repr(ds) assert kernel_call_count == 0 # should not evaluate dask array diff --git 
a/xarray/tests/test_sparse.py b/xarray/tests/test_sparse.py index 316a1209252..bab5becc8b1 100644 --- a/xarray/tests/test_sparse.py +++ b/xarray/tests/test_sparse.py @@ -717,7 +717,7 @@ def test_sparse_dask_dataset_repr(self): Dimensions: (x: 4) Dimensions without coordinates: x Data variables: - a (x) float64 dask.array""" + a (x) float64 32B dask.array""" ) assert expected == repr(ds)