From 19bb3c3347d011099b04aad13d2a5e07e48218c5 Mon Sep 17 00:00:00 2001 From: Tyler Sutterley Date: Tue, 3 Jan 2023 15:42:49 -0800 Subject: [PATCH] refactor: SLR, plm and time series functions (#98) * test: single implicit import of gravity toolkit * chore: increase version number --- doc/source/_static/style.css | 8 + .../{read_SLR_C20.rst => SLR/C20.rst} | 14 +- .../{read_SLR_C30.rst => SLR/C30.rst} | 15 +- .../{read_SLR_C40.rst => SLR/C40.rst} | 14 +- .../{read_SLR_C50.rst => SLR/C50.rst} | 14 +- .../{read_SLR_CS2.rst => SLR/CS2.rst} | 14 +- .../api_reference/associated_legendre.rst | 25 + doc/source/api_reference/gen_disc_load.rst | 2 +- doc/source/api_reference/gen_harmonics.rst | 2 +- .../api_reference/gen_spherical_cap.rst | 2 +- doc/source/api_reference/gen_stokes.rst | 2 +- .../api_reference/harmonic_summation.rst | 2 + .../api_reference/piecewise_regress.rst | 19 - doc/source/api_reference/plm_colombo.rst | 19 - doc/source/api_reference/plm_holmes.rst | 20 - doc/source/api_reference/plm_mohlenkamp.rst | 19 - .../api_reference/sea_level_equation.rst | 2 +- .../api_reference/time_series/amplitude.rst | 19 + .../api_reference/time_series/piecewise.rst | 19 + .../api_reference/time_series/regress.rst | 19 + .../{ => time_series}/savitzky_golay.rst | 14 +- .../api_reference/time_series/smooth.rst | 19 + doc/source/api_reference/tsamplitude.rst | 19 - doc/source/api_reference/tsregress.rst | 19 - doc/source/api_reference/tssmooth.rst | 19 - doc/source/index.rst | 24 +- gravity_toolkit/SLR/C20.py | 450 ++++++++++++++++++ gravity_toolkit/SLR/C30.py | 269 +++++++++++ gravity_toolkit/SLR/C40.py | 128 +++++ gravity_toolkit/SLR/C50.py | 200 ++++++++ gravity_toolkit/SLR/CS2.py | 281 +++++++++++ gravity_toolkit/SLR/__init__.py | 5 + gravity_toolkit/__init__.py | 18 +- gravity_toolkit/associated_legendre.py | 386 +++++++++++++++ gravity_toolkit/gen_disc_load.py | 8 +- gravity_toolkit/gen_harmonics.py | 8 +- gravity_toolkit/gen_spherical_cap.py | 8 +- gravity_toolkit/gen_stokes.py | 7 +- gravity_toolkit/grace_input_months.py | 75 ++- gravity_toolkit/harmonic_summation.py | 84 +++- gravity_toolkit/harmonics.py | 41 +- gravity_toolkit/piecewise_regress.py | 210 +------- gravity_toolkit/plm_colombo.py | 4 + gravity_toolkit/plm_holmes.py | 4 + gravity_toolkit/plm_mohlenkamp.py | 6 +- gravity_toolkit/read_SLR_C20.py | 323 +------------ gravity_toolkit/read_SLR_C30.py | 171 +------ gravity_toolkit/read_SLR_C40.py | 63 +-- gravity_toolkit/read_SLR_C50.py | 123 +---- gravity_toolkit/read_SLR_CS2.py | 180 +------ gravity_toolkit/savitzky_golay.py | 57 +-- gravity_toolkit/sea_level_equation.py | 8 +- gravity_toolkit/time_series/__init__.py | 5 + gravity_toolkit/time_series/amplitude.py | 53 +++ gravity_toolkit/time_series/piecewise.py | 374 +++++++++++++++ gravity_toolkit/time_series/regress.py | 359 ++++++++++++++ gravity_toolkit/time_series/savitzky_golay.py | 151 ++++++ gravity_toolkit/time_series/smooth.py | 351 ++++++++++++++ gravity_toolkit/tsamplitude.py | 13 +- gravity_toolkit/tsregress.py | 183 +------ gravity_toolkit/tssmooth.py | 221 +-------- scripts/calc_degree_one.py | 6 +- scripts/calc_mascon.py | 9 +- scripts/calc_sensitivity_kernel.py | 6 +- scripts/combine_harmonics.py | 6 +- scripts/convert_harmonics.py | 6 +- scripts/grace_spatial_error.py | 13 +- scripts/grace_spatial_maps.py | 6 +- scripts/monte_carlo_degree_one.py | 10 +- scripts/regress_grace_maps.py | 11 +- scripts/run_sea_level_equation.py | 6 +- scripts/scale_grace_maps.py | 9 +- test/test_download_and_read.py | 33 +- 
test/test_harmonics.py | 89 +++- test/test_legendre.py | 16 +- test/test_love_numbers.py | 21 +- test/test_point_masses.py | 26 +- test/test_time.py | 26 +- version.txt | 2 +- 79 files changed, 3627 insertions(+), 1865 deletions(-) rename doc/source/api_reference/{read_SLR_C20.rst => SLR/C20.rst} (66%) rename doc/source/api_reference/{read_SLR_C30.rst => SLR/C30.rst} (66%) rename doc/source/api_reference/{read_SLR_C40.rst => SLR/C40.rst} (63%) rename doc/source/api_reference/{read_SLR_C50.rst => SLR/C50.rst} (63%) rename doc/source/api_reference/{read_SLR_CS2.rst => SLR/CS2.rst} (67%) create mode 100644 doc/source/api_reference/associated_legendre.rst delete mode 100644 doc/source/api_reference/piecewise_regress.rst delete mode 100644 doc/source/api_reference/plm_colombo.rst delete mode 100644 doc/source/api_reference/plm_holmes.rst delete mode 100644 doc/source/api_reference/plm_mohlenkamp.rst create mode 100644 doc/source/api_reference/time_series/amplitude.rst create mode 100644 doc/source/api_reference/time_series/piecewise.rst create mode 100644 doc/source/api_reference/time_series/regress.rst rename doc/source/api_reference/{ => time_series}/savitzky_golay.rst (57%) create mode 100644 doc/source/api_reference/time_series/smooth.rst delete mode 100644 doc/source/api_reference/tsamplitude.rst delete mode 100644 doc/source/api_reference/tsregress.rst delete mode 100644 doc/source/api_reference/tssmooth.rst create mode 100644 gravity_toolkit/SLR/C20.py create mode 100644 gravity_toolkit/SLR/C30.py create mode 100644 gravity_toolkit/SLR/C40.py create mode 100644 gravity_toolkit/SLR/C50.py create mode 100644 gravity_toolkit/SLR/CS2.py create mode 100644 gravity_toolkit/SLR/__init__.py create mode 100644 gravity_toolkit/associated_legendre.py create mode 100644 gravity_toolkit/time_series/__init__.py create mode 100755 gravity_toolkit/time_series/amplitude.py create mode 100755 gravity_toolkit/time_series/piecewise.py create mode 100755 gravity_toolkit/time_series/regress.py create mode 100644 gravity_toolkit/time_series/savitzky_golay.py create mode 100755 gravity_toolkit/time_series/smooth.py diff --git a/doc/source/_static/style.css b/doc/source/_static/style.css index 0011c7f1..ecc98701 100644 --- a/doc/source/_static/style.css +++ b/doc/source/_static/style.css @@ -14,3 +14,11 @@ visibility: visible; margin-right: -0.7em; } +/* fix for property line spacing +* https://github.com/rtfd/sphinx_rtd_theme/issues/1301 +*/ +.property { + display: block !important; + padding-right: 8px; + max-width: 100%; +} \ No newline at end of file diff --git a/doc/source/api_reference/read_SLR_C20.rst b/doc/source/api_reference/SLR/C20.rst similarity index 66% rename from doc/source/api_reference/read_SLR_C20.rst rename to doc/source/api_reference/SLR/C20.rst index db154ae4..e877a5b0 100644 --- a/doc/source/api_reference/read_SLR_C20.rst +++ b/doc/source/api_reference/SLR/C20.rst @@ -1,6 +1,6 @@ -============ -read_SLR_C20 -============ +======= +SLR.C20 +======= - Reads monthly oblateness (degree 2 zonal) spherical harmonic data files from satellite laser ranging (SLR) @@ -15,11 +15,11 @@ Calling Sequence .. code-block:: python - from gravity_toolkit.read_SLR_C20 import read_SLR_C20 - SLR_C20 = read_SLR_C20(SLR_file) + import gravity_toolkit.SLR + SLR_C20 = gravity_toolkit.SLR.C20(SLR_file) `Source code`__ -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/read_SLR_C20.py +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/SLR/C20.py -.. 
autofunction:: gravity_toolkit.read_SLR_C20 +.. autofunction:: gravity_toolkit.SLR.C20 diff --git a/doc/source/api_reference/read_SLR_C30.rst b/doc/source/api_reference/SLR/C30.rst similarity index 66% rename from doc/source/api_reference/read_SLR_C30.rst rename to doc/source/api_reference/SLR/C30.rst index 1a7a2461..7ae77b30 100644 --- a/doc/source/api_reference/read_SLR_C30.rst +++ b/doc/source/api_reference/SLR/C30.rst @@ -1,6 +1,6 @@ -============ -read_SLR_C30 -============ +======= +SLR.C30 +======= - Reads monthly degree 3 zonal spherical harmonic data files from satellite laser ranging (SLR) @@ -14,12 +14,11 @@ Calling Sequence .. code-block:: python - from gravity_toolkit.read_SLR_C30 import read_SLR_C30 - SLR_C30 = read_SLR_C30(SLR_file) + import gravity_toolkit.SLR + SLR_C30 = gravity_toolkit.SLR.C30(SLR_file) `Source code`__ -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/read_SLR_C30.py +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/SLR/C30.py - -.. autofunction:: gravity_toolkit.read_SLR_C30 +.. autofunction:: gravity_toolkit.SLR.C30 diff --git a/doc/source/api_reference/read_SLR_C40.rst b/doc/source/api_reference/SLR/C40.rst similarity index 63% rename from doc/source/api_reference/read_SLR_C40.rst rename to doc/source/api_reference/SLR/C40.rst index e9703864..0679ab71 100644 --- a/doc/source/api_reference/read_SLR_C40.rst +++ b/doc/source/api_reference/SLR/C40.rst @@ -1,6 +1,6 @@ -============ -read_SLR_C40 -============ +======= +SLR.C40 +======= - Reads monthly degree 4 zonal spherical harmonic data files from satellite laser ranging (SLR) @@ -13,11 +13,11 @@ Calling Sequence .. code-block:: python - from gravity_toolkit.read_SLR_C40 import read_SLR_C40 - SLR_C40 = read_SLR_C40(SLR_file) + import gravity_toolkit.SLR + SLR_C40 = gravity_toolkit.SLR.C40(SLR_file) `Source code`__ -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/read_SLR_C40.py +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/SLR/C40.py -.. autofunction:: gravity_toolkit.read_SLR_C40 +.. autofunction:: gravity_toolkit.SLR.C40 diff --git a/doc/source/api_reference/read_SLR_C50.rst b/doc/source/api_reference/SLR/C50.rst similarity index 63% rename from doc/source/api_reference/read_SLR_C50.rst rename to doc/source/api_reference/SLR/C50.rst index cced3628..d0ec0515 100644 --- a/doc/source/api_reference/read_SLR_C50.rst +++ b/doc/source/api_reference/SLR/C50.rst @@ -1,6 +1,6 @@ -============ -read_SLR_C50 -============ +======= +SLR.C50 +======= - Reads monthly degree 5 zonal spherical harmonic data files from satellite laser ranging (SLR) @@ -13,11 +13,11 @@ Calling Sequence .. code-block:: python - from gravity_toolkit.read_SLR_C50 import read_SLR_C50 - SLR_C50 = read_SLR_C50(SLR_file) + import gravity_toolkit.SLR + SLR_C50 = gravity_toolkit.SLR.C50(SLR_file) `Source code`__ -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/read_SLR_C50.py +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/SLR/C50.py -.. autofunction:: gravity_toolkit.read_SLR_C50 +.. 
autofunction:: gravity_toolkit.SLR.C50 diff --git a/doc/source/api_reference/read_SLR_CS2.rst b/doc/source/api_reference/SLR/CS2.rst similarity index 67% rename from doc/source/api_reference/read_SLR_CS2.rst rename to doc/source/api_reference/SLR/CS2.rst index 751571f9..5236a221 100644 --- a/doc/source/api_reference/read_SLR_CS2.rst +++ b/doc/source/api_reference/SLR/CS2.rst @@ -1,6 +1,6 @@ -============ -read_SLR_CS2 -============ +======= +SLR.CS2 +======= - Reads monthly degree 2,m (figure axis and azimuthal dependence) spherical harmonic data files from satellite laser ranging (SLR) @@ -14,11 +14,11 @@ Calling Sequence .. code-block:: python - from gravity_toolkit.read_SLR_CS2 import read_SLR_CS2 - SLR_CS2 = read_SLR_CS2(SLR_file) + import gravity_toolkit.SLR + SLR_CS2 = gravity_toolkit.SLR.CS2(SLR_file) `Source code`__ -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/read_SLR_CS2.py +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/SLR/CS2.py -.. autofunction:: gravity_toolkit.read_SLR_CS2 +.. autofunction:: gravity_toolkit.SLR.CS2 diff --git a/doc/source/api_reference/associated_legendre.rst b/doc/source/api_reference/associated_legendre.rst new file mode 100644 index 00000000..70a7defa --- /dev/null +++ b/doc/source/api_reference/associated_legendre.rst @@ -0,0 +1,25 @@ +=================== +associated_legendre +=================== + +- Computes fully-normalized associated Legendre Polynomials and their first derivative for a vector of ``x`` values + +Calling Sequence +################ + +.. code-block:: python + + import gravity_toolkit.associated_legendre + PLM, dPLM = gravity_toolkit.associated_legendre.polynomials(LMAX, x, method='holmes') + +`Source code`__ + +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/associated_legendre.py + +.. autofunction:: gravity_toolkit.associated_legendre + +.. autofunction:: gravity_toolkit.plm_colombo + +.. autofunction:: gravity_toolkit.plm_holmes + +.. autofunction:: gravity_toolkit.plm_mohlenkamp diff --git a/doc/source/api_reference/gen_disc_load.rst b/doc/source/api_reference/gen_disc_load.rst index b2807698..d357c952 100644 --- a/doc/source/api_reference/gen_disc_load.rst +++ b/doc/source/api_reference/gen_disc_load.rst @@ -10,7 +10,7 @@ Calling Sequence .. code-block:: python from gravity_toolkit.gen_disc_load import gen_disc_load - from gravity_toolkit.plm_holmes import plm_holmes + from gravity_toolkit.associated_legendre import plm_holmes PLM, dPLM = plm_holmes(LMAX, np.cos(th)) Ylms = gen_disc_load(data, lon, lat, area, LMAX=LMAX, PLM=PLM, LOVE=(hl,kl,ll)) diff --git a/doc/source/api_reference/gen_harmonics.rst b/doc/source/api_reference/gen_harmonics.rst index 30d6ec62..2f3e6f86 100644 --- a/doc/source/api_reference/gen_harmonics.rst +++ b/doc/source/api_reference/gen_harmonics.rst @@ -11,7 +11,7 @@ Calling Sequence .. code-block:: python from gravity_toolkit.gen_harmonics import gen_harmonics - from gravity_toolkit.plm_holmes import plm_holmes + from gravity_toolkit.associated_legendre import plm_holmes PLM, dPLM = plm_holmes(LMAX, np.cos(th)) Ylms = gen_harmonics(data, lon, lat, LMAX=LMAX, PLM=PLM) diff --git a/doc/source/api_reference/gen_spherical_cap.rst b/doc/source/api_reference/gen_spherical_cap.rst index 70922f7f..9ff3bbaf 100644 --- a/doc/source/api_reference/gen_spherical_cap.rst +++ b/doc/source/api_reference/gen_spherical_cap.rst @@ -10,7 +10,7 @@ Calling Sequence .. 
code-block:: python from gravity_toolkit.gen_spherical_cap import gen_spherical_cap - from gravity_toolkit.plm_holmes import plm_holmes + from gravity_toolkit.associated_legendre import plm_holmes PLM, dPLM = plm_holmes(LMAX, np.cos(th)) Ylms = gen_spherical_cap(data, lon, lat, UNITS=1, LMAX=LMAX, PLM=PLM, LOVE=(hl,kl,ll)) diff --git a/doc/source/api_reference/gen_stokes.rst b/doc/source/api_reference/gen_stokes.rst index 2e6acf4d..9937aa11 100644 --- a/doc/source/api_reference/gen_stokes.rst +++ b/doc/source/api_reference/gen_stokes.rst @@ -10,7 +10,7 @@ Calling Sequence .. code-block:: python from gravity_toolkit.gen_stokes import gen_stokes - from gravity_toolkit.plm_holmes import plm_holmes + from gravity_toolkit.associated_legendre import plm_holmes PLM, dPLM = plm_holmes(LMAX, np.cos(th)) Ylms = gen_stokes(data, lon, lat, UNITS=1, LMAX=LMAX, PLM=PLM, LOVE=(hl,kl,ll)) diff --git a/doc/source/api_reference/harmonic_summation.rst b/doc/source/api_reference/harmonic_summation.rst index 8e1d485b..d1d39f08 100644 --- a/doc/source/api_reference/harmonic_summation.rst +++ b/doc/source/api_reference/harmonic_summation.rst @@ -17,3 +17,5 @@ Calling Sequence .. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/harmonic_summation.py .. autofunction:: gravity_toolkit.harmonic_summation + +.. autofunction:: gravity_toolkit.harmonic_transform diff --git a/doc/source/api_reference/piecewise_regress.rst b/doc/source/api_reference/piecewise_regress.rst deleted file mode 100644 index 837df977..00000000 --- a/doc/source/api_reference/piecewise_regress.rst +++ /dev/null @@ -1,19 +0,0 @@ -================= -piecewise_regress -================= - -- Fits a synthetic signal to data over a time period by ordinary or weighted least-squares for breakpoint analysis - -Calling Sequence -################ - -.. code-block:: python - - from gravity_toolkit.piecewise_regress import piecewise_regress - tsbeta = piecewise_regress(t_in, d_in, BREAKPOINT=len(t_in)//2, CYCLES=[0.5,1.0]) - -`Source code`__ - -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/piecewise_regress.py - -.. autofunction:: gravity_toolkit.piecewise_regress diff --git a/doc/source/api_reference/plm_colombo.rst b/doc/source/api_reference/plm_colombo.rst deleted file mode 100644 index c4e1108d..00000000 --- a/doc/source/api_reference/plm_colombo.rst +++ /dev/null @@ -1,19 +0,0 @@ -=========== -plm_colombo -=========== - -- Computes fully-normalized associated Legendre Polynomials and their first derivative for a vector of ``x`` values using a standard forward column method - -Calling Sequence -################ - -.. code-block:: python - - from gravity_toolkit.plm_colombo import plm_colombo - PLM, dPLM = plm_colombo(LMAX, x) - -`Source code`__ - -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/plm_colombo.py - -.. autofunction:: gravity_toolkit.plm_colombo diff --git a/doc/source/api_reference/plm_holmes.rst b/doc/source/api_reference/plm_holmes.rst deleted file mode 100644 index cdb118e6..00000000 --- a/doc/source/api_reference/plm_holmes.rst +++ /dev/null @@ -1,20 +0,0 @@ -========== -plm_holmes -========== - -- Computes fully-normalized associated Legendre Polynomials and their first derivative for a vector of ``x`` values using the Holmes and Featherstone recursion relation -- Recursion relation is stable up to very high degree and order - -Calling Sequence -################ - -.. 
code-block:: python - - from gravity_toolkit.plm_holmes import plm_holmes - PLM, dPLM = plm_holmes(LMAX, x) - -`Source code`__ - -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/plm_holmes.py - -.. autofunction:: gravity_toolkit.plm_holmes diff --git a/doc/source/api_reference/plm_mohlenkamp.rst b/doc/source/api_reference/plm_mohlenkamp.rst deleted file mode 100644 index 34f6c710..00000000 --- a/doc/source/api_reference/plm_mohlenkamp.rst +++ /dev/null @@ -1,19 +0,0 @@ -============== -plm_mohlenkamp -============== - -- Computes fully-normalized associated Legendre Polynomials for a vector of ``x`` values using Martin Mohlenkamp's recursion relation - -Calling Sequence -################ - -.. code-block:: python - - from gravity_toolkit.plm_mohlenkamp import plm_mohlenkamp - plm = plm_mohlenkamp(LMAX, x) - -`Source code`__ - -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/plm_mohlenkamp.py - -.. autofunction:: gravity_toolkit.plm_mohlenkamp diff --git a/doc/source/api_reference/sea_level_equation.rst b/doc/source/api_reference/sea_level_equation.rst index d6f81dcd..fd71d741 100644 --- a/doc/source/api_reference/sea_level_equation.rst +++ b/doc/source/api_reference/sea_level_equation.rst @@ -9,7 +9,7 @@ Calling Sequence .. code-block:: python - from gravity_toolkit.plm_holmes import plm_holmes + from gravity_toolkit.associated_legendre import plm_holmes from gravity_toolkit.sea_level_equation import sea_level_equation PLM, dPLM = plm_holmes(LMAX, np.cos(th)) Ylms = sea_level_equation(loadClm, loadSlm, lon, lat, land_function, diff --git a/doc/source/api_reference/time_series/amplitude.rst b/doc/source/api_reference/time_series/amplitude.rst new file mode 100644 index 00000000..b28273d1 --- /dev/null +++ b/doc/source/api_reference/time_series/amplitude.rst @@ -0,0 +1,19 @@ +===================== +time_series.amplitude +===================== + +- Calculate the amplitude and phase of a harmonic function from calculated sine and cosine of a series of measurements + +Calling Sequence +################ + +.. code-block:: python + + import gravity_toolkit.time_series + ampl,ph = gravity_toolkit.time_series.amplitude(bsin,bcos) + +`Source code`__ + +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/time_series/amplitude.py + +.. autofunction:: gravity_toolkit.time_series.amplitude diff --git a/doc/source/api_reference/time_series/piecewise.rst b/doc/source/api_reference/time_series/piecewise.rst new file mode 100644 index 00000000..d5084cbe --- /dev/null +++ b/doc/source/api_reference/time_series/piecewise.rst @@ -0,0 +1,19 @@ +===================== +time_series.piecewise +===================== + +- Fits a synthetic signal to data over a time period by ordinary or weighted least-squares for breakpoint analysis + +Calling Sequence +################ + +.. code-block:: python + + import gravity_toolkit.time_series + tsbeta = gravity_toolkit.time_series.piecewise(t_in, d_in, BREAKPOINT=len(t_in)//2, CYCLES=[0.5,1.0]) + +`Source code`__ + +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/time_series/piecewise.py + +.. 
autofunction:: gravity_toolkit.time_series.piecewise diff --git a/doc/source/api_reference/time_series/regress.rst b/doc/source/api_reference/time_series/regress.rst new file mode 100644 index 00000000..8ed0de71 --- /dev/null +++ b/doc/source/api_reference/time_series/regress.rst @@ -0,0 +1,19 @@ +=================== +time_series.regress +=================== + +- Fits a synthetic signal to data over a time period by ordinary or weighted least-squares + +Calling Sequence +################ + +.. code-block:: python + + import gravity_toolkit.time_series + tsbeta = gravity_toolkit.time_series.regress(t_in, d_in, ORDER=1, CYCLES=[0.5,1.0], CONF=0.95) + +`Source code`__ + +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/time_series/regress.py + +.. autofunction:: gravity_toolkit.time_series.regress diff --git a/doc/source/api_reference/savitzky_golay.rst b/doc/source/api_reference/time_series/savitzky_golay.rst similarity index 57% rename from doc/source/api_reference/savitzky_golay.rst rename to doc/source/api_reference/time_series/savitzky_golay.rst index def9a9f2..b13e7b2c 100644 --- a/doc/source/api_reference/savitzky_golay.rst +++ b/doc/source/api_reference/time_series/savitzky_golay.rst @@ -1,6 +1,6 @@ -============== -savitzky_golay -============== +========================== +time_series.savitzky_golay +========================== - Smooth and optionally differentiate data of non-uniform sampling with a Savitzky-Golay filter - A type of low-pass filter, particularly suited for smoothing noisy data @@ -11,11 +11,11 @@ Calling Sequence .. code-block:: python - from gravity_toolkit.savitzky_golay import savitzky_golay - sg = savitzky_golay(t_in, d_in, WINDOW=13, ORDER=2) + import gravity_toolkit.time_series + sg = gravity_toolkit.time_series.savitzky_golay(t_in, d_in, WINDOW=13, ORDER=2) `Source code`__ -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/savitzky_golay.py +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/time_series/savitzky_golay.py -.. autofunction:: gravity_toolkit.savitzky_golay +.. autofunction:: gravity_toolkit.time_series.savitzky_golay diff --git a/doc/source/api_reference/time_series/smooth.rst b/doc/source/api_reference/time_series/smooth.rst new file mode 100644 index 00000000..2e8960b7 --- /dev/null +++ b/doc/source/api_reference/time_series/smooth.rst @@ -0,0 +1,19 @@ +================== +time_series.smooth +================== + +- Computes the moving average of a time-series + +Calling Sequence +################ + +.. code-block:: python + + import gravity_toolkit.time_series + smth = gravity_toolkit.time_series.smooth(t_in, d_in, HFWTH=6) + +`Source code`__ + +.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/time_series/smooth.py + +.. autofunction:: gravity_toolkit.time_series.smooth diff --git a/doc/source/api_reference/tsamplitude.rst b/doc/source/api_reference/tsamplitude.rst deleted file mode 100644 index 2953e1a5..00000000 --- a/doc/source/api_reference/tsamplitude.rst +++ /dev/null @@ -1,19 +0,0 @@ -=========== -tsamplitude -=========== - -- Calculate the amplitude and phase of a harmonic function from calculated sine and cosine of a series of measurements - -Calling Sequence -################ - -.. code-block:: python - - from gravity_toolkit.tsamplitude import tsamplitude - ampl,ph = tsamplitude(bsin,bcos) - -`Source code`__ - -.. 
__: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/tsamplitude.py - -.. autofunction:: gravity_toolkit.tsamplitude diff --git a/doc/source/api_reference/tsregress.rst b/doc/source/api_reference/tsregress.rst deleted file mode 100644 index d533c4b5..00000000 --- a/doc/source/api_reference/tsregress.rst +++ /dev/null @@ -1,19 +0,0 @@ -========= -tsregress -========= - -- Fits a synthetic signal to data over a time period by ordinary or weighted least-squares - -Calling Sequence -################ - -.. code-block:: python - - from gravity_toolkit.tsregress import tsregress - tsbeta = tsregress(t_in, d_in, ORDER=1, CYCLES=[0.5,1.0], CONF=0.95) - -`Source code`__ - -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/tsregress.py - -.. autofunction:: gravity_toolkit.tsregress diff --git a/doc/source/api_reference/tssmooth.rst b/doc/source/api_reference/tssmooth.rst deleted file mode 100644 index 4fae329a..00000000 --- a/doc/source/api_reference/tssmooth.rst +++ /dev/null @@ -1,19 +0,0 @@ -======== -tssmooth -======== - -- Computes the moving average of a time-series - -Calling Sequence -################ - -.. code-block:: python - - from gravity_toolkit.tssmooth import tssmooth - smth = tssmooth(t_in, d_in, HFWTH=6) - -`Source code`__ - -.. __: https://github.com/tsutterley/read-GRACE-harmonics/blob/main/gravity_toolkit/tssmooth.py - -.. autofunction:: gravity_toolkit.tssmooth diff --git a/doc/source/index.rst b/doc/source/index.rst index 57dc4bfc..777bec26 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -35,6 +35,7 @@ missions :hidden: :caption: API Reference + api_reference/associated_legendre.rst api_reference/clenshaw_summation.rst api_reference/degree_amplitude.rst api_reference/destripe_harmonics.rst @@ -58,28 +59,25 @@ missions api_reference/legendre_polynomials.rst api_reference/mascons.rst api_reference/ocean_stokes.rst - api_reference/piecewise_regress.rst - api_reference/plm_colombo.rst - api_reference/plm_holmes.rst - api_reference/plm_mohlenkamp.rst api_reference/read_gfc_harmonics.rst api_reference/read_GIA_model.rst api_reference/read_GRACE_harmonics.rst api_reference/read_love_numbers.rst - api_reference/read_SLR_C20.rst - api_reference/read_SLR_CS2.rst - api_reference/read_SLR_C30.rst - api_reference/read_SLR_C40.rst - api_reference/read_SLR_C50.rst api_reference/read_SLR_harmonics.rst - api_reference/savitzky_golay.rst api_reference/sea_level_equation.rst + api_reference/SLR/C20.rst + api_reference/SLR/CS2.rst + api_reference/SLR/C30.rst + api_reference/SLR/C40.rst + api_reference/SLR/C50.rst api_reference/spatial.rst api_reference/time.rst + api_reference/time_series/amplitude.rst + api_reference/time_series/piecewise.rst + api_reference/time_series/regress.rst + api_reference/time_series/savitzky_golay.rst + api_reference/time_series/smooth.rst api_reference/tools.rst - api_reference/tsamplitude.rst - api_reference/tsregress.rst - api_reference/tssmooth.rst api_reference/units.rst api_reference/utilities.rst diff --git a/gravity_toolkit/SLR/C20.py b/gravity_toolkit/SLR/C20.py new file mode 100644 index 00000000..17d21fab --- /dev/null +++ b/gravity_toolkit/SLR/C20.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python +u""" +C20.py +Written by Tyler Sutterley (01/2023) + +Reads in C20 spherical harmonic coefficients derived from SLR measurements + +Dataset distributed by NASA PO.DAAC + https://podaac-tools.jpl.nasa.gov/drive/files/GeodeticsGravity/grace/docs + TN-05_C20_SLR.txt + TN-07_C20_SLR.txt + 
TN-11_C20_SLR.txt + TN-14_C30_C20_GSFC_SLR.txt +Dataset distributed by UTCSR + ftp://ftp.csr.utexas.edu/pub/slr/degree_2/C20_RL05.txt +Datasets distributed by GFZ + ftp://isdcftp.gfz-potsdam.de/grace/Level-2/GFZ/RL06_SLR_C20/ + GFZ_RL06_C20_SLR.dat + ftp://isdcftp.gfz-potsdam.de/grace/GravIS/GFZ/Level-2B/aux_data/ + GRAVIS-2B_GFZOP_GRACE+SLR_LOW_DEGREES_0002.dat + +CALLING SEQUENCE: + SLR_C20 = gravity_toolkit.SLR.C20(SLR_file) + +INPUTS: + SLR_file: + RL04: TN-05_C20_SLR.txt + RL05: TN-07_C20_SLR.txt + RL06: TN-11_C20_SLR.txt + CSR: C20_RL05.txt + GFZ: GFZ_RL06_C20_SLR.dat + GFZ (combined): GRAVIS-2B_GFZOP_GRACE+SLR_LOW_DEGREES_0002.dat + +OUTPUTS: + data: SLR degree 2 order 0 cosine stokes coefficients (C20) + error: SLR degree 2 order 0 cosine stokes coefficient error (eC20) + month: GRACE/GRACE-FO month of measurement (April 2002 = 004) + time: date of SLR measurement + +OPTIONS: + AOD: remove background De-aliasing product from the SLR solution (for CSR) + HEADER: file contains header text to be skipped (default: True) + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python + https://numpy.org + https://numpy.org/doc/stable/user/numpy-for-matlab-users.html + dateutil: powerful extensions to datetime + https://dateutil.readthedocs.io/en/stable/ + +PROGRAM DEPENDENCIES: + time.py: utilities for calculating time operations + +REFERENCES: + Cheng, M. and Tapley, B. D., "Variations in the Earth's oblateness during + the past 28 years", Journal of Geophysical Research: Solid Earth, + 109(B9), B09402, 2004. 10.1029/2004JB003028 + Loomis, B. D., Rachlin, K. E., and Luthcke, S. B., "Improved Earth + Oblateness Rate Reveals Increased Ice Sheet Losses and Mass-Driven Sea + Level Rise", Geophysical Research Letters, 46(12), 6910-6917, 2019. + https://doi.org/10.1029/2019GL082929 + Koenig, R., Schreiner, P., and Dahle, C. "Monthly estimates of C(2,0) + generated by GFZ from SLR satellites based on GFZ GRACE/GRACE-FO + RL06 background models." V. 1.0. GFZ Data Services, (2019). 
+ https://doi.org/10.5880/GFZ.GRAVIS_06_C20_SLR + +UPDATE HISTORY: + Updated 01/2023: refactored satellite laser ranging read functions + Updated 04/2022: updated docstrings to numpy documentation format + include utf-8 encoding in reads to be windows compliant + Updated 09/2021: use functions for converting to and from GRACE months + Updated 05/2021: added GFZ SLR and GravIS oblateness solutions + define int/float precision to prevent deprecation warning + Updated 02/2021: use adjust_months function to fix special months cases + replaced numpy bool to prevent deprecation warning + Updated 12/2020: using utilities from time module + Updated 08/2020: flake8 compatible regular expression strings + Updated 07/2020: added function docstrings + Updated 08/2019: add catch to verify input SLR file exists + Updated 07/2019: added tilde-expansion of input SLR file + Updated 06/2019: added new GRACE-FO special month (October 2018) + Updated 11/2018: new TN-11 files only list GRACE months available + Updated 06/2016: added option HEADER for files that do not have header text + Updated 05/2016: added option AOD to not remove the AOD correction + Updated 03/2016: minor update to read PO.DAAC + Updated 05/2015: minor change to file determination (only regular expressions) + Updated 02/2015: updated UT/CSR portion and comments + Updated 09/2014: rewrite of the TN-07 read program + using regular expressions and convert_calendar_decimal + Updated 01/2014: updated to use UT/CSR monthly time-series + as an alternative to PO.DAAC as it is updated more regularly + Updated 05/2013: adapted for python + Updated 09/2012: Changed month scheme to output. + Used to remove the GRACE missing months in this program by feeding in the GRACE months + BUT, as the new SLR files start with an earlier date, decided to parallel + the degree-1 read program, and remove the missing months in the read_grace program + Updated 06/2012: OVERHAUL of dating and modification for 'special' GRACE months + Initiated from an incorrect date tag in the SLR data file + New dating will convert from the MJD file into date fraction + Some GRACE 'months' have the accelerometer turned off + for half the month to preserve battery power + These months use half of the prior month in the GRACE global gravity solution + For these months the SLR file has a second dataline for the modified period + Will use these marked (*) data to replace the GRACE C2,0 + ALSO converted the mon and slrdate inputs into options + Updated 01/2012: Updated to feed in SLR file from outside + Will accommodate upcoming GRACE RL05, which will use different SLR files + Written 12/2011 +""" +import os +import re +import numpy as np +import gravity_toolkit.time + +# PURPOSE: read oblateness data from Satellite Laser Ranging (SLR) +def C20(SLR_file, AOD=True, HEADER=True): + """ + Reads C20 spherical harmonic coefficients from SLR measurements + + Parameters + ---------- + SLR_file: str + Satellite Laser Ranging file + AOD: bool, default True + Remove background De-aliasing product from the + Center for Space Research (CSR) SLR solutions + HEADER: bool, default True + File contains header text to be skipped + + Returns + ------- + data: float + SLR degree 2 order 0 cosine stokes coefficients + error: float + SLR degree 2 order 0 cosine stokes coefficient error + month: int + GRACE/GRACE-FO month of measurement + time: float + date of SLR measurement + """ + + # check that SLR file exists + if not os.access(os.path.expanduser(SLR_file), os.F_OK): + raise FileNotFoundError('SLR file 
not found in file system') + + # output dictionary with data variables + dinput = {} + # determine if imported file is from PO.DAAC or CSR + if bool(re.search(r'C20_RL\d+',SLR_file,re.I)): + # SLR C20 file from CSR + # Just for checking new months when TN series isn't up to date as the + # SLR estimates always use the full set of days in each calendar month. + # format of the input file (note 64 bit floating point for C20) + # Column 1: Approximate mid-point of monthly solution (years) + # Column 2: C20 from SLR (normalized) + # Column 3: Delta C20 relative to a mean value (1E-10) + # Column 4: Solution sigma (1E-10) + # Column 5: Mean value of Atmosphere-Ocean De-aliasing model (1E-10) + # Columns 6-7: Start and end dates of data used in solution + dtype = {} + dtype['names'] = ('time','C20','delta','sigma','AOD','start','end') + dtype['formats'] = ('f','f8','f','f','f','f','f') + # header text is commented and won't be read + file_input = np.loadtxt(os.path.expanduser(SLR_file),dtype=dtype) + # date and GRACE/GRACE-FO month + dinput['time'] = file_input['time'] + dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) + # monthly spherical harmonic replacement solutions + dinput['data'] = file_input['C20'].copy() + # monthly spherical harmonic formal standard deviations + dinput['error'] = file_input['sigma']*1e-10 + # Background gravity model includes solid earth and ocean tides, solid + # earth and ocean pole tides, and the Atmosphere-Ocean De-aliasing + # product. The monthly mean of the AOD model has been restored. + if AOD: + # Removing AOD product that was restored in the solution + dinput['data'] -= file_input['AOD']*1e-10 + elif bool(re.search(r'GFZ_(RL\d+)_C20_SLR',SLR_file,re.I)): + # SLR C20 file from GFZ + # Column 1: MJD of BEGINNING of solution span + # Column 2: Year and fraction of year of BEGINNING of solution span + # Column 3: Replacement C(2,0) + # Column 4: Replacement C(2,0) - mean C(2,0) (1.0E-10) + # Column 5: C(2,0) formal error (1.0E-10) + with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: + file_contents = f.read().splitlines() + # number of lines contained in the file + file_lines = len(file_contents) + # counts the number of lines in the header + count = 0 + # Reading over header text + while HEADER: + # file line at count + line = file_contents[count] + # find PRODUCT: within line to set HEADER flag to False when found + HEADER = not bool(re.match(r'PRODUCT:+',line)) + # add 1 to counter + count += 1 + + # number of months within the file + n_mon = file_lines - count + # date and GRACE/GRACE-FO month + dinput['time'] = np.zeros((n_mon)) + dinput['month'] = np.zeros((n_mon),dtype=np.int64) + # monthly spherical harmonic replacement solutions + dinput['data'] = np.zeros((n_mon)) + # monthly spherical harmonic formal standard deviations + dinput['error'] = np.zeros((n_mon)) + # time count + t = 0 + # for every other line: + for line in file_contents[count:]: + # find numerical instances in line including exponents, + # decimal points and negatives + line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) + # check if line has G* or Gm flags + if bool(re.search(r'(G\*|Gm)',line)): + # reading decimal year for start of span + dinput['time'][t] = np.float64(line_contents[1]) + # Spherical Harmonic data for line + dinput['data'][t] = np.float64(line_contents[2]) + dinput['error'][t] = np.float64(line_contents[4])*1e-10 + # GRACE/GRACE-FO month of SLR solutions + dinput['month'][t] = 
gravity_toolkit.time.calendar_to_grace( + dinput['time'][t], around=np.round) + # add to t count + t += 1 + # truncate variables if necessary + for key,val in dinput.items(): + dinput[key] = val[:t] + + elif bool(re.search(r'GRAVIS-2B_GFZOP',SLR_file,re.I)): + # Combined GRACE/SLR solution file produced by GFZ + # Column 1: MJD of BEGINNING of solution data span + # Column 2: Year and fraction of year of BEGINNING of solution span + # Column 3: Replacement C(2,0) + # Column 4: Replacement C(2,0) - mean C(2,0) (1.0E-10) + # Column 5: C(2,0) formal standard deviation (1.0E-12) + with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: + file_contents = f.read().splitlines() + # number of lines contained in the file + file_lines = len(file_contents) + + # counts the number of lines in the header + count = 0 + # Reading over header text + while HEADER: + # file line at count + line = file_contents[count] + # find PRODUCT: within line to set HEADER flag to False when found + HEADER = not bool(re.match(r'PRODUCT:+',line)) + # add 1 to counter + count += 1 + + # number of months within the file + n_mon = file_lines - count + # date and GRACE/GRACE-FO month + dinput['time'] = np.zeros((n_mon)) + dinput['month'] = np.zeros((n_mon),dtype=int) + # monthly spherical harmonic replacement solutions + dinput['data'] = np.zeros((n_mon)) + # monthly spherical harmonic formal standard deviations + dinput['error'] = np.zeros((n_mon)) + # time count + t = 0 + # for every other line: + for line in file_contents[count:]: + # find numerical instances in line including exponents, + # decimal points and negatives + line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) + count = len(line_contents) + # check for empty lines + if (count > 0): + # reading decimal year for start of span + dinput['time'][t] = np.float64(line_contents[1]) + # Spherical Harmonic data for line + dinput['data'][t] = np.float64(line_contents[2]) + dinput['error'][t] = np.float64(line_contents[4])*1e-10 + # GRACE/GRACE-FO month of SLR solutions + dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( + dinput['time'][t], around=np.round) + # add to t count + t += 1 + # truncate variables if necessary + for key,val in dinput.items(): + dinput[key] = val[:t] + + elif bool(re.search(r'TN-(11|14)',SLR_file,re.I)): + # SLR C20 RL06 file from PO.DAAC + with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: + file_contents = f.read().splitlines() + # number of lines contained in the file + file_lines = len(file_contents) + + # counts the number of lines in the header + count = 0 + # Reading over header text + while HEADER: + # file line at count + line = file_contents[count] + # find PRODUCT: within line to set HEADER flag to False when found + HEADER = not bool(re.match(r'PRODUCT:+',line,re.IGNORECASE)) + # add 1 to counter + count += 1 + + # number of months within the file + n_mon = file_lines - count + # date and GRACE/GRACE-FO month + dinput['time'] = np.zeros((n_mon)) + dinput['month'] = np.zeros((n_mon),dtype=np.int64) + # monthly spherical harmonic replacement solutions + dinput['data'] = np.zeros((n_mon)) + # monthly spherical harmonic formal standard deviations + dinput['error'] = np.zeros((n_mon)) + # time count + t = 0 + # for every other line: + for line in file_contents[count:]: + # find numerical instances in line including exponents, + # decimal points and negatives + line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) + # check for empty lines as there are + # 
slight differences in RL04 TN-05_C20_SLR.txt + # with blanks between the PRODUCT: line and the data + count = len(line_contents) + # if count is greater than 0 + if (count > 0): + # modified julian date for line + MJD = np.float64(line_contents[0]) + # converting from MJD into month, day and year + YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( + MJD+2400000.5, format='tuple') + # converting from month, day, year into decimal year + dinput['time'][t] = gravity_toolkit.time.convert_calendar_decimal( + YY, MM, day=DD, hour=hh) + # Spherical Harmonic data for line + dinput['data'][t] = np.float64(line_contents[2]) + dinput['error'][t] = np.float64(line_contents[4])*1e-10 + # GRACE/GRACE-FO month of SLR solutions + dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( + dinput['time'][t], around=np.round) + # add to t count + t += 1 + # truncate variables if necessary + for key,val in dinput.items(): + dinput[key] = val[:t] + else: + # SLR C20 file from PO.DAAC + with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: + file_contents = f.read().splitlines() + # number of lines contained in the file + file_lines = len(file_contents) + + # counts the number of lines in the header + count = 0 + # Reading over header text + while HEADER: + # file line at count + line = file_contents[count] + # find PRODUCT: within line to set HEADER flag to False when found + HEADER = not bool(re.match(r'PRODUCT:+',line)) + # add 1 to counter + count += 1 + + # number of months within the file + n_mon = file_lines - count + # GRACE/GRACE-FO dates + date_conv = np.zeros((n_mon)) + # monthly spherical harmonic replacement solutions + C20_input = np.zeros((n_mon)) + # monthly spherical harmonic formal standard deviations + eC20_input = np.zeros((n_mon)) + # flag denoting if replacement solution + slr_flag = np.zeros((n_mon),dtype=bool) + # time count + t = 0 + # for every other line: + for line in file_contents[count:]: + # find numerical instances in line including exponents, + # decimal points and negatives + line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) + # check for empty lines as there are + # slight differences in RL04 TN-05_C20_SLR.txt + # with blanks between the PRODUCT: line and the data + count = len(line_contents) + # if count is greater than 0 + if (count > 0): + # modified julian date for line + MJD = np.float64(line_contents[0]) + # converting from MJD into month, day and year + YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( + MJD+2400000.5, format='tuple') + # converting from month, day, year into decimal year + date_conv[t] = gravity_toolkit.time.convert_calendar_decimal( + YY, MM, day=DD, hour=hh) + # Spherical Harmonic data for line + C20_input[t] = np.float64(line_contents[2]) + eC20_input[t] = np.float64(line_contents[4])*1e-10 + # line has * flag + if bool(re.search(r'\*',line)): + slr_flag[t] = True + # add to t count + t += 1 + + # truncate for RL04 if necessary + date_conv = date_conv[:t] + C20_input = C20_input[:t] + eC20_input = eC20_input[:t] + slr_flag = slr_flag[:t] + + # GRACE/GRACE-FO month of SLR solutions + mon = gravity_toolkit.time.calendar_to_grace(date_conv,around=np.round) + # number of unique months + dinput['month'] = np.unique(mon) + n_uniq = len(dinput['month']) + # Removing overlapping months to use the data for + # months with limited GRACE accelerometer use + dinput['time'] = np.zeros((n_uniq)) + dinput['data'] = np.zeros((n_uniq)) + dinput['error'] = np.zeros((n_uniq)) + # New SLR datasets have * flags for 
the modified GRACE periods + # these GRACE months use half of a prior month in their solution + # this will find these months (marked above with slr_flag) + for t in range(n_uniq): + count = np.count_nonzero(mon == dinput['month'][t]) + # there is only one solution for the month + if (count == 1): + i = np.nonzero(mon == dinput['month'][t]) + dinput['time'][t] = date_conv[i] + dinput['data'][t] = C20_input[i] + dinput['error'][t] = eC20_input[i] + # there is a special solution for the month + # will use the solution flagged with slr_flag + elif (count == 2): + i = np.nonzero((mon == dinput['month'][t]) & slr_flag) + dinput['time'][t] = date_conv[i] + dinput['data'][t] = C20_input[i] + dinput['error'][t] = eC20_input[i] + + # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with + # Accelerometer shutoffs make the relation between month number + # and date more complicated as days from other months are used + # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) + # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) + # For all: May 2015 (161) is centered in Apr 2015 (160) + # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) + dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) + + # return the SLR-derived oblateness solutions + return dinput \ No newline at end of file diff --git a/gravity_toolkit/SLR/C30.py b/gravity_toolkit/SLR/C30.py new file mode 100644 index 00000000..4f4b065c --- /dev/null +++ b/gravity_toolkit/SLR/C30.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python +u""" +C30.py +Written by Yara Mohajerani and Tyler Sutterley (01/2023) + +Reads monthly degree 3 zonal spherical harmonic data files from SLR + +Dataset distributed by NASA PO.DAAC + https://podaac-tools.jpl.nasa.gov/drive/files/GeodeticsGravity/gracefo/docs + TN-14_C30_C20_GSFC_SLR.txt + ftp://ftp.csr.utexas.edu/pub/slr/degree_5/ + CSR_Monthly_5x5_Gravity_Harmonics.txt +Dataset distributed by GFZ + ftp://isdcftp.gfz-potsdam.de/grace/GravIS/GFZ/Level-2B/aux_data/ + GRAVIS-2B_GFZOP_GRACE+SLR_LOW_DEGREES_0002.dat + +CALLING SEQUENCE: + SLR_C30 = gravity_toolkit.SLR.C30(SLR_file) + +INPUTS: + SLR_file: + CSR: CSR_Monthly_5x5_Gravity_Harmonics.txt + GFZ: GRAVIS-2B_GFZOP_GRACE+SLR_LOW_DEGREES_0002.dat + GSFC: TN-14_C30_C20_GSFC_SLR.txt + LARES: C30_LARES_filtered.txt + +OUTPUTS: + data: SLR degree 3 order 0 cosine stokes coefficients (C30) + error: SLR degree 3 order 0 cosine stokes coefficient error (eC30) + month: GRACE/GRACE-FO month of measurement (April 2002 = 004) + time: date of SLR measurement + +OPTIONS: + HEADER: file contains header text to be skipped (default: True) + C30_MEAN: mean C30 to add to LARES C30 anomalies + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python + https://numpy.org + https://numpy.org/doc/stable/user/numpy-for-matlab-users.html + dateutil: powerful extensions to datetime + https://dateutil.readthedocs.io/en/stable/ + +PROGRAM DEPENDENCIES: + time.py: utilities for calculating time operations + read_SLR_harmonics.py: low-degree spherical harmonic coefficients from SLR + +REFERENCES: + Loomis, Rachlin, and Luthcke, "Improved Earth Oblateness Rate Reveals + Increased Ice Sheet Losses and Mass-Driven Sea Level Rise", + Geophysical Research Letters, 46(12), 6910-6917, (2019). + https://doi.org/10.1029/2019GL082929 + Loomis, Rachlin, Wiese, Landerer, and Luthcke, "Replacing GRACE/GRACE-FO + C30 with satellite laser ranging: Impacts on Antarctic Ice Sheet + mass change". Geophysical Research Letters, 47, (2020). 
+ https://doi.org/10.1029/2019GL085488 + Dahle and Murboeck, "Post-processed GRACE/GRACE-FO Geopotential + GSM Coefficients GFZ RL06 (Level-2B Product)." + V. 0002. GFZ Data Services, (2019). + https://doi.org/10.5880/GFZ.GRAVIS_06_L2B + +UPDATE HISTORY: + Updated 01/2023: refactored satellite laser ranging read functions + Updated 04/2022: updated docstrings to numpy documentation format + include utf-8 encoding in reads to be windows compliant + Updated 09/2021: use functions for converting to and from GRACE months + Updated 05/2021: added GFZ GravIS GRACE/SLR low degree solutions + define int/float precision to prevent deprecation warning + Updated 04/2021: renamed SLR monthly 5x5 function from CSR + Updated 02/2021: use adjust_months function to fix special months cases + Updated 12/2020: using utilities from time module + Updated 08/2020: flake8 compatible regular expression strings + Updated 07/2020: added function docstrings + Updated 08/2019: new GSFC format with more columns + add catch to verify input SLR file exists + added LARES filtered C30 files from John Ries (C30_LARES_filtered.txt) + add C30 mean (9.5717395773300e-07) to LARES solutions + Updated 07/2019: added SLR C3,0 files from PO.DAAC (GSFC) + read CSR monthly 5x5 file and extract C3,0 coefficients + Written 05/2019 +""" +import os +import re +import numpy as np +import gravity_toolkit.time +import gravity_toolkit.read_SLR_harmonics + +# PURPOSE: read Degree 3 zonal data from Satellite Laser Ranging (SLR) +def C30(SLR_file, C30_MEAN=9.5717395773300e-07, HEADER=True): + """ + Reads C30 spherical harmonic coefficients from SLR measurements + + Parameters + ---------- + SLR_file: str + Satellite Laser Ranging file + C30_MEAN: float, default 9.5717395773300e-07 + Mean C30 to add to LARES C30 anomalies + HEADER: bool, default True + File contains header text to be skipped + + Returns + ------- + data: float + SLR degree 3 order 0 cosine stokes coefficients + error: float + SLR degree 3 order 0 cosine stokes coefficient error + month: int + GRACE/GRACE-FO month of measurement + time: float + date of SLR measurement + """ + + # check that SLR file exists + if not os.access(os.path.expanduser(SLR_file), os.F_OK): + raise FileNotFoundError('SLR file not found in file system') + # output dictionary with input data + dinput = {} + + if bool(re.search(r'TN-(14)',SLR_file,re.I)): + + # SLR C30 RL06 file from PO.DAAC produced by GSFC + with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: + file_contents = f.read().splitlines() + # number of lines contained in the file + file_lines = len(file_contents) + + # counts the number of lines in the header + count = 0 + # Reading over header text + while HEADER: + # file line at count + line = file_contents[count] + # find PRODUCT: within line to set HEADER flag to False when found + HEADER = not bool(re.match(r'Product:+',line)) + # add 1 to counter + count += 1 + + # number of months within the file + n_mon = file_lines - count + # date and GRACE/GRACE-FO month + dinput['time'] = np.zeros((n_mon)) + dinput['month'] = np.zeros((n_mon),dtype=int) + # monthly spherical harmonic replacement solutions + dinput['data'] = np.zeros((n_mon)) + # monthly spherical harmonic formal standard deviations + dinput['error'] = np.zeros((n_mon)) + # time count + t = 0 + # for every other line: + for line in file_contents[count:]: + # find numerical instances in line including exponents, + # decimal points and negatives + line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) + 
count = len(line_contents) + # only read lines where C30 data exists (don't read NaN lines) + if (count > 7): + # modified julian date for line + MJD = np.float64(line_contents[0]) + # converting from MJD into month, day and year + YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( + MJD+2400000.5, format='tuple') + # converting from month, day, year into decimal year + dinput['time'][t] = gravity_toolkit.time.convert_calendar_decimal( + YY, MM, day=DD, hour=hh) + # Spherical Harmonic data for line + dinput['data'][t] = np.float64(line_contents[5]) + dinput['error'][t] = np.float64(line_contents[7])*1e-10 + # GRACE/GRACE-FO month of SLR solutions + dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( + dinput['time'][t], around=np.round) + # add to t count + t += 1 + # verify that C30 solutions were imported + # (TN-14 data format has changed in the past) + if (t == 0): + raise Exception('No GSFC C30 data imported') + # truncate variables if necessary + for key,val in dinput.items(): + dinput[key] = val[:t] + elif bool(re.search(r'C30_LARES',SLR_file,re.I)): + # read LARES filtered values + LARES_input = np.loadtxt(SLR_file,skiprows=1) + dinput['time'] = LARES_input[:,0].copy() + # convert C30 from anomalies to absolute + dinput['data'] = 1e-10*LARES_input[:,1] + C30_MEAN + # filtered data does not have errors + dinput['error'] = np.zeros_like(LARES_input[:,1]) + # calculate GRACE/GRACE-FO month + dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) + elif bool(re.search(r'GRAVIS-2B_GFZOP',SLR_file,re.I)): + # Combined GRACE/SLR solution file produced by GFZ + # Column 1: MJD of BEGINNING of solution data span + # Column 2: Year and fraction of year of BEGINNING of solution span + # Column 6: Replacement C(3,0) + # Column 7: Replacement C(3,0) - mean C(3,0) (1.0E-10) + # Column 8: C(3,0) formal standard deviation (1.0E-12) + with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: + file_contents = f.read().splitlines() + # number of lines contained in the file + file_lines = len(file_contents) + + # counts the number of lines in the header + count = 0 + # Reading over header text + while HEADER: + # file line at count + line = file_contents[count] + # find PRODUCT: within line to set HEADER flag to False when found + HEADER = not bool(re.match(r'PRODUCT:+',line)) + # add 1 to counter + count += 1 + + # number of months within the file + n_mon = file_lines - count + # date and GRACE/GRACE-FO month + dinput['time'] = np.zeros((n_mon)) + dinput['month'] = np.zeros((n_mon),dtype=int) + # monthly spherical harmonic replacement solutions + dinput['data'] = np.zeros((n_mon)) + # monthly spherical harmonic formal standard deviations + dinput['error'] = np.zeros((n_mon)) + # time count + t = 0 + # for every other line: + for line in file_contents[count:]: + # find numerical instances in line including exponents, + # decimal points and negatives + line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) + count = len(line_contents) + # check for empty lines + if (count > 0): + # reading decimal year for start of span + dinput['time'][t] = np.float64(line_contents[1]) + # Spherical Harmonic data for line + dinput['data'][t] = np.float64(line_contents[5]) + dinput['error'][t] = np.float64(line_contents[7])*1e-10 + # GRACE/GRACE-FO month of SLR solutions + dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( + dinput['time'][t], around=np.round) + # add to t count + t += 1 + # truncate variables if necessary + for key,val in 
dinput.items(): + dinput[key] = val[:t] + else: + # CSR 5x5 + 6,1 file from CSR and extract C3,0 coefficients + Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) + # extract dates, C30 harmonics and errors + dinput['time'] = Ylms['time'].copy() + dinput['data'] = Ylms['clm'][3,0,:].copy() + dinput['error'] = Ylms['error']['clm'][3,0,:].copy() + # converting from MJD into month, day and year + YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( + Ylms['MJD']+2400000.5, format='tuple') + # calculate GRACE/GRACE-FO month + dinput['month'] = gravity_toolkit.time.calendar_to_grace(YY,MM) + + # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with + # Accelerometer shutoffs make the relation between month number + # and date more complicated as days from other months are used + # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) + # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) + # For all: May 2015 (161) is centered in Apr 2015 (160) + # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) + dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) + + # return the SLR-derived degree 3 zonal solutions + return dinput diff --git a/gravity_toolkit/SLR/C40.py b/gravity_toolkit/SLR/C40.py new file mode 100644 index 00000000..2c5c1171 --- /dev/null +++ b/gravity_toolkit/SLR/C40.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +u""" +C40.py +Written by Tyler Sutterley (01/2023) + +Reads monthly degree 4 zonal spherical harmonic data files from SLR + +Dataset distributed by CSR + ftp://ftp.csr.utexas.edu/pub/slr/degree_5/ + CSR_Monthly_5x5_Gravity_Harmonics.txt +Dataset distributed by GSFC + https://earth.gsfc.nasa.gov/geo/data/slr + gsfc_slr_5x5c61s61.txt + +CALLING SEQUENCE: + SLR_C40 = gravity_toolkit.SLR.C40(SLR_file) + +INPUTS: + SLR_file: + GSFC: gsfc_slr_5x5c61s61.txt + CSR: CSR_Monthly_5x5_Gravity_Harmonics.txt + +OUTPUTS: + data: SLR degree 4 order 0 cosine stokes coefficients (C40) + error: SLR degree 4 order 0 cosine stokes coefficient error (eC40) + month: GRACE/GRACE-FO month of measurement (April 2002 = 004) + time: date of SLR measurement + +OPTIONS: + HEADER: file contains header text to be skipped (default: True) + C40_MEAN: mean C40 to add to LARES C40 anomalies + DATE: mid-point of monthly solution for calculating 28-day arc averages + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python + https://numpy.org + https://numpy.org/doc/stable/user/numpy-for-matlab-users.html + dateutil: powerful extensions to datetime + https://dateutil.readthedocs.io/en/stable/ + +PROGRAM DEPENDENCIES: + time.py: utilities for calculating time operations + read_SLR_harmonics.py: low-degree spherical harmonic coefficients from SLR + +UPDATE HISTORY: + Updated 01/2023: refactored satellite laser ranging read functions + Written 09/2022 +""" +import os +import re +import numpy as np +import gravity_toolkit.time +import gravity_toolkit.read_SLR_harmonics + +# PURPOSE: read Degree 4 zonal data from Satellite Laser Ranging (SLR) +def C40(SLR_file, C40_MEAN=0.0, DATE=None, **kwargs): + """ + Reads C40 spherical harmonic coefficients from SLR measurements + + Parameters + ---------- + SLR_file: str + Satellite Laser Ranging file + C40_MEAN: float, default 0.0 + Mean C40 to add to LARES C40 anomalies + DATE: float or NoneType, default None + Mid-point of monthly solution for calculating 28-day arc averages + + Returns + ------- + data: float + SLR degree 4 order 0 cosine stokes coefficients + error: float + SLR degree 4 order 0 cosine 
diff --git a/gravity_toolkit/SLR/C40.py b/gravity_toolkit/SLR/C40.py new file mode 100644 index 00000000..2c5c1171 --- /dev/null +++ b/gravity_toolkit/SLR/C40.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +u""" +C40.py +Written by Tyler Sutterley (01/2023) + +Reads monthly degree 4 zonal spherical harmonic data files from SLR + +Dataset distributed by CSR + ftp://ftp.csr.utexas.edu/pub/slr/degree_5/ + CSR_Monthly_5x5_Gravity_Harmonics.txt +Dataset distributed by GSFC + https://earth.gsfc.nasa.gov/geo/data/slr + gsfc_slr_5x5c61s61.txt + +CALLING SEQUENCE: + SLR_C40 = gravity_toolkit.SLR.C40(SLR_file) + +INPUTS: + SLR_file: + GSFC: gsfc_slr_5x5c61s61.txt + CSR: CSR_Monthly_5x5_Gravity_Harmonics.txt + +OUTPUTS: + data: SLR degree 4 order 0 cosine stokes coefficients (C40) + error: SLR degree 4 order 0 cosine stokes coefficient error (eC40) + month: GRACE/GRACE-FO month of measurement (April 2002 = 004) + time: date of SLR measurement + +OPTIONS: + HEADER: file contains header text to be skipped (default: True) + C40_MEAN: mean C40 to add to LARES C40 anomalies + DATE: mid-point of monthly solution for calculating 28-day arc averages + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python + https://numpy.org + https://numpy.org/doc/stable/user/numpy-for-matlab-users.html + dateutil: powerful extensions to datetime + https://dateutil.readthedocs.io/en/stable/ + +PROGRAM DEPENDENCIES: + time.py: utilities for calculating time operations + read_SLR_harmonics.py: low-degree spherical harmonic coefficients from SLR + +UPDATE HISTORY: + Updated 01/2023: refactored satellite laser ranging read functions + Written 09/2022 +""" +import os +import re +import numpy as np +import gravity_toolkit.time +import gravity_toolkit.read_SLR_harmonics + +# PURPOSE: read Degree 4 zonal data from Satellite Laser Ranging (SLR) +def C40(SLR_file, C40_MEAN=0.0, DATE=None, **kwargs): + """ + Reads C40 spherical harmonic coefficients from SLR measurements + + Parameters + ---------- + SLR_file: str + Satellite Laser Ranging file + C40_MEAN: float, default 0.0 + Mean C40 to add to LARES C40 anomalies + DATE: float or NoneType, default None + Mid-point of monthly solution for calculating 28-day arc averages + + Returns + ------- + data: float + SLR degree 4 order 0 cosine stokes coefficients + error: float + SLR degree 4 order 0 cosine stokes coefficient error + month: int + GRACE/GRACE-FO month of measurement + time: float + date of SLR measurement + """ + + # check that SLR file exists + if not os.access(os.path.expanduser(SLR_file), os.F_OK): + raise FileNotFoundError('SLR file not found in file system') + # output dictionary with input data + dinput = {} + + if bool(re.search(r'gsfc_slr_5x5c61s61',SLR_file,re.I)): + # read 5x5 + 6,1 file from GSFC and extract coefficients + Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) + # calculate 28-day moving-average solution from 7-day arcs + dinput.update(gravity_toolkit.convert_weekly(Ylms['time'], + Ylms['clm'][4,0,:], DATE=DATE, NEIGHBORS=28)) + # no estimated spherical harmonic errors + dinput['error'] = np.zeros_like(DATE,dtype='f8') + elif bool(re.search(r'C40_LARES',SLR_file,re.I)): + # read LARES filtered values + LARES_input = np.loadtxt(SLR_file,skiprows=1) + dinput['time'] = LARES_input[:,0].copy() + # convert C40 from anomalies to absolute + dinput['data'] = 1e-10*LARES_input[:,1] + C40_MEAN + # filtered data does not have errors + dinput['error'] = np.zeros_like(LARES_input[:,1]) + # calculate GRACE/GRACE-FO month + dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) + else: + # read 5x5 + 6,1 file from CSR and extract C4,0 coefficients + Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) + # extract dates, C40 harmonics and errors + dinput['time'] = Ylms['time'].copy() + dinput['data'] = Ylms['clm'][4,0,:].copy() + dinput['error'] = Ylms['error']['clm'][4,0,:].copy() + # converting from MJD into month, day and year + YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( + Ylms['MJD']+2400000.5, format='tuple') + # calculate GRACE/GRACE-FO month + dinput['month'] = gravity_toolkit.time.calendar_to_grace(YY,MM) + + # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with + # Accelerometer shutoffs make the relation between month number + # and date more complicated as days from other months are used + # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) + # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) + # For all: May 2015 (161) is centered in Apr 2015 (160) + # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) + dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) + + # return the SLR-derived degree 4 zonal solutions + return dinput
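+ +# Example (illustrative sketch for the GSFC weekly file; `grace_dates` is a +# hypothetical array of GRACE/GRACE-FO decimal years used to center the +# 28-day arc averages): +# SLR_C40 = gravity_toolkit.SLR.C40('gsfc_slr_5x5c61s61.txt', DATE=grace_dates)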
diff --git a/gravity_toolkit/SLR/C50.py b/gravity_toolkit/SLR/C50.py new file mode 100644 index 00000000..31fb7695 --- /dev/null +++ b/gravity_toolkit/SLR/C50.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python +u""" +C50.py +Written by Yara Mohajerani and Tyler Sutterley (01/2023) + +Reads monthly degree 5 zonal spherical harmonic data files from SLR + +Dataset distributed by CSR + ftp://ftp.csr.utexas.edu/pub/slr/degree_5/ + CSR_Monthly_5x5_Gravity_Harmonics.txt +Dataset distributed by GSFC + https://earth.gsfc.nasa.gov/geo/data/slr + gsfc_slr_5x5c61s61.txt + +CALLING SEQUENCE: + SLR_C50 = gravity_toolkit.SLR.C50(SLR_file) + +INPUTS: + SLR_file: + GSFC: gsfc_slr_5x5c61s61.txt + CSR: CSR_Monthly_5x5_Gravity_Harmonics.txt + +OUTPUTS: + data: SLR degree 5 order 0 cosine stokes coefficients (C50) + error: SLR degree 5 order 0 cosine stokes coefficient error (eC50) + month: GRACE/GRACE-FO month of measurement (April 2002 = 004) + time: date of SLR measurement + +OPTIONS: + HEADER: file contains header text to be skipped (default: True) + C50_MEAN: mean C50 to add to LARES C50 anomalies + DATE: mid-point of monthly solution for calculating 28-day arc averages + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python + https://numpy.org + https://numpy.org/doc/stable/user/numpy-for-matlab-users.html + dateutil: powerful extensions to datetime + https://dateutil.readthedocs.io/en/stable/ + +PROGRAM DEPENDENCIES: + time.py: utilities for calculating time operations + read_SLR_harmonics.py: low-degree spherical harmonic coefficients from SLR + +UPDATE HISTORY: + Updated 01/2023: refactored satellite laser ranging read functions + Updated 04/2022: updated docstrings to numpy documentation format + include utf-8 encoding in reads to be windows compliant + Updated 12/2021: use function for converting from 7-day arcs + Updated 11/2021: reader for new weekly 5x5+6,1 fields from NASA GSFC + Updated 09/2021: use functions for converting to and from GRACE months + Updated 05/2021: simplified program similar to other SLR readers + define int/float precision to prevent deprecation warning + Updated 04/2021: using utilities from time module + Updated 08/2020: flake8 compatible regular expression strings + Updated 07/2020: added function docstrings + Written 11/2019 +""" +import os +import re +import numpy as np +import gravity_toolkit.time +import gravity_toolkit.read_SLR_harmonics + +# PURPOSE: read Degree 5 zonal data from Satellite Laser Ranging (SLR) +def C50(SLR_file, C50_MEAN=0.0, DATE=None, HEADER=True): + """ + Reads C50 spherical harmonic coefficients from SLR measurements + + Parameters + ---------- + SLR_file: str + Satellite Laser Ranging file + C50_MEAN: float, default 0.0 + Mean C50 to add to LARES C50 anomalies + DATE: float or NoneType, default None + Mid-point of monthly solution for calculating 28-day arc averages + HEADER: bool, default True + File contains header text to be skipped + + Returns + ------- + data: float + SLR degree 5 order 0 cosine stokes coefficients + error: float + SLR degree 5 order 0 cosine stokes coefficient error + month: int + GRACE/GRACE-FO month of measurement + time: float + date of SLR measurement + """ + + # check that SLR file exists + if not os.access(os.path.expanduser(SLR_file), os.F_OK): + raise FileNotFoundError('SLR file not found in file system') + # output dictionary with input data + dinput = {} + + if bool(re.search(r'GSFC_SLR_C(20)_C(30)_C(50)',SLR_file,re.I)): + + # SLR C50 RL06 file from GSFC + with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: + file_contents = f.read().splitlines() + # number of lines contained in the file + file_lines = len(file_contents) + + # counts the number of lines in the header + count = 0 + # Reading over header text + while HEADER: + # file line at count + line = file_contents[count] + # find Product: within line to set HEADER flag to False when found + HEADER = not bool(re.match(r'Product:+',line)) + # add 1 to counter + count += 1 + + # number of months within the file + n_mon = file_lines - count + # date and GRACE/GRACE-FO month + dinput['time'] = np.zeros((n_mon)) + dinput['month'] = np.zeros((n_mon),dtype=int) + # monthly spherical harmonic replacement solutions + dinput['data'] = np.zeros((n_mon)) + # monthly spherical harmonic formal standard deviations + dinput['error'] = np.zeros((n_mon)) + # time count + t = 0 + # for every other line: + for line in file_contents[count:]: + # find numerical instances in line including exponents, + # decimal points and negatives + line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) + count = len(line_contents) + # only read lines where C50 data exists (don't
read NaN lines) + if (count > 7): + # modified julian date for line + MJD = np.float64(line_contents[0]) + # converting from MJD into month, day and year + YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( + MJD+2400000.5, format='tuple') + # converting from month, day, year into decimal year + dinput['time'][t] = gravity_toolkit.time.convert_calendar_decimal( + YY, MM, day=DD, hour=hh) + # Spherical Harmonic data for line + dinput['data'][t] = np.float64(line_contents[10]) + dinput['error'][t] = np.float64(line_contents[12])*1e-10 + # GRACE/GRACE-FO month of SLR solutions + dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( + dinput['time'][t], around=np.round) + # add to t count + t += 1 + # verify that there are imported C50 solutions + if (t == 0): + raise Exception('No GSFC C50 data imported') + # truncate variables if necessary + for key,val in dinput.items(): + dinput[key] = val[:t] + elif bool(re.search(r'gsfc_slr_5x5c61s61',SLR_file,re.I)): + # read 5x5 + 6,1 file from GSFC and extract coefficients + Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) + # calculate 28-day moving-average solution from 7-day arcs + dinput.update(gravity_toolkit.convert_weekly(Ylms['time'], + Ylms['clm'][5,0,:], DATE=DATE, NEIGHBORS=28)) + # no estimated spherical harmonic errors + dinput['error'] = np.zeros_like(DATE,dtype='f8') + elif bool(re.search(r'C50_LARES',SLR_file,re.I)): + # read LARES filtered values + LARES_input = np.loadtxt(SLR_file,skiprows=1) + dinput['time'] = LARES_input[:,0].copy() + # convert C50 from anomalies to absolute + dinput['data'] = 1e-10*LARES_input[:,1] + C50_MEAN + # filtered data does not have errors + dinput['error'] = np.zeros_like(LARES_input[:,1]) + # calculate GRACE/GRACE-FO month + dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) + else: + # read 5x5 + 6,1 file from CSR and extract C5,0 coefficients + Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) + # extract dates, C50 harmonics and errors + dinput['time'] = Ylms['time'].copy() + dinput['data'] = Ylms['clm'][5,0,:].copy() + dinput['error'] = Ylms['error']['clm'][5,0,:].copy() + # converting from MJD into month, day and year + YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( + Ylms['MJD']+2400000.5, format='tuple') + # calculate GRACE/GRACE-FO month + dinput['month'] = gravity_toolkit.time.calendar_to_grace(YY,MM) + + # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with + # Accelerometer shutoffs make the relation between month number + # and date more complicated as days from other months are used + # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) + # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) + # For all: May 2015 (161) is centered in Apr 2015 (160) + # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) + dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) + + # return the SLR-derived degree 5 zonal solutions + return dinput
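+ +# Example (illustrative sketch for the LARES solutions; with the default +# C50_MEAN of 0.0 the returned values remain anomalies scaled by 1e-10): +# SLR_C50 = gravity_toolkit.SLR.C50('C50_LARES_filtered.txt')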
diff --git a/gravity_toolkit/SLR/CS2.py b/gravity_toolkit/SLR/CS2.py new file mode 100644 index 00000000..7cdb0738 --- /dev/null +++ b/gravity_toolkit/SLR/CS2.py @@ -0,0 +1,281 @@ +#!/usr/bin/env python +u""" +CS2.py +Written by Hugo Lecomte and Tyler Sutterley (01/2023) + +Reads monthly degree 2,m (figure axis and azimuthal dependence) + spherical harmonic data files from satellite laser ranging (SLR) + +Dataset distributed by CSR + http://download.csr.utexas.edu/pub/slr/degree_2/ + C21_S21_RL06.txt or C22_S22_RL06.txt +Dataset distributed by GFZ + ftp://isdcftp.gfz-potsdam.de/grace/GravIS/GFZ/Level-2B/aux_data/ + GRAVIS-2B_GFZOP_GRACE+SLR_LOW_DEGREES_0002.dat +Dataset distributed by GSFC + https://earth.gsfc.nasa.gov/geo/data/slr + +CALLING SEQUENCE: + SLR_2m = gravity_toolkit.SLR.CS2(SLR_file) + +INPUTS: + SLR_file: + CSR 2,1: C21_S21_RL06.txt + CSR 2,2: C22_S22_RL06.txt + GFZ: GRAVIS-2B_GFZOP_GRACE+SLR_LOW_DEGREES_0002.dat + GSFC: GSFC_C21_S21.txt + +OPTIONS: + HEADER: file contains header text to be skipped (default: True) + ORDER: spherical harmonic order to extract from low-degree fields (default: 1) + DATE: mid-point of monthly solution for calculating 28-day arc averages + +OUTPUTS: + C2m: SLR degree 2 order m cosine stokes coefficients + S2m: SLR degree 2 order m sine stokes coefficients + eC2m: SLR degree 2 order m cosine stokes coefficient error + eS2m: SLR degree 2 order m sine stokes coefficient error + month: GRACE/GRACE-FO month of measurement (Apr. 2002 = 004) + time: date of SLR measurement + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python + https://numpy.org + https://numpy.org/doc/stable/user/numpy-for-matlab-users.html + dateutil: powerful extensions to datetime + https://dateutil.readthedocs.io/en/stable/ + +PROGRAM DEPENDENCIES: + time.py: utilities for calculating time operations + read_SLR_harmonics.py: low-degree spherical harmonic coefficients from SLR + +REFERENCES: + Cheng et al., "Variations of the Earth's figure axis from satellite + laser ranging and GRACE", Journal of Geophysical Research, + 116, B01409, (2011). https://doi.org/10.1029/2010JB000850 + Dahle et al., "The GFZ GRACE RL06 Monthly Gravity Field Time Series: + Processing Details, and Quality Assessment", Remote Sensing, + 11(18), 2116, (2019). https://doi.org/10.3390/rs11182116 + Dahle and Murboeck, "Post-processed GRACE/GRACE-FO Geopotential + GSM Coefficients GFZ RL06 (Level-2B Product)." + V. 0002. GFZ Data Services, (2019). + https://doi.org/10.5880/GFZ.GRAVIS_06_L2B + Chen et al., "Assessment of degree-2 order-1 gravitational changes + from GRACE and GRACE Follow-on, Earth rotation, satellite laser + ranging, and models", Journal of Geodesy, 95(38), (2021).
+ https://doi.org/10.1007/s00190-021-01492-x + +UPDATE HISTORY: + Updated 01/2023: refactored satellite laser ranging read functions + Updated 04/2022: updated docstrings to numpy documentation format + include utf-8 encoding in reads to be windows compliant + Updated 11/2021: reader for new weekly 5x5+6,1 fields from NASA GSFC + Updated 09/2021: use functions for converting to and from GRACE months + Updated 08/2021: output empty spherical harmonic errors for GSFC + Updated 06/2021: added GSFC 7-day SLR figure axis solutions + Updated 05/2021: added GFZ GravIS GRACE/SLR low degree solutions + Updated 04/2021: use adjust_months function to fix special months cases + Written 11/2020 +""" +import os +import re +import numpy as np +import gravity_toolkit.time +import gravity_toolkit.read_SLR_harmonics + +# PURPOSE: read Degree 2,m data from Satellite Laser Ranging (SLR) +def CS2(SLR_file, ORDER=1, DATE=None, HEADER=True): + """ + Reads CS2,m spherical harmonic coefficients from SLR measurements + + Parameters + ---------- + SLR_file: str + Satellite Laser Ranging file + ORDER: int, default 1 + Spherical harmonic order to extract from low-degree fields + DATE: float or NoneType, default None + Mid-point of monthly solution for calculating 28-day arc averages + HEADER: bool, default True + File contains header text to be skipped + + Returns + ------- + C2m: float + SLR degree 2 order m cosine stokes coefficients + S2m: float + SLR degree 2 order m sine stokes coefficients + eC2m: float + SLR degree 2 order m cosine stokes coefficient error + eS2m: float + SLR degree 2 order m sine stokes coefficient error + month: int + GRACE/GRACE-FO month of measurement + time: float + date of SLR measurement + """ + + # check that SLR file exists + if not os.access(os.path.expanduser(SLR_file), os.F_OK): + raise FileNotFoundError('SLR file not found in file system') + # output dictionary with input data + dinput = {} + + if bool(re.search(r'GSFC_C2(\d)_S2(\d)',SLR_file,re.I)): + # 7-day arc SLR file produced by GSFC + # input variable names and types + dtype = {} + dtype['names'] = ('time','C2','S2') + dtype['formats'] = ('f','f8','f8') + # read SLR 2,1 file from GSFC + # Column 1: Approximate mid-point of 7-day solution (years) + # Column 2: Solution from SLR (normalized) + # Column 3: Solution from SLR (normalized) + content = np.loadtxt(os.path.expanduser(SLR_file),dtype=dtype) + # duplicate time and harmonics + tdec = np.repeat(content['time'],7) + c2m = np.repeat(content['C2'],7) + s2m = np.repeat(content['S2'],7) + # calculate daily dates to use in centered moving average + tdec += (np.mod(np.arange(len(tdec)),7) - 3.5)/365.25 + # number of dates to use in average + n_neighbors = 28 + # calculate 28-day moving-average solution from 7-day arcs + dinput['time'] = np.zeros_like(DATE) + dinput['C2m'] = np.zeros_like(DATE,dtype='f8') + dinput['S2m'] = np.zeros_like(DATE,dtype='f8') + # no estimated spherical harmonic errors + dinput['eC2m'] = np.zeros_like(DATE,dtype='f8') + dinput['eS2m'] = np.zeros_like(DATE,dtype='f8') + for i,D in enumerate(DATE): + isort = np.argsort((tdec - D)**2)[:n_neighbors] + dinput['time'][i] = np.mean(tdec[isort]) + dinput['C2m'][i] = np.mean(c2m[isort]) + dinput['S2m'][i] = np.mean(s2m[isort]) + # GRACE/GRACE-FO month + dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) + elif bool(re.search(r'gsfc_slr_5x5c61s61',SLR_file,re.I)): + # read 5x5 + 6,1 file from GSFC and extract coefficients + Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) + 
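+ # the weekly solutions below are expanded to daily resolution (each + # 7-day arc value is repeated for its 7 days) and each output value is + # the mean of the 28 daily values nearest the corresponding input date: + # an approximately centered 28-day moving average over the DATE array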
# duplicate time and harmonics + tdec = np.repeat(Ylms['time'],7) + c2m = np.repeat(Ylms['clm'][2,ORDER],7) + s2m = np.repeat(Ylms['slm'][2,ORDER],7) + # calculate daily dates to use in centered moving average + tdec += (np.mod(np.arange(len(tdec)),7) - 3.5)/365.25 + # number of dates to use in average + n_neighbors = 28 + # calculate 28-day moving-average solution from 7-day arcs + dinput['time'] = np.zeros_like(DATE) + dinput['C2m'] = np.zeros_like(DATE,dtype='f8') + dinput['S2m'] = np.zeros_like(DATE,dtype='f8') + # no estimated spherical harmonic errors + dinput['eC2m'] = np.zeros_like(DATE,dtype='f8') + dinput['eS2m'] = np.zeros_like(DATE,dtype='f8') + for i,D in enumerate(DATE): + isort = np.argsort((tdec - D)**2)[:n_neighbors] + dinput['time'][i] = np.mean(tdec[isort]) + dinput['C2m'][i] = np.mean(c2m[isort]) + dinput['S2m'][i] = np.mean(s2m[isort]) + # GRACE/GRACE-FO month + dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) + elif bool(re.search(r'C2(\d)_S2(\d)_(RL\d{2})',SLR_file,re.I)): + # SLR RL06 file produced by CSR + # input variable names and types + dtype = {} + dtype['names'] = ('time','C2','S2','eC2','eS2', + 'C2aod','S2aod','start','end') + dtype['formats'] = ('f','f8','f8','f','f','f','f','f','f') + # read SLR 2,1 or 2,2 RL06 file from CSR + # header text is commented and won't be read + # Column 1: Approximate mid-point of monthly solution (years) + # Column 2: Solution from SLR (normalized) + # Column 3: Solution from SLR (normalized) + # Column 4: Solution sigma (1E-10) + # Column 5: Solution sigma (1E-10) + # Column 6: Mean value of Atmosphere-Ocean De-aliasing model (1E-10) + # Column 7: Mean value of Atmosphere-Ocean De-aliasing model (1E-10) + # Columns 8-9: Start and end dates of data used in solution + content = np.loadtxt(os.path.expanduser(SLR_file),dtype=dtype) + # date and GRACE/GRACE-FO month + dinput['time'] = content['time'].copy() + dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) + # remove the monthly mean of the AOD model + dinput['C2m'] = content['C2'] - content['C2aod']*10**-10 + dinput['S2m'] = content['S2'] - content['S2aod']*10**-10 + # scale SLR solution sigmas + dinput['eC2m'] = content['eC2']*10**-10 + dinput['eS2m'] = content['eS2']*10**-10 + elif bool(re.search(r'GRAVIS-2B_GFZOP',SLR_file,re.I)): + # Combined GRACE/SLR solution file produced by GFZ + # Column 1: MJD of BEGINNING of solution data span + # Column 2: Year and fraction of year of BEGINNING of solution span + # Column 9: Replacement C(2,1) + # Column 10: Replacement C(2,1) - mean C(2,1) (1.0E-10) + # Column 11: C(2,1) formal standard deviation (1.0E-12) + # Column 12: Replacement S(2,1) + # Column 13: Replacement S(2,1) - mean S(2,1) (1.0E-10) + # Column 14: S(2,1) formal standard deviation (1.0E-12) + with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: + file_contents = f.read().splitlines() + # number of lines contained in the file + file_lines = len(file_contents) + + # counts the number of lines in the header + count = 0 + # Reading over header text + while HEADER: + # file line at count + line = file_contents[count] + # find PRODUCT: within line to set HEADER flag to False when found + HEADER = not bool(re.match(r'PRODUCT:+',line)) + # add 1 to counter + count += 1 + + # number of months within the file + n_mon = file_lines - count + # date and GRACE/GRACE-FO month + dinput['time'] = np.zeros((n_mon)) + dinput['month'] = np.zeros((n_mon),dtype=int) + # monthly spherical harmonic replacement solutions + 
dinput['C2m'] = np.zeros((n_mon)) + dinput['S2m'] = np.zeros((n_mon)) + # monthly spherical harmonic formal standard deviations + dinput['eC2m'] = np.zeros((n_mon)) + dinput['eS2m'] = np.zeros((n_mon)) + # time count + t = 0 + # for every other line: + for line in file_contents[count:]: + # find numerical instances in line including exponents, + # decimal points and negatives + line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) + count = len(line_contents) + # check for empty lines + if (count > 0): + # reading decimal year for start of span + dinput['time'][t] = np.float64(line_contents[1]) + # Spherical Harmonic data for line + dinput['C2m'][t] = np.float64(line_contents[8]) + dinput['eC2m'][t] = np.float64(line_contents[10])*1e-10 + dinput['S2m'][t] = np.float64(line_contents[11]) + dinput['eS2m'][t] = np.float64(line_contents[13])*1e-10 + # GRACE/GRACE-FO month of SLR solutions + dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( + dinput['time'][t], around=np.round) + # add to t count + t += 1 + # truncate variables if necessary + for key,val in dinput.items(): + dinput[key] = val[:t] + + # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with + # Accelerometer shutoffs make the relation between month number + # and date more complicated as days from other months are used + # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) + # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) + # For all: May 2015 (161) is centered in Apr 2015 (160) + # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) + dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) + + # return the SLR-derived degree 2 solutions + return dinput diff --git a/gravity_toolkit/SLR/__init__.py b/gravity_toolkit/SLR/__init__.py new file mode 100644 index 00000000..002c9a00 --- /dev/null +++ b/gravity_toolkit/SLR/__init__.py @@ -0,0 +1,5 @@ +from .C20 import * +from .C30 import * +from .C40 import * +from .C50 import * +from .CS2 import * diff --git a/gravity_toolkit/__init__.py b/gravity_toolkit/__init__.py index 9b54ca2f..62a73fff 100644 --- a/gravity_toolkit/__init__.py +++ b/gravity_toolkit/__init__.py @@ -21,6 +21,9 @@ import gravity_toolkit.tools import gravity_toolkit.utilities import gravity_toolkit.version +from gravity_toolkit import SLR +from gravity_toolkit import time_series +from gravity_toolkit.associated_legendre import associated_legendre, plm_colombo, plm_holmes, plm_mohlenkamp from gravity_toolkit.clenshaw_summation import clenshaw_summation from gravity_toolkit.degree_amplitude import degree_amplitude from gravity_toolkit.destripe_harmonics import destripe_harmonics @@ -39,31 +42,18 @@ from gravity_toolkit.grace_months_index import grace_months_index from gravity_toolkit.harmonics import harmonics from gravity_toolkit.harmonic_gradients import harmonic_gradients -from gravity_toolkit.harmonic_summation import harmonic_summation +from gravity_toolkit.harmonic_summation import harmonic_summation, harmonic_transform from gravity_toolkit.legendre_polynomials import legendre_polynomials from gravity_toolkit.legendre import legendre from gravity_toolkit.ocean_stokes import ocean_stokes -from gravity_toolkit.piecewise_regress import piecewise_regress -from gravity_toolkit.plm_colombo import plm_colombo -from gravity_toolkit.plm_holmes import plm_holmes -from gravity_toolkit.plm_mohlenkamp import plm_mohlenkamp from gravity_toolkit.read_gfc_harmonics import read_gfc_harmonics from gravity_toolkit.read_GIA_model import read_GIA_model, gia from 
gravity_toolkit.read_GRACE_harmonics import read_GRACE_harmonics from gravity_toolkit.read_ICGEM_harmonics import read_ICGEM_harmonics from gravity_toolkit.read_love_numbers import read_love_numbers,load_love_numbers -from gravity_toolkit.read_SLR_C20 import read_SLR_C20 -from gravity_toolkit.read_SLR_CS2 import read_SLR_CS2 -from gravity_toolkit.read_SLR_C30 import read_SLR_C30 -from gravity_toolkit.read_SLR_C40 import read_SLR_C40 -from gravity_toolkit.read_SLR_C50 import read_SLR_C50 from gravity_toolkit.read_SLR_harmonics import read_SLR_harmonics, convert_weekly -from gravity_toolkit.savitzky_golay import savitzky_golay from gravity_toolkit.sea_level_equation import sea_level_equation from gravity_toolkit.spatial import spatial -from gravity_toolkit.tsamplitude import tsamplitude -from gravity_toolkit.tsregress import tsregress -from gravity_toolkit.tssmooth import tssmooth from gravity_toolkit.units import units # get version number __version__ = gravity_toolkit.version.version diff --git a/gravity_toolkit/associated_legendre.py b/gravity_toolkit/associated_legendre.py new file mode 100644 index 00000000..369c9854 --- /dev/null +++ b/gravity_toolkit/associated_legendre.py @@ -0,0 +1,386 @@ +#!/usr/bin/env python +u""" +associated_legendre.py +Written by Tyler Sutterley (01/2023) + +Computes fully-normalized associated Legendre Polynomials + +UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials + Updated 04/2022: updated docstrings to numpy documentation format + Updated 05/2021: define int/float precision to prevent deprecation warning + Updated 09/2020: verify dimensions of input x variable + Updated 08/2020: prevent zero divisions by changing u==0 to eps of data type + Updated 07/2020: added function docstrings + Updated 10/2018: using future division for python3 Compatibility + Updated 07/2017: output first differential of legendre polynomials + Updated 05/2015: added parameter MMAX for MMAX != LMAX + Updated 09/2013: new format for file headers + Written 03/2013 +""" +from __future__ import division +import numpy as np + +def associated_legendre(LMAX, x, method='holmes', MMAX=None, astype=np.float64): + """ + Computes fully-normalized associated Legendre Polynomials and their + first derivative + + Parameters + ---------- + LMAX: int + maximum degree of Legendre polynomials + x: float + elements ranging from -1 to 1 + + Typically ``cos(theta)``, where ``theta`` is the colatitude in radians + method: str, default 'holmes' + Method for computing the associated Legendre polynomials + + - ``'colombo'`` + - ``'holmes'`` + - ``'mohlenkamp'`` + MMAX: int or NoneType, default None + maximum order of Associated Legendre polynomials + astype: obj, default np.float64 + output variable data type + + Returns + ------- + plms: float + fully-normalized Legendre polynomials + dplms: float + first derivative of Legendre polynomials + """ + if (method.lower() == 'colombo'): + return plm_colombo(LMAX, x, MMAX=MMAX, astype=astype) + elif (method.lower() == 'holmes'): + return plm_holmes(LMAX, x, MMAX=MMAX, astype=astype) + elif (method.lower() == 'mohlenkamp'): + return plm_mohlenkamp(LMAX, x, MMAX=MMAX, astype=astype) + raise ValueError(f'Unknown method {method}')
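+ +# Example (illustrative sketch; the degree and colatitudes are arbitrary): +# import numpy as np +# import gravity_toolkit +# th = np.radians(np.arange(0.5, 180.0, 1.0)) # colatitudes in radians +# PLM, dPLM = gravity_toolkit.associated_legendre(60, np.cos(th), method='holmes') +# # PLM and dPLM have shape (LMAX+1, MMAX+1, len(th))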
+ +def plm_colombo(LMAX, x, MMAX=None, astype=np.float64): + """ + Computes fully-normalized associated Legendre Polynomials and their + first derivative using a Standard forward column method [Colombo1981]_ + + Parameters + ---------- + LMAX: int + maximum degree of Legendre polynomials + x: float + elements ranging from -1 to 1 + + Typically ``cos(theta)``, where ``theta`` is the colatitude in radians + MMAX: int or NoneType, default None + maximum order of Associated Legendre polynomials + astype: obj, default np.float64 + output variable data type + + Returns + ------- + plms: float + fully-normalized Legendre polynomials + dplms: float + first derivative of Legendre polynomials + + References + ---------- + .. [Colombo1981] O. L. Colombo, + "Numerical Methods for Harmonic Analysis on the Sphere", + Air Force Contract No. F19628-79-C-0027, + *OSURF Proj. No. 711664*, 140 pp., (1981). + .. [Losch2003] M. Losch and V. Seufer, + "How to Compute Geoid Undulations (Geoid Height Relative + to a Given Reference Ellipsoid) from Spherical Harmonic + Coefficients for Satellite Altimetry Applications", (2003). + `eprint ID: 11802 `_ + .. [Holmes2002] S. A. Holmes and W. E. Featherstone, + "A unified approach to the Clenshaw summation and the + recursive computation of very high degree and order + normalised associated Legendre functions", + *Journal of Geodesy*, 76, 279--299, (2002). + `doi: 10.1007/s00190-002-0216-2 <https://doi.org/10.1007/s00190-002-0216-2>`_ + """ + + # removing singleton dimensions of x + x = np.atleast_1d(x).flatten().astype(astype) + # length of the x array + jm = len(x) + # verify data type of spherical harmonic truncation + LMAX = np.int64(LMAX) + # upper bound of spherical harmonic orders (default = LMAX) + if MMAX is None: + MMAX = np.copy(LMAX) + + # allocating for the plm matrix and differentials + plm = np.zeros((LMAX+1,LMAX+1,jm)) + dplm = np.zeros((LMAX+1,LMAX+1,jm)) + + # u is sine of colatitude (cosine of latitude) so that 0 <= u <= 1 + # for x=cos(th): u=sin(th) + u = np.sqrt(1.0 - x**2) + # update where u==0 to eps of data type to prevent invalid divisions + u[u == 0] = np.finfo(u.dtype).eps + + # Calculating the initial polynomials for the recursion + plm[0,0,:] = 1.0 + plm[1,0,:] = np.sqrt(3.0)*x + plm[1,1,:] = np.sqrt(3.0)*u + # calculating first derivatives for harmonics of degree 1 + dplm[1,0,:] = (1.0/u)*(x*plm[1,0,:] - np.sqrt(3)*plm[0,0,:]) + dplm[1,1,:] = (x/u)*plm[1,1,:] + for l in range(2, LMAX+1): + for m in range(0, l):# Zonal and Tesseral harmonics (non-sectorial) + # Computes the non-sectorial terms from previously computed + # sectorial terms.
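+ # via the standard forward-column recurrence (matching alm and blm below): + # P(l,m) = a(l,m)*x*P(l-1,m) - b(l,m)*P(l-2,m) + # a(l,m) = sqrt((2l-1)*(2l+1)/((l-m)*(l+m))) + # b(l,m) = sqrt((2l+1)*(l+m-1)*(l-m-1)/((l-m)*(l+m)*(2l-3)))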
+ alm = np.sqrt(((2.0*l-1.0)*(2.0*l+1.0))/((l-m)*(l+m))) + blm = np.sqrt(((2.0*l+1.0)*(l+m-1.0)*(l-m-1.0))/((l-m)*(l+m)*(2.0*l-3.0))) + # if (m == l-1): plm[l-2,m,:] will be 0 + plm[l,m,:] = alm*x*plm[l-1,m,:] - blm*plm[l-2,m,:] + # calculate first derivatives + flm = np.sqrt(((l**2.0 - m**2.0)*(2.0*l + 1.0))/(2.0*l - 1.0)) + dplm[l,m,:] = (1.0/u)*(l*x*plm[l,m,:] - flm*plm[l-1,m,:]) + + # Sectorial harmonics + # The sectorial harmonics serve as seed values for the recursion + # starting with P00 and P11 (outside the loop) + plm[l,l,:] = u*np.sqrt((2.0*l+1.0)/(2.0*l))*np.squeeze(plm[l-1,l-1,:]) + # calculate first derivatives for sectorial harmonics + dplm[l,l,:] = np.longdouble(l)*(x/u)*plm[l,l,:] + + # return the legendre polynomials and their first derivative + # truncating orders to MMAX + return plm[:,:MMAX+1,:], dplm[:,:MMAX+1,:] + +def plm_holmes(LMAX, x, MMAX=None, astype=np.float64): + """ + Computes fully-normalized associated Legendre Polynomials and their + first derivative using Holmes and Featherstone relation [Holmes2002]_ + + Parameters + ---------- + LMAX: int + maximum degree of Legendre polynomials + x: float + elements ranging from -1 to 1 + + Typically ``cos(theta)``, where ``theta`` is the colatitude in radians + MMAX: int or NoneType, default None + maximum order of Associated Legendre polynomials + astype: obj, default np.float64 + output variable data type + + Returns + ------- + plms: float + fully-normalized Legendre polynomials + dplms: float + first derivative of Legendre polynomials + + References + ---------- + .. [Losch2003] M. Losch and V. Seufer, + "How to Compute Geoid Undulations (Geoid Height Relative + to a Given Reference Ellipsoid) from Spherical Harmonic + Coefficients for Satellite Altimetry Applications", (2003). + `eprint ID: 11802 `_ + .. [Holmes2002] S. A. Holmes and W. E. Featherstone, + "A unified approach to the Clenshaw summation and the + recursive computation of very high degree and order + normalised associated Legendre functions", + *Journal of Geodesy*, 76, 279--299, (2002). + `doi: 10.1007/s00190-002-0216-2 <https://doi.org/10.1007/s00190-002-0216-2>`_ + """ + + # removing singleton dimensions of x + x = np.atleast_1d(x).flatten().astype(astype) + # length of the x array + jm = len(x) + # verify data type of spherical harmonic truncation + LMAX = np.int64(LMAX) + # upper bound of spherical harmonic orders (default = LMAX) + if MMAX is None: + MMAX = np.copy(LMAX) + # scaling factor + scalef = 1.0e-280 + + # allocate for multiplicative factors, and plms + f1 = np.zeros(((LMAX+1)*(LMAX+2)//2), dtype=astype) + f2 = np.zeros(((LMAX+1)*(LMAX+2)//2), dtype=astype) + p = np.zeros(((LMAX+1)*(LMAX+2)//2,jm), dtype=astype) + plm = np.zeros((LMAX+1,LMAX+1,jm), dtype=astype) + dplm = np.zeros((LMAX+1,LMAX+1,jm), dtype=astype) + + # Precompute multiplicative factors used in recursion relationships + # Note that prefactors are not used for the case when m=l and m=l-1, + # as a different recursion is used for these two values.
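+ # the same recurrence P(l,m) = f1*x*P(l-1,m) - f2*P(l-2,m) is applied + # below to polynomials scaled by 1/u**m (with a global scale factor of + # 1e-280) to avoid underflow at high degrees and orders; the factors are + # stored in a flattened triangular array indexed by k = l*(l+1)/2 + m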
+ k = 2# k = l*(l+1)/2 + m + for l in range(2, LMAX+1): + k += 1 + f1[k] = np.sqrt(2.0*l-1.0)*np.sqrt(2.0*l+1.0)/np.longdouble(l) + f2[k] = np.longdouble(l-1.0)*np.sqrt(2.0*l+1.0)/(np.sqrt(2.0*l-3.0)*np.longdouble(l)) + for m in range(1, l-1): + k += 1 + f1[k] = np.sqrt(2.0*l+1.0)*np.sqrt(2.0*l-1.0)/(np.sqrt(l+m)*np.sqrt(l-m)) + f2[k] = np.sqrt(2.0*l+1.0)*np.sqrt(l-m-1.0)*np.sqrt(l+m-1.0)/ \ + (np.sqrt(2.0*l-3.0)*np.sqrt(l+m)*np.sqrt(l-m)) + k += 2 + + # u is sine of colatitude (cosine of latitude) so that 0 <= u <= 1 + # for x=cos(th): u=sin(th) + u = np.sqrt(1.0 - x**2) + # update where u==0 to eps of data type to prevent invalid divisions + u[u == 0] = np.finfo(u.dtype).eps + + # Calculate P(l,0). These are not scaled. + p[0,:] = 1.0 + p[1,:] = np.sqrt(3.0)*x + k = 1 + for l in range(2, LMAX+1): + k += l + p[k,:] = f1[k]*x*p[k-l,:] - f2[k]*p[k-2*l+1,:] + + # Calculate P(m,m), P(m+1,m), and P(l,m) + pmm = np.sqrt(2.0)*scalef + rescalem = 1.0/scalef + kstart = 0 + + for m in range(1, LMAX): + rescalem = rescalem * u + # Calculate P(m,m) + kstart += m+1 + pmm = pmm * np.sqrt(2*m+1)/np.sqrt(2*m) + p[kstart,:] = pmm + # Calculate P(m+1,m) + k = kstart+m+1 + p[k,:] = x*np.sqrt(2*m+3)*pmm + # Calculate P(l,m) + for l in range(m+2, LMAX+1): + k += l + p[k,:] = x*f1[k]*p[k-l,:] - f2[k]*p[k-2*l+1,:] + p[k-2*l+1,:] = p[k-2*l+1,:] * rescalem + # rescale + p[k,:] = p[k,:] * rescalem + p[k-LMAX,:] = p[k-LMAX,:] * rescalem + + # Calculate P(LMAX,LMAX) + rescalem = rescalem * u + kstart += m+2 + p[kstart,:] = pmm * np.sqrt(2*LMAX+1) / np.sqrt(2*LMAX) * rescalem + # reshape Legendre polynomials to output dimensions + for m in range(LMAX+1): + for l in range(m,LMAX+1): + lm = (l*(l+1))//2 + m + plm[l,m,:] = p[lm,:] + # calculate first derivatives + if (l == m): + dplm[l,m,:] = np.longdouble(m)*(x/u)*plm[l,m,:] + else: + flm = np.sqrt(((l**2.0 - m**2.0)*(2.0*l + 1.0))/(2.0*l - 1.0)) + dplm[l,m,:]= (1.0/u)*(l*x*plm[l,m,:] - flm*plm[l-1,m,:]) + + # return the legendre polynomials and their first derivative + # truncating orders to MMAX + return plm[:,:MMAX+1,:], dplm[:,:MMAX+1,:] + +def plm_mohlenkamp(LMAX, x, MMAX=None, astype=np.float64): + """ + Computes fully-normalized associated Legendre Polynomials + using Martin Mohlenkamp's recursion relation [Mohlenkamp2016]_ + + Derived from [Szego1939]_ recurrence formula for Jacobi Polynomials + + Parameters + ---------- + LMAX: int + maximum degree of Legendre polynomials + x: float + elements ranging from -1 to 1 + + Typically ``cos(theta)``, where ``theta`` is the colatitude in radians + MMAX: int or NoneType, default None + maximum order of Associated Legendre polynomials + + Returns + ------- + plms: float + fully-normalized Legendre polynomials + dplms: float + first derivative of Legendre polynomials + + References + ---------- + .. [Mohlenkamp2016] M. J. Mohlenkamp, + "A User's Guide to Spherical Harmonics", (2016). + `[pdf] `_ + .. [Szego1939] Gabor Szeg\ |ouml|\ , "Orthogonal Polynomials", 440 pp., (1939). + `[pdf] `_ + + .. |ouml| unicode:: U+00F6 .. LATIN SMALL LETTER O WITH DIAERESIS + """
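+ +# The recursion below first builds Jacobi-type polynomials J(k,m) in x and +# then assembles P(l,m) = N(m)*u**m * J(l-m,m), where the geodesy +# normalization N(m) is sqrt(2) for m == 0 (the u**0 factor drops) and 2 +# otherwise; first derivatives follow the same relation as the other methods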
+ + # Verify LMAX as integer + LMAX = np.int64(LMAX) + # upper bound of spherical harmonic orders (default = LMAX) + if MMAX is None: + MMAX = np.copy(LMAX) + + # removing singleton dimensions of x + x = np.atleast_1d(x).flatten() + # length of the x array + sx = len(x) + + # Initialize the output Legendre polynomials + plm = np.zeros((LMAX+1,MMAX+1,sx), dtype=astype) + dplm = np.zeros((LMAX+1,MMAX+1,sx), dtype=astype) + # Jacobi polynomial for the recurrence relation + jlmm = np.zeros((LMAX+1,MMAX+1,sx)) + # for x=cos(th): u = sin(th) + u = np.sqrt(1.0 - x**2) + # update where u==0 to eps of data type to prevent invalid divisions + u[u == 0] = np.finfo(u.dtype).eps + + # for all spherical harmonic orders of interest + for mm in range(0,MMAX+1):# equivalent to 0:MMAX + # Initialize the recurrence relation + # J-1,m,m Term == 0 + # J0,m,m Term + if (mm > 0): + # j ranges from 1 to mm for the product + j = np.arange(0,mm)+1.0 + jlmm[0,mm,:] = np.prod(np.sqrt(1.0 + 1.0/(2.0*j)))/np.sqrt(2.0) + else: # if mm == 0: jlmm = 1/sqrt(2) + jlmm[0,mm,:] = 1.0/np.sqrt(2.0) + # Jk,m,m Terms + for k in range(1, LMAX+1):# computation for SH degrees + # Initialization begins at -1 + # this is to make the formula parallel the function written in + # Martin Mohlenkamp's Guide to Spherical Harmonics + # Jacobi General Terms + if (k == 1):# for degree 1 terms + jlmm[k,mm,:] = 2.0*x * jlmm[k-1,mm,:] * \ + np.sqrt(1.0 + (mm - 0.5)/k) * \ + np.sqrt(1.0 - (mm - 0.5)/(k + 2.0*mm)) + else:# for all other spherical harmonic degrees + jlmm[k,mm,:] = 2.0*x * jlmm[k-1,mm,:] * \ + np.sqrt(1.0 + (mm - 0.5)/k) * \ + np.sqrt(1.0 - (mm - 0.5)/(k + 2.0*mm)) - \ + jlmm[k-2,mm,:] * np.sqrt(1.0 + 4.0/(2.0*k + 2.0*mm - 3.0)) * \ + np.sqrt(1.0 - (1.0/k)) * np.sqrt(1.0 - 1.0/(k + 2.0*mm)) + # Normalization is geodesy convention + for l in range(mm,LMAX+1): # equivalent to mm:LMAX + if (mm == 0):# Geodesy normalization (m=0) == sqrt(2)*sin(th)^0 + # u^mm term is dropped as u^0 = 1 + plm[l,mm,:] = np.sqrt(2.0)*jlmm[l-mm,mm,:] + else:# Geodesy normalization all others == 2*sin(th)^mm + plm[l,mm,:] = 2.0*(u**mm)*jlmm[l-mm,mm,:] + # calculate first derivatives + if (l == mm): + dplm[l,mm,:] = np.longdouble(mm)*(x/u)*plm[l,mm,:] + else: + flm = np.sqrt(((l**2.0 - mm**2.0)*(2.0*l + 1.0))/(2.0*l - 1.0)) + dplm[l,mm,:]= (1.0/u)*(l*x*plm[l,mm,:] - flm*plm[l-1,mm,:]) + # return the legendre polynomials and their first derivative + return plm, dplm diff --git a/gravity_toolkit/gen_disc_load.py b/gravity_toolkit/gen_disc_load.py index 4fbf034a..2daae780 100644 --- a/gravity_toolkit/gen_disc_load.py +++ b/gravity_toolkit/gen_disc_load.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" gen_disc_load.py -Written by Tyler Sutterley (11/2022) +Written by Tyler Sutterley (01/2023) Calculates gravitational spherical harmonic coefficients for a uniform disc load CALLING SEQUENCE: @@ -34,7 +34,8 @@ numpy: Scientific Computing Tools For Python (https://numpy.org) PROGRAM DEPENDENCIES: - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials legendre_polynomials.py: Computes fully normalized Legendre polynomials units.py: class for converting spherical harmonic data to specific units harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO @@ -54,6 +55,7 @@ https://doi.org/10.1007/s00190-011-0522-7 UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 11/2022: use
f-strings for formatting verbose or ascii output Updated 04/2022: updated docstrings to numpy documentation format Updated 11/2021: added UNITS option for converting from different inputs @@ -71,7 +73,7 @@ import numpy as np import gravity_toolkit.units import gravity_toolkit.harmonics -from gravity_toolkit.plm_holmes import plm_holmes +from gravity_toolkit.associated_legendre import plm_holmes from gravity_toolkit.legendre_polynomials import legendre_polynomials def gen_disc_load(data, lon, lat, area, LMAX=60, MMAX=None, UNITS=2, diff --git a/gravity_toolkit/gen_harmonics.py b/gravity_toolkit/gen_harmonics.py index 33cf1398..2324e6a2 100644 --- a/gravity_toolkit/gen_harmonics.py +++ b/gravity_toolkit/gen_harmonics.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" gen_harmonics.py -Written by Tyler Sutterley (04/2022) +Written by Tyler Sutterley (01/2023) Converts data from the spatial domain to spherical harmonic coefficients Does not compute the solid Earth elastic response or convert units @@ -33,7 +33,8 @@ numpy: Scientific Computing Tools For Python (https://numpy.org) PROGRAM DEPENDENCIES: - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials fourier_legendre.py: Computes the Fourier coefficients of the associated Legendre functions harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO @@ -46,6 +47,7 @@ Associated Legendre Functions", Journal of Geodesy (2002) UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 04/2022: updated docstrings to numpy documentation format Updated 09/2021: merged integration and fourier harmonics programs Updated 05/2021: define int/float precision to prevent deprecation warning @@ -60,7 +62,7 @@ """ import numpy as np import gravity_toolkit.harmonics -from gravity_toolkit.plm_holmes import plm_holmes +from gravity_toolkit.associated_legendre import plm_holmes from gravity_toolkit.fourier_legendre import fourier_legendre def gen_harmonics(data, lon, lat, **kwargs): diff --git a/gravity_toolkit/gen_spherical_cap.py b/gravity_toolkit/gen_spherical_cap.py index fcb2fae4..aaad38e7 100755 --- a/gravity_toolkit/gen_spherical_cap.py +++ b/gravity_toolkit/gen_spherical_cap.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" gen_spherical_cap.py -Written by Tyler Sutterley (11/2022) +Written by Tyler Sutterley (01/2023) Calculates gravitational spherical harmonic coefficients for a spherical cap Spherical cap derivation from Longman (1962), Farrell (1972), Pollack (1973) @@ -43,7 +43,8 @@ numpy: Scientific Computing Tools For Python (https://numpy.org) PROGRAM DEPENDENCIES: - plm_holmes.py: Computes fully-normalized associated Legendre polynomials + associated_legendre.py: Computes fully-normalized associated + Legendre polynomials legendre_polynomials.py: Computes fully normalized Legendre polynomials units.py: class for converting spherical harmonic data to specific units harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO @@ -63,6 +64,7 @@ https://doi.org/10.1007/s00190-011-0522-7 UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 04/2022: updated docstrings to numpy documentation format Updated 11/2021: added UNITS list option for converting from custom units @@ -92,7 +94,7 @@ import numpy as np import gravity_toolkit.units import gravity_toolkit.harmonics -from 
gravity_toolkit.plm_holmes import plm_holmes +from gravity_toolkit.associated_legendre import plm_holmes from gravity_toolkit.legendre_polynomials import legendre_polynomials def gen_spherical_cap(data, lon, lat, LMAX=60, MMAX=None, diff --git a/gravity_toolkit/gen_stokes.py b/gravity_toolkit/gen_stokes.py index 095e1c6b..1271cb56 100755 --- a/gravity_toolkit/gen_stokes.py +++ b/gravity_toolkit/gen_stokes.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" gen_stokes.py -Written by Tyler Sutterley (11/2022) +Written by Tyler Sutterley (01/2023) Converts data from the spatial domain to spherical harmonic coefficients @@ -36,13 +36,14 @@ numpy: Scientific Computing Tools For Python (https://numpy.org) PROGRAM DEPENDENCIES: - plm_holmes.py: computes fully-normalized associated Legendre polynomials + associated_legendre.py: computes fully-normalized associated Legendre polynomials units.py: class for converting spherical harmonic data to specific units harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO destripe_harmonics.py: calculates the decorrelation (destriping) filter and filters the GRACE/GRACE-FO coefficients for striping errors UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 04/2022: updated docstrings to numpy documentation format Updated 11/2021: added UNITS list option for converting from custom units @@ -70,7 +71,7 @@ import numpy as np import gravity_toolkit.units import gravity_toolkit.harmonics -from gravity_toolkit.plm_holmes import plm_holmes +from gravity_toolkit.associated_legendre import plm_holmes def gen_stokes(data, lon, lat, LMIN=0, LMAX=60, MMAX=None, UNITS=1, PLM=None, LOVE=None): diff --git a/gravity_toolkit/grace_input_months.py b/gravity_toolkit/grace_input_months.py index b11d0081..813f9bfa 100644 --- a/gravity_toolkit/grace_input_months.py +++ b/gravity_toolkit/grace_input_months.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" grace_input_months.py -Written by Tyler Sutterley (11/2022) +Written by Tyler Sutterley (01/2023) Contributions by Hugo Lecomte and Yara Mohajerani Reads GRACE/GRACE-FO files for a specified spherical harmonic degree and order @@ -98,13 +98,17 @@ PROGRAM DEPENDENCIES: time.py: utilities for calculating time operations grace_date.py: reads GRACE index file and calculates dates for each month - read_SLR_C20.py: reads C20 files from satellite laser ranging (CSR or GSFC) - read_SLR_C30.py: reads C30 files from satellite laser ranging (CSR or GSFC) + SLR.C20.py: reads C20 files from satellite laser ranging (CSR or GSFC) + SLR.CS2.py: reads C2,S2 files from satellite laser ranging (CSR or GSFC) + SLR.C30.py: reads C30 files from satellite laser ranging (CSR or GSFC) + SLR.C40.py: reads C40 files from satellite laser ranging (CSR or GSFC) + SLR.C50.py: reads C50 files from satellite laser ranging (CSR or GSFC) geocenter.py: data class for reading and processing geocenter data read_GRACE_harmonics.py: read spherical harmonic data from SHM files read_gfc_harmonics.py: reads spherical harmonic data from gfc files UPDATE HISTORY: + Updated 01/2023: refactored satellite laser ranging read functions Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 10/2022: tilde-expansion of input working data directory Updated 09/2022: use logging for debugging level verbose output @@ -168,12 +172,8 @@ import logging import numpy as np import gravity_toolkit.geocenter +import gravity_toolkit.SLR from 
gravity_toolkit.grace_date import grace_date -from gravity_toolkit.read_SLR_C20 import read_SLR_C20 -from gravity_toolkit.read_SLR_CS2 import read_SLR_CS2 -from gravity_toolkit.read_SLR_C30 import read_SLR_C30 -from gravity_toolkit.read_SLR_C40 import read_SLR_C40 -from gravity_toolkit.read_SLR_C50 import read_SLR_C50 from gravity_toolkit.read_GRACE_harmonics import read_GRACE_harmonics from gravity_toolkit.read_gfc_harmonics import read_gfc_harmonics @@ -414,9 +414,7 @@ def grace_input_months(base_dir, PROC, DREL, DSET, LMAX, start_mon, end_mon, # SLR low-degree harmonic, geocenter and correction flags FLAGS = [] - # Replacing C2,0 with SLR C2,0 - # Running function read_SLR_C20.py - # reading SLR C2,0 file for given release if specified + # Replacing C2,0 with SLR values if (SLR_C20 == 'CSR'): if (DREL == 'RL04'): SLR_file = os.path.join(base_dir,'TN-05_C20_SLR.txt') @@ -428,31 +426,30 @@ def grace_input_months(base_dir, PROC, DREL, DSET, LMAX, start_mon, end_mon, # log SLR file if debugging logging.debug(f'Reading SLR C20 file: {SLR_file}') # read SLR file - C20_input = read_SLR_C20(SLR_file) + C20_input = gravity_toolkit.SLR.C20(SLR_file) FLAGS.append('_wCSR_C20') elif (SLR_C20 == 'GFZ'): SLR_file = os.path.join(base_dir,f'GFZ_{DREL}_C20_SLR.dat') # log SLR file if debugging logging.debug(f'Reading SLR C20 file: {SLR_file}') # read SLR file - C20_input = read_SLR_C20(SLR_file) + C20_input = gravity_toolkit.SLR.C20(SLR_file) FLAGS.append('_wGFZ_C20') elif (SLR_C20 == 'GSFC'): SLR_file = os.path.join(base_dir,'TN-14_C30_C20_GSFC_SLR.txt') # log SLR file if debugging logging.debug(f'Reading SLR C20 file: {SLR_file}') # read SLR file - C20_input = read_SLR_C20(SLR_file) + C20_input = gravity_toolkit.SLR.C20(SLR_file) FLAGS.append('_wGSFC_C20') - # Replacing C2,1/S2,1 with SLR - # Running function read_SLR_CS2.py + # Replacing C2,1/S2,1 with SLR values if (kwargs['SLR_21'] == 'CSR'): SLR_file = os.path.join(base_dir,f'C21_S21_{DREL}.txt') # log SLR file if debugging logging.debug(f'Reading SLR C21/S21 file: {SLR_file}') # read SLR file - C21_input = read_SLR_CS2(SLR_file) + C21_input = gravity_toolkit.SLR.CS2(SLR_file) FLAGS.append('_wCSR_21') elif (kwargs['SLR_21'] == 'GFZ'): GravIS_file = 'GRAVIS-2B_GFZOP_GRACE+SLR_LOW_DEGREES_0002.dat' @@ -460,7 +457,7 @@ def grace_input_months(base_dir, PROC, DREL, DSET, LMAX, start_mon, end_mon, # log SLR file if debugging logging.debug(f'Reading SLR C21/S21 file: {SLR_file}') # read SLR file - C21_input = read_SLR_CS2(SLR_file) + C21_input = gravity_toolkit.SLR.CS2(SLR_file) FLAGS.append('_wGFZ_21') elif (kwargs['SLR_21'] == 'GSFC'): # calculate monthly averages from 7-day arcs @@ -469,48 +466,48 @@ def grace_input_months(base_dir, PROC, DREL, DSET, LMAX, start_mon, end_mon, # log SLR file if debugging logging.debug(f'Reading SLR C21/S21 file: {SLR_file}') # read SLR file - C21_input = read_SLR_CS2(SLR_file, DATE=grace_Ylms['time'], ORDER=1) + C21_input = gravity_toolkit.SLR.CS2(SLR_file, + DATE=grace_Ylms['time'], ORDER=1) FLAGS.append('_wGSFC_21') - # Replacing C2,2/S2,2 with SLR - # Running function read_SLR_CS2.py + # Replacing C2,2/S2,2 with SLR values if (kwargs['SLR_22'] == 'CSR'): SLR_file = os.path.join(base_dir,f'C22_S22_{DREL}.txt') # log SLR file if debugging logging.debug(f'Reading SLR C22/S22 file: {SLR_file}') # read SLR file - C22_input = read_SLR_CS2(SLR_file) + C22_input = gravity_toolkit.SLR.CS2(SLR_file) FLAGS.append('_wCSR_22') elif (kwargs['SLR_22'] == 'GSFC'): SLR_file = os.path.join(base_dir,'gsfc_slr_5x5c61s61.txt') # log SLR 
file if debugging logging.debug(f'Reading SLR C22/S22 file: {SLR_file}') # read SLR file - C22_input = read_SLR_CS2(SLR_file, DATE=grace_Ylms['time'], ORDER=2) + C22_input = gravity_toolkit.SLR.CS2(SLR_file, + DATE=grace_Ylms['time'], ORDER=2) FLAGS.append('_wGSFC_22') - # Replacing C3,0 with SLR C3,0 - # Running function read_SLR_C30.py + # Replacing C3,0 with SLR values if (kwargs['SLR_C30'] == 'CSR'): SLR_file = os.path.join(base_dir,'CSR_Monthly_5x5_Gravity_Harmonics.txt') # log SLR file if debugging logging.debug(f'Reading SLR C30 file: {SLR_file}') # read SLR file - C30_input = read_SLR_C30(SLR_file) + C30_input = gravity_toolkit.SLR.C30(SLR_file) FLAGS.append('_wCSR_C30') elif (kwargs['SLR_C30'] == 'LARES'): SLR_file = os.path.join(base_dir,'C30_LARES_filtered.txt') # log SLR file if debugging logging.debug(f'Reading SLR C30 file: {SLR_file}') # read SLR file - C30_input = read_SLR_C30(SLR_file) + C30_input = gravity_toolkit.SLR.C30(SLR_file) FLAGS.append('_wLARES_C30') elif (kwargs['SLR_C30'] == 'GSFC'): SLR_file = os.path.join(base_dir,'TN-14_C30_C20_GSFC_SLR.txt') # log SLR file if debugging logging.debug(f'Reading SLR C30 file: {SLR_file}') # read SLR file - C30_input = read_SLR_C30(SLR_file) + C30_input = gravity_toolkit.SLR.C30(SLR_file) FLAGS.append('_wGSFC_C30') elif (kwargs['SLR_C30'] == 'GFZ'): GravIS_file = 'GRAVIS-2B_GFZOP_GRACE+SLR_LOW_DEGREES_0002.dat' @@ -518,48 +515,47 @@ def grace_input_months(base_dir, PROC, DREL, DSET, LMAX, start_mon, end_mon, # log SLR file if debugging logging.debug(f'Reading SLR C30 file: {SLR_file}') # read SLR file - C30_input = read_SLR_C30(SLR_file) + C30_input = gravity_toolkit.SLR.C30(SLR_file) FLAGS.append('_wGFZ_C30') - # Replacing C4,0 with SLR C4,0 - # Running function read_SLR_C40.py + # Replacing C4,0 with SLR values if (kwargs['SLR_C40'] == 'CSR'): SLR_file = os.path.join(base_dir,'CSR_Monthly_5x5_Gravity_Harmonics.txt') # log SLR file if debugging logging.debug(f'Reading SLR C40 file: {SLR_file}') # read SLR file - C40_input = read_SLR_C40(SLR_file) + C40_input = gravity_toolkit.SLR.C40(SLR_file) FLAGS.append('_wCSR_C40') elif (kwargs['SLR_C40'] == 'LARES'): SLR_file = os.path.join(base_dir,'C40_LARES_filtered.txt') # log SLR file if debugging logging.debug(f'Reading SLR C40 file: {SLR_file}') # read SLR file - C40_input = read_SLR_C40(SLR_file) + C40_input = gravity_toolkit.SLR.C40(SLR_file) FLAGS.append('_wLARES_C40') elif (kwargs['SLR_C40'] == 'GSFC'): SLR_file = os.path.join(base_dir,'gsfc_slr_5x5c61s61.txt') # log SLR file if debugging logging.debug(f'Reading SLR C40 file: {SLR_file}') # read SLR file - C40_input = read_SLR_C40(SLR_file, DATE=grace_Ylms['time']) + C40_input = gravity_toolkit.SLR.C40(SLR_file, + DATE=grace_Ylms['time']) FLAGS.append('_wGSFC_C40') - # Replacing C5,0 with SLR C5,0 - # Running function read_SLR_C50.py + # Replacing C5,0 with SLR values if (kwargs['SLR_C50'] == 'CSR'): SLR_file = os.path.join(base_dir,'CSR_Monthly_5x5_Gravity_Harmonics.txt') # log SLR file if debugging logging.debug(f'Reading SLR C50 file: {SLR_file}') # read SLR file - C50_input = read_SLR_C50(SLR_file) + C50_input = gravity_toolkit.SLR.C50(SLR_file) FLAGS.append('_wCSR_C50') elif (kwargs['SLR_C50'] == 'LARES'): SLR_file = os.path.join(base_dir,'C50_LARES_filtered.txt') # log SLR file if debugging logging.debug(f'Reading SLR C50 file: {SLR_file}') # read SLR file - C50_input = read_SLR_C50(SLR_file) + C50_input = gravity_toolkit.SLR.C50(SLR_file) FLAGS.append('_wLARES_C50') elif (kwargs['SLR_C50'] == 'GSFC'): # SLR_file = 
os.path.join(base_dir,'GSFC_SLR_C20_C30_C50_GSM_replacement.txt') @@ -567,7 +563,8 @@ def grace_input_months(base_dir, PROC, DREL, DSET, LMAX, start_mon, end_mon, # log SLR file if debugging logging.debug(f'Reading SLR C50 file: {SLR_file}') # read SLR file - C50_input = read_SLR_C50(SLR_file, DATE=grace_Ylms['time']) + C50_input = gravity_toolkit.SLR.C50(SLR_file, + DATE=grace_Ylms['time']) FLAGS.append('_wGSFC_C50') # Correcting for Degree 1 (geocenter variations) diff --git a/gravity_toolkit/harmonic_summation.py b/gravity_toolkit/harmonic_summation.py index 691b8fc7..bacaecf9 100755 --- a/gravity_toolkit/harmonic_summation.py +++ b/gravity_toolkit/harmonic_summation.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" harmonic_summation.py -Written by Tyler Sutterley (04/2022) +Written by Tyler Sutterley (01/2023) Returns the spatial field for a series of spherical harmonics @@ -24,16 +24,19 @@ numpy: Scientific Computing Tools For Python (https://numpy.org) PROGRAM DEPENDENCIES: - plm_holmes.py: Computes fully-normalized associated Legendre polynomials + associated_legendre.py: Computes fully-normalized associated + Legendre polynomials UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials + added fft-based transform function Updated 04/2022: updated docstrings to numpy documentation format Updated 07/2020: added function docstrings Updated 05/2015: added parameter MMAX for MMAX != LMAX. Written 05/2013 """ import numpy as np -from gravity_toolkit.plm_holmes import plm_holmes +from gravity_toolkit.associated_legendre import plm_holmes def harmonic_summation(clm1, slm1, lon, lat, LMIN=0, LMAX=60, MMAX=None, PLM=None): @@ -74,7 +77,7 @@ def harmonic_summation(clm1, slm1, lon, lat, # Longitude in radians phi = (np.squeeze(lon)*np.pi/180.0)[np.newaxis,:] - # Colatitude in radians + # colatitude in radians th = (90.0 - np.squeeze(lat))*np.pi/180.0 thmax = len(th) @@ -107,3 +110,76 @@ def harmonic_summation(clm1, slm1, lon, lat, # return output data return s + +def harmonic_transform(clm1, slm1, lon, lat, + LMIN=0, LMAX=60, MMAX=None, PLM=None): + """ + Converts data from spherical harmonic coefficients to a spatial field + using Fast-Fourier Transforms + + Parameters + ---------- + clm1: float + cosine spherical harmonic coefficients in output units + slm1: float + sine spherical harmonic coefficients in output units + lon: float + longitude array + lat: float + latitude array + LMIN: int, default 0 + Lower bound of Spherical Harmonic Degrees + LMAX: int, default 60 + Upper bound of Spherical Harmonic Degrees + MMAX: int or NoneType, default None + Upper bound of Spherical Harmonic Orders + PLM: float or NoneType, default None + Fully-normalized associated Legendre polynomials + + Returns + ------- + spatial: float + spatial field + """ + # if LMAX is not specified, will use the size of the input harmonics + if (LMAX == 0): + LMAX = np.shape(clm1)[0]-1 + # upper bound of spherical harmonic orders (default = LMAX) + if MMAX is None: + MMAX = np.copy(LMAX) + + # verify that longitudes cover the complete sphere + assert np.isclose(np.min(lon), 0.0) + assert np.isclose(np.max(lon), 360.0) + # number of longitudinal points + phimax = len(np.squeeze(lon)) + # colatitude in radians + th = (90.0 - np.squeeze(lat))*np.pi/180.0 + thmax = len(th) + + # combined Ylms and Fourier coefficients (complex) + Ylms = np.zeros((LMAX+1,MMAX+1),dtype=np.complex128) + delta_M = np.zeros((MMAX+1,thmax),dtype=np.complex128)# [m,th] + if PLM is None: + # if plms are not pre-computed: calculate 
Legendre polynomials + PLM, dPLM = plm_holmes(LMAX, np.cos(th)) + + # Real (cosine) and imaginary (sine) components + # Truncating harmonics to degree and order LMAX + # removing coefficients below LMIN and above MMAX + Ylms[LMIN:LMAX+1,:MMAX+1] = clm1[LMIN:LMAX+1,0:MMAX+1] - \ + slm1[LMIN:LMAX+1,0:MMAX+1]*1j + # calculate Ylms summation for each theta band + for k in range(0,thmax): + # summation over all spherical harmonic degrees + delta_M[:,k] = np.sum(PLM[:,:,k]*Ylms[:,:],axis=0)/2.0 + + # output spatial field from FFT transformation + s = np.zeros((phimax,thmax)) + # calculate fft for each theta band (over phis with axis=0) + s[:-1,:] = 2.0*(phimax-1)*np.fft.ifft(delta_M,n=phimax-1,axis=0).real + # complete sphere (values at 360 == values at 0) + s[-1,:] = s[0,:] + + # return output data + return s diff --git a/gravity_toolkit/harmonics.py b/gravity_toolkit/harmonics.py index a78a836c..46b8dc72 100644 --- a/gravity_toolkit/harmonics.py +++ b/gravity_toolkit/harmonics.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" harmonics.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Contributions by Hugo Lecomte Spherical harmonic data class for processing GRACE/GRACE-FO Level-2 data @@ -25,6 +25,8 @@ destripe_harmonics.py: filters spherical harmonics for correlated errors UPDATE HISTORY: + Updated 01/2023: made amplitude a property of the harmonics class + added property for the complex form of the spherical harmonics Updated 12/2022: add software information to output HDF5 and netCDF4 moved GIA model reader to be an inherited class of harmonics make harmonics objects iterable and with length @@ -1731,42 +1733,41 @@ def destripe(self, **kwargs): # return the destriped field return temp - def amplitude(self, mmax=None): + @property + def amplitude(self): """ - Calculates the degree amplitude of a harmonics object - - Parameters - ---------- - mmax: int or NoneType, default None - maximum order of spherical harmonics + Degree amplitude of the spherical harmonics """ # temporary matrix for squared harmonics temp = self.power(2) - # truncate to order mmax - if mmax is not None: - temp.truncate(self.lmax, mmax=mmax) # check if a single field or a temporal field if (self.ndim == 2): # allocate for degree amplitudes - self.amp = np.zeros((self.lmax+1)) + amp = np.zeros((self.lmax+1)) for l in range(self.lmax+1): # truncate at mmax - m = np.arange(l,temp.mmax+1) + m = np.arange(0,temp.mmax+1) # degree amplitude of spherical harmonic degree - self.amp[l] = np.sqrt(np.sum(temp.clm[l,m] + temp.slm[l,m])) - + amp[l] = np.sqrt(np.sum(temp.clm[l,m] + temp.slm[l,m])) else: # allocate for degree amplitudes n = self.shape[-1] - self.amp = np.zeros((self.lmax+1,n)) + amp = np.zeros((self.lmax+1,n)) for l in range(self.lmax+1): # truncate at mmax - m = np.arange(l,temp.mmax+1) + m = np.arange(0,temp.mmax+1) # degree amplitude of spherical harmonic degree var = temp.clm[l,m,:] + temp.slm[l,m,:] - self.amp[l,:] = np.sqrt(np.sum(var,axis=0)) - # return the harmonics object with degree amplitudes - return self + amp[l,:] = np.sqrt(np.sum(var, axis=0)) + # return the degree amplitudes + return amp + + @property + def ilm(self): + """ + Complex form of the spherical harmonics + """ + return self.clm - self.slm*1j def __len__(self): """Number of months diff --git a/gravity_toolkit/piecewise_regress.py b/gravity_toolkit/piecewise_regress.py index 85f1cb78..431d869a 100755 --- a/gravity_toolkit/piecewise_regress.py +++ b/gravity_toolkit/piecewise_regress.py @@ -88,13 +88,10 @@ Updated 01/2012: 
diff --git a/gravity_toolkit/piecewise_regress.py b/gravity_toolkit/piecewise_regress.py
index 85f1cb78..431d869a 100755
--- a/gravity_toolkit/piecewise_regress.py
+++ b/gravity_toolkit/piecewise_regress.py
@@ -88,13 +88,10 @@
     Updated 01/2012: added std weighting for a error weighted least-squares
     Written 10/2011
 """
-import numpy as np
-import scipy.stats
-import scipy.special
+import warnings
+import gravity_toolkit.time_series
 
-def piecewise_regress(t_in, d_in, BREAK_TIME=None, BREAKPOINT=None,
-    CYCLES=[0.5,1.0], DATA_ERR=0, WEIGHT=False, STDEV=0, CONF=0,
-    AICc=False):
+def piecewise_regress(*args, **kwargs):
     """
     Fits a synthetic signal to data over a time period by ordinary or
     weighted least-squares for breakpoint analysis [Toms2003]_
@@ -174,199 +171,8 @@ def piecewise_regress(t_in, d_in, BREAK_TIME=None, BREAKPOINT=None,
         2nd Edition, 488 pp., (2002). `doi: 10.1007/b97636 <https://doi.org/10.1007/b97636>`_
     """
-
-    t_in = np.squeeze(t_in)
-    d_in = np.squeeze(d_in)
-    nmax = len(t_in)
-
-    # If indice of cutoff time entered: will calculate cutoff time
-    # If cutoff time entered: will find the cutoff indice
-    if BREAKPOINT is not None:
-        tco = t_in[BREAKPOINT]
-        nco = np.squeeze(BREAKPOINT)
-    elif BREAK_TIME is not None:
-        nco = np.argmin(np.abs(t_in - BREAK_TIME))
-        tco = np.copy(BREAK_TIME)
-
-    # create design matrix for sharp breakpoint piecewise regression
-    # y = beta_0 + beta_1*t + e (for x <= alpha)
-    # y = beta_0 + beta_1*t + beta_2*(t-alpha) + e (for x > alpha)
-    DMAT = []
-    # add polynomial orders (0=constant, 1=linear)
-    for o in range(2):
-        DMAT.append(t_in**o)
-    # Linear Term 2 (change from linear term1: trend2 = beta1+beta2)
-    P_x1 = np.zeros((nmax))
-    P_x1[nco:] = t_in[nco:] - tco
-    DMAT.append(P_x1)
-    # add cyclical terms (0.5=semi-annual, 1=annual)
-    for c in CYCLES:
-        DMAT.append(np.sin(2.0*np.pi*t_in/np.float64(c)))
-        DMAT.append(np.cos(2.0*np.pi*t_in/np.float64(c)))
-    # take the transpose of the design matrix
-    DMAT = np.transpose(DMAT)
-
-    # Calculating Least-Squares Coefficients
-    if WEIGHT:
-        # Weighted Least-Squares fitting
-        if (np.ndim(DATA_ERR) == 0):
-            raise ValueError('Input DATA_ERR for Weighted Least-Squares')
-        # check if any error values are 0 (prevent infinite weights)
-        if np.count_nonzero(DATA_ERR == 0.0):
-            # change to minimum floating point value
-            DATA_ERR[DATA_ERR == 0.0] = np.finfo(np.float64).eps
-        # Weight Precision
-        wi = np.squeeze(DATA_ERR**(-2))
-        # If uncorrelated weights are the diagonal
-        W = np.diag(wi)
-        # Least-Squares fitting
-        # Temporary Matrix: Inv(X'.W.X)
-        TM1 = np.linalg.inv(np.dot(np.transpose(DMAT),np.dot(W,DMAT)))
-        # Temporary Matrix: (X'.W.Y)
-        TM2 = np.dot(np.transpose(DMAT),np.dot(W,d_in))
-        # Least Squares Solutions: Inv(X'.W.X).(X'.W.Y)
-        beta_mat = np.dot(TM1,TM2)
-    else:# Standard Least-Squares fitting (the [0] denotes coefficients output)
-        beta_mat = np.linalg.lstsq(DMAT,d_in,rcond=-1)[0]
-        # Weights are equal
-        wi = 1.0
-
-    # Calculating trend2 = beta1 + beta2
-    # beta2 = change in linear term from beta1
-    beta_out = np.copy(beta_mat)# output beta
-    beta_out[2] = beta_mat[1] + beta_mat[2]
-
-    # number of terms in least-squares solution
-    n_terms = len(beta_mat)
-    # modelled time-series
-    mod = np.dot(DMAT,beta_mat)
-    # time-series residuals
-    res = d_in[0:nmax] - np.dot(DMAT,beta_mat)
-    # Fitted Values without climate oscillations
-    simple = np.dot(DMAT[:,0:3],beta_mat[0:3])
-
-    # Error Analysis
-    # nu = Degrees of Freedom = number of measurements-number of parameters
-    nu = nmax - n_terms
-
-    # calculating R^2 values
-    # SStotal = sum((Y-mean(Y))**2)
-    SStotal = np.dot(np.transpose(d_in[0:nmax] - np.mean(d_in[0:nmax])),
-        (d_in[0:nmax] - np.mean(d_in[0:nmax])))
-    # SSerror = sum((Y-X*B)**2)
-    SSerror = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)),
-        (d_in[0:nmax] - 
np.dot(DMAT,beta_mat))) - # R**2 term = 1- SSerror/SStotal - rsquare = 1.0 - (SSerror/SStotal) - # Adjusted R**2 term: weighted by degrees of freedom - rsq_adj = 1.0 - (SSerror/SStotal)*np.float64((nmax-1.0)/nu) - # Fit Criterion - # number of parameters including the intercept and the variance - K = np.float64(n_terms + 1) - # Log-Likelihood with weights (if unweighted, weight portions == 0) - # log(L) = -0.5*n*log(sigma^2) - 0.5*n*log(2*pi) - 0.5*n - #log_lik = -0.5*nmax*(np.log(2.0 * np.pi) + 1.0 + np.log(np.sum((res**2)/nmax))) - log_lik = 0.5*(np.sum(np.log(wi)) - nmax*(np.log(2.0 * np.pi) + 1.0 - - np.log(nmax) + np.log(np.sum(wi * (res**2))))) - - # Aikaike's Information Criterion - AIC = -2.0*log_lik + 2.0*K - if AICc: - # Second-Order AIC correcting for small sample sizes (restricted) - # Burnham and Anderson (2002) advocate use of AICc where - # ratio num/K is small - # A small ratio is defined in the definition at approximately < 40 - AIC += (2.0*K*(K+1.0))/(nmax - K - 1.0) - - # Bayesian Information Criterion (Schwarz Criterion) - BIC = -2.0*log_lik + np.log(nmax)*K - - # Error Analysis - if WEIGHT: - # WEIGHTED LEAST-SQUARES CASE (unequal error) - # Covariance Matrix - Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),np.dot(W,DMAT))) - # Normal Equations - NORMEQ = np.dot(Hinv,np.transpose(np.dot(W,DMAT))) - temp_err = np.zeros((n_terms)) - # Propagating RMS errors - for i in range(0,n_terms): - temp_err[i] = np.sqrt(np.sum((NORMEQ[i,:]*DATA_ERR)**2)) - - # Recalculating beta2 error - beta_err = np.copy(temp_err) - beta_err[2] = np.sqrt(temp_err[1]**2 + temp_err[2]**2) - # Weighted sum of squares Error - WSSE = np.dot(np.transpose(wi*(d_in[0:nmax] - np.dot(DMAT,beta_mat))), - wi*(d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) - - return {'beta':beta_out, 'error':beta_err, 'R2':rsquare, - 'R2Adj':rsq_adj, 'WSSE':WSSE, 'AIC':AIC, 'BIC':BIC, - 'LOGLIK':log_lik, 'model':mod, 'residual':res, - 'N':n_terms, 'DOF':nu, 'cov_mat':Hinv} - - elif ((not WEIGHT) and (DATA_ERR != 0)): - # LEAST-SQUARES CASE WITH KNOWN AND EQUAL ERROR - P_err = DATA_ERR*np.ones((nmax)) - Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),DMAT)) - # Normal Equations - NORMEQ = np.dot(Hinv,np.transpose(DMAT)) - temp_err = np.zeros((n_terms)) - for i in range(0,n_terms): - temp_err[i] = np.sum((NORMEQ[i,:]*P_err)**2) - # Recalculating beta2 error - beta_err = np.copy(temp_err) - beta_err[2] = np.sqrt(temp_err[1]**2 + temp_err[2]**2) - # Mean square error - MSE = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), - (d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) - - return {'beta':beta_out, 'error':beta_err, 'R2':rsquare, - 'R2Adj':rsq_adj, 'MSE':MSE, 'AIC':AIC, 'BIC':BIC, - 'LOGLIK':log_lik, 'model':mod, 'residual':res, - 'N':n_terms, 'DOF':nu, 'cov_mat':Hinv} - else: - # STANDARD LEAST-SQUARES CASE - # Regression with Errors with Unknown Standard Deviations - # MSE = (1/nu)*sum((Y-X*B)**2) - # Mean square error - MSE = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), - (d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) - # Root mean square error - RMSE = np.sqrt(MSE) - # Normalized root mean square error - NRMSE = RMSE/(np.max(d_in[0:nmax])-np.min(d_in[0:nmax])) - # Covariance Matrix - # Multiplying the design matrix by itself - Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),DMAT)) - # Taking the diagonal components of the cov matrix - hdiag = np.diag(Hinv) - # set either the standard deviation or the confidence interval - if (STDEV != 0): - # Setting the standard deviation of the 
output error - alpha = 1.0 - scipy.special.erf(STDEV/np.sqrt(2.0)) - elif (CONF != 0): - # Setting the confidence interval of the output error - alpha = 1.0 - CONF - else: - # Default is 95% confidence interval - alpha = 1.0 - (0.95) - # Student T-Distribution with D.O.F. nu - # t.ppf parallels tinv in matlab - tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu) - # beta_err is the error for each coefficient - # beta_err = t(nu,1-alpha/2)*standard error - temp_std = np.sqrt(MSE*hdiag) - temp_err = tstar*temp_std - - # Recalculating standard error for beta2 - st_err = np.copy(temp_std) - st_err[2] = np.sqrt(temp_std[1]**2 + temp_std[2]**2) - # Recalculating beta2 error - beta_err = np.copy(temp_err) - beta_err[2] = np.sqrt(temp_err[1]**2 + temp_err[2]**2) - - return {'beta':beta_out, 'error':beta_err, 'std_err':st_err, 'R2':rsquare, - 'R2Adj':rsq_adj, 'MSE':MSE, 'NRMSE':NRMSE, 'AIC':AIC, 'BIC':BIC, - 'LOGLIK':log_lik, 'model':mod, 'simple': simple, 'residual':res, - 'N':n_terms, 'DOF': nu, 'cov_mat':Hinv} + warnings.filterwarnings("always") + warnings.warn("Deprecated. Please use gravity_toolkit.time_series instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.time_series.piecewise(*args,**kwargs) diff --git a/gravity_toolkit/plm_colombo.py b/gravity_toolkit/plm_colombo.py index fd239744..269aaa87 100755 --- a/gravity_toolkit/plm_colombo.py +++ b/gravity_toolkit/plm_colombo.py @@ -42,6 +42,7 @@ Updated 09/2013: new format for file headers Written 03/2013 """ +import warnings import numpy as np def plm_colombo(LMAX, x, ASTYPE=np.float64): @@ -85,6 +86,9 @@ def plm_colombo(LMAX, x, ASTYPE=np.float64): *Journal of Geodesy*, 76, 279--299, (2002). `doi: 10.1007/s00190-002-0216-2 `_ """ + warnings.filterwarnings("always") + warnings.warn("Deprecated. Please use gravity_toolkit.associated_legendre instead", + DeprecationWarning) # removing singleton dimensions of x x = np.atleast_1d(x).flatten().astype(ASTYPE) diff --git a/gravity_toolkit/plm_holmes.py b/gravity_toolkit/plm_holmes.py index 7802ebad..d193288a 100755 --- a/gravity_toolkit/plm_holmes.py +++ b/gravity_toolkit/plm_holmes.py @@ -53,6 +53,7 @@ Written 05/2015 """ from __future__ import division +import warnings import numpy as np def plm_holmes(LMAX, x, ASTYPE=np.float64): @@ -92,6 +93,9 @@ def plm_holmes(LMAX, x, ASTYPE=np.float64): *Journal of Geodesy*, 76, 279--299, (2002). `doi: 10.1007/s00190-002-0216-2 `_ """ + warnings.filterwarnings("always") + warnings.warn("Deprecated. Please use gravity_toolkit.associated_legendre instead", + DeprecationWarning) # removing singleton dimensions of x x = np.atleast_1d(x).flatten().astype(ASTYPE) diff --git a/gravity_toolkit/plm_mohlenkamp.py b/gravity_toolkit/plm_mohlenkamp.py index 0d6834da..1aabb73f 100755 --- a/gravity_toolkit/plm_mohlenkamp.py +++ b/gravity_toolkit/plm_mohlenkamp.py @@ -50,6 +50,7 @@ Updated 05/2015: added parameter MMAX for MMAX != LMAX Written 09/2013 """ +import warnings import numpy as np def plm_mohlenkamp(LMAX, x, MMAX=None): @@ -80,13 +81,16 @@ def plm_mohlenkamp(LMAX, x, MMAX=None): References ---------- .. [Mohlenkamp2016] M. J. Mohlenkamp, - "A User’s Guide to Spherical Harmonics", (2016). + "A User's Guide to Spherical Harmonics", (2016). `[pdf] `_ .. [Szego1939] Gabor Szeg\ |ouml|\ , "Orthogonal Polynomials", 440 pp., (1939). `[pdf] `_ .. |ouml| unicode:: U+00F6 .. LATIN SMALL LETTER O WITH DIAERESIS """ + warnings.filterwarnings("always") + warnings.warn("Deprecated. 
Please use gravity_toolkit.associated_legendre instead", + DeprecationWarning) # Verify LMAX as integer LMAX = np.int64(LMAX) diff --git a/gravity_toolkit/read_SLR_C20.py b/gravity_toolkit/read_SLR_C20.py index 1cdfcfd7..8e376634 100644 --- a/gravity_toolkit/read_SLR_C20.py +++ b/gravity_toolkit/read_SLR_C20.py @@ -106,13 +106,11 @@ Will accommodate upcoming GRACE RL05, which will use different SLR files Written 12/2011 """ -import os -import re -import numpy as np -import gravity_toolkit.time +import warnings +import gravity_toolkit.SLR # PURPOSE: read oblateness data from Satellite Laser Ranging (SLR) -def read_SLR_C20(SLR_file, AOD=True, HEADER=True): +def read_SLR_C20(*args, **kwargs): """ Reads C20 spherical harmonic coefficients from SLR measurements @@ -137,313 +135,8 @@ def read_SLR_C20(SLR_file, AOD=True, HEADER=True): date: float date of SLR measurement """ - - # check that SLR file exists - if not os.access(os.path.expanduser(SLR_file), os.F_OK): - raise FileNotFoundError('SLR file not found in file system') - - # output dictionary with data variables - dinput = {} - # determine if imported file is from PO.DAAC or CSR - if bool(re.search(r'C20_RL\d+',SLR_file,re.I)): - # SLR C20 file from CSR - # Just for checking new months when TN series isn't up to date as the - # SLR estimates always use the full set of days in each calendar month. - # format of the input file (note 64 bit floating point for C20) - # Column 1: Approximate mid-point of monthly solution (years) - # Column 2: C20 from SLR (normalized) - # Column 3: Delta C20 relative to a mean value (1E-10) - # Column 4: Solution sigma (1E-10) - # Column 5: Mean value of Atmosphere-Ocean De-aliasing model (1E-10) - # Columns 6-7: Start and end dates of data used in solution - dtype = {} - dtype['names'] = ('time','C20','delta','sigma','AOD','start','end') - dtype['formats'] = ('f','f8','f','f','f','f','f') - # header text is commented and won't be read - file_input = np.loadtxt(os.path.expanduser(SLR_file),dtype=dtype) - # date and GRACE/GRACE-FO month - dinput['time'] = file_input['time'] - dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) - # monthly spherical harmonic replacement solutions - dinput['data'] = file_input['C20'].copy() - # monthly spherical harmonic formal standard deviations - dinput['error'] = file_input['sigma']*1e-10 - # Background gravity model includes solid earth and ocean tides, solid - # earth and ocean pole tides, and the Atmosphere-Ocean De-aliasing - # product. The monthly mean of the AOD model has been restored. 
- if AOD: - # Removing AOD product that was restored in the solution - dinput['data'] -= file_input['AOD']*1e-10 - elif bool(re.search(r'GFZ_(RL\d+)_C20_SLR',SLR_file,re.I)): - # SLR C20 file from GFZ - # Column 1: MJD of BEGINNING of solution span - # Column 2: Year and fraction of year of BEGINNING of solution span - # Column 3: Replacement C(2,0) - # Column 4: Replacement C(2,0) - mean C(2,0) (1.0E-10) - # Column 5: C(2,0) formal error (1.0E-10) - with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: - file_contents = f.read().splitlines() - # number of lines contained in the file - file_lines = len(file_contents) - # counts the number of lines in the header - count = 0 - # Reading over header text - while HEADER: - # file line at count - line = file_contents[count] - # find PRODUCT: within line to set HEADER flag to False when found - HEADER = not bool(re.match(r'PRODUCT:+',line)) - # add 1 to counter - count += 1 - - # number of months within the file - n_mon = file_lines - count - # date and GRACE/GRACE-FO month - dinput['time'] = np.zeros((n_mon)) - dinput['month'] = np.zeros((n_mon),dtype=np.int64) - # monthly spherical harmonic replacement solutions - dinput['data'] = np.zeros((n_mon)) - # monthly spherical harmonic formal standard deviations - dinput['error'] = np.zeros((n_mon)) - # time count - t = 0 - # for every other line: - for line in file_contents[count:]: - # find numerical instances in line including exponents, - # decimal points and negatives - line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) - # check if line has G* or Gm flags - if bool(re.search(r'(G\*|Gm)',line)): - # reading decimal year for start of span - dinput['time'][t] = np.float64(line_contents[1]) - # Spherical Harmonic data for line - dinput['data'][t] = np.float64(line_contents[2]) - dinput['error'][t] = np.float64(line_contents[4])*1e-10 - # GRACE/GRACE-FO month of SLR solutions - dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( - dinput['time'][t], around=np.round) - # add to t count - t += 1 - # truncate variables if necessary - for key,val in dinput.items(): - dinput[key] = val[:t] - - elif bool(re.search(r'GRAVIS-2B_GFZOP',SLR_file,re.I)): - # Combined GRACE/SLR solution file produced by GFZ - # Column 1: MJD of BEGINNING of solution data span - # Column 2: Year and fraction of year of BEGINNING of solution span - # Column 3: Replacement C(2,0) - # Column 4: Replacement C(2,0) - mean C(2,0) (1.0E-10) - # Column 5: C(2,0) formal standard deviation (1.0E-12) - with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: - file_contents = f.read().splitlines() - # number of lines contained in the file - file_lines = len(file_contents) - - # counts the number of lines in the header - count = 0 - # Reading over header text - while HEADER: - # file line at count - line = file_contents[count] - # find PRODUCT: within line to set HEADER flag to False when found - HEADER = not bool(re.match(r'PRODUCT:+',line)) - # add 1 to counter - count += 1 - - # number of months within the file - n_mon = file_lines - count - # date and GRACE/GRACE-FO month - dinput['time'] = np.zeros((n_mon)) - dinput['month'] = np.zeros((n_mon),dtype=int) - # monthly spherical harmonic replacement solutions - dinput['data'] = np.zeros((n_mon)) - # monthly spherical harmonic formal standard deviations - dinput['error'] = np.zeros((n_mon)) - # time count - t = 0 - # for every other line: - for line in file_contents[count:]: - # find numerical instances in line including 
exponents, - # decimal points and negatives - line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) - count = len(line_contents) - # check for empty lines - if (count > 0): - # reading decimal year for start of span - dinput['time'][t] = np.float64(line_contents[1]) - # Spherical Harmonic data for line - dinput['data'][t] = np.float64(line_contents[2]) - dinput['error'][t] = np.float64(line_contents[4])*1e-10 - # GRACE/GRACE-FO month of SLR solutions - dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( - dinput['time'][t], around=np.round) - # add to t count - t += 1 - # truncate variables if necessary - for key,val in dinput.items(): - dinput[key] = val[:t] - - elif bool(re.search(r'TN-(11|14)',SLR_file,re.I)): - # SLR C20 RL06 file from PO.DAAC - with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: - file_contents = f.read().splitlines() - # number of lines contained in the file - file_lines = len(file_contents) - - # counts the number of lines in the header - count = 0 - # Reading over header text - while HEADER: - # file line at count - line = file_contents[count] - # find PRODUCT: within line to set HEADER flag to False when found - HEADER = not bool(re.match(r'PRODUCT:+',line,re.IGNORECASE)) - # add 1 to counter - count += 1 - - # number of months within the file - n_mon = file_lines - count - # date and GRACE/GRACE-FO month - dinput['time'] = np.zeros((n_mon)) - dinput['month'] = np.zeros((n_mon),dtype=np.int64) - # monthly spherical harmonic replacement solutions - dinput['data'] = np.zeros((n_mon)) - # monthly spherical harmonic formal standard deviations - dinput['error'] = np.zeros((n_mon)) - # time count - t = 0 - # for every other line: - for line in file_contents[count:]: - # find numerical instances in line including exponents, - # decimal points and negatives - line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) - # check for empty lines as there are - # slight differences in RL04 TN-05_C20_SLR.txt - # with blanks between the PRODUCT: line and the data - count = len(line_contents) - # if count is greater than 0 - if (count > 0): - # modified julian date for line - MJD = np.float64(line_contents[0]) - # converting from MJD into month, day and year - YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( - MJD+2400000.5, format='tuple') - # converting from month, day, year into decimal year - dinput['time'][t] = gravity_toolkit.time.convert_calendar_decimal( - YY, MM, day=DD, hour=hh) - # Spherical Harmonic data for line - dinput['data'][t] = np.float64(line_contents[2]) - dinput['error'][t] = np.float64(line_contents[4])*1e-10 - # GRACE/GRACE-FO month of SLR solutions - dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( - dinput['time'][t], around=np.round) - # add to t count - t += 1 - # truncate variables if necessary - for key,val in dinput.items(): - dinput[key] = val[:t] - else: - # SLR C20 file from PO.DAAC - with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: - file_contents = f.read().splitlines() - # number of lines contained in the file - file_lines = len(file_contents) - - # counts the number of lines in the header - count = 0 - # Reading over header text - while HEADER: - # file line at count - line = file_contents[count] - # find PRODUCT: within line to set HEADER flag to False when found - HEADER = not bool(re.match(r'PRODUCT:+',line)) - # add 1 to counter - count += 1 - - # number of months within the file - n_mon = file_lines - count - # GRACE/GRACE-FO dates - date_conv 
= np.zeros((n_mon)) - # monthly spherical harmonic replacement solutions - C20_input = np.zeros((n_mon)) - # monthly spherical harmonic formal standard deviations - eC20_input = np.zeros((n_mon)) - # flag denoting if replacement solution - slr_flag = np.zeros((n_mon),dtype=bool) - # time count - t = 0 - # for every other line: - for line in file_contents[count:]: - # find numerical instances in line including exponents, - # decimal points and negatives - line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) - # check for empty lines as there are - # slight differences in RL04 TN-05_C20_SLR.txt - # with blanks between the PRODUCT: line and the data - count = len(line_contents) - # if count is greater than 0 - if (count > 0): - # modified julian date for line - MJD = np.float64(line_contents[0]) - # converting from MJD into month, day and year - YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( - MJD+2400000.5, format='tuple') - # converting from month, day, year into decimal year - date_conv[t] = gravity_toolkit.time.convert_calendar_decimal( - YY, MM, day=DD, hour=hh) - # Spherical Harmonic data for line - C20_input[t] = np.float64(line_contents[2]) - eC20_input[t] = np.float64(line_contents[4])*1e-10 - # line has * flag - if bool(re.search(r'\*',line)): - slr_flag[t] = True - # add to t count - t += 1 - - # truncate for RL04 if necessary - date_conv = date_conv[:t] - C20_input = C20_input[:t] - eC20_input = eC20_input[:t] - slr_flag = slr_flag[:t] - - # GRACE/GRACE-FO month of SLR solutions - mon = gravity_toolkit.time.calendar_to_grace(date_conv,around=np.round) - # number of unique months - dinput['month'] = np.unique(mon) - n_uniq = len(dinput['month']) - # Removing overlapping months to use the data for - # months with limited GRACE accelerometer use - dinput['time'] = np.zeros((n_uniq)) - dinput['data'] = np.zeros((n_uniq)) - dinput['error'] = np.zeros((n_uniq)) - # New SLR datasets have * flags for the modified GRACE periods - # these GRACE months use half of a prior month in their solution - # this will find these months (marked above with slr_flag) - for t in range(n_uniq): - count = np.count_nonzero(mon == dinput['month'][t]) - # there is only one solution for the month - if (count == 1): - i = np.nonzero(mon == dinput['month'][t]) - dinput['time'][t] = date_conv[i] - dinput['data'][t] = C20_input[i] - dinput['error'][t] = eC20_input[i] - # there is a special solution for the month - # will the solution flagged with slr_flag - elif (count == 2): - i = np.nonzero((mon == dinput['month'][t]) & slr_flag) - dinput['time'][t] = date_conv[i] - dinput['data'][t] = C20_input[i] - dinput['error'][t] = eC20_input[i] - - # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with - # Accelerometer shutoffs make the relation between month number - # and date more complicated as days from other months are used - # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) - # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) - # For all: May 2015 (161) is centered in Apr 2015 (160) - # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) - dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) - - # return the SLR-derived oblateness solutions - return dinput \ No newline at end of file + warnings.filterwarnings("always") + warnings.warn("Deprecated. 
Please use gravity_toolkit.SLR instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.SLR.C20(*args,**kwargs) diff --git a/gravity_toolkit/read_SLR_C30.py b/gravity_toolkit/read_SLR_C30.py index 63c247e7..5fe14f8e 100644 --- a/gravity_toolkit/read_SLR_C30.py +++ b/gravity_toolkit/read_SLR_C30.py @@ -78,14 +78,11 @@ read CSR monthly 5x5 file and extract C3,0 coefficients Written 05/2019 """ -import os -import re -import numpy as np -import gravity_toolkit.time -import gravity_toolkit.read_SLR_harmonics +import warnings +import gravity_toolkit.SLR # PURPOSE: read Degree 3 zonal data from Satellite Laser Ranging (SLR) -def read_SLR_C30(SLR_file, C30_MEAN=9.5717395773300e-07, HEADER=True): +def read_SLR_C30(*args, **kwargs): """ Reads C30 spherical harmonic coefficients from SLR measurements @@ -109,160 +106,8 @@ def read_SLR_C30(SLR_file, C30_MEAN=9.5717395773300e-07, HEADER=True): time: float date of SLR measurement """ - - # check that SLR file exists - if not os.access(os.path.expanduser(SLR_file), os.F_OK): - raise FileNotFoundError('SLR file not found in file system') - # output dictionary with input data - dinput = {} - - if bool(re.search(r'TN-(14)',SLR_file,re.I)): - - # SLR C30 RL06 file from PO.DAAC produced by GSFC - with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: - file_contents = f.read().splitlines() - # number of lines contained in the file - file_lines = len(file_contents) - - # counts the number of lines in the header - count = 0 - # Reading over header text - while HEADER: - # file line at count - line = file_contents[count] - # find PRODUCT: within line to set HEADER flag to False when found - HEADER = not bool(re.match(r'Product:+',line)) - # add 1 to counter - count += 1 - - # number of months within the file - n_mon = file_lines - count - # date and GRACE/GRACE-FO month - dinput['time'] = np.zeros((n_mon)) - dinput['month'] = np.zeros((n_mon),dtype=int) - # monthly spherical harmonic replacement solutions - dinput['data'] = np.zeros((n_mon)) - # monthly spherical harmonic formal standard deviations - dinput['error'] = np.zeros((n_mon)) - # time count - t = 0 - # for every other line: - for line in file_contents[count:]: - # find numerical instances in line including exponents, - # decimal points and negatives - line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) - count = len(line_contents) - # only read lines where C30 data exists (don't read NaN lines) - if (count > 7): - # modified julian date for line - MJD = np.float64(line_contents[0]) - # converting from MJD into month, day and year - YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( - MJD+2400000.5, format='tuple') - # converting from month, day, year into decimal year - dinput['time'][t] = gravity_toolkit.time.convert_calendar_decimal( - YY, MM, day=DD, hour=hh) - # Spherical Harmonic data for line - dinput['data'][t] = np.float64(line_contents[5]) - dinput['error'][t] = np.float64(line_contents[7])*1e-10 - # GRACE/GRACE-FO month of SLR solutions - dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( - dinput['time'][t], around=np.round) - # add to t count - t += 1 - # verify that there imported C30 solutions - # (TN-14 data format has changed in the past) - if (t == 0): - raise Exception('No GSFC C30 data imported') - # truncate variables if necessary - for key,val in dinput.items(): - dinput[key] = val[:t] - elif bool(re.search(r'C30_LARES',SLR_file,re.I)): - # read LARES filtered values - LARES_input = 
np.loadtxt(SLR_file,skiprows=1) - dinput['time'] = LARES_input[:,0].copy() - # convert C30 from anomalies to absolute - dinput['data'] = 1e-10*LARES_input[:,1] + C30_MEAN - # filtered data does not have errors - dinput['error'] = np.zeros_like(LARES_input[:,1]) - # calculate GRACE/GRACE-FO month - dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) - elif bool(re.search(r'GRAVIS-2B_GFZOP',SLR_file,re.I)): - # Combined GRACE/SLR solution file produced by GFZ - # Column 1: MJD of BEGINNING of solution data span - # Column 2: Year and fraction of year of BEGINNING of solution span - # Column 6: Replacement C(3,0) - # Column 7: Replacement C(3,0) - mean C(3,0) (1.0E-10) - # Column 8: C(3,0) formal standard deviation (1.0E-12) - with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: - file_contents = f.read().splitlines() - # number of lines contained in the file - file_lines = len(file_contents) - - # counts the number of lines in the header - count = 0 - # Reading over header text - while HEADER: - # file line at count - line = file_contents[count] - # find PRODUCT: within line to set HEADER flag to False when found - HEADER = not bool(re.match(r'PRODUCT:+',line)) - # add 1 to counter - count += 1 - - # number of months within the file - n_mon = file_lines - count - # date and GRACE/GRACE-FO month - dinput['time'] = np.zeros((n_mon)) - dinput['month'] = np.zeros((n_mon),dtype=int) - # monthly spherical harmonic replacement solutions - dinput['data'] = np.zeros((n_mon)) - # monthly spherical harmonic formal standard deviations - dinput['error'] = np.zeros((n_mon)) - # time count - t = 0 - # for every other line: - for line in file_contents[count:]: - # find numerical instances in line including exponents, - # decimal points and negatives - line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) - count = len(line_contents) - # check for empty lines - if (count > 0): - # reading decimal year for start of span - dinput['time'][t] = np.float64(line_contents[1]) - # Spherical Harmonic data for line - dinput['data'][t] = np.float64(line_contents[5]) - dinput['error'][t] = np.float64(line_contents[7])*1e-10 - # GRACE/GRACE-FO month of SLR solutions - dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( - dinput['time'][t], around=np.round) - # add to t count - t += 1 - # truncate variables if necessary - for key,val in dinput.items(): - dinput[key] = val[:t] - else: - # CSR 5x5 + 6,1 file from CSR and extract C3,0 coefficients - Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) - # extract dates, C30 harmonics and errors - dinput['time'] = Ylms['time'].copy() - dinput['data'] = Ylms['clm'][3,0,:].copy() - dinput['error'] = Ylms['error']['clm'][3,0,:].copy() - # converting from MJD into month, day and year - YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( - Ylms['MJD']+2400000.5, format='tuple') - # calculate GRACE/GRACE-FO month - dinput['month'] = gravity_toolkit.time.calendar_to_grace(YY,MM) - - # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with - # Accelerometer shutoffs make the relation between month number - # and date more complicated as days from other months are used - # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) - # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) - # For all: May 2015 (161) is centered in Apr 2015 (160) - # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) - dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) - - # return the 
SLR-derived degree 3 zonal solutions - return dinput + warnings.filterwarnings("always") + warnings.warn("Deprecated. Please use gravity_toolkit.SLR instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.SLR.C30(*args,**kwargs) diff --git a/gravity_toolkit/read_SLR_C40.py b/gravity_toolkit/read_SLR_C40.py index 80671b5e..911206ae 100644 --- a/gravity_toolkit/read_SLR_C40.py +++ b/gravity_toolkit/read_SLR_C40.py @@ -45,14 +45,11 @@ UPDATE HISTORY: Written 09/2022 """ -import os -import re -import numpy as np -import gravity_toolkit.time -import gravity_toolkit.read_SLR_harmonics +import warnings +import gravity_toolkit.SLR # PURPOSE: read Degree 4 zonal data from Satellite Laser Ranging (SLR) -def read_SLR_C40(SLR_file, C40_MEAN=0.0, DATE=None, **kwargs): +def read_SLR_C40(*args, **kwargs): """ Reads C40 spherical harmonic coefficients from SLR measurements @@ -76,52 +73,8 @@ def read_SLR_C40(SLR_file, C40_MEAN=0.0, DATE=None, **kwargs): time: float date of SLR measurement """ - - # check that SLR file exists - if not os.access(os.path.expanduser(SLR_file), os.F_OK): - raise FileNotFoundError('SLR file not found in file system') - # output dictionary with input data - dinput = {} - - if bool(re.search(r'gsfc_slr_5x5c61s61',SLR_file,re.I)): - # read 5x5 + 6,1 file from GSFC and extract coefficients - Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) - # calculate 28-day moving-average solution from 7-day arcs - dinput.update(gravity_toolkit.convert_weekly(Ylms['time'], - Ylms['clm'][4,0,:], DATE=DATE, NEIGHBORS=28)) - # no estimated spherical harmonic errors - dinput['error'] = np.zeros_like(DATE,dtype='f8') - elif bool(re.search(r'C40_LARES',SLR_file,re.I)): - # read LARES filtered values - LARES_input = np.loadtxt(SLR_file,skiprows=1) - dinput['time'] = LARES_input[:,0].copy() - # convert C40 from anomalies to absolute - dinput['data'] = 1e-10*LARES_input[:,1] + C40_MEAN - # filtered data does not have errors - dinput['error'] = np.zeros_like(LARES_input[:,1]) - # calculate GRACE/GRACE-FO month - dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) - else: - # read 5x5 + 6,1 file from CSR and extract C4,0 coefficients - Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) - # extract dates, C40 harmonics and errors - dinput['time'] = Ylms['time'].copy() - dinput['data'] = Ylms['clm'][4,0,:].copy() - dinput['error'] = Ylms['error']['clm'][4,0,:].copy() - # converting from MJD into month, day and year - YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( - Ylms['MJD']+2400000.5, format='tuple') - # calculate GRACE/GRACE-FO month - dinput['month'] = gravity_toolkit.time.calendar_to_grace(YY,MM) - - # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with - # Accelerometer shutoffs make the relation between month number - # and date more complicated as days from other months are used - # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) - # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) - # For all: May 2015 (161) is centered in Apr 2015 (160) - # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) - dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) - - # return the SLR-derived degree 4 zonal solutions - return dinput + warnings.filterwarnings("always") + warnings.warn("Deprecated. 
Please use gravity_toolkit.SLR instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.SLR.C40(*args,**kwargs) diff --git a/gravity_toolkit/read_SLR_C50.py b/gravity_toolkit/read_SLR_C50.py index 7c7fcd3c..ab493113 100644 --- a/gravity_toolkit/read_SLR_C50.py +++ b/gravity_toolkit/read_SLR_C50.py @@ -55,14 +55,11 @@ Updated 07/2020: added function docstrings Written 11/2019 """ -import os -import re -import numpy as np -import gravity_toolkit.time -import gravity_toolkit.read_SLR_harmonics +import warnings +import gravity_toolkit.SLR # PURPOSE: read Degree 5 zonal data from Satellite Laser Ranging (SLR) -def read_SLR_C50(SLR_file, C50_MEAN=0.0, DATE=None, HEADER=True): +def read_SLR_C50(*args, **kwargs): """ Reads C50 spherical harmonic coefficients from SLR measurements @@ -88,112 +85,8 @@ def read_SLR_C50(SLR_file, C50_MEAN=0.0, DATE=None, HEADER=True): time: float date of SLR measurement """ - - # check that SLR file exists - if not os.access(os.path.expanduser(SLR_file), os.F_OK): - raise FileNotFoundError('SLR file not found in file system') - # output dictionary with input data - dinput = {} - - if bool(re.search(r'GSFC_SLR_C(20)_C(30)_C(50)',SLR_file,re.I)): - - # SLR C50 RL06 file from GSFC - with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: - file_contents = f.read().splitlines() - # number of lines contained in the file - file_lines = len(file_contents) - - # counts the number of lines in the header - count = 0 - # Reading over header text - while HEADER: - # file line at count - line = file_contents[count] - # find PRODUCT: within line to set HEADER flag to False when found - HEADER = not bool(re.match(r'Product:+',line)) - # add 1 to counter - count += 1 - - # number of months within the file - n_mon = file_lines - count - # date and GRACE/GRACE-FO month - dinput['time'] = np.zeros((n_mon)) - dinput['month'] = np.zeros((n_mon),dtype=int) - # monthly spherical harmonic replacement solutions - dinput['data'] = np.zeros((n_mon)) - # monthly spherical harmonic formal standard deviations - dinput['error'] = np.zeros((n_mon)) - # time count - t = 0 - # for every other line: - for line in file_contents[count:]: - # find numerical instances in line including exponents, - # decimal points and negatives - line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) - count = len(line_contents) - # only read lines where C50 data exists (don't read NaN lines) - if (count > 7): - # modified julian date for line - MJD = np.float64(line_contents[0]) - # converting from MJD into month, day and year - YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( - MJD+2400000.5, format='tuple') - # converting from month, day, year into decimal year - dinput['time'][t] = gravity_toolkit.time.convert_calendar_decimal( - YY, MM, day=DD, hour=hh) - # Spherical Harmonic data for line - dinput['data'][t] = np.float64(line_contents[10]) - dinput['error'][t] = np.float64(line_contents[12])*1e-10 - # GRACE/GRACE-FO month of SLR solutions - dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( - dinput['time'][t], around=np.round) - # add to t count - t += 1 - # verify that there imported C50 solutions - if (t == 0): - raise Exception('No GSFC C50 data imported') - # truncate variables if necessary - for key,val in dinput.items(): - dinput[key] = val[:t] - elif bool(re.search(r'gsfc_slr_5x5c61s61',SLR_file,re.I)): - # read 5x5 + 6,1 file from GSFC and extract coefficients - Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, 
HEADER=True) - # calculate 28-day moving-average solution from 7-day arcs - dinput.update(gravity_toolkit.convert_weekly(Ylms['time'], - Ylms['clm'][5,0,:], DATE=DATE, NEIGHBORS=28)) - # no estimated spherical harmonic errors - dinput['error'] = np.zeros_like(DATE,dtype='f8') - elif bool(re.search(r'C50_LARES',SLR_file,re.I)): - # read LARES filtered values - LARES_input = np.loadtxt(SLR_file,skiprows=1) - dinput['time'] = LARES_input[:,0].copy() - # convert C50 from anomalies to absolute - dinput['data'] = 1e-10*LARES_input[:,1] + C50_MEAN - # filtered data does not have errors - dinput['error'] = np.zeros_like(LARES_input[:,1]) - # calculate GRACE/GRACE-FO month - dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) - else: - # read 5x5 + 6,1 file from CSR and extract C5,0 coefficients - Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) - # extract dates, C50 harmonics and errors - dinput['time'] = Ylms['time'].copy() - dinput['data'] = Ylms['clm'][5,0,:].copy() - dinput['error'] = Ylms['error']['clm'][5,0,:].copy() - # converting from MJD into month, day and year - YY,MM,DD,hh,mm,ss = gravity_toolkit.time.convert_julian( - Ylms['MJD']+2400000.5, format='tuple') - # calculate GRACE/GRACE-FO month - dinput['month'] = gravity_toolkit.time.calendar_to_grace(YY,MM) - - # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with - # Accelerometer shutoffs make the relation between month number - # and date more complicated as days from other months are used - # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) - # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) - # For all: May 2015 (161) is centered in Apr 2015 (160) - # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) - dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) - - # return the SLR-derived degree 5 zonal solutions - return dinput + warnings.filterwarnings("always") + warnings.warn("Deprecated. 
Please use gravity_toolkit.SLR instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.SLR.C50(*args,**kwargs) diff --git a/gravity_toolkit/read_SLR_CS2.py b/gravity_toolkit/read_SLR_CS2.py index fbf631cf..aff8f845 100644 --- a/gravity_toolkit/read_SLR_CS2.py +++ b/gravity_toolkit/read_SLR_CS2.py @@ -75,14 +75,11 @@ Updated 04/2021: use adjust_months function to fix special months cases Written 11/2020 """ -import os -import re -import numpy as np -import gravity_toolkit.time -import gravity_toolkit.read_SLR_harmonics +import warnings +import gravity_toolkit.SLR # PURPOSE: read Degree 2,m data from Satellite Laser Ranging (SLR) -def read_SLR_CS2(SLR_file, ORDER=1, DATE=None, HEADER=True): +def read_SLR_CS2(*args, **kwargs): """ Reads CS2,m spherical harmonic coefficients from SLR measurements @@ -112,169 +109,8 @@ def read_SLR_CS2(SLR_file, ORDER=1, DATE=None, HEADER=True): time: float date of SLR measurement """ - - # check that SLR file exists - if not os.access(os.path.expanduser(SLR_file), os.F_OK): - raise FileNotFoundError('SLR file not found in file system') - # output dictionary with input data - dinput = {} - - if bool(re.search(r'GSFC_C2(\d)_S2(\d)',SLR_file,re.I)): - # 7-day arc SLR file produced by GSFC - # input variable names and types - dtype = {} - dtype['names'] = ('time','C2','S2') - dtype['formats'] = ('f','f8','f8') - # read SLR 2,1 file from GSFC - # Column 1: Approximate mid-point of 7-day solution (years) - # Column 2: Solution from SLR (normalized) - # Column 3: Solution from SLR (normalized) - content = np.loadtxt(os.path.expanduser(SLR_file),dtype=dtype) - # duplicate time and harmonics - tdec = np.repeat(content['time'],7) - c2m = np.repeat(content['C2'],7) - s2m = np.repeat(content['S2'],7) - # calculate daily dates to use in centered moving average - tdec += (np.mod(np.arange(len(tdec)),7) - 3.5)/365.25 - # number of dates to use in average - n_neighbors = 28 - # calculate 28-day moving-average solution from 7-day arcs - dinput['time'] = np.zeros_like(DATE) - dinput['C2m'] = np.zeros_like(DATE,dtype='f8') - dinput['S2m'] = np.zeros_like(DATE,dtype='f8') - # no estimated spherical harmonic errors - dinput['eC2m'] = np.zeros_like(DATE,dtype='f8') - dinput['eS2m'] = np.zeros_like(DATE,dtype='f8') - for i,D in enumerate(DATE): - isort = np.argsort((tdec - D)**2)[:n_neighbors] - dinput['time'][i] = np.mean(tdec[isort]) - dinput['C2m'][i] = np.mean(c2m[isort]) - dinput['S2m'][i] = np.mean(s2m[isort]) - # GRACE/GRACE-FO month - dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) - elif bool(re.search(r'gsfc_slr_5x5c61s61',SLR_file,re.I)): - # read 5x5 + 6,1 file from GSFC and extract coefficients - Ylms = gravity_toolkit.read_SLR_harmonics(SLR_file, HEADER=True) - # duplicate time and harmonics - tdec = np.repeat(Ylms['time'],7) - c2m = np.repeat(Ylms['clm'][2,ORDER],7) - s2m = np.repeat(Ylms['slm'][2,ORDER],7) - # calculate daily dates to use in centered moving average - tdec += (np.mod(np.arange(len(tdec)),7) - 3.5)/365.25 - # number of dates to use in average - n_neighbors = 28 - # calculate 28-day moving-average solution from 7-day arcs - dinput['time'] = np.zeros_like(DATE) - dinput['C2m'] = np.zeros_like(DATE,dtype='f8') - dinput['S2m'] = np.zeros_like(DATE,dtype='f8') - # no estimated spherical harmonic errors - dinput['eC2m'] = np.zeros_like(DATE,dtype='f8') - dinput['eS2m'] = np.zeros_like(DATE,dtype='f8') - for i,D in enumerate(DATE): - isort = np.argsort((tdec - D)**2)[:n_neighbors] - 
dinput['time'][i] = np.mean(tdec[isort]) - dinput['C2m'][i] = np.mean(c2m[isort]) - dinput['S2m'][i] = np.mean(s2m[isort]) - # GRACE/GRACE-FO month - dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) - elif bool(re.search(r'C2(\d)_S2(\d)_(RL\d{2})',SLR_file,re.I)): - # SLR RL06 file produced by CSR - # input variable names and types - dtype = {} - dtype['names'] = ('time','C2','S2','eC2','eS2', - 'C2aod','S2aod','start','end') - dtype['formats'] = ('f','f8','f8','f','f','f','f','f','f') - # read SLR 2,1 or 2,2 RL06 file from CSR - # header text is commented and won't be read - # Column 1: Approximate mid-point of monthly solution (years) - # Column 2: Solution from SLR (normalized) - # Column 3: Solution from SLR (normalized) - # Column 4: Solution sigma (1E-10) - # Column 5: Solution sigma (1E-10) - # Column 6: Mean value of Atmosphere-Ocean De-aliasing model (1E-10) - # Column 7: Mean value of Atmosphere-Ocean De-aliasing model (1E-10) - # Columns 8-9: Start and end dates of data used in solution - content = np.loadtxt(os.path.expanduser(SLR_file),dtype=dtype) - # date and GRACE/GRACE-FO month - dinput['time'] = content['time'].copy() - dinput['month'] = gravity_toolkit.time.calendar_to_grace(dinput['time']) - # remove the monthly mean of the AOD model - dinput['C2m'] = content['C2'] - content['C2aod']*10**-10 - dinput['S2m'] = content['S2'] - content['S2aod']*10**-10 - # scale SLR solution sigmas - dinput['eC2m'] = content['eC2']*10**-10 - dinput['eS2m'] = content['eS2']*10**-10 - elif bool(re.search(r'GRAVIS-2B_GFZOP',SLR_file,re.I)): - # Combined GRACE/SLR solution file produced by GFZ - # Column 1: MJD of BEGINNING of solution data span - # Column 2: Year and fraction of year of BEGINNING of solution span - # Column 9: Replacement C(2,1) - # Column 10: Replacement C(2,1) - mean C(2,1) (1.0E-10) - # Column 11: C(2,1) formal standard deviation (1.0E-12) - # Column 12: Replacement S(2,1) - # Column 13: Replacement S(2,1) - mean S(2,1) (1.0E-10) - # Column 14: S(2,1) formal standard deviation (1.0E-12) - with open(os.path.expanduser(SLR_file), mode='r', encoding='utf8') as f: - file_contents = f.read().splitlines() - # number of lines contained in the file - file_lines = len(file_contents) - - # counts the number of lines in the header - count = 0 - # Reading over header text - while HEADER: - # file line at count - line = file_contents[count] - # find PRODUCT: within line to set HEADER flag to False when found - HEADER = not bool(re.match(r'PRODUCT:+',line)) - # add 1 to counter - count += 1 - - # number of months within the file - n_mon = file_lines - count - # date and GRACE/GRACE-FO month - dinput['time'] = np.zeros((n_mon)) - dinput['month'] = np.zeros((n_mon),dtype=int) - # monthly spherical harmonic replacement solutions - dinput['C2m'] = np.zeros((n_mon)) - dinput['S2m'] = np.zeros((n_mon)) - # monthly spherical harmonic formal standard deviations - dinput['eC2m'] = np.zeros((n_mon)) - dinput['eS2m'] = np.zeros((n_mon)) - # time count - t = 0 - # for every other line: - for line in file_contents[count:]: - # find numerical instances in line including exponents, - # decimal points and negatives - line_contents = re.findall(r'[-+]?\d*\.\d*(?:[eE][-+]?\d+)?',line) - count = len(line_contents) - # check for empty lines - if (count > 0): - # reading decimal year for start of span - dinput['time'][t] = np.float64(line_contents[1]) - # Spherical Harmonic data for line - dinput['C2m'][t] = np.float64(line_contents[8]) - dinput['eC2m'][t] = 
np.float64(line_contents[10])*1e-10 - dinput['S2m'][t] = np.float64(line_contents[11]) - dinput['eS2m'][t] = np.float64(line_contents[13])*1e-10 - # GRACE/GRACE-FO month of SLR solutions - dinput['month'][t] = gravity_toolkit.time.calendar_to_grace( - dinput['time'][t], around=np.round) - # add to t count - t += 1 - # truncate variables if necessary - for key,val in dinput.items(): - dinput[key] = val[:t] - - # The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with - # Accelerometer shutoffs make the relation between month number - # and date more complicated as days from other months are used - # For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118) - # For JPL: Dec 2011 (120) is centered in Jan 2012 (121) - # For all: May 2015 (161) is centered in Apr 2015 (160) - # For GSFC: Oct 2018 (202) is centered in Nov 2018 (203) - dinput['month'] = gravity_toolkit.time.adjust_months(dinput['month']) - - # return the SLR-derived degree 2 solutions - return dinput + warnings.filterwarnings("always") + warnings.warn("Deprecated. Please use gravity_toolkit.SLR instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.SLR.CS2(*args,**kwargs) diff --git a/gravity_toolkit/savitzky_golay.py b/gravity_toolkit/savitzky_golay.py index 7a0eebfe..67b983ec 100644 --- a/gravity_toolkit/savitzky_golay.py +++ b/gravity_toolkit/savitzky_golay.py @@ -54,13 +54,10 @@ Updated 08/2015: changed sys.exit to raise ValueError Written 06/2014 """ -from __future__ import print_function, division +import warnings +import gravity_toolkit.time_series -import numpy as np -import scipy.special - -def savitzky_golay(t_in, y_in, WINDOW=None, ORDER=2, DERIV=0, - RATE=1, DATA_ERR=0): +def savitzky_golay(*args, **kwargs): """ Smooth and optionally differentiate data with a Savitzky-Golay filter [Savitzky1964]_ [Press2007]_ @@ -104,46 +101,8 @@ def savitzky_golay(t_in, y_in, WINDOW=None, ORDER=2, DERIV=0, Computing*, W.H. Press, S.A. Teukolsky, W. T. Vetterling, B.P. Flannery. Cambridge University Press, (2007). 
""" - - # verify that WINDOW is positive, odd and greater than ORDER+1 - if WINDOW is None: - WINDOW = ORDER + -1*(ORDER % 2) + 3 - if WINDOW % 2 != 1 or WINDOW < 1: - raise ValueError("WINDOW size must be a positive odd number") - if WINDOW < ORDER + 2: - raise ValueError("WINDOW is too small for the polynomials order") - # remove any singleton dimensions - t_in = np.squeeze(t_in) - y_in = np.squeeze(y_in) - nmax = len(t_in) - - # order range - order_range = np.arange(ORDER+1) - # filter half-window - half_window = (WINDOW - 1) // 2 - # output time-series (removing half-windows on ends) - t_out = t_in[half_window:nmax-half_window] - # output smoothed timeseries (or derivative) - y_out = np.zeros((nmax-2*half_window)) - y_err = np.zeros((nmax-2*half_window)) - for n in range(0, (nmax-(2*half_window))): - yran = y_in[n + np.arange(0, 2*half_window+1)] - # Vandermonde matrix for the time-series - b = np.mat([[(t_in[k]-t_in[n+half_window])**i for i in order_range] - for k in range(n, n+2*half_window+1)]) - # compute the pseudoinverse of the design matrix - m=np.linalg.pinv(b).A[DERIV]*RATE**DERIV*scipy.special.factorial(DERIV) - # pad the signal at the extremes with values taken from the signal - firstvals = yran[0] - np.abs(yran[1:half_window+1][::-1] - yran[0]) - lastvals = yran[-1] + np.abs(yran[-half_window-1:-1][::-1] - yran[-1]) - yn = np.concatenate((firstvals, yran, lastvals)) - # compute the convolution and use middle value - y_out[n] = np.convolve(m[::-1], yn, mode='valid')[half_window] - if (DATA_ERR != 0): - # if data error is known and of equal value - P_err = DATA_ERR*np.ones((4*half_window+1)) - # compute the convolution and use middle value - y_err[n] = np.sqrt(np.convolve(m[::-1]**2, P_err**2, - mode='valid')[half_window]) - - return {'data':y_out, 'error':y_err, 'time':t_out} + warnings.filterwarnings("always") + warnings.warn("Deprecated. 
Please use gravity_toolkit.time_series instead",
+        DeprecationWarning)
+    # call renamed version to not break workflows
+    return gravity_toolkit.time_series.savitzky_golay(*args,**kwargs)
diff --git a/gravity_toolkit/sea_level_equation.py b/gravity_toolkit/sea_level_equation.py
index 00fd2f38..9d69a22a 100644
--- a/gravity_toolkit/sea_level_equation.py
+++ b/gravity_toolkit/sea_level_equation.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 u"""
-sea_level_equation.py (11/2022)
+sea_level_equation.py (01/2023)
 Solves the sea level equation with the option of including polar motion feedback
 Uses a Clenshaw summation to calculate the spherical harmonic summation
 
@@ -43,7 +43,8 @@
     numpy: Scientific Computing Tools For Python (https://numpy.org)
 
 PROGRAM DEPENDENCIES:
-    plm_holmes.py: Computes fully normalized associated Legendre polynomials
+    associated_legendre.py: Computes fully normalized associated
+        Legendre polynomials
     gen_harmonics.py: Computes spherical harmonic coefficients from a grid
     units.py: class for converting spherical harmonic data to specific units
    harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO
@@ -89,6 +90,7 @@
     https://doi.org/10.1029/JB090iB11p09363
 
 UPDATE HISTORY:
+    Updated 01/2023: refactored associated Legendre polynomials
     Updated 11/2022: use f-strings for formatting verbose or ascii output
     Updated 04/2022: updated docstrings to numpy documentation format
     Updated 10/2021: using python logging for handling verbose output
@@ -120,7 +122,7 @@
 import logging
 import numpy as np
 from gravity_toolkit.gen_harmonics import gen_harmonics
-from gravity_toolkit.plm_holmes import plm_holmes
+from gravity_toolkit.associated_legendre import plm_holmes
 from gravity_toolkit.units import units
 
 # PURPOSE: Computes Sea Level Fingerprints including polar motion feedback
diff --git a/gravity_toolkit/time_series/__init__.py b/gravity_toolkit/time_series/__init__.py
new file mode 100644
index 00000000..05063db6
--- /dev/null
+++ b/gravity_toolkit/time_series/__init__.py
@@ -0,0 +1,5 @@
+from .amplitude import *
+from .piecewise import *
+from .regress import *
+from .savitzky_golay import *
+from .smooth import *
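The new subpackage re-exports each refactored routine, so everything is reached as ``gravity_toolkit.time_series.<name>``, while the old module-level functions only emit a ``DeprecationWarning`` and forward their arguments. A minimal sketch with the relocated Savitzky-Golay filter (the synthetic series and smoothing parameters are illustrative assumptions):

.. code-block:: python

    import numpy as np
    import gravity_toolkit.time_series

    # synthetic monthly time series with an annual cycle (arbitrary values)
    t = 2002.0 + np.arange(120)/12.0
    d = 3.0*np.sin(2.0*np.pi*t) + 0.5*np.random.randn(120)
    # WINDOW must be a positive odd number of at least ORDER + 2 points
    sg = gravity_toolkit.time_series.savitzky_golay(t, d, WINDOW=13, ORDER=2)
    # returns a dictionary with the trimmed 'time', smoothed 'data' and 'error'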
diff --git a/gravity_toolkit/time_series/amplitude.py b/gravity_toolkit/time_series/amplitude.py
new file mode 100755
index 00000000..7d174cb2
--- /dev/null
+++ b/gravity_toolkit/time_series/amplitude.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+u"""
+amplitude.py
+Written by Tyler Sutterley (01/2023)
+
+Calculate the amplitude and phase of a harmonic function from calculated
+    sine and cosine of a series of measurements
+
+CALLING SEQUENCE:
+    ampl,ph = gravity_toolkit.time_series.amplitude(bsin, bcos)
+
+INPUTS:
+    bsin: amplitude of the calculated sine values
+    bcos: amplitude of the calculated cosine values
+
+OUTPUTS:
+    ampl: amplitude from the harmonic functions
+    ph: phase from the harmonic functions in degrees
+
+PYTHON DEPENDENCIES:
+    numpy: Scientific Computing Tools For Python (https://numpy.org)
+
+UPDATE HISTORY:
+    Updated 01/2023: refactored time series analysis functions
+    Updated 04/2022: updated docstrings to numpy documentation format
+    Updated 07/2020: added function docstrings
+    Updated 10/2019: output both amplitude and phase
+    Updated 05/2013: converted to python
+    Written 07/2012
+"""
+import numpy as np
+
+def amplitude(bsin, bcos):
+    """
+    Calculate the amplitude and phase of a harmonic function
+
+    Parameters
+    ----------
+    bsin: float
+        amplitude of the calculated sine values
+    bcos: float
+        amplitude of the calculated cosine values
+
+    Returns
+    -------
+    ampl: float
+        amplitude from the harmonic functions
+    ph: float
+        phase from the harmonic functions in degrees
+    """
+    ampl = np.sqrt(bsin**2.0 + bcos**2.0)
+    ph = 180.0*np.arctan2(bcos, bsin)/np.pi
+    return (ampl,ph)
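A short worked example of the function defined above (the input coefficients are arbitrary):

.. code-block:: python

    import gravity_toolkit.time_series

    # sine and cosine amplitudes from a seasonal least-squares fit
    bsin, bcos = 3.0, 4.0
    ampl, ph = gravity_toolkit.time_series.amplitude(bsin, bcos)
    # ampl = sqrt(3.0**2 + 4.0**2) = 5.0
    # ph = 180.0*arctan2(4.0, 3.0)/pi, approximately 53.13 degrees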
statements
+    Updated 10/2013: updated Log-likelihood (converted from Least-Squares (LS)
+        log-likelihood to maximum likelihood (ML) log-likelihood)
+        Added calculation for AICc (corrected for small sample sizes)
+    Updated 09/2013: updated weighted least-squares and added AIC, BIC and LOGLIK
+        options for parameter evaluation
+        Added cases with known standard deviations and weighted least-squares
+        Minor change: P_cons, P_lin1 and P_lin2 changed to P_x0, P_x1a and P_x1b
+    Updated 07/2013: updated errors for beta2
+    Updated 05/2013: converted to python
+    Updated 06/2012: added options for MSE and NRMSE. adjusted rsq_adj
+    Updated 06/2012: changed design matrix creation to 'FIT_TYPE'
+        added r_square to calculating goodness of fit compared to others
+    Updated 03/2012: combined polynomial and harmonic regression functions
+    Updated 01/2012: added std weighting for an error-weighted least-squares
+    Written 10/2011
+"""
+import numpy as np
+import scipy.stats
+import scipy.special
+
+def piecewise(t_in, d_in, BREAK_TIME=None, BREAKPOINT=None,
+    CYCLES=[0.5,1.0], DATA_ERR=0, WEIGHT=False, STDEV=0, CONF=0,
+    AICc=False):
+    """
+    Fits a synthetic signal to data over a time period by ordinary or
+    weighted least-squares for breakpoint analysis [Toms2003]_
+
+    Parameters
+    ----------
+    t_in: float
+        input time array
+    d_in: float
+        input data array
+    BREAK_TIME: float or NoneType, default None
+        breakpoint time for piecewise regression
+    BREAKPOINT: int or NoneType, default None
+        breakpoint index of piecewise regression
+    CYCLES: list, default [0.5, 1.0]
+        list of cyclical terms in fractions of year
+    DATA_ERR: float or list
+        data precision
+
+        - single value if equal
+        - array if unequal for weighted least squares
+    WEIGHT: bool, default False
+        Use weighted least squares with measurement errors
+    STDEV: float, default 0
+        Standard deviation of output error
+    CONF: float, default 0
+        Confidence interval of output error
+    AICc: bool, default False
+        Use second order AIC for small sample sizes [Burnham2002]_
+
+    Returns
+    -------
+    beta: float
+        regressed coefficients array
+    error: float
+        regression fit error for each coefficient for an input deviation
+
+        - ``STDEV``: standard deviation of output error
+        - ``CONF``: confidence interval of output error
+    std_err: float
+        standard error for each coefficient
+    R2: float
+        coefficient of determination (r\ :sup:`2`)
+    R2Adj: float
+        r\ :sup:`2` adjusted for the number of terms in the model
+    MSE: float
+        mean square error
+    WSSE: float
+        Weighted sum of squares error
+    NRMSE: float
+        normalized root mean square error
+    AIC: float
+        Akaike information criterion
+    BIC: float
+        Bayesian information criterion (Schwarz criterion)
+    model: float
+        modeled timeseries
+    simple: float
+        modeled timeseries without oscillating components
+    residual: float
+        model residual
+    DOF: int
+        degrees of freedom
+    N: int
+        number of terms used in fit
+    cov_mat: float
+        covariance matrix
+
+    References
+    ----------
+    .. [Toms2003] J. D. Toms and M. L. Lesperance,
+        "Piecewise Regression: A Tool For Identifying Ecological
+        Thresholds", *Ecology*, 84, 2034-2041, (2003).
+        `doi: 10.1890/02-0472 <https://doi.org/10.1890/02-0472>`_
+    .. [Burnham2002] K. P. Burnham and D. R. Anderson,
+        *Model Selection and Multimodel Inference*,
+        2nd Edition, 488 pp., (2002).
+ `doi: 10.1007/b97636 `_ + """ + + t_in = np.squeeze(t_in) + d_in = np.squeeze(d_in) + nmax = len(t_in) + + # If indice of cutoff time entered: will calculate cutoff time + # If cutoff time entered: will find the cutoff indice + if BREAKPOINT is not None: + tco = t_in[BREAKPOINT] + nco = np.squeeze(BREAKPOINT) + elif BREAK_TIME is not None: + nco = np.argmin(np.abs(t_in - BREAK_TIME)) + tco = np.copy(BREAK_TIME) + + # create design matrix for sharp breakpoint piecewise regression + # y = beta_0 + beta_1*t + e (for x <= alpha) + # y = beta_0 + beta_1*t + beta_2*(t-alpha) + e (for x > alpha) + DMAT = [] + # add polynomial orders (0=constant, 1=linear) + for o in range(2): + DMAT.append(t_in**o) + # Linear Term 2 (change from linear term1: trend2 = beta1+beta2) + P_x1 = np.zeros((nmax)) + P_x1[nco:] = t_in[nco:] - tco + DMAT.append(P_x1) + # add cyclical terms (0.5=semi-annual, 1=annual) + for c in CYCLES: + DMAT.append(np.sin(2.0*np.pi*t_in/np.float64(c))) + DMAT.append(np.cos(2.0*np.pi*t_in/np.float64(c))) + # take the transpose of the design matrix + DMAT = np.transpose(DMAT) + + # Calculating Least-Squares Coefficients + if WEIGHT: + # Weighted Least-Squares fitting + if (np.ndim(DATA_ERR) == 0): + raise ValueError('Input DATA_ERR for Weighted Least-Squares') + # check if any error values are 0 (prevent infinite weights) + if np.count_nonzero(DATA_ERR == 0.0): + # change to minimum floating point value + DATA_ERR[DATA_ERR == 0.0] = np.finfo(np.float64).eps + # Weight Precision + wi = np.squeeze(DATA_ERR**(-2)) + # If uncorrelated weights are the diagonal + W = np.diag(wi) + # Least-Squares fitting + # Temporary Matrix: Inv(X'.W.X) + TM1 = np.linalg.inv(np.dot(np.transpose(DMAT),np.dot(W,DMAT))) + # Temporary Matrix: (X'.W.Y) + TM2 = np.dot(np.transpose(DMAT),np.dot(W,d_in)) + # Least Squares Solutions: Inv(X'.W.X).(X'.W.Y) + beta_mat = np.dot(TM1,TM2) + else:# Standard Least-Squares fitting (the [0] denotes coefficients output) + beta_mat = np.linalg.lstsq(DMAT,d_in,rcond=-1)[0] + # Weights are equal + wi = 1.0 + + # Calculating trend2 = beta1 + beta2 + # beta2 = change in linear term from beta1 + beta_out = np.copy(beta_mat)# output beta + beta_out[2] = beta_mat[1] + beta_mat[2] + + # number of terms in least-squares solution + n_terms = len(beta_mat) + # modelled time-series + mod = np.dot(DMAT,beta_mat) + # time-series residuals + res = d_in[0:nmax] - np.dot(DMAT,beta_mat) + # Fitted Values without climate oscillations + simple = np.dot(DMAT[:,0:3],beta_mat[0:3]) + + # Error Analysis + # nu = Degrees of Freedom = number of measurements-number of parameters + nu = nmax - n_terms + + # calculating R^2 values + # SStotal = sum((Y-mean(Y))**2) + SStotal = np.dot(np.transpose(d_in[0:nmax] - np.mean(d_in[0:nmax])), + (d_in[0:nmax] - np.mean(d_in[0:nmax]))) + # SSerror = sum((Y-X*B)**2) + SSerror = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), + (d_in[0:nmax] - np.dot(DMAT,beta_mat))) + # R**2 term = 1- SSerror/SStotal + rsquare = 1.0 - (SSerror/SStotal) + # Adjusted R**2 term: weighted by degrees of freedom + rsq_adj = 1.0 - (SSerror/SStotal)*np.float64((nmax-1.0)/nu) + # Fit Criterion + # number of parameters including the intercept and the variance + K = np.float64(n_terms + 1) + # Log-Likelihood with weights (if unweighted, weight portions == 0) + # log(L) = -0.5*n*log(sigma^2) - 0.5*n*log(2*pi) - 0.5*n + #log_lik = -0.5*nmax*(np.log(2.0 * np.pi) + 1.0 + np.log(np.sum((res**2)/nmax))) + log_lik = 0.5*(np.sum(np.log(wi)) - nmax*(np.log(2.0 * np.pi) + 1.0 - + np.log(nmax) + 
np.log(np.sum(wi * (res**2)))))
+
+    # Akaike's Information Criterion
+    AIC = -2.0*log_lik + 2.0*K
+    if AICc:
+        # Second-Order AIC correcting for small sample sizes (restricted)
+        # Burnham and Anderson (2002) advocate use of AICc where
+        # ratio num/K is small
+        # A small ratio is defined in the definition at approximately < 40
+        AIC += (2.0*K*(K+1.0))/(nmax - K - 1.0)
+
+    # Bayesian Information Criterion (Schwarz Criterion)
+    BIC = -2.0*log_lik + np.log(nmax)*K
+
+    # Error Analysis
+    if WEIGHT:
+        # WEIGHTED LEAST-SQUARES CASE (unequal error)
+        # Covariance Matrix
+        Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),np.dot(W,DMAT)))
+        # Normal Equations
+        NORMEQ = np.dot(Hinv,np.transpose(np.dot(W,DMAT)))
+        temp_err = np.zeros((n_terms))
+        # Propagating RMS errors
+        for i in range(0,n_terms):
+            temp_err[i] = np.sqrt(np.sum((NORMEQ[i,:]*DATA_ERR)**2))
+
+        # Recalculating beta2 error
+        beta_err = np.copy(temp_err)
+        beta_err[2] = np.sqrt(temp_err[1]**2 + temp_err[2]**2)
+        # Weighted sum of squares Error
+        WSSE = np.dot(np.transpose(wi*(d_in[0:nmax] - np.dot(DMAT,beta_mat))),
+            wi*(d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu)
+
+        return {'beta':beta_out, 'error':beta_err, 'R2':rsquare,
+            'R2Adj':rsq_adj, 'WSSE':WSSE, 'AIC':AIC, 'BIC':BIC,
+            'LOGLIK':log_lik, 'model':mod, 'residual':res,
+            'N':n_terms, 'DOF':nu, 'cov_mat':Hinv}
+
+    elif ((not WEIGHT) and (DATA_ERR != 0)):
+        # LEAST-SQUARES CASE WITH KNOWN AND EQUAL ERROR
+        P_err = DATA_ERR*np.ones((nmax))
+        Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),DMAT))
+        # Normal Equations
+        NORMEQ = np.dot(Hinv,np.transpose(DMAT))
+        temp_err = np.zeros((n_terms))
+        for i in range(0,n_terms):
+            # quadrature sum of the scaled normal equations
+            temp_err[i] = np.sqrt(np.sum((NORMEQ[i,:]*P_err)**2))
+        # Recalculating beta2 error
+        beta_err = np.copy(temp_err)
+        beta_err[2] = np.sqrt(temp_err[1]**2 + temp_err[2]**2)
+        # Mean square error
+        MSE = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)),
+            (d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu)
+
+        return {'beta':beta_out, 'error':beta_err, 'R2':rsquare,
+            'R2Adj':rsq_adj, 'MSE':MSE, 'AIC':AIC, 'BIC':BIC,
+            'LOGLIK':log_lik, 'model':mod, 'residual':res,
+            'N':n_terms, 'DOF':nu, 'cov_mat':Hinv}
+    else:
+        # STANDARD LEAST-SQUARES CASE
+        # Regression with Errors with Unknown Standard Deviations
+        # MSE = (1/nu)*sum((Y-X*B)**2)
+        # Mean square error
+        MSE = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)),
+            (d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu)
+        # Root mean square error
+        RMSE = np.sqrt(MSE)
+        # Normalized root mean square error
+        NRMSE = RMSE/(np.max(d_in[0:nmax])-np.min(d_in[0:nmax]))
+        # Covariance Matrix
+        # Multiplying the design matrix by itself
+        Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),DMAT))
+        # Taking the diagonal components of the cov matrix
+        hdiag = np.diag(Hinv)
+        # set either the standard deviation or the confidence interval
+        if (STDEV != 0):
+            # Setting the standard deviation of the output error
+            alpha = 1.0 - scipy.special.erf(STDEV/np.sqrt(2.0))
+        elif (CONF != 0):
+            # Setting the confidence interval of the output error
+            alpha = 1.0 - CONF
+        else:
+            # Default is 95% confidence interval
+            alpha = 1.0 - (0.95)
+        # Student T-Distribution with D.O.F.
nu + # t.ppf parallels tinv in matlab + tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu) + # beta_err is the error for each coefficient + # beta_err = t(nu,1-alpha/2)*standard error + temp_std = np.sqrt(MSE*hdiag) + temp_err = tstar*temp_std + + # Recalculating standard error for beta2 + st_err = np.copy(temp_std) + st_err[2] = np.sqrt(temp_std[1]**2 + temp_std[2]**2) + # Recalculating beta2 error + beta_err = np.copy(temp_err) + beta_err[2] = np.sqrt(temp_err[1]**2 + temp_err[2]**2) + + return {'beta':beta_out, 'error':beta_err, 'std_err':st_err, 'R2':rsquare, + 'R2Adj':rsq_adj, 'MSE':MSE, 'NRMSE':NRMSE, 'AIC':AIC, 'BIC':BIC, + 'LOGLIK':log_lik, 'model':mod, 'simple': simple, 'residual':res, + 'N':n_terms, 'DOF': nu, 'cov_mat':Hinv} diff --git a/gravity_toolkit/time_series/regress.py b/gravity_toolkit/time_series/regress.py new file mode 100755 index 00000000..9152f3f7 --- /dev/null +++ b/gravity_toolkit/time_series/regress.py @@ -0,0 +1,359 @@ +#!/usr/bin/env python +u""" +regress.py +Written by Tyler Sutterley (01/2023) + +Fits a synthetic signal to data over a time period by ordinary or weighted + least-squares + +Fit significance derivations are based on Burnham and Anderson (2002) + Model Selection and Multimodel Inference + +CALLING SEQUENCE: + tsbeta = gravity_toolkit.time_series.regress(t_in, d_in, ORDER=1, + CYCLES=[0.5,1.0], CONF=0.95) + +INPUTS: + t_in: input time array + d_in: input data array + +OUTPUTS: + beta: regressed coefficients array + error: regression fit error for each coefficient for an input deviation + STDEV: standard deviation of output error + CONF: confidence interval of output error + std_err: standard error for each coefficient + R2: coefficient of determination (r**2). + Proportion of variability accounted by the model + R2Adj: adjusted r**2. 
adjusts the r**2 for the number of terms in the model + MSE: mean square error + WSSE: Weighted sum of squares error + NRMSE: normalized root mean square error + AIC: Akaike information criterion (Second-Order, AICc) + BIC: Bayesian information criterion (Schwarz criterion) + model: modeled timeseries + simple: modeled timeseries without oscillating components + residual: model residual + DOF: degrees of freedom + N: number of terms used in fit + cov_mat: covariance matrix + +OPTIONS: + DATA_ERR: data precision + single value if equal + array if unequal for weighted least squares + WEIGHT: Set if measurement errors for use in weighted least squares + RELATIVE: relative period + ORDER: maximum polynomial order in fit (0=constant, 1=linear, 2=quadratic) + CYCLES: list of cyclical terms (0.5=semi-annual, 1=annual) + STDEV: standard deviation of output error + CONF: confidence interval of output error + AICc: use second order AIC + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python (https://numpy.org) + scipy: Scientific Tools for Python (https://docs.scipy.org/doc/) + +UPDATE HISTORY: + Updated 01/2023: refactored time series analysis functions + Updated 04/2022: updated docstrings to numpy documentation format + Updated 05/2021: define int/float precision to prevent deprecation warning + Updated 07/2020: added function docstrings + Updated 10/2019: changing Y/N flags to True/False + Updated 12/2018: put transpose of design matrix within FIT_TYPE if statement + Updated 08/2018: import packages before function definition + Updated 10/2017: output a seasonal model (will be 0 if no oscillating terms) + Updated 09/2017: using rcond=-1 in numpy least-squares algorithms + Updated 03/2017: added a catch for zero error in weighted least-squares + Updated 08/2015: changed sys.exit to raise ValueError + Updated 09/2014: made AICc option for second order AIC + previously was default with no option for standard AIC + Updated 07/2014: output the covariance matrix Hinv + import scipy.stats and scipy.special + Updated 06/2014: changed message to sys.exit + new output for number of terms + Updated 04/2014: added parameter RELATIVE for the relative time + Updated 02/2014: minor update to if statements. output simple regression + Updated 10/2013: + Added calculation for AICc (corrected for small sample size) + Added output DOF (degrees of freedom, nu) + Updated 09/2013: updated weighted least-squares and added AIC, BIC and + LOGLIK options for parameter evaluation + Fixed case with known standard deviations + Changed Weight flag to Y/N + Minor change: P_cons, P_lin and P_quad changed to P_x0, P_x1 and P_x2 + Updated 07/2013: added output for the modelled time-series + Updated 05/2013: converted to Python + Updated 02/2013: added in case for equal data error + and made associated edits.. added option WEIGHT + Updated 10/2012: added in additional FIT_TYPES that do not have a trend + added option for the Schwarz Criterion for model selection + Updated 08/2012: added option for changing the confidence interval or + adding a standard deviation of the error + changed 'std' to data_err for measurement errors + Updated 06/2012: added options for MSE and NRMSE. 
adjusted rsq_adj
+    Updated 06/2012: changed design matrix creation to 'FIT_TYPE'
+        added r_square to calculating goodness of fit compared to others
+    Updated 03/2012: combined polynomial and harmonic regression functions
+    Updated 01/2012: added std weighting for an error-weighted least-squares
+    Written 10/2011
+"""
+import numpy as np
+import scipy.stats
+import scipy.special
+
+def regress(t_in, d_in, ORDER=1, CYCLES=[0.5,1.0], DATA_ERR=0,
+    WEIGHT=False, RELATIVE=Ellipsis, STDEV=0, CONF=0, AICc=True):
+    """
+    Fits a synthetic signal to data over a time period by
+    ordinary or weighted least-squares
+
+    Parameters
+    ----------
+    t_in: float
+        input time array
+    d_in: float
+        input data array
+    ORDER: int, default 1
+        maximum polynomial order in fit
+
+        * ``0``: constant
+        * ``1``: linear
+        * ``2``: quadratic
+    CYCLES: list, default [0.5, 1.0]
+        list of cyclical terms
+    DATA_ERR: float or list
+        Data precision
+
+        - single value if equal
+        - array if unequal for weighted least squares
+    WEIGHT: bool, default False
+        Use weighted least squares with measurement errors
+    RELATIVE: float or List, default Ellipsis
+        Epoch for calculating relative dates
+
+        - float: use exact value as epoch
+        - list: use mean from indices of available times
+        - ``Ellipsis``: use mean of all available times
+    STDEV: float, default 0
+        Standard deviation of output error
+    CONF: float, default 0
+        Confidence interval of output error
+    AICc: bool, default True
+        Use second order AIC for small sample sizes [Burnham2002]_
+
+    Returns
+    -------
+    beta: float
+        regressed coefficients array
+    error: float
+        regression fit error for each coefficient for an input deviation
+
+        - ``STDEV``: standard deviation of output error
+        - ``CONF``: confidence interval of output error
+    std_err: float
+        standard error for each coefficient
+    R2: float
+        coefficient of determination (r\ :sup:`2`)
+    R2Adj: float
+        r\ :sup:`2` adjusted for the number of terms in the model
+    MSE: float
+        mean square error
+    WSSE: float
+        Weighted sum of squares error
+    NRMSE: float
+        normalized root mean square error
+    AIC: float
+        Akaike information criterion
+    BIC: float
+        Bayesian information criterion (Schwarz criterion)
+    model: float
+        modeled timeseries
+    simple: float
+        modeled timeseries without oscillating components
+    residual: float
+        model residual
+    DOF: int
+        degrees of freedom
+    N: int
+        number of terms used in fit
+    cov_mat: float
+        covariance matrix
+
+    References
+    ----------
+    .. [Burnham2002] K. P. Burnham and D. R. Anderson,
+        *Model Selection and Multimodel Inference*,
+        2nd Edition, 488 pp., (2002).
+ `doi: 10.1007/b97636 `_ + """ + + # remove singleton dimensions + t_in = np.squeeze(t_in) + d_in = np.squeeze(d_in) + nmax = len(t_in) + # calculate epoch for calculating relative times + if isinstance(RELATIVE, (list, np.ndarray)): + t_rel = t_in[RELATIVE].mean() + elif isinstance(RELATIVE, (float, int, np.float_, np.int_)): + t_rel = np.copy(RELATIVE) + elif (RELATIVE == Ellipsis): + t_rel = t_in[RELATIVE].mean() + + # create design matrix based on polynomial order and harmonics + DMAT = [] + # add polynomial orders (0=constant, 1=linear, 2=quadratic) + for o in range(ORDER+1): + DMAT.append((t_in-t_rel)**o) + # add cyclical terms (0.5=semi-annual, 1=annual) + for c in CYCLES: + DMAT.append(np.sin(2.0*np.pi*t_in/np.float64(c))) + DMAT.append(np.cos(2.0*np.pi*t_in/np.float64(c))) + # take the transpose of the design matrix + DMAT = np.transpose(DMAT) + + # Calculating Least-Squares Coefficients + if WEIGHT: + # Weighted Least-Squares fitting + if (np.ndim(DATA_ERR) == 0): + raise ValueError('Input DATA_ERR for Weighted Least-Squares') + # check if any error values are 0 (prevent infinite weights) + if np.count_nonzero(DATA_ERR == 0.0): + # change to minimum floating point value + DATA_ERR[DATA_ERR == 0.0] = np.finfo(np.float64).eps + # Weight Precision + wi = np.squeeze(DATA_ERR**(-2)) + # If uncorrelated weights are the diagonal + W = np.diag(wi) + # Least-Squares fitting + # Temporary Matrix: Inv(X'.W.X) + TM1 = np.linalg.inv(np.dot(np.transpose(DMAT),np.dot(W,DMAT))) + # Temporary Matrix: (X'.W.Y) + TM2 = np.dot(np.transpose(DMAT),np.dot(W,d_in)) + # Least Squares Solutions: Inv(X'.W.X).(X'.W.Y) + beta_mat = np.dot(TM1,TM2) + else:# Standard Least-Squares fitting (the [0] denotes coefficients output) + beta_mat = np.linalg.lstsq(DMAT,d_in,rcond=-1)[0] + # Weights are equal + wi = 1.0 + + # number of terms in least-squares solution + n_terms = len(beta_mat) + # modelled time-series + mod = np.dot(DMAT,beta_mat) + # residual + res = d_in[0:nmax] - np.dot(DMAT,beta_mat) + # Fitted Values without (and with) climate oscillations + simple = np.dot(DMAT[:,0:(ORDER+1)],beta_mat[0:(ORDER+1)]) + season = mod - simple + + # nu = Degrees of Freedom + nu = nmax - n_terms + + # calculating R^2 values + # SStotal = sum((Y-mean(Y))**2) + SStotal = np.dot(np.transpose(d_in[0:nmax] - np.mean(d_in[0:nmax])), + (d_in[0:nmax] - np.mean(d_in[0:nmax]))) + # SSerror = sum((Y-X*B)**2) + SSerror = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), + (d_in[0:nmax] - np.dot(DMAT,beta_mat))) + # R**2 term = 1- SSerror/SStotal + rsquare = 1.0 - (SSerror/SStotal) + # Adjusted R**2 term: weighted by degrees of freedom + rsq_adj = 1.0 - (SSerror/SStotal)*np.float64((nmax-1.0)/nu) + # Fit Criterion + # number of parameters including the intercept and the variance + K = np.float64(n_terms + 1) + # Log-Likelihood with weights (if unweighted, weight portions == 0) + # log(L) = -0.5*n*log(sigma^2) - 0.5*n*log(2*pi) - 0.5*n + #log_lik = -0.5*nmax*(np.log(2.0 * np.pi) + 1.0 + np.log(np.sum((res**2)/nmax))) + log_lik = 0.5*(np.sum(np.log(wi)) - nmax*(np.log(2.0 * np.pi) + 1.0 - + np.log(nmax) + np.log(np.sum(wi * (res**2))))) + + # Aikaike's Information Criterion + AIC = -2.0*log_lik + 2.0*K + if AICc: + # Second-Order AIC correcting for small sample sizes (restricted) + # Burnham and Anderson (2002) advocate use of AICc where + # ratio num/K is small + # A small ratio is defined in the definition at approximately < 40 + AIC += (2.0*K*(K+1.0))/(nmax - K - 1.0) + + # Bayesian Information Criterion (Schwarz Criterion) 
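+    # BIC = -2*log(L) + K*log(n): penalizes added parameters more
+    # strongly than AIC once the sample size n exceeds ~8 (log(n) > 2)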
+ BIC = -2.0*log_lik + np.log(nmax)*K + + # Error Analysis + if WEIGHT: + # WEIGHTED LEAST-SQUARES CASE (unequal error) + # Covariance Matrix + Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),np.dot(W,DMAT))) + # Normal Equations + NORMEQ = np.dot(Hinv,np.transpose(np.dot(W,DMAT))) + beta_err = np.zeros((n_terms)) + # Propagating RMS errors + for i in range(0,n_terms): + beta_err[i] = np.sqrt(np.sum((NORMEQ[i,:]*DATA_ERR)**2)) + # Weighted sum of squares Error + WSSE = np.dot(np.transpose(wi*(d_in[0:nmax] - np.dot(DMAT,beta_mat))), + wi*(d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) + + return {'beta':beta_mat, 'error':beta_err, 'R2':rsquare, + 'R2Adj':rsq_adj, 'WSSE':WSSE, 'AIC':AIC, 'BIC':BIC, + 'LOGLIK':log_lik, 'model':mod, 'residual':res, 'simple':simple, + 'season':season, 'N':n_terms, 'DOF':nu, 'cov_mat':Hinv} + + elif ((not WEIGHT) and (DATA_ERR != 0)): + # LEAST-SQUARES CASE WITH KNOWN AND EQUAL ERROR + P_err = DATA_ERR*np.ones((nmax)) + Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),DMAT)) + # Normal Equations + NORMEQ = np.dot(Hinv,np.transpose(DMAT)) + beta_err = np.zeros((n_terms)) + for i in range(0,n_terms): + beta_err[i] = np.sqrt(np.sum((NORMEQ[i,:]*P_err)**2)) + # Mean square error + MSE = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), + (d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) + + return {'beta':beta_mat, 'error':beta_err, 'R2':rsquare, + 'R2Adj':rsq_adj, 'MSE':MSE, 'AIC':AIC, 'BIC':BIC, + 'LOGLIK':log_lik, 'model':mod, 'residual':res, 'simple':simple, + 'season':season,'N':n_terms, 'DOF':nu, 'cov_mat':Hinv} + else: + # STANDARD LEAST-SQUARES CASE + # Regression with Errors with Unknown Standard Deviations + # MSE = (1/nu)*sum((Y-X*B)**2) + # Mean square error + MSE = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), + (d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) + # Root mean square error + RMSE = np.sqrt(MSE) + # Normalized root mean square error + NRMSE = RMSE/(np.max(d_in[0:nmax])-np.min(d_in[0:nmax])) + # Covariance Matrix + # Multiplying the design matrix by itself + Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),DMAT)) + # Taking the diagonal components of the cov matrix + hdiag = np.diag(Hinv) + # set either the standard deviation or the confidence interval + if (STDEV != 0): + # Setting the standard deviation of the output error + alpha = 1.0 - scipy.special.erf(STDEV/np.sqrt(2.0)) + elif (CONF != 0): + # Setting the confidence interval of the output error + alpha = 1.0 - CONF + else: + # Default is 95% confidence interval + alpha = 1.0 - (0.95) + # Student T-Distribution with D.O.F. 
nu
+        # t.ppf parallels tinv in matlab
+        tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu)
+        # beta_err is the error for each coefficient
+        # beta_err = t(nu,1-alpha/2)*standard error
+        st_err = np.sqrt(MSE*hdiag)
+        beta_err = tstar*st_err
+
+        return {'beta':beta_mat, 'error':beta_err, 'std_err':st_err, 'R2':rsquare,
+            'R2Adj':rsq_adj, 'MSE':MSE, 'NRMSE':NRMSE, 'AIC':AIC, 'BIC':BIC,
+            'LOGLIK':log_lik, 'model':mod, 'residual':res, 'simple':simple,
+            'season':season, 'N':n_terms, 'DOF':nu, 'cov_mat':Hinv}
diff --git a/gravity_toolkit/time_series/savitzky_golay.py b/gravity_toolkit/time_series/savitzky_golay.py
new file mode 100644
index 00000000..71ef83eb
--- /dev/null
+++ b/gravity_toolkit/time_series/savitzky_golay.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+u"""
+savitzky_golay.py
+Written by Tyler Sutterley (01/2023)
+Adapted from Numerical Recipes, Third Edition
+
+Smooth and optionally differentiate data of non-uniform sampling
+    with a Savitzky-Golay filter
+
+Can preserve the original shape and features of the signal better
+    than moving-average techniques
+
+CALLING SEQUENCE:
+    y_out = gravity_toolkit.time_series.savitzky_golay(t_in, y_in,
+        WINDOW=13, ORDER=2)
+
+INPUTS:
+    t_in: input time array
+    y_in: input data array
+
+OPTIONS:
+    WINDOW: length of the window (such as 13 for annual).
+        Must be an odd integer number.
+    ORDER: order of the polynomial used in the filtering.
+        Must be less than (window_size - 1)
+    DERIV: the order of the derivative to compute
+        default = 0 means only smoothing
+    RATE: scaling factor for output data and error
+    DATA_ERR: estimated data error of known and equal value
+
+OUTPUTS:
+    data: smoothed time-series (or n-th derivative)
+    error: estimated error at time points
+    time: time points for window
+
+NOTES:
+    The Savitzky-Golay filter is a type of low-pass filter, particularly
+    suited for smoothing noisy data. The main idea behind this
+    approach is to make a least-squares fit for each point with a
+    polynomial of high order over an odd-sized window centered at
+    the point.
+
+    A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
+    Data by Simplified Least Squares Procedures. Analytical
+    Chemistry, 1964, 36 (8), pp 1627-1639.
+    Numerical Recipes 3rd Edition: The Art of Scientific Computing
+    W.H. Press, S.A. Teukolsky, W. T. Vetterling, B.P.
Flannery
+    Cambridge University Press
+
+UPDATE HISTORY:
+    Updated 01/2023: refactored time series analysis functions
+    Updated 04/2022: updated docstrings to numpy documentation format
+    Updated 07/2020: added function docstrings
+    Updated 08/2019: importing factorial from scipy.special
+    Updated 11/2018: using future division for python3 compatibility
+    Updated 08/2015: changed sys.exit to raise ValueError
+    Written 06/2014
+"""
+from __future__ import print_function, division
+
+import numpy as np
+import scipy.special
+
+def savitzky_golay(t_in, y_in, WINDOW=None, ORDER=2, DERIV=0,
+    RATE=1, DATA_ERR=0):
+    """
+    Smooth and optionally differentiate data with a Savitzky-Golay
+    filter [Savitzky1964]_ [Press2007]_
+
+    Parameters
+    ----------
+    t_in: float
+        time array
+    y_in: float
+        data magnitude array
+    WINDOW: int or NoneType, default None
+        Length of the window
+
+        Must be an odd integer
+    ORDER: int, default 2
+        Order of the polynomial used in the filtering
+
+        Must be less than (window_size - 1)
+    DERIV: int, default 0
+        Order of the derivative to compute
+    RATE: float, default 1
+        Scaling factor for output data and error
+    DATA_ERR: float, default 0
+        Estimated data error of known and equal value
+
+    Returns
+    -------
+    data: float
+        Smoothed signal (or n-th derivative)
+    error: float
+        Estimated error at time points
+    time: float
+        Time points for window
+
+    References
+    ----------
+    .. [Savitzky1964] A. Savitzky, M. J. E. Golay, "Smoothing and
+        Differentiation of Data by Simplified Least Squares Procedures".
+        *Analytical Chemistry*, 36(8), 1627--1639, (1964).
+    .. [Press2007] *Numerical Recipes 3rd Edition: The Art of Scientific
+        Computing*, W.H. Press, S.A. Teukolsky, W. T. Vetterling,
+        B.P. Flannery. Cambridge University Press, (2007).
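+
+    Examples
+    --------
+    A minimal sketch with synthetic monthly data (the time and data
+    arrays here are illustrative only, not from the package):
+
+    >>> import numpy as np
+    >>> import gravity_toolkit.time_series
+    >>> t = 2002.0 + np.arange(120)/12.0
+    >>> d = np.sin(2.0*np.pi*t) + 0.1*np.random.randn(120)
+    >>> SG = gravity_toolkit.time_series.savitzky_golay(t, d, WINDOW=13, ORDER=2)
+    >>> sorted(SG.keys())
+    ['data', 'error', 'time']
+    >>> # a half-window of (13 - 1)/2 = 6 points is trimmed from each end
+    >>> len(SG['time']) == len(t) - 12
+    True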
+ """ + + # verify that WINDOW is positive, odd and greater than ORDER+1 + if WINDOW is None: + WINDOW = ORDER + -1*(ORDER % 2) + 3 + if WINDOW % 2 != 1 or WINDOW < 1: + raise ValueError("WINDOW size must be a positive odd number") + if WINDOW < ORDER + 2: + raise ValueError("WINDOW is too small for the polynomials order") + # remove any singleton dimensions + t_in = np.squeeze(t_in) + y_in = np.squeeze(y_in) + nmax = len(t_in) + + # order range + order_range = np.arange(ORDER+1) + # filter half-window + half_window = (WINDOW - 1) // 2 + # output time-series (removing half-windows on ends) + t_out = t_in[half_window:nmax-half_window] + # output smoothed timeseries (or derivative) + y_out = np.zeros((nmax-2*half_window)) + y_err = np.zeros((nmax-2*half_window)) + for n in range(0, (nmax-(2*half_window))): + yran = y_in[n + np.arange(0, 2*half_window+1)] + # Vandermonde matrix for the time-series + b = np.mat([[(t_in[k]-t_in[n+half_window])**i for i in order_range] + for k in range(n, n+2*half_window+1)]) + # compute the pseudoinverse of the design matrix + m=np.linalg.pinv(b).A[DERIV]*RATE**DERIV*scipy.special.factorial(DERIV) + # pad the signal at the extremes with values taken from the signal + firstvals = yran[0] - np.abs(yran[1:half_window+1][::-1] - yran[0]) + lastvals = yran[-1] + np.abs(yran[-half_window-1:-1][::-1] - yran[-1]) + yn = np.concatenate((firstvals, yran, lastvals)) + # compute the convolution and use middle value + y_out[n] = np.convolve(m[::-1], yn, mode='valid')[half_window] + if (DATA_ERR != 0): + # if data error is known and of equal value + P_err = DATA_ERR*np.ones((4*half_window+1)) + # compute the convolution and use middle value + y_err[n] = np.sqrt(np.convolve(m[::-1]**2, P_err**2, + mode='valid')[half_window]) + + return {'data':y_out, 'error':y_err, 'time':t_out} diff --git a/gravity_toolkit/time_series/smooth.py b/gravity_toolkit/time_series/smooth.py new file mode 100755 index 00000000..5b2d65e5 --- /dev/null +++ b/gravity_toolkit/time_series/smooth.py @@ -0,0 +1,351 @@ +#!/usr/bin/env python +u""" +smooth.py +Written by Tyler Sutterley (01/2023) + +Computes a moving average of a time-series using three possible routines: + 1) centered moving average + 2) 13-month Loess filter (default) + 3) 13-month Loess filter weighted and outputs for all dates + +Note: due to the missing months in the GRACE/GRACE-FO time series, + a standard moving average will have problems if the + missing months are not interpolated. 
+ +CALLING SEQUENCE: + smth = gravity_toolkit.time_series.smooth(t_in, d_in, HFWTH=6) + +INPUTS: + t_in: input time array + d_in: input data array + +OUTPUTS: + time: time after removing start and end half-windows + data: smoothed time-series + season: seasonal component calculated by the Loess filter + annual: annual component calculated by the Loess filter + semiann: semi-annual component calculated by the Loess filter + trend: instantaneous trend calculated by the Loess filter + error: estimated error of the instantaneous trend + noise: noise component after removing the Loess trend and seasonal components + reduce: original time series after removing start and end half-windows + +OPTIONS: + MOVING: calculates centered moving average using mean of window + mean of: (January up to December) and (February up to January) + WEIGHT: smoothing algorithm that backward models dates before + half-width and forward models dates after half-width + 0: use unweighted Loess filter + 1: use linear weights with Loess filter + 2: use gaussian weights with Loess filter + HFWTH: half-width of the moving average (default = 6 for 13-month Loess) + DATA_ERR: input error for known and equal errors (single value) + STDEV: standard deviation of output error + CONF: confidence interval of output error (default is for 95%) + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python (https://numpy.org) + scipy: Scientific Tools for Python (https://docs.scipy.org/doc/) + +UPDATE HISTORY: + Updated 01/2023: refactored time series analysis functions + Updated 04/2022: updated docstrings to numpy documentation format + Updated 05/2021: define int/float precision to prevent deprecation warning + Updated 07/2020: added function docstrings + Updated 10/2019: changing Y/N flags to True/False. output amplitudes + Updated 09/2019: calculate and output annual and semi-annual phase + Updated 08/2018: use implicit import of scipy stats and special + Updated 09/2017: using rcond=-1 in numpy least-squares algorithms + Updated 09/2014: added output for the reduced time-series + Updated 06/2014: added parameter DATA_ERR for known and equal errors + Updated 03/2014: created a new smoothing algorithm + similar to Loess-type least-squares algorithm but has + backward models dates before HFWTH and forward models dates after + if all dates are available will be centrally weighted + need to update to include error and should find a reference + as i came up with this algorithm at 4:30am + Updated 02/2014: minor update to if statements + Updated 09/2013: switched MOVING flag to Y/N + Minor change: P_cons, and P_lin changed to P_x0, and P_x1 + Updated 06/2013: added error for instantaneous trend + Updated 04/2013: converted to python and added more outputs + Updated 02/2013: using a centered moving average + added seasonal option to compute the smooth seasonal variation + calculated by the loess filter program. 
+ added option to compute the noise component after removing the + smoothed trend and the seasonal component + Updated 03/2012: added Loess smoothing following Velicogna (2009) + Written 12/2011 +""" +import numpy as np +import scipy.stats +import scipy.special + +def smooth(t_in, d_in, HFWTH=6, MOVING=False, DATA_ERR=0, WEIGHT=0, + STDEV=0, CONF=0): + """ + Computes the moving average of a time-series + + 1) centered moving average + 2) 13-month Loess filter [Velicogna2009]_ + 3) 13-month Loess filter weighted and outputs for all dates + + Parameters + ---------- + t_in: float + input time array + d_in: float + input data array + HFWTH: int + half-width of the moving average + MOVING: bool, default False + calculates centered moving average using mean of window + WEIGHT: smoothing algorithm that backward models dates before + half-width and forward models dates after half-width + + - ``0``: use unweighted Loess filter + - ``1``: use linear weights with Loess filter + - ``2``: use gaussian weights with Loess filter + DATA_ERR: float or list + input error for known and equal errors + STDEV: float, default 0 + Standard deviation of output error + CONF: float, default 0 + Confidence interval of output error + + Returns + ------- + time: float + time after removing start and end half-windows + data: float + smoothed time-series + season: float + seasonal component calculated by the Loess filter + annual: float + annual component calculated by the Loess filter + semiann: float + semi-annual component calculated by the Loess filter + trend: float + instantaneous trend calculated by the Loess filter + error: float + estimated error of the instantaneous trend + noise: float + noise component after removing the Loess trend and seasonal components + reduce: float + original time series after removing start and end half-windows + + References + ---------- + .. [Velicogna2009] I. Velicogna, "Increasing rates of ice mass loss + from the Greenland and Antarctic ice sheets revealed by GRACE", + *Geophysical Research Letters*, 36(L19503), + `doi: 10.1029/2009GL040222 `_ + """ + + # remove singleton dimensions + t_in = np.squeeze(t_in) + d_in = np.squeeze(d_in) + nmax = len(t_in) + + # Indice with start of seasonal terms: + SEAS = 2 + + # set either the standard deviation or the confidence interval + if (STDEV != 0): + # Setting the standard deviation of the output error + alpha = 1.0 - scipy.special.erf(STDEV/np.sqrt(2.0)) + elif (CONF != 0): + # Setting the confidence interval of the output error + alpha = 1.0 - CONF + else: + # Default is 95% confidence interval + alpha = 1.0 - (0.95) + + # moving average algorithm + if MOVING: + # Centered moving average using the mean of each window + # equal to mean of Jan:Dec and Feb:Jan+1 for HFWTH 6 + # problematic with GRACE due to missing months within time-series + # output time + tout = t_in[HFWTH:nmax-HFWTH] + smth = np.zeros((nmax-2*HFWTH)) + for k in range(0, (nmax-(2*HFWTH))): + # centered moving average sum[2:i-1] + 0.5[1] + 0.5[i] + smth[k] = np.sum(d_in[k+1:k+2*HFWTH]) + 0.5*(d_in[k]+d_in[k+2*HFWTH]) + dsmth = smth/(2*HFWTH) + return {'data':dsmth, 'time':tout} + elif WEIGHT in (1,2): + # weighted moving average calculated from the least-squares of window + # and removing An/SAn signal. 
models entire range of dates
+        # for a HFWTH of 6 (remove annual)
+        # will fit linear model to data for 13 months
+        # creates a weight array ranging from 1:HFWTH+1:-1 for linear
+        # or a gaussian function centered on HFWTH
+        # which favors the regression with the date centered
+        # smoothed time-series = sum(smth*weights)/sum(weights)
+        # output time = input time
+        tout = np.copy(t_in)
+        if (WEIGHT == 1):
+            # linear weights (range from 1:HFWTH+1:-1)
+            wi = np.concatenate((np.arange(1,HFWTH+2,dtype=np.float64),
+                np.arange(HFWTH,0,-1,dtype=np.float64)),axis=0)
+        elif (WEIGHT == 2):
+            # gaussian weights
+            # default standard deviation of 2
+            stdev = 2.0
+            # gaussian function over range 2*HFWTH
+            # centered on HFWTH
+            xi=np.arange(0, 2*HFWTH+1)
+            wi=np.exp(-(xi-HFWTH)**2/(2.0*stdev**2))/(stdev*np.sqrt(2.0*np.pi))
+
+        dsmth = np.zeros((nmax))
+        dseason = np.zeros((nmax))
+        dannual = np.zeros((nmax))
+        annamp = np.zeros((nmax))
+        annphase = np.zeros((nmax))
+        dsemian = np.zeros((nmax))
+        semiamp = np.zeros((nmax))
+        semiphase = np.zeros((nmax))
+        weight = np.zeros((nmax))
+        for i in range(0, (nmax-(2*HFWTH))):
+            ran = i + np.arange(0, 2*HFWTH+1)
+            P_x0 = np.ones((2*HFWTH+1))# Constant Term
+            P_x1 = t_in[ran]# Linear Term
+            # Annual term = 2*pi*t*harmonic
+            P_asin = np.sin(2*np.pi*t_in[ran])
+            P_acos = np.cos(2*np.pi*t_in[ran])
+            # Semi-Annual = 4*pi*t*harmonic
+            P_ssin = np.sin(4*np.pi*t_in[ran])
+            P_scos = np.cos(4*np.pi*t_in[ran])
+            # x0,x1,AS,AC,SS,SC
+            TMAT = np.array([P_x0, P_x1, P_asin, P_acos, P_ssin, P_scos])
+            TMAT = np.transpose(TMAT)
+            # Least-Squares fitting
+            # (the [0] denotes coefficients output)
+            beta_mat = np.linalg.lstsq(TMAT,d_in[ran],rcond=-1)[0]
+            # Calculating the output components
+            # add weighted smoothed time series
+            dsmth[ran] += wi*np.dot(TMAT[:,0:SEAS],beta_mat[0:SEAS])
+            # seasonal component
+            dseason[ran] += wi*np.dot(TMAT[:,SEAS:],beta_mat[SEAS:])
+            # annual component
+            AS,AC = beta_mat[SEAS:SEAS+2]
+            dannual[ran] += wi*np.dot(TMAT[:,SEAS:SEAS+2],[AS,AC])
+            annamp[ran] += wi*np.sqrt(AS**2 + AC**2)
+            annphase[ran] += wi*np.arctan2(AC,AS)*180.0/np.pi
+            # semi-annual component
+            SS,SC = beta_mat[SEAS+2:SEAS+4]
+            dsemian[ran] += wi*np.dot(TMAT[:,SEAS+2:SEAS+4],[SS,SC])
+            semiamp[ran] += wi*np.sqrt(SS**2 + SC**2)
+            semiphase[ran] += wi*np.arctan2(SC,SS)*180.0/np.pi
+            # add weights
+            weight[ran] += wi
+        # divide weighted smoothed time-series by weights
+        # to get output smoothed time-series
+        dsmth /= weight
+        dseason /= weight
+        dannual /= weight
+        annamp /= weight
+        annphase /= weight
+        dsemian /= weight
+        semiamp /= weight
+        semiphase /= weight
+        # noise = data - smoothed - seasonal
+        dnoise = d_in - dsmth - dseason
+        return {'data':dsmth, 'seasonal':dseason, 'annual':dannual,
+            'annamp':annamp, 'annphase':annphase, 'semiann':dsemian,
+            'semiamp':semiamp, 'semiphase':semiphase, 'noise':dnoise,
+            'time':tout, 'weight':weight}
+    else:
+        # Moving average calculated from least-squares of window
+        # and removing An/SAn signal
+        # output time
+        tout = t_in[HFWTH:nmax-HFWTH]
+        dsmth = np.zeros((nmax-2*HFWTH))
+        dtrend = np.zeros((nmax-2*HFWTH))
+        derror = np.zeros((nmax-2*HFWTH))
+        dseason = np.zeros((nmax-2*HFWTH))
+        dannual = np.zeros((nmax-2*HFWTH))
+        annamp = np.zeros((nmax-2*HFWTH))
+        annphase = np.zeros((nmax-2*HFWTH))
+        dsemian = np.zeros((nmax-2*HFWTH))
+        semiamp = np.zeros((nmax-2*HFWTH))
+        semiphase = np.zeros((nmax-2*HFWTH))
+        dnoise = np.zeros((nmax-2*HFWTH))
+        dreduce = np.zeros((nmax-2*HFWTH))
+        for i in range(0, (nmax-(2*HFWTH))):
+            ran
= i + np.arange(0, 2*HFWTH+1) + P_x0 = np.ones((2*HFWTH+1))# Constant Term + P_x1 = t_in[ran]# Linear Term + # Annual term = 2*pi*t*harmonic + P_asin = np.sin(2*np.pi*t_in[ran]) + P_acos = np.cos(2*np.pi*t_in[ran]) + #Semi-Annual = 4*pi*t*harmonic + P_ssin = np.sin(4*np.pi*t_in[ran]) + P_scos = np.cos(4*np.pi*t_in[ran]) + # x0,x1,AS,AC,SS,SC + TMAT = np.array([P_x0, P_x1, P_asin, P_acos, P_ssin, P_scos]) + TMAT = np.transpose(TMAT) + # Least-Squares fitting + # (the [0] denotes coefficients output) + beta_mat = np.linalg.lstsq(TMAT,d_in[ran],rcond=-1)[0] + n_terms = len(beta_mat) + + if (DATA_ERR != 0): + # LEAST-SQUARES CASE WITH KNOWN AND EQUAL ERROR + P_err = DATA_ERR*np.ones((2*HFWTH+1)) + Hinv = np.linalg.inv(np.dot(np.transpose(TMAT),TMAT)) + # Normal Equations + NORMEQ = np.dot(Hinv,np.transpose(TMAT)) + beta_err = np.zeros((n_terms)) + for n in range(0,n_terms): + beta_err[n] = np.sqrt(np.sum((NORMEQ[n,:]*P_err)**2)) + else: + # Error Analysis + # Degrees of Freedom + nu = (2*HFWTH+1) - n_terms + # Mean square error + MSE = np.dot(np.transpose(d_in[ran] - np.dot(TMAT,beta_mat)), + (d_in[ran] - np.dot(TMAT,beta_mat)))/nu + # Covariance Matrix + # Multiplying the design matrix by itself + Hinv = np.linalg.inv(np.dot(np.transpose(TMAT),TMAT)) + # Taking the diagonal components of the cov matrix + hdiag = np.diag(Hinv) + + # STANDARD LEAST-SQUARES CASE + # Regression with Errors with Unknown Standard Deviations + # Student T-Distribution with D.O.F. nu + # t.ppf parallels tinv in matlab + tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu) + # beta_err is the error for each coefficient + # beta_err = t(nu,1-alpha/2)*standard error + st_err = np.sqrt(MSE*hdiag) + beta_err = tstar*st_err + + # Calculating the output components + # smoothed time series + dsmth[i] = np.dot(TMAT[HFWTH,0:SEAS],beta_mat[0:SEAS]) + dtrend[i] = np.copy(beta_mat[1])# Instantaneous data trend + derror[i] = np.copy(beta_err[1])# Error in trend + # seasonal component + dseason[i] = np.dot(TMAT[HFWTH,SEAS:],beta_mat[SEAS:]) + # annual component + AS,AC = beta_mat[SEAS:SEAS+2] + dannual[i] = np.dot(TMAT[HFWTH,SEAS:SEAS+2],[AS,AC]) + annphase[i] = np.arctan2(AC,AS)*180.0/np.pi + annamp[i] = np.sqrt(AS**2 + AC**2) + # semi-annual component + SS,SC = beta_mat[SEAS+2:SEAS+4] + dsemian[i] = np.dot(TMAT[HFWTH,SEAS+2:SEAS+4],[SS,SC]) + semiamp[i] = np.sqrt(SS**2 + SC**2) + semiphase[i] = np.arctan2(SC,SS)*180.0/np.pi + # noise component + dnoise[i] = d_in[i+HFWTH] - dsmth[i] - dseason[i] + # reduced time-series + dreduce[i] = d_in[i+HFWTH] + + return {'data':dsmth, 'trend':dtrend, 'error':derror, + 'seasonal':dseason, 'annual':dannual, 'annphase':annphase, + 'annamp':annamp, 'semiann':dsemian, 'semiamp':semiamp, + 'semiphase':semiphase, 'noise':dnoise, 'time':tout, 'reduce':dreduce} diff --git a/gravity_toolkit/tsamplitude.py b/gravity_toolkit/tsamplitude.py index e393acb2..0e4c5470 100755 --- a/gravity_toolkit/tsamplitude.py +++ b/gravity_toolkit/tsamplitude.py @@ -27,9 +27,10 @@ Updated 05/2013: converted to python Written 07/2012: """ -import numpy as np +import warnings +import gravity_toolkit.time_series -def tsamplitude(bsin, bcos): +def tsamplitude(*args): """ Calculate the amplitude and phase of a harmonic function @@ -47,6 +48,8 @@ def tsamplitude(bsin, bcos): ph: float phase from the harmonic functions in degrees """ - ampl = np.sqrt(bsin**2.0 + bcos**2.0) - ph = 180.0*np.arctan2(bcos, bsin)/np.pi - return (ampl,ph) + warnings.filterwarnings("always") + warnings.warn("Deprecated. 
Please use gravity_toolkit.time_series instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.time_series.amplitude(*args) diff --git a/gravity_toolkit/tsregress.py b/gravity_toolkit/tsregress.py index 91c43b84..01f203b4 100755 --- a/gravity_toolkit/tsregress.py +++ b/gravity_toolkit/tsregress.py @@ -96,12 +96,10 @@ Updated 01/2012: added std weighting for a error weighted least-squares Written 10/2011 """ -import numpy as np -import scipy.stats -import scipy.special +import warnings +import gravity_toolkit.time_series -def tsregress(t_in, d_in, ORDER=1, CYCLES=[0.5,1.0], DATA_ERR=0, - WEIGHT=False, RELATIVE=Ellipsis, STDEV=0, CONF=0, AICc=True): +def tsregress(*args, **kwargs): """ Fits a synthetic signal to data over a time period by ordinary or weighted least-squares @@ -185,173 +183,8 @@ def tsregress(t_in, d_in, ORDER=1, CYCLES=[0.5,1.0], DATA_ERR=0, 2nd Edition, 488 pp., (2002). `doi: 10.1007/b97636 `_ """ - - # remove singleton dimensions - t_in = np.squeeze(t_in) - d_in = np.squeeze(d_in) - nmax = len(t_in) - # calculate epoch for calculating relative times - if isinstance(RELATIVE, (list, np.ndarray)): - t_rel = t_in[RELATIVE].mean() - elif isinstance(RELATIVE, (float, int, np.float_, np.int_)): - t_rel = np.copy(RELATIVE) - elif (RELATIVE == Ellipsis): - t_rel = t_in[RELATIVE].mean() - - # create design matrix based on polynomial order and harmonics - DMAT = [] - # add polynomial orders (0=constant, 1=linear, 2=quadratic) - for o in range(ORDER+1): - DMAT.append((t_in-t_rel)**o) - # add cyclical terms (0.5=semi-annual, 1=annual) - for c in CYCLES: - DMAT.append(np.sin(2.0*np.pi*t_in/np.float64(c))) - DMAT.append(np.cos(2.0*np.pi*t_in/np.float64(c))) - # take the transpose of the design matrix - DMAT = np.transpose(DMAT) - - # Calculating Least-Squares Coefficients - if WEIGHT: - # Weighted Least-Squares fitting - if (np.ndim(DATA_ERR) == 0): - raise ValueError('Input DATA_ERR for Weighted Least-Squares') - # check if any error values are 0 (prevent infinite weights) - if np.count_nonzero(DATA_ERR == 0.0): - # change to minimum floating point value - DATA_ERR[DATA_ERR == 0.0] = np.finfo(np.float64).eps - # Weight Precision - wi = np.squeeze(DATA_ERR**(-2)) - # If uncorrelated weights are the diagonal - W = np.diag(wi) - # Least-Squares fitting - # Temporary Matrix: Inv(X'.W.X) - TM1 = np.linalg.inv(np.dot(np.transpose(DMAT),np.dot(W,DMAT))) - # Temporary Matrix: (X'.W.Y) - TM2 = np.dot(np.transpose(DMAT),np.dot(W,d_in)) - # Least Squares Solutions: Inv(X'.W.X).(X'.W.Y) - beta_mat = np.dot(TM1,TM2) - else:# Standard Least-Squares fitting (the [0] denotes coefficients output) - beta_mat = np.linalg.lstsq(DMAT,d_in,rcond=-1)[0] - # Weights are equal - wi = 1.0 - - # number of terms in least-squares solution - n_terms = len(beta_mat) - # modelled time-series - mod = np.dot(DMAT,beta_mat) - # residual - res = d_in[0:nmax] - np.dot(DMAT,beta_mat) - # Fitted Values without (and with) climate oscillations - simple = np.dot(DMAT[:,0:(ORDER+1)],beta_mat[0:(ORDER+1)]) - season = mod - simple - - # nu = Degrees of Freedom - nu = nmax - n_terms - - # calculating R^2 values - # SStotal = sum((Y-mean(Y))**2) - SStotal = np.dot(np.transpose(d_in[0:nmax] - np.mean(d_in[0:nmax])), - (d_in[0:nmax] - np.mean(d_in[0:nmax]))) - # SSerror = sum((Y-X*B)**2) - SSerror = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), - (d_in[0:nmax] - np.dot(DMAT,beta_mat))) - # R**2 term = 1- SSerror/SStotal - rsquare = 1.0 - (SSerror/SStotal) - # Adjusted R**2 
term: weighted by degrees of freedom - rsq_adj = 1.0 - (SSerror/SStotal)*np.float64((nmax-1.0)/nu) - # Fit Criterion - # number of parameters including the intercept and the variance - K = np.float64(n_terms + 1) - # Log-Likelihood with weights (if unweighted, weight portions == 0) - # log(L) = -0.5*n*log(sigma^2) - 0.5*n*log(2*pi) - 0.5*n - #log_lik = -0.5*nmax*(np.log(2.0 * np.pi) + 1.0 + np.log(np.sum((res**2)/nmax))) - log_lik = 0.5*(np.sum(np.log(wi)) - nmax*(np.log(2.0 * np.pi) + 1.0 - - np.log(nmax) + np.log(np.sum(wi * (res**2))))) - - # Aikaike's Information Criterion - AIC = -2.0*log_lik + 2.0*K - if AICc: - # Second-Order AIC correcting for small sample sizes (restricted) - # Burnham and Anderson (2002) advocate use of AICc where - # ratio num/K is small - # A small ratio is defined in the definition at approximately < 40 - AIC += (2.0*K*(K+1.0))/(nmax - K - 1.0) - - # Bayesian Information Criterion (Schwarz Criterion) - BIC = -2.0*log_lik + np.log(nmax)*K - - # Error Analysis - if WEIGHT: - # WEIGHTED LEAST-SQUARES CASE (unequal error) - # Covariance Matrix - Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),np.dot(W,DMAT))) - # Normal Equations - NORMEQ = np.dot(Hinv,np.transpose(np.dot(W,DMAT))) - beta_err = np.zeros((n_terms)) - # Propagating RMS errors - for i in range(0,n_terms): - beta_err[i] = np.sqrt(np.sum((NORMEQ[i,:]*DATA_ERR)**2)) - # Weighted sum of squares Error - WSSE = np.dot(np.transpose(wi*(d_in[0:nmax] - np.dot(DMAT,beta_mat))), - wi*(d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) - - return {'beta':beta_mat, 'error':beta_err, 'R2':rsquare, - 'R2Adj':rsq_adj, 'WSSE':WSSE, 'AIC':AIC, 'BIC':BIC, - 'LOGLIK':log_lik, 'model':mod, 'residual':res, 'simple':simple, - 'season':season, 'N':n_terms, 'DOF':nu, 'cov_mat':Hinv} - - elif ((not WEIGHT) and (DATA_ERR != 0)): - # LEAST-SQUARES CASE WITH KNOWN AND EQUAL ERROR - P_err = DATA_ERR*np.ones((nmax)) - Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),DMAT)) - # Normal Equations - NORMEQ = np.dot(Hinv,np.transpose(DMAT)) - beta_err = np.zeros((n_terms)) - for i in range(0,n_terms): - beta_err[i] = np.sqrt(np.sum((NORMEQ[i,:]*P_err)**2)) - # Mean square error - MSE = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), - (d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) - - return {'beta':beta_mat, 'error':beta_err, 'R2':rsquare, - 'R2Adj':rsq_adj, 'MSE':MSE, 'AIC':AIC, 'BIC':BIC, - 'LOGLIK':log_lik, 'model':mod, 'residual':res, 'simple':simple, - 'season':season,'N':n_terms, 'DOF':nu, 'cov_mat':Hinv} - else: - # STANDARD LEAST-SQUARES CASE - # Regression with Errors with Unknown Standard Deviations - # MSE = (1/nu)*sum((Y-X*B)**2) - # Mean square error - MSE = np.dot(np.transpose(d_in[0:nmax] - np.dot(DMAT,beta_mat)), - (d_in[0:nmax] - np.dot(DMAT,beta_mat)))/np.float64(nu) - # Root mean square error - RMSE = np.sqrt(MSE) - # Normalized root mean square error - NRMSE = RMSE/(np.max(d_in[0:nmax])-np.min(d_in[0:nmax])) - # Covariance Matrix - # Multiplying the design matrix by itself - Hinv = np.linalg.inv(np.dot(np.transpose(DMAT),DMAT)) - # Taking the diagonal components of the cov matrix - hdiag = np.diag(Hinv) - # set either the standard deviation or the confidence interval - if (STDEV != 0): - # Setting the standard deviation of the output error - alpha = 1.0 - scipy.special.erf(STDEV/np.sqrt(2.0)) - elif (CONF != 0): - # Setting the confidence interval of the output error - alpha = 1.0 - CONF - else: - # Default is 95% confidence interval - alpha = 1.0 - (0.95) - # Student T-Distribution with D.O.F. 
nu - # t.ppf parallels tinv in matlab - tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu) - # beta_err is the error for each coefficient - # beta_err = t(nu,1-alpha/2)*standard error - st_err = np.sqrt(MSE*hdiag) - beta_err = tstar*st_err - - return {'beta':beta_mat, 'error':beta_err, 'std_err':st_err, 'R2':rsquare, - 'R2Adj':rsq_adj, 'MSE':MSE, 'NRMSE':NRMSE, 'AIC':AIC, 'BIC':BIC, - 'LOGLIK':log_lik, 'model':mod, 'residual':res, 'simple':simple, - 'season':season, 'N':n_terms, 'DOF':nu, 'cov_mat':Hinv} + warnings.filterwarnings("always") + warnings.warn("Deprecated. Please use gravity_toolkit.time_series instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.time_series.regress(*args, **kwargs) diff --git a/gravity_toolkit/tssmooth.py b/gravity_toolkit/tssmooth.py index 4d30a84e..d956bd6c 100755 --- a/gravity_toolkit/tssmooth.py +++ b/gravity_toolkit/tssmooth.py @@ -76,12 +76,10 @@ Updated 03/2012: added Loess smoothing following Velicogna (2009) Written 12/2011 """ -import numpy as np -import scipy.stats -import scipy.special +import warnings +import gravity_toolkit.time_series -def tssmooth(t_in, d_in, HFWTH=6, MOVING=False, DATA_ERR=0, WEIGHT=0, - STDEV=0, CONF=0): +def tssmooth(*args, **kwargs): """ Computes the moving average of a time-series @@ -140,211 +138,8 @@ def tssmooth(t_in, d_in, HFWTH=6, MOVING=False, DATA_ERR=0, WEIGHT=0, *Geophysical Research Letters*, 36(L19503), `doi: 10.1029/2009GL040222 `_ """ - - # remove singleton dimensions - t_in = np.squeeze(t_in) - d_in = np.squeeze(d_in) - nmax = len(t_in) - - # Indice with start of seasonal terms: - SEAS = 2 - - # set either the standard deviation or the confidence interval - if (STDEV != 0): - # Setting the standard deviation of the output error - alpha = 1.0 - scipy.special.erf(STDEV/np.sqrt(2.0)) - elif (CONF != 0): - # Setting the confidence interval of the output error - alpha = 1.0 - CONF - else: - # Default is 95% confidence interval - alpha = 1.0 - (0.95) - - # moving average algorithm - if MOVING: - # Centered moving average using the mean of each window - # equal to mean of Jan:Dec and Feb:Jan+1 for HFWTH 6 - # problematic with GRACE due to missing months within time-series - # output time - tout = t_in[HFWTH:nmax-HFWTH] - smth = np.zeros((nmax-2*HFWTH)) - for k in range(0, (nmax-(2*HFWTH))): - # centered moving average sum[2:i-1] + 0.5[1] + 0.5[i] - smth[k] = np.sum(d_in[k+1:k+2*HFWTH]) + 0.5*(d_in[k]+d_in[k+2*HFWTH]) - dsmth = smth/(2*HFWTH) - return {'data':dsmth, 'time':tout} - elif WEIGHT in (1,2): - # weighted moving average calculated from the least-squares of window - # and removing An/SAn signal. 
models entire range of dates - # for a HFWTH of 6 (remove annual) - # will fit linear model to data for 13 months - # creates a weight array ranging from 1:HFWTH+1:-1 for linear - # or a gaussian function centered on HFWTH - # which favors the regression with the date centered - # smoothed time-series = sum(smth*weights)/sum(weights)the weight array - # output time = input time - tout = np.copy(t_in) - if (WEIGHT == 1): - # linear weights (range from 1:HFWTH+1:-1) - wi = np.concatenate((np.arange(1,HFWTH+2,dtype=np.float64), - np.arange(HFWTH,0,-1,dtype=np.float64)),axis=0) - elif (WEIGHT == 2): - # gaussian weights - # default standard deviation of 2 - stdev = 2.0 - # gaussian function over range 2*HFWTH - # centered on HFWTH - xi=np.arange(0, 2*HFWTH+1) - wi=np.exp(-(xi-HFWTH)**2/(2.0*stdev**2))/(stdev*np.sqrt(2.0*np.pi)) - - dsmth = np.zeros((nmax)) - dseason = np.zeros((nmax)) - dannual = np.zeros((nmax)) - annamp = np.zeros((nmax)) - annphase = np.zeros((nmax)) - dsemian = np.zeros((nmax)) - semiamp = np.zeros((nmax)) - semiphase = np.zeros((nmax)) - weight = np.zeros((nmax)) - for i in range(0, (nmax-(2*HFWTH))): - ran = i + np.arange(0, 2*HFWTH+1) - P_x0 = np.ones((2*HFWTH+1))# Constant Term - P_x1 = t_in[ran]# Linear Term - # Annual term = 2*pi*t*harmonic - P_asin = np.sin(2*np.pi*t_in[ran]) - P_acos = np.cos(2*np.pi*t_in[ran]) - #Semi-Annual = 4*pi*t*harmonic - P_ssin = np.sin(4*np.pi*t_in[ran]) - P_scos = np.cos(4*np.pi*t_in[ran]) - # x0,x1,AS,AC,SS,SC - TMAT = np.array([P_x0, P_x1, P_asin, P_acos, P_ssin, P_scos]) - TMAT = np.transpose(TMAT) - # Least-Squares fitting - # (the [0] denotes coefficients output)standard - beta_mat = np.linalg.lstsq(TMAT,d_in[ran],rcond=-1)[0] - # Calculating the output components - # add weighted smoothed time series - dsmth[ran] += wi*np.dot(TMAT[:,0:SEAS],beta_mat[0:SEAS]) - # seasonal component - dseason[ran] += wi*np.dot(TMAT[:,SEAS:],beta_mat[SEAS:]) - # annual component - AS,AC = beta_mat[SEAS:SEAS+2] - dannual[ran] += wi*np.dot(TMAT[:,SEAS:SEAS+2],[AS,AC]) - annamp[ran] += wi*np.sqrt(AS**2 + AC**2) - annphase[ran] += wi*np.arctan2(AC,AS)*180.0/np.pi - # semi-annual component - SS,SC = beta_mat[SEAS+2:SEAS+4] - dsemian[ran] += wi*np.dot(TMAT[:,SEAS+2:SEAS+4],[SS,SC]) - semiamp[ran] += wi*np.sqrt(SS**2 + SC**2) - semiphase[ran] += wi*np.arctan2(SC,SS)*180.0/np.pi - # add weights - weight[ran] += wi - # divide weighted smoothed time-series by weights - # to get output smoothed time-series - dsmth /= weight - dseason /= weight - dannual /= weight - annamp /= weight - annphase /= weight - dsemian /= weight - semiamp /= weight - semiphase /= weight - # noise = data - smoothed - seasonal - dnoise = d_in - dsmth - dseason - return {'data':dsmth, 'seasonal':dseason, 'annual':dannual, - 'annamp':annamp, 'annphase':annphase, 'semiann':dsemian, - 'semiamp':semiamp, 'semiphase':semiphase, 'noise':dnoise, - 'time':tout, 'weight':weight} - else: - # Moving average calculated from least-squares of window - # and removing An/SAn signal - # output time - tout = t_in[HFWTH:nmax-HFWTH] - dsmth = np.zeros((nmax-2*HFWTH)) - dtrend = np.zeros((nmax-2*HFWTH)) - derror = np.zeros((nmax-2*HFWTH)) - dseason = np.zeros((nmax-2*HFWTH)) - dannual = np.zeros((nmax-2*HFWTH)) - annamp = np.zeros((nmax-2*HFWTH)) - annphase = np.zeros((nmax-2*HFWTH)) - dsemian = np.zeros((nmax-2*HFWTH)) - semiamp = np.zeros((nmax-2*HFWTH)) - semiphase = np.zeros((nmax-2*HFWTH)) - dnoise = np.zeros((nmax-2*HFWTH)) - dreduce = np.zeros((nmax-2*HFWTH)) - for i in range(0, (nmax-(2*HFWTH))): - ran 
= i + np.arange(0, 2*HFWTH+1) - P_x0 = np.ones((2*HFWTH+1))# Constant Term - P_x1 = t_in[ran]# Linear Term - # Annual term = 2*pi*t*harmonic - P_asin = np.sin(2*np.pi*t_in[ran]) - P_acos = np.cos(2*np.pi*t_in[ran]) - #Semi-Annual = 4*pi*t*harmonic - P_ssin = np.sin(4*np.pi*t_in[ran]) - P_scos = np.cos(4*np.pi*t_in[ran]) - # x0,x1,AS,AC,SS,SC - TMAT = np.array([P_x0, P_x1, P_asin, P_acos, P_ssin, P_scos]) - TMAT = np.transpose(TMAT) - # Least-Squares fitting - # (the [0] denotes coefficients output) - beta_mat = np.linalg.lstsq(TMAT,d_in[ran],rcond=-1)[0] - n_terms = len(beta_mat) - - if (DATA_ERR != 0): - # LEAST-SQUARES CASE WITH KNOWN AND EQUAL ERROR - P_err = DATA_ERR*np.ones((2*HFWTH+1)) - Hinv = np.linalg.inv(np.dot(np.transpose(TMAT),TMAT)) - # Normal Equations - NORMEQ = np.dot(Hinv,np.transpose(TMAT)) - beta_err = np.zeros((n_terms)) - for n in range(0,n_terms): - beta_err[n] = np.sqrt(np.sum((NORMEQ[n,:]*P_err)**2)) - else: - # Error Analysis - # Degrees of Freedom - nu = (2*HFWTH+1) - n_terms - # Mean square error - MSE = np.dot(np.transpose(d_in[ran] - np.dot(TMAT,beta_mat)), - (d_in[ran] - np.dot(TMAT,beta_mat)))/nu - # Covariance Matrix - # Multiplying the design matrix by itself - Hinv = np.linalg.inv(np.dot(np.transpose(TMAT),TMAT)) - # Taking the diagonal components of the cov matrix - hdiag = np.diag(Hinv) - - # STANDARD LEAST-SQUARES CASE - # Regression with Errors with Unknown Standard Deviations - # Student T-Distribution with D.O.F. nu - # t.ppf parallels tinv in matlab - tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu) - # beta_err is the error for each coefficient - # beta_err = t(nu,1-alpha/2)*standard error - st_err = np.sqrt(MSE*hdiag) - beta_err = tstar*st_err - - # Calculating the output components - # smoothed time series - dsmth[i] = np.dot(TMAT[HFWTH,0:SEAS],beta_mat[0:SEAS]) - dtrend[i] = np.copy(beta_mat[1])# Instantaneous data trend - derror[i] = np.copy(beta_err[1])# Error in trend - # seasonal component - dseason[i] = np.dot(TMAT[HFWTH,SEAS:],beta_mat[SEAS:]) - # annual component - AS,AC = beta_mat[SEAS:SEAS+2] - dannual[i] = np.dot(TMAT[HFWTH,SEAS:SEAS+2],[AS,AC]) - annphase[i] = np.arctan2(AC,AS)*180.0/np.pi - annamp[i] = np.sqrt(AS**2 + AC**2) - # semi-annual component - SS,SC = beta_mat[SEAS+2:SEAS+4] - dsemian[i] = np.dot(TMAT[HFWTH,SEAS+2:SEAS+4],[SS,SC]) - semiamp[i] = np.sqrt(SS**2 + SC**2) - semiphase[i] = np.arctan2(SC,SS)*180.0/np.pi - # noise component - dnoise[i] = d_in[i+HFWTH] - dsmth[i] - dseason[i] - # reduced time-series - dreduce[i] = d_in[i+HFWTH] - - return {'data':dsmth, 'trend':dtrend, 'error':derror, - 'seasonal':dseason, 'annual':dannual, 'annphase':annphase, - 'annamp':annamp, 'semiann':dsemian, 'semiamp':semiamp, - 'semiphase':semiphase, 'noise':dnoise, 'time':tout, 'reduce':dreduce} + warnings.filterwarnings("always") + warnings.warn("Deprecated. 
Please use gravity_toolkit.time_series instead", + DeprecationWarning) + # call renamed version to not break workflows + return gravity_toolkit.time_series.smooth(*args, **kwargs) diff --git a/scripts/calc_degree_one.py b/scripts/calc_degree_one.py index 5bae4602..ae1dc5be 100755 --- a/scripts/calc_degree_one.py +++ b/scripts/calc_degree_one.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" calc_degree_one.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Calculates degree 1 variations using GRACE coefficients of degree 2 and greater, and ocean bottom pressure variations from ECCO and OMCT/MPIOM @@ -131,7 +131,8 @@ time.py: utilities for calculating time operations read_GIA_model.py: reads harmonics for a glacial isostatic adjustment model read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995) - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials gauss_weights.py: Computes the Gaussian weights as a function of degree ocean_stokes.py: converts a land-sea mask to a series of spherical harmonics gen_stokes.py: converts a spatial field into a series of spherical harmonics @@ -156,6 +157,7 @@ https://doi.org/10.1029/2007JB005338 UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 09/2022: add option to replace degree 4 zonal harmonics with SLR diff --git a/scripts/calc_mascon.py b/scripts/calc_mascon.py index df058551..6d22c46f 100644 --- a/scripts/calc_mascon.py +++ b/scripts/calc_mascon.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" calc_mascon.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Calculates a time-series of regional mass anomalies through a least-squares mascon procedure from GRACE/GRACE-FO time-variable gravity data @@ -128,7 +128,7 @@ gauss_weights.py: Computes the Gaussian weights as a function of degree ocean_stokes.py: reads a land-sea mask and converts to spherical harmonics gen_stokes.py: converts a spatial field into spherical harmonic coefficients - tssmooth.py: smoothes a time-series using a 13-month Loess-type algorithm + time_series.smooth.py: smoothes a time-series using a Loess-type algorithm harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO destripe_harmonics.py: calculates the decorrelation (destriping) filter and filters the GRACE/GRACE-FO coefficients for striping errors @@ -156,6 +156,7 @@ https://doi.org/10.1029/2005GL025305 UPDATE HISTORY: + Updated 01/2023: refactored time series analysis functions Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 09/2022: add option to replace degree 4 zonal harmonics with SLR @@ -533,8 +534,8 @@ def calc_mascon(base_dir, PROC, DREL, DSET, LMAX, RAD, # calculate GRACE Error (Noise of smoothed time-series) # With Annual and Semi-Annual Terms val1 = getattr(GRACE_Ylms, csharm) - smth = gravtk.tssmooth(GRACE_Ylms.time, val1[l,m,:], - HFWTH=HFWTH) + smth = gravtk.time_series.smooth(GRACE_Ylms.time, + val1[l,m,:], HFWTH=HFWTH) # number of smoothed points nsmth = len(smth['data']) tsmth = np.mean(smth['time']) diff --git a/scripts/calc_sensitivity_kernel.py b/scripts/calc_sensitivity_kernel.py index d4e6c5ae..9a93a0a1 100644 --- a/scripts/calc_sensitivity_kernel.py +++ 
b/scripts/calc_sensitivity_kernel.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" calc_sensitivity_kernel.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Calculates spatial sensitivity kernels through a least-squares mascon procedure @@ -54,7 +54,8 @@ PROGRAM DEPENDENCIES: read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995) - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials gauss_weights.py: Computes the Gaussian weights as a function of degree ocean_stokes.py: reads a land-sea mask and converts to spherical harmonics gen_stokes.py: converts a spatial field into spherical harmonic coefficients @@ -82,6 +83,7 @@ https://doi.org/10.1029/2009GL039401 UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 07/2022: create mask for output gridded variables diff --git a/scripts/combine_harmonics.py b/scripts/combine_harmonics.py index 29a95843..df2277f9 100644 --- a/scripts/combine_harmonics.py +++ b/scripts/combine_harmonics.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" combine_harmonics.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Converts a file from the spherical harmonic domain into the spatial domain CALLING SEQUENCE: @@ -56,7 +56,8 @@ PROGRAM DEPENDENCIES: read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995) - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials gauss_weights.py: Computes the Gaussian weights as a function of degree ocean_stokes.py: reads a land-sea mask and converts to spherical harmonics harmonic_summation.py: calculates a spatial field from spherical harmonics @@ -68,6 +69,7 @@ utilities.py: download and management utilities for files UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 12/2022: single implicit import of gravity toolkit iterate over harmonics objects versus indexing Updated 11/2022: use f-strings for formatting verbose or ascii output diff --git a/scripts/convert_harmonics.py b/scripts/convert_harmonics.py index e3400d14..cd294a20 100644 --- a/scripts/convert_harmonics.py +++ b/scripts/convert_harmonics.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" convert_harmonics.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Converts a file from the spatial domain into the spherical harmonic domain CALLING SEQUENCE: @@ -50,7 +50,8 @@ PROGRAM DEPENDENCIES: read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995) gen_stokes.py: converts a spatial field into a series of spherical harmonics - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO destripe_harmonics.py: calculates the decorrelation (destriping) filter and filters the GRACE/GRACE-FO coefficients for striping errors @@ -59,6 +60,7 @@ utilities.py: download and management utilities for files UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 12/2022: single implicit import of gravity toolkit iterate over spatial objects versus indexing Updated 11/2022: 
use f-strings for formatting verbose or ascii output diff --git a/scripts/grace_spatial_error.py b/scripts/grace_spatial_error.py index 82dc7f8f..cf7a3b04 100755 --- a/scripts/grace_spatial_error.py +++ b/scripts/grace_spatial_error.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" grace_spatial_error.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Calculates the GRACE/GRACE-FO errors following Wahr et al. (2006) @@ -101,9 +101,10 @@ Replaces low-degree harmonics with SLR values (if specified) read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995) gauss_weights.py: Computes the Gaussian weights as a function of degree - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials units.py: class for converting spherical harmonic data to specific units - tssmooth.py: smoothes a time-series for seasonal effects + time_series.smooth.py: smoothes a time-series using a Loess-type algorithm harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO destripe_harmonics.py: calculates the decorrelation (destriping) filter and filters the GRACE/GRACE-FO coefficients for striping errors @@ -116,6 +117,8 @@ http://dx.doi.org/10.1029/2005GL025305 UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials + refactored time series analysis functions Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 09/2022: add option to replace degree 4 zonal harmonics with SLR @@ -308,8 +311,8 @@ def grace_spatial_error(base_dir, PROC, DREL, DSET, LMAX, RAD, # Constrained GRACE Error (Noise of smoothed time-series) # With Annual and Semi-Annual Terms val1 = getattr(GRACE_Ylms, csharm) - smth = gravtk.tssmooth(GRACE_Ylms.time, val1[l,m,:], - HFWTH=HFWTH) + smth = gravtk.time_series.smooth(GRACE_Ylms.time, + val1[l,m,:], HFWTH=HFWTH) # number of smoothed points nsmth = len(smth['data']) tsmth = np.mean(smth['time']) diff --git a/scripts/grace_spatial_maps.py b/scripts/grace_spatial_maps.py index 0e2ee200..5670be2e 100755 --- a/scripts/grace_spatial_maps.py +++ b/scripts/grace_spatial_maps.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" grace_spatial_maps.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Reads in GRACE/GRACE-FO spherical harmonic coefficients and exports monthly spatial fields @@ -133,7 +133,8 @@ Replaces low-degree harmonics with SLR values (if specified) read_GIA_model.py: reads harmonics for a glacial isostatic adjustment model read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995) - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials gauss_weights.py: Computes the Gaussian weights as a function of degree ocean_stokes.py: converts a land-sea mask to a series of spherical harmonics gen_stokes.py: converts a spatial field into a series of spherical harmonics @@ -147,6 +148,7 @@ utilities.py: download and management utilities for files UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 09/2022: add option to replace degree 4 zonal harmonics with SLR diff --git a/scripts/monte_carlo_degree_one.py b/scripts/monte_carlo_degree_one.py index 
3ecc8f94..d727a1b9 100644 --- a/scripts/monte_carlo_degree_one.py +++ b/scripts/monte_carlo_degree_one.py @@ -118,11 +118,12 @@ time.py: utilities for calculating time operations read_GIA_model.py: reads harmonics for a glacial isostatic adjustment model read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995) - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials gauss_weights.py: Computes the Gaussian weights as a function of degree gen_stokes.py: converts a spatial field into a series of spherical harmonics sea_level_equation.py: pseudo-spectral sea level equation solver - tssmooth.py: smoothes a time-series using a 13-month Loess-type algorithm + time_series.smooth.py: smoothes a time-series using a Loess-type algorithm units.py: class for converting GRACE/GRACE-FO Level-2 data to specific units harmonics.py: class for processing GRACE/GRACE-FO spherical harmonic data destripe_harmonics.py: calculates the decorrelation (destriping) filter @@ -146,6 +147,8 @@ https://doi.org/10.1029/2005GL025305 UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials + refactored time series analysis functions Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 09/2022: add option to replace degree 4 zonal harmonics with SLR @@ -494,7 +497,8 @@ def monte_carlo_degree_one(base_dir, PROC, DREL, LMAX, RAD, # calculate GRACE Error (Noise of smoothed time-series) # With Annual and Semi-Annual Terms val1 = getattr(GSM_Ylms, csharm) - smth = gravtk.tssmooth(tdec, val1[l,m,:], HFWTH=HFWTH) + smth = gravtk.time_series.smooth(tdec, val1[l,m,:], + HFWTH=HFWTH) # number of smoothed points nsmth = len(smth['data']) tsmth = np.mean(smth['time']) diff --git a/scripts/regress_grace_maps.py b/scripts/regress_grace_maps.py index 8bd6090b..8cb5f7ab 100755 --- a/scripts/regress_grace_maps.py +++ b/scripts/regress_grace_maps.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" regress_grace_maps.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Reads in GRACE/GRACE-FO spatial files from grace_spatial_maps.py and fits a regression model at each grid point @@ -54,12 +54,13 @@ https://www.h5py.org/ PROGRAM DEPENDENCIES: - tsregress.py: calculates trend coefficients using least-squares - tsamplitude.py: calculates the amplitude and phase of a harmonic function + time_series.regress.py: calculates trend coefficients using least-squares + time_series.amplitude.py: calculates the amplitude and phase of a harmonic spatial.py: spatial data class for reading, writing and processing data utilities.py: download and management utilities for files UPDATE HISTORY: + Updated 01/2023: refactored time series analysis functions Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 04/2022: use argparse descriptions within documentation @@ -250,7 +251,7 @@ def regress_grace_maps(LMAX, RAD, for i in range(nlat): for j in range(nlon): # Calculating the regression coefficients - tsbeta = gravtk.tsregress(grid.time, grid.data[i,j,:], + tsbeta = gravtk.time_series.regress(grid.time, grid.data[i,j,:], ORDER=ORDER, CYCLES=CYCLES, CONF=0.95) # save regression components for k in range(0, ncomp): @@ -309,7 +310,7 @@ def regress_grace_maps(LMAX, RAD, amp = dinput.zeros_like() ph = dinput.zeros_like() # calculating amplitude and 
phase of spatial field - amp.data,ph.data = gravtk.tsamplitude( + amp.data,ph.data = gravtk.time_series.amplitude( out.data[:,:,j], out.data[:,:,j+1] ) # convert phase from -180:180 to 0:360 diff --git a/scripts/run_sea_level_equation.py b/scripts/run_sea_level_equation.py index cd2887ee..1d9a7d19 100644 --- a/scripts/run_sea_level_equation.py +++ b/scripts/run_sea_level_equation.py @@ -1,6 +1,6 @@ #!/usr/bin/env python u""" -run_sea_level_equation.py (12/2022) +run_sea_level_equation.py (01/2023) Solves the sea level equation with the option of including polar motion feedback Uses a Clenshaw summation to calculate the spherical harmonic summation @@ -49,7 +49,8 @@ PROGRAM DEPENDENCIES: read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995) - plm_holmes.py: Computes fully normalized associated Legendre polynomials + associated_legendre.py: Computes fully normalized associated + Legendre polynomials sea_level_equation.py: pseudo-spectral sea level equation solver units.py: class for converting spherical harmonic data to specific units harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO @@ -67,6 +68,7 @@ Bollettino di Geodesia e Scienze (1982) UPDATE HISTORY: + Updated 01/2023: refactored associated legendre polynomials Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 04/2022: use wrapper function for reading load Love numbers diff --git a/scripts/scale_grace_maps.py b/scripts/scale_grace_maps.py index b0cd3f4c..45a9781e 100644 --- a/scripts/scale_grace_maps.py +++ b/scripts/scale_grace_maps.py @@ -1,7 +1,7 @@ #!/usr/bin/env python u""" scale_grace_maps.py -Written by Tyler Sutterley (12/2022) +Written by Tyler Sutterley (01/2023) Reads in GRACE/GRACE-FO spherical harmonic coefficients and exports monthly scaled spatial fields, estimated scaling errors, @@ -132,7 +132,7 @@ gen_stokes.py: converts a spatial field into spherical harmonic coefficients geocenter.py: converts between spherical harmonics and geocenter variations harmonic_summation.py: calculates a spatial field from spherical harmonics - tssmooth.py: smoothes a time-series using a 13-month Loess-type algorithm + time_series.smooth.py: smoothes a time-series using a Loess-type algorithm harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO destripe_harmonics.py: calculates the decorrelation (destriping) filter and filters the GRACE/GRACE-FO coefficients for striping errors @@ -155,6 +155,7 @@ https://doi.org/10.1029/2005GL025305 UPDATE HISTORY: + Updated 01/2023: refactored time series analysis functions Updated 12/2022: single implicit import of gravity toolkit Updated 11/2022: use f-strings for formatting verbose or ascii output Updated 09/2022: add option to replace degree 4 zonal harmonics with SLR @@ -433,8 +434,8 @@ def scale_grace_maps(base_dir, PROC, DREL, DSET, LMAX, RAD, # calculate GRACE Error (Noise of smoothed time-series) # With Annual and Semi-Annual Terms val1 = getattr(GRACE_Ylms, csharm) - smth = gravtk.tssmooth(GRACE_Ylms.time, val1[l,m,:], - HFWTH=HFWTH) + smth = gravtk.time_series.smooth(GRACE_Ylms.time, + val1[l,m,:], HFWTH=HFWTH) # number of smoothed points nsmth = len(smth['data']) tsmth = np.mean(smth['time']) diff --git a/test/test_download_and_read.py b/test/test_download_and_read.py index 0506e774..a68261bd 100644 --- a/test/test_download_and_read.py +++ b/test/test_download_and_read.py @@ -9,10 +9,7 @@ import inspect import warnings import posixpath -import 
gravity_toolkit.geocenter -import gravity_toolkit.utilities -from gravity_toolkit.read_gfc_harmonics import read_gfc_harmonics -from gravity_toolkit.read_GRACE_harmonics import read_GRACE_harmonics +import gravity_toolkit as gravtk from read_GRACE_geocenter.read_GRACE_geocenter import read_GRACE_geocenter # PURPOSE: Download a GRACE file from PO.DAAC and check that read program runs @@ -20,9 +17,9 @@ def test_podaac_download_and_read(username,webdav): HOST=['https://podaac-tools.jpl.nasa.gov','drive','files','allData','grace', 'L2','CSR','RL06','GSM-2_2002095-2002120_GRAC_UTCSR_BA01_0600.gz'] # download and read as virtual file object - FILE = gravity_toolkit.utilities.from_drive(HOST,username=username, + FILE = gravtk.utilities.from_drive(HOST,username=username, password=webdav,verbose=True) - Ylms = read_GRACE_harmonics(FILE, 60) + Ylms = gravtk.read_GRACE_harmonics(FILE, 60) keys = ['time', 'start', 'end', 'clm', 'slm', 'eclm', 'eslm', 'header'] test = dict(start=2452369.5, end=2452394.5) assert all((key in Ylms.keys()) for key in keys) @@ -34,8 +31,8 @@ def test_gfz_ftp_download_and_read(): HOST=['isdcftp.gfz-potsdam.de','grace','Level-2','CSR','RL06', 'GSM-2_2002095-2002120_GRAC_UTCSR_BA01_0600.gz'] # download and read as virtual file object - FILE = gravity_toolkit.utilities.from_ftp(HOST,verbose=True) - Ylms = read_GRACE_harmonics(FILE, 60) + FILE = gravtk.utilities.from_ftp(HOST,verbose=True) + Ylms = gravtk.read_GRACE_harmonics(FILE, 60) keys = ['time', 'start', 'end', 'clm', 'slm', 'eclm', 'eslm', 'header'] test = dict(start=2452369.5, end=2452394.5) assert all((key in Ylms.keys()) for key in keys) @@ -48,18 +45,18 @@ def test_gfz_icgem_costg_download_and_read(): try: HOST=['icgem.gfz-potsdam.de','02_COST-G','Grace-FO', 'GSM-2_2018152-2018181_GRFO_COSTG_BF01_0100.gfc'] - FILE = gravity_toolkit.utilities.from_ftp(HOST,verbose=True) + FILE = gravtk.utilities.from_ftp(HOST,verbose=True) except: pass # attempt to download from http server try: HOST=['http://icgem.gfz-potsdam.de','getseries','02_COST-G', 'Grace-FO','GSM-2_2018152-2018181_GRFO_COSTG_BF01_0100.gfc'] - FILE = gravity_toolkit.utilities.from_http(HOST,verbose=True) + FILE = gravtk.utilities.from_http(HOST,verbose=True) except: return # read as virtual file object - Ylms = read_GRACE_harmonics(FILE, 60) + Ylms = gravtk.read_GRACE_harmonics(FILE, 60) keys = ['time', 'start', 'end', 'clm', 'slm', 'eclm', 'eslm', 'header'] test = dict(start=2458270.5, end=2458299.5) assert all((key in Ylms.keys()) for key in keys) @@ -71,13 +68,13 @@ def test_esa_swarm_download_and_read(): # build url for Swarm file HOST='https://swarm-diss.eo.esa.int' swarm_file='SW_OPER_EGF_SHA_2__20131201T000000_20131231T235959_0101.ZIP' - parameters = gravity_toolkit.utilities.urlencode({'file': + parameters = gravtk.utilities.urlencode({'file': posixpath.join('swarm','Level2longterm','EGF',swarm_file)}) remote_file = [HOST,'?do=download&{0}'.format(parameters)] # download and read as virtual file object - gravity_toolkit.utilities.from_http(remote_file, + gravtk.utilities.from_http(remote_file, local=swarm_file,verbose=True) - Ylms = read_gfc_harmonics(swarm_file) + Ylms = gravtk.read_gfc_harmonics(swarm_file) keys = ['time', 'start', 'end', 'clm', 'slm', 'eclm', 'eslm'] test = dict(start=2456627.5, end=2456658.499988426) assert all((key in Ylms.keys()) for key in keys) @@ -92,8 +89,8 @@ def test_itsg_graz_download_and_read(): 'ITSG-Grace_operational','monthly','monthly_n60', 'ITSG-Grace_operational_n60_2018-06.gfc'] # download and read as virtual file 
object - gravity_toolkit.utilities.from_http(HOST,local=HOST[-1],verbose=True) - Ylms = read_gfc_harmonics(HOST[-1]) + gravtk.utilities.from_http(HOST,local=HOST[-1],verbose=True) + Ylms = gravtk.read_gfc_harmonics(HOST[-1]) keys = ['time', 'start', 'end', 'clm', 'slm', 'eclm', 'eslm'] test = dict(start=2458270.5, end=2458300.499988426) assert all((key in Ylms.keys()) for key in keys) @@ -108,7 +105,7 @@ def download_geocenter(): # download geocenter files to filepath filename = inspect.getframeinfo(inspect.currentframe()).filename filepath = os.path.dirname(os.path.abspath(filename)) - gravity_toolkit.utilities.from_figshare(filepath,verbose=True) + gravtk.utilities.from_figshare(filepath,verbose=True) # run tests yield # clean up @@ -132,7 +129,7 @@ def test_geocenter_read(PROC, DREL): keys = ['time', 'JD', 'month', 'C10', 'C11', 'S11','header'] assert all((key in DEG1.keys()) for key in keys) # test geocenter class - DATA = gravity_toolkit.geocenter().from_UCI(geocenter_file) + DATA = gravtk.geocenter().from_UCI(geocenter_file) for key in ['time', 'month', 'C10', 'C11', 'S11']: val = getattr(DATA, key) assert all(val == DEG1[key]) diff --git a/test/test_harmonics.py b/test/test_harmonics.py index 1065e888..3b828153 100755 --- a/test/test_harmonics.py +++ b/test/test_harmonics.py @@ -6,31 +6,31 @@ 2. Compares output spherical harmonics with validation dataset 3. Combines harmonics to calculate a truncated and smoothed spatial dataset 4. Compares output smoothed spatial distribution with validation dataset +Tests harmonic objects flatten, expansion and iteration routines """ import os import warnings import pytest import inspect import numpy as np -import gravity_toolkit.read_love_numbers -import gravity_toolkit.plm_mohlenkamp -import gravity_toolkit.gen_stokes -import gravity_toolkit.harmonic_summation -import gravity_toolkit.harmonics -import gravity_toolkit.spatial -from gravity_toolkit.utilities import get_data_path +import gravity_toolkit as gravtk +import matplotlib.pyplot as plt +# path to test files +filename = inspect.getframeinfo(inspect.currentframe()).filename +filepath = os.path.dirname(os.path.abspath(filename)) + +# PURPOSE: test harmonic conversion programs def test_harmonics(): # path to load Love numbers file - filename = inspect.getframeinfo(inspect.currentframe()).filename - filepath = os.path.dirname(os.path.abspath(filename)) - love_numbers_file = get_data_path(['data','love_numbers']) + love_numbers_file = gravtk.utilities.get_data_path( + ['data','love_numbers']) # read load Love numbers - hl,kl,ll = gravity_toolkit.read_love_numbers(love_numbers_file) + hl,kl,ll = gravtk.read_love_numbers(love_numbers_file) # read input spatial distribution file distribution_file = 'out.green_ice.grid.0.5.2008.cmh20.gz' - input_distribution = gravity_toolkit.spatial(spacing=[0.5,0.5], nlat=361, + input_distribution = gravtk.spatial(spacing=[0.5,0.5], nlat=361, nlon=721, extent=[0,360.0,-90,90]).from_ascii( os.path.join(filepath,distribution_file),date=False,compression='gzip') @@ -46,15 +46,16 @@ def test_harmonics(): theta[theta > np.arccos(-0.9999999)] = np.arccos(-0.9999999) theta[theta < np.arccos(0.9999999)] = np.arccos(0.9999999) # calculate Legendre polynomials with Martin Mohlenkamp's relation - PLM = gravity_toolkit.plm_mohlenkamp(LMAX, np.cos(theta)) + PLM,dPLM = gravtk.associated_legendre(LMAX, np.cos(theta), + method='mohlenkamp') # convert to spherical harmonics - test_Ylms = gravity_toolkit.gen_stokes(input_distribution.data, + test_Ylms = 
gravtk.gen_stokes(input_distribution.data, input_distribution.lon, input_distribution.lat, UNITS=1, LMAX=LMAX, PLM=PLM, LOVE=(hl,kl,ll)) # read harmonics from file harmonics_file = 'out.geoid.green_ice.0.5.2008.60.gz' - valid_Ylms = gravity_toolkit.harmonics(lmax=LMAX, mmax=LMAX).from_ascii( + valid_Ylms = gravtk.harmonics(lmax=LMAX, mmax=LMAX).from_ascii( os.path.join(filepath,harmonics_file),date=False,compression='gzip') # check that harmonic data is equal to machine precision @@ -65,21 +66,71 @@ def test_harmonics(): assert np.all(np.abs(difference_Ylms.slm) < harmonic_eps) # cmwe, centimeters water equivalent - dfactor = gravity_toolkit.units(lmax=LMAX).harmonic(hl,kl,ll) - wt = 2.0*np.pi*gravity_toolkit.gauss_weights(RAD,LMAX) + dfactor = gravtk.units(lmax=LMAX).harmonic(hl,kl,ll) + wt = 2.0*np.pi*gravtk.gauss_weights(RAD,LMAX) test_Ylms.convolve(dfactor.cmwe*wt) # convert harmonics back to spatial domain at same grid spacing - test_distribution = gravity_toolkit.harmonic_summation(test_Ylms.clm, + test_distribution = gravtk.harmonic_summation(test_Ylms.clm, + test_Ylms.slm, input_distribution.lon, input_distribution.lat, + LMAX=LMAX, PLM=PLM).T + # convert harmonics using fast-fourier transform method + test_transform = gravtk.harmonic_transform(test_Ylms.clm, test_Ylms.slm, input_distribution.lon, input_distribution.lat, LMAX=LMAX, PLM=PLM).T # read input and output spatial distribution files distribution_file = 'out.combine.green_ice.0.5.2008.60.gz' - output_distribution = gravity_toolkit.spatial(spacing=[0.5,0.5], nlat=361, + output_distribution = gravtk.spatial(spacing=[0.5,0.5], nlat=361, nlon=721, extent=[0,360.0,-90,90]).from_ascii( os.path.join(filepath,distribution_file),date=False,compression='gzip') # check that data is equal to machine precision difference_distribution = test_distribution - output_distribution.data + difference_transform = test_transform - output_distribution.data distribution_eps = np.finfo(np.float16).eps assert np.all(np.abs(difference_distribution) < distribution_eps) + assert np.all(np.abs(difference_transform) < distribution_eps) + +# PURPOSE: test harmonic objects +def test_iterate(): + # maximum spherical harmonic degree and order + LMAX = 60 + MMAX = 30 + # number of harmonics + n_harm = (LMAX**2 + 3*LMAX - (LMAX-MMAX)**2 - (LMAX-MMAX))//2 + 1 + # number of time points for test + nt = 12 + + # create flattened test harmonics + flat_Ylms = gravtk.harmonics(lmax=LMAX, mmax=MMAX) + ll,mm = np.meshgrid(np.arange(LMAX+1), np.arange(MMAX+1)) + ii,jj = np.triu_indices(MMAX+1, k=0, m=LMAX+1) + flat_Ylms.l = ll[ii,jj].astype(np.int64) + flat_Ylms.m = mm[ii,jj].astype(np.int64) + # add date variables + flat_Ylms.month = 1 + np.arange(nt) + flat_Ylms.time = 2002.0 + (flat_Ylms.month - 0.5)/12.0 + # create random harmonics + flat_Ylms.clm = np.random.rand(n_harm,nt) + flat_Ylms.slm = np.random.rand(n_harm,nt) + # reshape harmonics object + valid_Ylms = flat_Ylms.expand(date=True) + + # iterate over harmonics + for i,test in enumerate(valid_Ylms): + valid = valid_Ylms.index(i) + assert np.isclose(valid.l, test.l).all() + assert np.isclose(valid.m, test.m).all() + assert np.isclose(valid.month, test.month).all() + assert np.isclose(valid.time, test.time).all() + assert np.isclose(valid.clm, test.clm).all() + assert np.isclose(valid.slm, test.slm).all() + + # flatten harmonics objects + reshaped_Ylms = valid_Ylms.flatten(date=True) + assert np.isclose(reshaped_Ylms.l, flat_Ylms.l).all() + assert np.isclose(reshaped_Ylms.m, flat_Ylms.m).all() + assert 
np.isclose(reshaped_Ylms.month, flat_Ylms.month).all() + assert np.isclose(reshaped_Ylms.time, flat_Ylms.time).all() + assert np.isclose(reshaped_Ylms.clm, flat_Ylms.clm).all() + assert np.isclose(reshaped_Ylms.slm, flat_Ylms.slm).all() diff --git a/test/test_legendre.py b/test/test_legendre.py index a4371bcd..b5fb5e9e 100644 --- a/test/test_legendre.py +++ b/test/test_legendre.py @@ -3,11 +3,11 @@ test_legendre.py (11/2021) """ import numpy as np -import gravity_toolkit +import gravity_toolkit as gravtk # PURPOSE: test unnormalized Legendre polynomials def test_unnormalized(l=3, x=[-1.0, -0.9, -0.8]): - obs = gravity_toolkit.legendre(l, x) + obs = gravtk.legendre(l, x) expected = np.array([ [-1.00000, -0.47250, -0.08000], [0.00000, -1.99420, -1.98000], @@ -18,7 +18,7 @@ def test_unnormalized(l=3, x=[-1.0, -0.9, -0.8]): # PURPOSE: test fully-normalized Legendre polynomials def test_normalized(l=3, x=[-1.0, -0.9, -0.8]): - obs = gravity_toolkit.legendre(l, x, NORMALIZE=True) + obs = gravtk.legendre(l, x, NORMALIZE=True) expected = np.array([ [-2.64575, -1.25012, -0.21166], [-0.00000, 2.15398, 2.13864], @@ -29,7 +29,7 @@ def test_normalized(l=3, x=[-1.0, -0.9, -0.8]): # PURPOSE: test fully-normalized zonal Legendre polynomials def test_zonal(l=3, x=[-1.0, -0.9, -0.8]): - obs,_ = gravity_toolkit.legendre_polynomials(l, x) + obs,_ = gravtk.legendre_polynomials(l, x) expected = np.array([ [1.00000, 1.00000, 1.00000], [-1.73205, -1.55885, -1.38564], @@ -40,11 +40,11 @@ def test_zonal(l=3, x=[-1.0, -0.9, -0.8]): # PURPOSE: compare fully-normalized Legendre polynomials def test_plms(l=240, x=0.1): - obs = gravity_toolkit.legendre(l, x, NORMALIZE=True) + obs = gravtk.legendre(l, x, NORMALIZE=True) # calculate associated Legendre polynomials - holmes,_ = gravity_toolkit.plm_holmes(l, x) - colombo,_ = gravity_toolkit.plm_colombo(l, x) - mohlenkamp = gravity_toolkit.plm_mohlenkamp(l, x) + holmes,_ = gravtk.plm_holmes(l, x) + colombo,_ = gravtk.plm_colombo(l, x) + mohlenkamp,_ = gravtk.plm_mohlenkamp(l, x) # compare Legendre polynomials assert np.isclose(obs, holmes[l,:]).all() assert np.isclose(holmes, colombo).all() diff --git a/test/test_love_numbers.py b/test/test_love_numbers.py index b77ed8b7..f78485ff 100644 --- a/test/test_love_numbers.py +++ b/test/test_love_numbers.py @@ -1,7 +1,8 @@ #!/usr/bin/env python u""" -test_love_numbers.py (12/2020) +test_love_numbers.py (01/2023) UPDATE HISTORY: + Updated 01/2023: single implicit import of gravity toolkit Updated 12/2020: add linear interpolation to degree 1000 add tests for Gegout and Wang load Love number sets Written 08/2020 @@ -9,8 +10,7 @@ import os import warnings import pytest -import gravity_toolkit.read_love_numbers -from gravity_toolkit.utilities import get_data_path +import gravity_toolkit as gravtk # PURPOSE: Define Load Love Numbers for lower degree harmonics def get_love_numbers(): @@ -27,9 +27,10 @@ def test_love_numbers(): # valid low degree Love numbers for reference frame CF VALID = get_love_numbers() # path to load Love numbers file - love_numbers_file = get_data_path(['data','love_numbers']) + love_numbers_file = gravtk.utilities.get_data_path( + ['data','love_numbers']) # read load Love numbers and convert to reference frame CF - TEST = gravity_toolkit.read_love_numbers(love_numbers_file, + TEST = gravtk.read_love_numbers(love_numbers_file, LMAX=1000, HEADER=2, FORMAT='dict', REFERENCE='CF') assert all((v==t).all() for key in ['hl','kl','ll'] for v,t in zip(VALID[key],TEST[key])) @@ -38,19 +39,21 @@ def test_love_numbers(): # 
PURPOSE: Check that Gegout (2005) Load Love Numbers can be read def test_Gegout_love_numbers(): # path to load Love numbers file - love_numbers_file = get_data_path(['data','Load_Love2_CE.dat']) + love_numbers_file = gravtk.utilities.get_data_path( + ['data','Load_Love2_CE.dat']) COLUMNS = ['l','hl','ll','kl'] # read load Love numbers and convert to reference frame CM - TEST = gravity_toolkit.read_love_numbers(love_numbers_file, + TEST = gravtk.read_love_numbers(love_numbers_file, HEADER=3, COLUMNS=COLUMNS, FORMAT='dict', REFERENCE='CM') assert (TEST['l'].max() == 1024) # PURPOSE: Check that Wang et al. (2012) Load Love Numbers can be read def test_Wang_love_numbers(): # path to load Love numbers file (truncated from degree 46341) - love_numbers_file = get_data_path(['data','PREM-LLNs-truncated.dat']) + love_numbers_file = gravtk.utilities.get_data_path( + ['data','PREM-LLNs-truncated.dat']) COLUMNS = ['l','hl','ll','kl','nl','nk'] # read load Love numbers and convert to reference frame CE - TEST = gravity_toolkit.read_love_numbers(love_numbers_file, + TEST = gravtk.read_love_numbers(love_numbers_file, HEADER=1, COLUMNS=COLUMNS, FORMAT='dict', REFERENCE='CE') assert (TEST['l'].max() == 5000) diff --git a/test/test_point_masses.py b/test/test_point_masses.py index a0b737d2..09f38dbc 100644 --- a/test/test_point_masses.py +++ b/test/test_point_masses.py @@ -1,13 +1,13 @@ #!/usr/bin/env python u""" -test_point_masses.py (02/2021) +test_point_masses.py (01/2023) +UPDATE HISTORY: + Updated 01/2023: single implicit import of gravity toolkit + Written 02/2021 """ import pytest import numpy as np -from gravity_toolkit.utilities import get_data_path -from gravity_toolkit.read_love_numbers import read_love_numbers -from gravity_toolkit.gen_point_load import gen_point_load -from gravity_toolkit.gen_stokes import gen_stokes +import gravity_toolkit as gravtk # parameterize the number of point masses @pytest.mark.parametrize("NPTS", np.random.randint(2,2000,size=1)) @@ -31,14 +31,15 @@ def test_point_masses(NPTS): data[indy,indx] += MASS[i] # path to load Love numbers file - love_numbers_file = get_data_path(['data','love_numbers']) + love_numbers_file = gravtk.utilities.get_data_path( + ['data','love_numbers']) # read load Love numbers - hl,kl,ll = read_love_numbers(love_numbers_file) + hl,kl,ll = gravtk.read_love_numbers(love_numbers_file) # calculate harmonics and degree amplitudes for each case - grid_Ylms = gen_stokes(data, lon, lat, LMAX=60, UNITS=2, LOVE=(hl,kl,ll)) - grid_Ylms.amplitude() - point_Ylms = gen_point_load(MASS, LON, LAT, LMAX=60, UNITS=2, LOVE=(hl,kl,ll)) - point_Ylms.amplitude() + grid_Ylms = gravtk.gen_stokes(data, lon, lat, + LMAX=60, UNITS=2, LOVE=(hl,kl,ll)) + point_Ylms = gravtk.gen_point_load(MASS, LON, LAT, + LMAX=60, UNITS=2, LOVE=(hl,kl,ll)) # check that harmonic data is equal to machine precision difference_Ylms = grid_Ylms.copy() @@ -46,4 +47,5 @@ def test_point_masses(NPTS): harmonic_eps = np.finfo(np.float32).eps assert np.all(np.abs(difference_Ylms.clm) < harmonic_eps) # verify that the degree amplitudes are within tolerance - assert np.all(np.abs(grid_Ylms.amp - point_Ylms.amp) < harmonic_eps) + difference_amplitude = grid_Ylms.amplitude - point_Ylms.amplitude + assert np.all(np.abs(difference_amplitude) < harmonic_eps) diff --git a/test/test_time.py b/test/test_time.py index a7c1cad8..66bc65eb 100644 --- a/test/test_time.py +++ b/test/test_time.py @@ -1,9 +1,10 @@ #!/usr/bin/env python u""" -test_time.py (0t/2020) +test_time.py (01/2023) Verify time conversion and 
utility functions UPDATE HISTORY: + Updated 01/2023: single implicit import of gravity toolkit Updated 05/2021: define int/float precision to prevent deprecation warning Updated 02/2021: added function to test GRACE months adjustments test date parser for cases when only a date and no units @@ -12,8 +13,7 @@ import pytest import warnings import numpy as np -import gravity_toolkit.time -import gravity_toolkit.utilities +import gravity_toolkit as gravtk # parameterize calendar dates @pytest.mark.parametrize("YEAR", np.random.randint(1992,2020,size=2)) @@ -25,18 +25,18 @@ def test_julian(YEAR,MONTH): dpm_leap = np.array([31,29,31,30,31,30,31,31,30,31,30,31]) dpm_stnd = np.array([31,28,31,30,31,30,31,31,30,31,30,31]) DPM = dpm_stnd if np.mod(YEAR,4) else dpm_leap - assert (np.sum(DPM) == gravity_toolkit.time.calendar_days(YEAR).sum()) + assert (np.sum(DPM) == gravtk.time.calendar_days(YEAR).sum()) # calculate Modified Julian Day (MJD) from calendar date DAY = np.random.randint(1,DPM[MONTH-1]+1) HOUR = np.random.randint(0,23+1) MINUTE = np.random.randint(0,59+1) SECOND = 60.0*np.random.random_sample(1) - MJD = gravity_toolkit.time.convert_calendar_dates(YEAR, MONTH, DAY, + MJD = gravtk.time.convert_calendar_dates(YEAR, MONTH, DAY, hour=HOUR, minute=MINUTE, second=SECOND, epoch=(1858,11,17,0,0,0)) # convert MJD to calendar date JD = np.squeeze(MJD) + 2400000.5 - YY,MM,DD,HH,MN,SS = gravity_toolkit.time.convert_julian(JD, + YY,MM,DD,HH,MN,SS = gravtk.time.convert_julian(JD, format='tuple', astype=np.float64) # assert dates eps = np.finfo(np.float16).eps @@ -57,14 +57,14 @@ def test_decimal_dates(YEAR,MONTH): dpm_leap = np.array([31,29,31,30,31,30,31,31,30,31,30,31]) dpm_stnd = np.array([31,28,31,30,31,30,31,31,30,31,30,31]) DPM = dpm_stnd if np.mod(YEAR,4) else dpm_leap - assert (np.sum(DPM) == gravity_toolkit.time.calendar_days(YEAR).sum()) + assert (np.sum(DPM) == gravtk.time.calendar_days(YEAR).sum()) # calculate Modified Julian Day (MJD) from calendar date DAY = np.random.randint(1,DPM[MONTH-1]+1) HOUR = np.random.randint(0,23+1) MINUTE = np.random.randint(0,59+1) SECOND = 60.0*np.random.random_sample(1) # calculate year-decimal time - tdec = gravity_toolkit.time.convert_calendar_decimal(YEAR, MONTH, day=DAY, + tdec = gravtk.time.convert_calendar_decimal(YEAR, MONTH, day=DAY, hour=HOUR, minute=MINUTE, second=SECOND) # day of the year 1 = Jan 1, 365 = Dec 31 (std) day_temp = np.mod(tdec, 1)*np.sum(DPM) @@ -93,26 +93,26 @@ def test_decimal_dates(YEAR,MONTH): # PURPOSE: test UNIX time def test_unix_time(): # ATLAS Standard Data Epoch - UNIX = gravity_toolkit.utilities.get_unix_time('2018-01-01 00:00:00') + UNIX = gravtk.utilities.get_unix_time('2018-01-01 00:00:00') assert (UNIX == 1514764800) # PURPOSE: test parsing time strings def test_parse_date_string(): # time string for Modified Julian Days time_string = 'days since 1858-11-17T00:00:00' - epoch,to_secs = gravity_toolkit.time.parse_date_string(time_string) + epoch,to_secs = gravtk.time.parse_date_string(time_string) # check the epoch and the time unit conversion factors assert np.all(epoch == [1858,11,17,0,0,0]) assert (to_secs == 86400.0) # time string for ATLAS Standard Data Epoch time_string = 'seconds since 2018-01-01T00:00:00' - epoch,to_secs = gravity_toolkit.time.parse_date_string(time_string) + epoch,to_secs = gravtk.time.parse_date_string(time_string) # check the epoch and the time unit conversion factors assert np.all(epoch == [2018,1,1,0,0,0]) assert (to_secs == 1.0) # time string for unitless case time_string = 
'2000-01-01T12:00:00' - epoch,to_secs = gravity_toolkit.time.parse_date_string(time_string) + epoch,to_secs = gravtk.time.parse_date_string(time_string) # check the epoch and the time unit conversion factors assert np.all(epoch == [2000,1,1,12,0,0]) assert (to_secs == 0.0) @@ -190,5 +190,5 @@ def test_adjust_months(PROC): temp = np.array(12.0*(tdec-2002.0)+1,dtype='i') assert np.any(temp != months.astype('i')) # run months adjustment to fix special cases - temp = gravity_toolkit.time.adjust_months(temp) + temp = gravtk.time.adjust_months(temp) assert np.all(temp == months.astype('i')) diff --git a/version.txt b/version.txt index da275ab7..9084fa2f 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -1.0.2.7 +1.1.0
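
The gutted tssmooth.py above is reduced to a thin wrapper that forwards to the new subpackage. A minimal sketch of that deprecation pattern, assuming only the module boilerplate around the forwarding call shown in the hunk:

.. code-block:: python

    # sketch of the deprecation shim pattern used for the renamed
    # time series functions (tssmooth -> time_series.smooth, etc.);
    # the warning and forwarding call are from the hunk above, the
    # surrounding module boilerplate is assumed for illustration
    import warnings
    import gravity_toolkit.time_series

    def tssmooth(*args, **kwargs):
        """Deprecated wrapper around gravity_toolkit.time_series.smooth"""
        warnings.filterwarnings("always")
        warnings.warn("Deprecated. Please use gravity_toolkit.time_series instead",
            DeprecationWarning)
        # call renamed version to not break workflows
        return gravity_toolkit.time_series.smooth(*args, **kwargs)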
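
Callers migrate from gravtk.tssmooth to gravtk.time_series.smooth, as in the calc_mascon.py, grace_spatial_error.py and scale_grace_maps.py hunks. A usage sketch on a synthetic series; the input data are made up for illustration, while the HFWTH keyword and the 'data' and 'time' keys are the ones read by those scripts:

.. code-block:: python

    import numpy as np
    import gravity_toolkit as gravtk

    # synthetic monthly series (illustrative): trend + annual cycle + noise
    t = 2002.0 + np.arange(240)/12.0
    d = 0.5*(t - t[0]) + 2.0*np.sin(2.0*np.pi*t) + 0.1*np.random.randn(t.size)

    # Loess-type smoother (a half-width of 6 months removes the annual signal)
    smth = gravtk.time_series.smooth(t, d, HFWTH=6)
    # number of smoothed points and mean time, as computed in the scripts
    nsmth = len(smth['data'])
    tsmth = np.mean(smth['time'])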
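
regress_grace_maps.py likewise swaps tsregress and tsamplitude for their time_series counterparts. A hedged sketch of the two calls; the ORDER, CYCLES and CONF keywords follow the hunk, but the cycle periods and the coefficient values passed to amplitude here are illustrative assumptions:

.. code-block:: python

    import numpy as np
    import gravity_toolkit as gravtk

    # synthetic series (illustrative): linear trend plus an annual cosine
    t = 2002.0 + np.arange(240)/12.0
    d = 1.2*(t - t.mean()) + 3.0*np.cos(2.0*np.pi*t)

    # least-squares fit of a linear trend with periodic terms
    # (CYCLES assumed here to be annual and semi-annual periods in years)
    tsbeta = gravtk.time_series.regress(t, d,
        ORDER=1, CYCLES=[0.5, 1.0], CONF=0.95)

    # amplitude and phase of a harmonic from its sine and cosine
    # coefficients (values made up for illustration)
    bsin, bcos = 2.0, 1.5
    amp, ph = gravtk.time_series.amplitude(bsin, bcos)
    # convert phase from -180:180 to 0:360 as in regress_grace_maps.py
    ph = ph + 360.0 if (ph < 0) else ph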
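
The plm_colombo, plm_holmes and plm_mohlenkamp modules are consolidated behind associated_legendre, which returns both the polynomials and their first derivatives; test_harmonics.py selects the algorithm with the method keyword, shown here with 'mohlenkamp' (the other method names presumably mirror the wrapped modules):

.. code-block:: python

    import numpy as np
    import gravity_toolkit as gravtk

    LMAX = 60
    # interior colatitudes only; the poles are excluded,
    # as in the clamping done by test_harmonics.py
    theta = np.linspace(0.01, np.pi - 0.01, 181)

    # fully normalized associated Legendre polynomials and derivatives
    PLM, dPLM = gravtk.associated_legendre(LMAX, np.cos(theta),
        method='mohlenkamp')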
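
The new test_iterate relies on a closed form for the number of spherical harmonic (degree, order) pairs with m <= l <= LMAX and m <= MMAX; a brute-force check of that count:

.. code-block:: python

    LMAX, MMAX = 60, 30
    # closed form from test_iterate for the truncated triangle of harmonics
    n_harm = (LMAX**2 + 3*LMAX - (LMAX-MMAX)**2 - (LMAX-MMAX))//2 + 1
    # brute-force count over the same (l, m) indices
    count = sum(1 for m in range(MMAX + 1) for l in range(m, LMAX + 1))
    assert n_harm == count  # both give 1426 for LMAX=60, MMAX=30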