Skip to content

Commit

Permalink
Merge pull request #65 from GeoscienceAustralia/NPI-3670-sp3-bugfixes…
Browse files Browse the repository at this point in the history
…-and-unit-tests

Npi 3670 sp3 bugfixes and unit tests
  • Loading branch information
treefern authored Jan 7, 2025
2 parents 19d8495 + dc4f28f commit f96026c
Show file tree
Hide file tree
Showing 3 changed files with 109 additions and 4 deletions.
43 changes: 39 additions & 4 deletions gnssanalysis/gn_io/sp3.py
Original file line number Diff line number Diff line change
Expand Up @@ -286,6 +286,7 @@ def read_sp3(
pOnly: bool = True,
nodata_to_nan: bool = True,
drop_offline_sats: bool = False,
continue_on_ep_ev_encountered: bool = True,
) -> _pd.DataFrame:
"""Reads an SP3 file and returns the data as a pandas DataFrame.
Expand All @@ -296,6 +297,9 @@ def read_sp3(
and converts 999999* (indicating nodata) to NaN in the SP3 CLK column. Defaults to True.
:param bool drop_offline_sats: If True, drops satellites from the DataFrame if they have ANY missing (nodata)
values in the SP3 POS column.
:param bool continue_on_ep_ev_encountered: If True, logs a warning and continues if EV or EP rows are found in
the input SP3. These are currently unsupported by this function and will be ignored. Set to false to
raise a NotImplementedError instead.
:return pandas.DataFrame: The SP3 data as a DataFrame.
:raise FileNotFoundError: If the SP3 file specified by sp3_path_or_bytes does not exist.
:raise Exception: For other errors reading SP3 file/bytes
Expand Down Expand Up @@ -339,18 +343,37 @@ def read_sp3(
sp3_pos_nodata_to_nan(sp3_df)
# Convert 999999* (which indicates nodata in the SP3 CLK column) to NaN
sp3_clock_nodata_to_nan(sp3_df)

# P/V/EP/EV flag handling is currently incomplete. The current implementation truncates to the first letter,
# so can't parse nor differentiate between EP and EV!
if "E" in sp3_df.index.get_level_values("PV_FLAG").unique():
if not continue_on_ep_ev_encountered:
raise NotImplementedError("EP and EV flag rows are currently not supported")
logger.warning("EP / EV flag rows encountered. These are not yet supported, and will be ignored!")

    # Check very top of the header to see if this SP3 is Position only, or also contains Velocities
if pOnly or parsed_header.HEAD.loc["PV_FLAG"] == "P":
sp3_df = sp3_df.loc[sp3_df.index.get_level_values("PV_FLAG") == "P"]
sp3_df.index = sp3_df.index.droplevel("PV_FLAG")
# TODO consider exception handling if EP rows encountered
else:
# DF contains interlaced Position & Velocity measurements for each sat. Split the data based on this, and
# recombine, turning Pos and Vel into separate columns.
position_df = sp3_df.xs("P", level="PV_FLAG")
velocity_df = sp3_df.xs("V", level="PV_FLAG")
# TODO consider exception handling if EV rows encountered

# NOTE: care must now be taken to ensure this split and merge operation does not duplicate the FLAGS columns!

# Remove the (per sat per epoch, not per pos / vel section) FLAGS from one of our DFs so when we concat them
# back together we don't have duplicated flags.
# The param axis=1, removes from columns rather than indexes (i.e. we want to drop the column from the data,
# not drop all the data to which the column previously applied!)
# We drop from pos rather than vel, because vel is on the right hand side, so the layout resembles the
# layout of an SP3 file better. Functionally, this shouldn't make a difference.
position_df = position_df.drop(axis=1, columns="FLAGS")

velocity_df.columns = SP3_VELOCITY_COLUMNS
sp3_df = _pd.concat([position_df, velocity_df], axis=1)

# sp3_df.drop(columns="PV_FLAG", inplace=True)
# Check for duplicate epochs, dedupe and log warning
    if sp3_df.index.has_duplicates:  # a literally free check
# This typically runs in sub ms time. Marks all but first instance as duped:
Expand Down Expand Up @@ -630,6 +653,17 @@ def gen_sp3_content(
:param io.TextIOBase buf: The buffer to write the SP3 content to. Defaults to None.
:return str or None: The SP3 content if `buf` is None, otherwise None.
"""

# TODO ensure we correctly handle outputting Velocity data! I.e. does this need to be interlaced back in,
# not printed as additional columns?!
# E.g. do we need:
# PG01... X Y Z CLK ...
# VG01... VX VY VZ ...
#
# Rather than:
# PG01... X Y Z CLK ... VX VY VZ ...
# ?

out_buf = buf if buf is not None else _io.StringIO()
if sort_outputs:
# If we need to do particular sorting/ordering of satellites and constellations we can use some of the
Expand All @@ -638,7 +672,8 @@ def gen_sp3_content(
out_df = sp3_df["EST"]
flags_df = sp3_df["FLAGS"] # Prediction, maneuver, etc.

# Validate that all flags have valid values
# Valid values for the respective flags are 'E' 'P' 'M' 'P' (or blank), as per page 11-12 of the SP3d standard:
# https://files.igs.org/pub/data/format/sp3d.pdf
if not (
flags_df["Clock_Event"].astype(str).isin(["E", " "]).all()
and flags_df["Clock_Pred"].astype(str).isin(["P", " "]).all()
Expand Down
56 changes: 56 additions & 0 deletions tests/test_datasets/sp3_test_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,60 @@
EOF
"""

# Manual test dataset for EV and EP flags (currently just checking that exceptions are raised because we don't handle them yet)
sp3_test_data_ep_ev_rows = b"""#dV2007 4 12 0 0 0.00000000 1 ORBIT IGS14 BHN ESOC
## 1422 345600.00000000 900.00000000 54202 0.0000000000000
+ 1 G01 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
++ 8 8 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
++ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
++ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
++ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
++ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
%c M cc GPS ccc cccc cccc cccc cccc ccccc ccccc ccccc ccccc
%c cc cc ccc ccc cccc cccc cccc cccc ccccc ccccc ccccc ccccc
%f 0.0000000 0.000000000 0.00000000000 0.000000000000000
%f 0.0000000 0.000000000 0.00000000000 0.000000000000000
%i 0 0 0 0 0 0 0 0 0
%i 0 0 0 0 0 0 0 0 0
/* EUROPEAN SPACE OPERATIONS CENTRE - DARMSTADT, GERMANY
/* ---------------------------------------------------------
/* SP3 FILE GENERATED BY NAPEOS BAHN TOOL (DETERMINATION)
/* PCV:IGS14_2022 OL/AL:EOT11A NONE YN ORB:CoN CLK:CoN
* 2007 4 12 0 0 0.00000000
PG01 -6114.801556 -13827.040252 22049.171610 999999.999999
EP 55 55 55 222 1234567 -1234567 5999999 -30 21 -1230000
VG01 27184.457428 -3548.055474 5304.058806 999999.999999
EV 22 22 22 111 1234567 1234567 1234567 1234567 1234567 1234567
EOF
"""
# NOTE: copied from SP3d PDF.. alignment of EP and EV seem to be quite wrong.
# TODO update with a proper source. Will do for now just to check exceptions on these flags being seen.


# Expected content output for gnssanalysis gen_sp3_content() when reading in then writing
# out sp3_test_data_igs_benchmark_null_clock:
expected_sp3_output_igs_benchmark_null_clock = b"""* 2007 4 12 0 0 0.00000000
PG01 -6114.801556 -13827.040252 22049.171610 999999.999999
VG01 27184.457428 -3548.055474 5304.058806 999999.999999
PG02 12947.223282 22448.220655 6215.570741 999999.999999
VG02 -7473.756152 -4355.288568 29939.333728 999999.999999
* 2007 4 12 0 15 0.00000000
PG01 -3659.032812 -14219.662913 22339.175481 123456.999999
VG01 27295.435569 -5170.061971 1131.227754 999999.999999
PG02 12163.580358 21962.803659 8849.429007 999999.999999
VG02 -9967.334764 -6367.969150 28506.683280 999999.999999
* 2007 4 12 0 30 0.00000000
PG01 -1218.171155 -14755.013599 22252.168480 999999.999999
VG01 26855.435366 -6704.236117 -3062.394499 999999.999999
PG02 11149.555664 21314.099837 11331.977499 123456.999999
VG02 -12578.915944 -7977.396362 26581.116225 999999.999999
"""
# NOTE 'EOF' is added in write_sp3() so we don't expect it here

# second dataset is a truncated version of file COD0OPSFIN_20242010000_01D_05M_ORB.SP3
sp3_test_data_truncated_cod_final = b"""#dP2024 7 19 0 0 0.00000000 2 d+D IGS20 FIT AIUB
## 2323 432000.00000000 300.00000000 60510 0.0000000000000
Expand Down Expand Up @@ -168,3 +222,5 @@
PG19 0.000000 0.000000 0.000000 999999.999999
EOF
"""

# TODO add some test data that actually has flags!! And write tests for those.
14 changes: 14 additions & 0 deletions tests/test_sp3.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@
from test_datasets.sp3_test_data import (
# first dataset is part of the IGS benchmark (modified to include non null data on clock):
sp3_test_data_igs_benchmark_null_clock as input_data,
# Expected content section we want gnssanalysis to write out
expected_sp3_output_igs_benchmark_null_clock,
# Test exception raising when encountering EP, EV rows
sp3_test_data_ep_ev_rows,
# second dataset is a truncated version of file COD0OPSFIN_20242010000_01D_05M_ORB.SP3:
sp3_test_data_truncated_cod_final as input_data2,
sp3_test_data_partially_offline_sat as offline_sat_test_data,
Expand Down Expand Up @@ -56,6 +60,13 @@ def test_read_sp3_pv(self, mock_file):
)
self.assertEqual(result.index[0][0], 229608000) # Same date, as J2000

@patch("builtins.open", new_callable=mock_open, read_data=sp3_test_data_ep_ev_rows)
def test_read_sp3_pv_with_ev_ep_rows(self, mock_file):
# Expect exception relating to the EV and EP rows, as we can't currently handle them properly.
self.assertRaises(
NotImplementedError, sp3.read_sp3, "mock_path", pOnly=False, continue_on_ep_ev_encountered=False
)

@patch("builtins.open", new_callable=mock_open, read_data=input_data)
def test_read_sp3_header_svs_basic(self, mock_file):
"""
Expand Down Expand Up @@ -134,6 +145,9 @@ def test_read_sp3_header_svs_detailed(self):
end_line2_acc, 18, msg="Last ACC on test line 2 (pos 30) should be 18"
)

# TODO Add test(s) for correctly reading header fundamentals (ACC, ORB_TYPE, etc.)
# TODO add tests for correctly reading the actual content of the SP3 in addition to the header.
# TODO add tests for correctly generating sp3 output content with gen_sp3_content() and gen_sp3_header()
def test_sp3_clock_nodata_to_nan(self):
sp3_df = pd.DataFrame(
{("EST", "CLK"): [999999.999999, 123456.789, 999999.999999, 987654.321]}
Expand Down

0 comments on commit f96026c

Please sign in to comment.