diff --git a/ControlTable_StreamCat.csv b/ControlTable_StreamCat.csv
index a52c826..79bd441 100644
--- a/ControlTable_StreamCat.csv
+++ b/ControlTable_StreamCat.csv
@@ -1,7 +1,21 @@
 FullTableName,accum_type,MetricName,AppendMetric,LandscapeLayer,summaryfield,Final_Table_Name,MetricType,Conversion,by_RPU,use_mask,run,notes,Published,Metadata Published,InAPIDatabase,Date Added
 AgDrain,Categorical,lookup/AgDrain_lookup.csv,none,AgDrain_stlvl_FINAL.tif,,AgDrain,Percent,1,0,0,0,,Yes,Yes,,7/10/2020
-Ag2006HiSlp,Categorical,lookup/Ag2006HiSlp_lookup.csv,none,Ag2006HiSlp.tif,,AgMidHiSlopes,Percent,1,0,0,0,,Yes,no,,1/21/2021
-Ag2006MidSlp,Categorical,lookup/Ag2006MidSlp_lookup.csv,none,Ag2006MidSlp.tif,,AgMidHiSlopes,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2001HiSlp,Categorical,lookup/Ag2001HiSlp_lookup.csv,none,Ag2001HiSlp.tif,,AgMidHiSlopes2001,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2001MidSlp,Categorical,lookup/Ag2001MidSlp_lookup.csv,none,Ag2001MidSlp.tif,,AgMidHiSlopes2001,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2004HiSlp,Categorical,lookup/Ag2006HiSlp_lookup.csv,none,Ag2004HiSlp.tif,,AgMidHiSlopes2004,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2004MidSlp,Categorical,lookup/Ag2006MidSlp_lookup.csv,none,Ag2004MidSlp.tif,,AgMidHiSlopes2004,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2006HiSlp,Categorical,lookup/Ag2006HiSlp_lookup.csv,none,Ag2006HiSlp.tif,,AgMidHiSlopes2006,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2006MidSlp,Categorical,lookup/Ag2006MidSlp_lookup.csv,none,Ag2006MidSlp.tif,,AgMidHiSlopes2006,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2008HiSlp,Categorical,lookup/Ag2008HiSlp_lookup.csv,none,Ag2008HiSlp.tif,,AgMidHiSlopes2008,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2008MidSlp,Categorical,lookup/Ag2008MidSlp_lookup.csv,none,Ag2008MidSlp.tif,,AgMidHiSlopes2008,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2011HiSlp,Categorical,lookup/Ag2011HiSlp_lookup.csv,none,Ag2011HiSlp.tif,,AgMidHiSlopes2011,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2011MidSlp,Categorical,lookup/Ag2011MidSlp_lookup.csv,none,Ag2011MidSlp.tif,,AgMidHiSlopes2011,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2013HiSlp,Categorical,lookup/Ag2013HiSlp_lookup.csv,none,Ag2013HiSlp.tif,,AgMidHiSlopes2013,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2013MidSlp,Categorical,lookup/Ag2013MidSlp_lookup.csv,none,Ag2013MidSlp.tif,,AgMidHiSlopes2013,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2016HiSlp,Categorical,lookup/Ag2016HiSlp_lookup.csv,none,Ag2016HiSlp.tif,,AgMidHiSlopes2016,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2016MidSlp,Categorical,lookup/Ag2016MidSlp_lookup.csv,none,Ag2016MidSlp.tif,,AgMidHiSlopes2016,Percent,1,0,0,0,,Yes,no,,1/21/2021
+Ag2019HiSlp,Categorical,lookup/Ag2019HiSlp_lookup.csv,none,Ag2019HiSlp.tif,,AgMidHiSlopes2019,Percent,1,0,0,1,,Yes,no,,1/21/2021
+Ag2019MidSlp,Categorical,lookup/Ag2019MidSlp_lookup.csv,none,Ag2019MidSlp.tif,,AgMidHiSlopes2019,Percent,1,0,0,0,,Yes,no,,1/21/2021
 CBNF,Continuous,CBNF,none,cbnf.tif,,AgriculturalNitrogen,Mean,1,0,0,0,,Yes,yes,Yes,1/21/2021
 Fert,Continuous,Fert,none,fert.tif,,AgriculturalNitrogen,Mean,1,0,0,0,,Yes,yes,Yes,1/21/2021
 Manure,Continuous,Manure,none,manure.tif,,AgriculturalNitrogen,Mean,1,0,0,0,,Yes,yes,Yes,1/22/2021
@@ -403,9 +417,171 @@ WWTPAll,Point,WWTPAllDens,none,WWTP_All_CWA_Active_2013_CONUS.shp,,WWTP,Density,
 WWTPMajor,Point,WWTPMajorDens,none,WWTP_Major_CWA_Active_2013_CONUS.shp,,WWTP,Density,1,0,0,0,,Yes,yes,,12/30/2021
 WWTPMinor,Point,WWTPMinorDens,none,WWTP_Minor_CWA_Active_2013_CONUS.shp,,WWTP,Density,1,0,0,0,,Yes,yes,,12/30/2021
 WetnessIndex,Continuous,WetIndex,none,wetness_index.tif,,WetIndex,Mean,1,0,0,0,,Yes,yes,Yes,7/4/2020
-NPP_YrMean,Continuous,NPP_YrMean,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,1,,No,No,No,5/6/2024
-NPP,Continuous,NPP,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,1,,No,No,No,5/6/2024
-Precip_YrMean,Continuous,Precip_YrMean,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,1,,No,No,No,5/6/2024
-LST_YrMean,Continuous,LST_YrMean,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,1,,No,No,No,5/6/2024
-LST,Continuous,LST,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,1,,No,No,No,5/6/2024
-SNOW_YrMean,Continuous,SNOW_YrMean,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,1,,No,No,No,5/6/2024
+NPP_YrMean,Continuous,NPP_YrMean,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,0,,No,No,No,5/6/2024
+NPP,Continuous,NPP,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,0,,No,No,No,5/6/2024
+Precip_YrMean,Continuous,Precip_YrMean,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,0,,No,No,No,5/6/2024
+LST_YrMean,Continuous,LST_YrMean,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,0,,No,No,No,5/6/2024
+LST,Continuous,LST,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,0,,No,No,No,5/6/2024
+SNOW_YrMean,Continuous,SNOW_YrMean,none,E:\WorkingData\To_Be_Flow_Accumulated,,ClimTerms_2012_10,Mean,1,0,0,0,,No,No,No,5/6/2024
+Clay2,Continuous,Clay2,none,clay.tif,,Test,Mean,0.01,0,0,1,,No,No,Yes,6/15/2020
+N_Farm_Fert1987,Continuous,N_Farm_Fert1987,none,nlcd_1992.tif,,NFarmFertilizer,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Farm_Fert1992,Continuous,N_Farm_Fert1992,none,nlcd_1992.tif,,NFarmFertilizer,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Farm_Fert1997,Continuous,N_Farm_Fert1997,none,nlcd_1992.tif,,NFarmFertilizer,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Farm_Fert2002,Continuous,N_Farm_Fert2002,none,nlcd_2002.tif,,NFarmFertilizer,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Farm_Fert2007,Continuous,N_Farm_Fert2007,none,nlcd_2008.tif,,NFarmFertilizer,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Farm_Fert2012,Continuous,N_Farm_Fert2012,none,nlcd_2013.tif,,NFarmFertilizer,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Farm_Fert2017,Continuous,N_Farm_Fert2017,none,nlcd_2016.tif,,NFarmFertilizer,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste1987,Continuous,N_Livestock_Waste1987,none,nlcd_1992.tif,,NLiveStockWaste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste1992,Continuous,N_Livestock_Waste1992,none,nlcd_1992.tif,,NLiveStockWaste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste1997,Continuous,N_Livestock_Waste1997,none,nlcd_1992.tif,,NLiveStockWaste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste2002,Continuous,N_Livestock_Waste2002,none,nlcd_2002.tif,,NLiveStockWaste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste2007,Continuous,N_Livestock_Waste2007,none,nlcd_2008.tif,,NLiveStockWaste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste2012,Continuous,N_Livestock_Waste2012,none,nlcd_2013.tif,,NLiveStockWaste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste2017,Continuous,N_Livestock_Waste2017,none,nlcd_2016.tif,,NLiveStockWaste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste_Rec1987,Continuous,N_Livestock_Waste_Rec1987,none,nlcd_1992.tif,,NLivestockWasteRecovered,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste_Rec1992,Continuous,N_Livestock_Waste_Rec1992,none,nlcd_1992.tif,,NLivestockWasteRecovered,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste_Rec1997,Continuous,N_Livestock_Waste_Rec1997,none,nlcd_1992.tif,,NLivestockWasteRecovered,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste_Rec2002,Continuous,N_Livestock_Waste_Rec2002,none,nlcd_2002.tif,,NLivestockWasteRecovered,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste_Rec2007,Continuous,N_Livestock_Waste_Rec2007,none,nlcd_2008.tif,,NLivestockWasteRecovered,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste_Rec2012,Continuous,N_Livestock_Waste_Rec2012,none,nlcd_2013.tif,,NLivestockWasteRecovered,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Livestock_Waste_Rec2017,Continuous,N_Livestock_Waste_Rec2017,none,nlcd_2016.tif,,NLivestockWasteRecovered,Mean,1,0,0,0,,No,No,,9/6/2024
+Crop_BNF1987,Continuous,Crop_BNF1987,none,nlcd_1992.tif,,Crop_BNF,Mean,1,0,0,0,,No,No,,9/6/2024
+Crop_BNF1992,Continuous,Crop_BNF1992,none,nlcd_1992.tif,,Crop_BNF,Mean,1,0,0,0,,No,No,,9/6/2024
+Crop_BNF1997,Continuous,Crop_BNF1997,none,nlcd_1992.tif,,Crop_BNF,Mean,1,0,0,0,,No,No,,9/6/2024
+Crop_BNF2002,Continuous,Crop_BNF2002,none,nlcd_2002.tif,,Crop_BNF,Mean,1,0,0,0,,No,No,,9/6/2024
+Crop_BNF2007,Continuous,Crop_BNF2007,none,nlcd_2008.tif,,Crop_BNF,Mean,1,0,0,0,,No,No,,9/6/2024
+Crop_BNF2012,Continuous,Crop_BNF2012,none,nlcd_2013.tif,,Crop_BNF,Mean,1,0,0,0,,No,No,,9/6/2024
+Crop_BNF2017,Continuous,Crop_BNF2017,none,nlcd_2016.tif,,Crop_BNF,Mean,1,0,0,0,,No,No,,9/6/2024
+N_CropRem1987,Continuous,N_CropRem1987,none,nlcd_1992.tif,,N_CropRem,Mean,1,0,0,0,,No,No,,9/6/2024
+N_CropRem1992,Continuous,N_CropRem1992,none,nlcd_1992.tif,,N_CropRem,Mean,1,0,0,0,,No,No,,9/6/2024
+N_CropRem1997,Continuous,N_CropRem1997,none,nlcd_1992.tif,,N_CropRem,Mean,1,0,0,0,,No,No,,9/6/2024
+N_CropRem2002,Continuous,N_CropRem2002,none,nlcd_2002.tif,,N_CropRem,Mean,1,0,0,0,,No,No,,9/6/2024
+N_CropRem2007,Continuous,N_CropRem2007,none,nlcd_2008.tif,,N_CropRem,Mean,1,0,0,0,,No,No,,9/6/2024
+N_CropRem2012,Continuous,N_CropRem2012,none,nlcd_2013.tif,,N_CropRem,Mean,1,0,0,0,,No,No,,9/6/2024
+N_CropRem2017,Continuous,N_CropRem2017,none,nlcd_2016.tif,,N_CropRem,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AtmoDep1987,Continuous,N_AtmoDep1987,none,nlcd_1992.tif,,N_AtmoDep,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AtmoDep1992,Continuous,N_AtmoDep1992,none,nlcd_1992.tif,,N_AtmoDep,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AtmoDep1997,Continuous,N_AtmoDep1997,none,nlcd_1992.tif,,N_AtmoDep,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AtmoDep2002,Continuous,N_AtmoDep2002,none,nlcd_2002.tif,,N_AtmoDep,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AtmoDep2007,Continuous,N_AtmoDep2007,none,nlcd_2008.tif,,N_AtmoDep,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AtmoDep2012,Continuous,N_AtmoDep2012,none,nlcd_2013.tif,,N_AtmoDep,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AtmoDep2017,Continuous,N_AtmoDep2017,none,nlcd_2016.tif,,N_AtmoDep,Mean,1,0,0,0,,No,No,,9/6/2024
+N_NonFarm_Fert1987,Continuous,N_NonFarm_Fert1987,none,nlcd_1992.tif,,N_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/6/2024
+N_NonFarm_Fert1992,Continuous,N_NonFarm_Fert1992,none,nlcd_1992.tif,,N_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/6/2024
+N_NonFarm_Fert1997,Continuous,N_NonFarm_Fert1997,none,nlcd_1992.tif,,N_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/6/2024
+N_NonFarm_Fert2002,Continuous,N_NonFarm_Fert2002,none,nlcd_2002.tif,,N_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/6/2024
+N_NonFarm_Fert2007,Continuous,N_NonFarm_Fert2007,none,nlcd_2008.tif,,N_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/6/2024
+N_NonFarm_Fert2012,Continuous,N_NonFarm_Fert2012,none,nlcd_2013.tif,,N_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/6/2024
+N_NonFarm_Fert2017,Continuous,N_NonFarm_Fert2017,none,nlcd_2016.tif,,N_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Human_Waste1987,Continuous,N_Human_Waste1987,none,nlcd_1992.tif,,N_Human_Waste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Human_Waste1992,Continuous,N_Human_Waste1992,none,nlcd_1992.tif,,N_Human_Waste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Human_Waste1997,Continuous,N_Human_Waste1997,none,nlcd_1992.tif,,N_Human_Waste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Human_Waste2002,Continuous,N_Human_Waste2002,none,nlcd_2002.tif,,N_Human_Waste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Human_Waste2007,Continuous,N_Human_Waste2007,none,nlcd_2008.tif,,N_Human_Waste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Human_Waste2012,Continuous,N_Human_Waste2012,none,nlcd_2013.tif,,N_Human_Waste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Human_Waste2017,Continuous,N_Human_Waste2017,none,nlcd_2016.tif,,N_Human_Waste,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Wastewater_Lds1987,Continuous,N_Wastewater_Lds1987,none,nlcd_1992.tif,,N_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Wastewater_Lds1992,Continuous,N_Wastewater_Lds1992,none,nlcd_1992.tif,,N_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Wastewater_Lds1997,Continuous,N_Wastewater_Lds1997,none,nlcd_1992.tif,,N_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Wastewater_Lds2002,Continuous,N_Wastewater_Lds2002,none,nlcd_2002.tif,,N_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Wastewater_Lds2007,Continuous,N_Wastewater_Lds2007,none,nlcd_2008.tif,,N_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Wastewater_Lds2012,Continuous,N_Wastewater_Lds2012,none,nlcd_2013.tif,,N_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Wastewater_Lds2017,Continuous,N_Wastewater_Lds2017,none,nlcd_2016.tif,,N_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Legacy1987,Continuous,N_AgSurplus_Legacy1987,none,nlcd_1992.tif,,N_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Legacy1992,Continuous,N_AgSurplus_Legacy1992,none,nlcd_1992.tif,,N_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Legacy1997,Continuous,N_AgSurplus_Legacy1997,none,nlcd_1992.tif,,N_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Legacy2002,Continuous,N_AgSurplus_Legacy2002,none,nlcd_2002.tif,,N_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Legacy2007,Continuous,N_AgSurplus_Legacy2007,none,nlcd_2008.tif,,N_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Legacy2012,Continuous,N_AgSurplus_Legacy2012,none,nlcd_2013.tif,,N_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Legacy2017,Continuous,N_AgSurplus_Legacy2017,none,nlcd_2016.tif,,N_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Annual1987,Continuous,N_AgSurplus_Annual1987,none,nlcd_1992.tif,,N_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Annual1992,Continuous,N_AgSurplus_Annual1992,none,nlcd_1992.tif,,N_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Annual1997,Continuous,N_AgSurplus_Annual1997,none,nlcd_1992.tif,,N_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Annual2002,Continuous,N_AgSurplus_Annual2002,none,nlcd_2002.tif,,N_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Annual2007,Continuous,N_AgSurplus_Annual2007,none,nlcd_2008.tif,,N_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Annual2012,Continuous,N_AgSurplus_Annual2012,none,nlcd_2013.tif,,N_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/6/2024
+N_AgSurplus_Annual2017,Continuous,N_AgSurplus_Annual2017,none,nlcd_2016.tif,,N_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Total_Inputs1987,Continuous,N_Total_Inputs1987,none,nlcd_1992.tif,,N_Total_Inputs,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Total_Inputs1992,Continuous,N_Total_Inputs1992,none,nlcd_1992.tif,,N_Total_Inputs,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Total_Inputs1997,Continuous,N_Total_Inputs1997,none,nlcd_1992.tif,,N_Total_Inputs,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Total_Inputs2002,Continuous,N_Total_Inputs2002,none,nlcd_2002.tif,,N_Total_Inputs,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Total_Inputs2007,Continuous,N_Total_Inputs2007,none,nlcd_2008.tif,,N_Total_Inputs,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Total_Inputs2012,Continuous,N_Total_Inputs2012,none,nlcd_2013.tif,,N_Total_Inputs,Mean,1,0,0,0,,No,No,,9/6/2024
+N_Total_Inputs2017,Continuous,N_Total_Inputs2017,none,nlcd_2016.tif,,N_Total_Inputs,Mean,1,0,0,0,,No,No,,9/6/2024
+P_Farm_Fert1987,Continuous,P_Farm_Fert1987,none,nlcd_1992.tif,,P_Farm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Farm_Fert1992,Continuous,P_Farm_Fert1992,none,nlcd_1992.tif,,P_Farm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Farm_Fert1997,Continuous,P_Farm_Fert1997,none,nlcd_1992.tif,,P_Farm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Farm_Fert2002,Continuous,P_Farm_Fert2002,none,nlcd_2002.tif,,P_Farm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Farm_Fert2007,Continuous,P_Farm_Fert2007,none,nlcd_2008.tif,,P_Farm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Farm_Fert2012,Continuous,P_Farm_Fert2012,none,nlcd_2013.tif,,P_Farm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Farm_Fert2017,Continuous,P_Farm_Fert2017,none,nlcd_2016.tif,,P_Farm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste1987,Continuous,P_Livestock_Waste1987,none,nlcd_1992.tif,,P_Livestock_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste1992,Continuous,P_Livestock_Waste1992,none,nlcd_1992.tif,,P_Livestock_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste1997,Continuous,P_Livestock_Waste1997,none,nlcd_1992.tif,,P_Livestock_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste2002,Continuous,P_Livestock_Waste2002,none,nlcd_2002.tif,,P_Livestock_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste2007,Continuous,P_Livestock_Waste2007,none,nlcd_2008.tif,,P_Livestock_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste2012,Continuous,P_Livestock_Waste2012,none,nlcd_2013.tif,,P_Livestock_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste2017,Continuous,P_Livestock_Waste2017,none,nlcd_2016.tif,,P_Livestock_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste_Rec1987,Continuous,P_Livestock_Waste_Rec1987,none,nlcd_1992.tif,,P_Livestock_Waste_Rec,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste_Rec1992,Continuous,P_Livestock_Waste_Rec1992,none,nlcd_1992.tif,,P_Livestock_Waste_Rec,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste_Rec1997,Continuous,P_Livestock_Waste_Rec1997,none,nlcd_1992.tif,,P_Livestock_Waste_Rec,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste_Rec2002,Continuous,P_Livestock_Waste_Rec2002,none,nlcd_2002.tif,,P_Livestock_Waste_Rec,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste_Rec2007,Continuous,P_Livestock_Waste_Rec2007,none,nlcd_2008.tif,,P_Livestock_Waste_Rec,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste_Rec2012,Continuous,P_Livestock_Waste_Rec2012,none,nlcd_2013.tif,,P_Livestock_Waste_Rec,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Livestock_Waste_Rec2017,Continuous,P_Livestock_Waste_Rec2017,none,nlcd_2016.tif,,P_Livestock_Waste_Rec,Mean,1,0,0,0,,No,No,,9/9/2024
+P_CropRemoval1987,Continuous,P_CropRemoval1987,none,nlcd_1992.tif,,P_CropRemoval,Mean,1,0,0,0,,No,No,,9/9/2024
+P_CropRemoval1992,Continuous,P_CropRemoval1992,none,nlcd_1992.tif,,P_CropRemoval,Mean,1,0,0,0,,No,No,,9/9/2024
+P_CropRemoval1997,Continuous,P_CropRemoval1997,none,nlcd_1992.tif,,P_CropRemoval,Mean,1,0,0,0,,No,No,,9/9/2024
+P_CropRemoval2002,Continuous,P_CropRemoval2002,none,nlcd_2002.tif,,P_CropRemoval,Mean,1,0,0,0,,No,No,,9/9/2024
+P_CropRemoval2007,Continuous,P_CropRemoval2007,none,nlcd_2008.tif,,P_CropRemoval,Mean,1,0,0,0,,No,No,,9/9/2024
+P_CropRemoval2012,Continuous,P_CropRemoval2012,none,nlcd_2013.tif,,P_CropRemoval,Mean,1,0,0,0,,No,No,,9/9/2024
+P_CropRemoval2017,Continuous,P_CropRemoval2017,none,nlcd_2016.tif,,P_CropRemoval,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AtmoDep1987,Continuous,P_AtmoDep1987,none,nlcd_1992.tif,,P_AtmoDep,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AtmoDep1992,Continuous,P_AtmoDep1992,none,nlcd_1992.tif,,P_AtmoDep,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AtmoDep1997,Continuous,P_AtmoDep1997,none,nlcd_1992.tif,,P_AtmoDep,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AtmoDep2002,Continuous,P_AtmoDep2002,none,nlcd_2002.tif,,P_AtmoDep,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AtmoDep2007,Continuous,P_AtmoDep2007,none,nlcd_2008.tif,,P_AtmoDep,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AtmoDep2012,Continuous,P_AtmoDep2012,none,nlcd_2013.tif,,P_AtmoDep,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AtmoDep2017,Continuous,P_AtmoDep2017,none,nlcd_2016.tif,,P_AtmoDep,Mean,1,0,0,0,,No,No,,9/9/2024
+P_NonFarm_Fert1987,Continuous,P_NonFarm_Fert1987,none,nlcd_1992.tif,,P_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_NonFarm_Fert1992,Continuous,P_NonFarm_Fert1992,none,nlcd_1992.tif,,P_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_NonFarm_Fert1997,Continuous,P_NonFarm_Fert1997,none,nlcd_1992.tif,,P_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_NonFarm_Fert2002,Continuous,P_NonFarm_Fert2002,none,nlcd_2002.tif,,P_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_NonFarm_Fert2007,Continuous,P_NonFarm_Fert2007,none,nlcd_2008.tif,,P_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_NonFarm_Fert2012,Continuous,P_NonFarm_Fert2012,none,nlcd_2013.tif,,P_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_NonFarm_Fert2017,Continuous,P_NonFarm_Fert2017,none,nlcd_2016.tif,,P_NonFarm_Fert,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Human_Waste1987,Continuous,P_Human_Waste1987,none,nlcd_1992.tif,,P_Human_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Human_Waste1992,Continuous,P_Human_Waste1992,none,nlcd_1992.tif,,P_Human_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Human_Waste1997,Continuous,P_Human_Waste1997,none,nlcd_1992.tif,,P_Human_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Human_Waste2002,Continuous,P_Human_Waste2002,none,nlcd_2002.tif,,P_Human_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Human_Waste2007,Continuous,P_Human_Waste2007,none,nlcd_2008.tif,,P_Human_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Human_Waste2012,Continuous,P_Human_Waste2012,none,nlcd_2013.tif,,P_Human_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Human_Waste2017,Continuous,P_Human_Waste2017,none,nlcd_2016.tif,,P_Human_Waste,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Wastewater_Lds1987,Continuous,P_Wastewater_Lds1987,none,nlcd_1992.tif,,P_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Wastewater_Lds1992,Continuous,P_Wastewater_Lds1992,none,nlcd_1992.tif,,P_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Wastewater_Lds1997,Continuous,P_Wastewater_Lds1997,none,nlcd_1992.tif,,P_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Wastewater_Lds2002,Continuous,P_Wastewater_Lds2002,none,nlcd_2002.tif,,P_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Wastewater_Lds2007,Continuous,P_Wastewater_Lds2007,none,nlcd_2008.tif,,P_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Wastewater_Lds2012,Continuous,P_Wastewater_Lds2012,none,nlcd_2013.tif,,P_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Wastewater_Lds2017,Continuous,P_Wastewater_Lds2017,none,nlcd_2016.tif,,P_Wastewater_Lds,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Legacy1987,Continuous,P_AgSurplus_Legacy1987,none,nlcd_1992.tif,,P_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Legacy1992,Continuous,P_AgSurplus_Legacy1992,none,nlcd_1992.tif,,P_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Legacy1997,Continuous,P_AgSurplus_Legacy1997,none,nlcd_1992.tif,,P_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Legacy2002,Continuous,P_AgSurplus_Legacy2002,none,nlcd_2002.tif,,P_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Legacy2007,Continuous,P_AgSurplus_Legacy2007,none,nlcd_2008.tif,,P_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Legacy2012,Continuous,P_AgSurplus_Legacy2012,none,nlcd_2013.tif,,P_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Legacy2017,Continuous,P_AgSurplus_Legacy2017,none,nlcd_2016.tif,,P_AgSurplus_Legacy,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Annual1987,Continuous,P_AgSurplus_Annual1987,none,nlcd_1992.tif,,P_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Annual1992,Continuous,P_AgSurplus_Annual1992,none,nlcd_1992.tif,,P_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Annual1997,Continuous,P_AgSurplus_Annual1997,none,nlcd_1992.tif,,P_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Annual2002,Continuous,P_AgSurplus_Annual2002,none,nlcd_2002.tif,,P_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Annual2007,Continuous,P_AgSurplus_Annual2007,none,nlcd_2008.tif,,P_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Annual2012,Continuous,P_AgSurplus_Annual2012,none,nlcd_2013.tif,,P_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/9/2024
+P_AgSurplus_Annual2017,Continuous,P_AgSurplus_Annual2017,none,nlcd_2016.tif,,P_AgSurplus_Annual,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Total_Inputs1987,Continuous,P_Total_Inputs1987,none,nlcd_1992.tif,,P_Total_Inputs,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Total_Inputs1992,Continuous,P_Total_Inputs1992,none,nlcd_1992.tif,,P_Total_Inputs,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Total_Inputs1997,Continuous,P_Total_Inputs1997,none,nlcd_1992.tif,,P_Total_Inputs,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Total_Inputs2002,Continuous,P_Total_Inputs2002,none,nlcd_2002.tif,,P_Total_Inputs,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Total_Inputs2007,Continuous,P_Total_Inputs2007,none,nlcd_2008.tif,,P_Total_Inputs,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Total_Inputs2012,Continuous,P_Total_Inputs2012,none,nlcd_2013.tif,,P_Total_Inputs,Mean,1,0,0,0,,No,No,,9/9/2024
+P_Total_Inputs2017,Continuous,P_Total_Inputs2017,none,nlcd_2016.tif,,P_Total_Inputs,Mean,1,0,0,0,,No,No,,9/9/2024
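Note: a control-table row is only picked up by the processing scripts when its `run` flag is 1 (here only Ag2019HiSlp is left switched on, and the ClimTerms rows are flipped from 1 to 0, presumably because they have already been run). A minimal sketch of how the control table is typically consumed -- the column names come from the header row above, while the `run == 1` filtering convention is an assumption:

import pandas as pd

# Load the control table and keep only rows flagged to run (assumed convention: run == 1).
ctl = pd.read_csv("ControlTable_StreamCat.csv")
for row in ctl.query("run == 1").itertuples():
    # Each row supplies the metric name, accumulation type, and source layer
    # that drive the per-zone processing loop in StreamCat.py.
    print(row.FullTableName, row.accum_type, row.LandscapeLayer, row.Final_Table_Name)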
diff --git a/PartitionDownscaledResults.py b/PartitionDownscaledResults.py
index a90471e..7ae7b88 100644
--- a/PartitionDownscaledResults.py
+++ b/PartitionDownscaledResults.py
@@ -22,28 +22,36 @@
 # Nutrient file
 #nut_dir = 'O:/PRIV/CPHEA/PESD/COR/CORFILES/Geospatial_Library_Projects/StreamCat/NutrientInventory/Inputs/'
-nut_dir = 'E:/WorkingData/To_Be_Flow_Accumulated/'
-nut = pd.read_csv(nut_dir + 'ClimTerms_2012_10.csv')
+# nut_dir = 'E:/WorkingData/To_Be_Flow_Accumulated/'
+# nut = pd.read_csv(nut_dir + 'ClimTerms_2012_10.csv')
+nut_dir = 'O:/PRIV/CPHEA/PESD/COR/CORFILES/Geospatial_Library_Projects/AmaliaHandler/'
+nut = pd.read_csv(nut_dir + 'ToBeFlowAccumulated_update.csv')
+
 cat_area = pd.read_csv('O:/PRIV/CPHEA/PESD/COR/CORFILES/Geospatial_Library_Projects/StreamCat/NutrientInventory/Inputs/COMID_Scaled_AgVars.csv')
 cat_area = cat_area[['COMID','CatAreaSqKm']]
 cat_area.head()
 # add VPU using lookup table
 nut = pd.merge(nut, COMID_VPU, how='left', left_on=['COMID'], right_on=['COMID'])
 nut = pd.merge(nut, cat_area, how='left', left_on=['COMID'], right_on=['COMID'])
-nut = nut.drop('Unnamed: 0', axis=1)
+# nut = nut.drop('Unnamed: 0', axis=1)
 # nut = nut.drop('...1', axis=1)
 list(nut)
 # select columns - this part we can modify to iterate through columns
-final = nut[['COMID', 'SNOW_YrMean', 'CatAreaSqKm', 'VPU']]
-final = final.rename(columns={'SNOW_YrMean': 'CatSum'})
-final['CatCount'] = final['CatAreaSqKm']
-final['CatPctFull'] = 100
-final = final.set_axis(['COMID', 'CatSum', 'CatAreaSqKm','VPU', 'CatCount', 'CatPctFull'], axis=1)
-
-for i in VPU:
-    print(i)
-    df = final[final['VPU'] == i]
-    df = df.drop(columns=['VPU'])
-    df.to_csv(nut_dir + '/Allocation_and_Accumulation/SNOW_YrMean_' + str(i) + '.csv',
-              index=False)
+nut.columns = nut.columns.str.replace('_Cat','')
+cols = [i for i in nut.columns if i not in ["COMID", "VPU", "CatAreaSqKm"]]
+
+for col in cols:
+    final = nut[['COMID', col, 'CatAreaSqKm', 'VPU']]
+    final = final.rename(columns={col: 'CatSum'})
+    final['CatCount'] = final['CatAreaSqKm']
+    final['CatSum'] = final['CatSum'] * final['CatCount']
+    final['CatPctFull'] = 100
+    final = final[['COMID', 'CatAreaSqKm', 'CatCount', 'CatSum', 'CatPctFull', 'VPU']]
+
+    for i in VPU:
+        print(i)
+        df = final[final['VPU'] == i]
+        df = df.drop(columns=['VPU'])
+        df.to_csv(nut_dir + '/Allocation_and_Accumulation/' + col + '_' + str(i) + '.csv',
+                  index=False)
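The substantive change above is twofold: the loop now covers every metric column instead of a single hard-coded one, and CatSum is scaled by CatCount before the per-VPU allocation files are written. A toy illustration of that scaling, assuming the incoming columns hold per-catchment mean values while downstream accumulation expects quantities it can sum and later re-normalize (column name val_mean is hypothetical):

import pandas as pd

df = pd.DataFrame({"COMID": [1, 2], "val_mean": [2.0, 3.0], "CatAreaSqKm": [10.0, 5.0]})
df["CatCount"] = df["CatAreaSqKm"]              # count proxy: catchment area
df["CatSum"] = df["val_mean"] * df["CatCount"]  # mean * count -> summable quantity
# Dividing accumulated CatSum by accumulated CatCount recovers an area-weighted mean.
print(df[["COMID", "CatSum", "CatCount"]])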
print("done!") print("Accumulating...", end="", flush=True) for zone in INPUTS: @@ -184,11 +184,11 @@ final = pd.merge(cat, upFinal, on="COMID") final.to_csv(f"{OUT_DIR}/{row.FullTableName}_{zone}.csv", index=False) print(end="") if processed else print("done!") -if already_processed: - print( - "\n!!!Processing Problem!!!\n\n" - f"{', '.join(already_processed)} already run!\n" - "Be sure to delete the associated files in your `OUTDIR` to rerun:" - f"\n\t> {OUT_DIR}\n\n!!! `$OUT_DIR/DBF_stash/*` " - f"output used in 'Continuous' and 'Categorical' metrics!!!" - ) \ No newline at end of file + if already_processed: + print( + "\n!!!Processing Problem!!!\n\n" + f"{', '.join(already_processed)} already run!\n" + "Be sure to delete the associated files in your `OUTDIR` to rerun:" + f"\n\t> {OUT_DIR}\n\n!!! `$OUT_DIR/DBF_stash/*` " + f"output used in 'Continuous' and 'Categorical' metrics!!!" + ) \ No newline at end of file diff --git a/StreamCat_GUI.py b/StreamCat_GUI.py new file mode 100644 index 0000000..4211549 --- /dev/null +++ b/StreamCat_GUI.py @@ -0,0 +1,876 @@ +from database import DatabaseConnection +import customtkinter as ctk +from CTkListbox import * +ctk.set_appearance_mode("dark") +ctk.set_default_color_theme("blue") + +class CTkAutocompleteCombobox(ctk.CTkComboBox): + """:class:`ctk.CTkComboBox` widget that features autocompletion.""" + def __init__(self, master=None, completevalues=None, **kwargs): + """Create a CTkAutocompleteCombobox. + + Args: + master (widget): Parent frame of widget + completevalues (list): autocomplettion values + kwargs: keyword arguments passed to the :class:`ctk.CTkComboBox` initializer + """ + + ctk.CTkComboBox.__init__(self, master, values=completevalues, **kwargs) + self._completion_list = completevalues + if isinstance(completevalues, list): + self.set_completion_list(completevalues) + self._hits = [] + + self._hit_index = 0 + self.position = 0 + self.tk.eval + # navigate on keypress in the dropdown: + # code taken from https://wiki.tcl-lang.org/page/ttk%3A%3Acombobox by Pawel Salawa, copyright 2011 +# self.tk.eval(""" +# proc ComboListKeyPressed {w key} { +# if {[string length $key] > 1 && [string tolower $key] != $key} { +# return +# } + +# set cb [winfo parent [winfo toplevel $w]] +# set text [string map [list {[} {\[} {]} {\]}] $key] +# if {[string equal $text ""]} { +# return +# } + +# set values [$cb cget -values] +# set x [lsearch -glob -nocase $values $text*] +# if {$x < 0} { +# return +# } + +# set current [$w curselection] +# if {$current == $x && [string match -nocase $text* [lindex $values [expr {$x+1}]]]} { +# incr x +# } + +# $w selection clear 0 end +# $w selection set $x +# $w activate $x +# $w see $x +# } + +# set popdown [ttk::combobox::PopdownWindow %s] +# bind $popdown.f.l [list ComboListKeyPressed %%W %%K] +# """ % (self)) + + def set_completion_list(self, completion_list): + """ + Use the completion list as drop down selection menu, arrows move through menu. + + :param completion_list: completion values + :type completion_list: list + """ + self._completion_list = sorted(completion_list, key=str.lower) # Work with a sorted list + self.configure(values=completion_list) + self._hits = [] + self._hit_index = 0 + self.position = 0 + self.bind('', self.handle_keyrelease) + self['values'] = self._completion_list # Setup our popup menu + + def autocomplete(self, delta=0): + """ + Autocomplete the Combobox. 
diff --git a/StreamCat_GUI.py b/StreamCat_GUI.py
new file mode 100644
index 0000000..4211549
--- /dev/null
+++ b/StreamCat_GUI.py
@@ -0,0 +1,876 @@
+from database import DatabaseConnection
+import customtkinter as ctk
+from CTkListbox import *
+ctk.set_appearance_mode("dark")
+ctk.set_default_color_theme("blue")
+
+class CTkAutocompleteCombobox(ctk.CTkComboBox):
+    """:class:`ctk.CTkComboBox` widget that features autocompletion."""
+    def __init__(self, master=None, completevalues=None, **kwargs):
+        """Create a CTkAutocompleteCombobox.
+
+        Args:
+            master (widget): Parent frame of widget
+            completevalues (list): autocompletion values
+            kwargs: keyword arguments passed to the :class:`ctk.CTkComboBox` initializer
+        """
+
+        ctk.CTkComboBox.__init__(self, master, values=completevalues, **kwargs)
+        self._completion_list = completevalues
+        if isinstance(completevalues, list):
+            self.set_completion_list(completevalues)
+        self._hits = []
+        self._hit_index = 0
+        self.position = 0
+        # navigate on keypress in the dropdown:
+        # code taken from https://wiki.tcl-lang.org/page/ttk%3A%3Acombobox by Pawel Salawa, copyright 2011
+#         self.tk.eval("""
+# proc ComboListKeyPressed {w key} {
+#     if {[string length $key] > 1 && [string tolower $key] != $key} {
+#         return
+#     }
+
+#     set cb [winfo parent [winfo toplevel $w]]
+#     set text [string map [list {[} {\[} {]} {\]}] $key]
+#     if {[string equal $text ""]} {
+#         return
+#     }
+
+#     set values [$cb cget -values]
+#     set x [lsearch -glob -nocase $values $text*]
+#     if {$x < 0} {
+#         return
+#     }
+
+#     set current [$w curselection]
+#     if {$current == $x && [string match -nocase $text* [lindex $values [expr {$x+1}]]]} {
+#         incr x
+#     }
+
+#     $w selection clear 0 end
+#     $w selection set $x
+#     $w activate $x
+#     $w see $x
+# }
+
+# set popdown [ttk::combobox::PopdownWindow %s]
+# bind $popdown.f.l <KeyPress> [list ComboListKeyPressed %%W %%K]
+# """ % (self))
+
+    def set_completion_list(self, completion_list):
+        """
+        Use the completion list as drop down selection menu, arrows move through menu.
+
+        :param completion_list: completion values
+        :type completion_list: list
+        """
+        self._completion_list = sorted(completion_list, key=str.lower)  # Work with a sorted list
+        self.configure(values=completion_list)
+        self._hits = []
+        self._hit_index = 0
+        self.position = 0
+        self.bind('<KeyRelease>', self.handle_keyrelease)
+        self['values'] = self._completion_list  # Setup our popup menu
+
+    def autocomplete(self, delta=0):
+        """
+        Autocomplete the Combobox.
+
+        :param delta: 0, 1 or -1: how to cycle through possible hits
+        :type delta: int
+        """
+        if delta:  # need to delete selection otherwise we would fix the current position
+            self._entry.delete(self.position, ctk.END)
+        else:  # set position to end so selection starts where textentry ended
+            self.position = len(self.get())
+        # collect hits
+        _hits = []
+        for element in self._completion_list:
+            if element.lower().startswith(self.get().lower()):  # Match case insensitively
+                _hits.append(element)
+        # if we have a new hit list, keep this in mind
+        if _hits != self._hits:
+            self._hit_index = 0
+            self._hits = _hits
+        # only allow cycling if we are in a known hit list
+        if _hits == self._hits and self._hits:
+            self._hit_index = (self._hit_index + delta) % len(self._hits)
+        # now finally perform the auto completion
+        if self._hits:
+            self._entry.delete(0, ctk.END)
+            self._entry.insert(0, self._hits[self._hit_index])
+            self._entry.select_range(self.position, ctk.END)
+
+    def handle_keyrelease(self, event):
+        """
+        Event handler for the keyrelease event on this widget.
+
+        :param event: Tkinter event
+        """
+        if event.keysym == "BackSpace":
+            self._entry.delete(self._entry.index(ctk.INSERT), ctk.END)
+            self.position = self._entry.index(ctk.END)
+        if event.keysym == "Left":
+            if self.position < self._entry.index(ctk.END):  # delete the selection
+                self._entry.delete(self.position, ctk.END)
+            else:
+                self.position -= 1  # delete one character
+                self._entry.delete(self.position, ctk.END)
+        if event.keysym == "Right":
+            self.position = self._entry.index(ctk.END)  # go to end (no selection)
+        if event.keysym == "Return":
+            self.handle_return(None)
+            return
+        if len(event.keysym) == 1:
+            self.autocomplete()
+        # No need for up/down, we'll jump to the popup
+        # list at the position of the autocompletion
+
+    def handle_return(self, event):
+        """
+        Function to bind to the Enter/Return key so if Enter is pressed the selection is cleared
+
+        :param event: Tkinter event
+        """
+        self._entry.icursor(ctk.END)
+        self.selection_clear()
+
+    def config(self, **kwargs):
+        """Alias for configure"""
+        self.configure(**kwargs)
+
+    def configure(self, **kwargs):
+        """Configure widget specific keyword arguments in addition to :class:`ctk.CTkComboBox` keyword arguments."""
+        if "completevalues" in kwargs:
+            self.set_completion_list(kwargs.pop("completevalues"))
+        return ctk.CTkComboBox.configure(self, **kwargs)
+
+    def cget(self, key):
+        """Return value for widget specific keyword arguments"""
+        if key == "completevalues":
+            return self._completion_list
+        return ctk.CTkComboBox.cget(self, key)
+
+    def keys(self):
+        """Return a list of all resource names of this widget."""
+        keys = ctk.CTkComboBox.keys(self)
+        keys.append("completevalues")
+        return keys
+
+    def __setitem__(self, key, value):
+        self.configure(**{key: value})
+
+    def __getitem__(self, item):
+        return self.cget(item)
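+# Example usage (illustrative only; assumes customtkinter is installed and this
+# class is importable -- typing "d" narrows the entry to "Density" via
+# handle_keyrelease -> autocomplete):
+#
+#   app = ctk.CTk()
+#   combo = CTkAutocompleteCombobox(app, completevalues=["Percent", "Mean", "Density"])
+#   combo.pack(padx=20, pady=20)
+#   app.mainloop()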
+
+class ProgressbarFrame(ctk.CTkToplevel):
+    def __init__(self, parent):
+        super().__init__(parent)
+        self.parent = parent
+
+class DbResultsFrame(ctk.CTkToplevel):
+    def __init__(self, parent, response):
+        super().__init__(parent)
+        self.parent = parent
+        self.geometry("450x350")
+
+        self.label = ctk.CTkLabel(self, text="Database Response")
+        self.label.pack(side=ctk.TOP, padx=20, pady=20)
+        self.response = response
+        self.response_str = ''
+        if isinstance(self.response, (tuple, list)):
+            for i in self.response:
+                if not isinstance(i, str):
+                    flattened_i = [item for sub in i for item in sub]
+                    self.response_str += '\n'.join(map(str, flattened_i))
+                else:
+                    self.response_str += f'\n{i}'
+        else:
+            self.response_str = str(self.response)
+        self.textbox = ctk.CTkTextbox(self)
+        self.textbox.insert("0.0", text=self.response_str)
+        self.textbox.configure(state="disabled")
+        self.textbox.pack(expand=True, fill=ctk.BOTH)
+
+class DbUpdatesFrame(ctk.CTkToplevel):
+    def __init__(self, parent):
+        super().__init__(parent)
+        self.parent = parent
+        self.geometry("450x350")
+
+        self.label = ctk.CTkLabel(self, text="Database Update Form")
+        self.label.pack(side=ctk.TOP, padx=20, pady=20)
+
+        self.partition_label = ctk.CTkLabel(self, text="Was this update for streamcat or lakecat?")
+        self.partition_label.pack(padx=10, pady=5)
+        self.partition_var = ctk.StringVar()
+        self.partition_var.set('streamcat')
+        self.partition_radio_streamcat = ctk.CTkRadioButton(self, text="StreamCat", variable=self.partition_var, value='streamcat')
+        self.partition_radio_streamcat.pack(padx=10, pady=5)
+        self.partition_radio_lakecat = ctk.CTkRadioButton(self, text="LakeCat", variable=self.partition_var, value='lakecat')
+        self.partition_radio_lakecat.pack(padx=10, pady=5)
+
+        self.internal_update_label = ctk.CTkLabel(self, text="Internal database update (what changes did you make?): ")
+        self.internal_update_label.pack(padx=10, pady=5)
+
+        self.internal_update_entry = ctk.CTkEntry(self, width=280)  # maybe change to textbox
+        self.internal_update_entry.pack(padx=10, pady=5)
+
+        self.public_update_label = ctk.CTkLabel(self, text="Public database update (this will be displayed on the changelog page; be specific): ")
+        self.public_update_label.pack(padx=10, pady=5)
+
+        self.public_update_entry = ctk.CTkEntry(self, width=280)  # maybe change to textbox
+        self.public_update_entry.pack(padx=10, pady=5)
+
+        self.submit_button = ctk.CTkButton(self, text="Submit", command=self.add_to_changelog)
+        self.submit_button.pack(fill=ctk.X, padx=10, pady=5)
+
+    def add_to_changelog(self):
+        change_desc = self.internal_update_entry.get()
+        public_desc = self.public_update_entry.get()
+        partition = self.partition_var.get()
+        result = db_conn.newChangelogRow(partition, public_desc, change_desc)
") # sc_datasets.dsname and sc_metrics_tg.final_table + self.dataset_label.pack(padx=10, pady=5) + + self.dataset_entry = ctk.CTkEntry(self, width=280) + self.dataset_entry.pack(padx=10, pady=5) + + self.source_name_label = ctk.CTkLabel(self, text="Enter metric source name:") + self.source_name_label.pack(padx=10, pady=5) + self.source_name_entry = ctk.CTkEntry(self, width=280) + self.source_name_entry.pack(padx=10, pady=5) + + self.source_url_label = ctk.CTkLabel(self, text="Enter metric source url:") + self.source_url_label.pack(padx=10, pady=5) + self.source_url_entry = ctk.CTkEntry(self, width=280) + self.source_url_entry.pack(padx=10, pady=5) + + self.date_label = ctk.CTkLabel(self, text="Enter the date the dataset was downloaded with the format dd-MM-YY:") + self.date_label.pack(padx=10, pady=5) + self.date_entry = ctk.CTkEntry(self, width=280) + self.date_entry.pack(padx=10, pady=5) + + self.visible_var = ctk.StringVar() + self.visible_var.set('invisible') + self.visible_radio = ctk.CTkRadioButton(self, text="Visible?", variable=self.partition_var, value='invisible') + self.visible_radio.pack(padx=10, pady=5) + self.invisible_radio = ctk.CTkRadioButton(self, text="Invisible?", variable=self.partition_var, value='visible') + self.invisible_radio.pack(padx=10, pady=5) + + + self.files_label = ctk.CTkLabel(self, text="Choose files:") + self.files_label.pack(fill=ctk.X, padx=10, pady=5) + + self.files_button = ctk.CTkButton(self, width=280, text="Browse", command=self.browse_files) + self.files_button.pack(padx=10, pady=5) # fill=ctk.X, + + self.files_entry = ctk.CTkEntry(self) + self.files_entry.pack(fill=ctk.X, padx=10, pady=5) + + self.submit_button = ctk.CTkButton(self, width=280, text="Submit", command=self.create_dataset) + self.submit_button.pack(side=ctk.BOTTOM, padx=10, pady=5) + + self.results_window = None + self.updates_window = None + + def browse_files(self): + files = ctk.filedialog.askopenfilenames() + self.files_entry.delete(0, ctk.END) + self.files_entry.insert(0, ', '.join(files)) + + def create_dataset(self): + #progressbar = ProgressbarFrame(self) + progressbar_frame = ctk.CTkToplevel(self) + # progressbar_frame.pack(side=ctk.BOTTOM) + # progressbar_frame.grid_columnconfigure(0, weight=1) + # progressbar_frame.grid_rowconfigure(1, weight=1) + progressbar = ctk.CTkProgressBar(progressbar_frame, orientation='horizontal', mode='determinate') + progressbar.configure(mode='determinate') + + partition = self.partition_var.get().lower() + files = self.files_entry.get() + dsname = self.dataset_entry.get() + visible = self.visible_var.get() + active = 1 if visible == 'visible' else 0 + progressbar.start() + ds_result, metric_result, display_result = db_conn.CreateDatasetFromFiles(partition, dsname, files, active) + progressbar.stop() + # print(ds_result) + # print(metric_result) + # print(display_result) + results = (ds_result, metric_result, display_result) + self.results_window = DbResultsFrame(self, results) + self.results_window.focus() + #after this move to the create metric info frame. 
+        # prepopulate final table (dataset name) and dsid from this function's results
+        known_info = {}
+        known_info['final_table'] = self.dataset_entry.get()
+        known_info['source_name'] = self.source_name_entry.get()
+        known_info['source_url'] = self.source_url_entry.get()
+        known_info['date_downloaded'] = self.date_entry.get()
+        known_info['active'] = self.visible_var.get()
+
+        self.metric_info_frame = CreateMetricInfoFrame(self, known_info)
+        self.metric_info_frame.pack(fill=ctk.BOTH, expand=True, padx=10, pady=5)
+
+        self.updates_window = DbUpdatesFrame(self)
+        self.updates_window.focus()
+
+class CreateTableFrame(ctk.CTkScrollableFrame):
+    def __init__(self, parent):
+        super().__init__(parent)
+        self.parent = parent
+
+        self.table_name_label = ctk.CTkLabel(self, text="Enter table name:")
+        self.table_name_label.pack(fill=ctk.X, padx=10, pady=10)
+
+        self.table_name_entry = ctk.CTkEntry(self)
+        self.table_name_entry.pack(fill=ctk.X, padx=10, pady=5)
+
+        files_btn = ctk.CTkButton(self, text="Choose Files", command=self.browse_files)
+        files_btn.pack(fill=ctk.X, padx=10, pady=5)
+
+        self.files_entry = ctk.CTkEntry(self)
+        self.files_entry.pack(fill=ctk.X, padx=10, pady=5)
+
+        self.submit_button = ctk.CTkButton(self, text="Submit", command=self.create_table)
+        self.submit_button.pack(fill=ctk.X, padx=10, pady=5)
+
+        self.results_window = None
+        self.updates_window = None
+
+    def browse_files(self):
+        files = ctk.filedialog.askopenfilenames()
+        self.files_entry.delete(0, ctk.END)
+        self.files_entry.insert(0, ', '.join(files))
+
+    def create_table(self):
+        table_name = self.table_name_entry.get()
+        files = self.files_entry.get()
+        results = db_conn.CreateTableFromFile(table_name, files)
+
+        self.results_window = DbResultsFrame(self, results)
+        self.results_window.focus()
+
+        self.updates_window = DbUpdatesFrame(self)
+        self.updates_window.focus()
+
+class RenameStreamCatMetricFrame(ctk.CTkScrollableFrame):
+    def __init__(self, parent):
+        super().__init__(parent)
+        self.parent = parent
+
+        self.metric_name_options = [] #self.get_metric_names()
+
+        self.partition_label = ctk.CTkLabel(self, text="Is this update for a streamcat or lakecat metric?")
+        self.partition_var = ctk.StringVar()
+        self.partition_var.set('streamcat')
+        self.partition_radio_streamcat = ctk.CTkRadioButton(self, text="StreamCat", variable=self.partition_var, value='streamcat', command=self.get_metric_names)
+        self.partition_radio_streamcat.grid(row=1, column=0, padx=10, pady=5)
+        self.partition_radio_lakecat = ctk.CTkRadioButton(self, text="LakeCat", variable=self.partition_var, value='lakecat', command=self.get_metric_names)
+        self.partition_radio_lakecat.grid(row=1, column=1, padx=10, pady=5)
+
+        # select all metrics from sc_metrics
+        self.metric_name_options = self.get_metric_names()
+        self.metric_name_var = ctk.StringVar()
+        self.metric_name_var.set(self.metric_name_options[0])
+
+        self.old_metric_label = ctk.CTkLabel(self, text="Select the metric you want to rename")
+        self.old_metric_label.grid(row=2, column=0, padx=10, pady=5)
+
+        self.metric_name_dropdown = CTkAutocompleteCombobox(self, width=280, variable=self.metric_name_var, completevalues=self.metric_name_options)
+        self.metric_name_dropdown.grid(row=2, column=1, padx=10, pady=5)
+
+        self.new_metric_label = ctk.CTkLabel(self, text="Enter new metric name (case sensitive)")
+        self.new_metric_label.grid(row=3, column=0, padx=10, pady=5)
+        self.new_name = ctk.CTkEntry(self, width=280)
+        self.new_name.grid(row=3, column=1, padx=10, pady=5)
+
+        self.info_label = ctk.CTkLabel(self, text="We will apply the changes to all tables as well as all years and AOIs associated with this metric")
ctk.CTkLabel(self, text="We will apply the changes to all tables as well as all years and aoi's associated with this metric") + self.info_label.grid(row=4, column=0, padx=10, pady=5) + + self.specific_info = ctk.CTkLabel(self, text="VERY IMPORTANT!\n The naming convention is to have the first letter of the metric be capitalized and then use camel casing (no underscores) in the rest of the name.\n If the metric applies to multiple years add [Year] to the end of the name.\n Afterwards almost all metrics should have the [AOI] tag as well. The only reason it wouldn't is if the area of interest is `Other`.") + self.specific_info.grid(row=5, column=0, padx=10, pady=5) + + self.submit_button = ctk.CTkButton(self, text="Submit", command=self.rename_metric) + self.submit_button.grid(row=6, column=0, padx=10, pady=5) + + self.results_window = None + self.updates_window = None + + def get_metric_names(self): + self.metric_name_options.clear() + table_name = 'sc_metrics_tg' if self.partition_var.get() == 'streamcat' else 'lc_metrics_tg' + results = db_conn.SelectColsFromTable(['metric_name'], table_name, {'orderby': 'metric_name'}) + for row in results: + self.metric_name_options.append(row._t[0]) + return self.metric_name_options + + + def rename_metric(self): + partition = self.partition_var.get() + old_name = self.metric_name_var.get() + new_name = self.new_name.get() + results = db_conn.UpdateMetricName(partition, old_name, new_name) + + self.results_window = DbResultsFrame(self, results) + self.results_window.focus() + + self.updates_window = DbUpdatesFrame(self) + self.updates_window.focus() + +class ActivateDatasetFrame(ctk.CTkScrollableFrame): + def __init__(self, parent): + super().__init__(parent) + self.parent = parent + + self.partition_var = ctk.StringVar() + self.partition_var.set('streamcat') + self.partition_radio_streamcat = ctk.CTkRadioButton(self, text="StreamCat", variable=self.partition_var, value='streamcat', command=self.get_table_options) + self.partition_radio_streamcat.pack(padx=10, pady=5) + self.partition_radio_lakecat = ctk.CTkRadioButton(self, text="LakeCat", variable=self.partition_var, value='lakecat', command=self.get_table_options) + self.partition_radio_lakecat.pack(padx=10, pady=5) + self.partition_radio_both = ctk.CTkRadioButton(self, text="Both", variable=self.partition_var, value='both', command=self.get_table_options) + self.partition_radio_both.pack(padx=10, pady=5) + + self.dsname_var = ctk.StringVar() + self.dsname_options = self.get_table_options() # db_conn.GetAllDatasetNames() + self.dsname_var.set(self.dsname_options[0]) + self.dsname_dropdown = ctk.CTkComboBox(self, variable=self.dsname_var, values=self.dsname_options, command=self.get_current_active_value) + self.dsname_dropdown.pack(fill=ctk.X, expand=True, padx=10, pady=5) + + self.curr_active_label = ctk.CTkLabel(self, text="This dataset is currently: ") + self.curr_active_label.pack(padx=10, pady=5) + self.curr_active_val = ctk.CTkEntry(self, width=280) + self.curr_active_val.pack(padx=10, pady=5) + + self.submit_button = ctk.CTkButton(self, text="Submit", command=self.update_active_dataset) + self.submit_button.pack(side=ctk.BOTTOM, fill=ctk.X, padx=10, pady=5) + + self.results_window = None + self.updates_window = None + + def get_table_options(self): + options = [] + if self.partition_var.get() == 'both': + options = db_conn.GetAllDatasetNames() + + + else: + table = 'sc_datasets' if self.partition_var.get() == 'streamcat' else 'lc_datasets' + dsnames = db_conn.SelectColsFromTable(['dsname'], 
+            for row in dsnames:
+                options.append(row._t[0])
+        # full_list = list(db_conn.metadata.tables.keys())
+        # if self.partition_var.get() == 'both':
+        #     return full_list
+
+        # prefix = 'sc_ds' if self.partition_var.get() == 'streamcat' else 'lc_ds'
+        # options = [x for x in full_list if x.startswith(prefix)]
+        return options
+
+    def get_current_active_value(self, choice):
+        table_name = 'sc_datasets' if self.partition_var.get() == 'streamcat' else 'lc_datasets'
+        col = 'active'
+        where = {'dsname': choice}
+        curr_val = db_conn.SelectColWhere(table_name, col, where)
+        for row in curr_val:
+            active_val = 'Visible' if row._t[0] == 1 else 'Invisible'
+            self.curr_active_val.insert(0, active_val)
+
+    def update_active_dataset(self):
+        dsname = self.dsname_var.get()
+        partition = self.partition_var.get()
+        results = db_conn.UpdateActiveDataset(dsname, partition)
+
+        self.results_window = DbResultsFrame(self, results)
+        #self.results_window.focus()
+
+        self.updates_window = DbUpdatesFrame(self)
+        self.updates_window.focus()
+
+class UpdateTableFrame(ctk.CTkScrollableFrame):
+    def __init__(self, parent):
+        super().__init__(parent)
+        self.parent = parent
+
+        self.partition_var = ctk.StringVar()
+        self.partition_var.set('streamcat')
+        self.partition_radio_streamcat = ctk.CTkRadioButton(self, text="StreamCat", variable=self.partition_var, value='streamcat', command=self.get_table_options)
+        self.partition_radio_streamcat.pack(padx=10, pady=5)
+        self.partition_radio_lakecat = ctk.CTkRadioButton(self, text="LakeCat", variable=self.partition_var, value='lakecat', command=self.get_table_options)
+        self.partition_radio_lakecat.pack(padx=10, pady=5)
+
+        self.table_dropdown_label = ctk.CTkLabel(self, text="Select table to update:")
+        self.table_dropdown_label.pack(padx=10, pady=5)
+
+        self.table_var = ctk.StringVar()
+        self.table_options = self.get_table_options() #list(db_conn.metadata.tables.keys())
+        self.table_var.set(self.table_options[0])
+        self.table_dropdown = ctk.CTkComboBox(self, variable=self.table_var, values=self.table_options)
+        self.table_dropdown.pack(padx=10, pady=5)
+
+        self.files_label = ctk.CTkLabel(self, text="Choose files:")
+        self.files_label.pack(padx=10, pady=10)
+
+        self.files_button = ctk.CTkButton(self, text="Browse", command=self.browse_files)
+        self.files_button.pack(padx=10, pady=5)
+
+        self.files_entry = ctk.CTkEntry(self)
+        self.files_entry.pack(padx=10, pady=5)
+
+        self.submit_button = ctk.CTkButton(self, text="Submit", command=self.update_table)
+        self.submit_button.pack(side=ctk.BOTTOM, fill=ctk.X, padx=10, pady=5)
+
+        self.results_window = None
+        self.updates_window = None
+
+    def get_table_options(self):
+        prefix = 'sc' if self.partition_var.get() == 'streamcat' else 'lc'
+        full_list = list(db_conn.metadata.tables.keys())
+        options = [x for x in full_list if x.startswith(prefix)]
+        return options
+
+    def browse_files(self):
+        files = ctk.filedialog.askopenfilenames()
+        self.files_entry.delete(0, ctk.END)
+        self.files_entry.insert(0, ', '.join(files))
+
+    def update_table(self):
+        table_name = self.table_var.get()
+        file = self.files_entry.get()
+        results = db_conn.BulkInsertFromFile(table_name, file)
+
+        self.results_window = DbResultsFrame(self, results)
+        self.results_window.focus()
+
+        self.updates_window = DbUpdatesFrame(self)
+        self.updates_window.focus()
text="Enter new metric name:") + self.metric_name_label.grid(row=2, column=0, padx=10, pady=5) + + self.metric_name_entry = ctk.CTkEntry(self, width=280) + self.metric_name_entry.grid(row=2, column=1, columnspan=2, padx=10, pady=5) + + self.category_label = ctk.CTkLabel(self, text="Select metric category:") + self.category_label.grid(row=3, column=0, padx=10, pady=5) + + self.category_dropdown_var = ctk.StringVar() + self.category_dropdown = ctk.CTkComboBox(self, width=280, variable=self.category_dropdown_var, values=["Base", "Natural", "Anthropogenic"]) + self.category_dropdown.grid(row=3, column=1, columnspan=2, padx=10, pady=5) + + + self.aoi_label = ctk.CTkLabel(self, text="Select all AOIs for the metric:") + self.aoi_label.grid(row=4, column=0, padx=10, pady=5) + + self.aoi_values = ["Cat", "Ws", "CatRp100", "WsRp100", "Other"] + self.aoi_listbox = CTkListbox(self, multiple_selection=True) + for i, aoi in enumerate(self.aoi_values): + self.aoi_listbox.insert(i, aoi) + self.aoi_listbox.grid(row=4, column=1, columnspan=2, rowspan=5, padx=10, pady=5) + + + self.year_label = ctk.CTkLabel(self, text="Enter comma seperated list of years (if available):") + self.year_label.grid(row=9, column=0, padx=10, pady=5) + + self.year_entry = ctk.CTkEntry(self, width=280) + self.year_entry.grid(row=9, column=1, columnspan=2, padx=10, pady=5) + + self.webtool_label = ctk.CTkLabel(self, text="Enter Webtool Name:") + self.webtool_label.grid(row=10, column=0, padx=10, pady=5) + self.webtool_entry = ctk.CTkEntry(self, width=280) + self.webtool_entry.grid(row=10, column=1, columnspan=2, padx=10, pady=5) + + self.description_label = ctk.CTkLabel(self, text="Enter metric description:") + self.description_label.grid(row=11, column=0, padx=10, pady=5) + self.description_entry = ctk.CTkEntry(self, width=280) + self.description_entry.grid(row=11, column=1, columnspan=2, padx=10, pady=5) + + self.units_label = ctk.CTkLabel(self, text="Enter metric units:") + self.units_label.grid(row=12, column=0, padx=10, pady=5) + + self.units_options = [] + self.units_results = db_conn.SelectColsFromTable(['metric_units'], 'sc_metrics_tg', {'distinct': 'metric_units'}) + for unit in self.units_results: + self.units_options.append(unit._t[0]) + self.units_var = ctk.StringVar() + self.units_entry = CTkAutocompleteCombobox(self, width=280, completevalues=self.units_options, variable=self.units_var) + self.units_entry.grid(row=12, column=1, columnspan=2, padx=10, pady=5) + + self.uuid_label = ctk.CTkLabel(self, text="Enter metric uuid:") + self.uuid_label.grid(row=13, column=0, padx=10, pady=5) + self.uuid_entry = ctk.CTkEntry(self, width=280) + self.uuid_entry.grid(row=13, column=1, columnspan=2, padx=10, pady=5) + + self.metadata_label = ctk.CTkLabel(self, text="Enter metric metadata:") + self.metadata_label.grid(row=14, column=0, padx=10, pady=5) + self.metadata_entry = ctk.CTkEntry(self, width=280) + self.metadata_entry.grid(row=14, column=1, columnspan=2, padx=10, pady=5) + + self.source_name_label = ctk.CTkLabel(self, text="Enter metric source name:") + self.source_name_label.grid(row=15, column=0, padx=10, pady=5) + self.source_name_entry = ctk.CTkEntry(self, width=280) + self.source_name_entry.grid(row=15, column=1, columnspan=2, padx=10, pady=5) + + self.source_url_label = ctk.CTkLabel(self, text="Enter metric source url:") + self.source_url_label.grid(row=16, column=0, padx=10, pady=5) + self.source_url_entry = ctk.CTkEntry(self, width=280) + self.source_url_entry.grid(row=16, column=1, columnspan=2, padx=10, pady=5) + + 
+        self.date_label.grid(row=17, column=0, padx=10, pady=5)
+        self.date_entry = ctk.CTkEntry(self, width=280)
+        self.date_entry.grid(row=17, column=1, columnspan=2, padx=10, pady=5)
+
+        self.dsid_label = ctk.CTkLabel(self, text="Enter metric dsid:")
+        self.dsid_label.grid(row=18, column=0, padx=10, pady=5)
+        self.dsid_entry = ctk.CTkEntry(self, width=280)
+        self.dsid_entry.grid(row=18, column=1, columnspan=2, padx=10, pady=5)
+
+        self.dataset_name_label = ctk.CTkLabel(self, text="Enter metric dataset name (aka final_table):")
+        self.dataset_name_label.grid(row=19, column=0, padx=10, pady=5)
+
+        self.dataset_name_entry = ctk.CTkEntry(self, width=280)
+        self.dataset_name_entry.grid(row=19, column=1, columnspan=2, padx=10, pady=5)
+
+        self.partition_label = ctk.CTkLabel(self, text="What partition is the metric in?")
+        self.partition_label.grid(row=20, column=0, padx=10, pady=10)
+
+        self.selected_partition = ctk.StringVar(value='streamcat')
+        self.streamcat_radio = ctk.CTkRadioButton(self, text="StreamCat", variable=self.selected_partition, value='streamcat')
+        self.streamcat_radio.grid(row=20, column=1, sticky='w', padx=10, pady=5)
+        self.lakecat_radio = ctk.CTkRadioButton(self, text="LakeCat", variable=self.selected_partition, value='lakecat')
+        self.lakecat_radio.grid(row=20, column=2, sticky='w', padx=10, pady=5)
+
+        self.submit_button = ctk.CTkButton(self, text="Submit", command=self.create_metric_info)
+        self.submit_button.grid(row=21, column=0, columnspan=2, padx=10, pady=5)
+
+        self.results_window = None
+        self.updates_window = None
+        if known_info is not None:
+            self.fill_known_values(known_info)
+
+    def fill_known_values(self, known_vals: dict):
+        keys = known_vals.keys()
+        if 'final_table' in keys:
+            self.dataset_name_entry.insert(0, known_vals['final_table'])
+
+        if 'source_name' in keys:
+            self.source_name_entry.insert(0, known_vals['source_name'])
+
+        if 'source_url' in keys:
+            self.source_url_entry.insert(0, known_vals['source_url'])
+
+        if 'date_downloaded' in keys:
+            self.date_entry.insert(0, known_vals['date_downloaded'])
+
+    def create_metric_info(self):
+        print("Creating metric info card")
+        metric_data = {}
+        #metric_data['partition'] = self.selected_partition.get()
+        table_name = 'sc_metrics_tg' if self.selected_partition.get() == 'streamcat' else 'lc_metrics_tg'
+        metric_data['metric_name'] = self.metric_name_entry.get()
+        metric_data['indicator_category'] = self.category_dropdown_var.get()
+        metric_data['aoi'] = self.aoi_listbox.get()
+        metric_data['year'] = self.year_entry.get()
+        metric_data['webtool_name'] = self.webtool_entry.get()
+        metric_data['description'] = self.description_entry.get()
+        metric_data['units'] = self.units_entry.get()
+        metric_data['uuid'] = self.uuid_entry.get()
+        metric_data['metadata'] = self.metadata_entry.get()
+        metric_data['source_name'] = self.source_name_entry.get()
+        metric_data['source_url'] = self.source_url_entry.get()
+        metric_data['date_downloaded'] = self.date_entry.get()
+        metric_data['dsid'] = self.dsid_entry.get()
+        results = db_conn.InsertRow(table_name, metric_data)
+
+        self.results_window = DbResultsFrame(self, results)
+        self.results_window.focus()
+
+        self.updates_window = DbUpdatesFrame(self)
+        self.updates_window.focus()
+
+class EditMetricInfoFrame(ctk.CTkScrollableFrame):
+    def __init__(self, parent):
+        super().__init__(parent)
+        self.parent = parent
+        self.partition_label = ctk.CTkLabel(self, text="What partition would you like to use?")
+
+class EditMetricInfoFrame(ctk.CTkScrollableFrame):
+    def __init__(self, parent):
+        super().__init__(parent)
+        self.parent = parent
+        self.partition_label = ctk.CTkLabel(self, text="What partition would you like to use?")
+        self.partition_label.grid(row=1, column=0, padx=10, pady=5)
+
+        self.partition_var = ctk.StringVar()
+        self.partition_var.set('streamcat')
+        self.partition_radio_streamcat = ctk.CTkRadioButton(self, text="StreamCat", variable=self.partition_var, value='streamcat')
+        self.partition_radio_streamcat.grid(row=1, column=1, padx=10, pady=5)
+        self.partition_radio_lakecat = ctk.CTkRadioButton(self, text="LakeCat", variable=self.partition_var, value='lakecat')
+        self.partition_radio_lakecat.grid(row=1, column=2, padx=10, pady=5)
+
+        metric_name_options, tg_columns = self.get_edit_widget_data()
+
+        self.metric_name_label = ctk.CTkLabel(self, text="What metric variable info do you want to edit?")
+        self.metric_name_label.grid(row=2, column=0, padx=10, pady=5)
+
+        self.metric_name_var = ctk.StringVar()
+        self.metric_name_dropdown = ctk.CTkComboBox(self, width=280, variable=self.metric_name_var, values=metric_name_options)
+        self.metric_name_dropdown.grid(row=2, column=1, padx=10, pady=5)
+
+        self.tg_col_label = ctk.CTkLabel(self, text="Which value needs to be edited?")
+        self.tg_col_label.grid(row=3, column=0, padx=10, pady=5)
+
+        self.tg_col_var = ctk.StringVar()
+        self.tg_col_dropdown = ctk.CTkComboBox(self, width=280, variable=self.tg_col_var, values=tg_columns, command=self.get_current_metric_value)
+        self.tg_col_dropdown.grid(row=3, column=1, padx=10, pady=5)
+
+        self.tg_curr_val_label = ctk.CTkLabel(self, text="Current value for selected metric attribute")
+        self.tg_curr_val_label.grid(row=4, column=0, padx=10, pady=5)
+        self.tg_col_curr_val = ctk.CTkEntry(self, width=280)
+        self.tg_col_curr_val.grid(row=4, column=1, padx=10, pady=5)
+
+        self.new_val_label = ctk.CTkLabel(self, text="Enter new value for selected metric attribute:")
+        self.new_val_label.grid(row=5, column=0, padx=10, pady=5)
+
+        self.new_val_entry = ctk.CTkEntry(self, width=280)
+        self.new_val_entry.grid(row=5, column=1, padx=10, pady=5)
+
+        self.submit_button = ctk.CTkButton(self, text="Submit", command=self.edit_metric_info)
+        self.submit_button.grid(row=6, column=0, padx=10, pady=5)
+
+        self.results_window = None
+        self.updates_window = None
+
+    def get_edit_widget_data(self):
+        table_name = 'sc_metrics_tg' if self.partition_var.get() == 'streamcat' else 'lc_metrics_tg'
+
+        metric_name_results = db_conn.SelectColsFromTable(['metric_name'], table_name)
+        metric_name_options = []
+        for row in metric_name_results:
+            metric_name_options.append(row._t[0])
+
+        tg_columns = list(db_conn.metadata.tables[table_name].c.keys())
+
+        return metric_name_options, tg_columns
+
+    def get_current_metric_value(self, choice):
+        table_name = 'sc_metrics_tg' if self.partition_var.get() == 'streamcat' else 'lc_metrics_tg'
+        col = self.tg_col_dropdown.get()
+        metric = self.metric_name_var.get()
+        curr_val = db_conn.SelectColWhere(table_name, col, {'metric_name': metric})
+        # Clear any previously displayed value before showing the current one(s)
+        self.tg_col_curr_val.delete(0, 'end')
+        self.tg_col_curr_val.insert(0, ', '.join(str(row._t[0]) for row in curr_val))
+
+    def edit_metric_info(self):
+        table_name = 'sc_metrics_tg' if self.partition_var.get() == 'streamcat' else 'lc_metrics_tg'
+        col_name = self.tg_col_var.get()
+        metric_name = self.metric_name_var.get()
+        new_val = self.new_val_entry.get()
+        results = db_conn.UpdateRow(table_name, col_name, 'metric_name', metric_name, new_val)
+
+        self.results_window = DbResultsFrame(self, results)
+        self.results_window.focus()
+
+        self.updates_window = DbUpdatesFrame(self)
+        self.updates_window.focus()
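+
+# edit_metric_info() funnels into DatabaseConnection.UpdateRow, which compiles to a
+# single-row UPDATE keyed on metric_name, e.g. (hypothetical values):
+#     UPDATE sc_metrics_tg SET description = 'new text' WHERE metric_name = 'PctUrb[AOI]'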
+
+class DatabaseApp(ctk.CTk):
+    def __init__(self):
+        super().__init__()
+        #self = root
+
+        self.title("StreamCat GUI (modern)")
+        # self.geometry("700x450")
+        self.width = int(self.winfo_screenwidth()/2)
+        self.height = int(self.winfo_screenheight()/1.5)
+        self.geometry(f"{self.width}x{self.height}")
+        self.minsize(500, 500)
+        self.action_frames = {}
+        self.current_frame = None
+
+        self.action_var = ctk.StringVar()
+        # self.action_var.set('Create Dataset')
+
+        self.actions = [
+            'Create Dataset',
+            'Create Table',
+            'Rename Metric',
+            'Activate/Deactivate Dataset',
+            'Add File Data to Table',
+            'Create Metric Info',
+            'Edit Metric Info'
+        ]
+        self.action_var.set(self.actions[0])  # could add a default / info frame to be actions[0] called '--'
+        self.action_dropdown = ctk.CTkComboBox(self, width=200, variable=self.action_var, values=self.actions)
+        self.action_dropdown.pack(side=ctk.TOP, padx=10, pady=10)
+
+        self.action_button = ctk.CTkButton(self, text="Go", command=self.show_frame)
+        self.action_button.pack(side=ctk.TOP, padx=10, pady=5)
+
+        self.execute_sql_var = ctk.BooleanVar()
+        self.execute_sql_var.set(db_conn.execute)
+        self.execute_sql_switch = ctk.CTkCheckBox(self, text="Execute SQL?", variable=self.execute_sql_var, command=lambda: setattr(db_conn, 'execute', self.execute_sql_var.get()))
+        self.execute_sql_switch.pack(side=ctk.RIGHT, padx=10, pady=5)
+
+        self.show_frame()
+
+    def show_frame(self):
+        action = self.action_var.get()
+        # Frames are created lazily and cached so widget state survives switching
+        if action not in self.action_frames:
+            if action == 'Create Dataset':
+                self.action_frames[action] = CreateDatasetFrame(self)
+            elif action == 'Create Table':
+                self.action_frames[action] = CreateTableFrame(self)
+            elif action == 'Rename Metric':
+                self.action_frames[action] = RenameStreamCatMetricFrame(self)
+            elif action == 'Activate/Deactivate Dataset':
+                self.action_frames[action] = ActivateDatasetFrame(self)
+            elif action == 'Add File Data to Table':
+                self.action_frames[action] = UpdateTableFrame(self)
+            elif action == 'Create Metric Info':
+                self.action_frames[action] = CreateMetricInfoFrame(self)
+            elif action == 'Edit Metric Info':
+                self.action_frames[action] = EditMetricInfoFrame(self)
+
+        if self.current_frame:
+            self.current_frame.pack_forget()
+
+        self.current_frame = self.action_frames[action]
+        self.current_frame.pack(fill='both', expand=True)
+
+if __name__ == '__main__':
+    db_conn = DatabaseConnection()
+    db_conn.connect()
+    app = DatabaseApp()
+    app.mainloop()
\ No newline at end of file
diff --git a/database.py b/database.py
new file mode 100644
index 0000000..82c05d4
--- /dev/null
+++ b/database.py
@@ -0,0 +1,872 @@
+from sqlalchemy.engine import create_engine
+from sqlalchemy import inspect, Table, Column, MetaData, func, insert, update, delete, select, bindparam, event, text, and_, types
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.schema import CreateTable
+from sqlalchemy.dialects.oracle import NUMBER
+from sqlalchemy.sql.compiler import SQLCompiler
+import pandas as pd
+import logging
+import json
+from datetime import datetime
+import os
+
+if not os.path.exists('logs'):
+    os.makedirs('logs')
+
+def log_query(conn, clauseelement, multiparams, params, execution_options):
+    # Log all non-SELECT SQL commands before execution
+    if not clauseelement.is_select:
+        if len(params) > 0 and len(multiparams) > 0:
+            combined_params = {}
+            if multiparams:
+                combined_params.update(multiparams[0] if isinstance(multiparams, list) and multiparams else multiparams)
+            combined_params.update(params or {})
+
+            # Compile the SQL statement with the current dialect and substitute bind parameters
+            compiled_sql = str(clauseelement.compile(dialect=conn.dialect, params=combined_params))
+        else:
+            compiled_sql = str(clauseelement.compile(dialect=conn.dialect, compile_kwargs={"literal_binds": True}))
+
+        # Log the compiled SQL statement to a file
+        with open(f'db_updates_{datetime.today().strftime("%m_%d_%Y")}.sql', 'a') as f:
+            f.write(compiled_sql + ';\n')
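+
+# With execute=True, every non-SELECT statement passes through log_query and is
+# appended, fully compiled, to db_updates_<MM_DD_YYYY>.sql, e.g. a line such as
+# (hypothetical): UPDATE sc_datasets SET active = 1 WHERE dsname = 'WetIndex';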
+
+class DatabaseConnection():
+    def __init__(self, execute=False, database_dir="O:/PRIV/CPHEA/PESD/COR/CORFILES/Geospatial_Library_Projects/StreamCat/DatabaseModification", database_config_file="streamcat_db_config.json") -> None:
+        if database_dir and database_config_file:
+            config_file_path = os.path.join(database_dir, database_config_file)
+            with open(config_file_path) as fp:
+                config_file = json.load(fp)
+            self.dialect = config_file['dialect']
+            self.driver = config_file['driver']
+            self.username = config_file['username']
+            self.password = config_file['password']
+            self.host = config_file['host']
+            self.port = config_file['port']
+            self.service = config_file['service']
+
+        self.execute = execute
+
+        self.engine = None
+        self.metadata = None
+
+    def __str__(self) -> str:
+        """Create Database connection string"""
+        return f"{self.dialect}+{self.driver}://{self.username}:{self.password}@{self.host}:{self.port}/?service_name={self.service}"
+
+    def __del__(self):
+        """Safely close engine on exit"""
+        if self.engine:
+            self.engine.dispose()
+
+    def connect(self):
+        """Connect to database"""
+        if self.engine is None:
+            self.engine = create_engine(self.__str__(), thick_mode={'lib_dir': 'O:/PRIV/CPHEA/PESD/COR/CORFILES/Geospatial_Library_Projects/StreamCat/DatabaseModification/instantclient-basic-windows/instantclient_23_4'}, logging_name="StreamCatDB")
+            self.inspector = inspect(self.engine)
+            self.metadata = MetaData()
+            self.metadata.reflect(self.engine)
+            os.makedirs('logs', exist_ok=True)  # make logs dir if it doesn't exist
+            logging.basicConfig(filename=f'logs/db_log_{datetime.today().strftime("%m_%d_%Y")}.log', filemode='a')
+            logging.getLogger("sqlalchemy.engine").setLevel(logging.DEBUG)
+            event.listen(self.engine, 'before_execute', log_query)
+        return
+
+    def disconnect(self):
+        """Disconnect from database"""
+        if self.engine:
+            self.engine.dispose()
+        return
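+
+    # Minimal lifecycle sketch (assumes a reachable streamcat_db_config.json and
+    # Oracle instant client):
+    #     db = DatabaseConnection(execute=False)   # dry run: SQL is written to file
+    #     db.connect()
+    #     ...issue queries...
+    #     db.disconnect()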
+
+    def RunQuery(self, query, params=None):
+        """Execute query with given params.
+
+        If self.execute is true the query will be executed and autocommitted.
+        Otherwise the compiled query is written to a file named db_updates_m_d_y.sql.
+
+        Args:
+            query (SQLAlchemy executable): An executable sqlalchemy query: either a text() construct or any core select, update, insert, delete statement
+            params (list[dict] | dict, optional): Parameters required for the query. A dict for a single-row query or list[dict] for a multi-row query. Defaults to None.
+
+        Returns:
+            result: compiled sql statement or execution results
+            self.execute: whether or not the query was executed
+        """
+        #print(str(query))
+        if self.execute:
+            Session = sessionmaker(self.engine)
+            with Session.begin() as session:
+                # Pass params through when provided; Session.begin() commits on exit
+                result = session.execute(query, params) if params else session.execute(query)
+        else:
+            # Dry run: compile to a SQL string and append it to the update file.
+            # literal_binds cannot substitute runtime parameters, so when params
+            # are supplied the compiled statement is logged alongside each
+            # parameter set.
+            if isinstance(params, list):
+                compiled = str(query.compile(dialect=self.engine.dialect))
+                result = '\n'.join(f"{compiled} -- params: {param_set}" for param_set in params)
+            elif isinstance(params, dict):
+                compiled = str(query.compile(dialect=self.engine.dialect))
+                result = f"{compiled} -- params: {params}"
+            else:
+                result = str(query.compile(dialect=self.engine.dialect, compile_kwargs={"literal_binds": True}))
+
+            #print(result)
+            with open(f'db_updates_{datetime.today().strftime("%m_%d_%Y")}.sql', 'a') as db_file:
+                db_file.write(result + ';\n')
+        return result, self.execute  # Return statement and whether or not it was executed
+
+    def SelectColsFromTable(self, columns: list, table_name: str, function: None | dict = None):
+        """Select columns from database table
+
+        Args:
+            columns (list): columns to be selected
+            table_name (str): name of db table
+            function (None | dict, optional): Function to apply to query. Key is the type of function to apply; value is the column name for the function, if applicable.
+                Options are 'distinct', 'count', 'max', 'min', 'avg', 'groupby', 'orderby'. Defaults to None.
+
+        Returns:
+            result (sequence of rows): Result set of query
+        """
+        if len(columns) == 1:
+            col_str = columns[0]
+        else:
+            col_str = ','.join(columns)
+        # Default plain SELECT; the specific functions below override it
+        query = text(f"SELECT {col_str} FROM {table_name}")
+        if function is not None:
+            if 'distinct' in function:
+                query = text(f"SELECT DISTINCT({col_str}) FROM {table_name}")
+            elif 'count' in function:
+                # TODO reorganize so COUNT composes with clauses that come after
+                # FROM, such as GROUP BY and ORDER BY
+                query = text(f"SELECT {col_str}, COUNT({function['count']}) FROM {table_name}")
+            elif 'orderby' in function:
+                query = text(f"SELECT {col_str} FROM {table_name} ORDER BY {function['orderby']}")
+        with self.engine.connect() as conn:
+            result = conn.execute(query).fetchall()
+
+        # parsed_res = []
+        # for row in result:
+        #     parsed_res.append(row._asdict())
+
+        return result
+
+    def TextSelect(self, text_stmt):
+        """Select query using the SQLAlchemy text() function
+
+        Args:
+            text_stmt (str | Text): String query to pass to database
+
+        Returns:
+            Sequence[Row]: Return set from SQL select statement
+        """
+        if isinstance(text_stmt, str):
+            text_stmt = text(text_stmt)
+
+        with self.engine.connect() as conn:
+            result = conn.execute(text_stmt).fetchall()
+
+        return result
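+
+    # Example: the GUI's units autocomplete pulls distinct units with
+    #     rows = db.SelectColsFromTable(['metric_units'], 'sc_metrics_tg',
+    #                                   {'distinct': 'metric_units'})
+    #     units = [r._t[0] for r in rows]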
+
+    def SelectColWhere(self, table_name: str, col_name: str, where: dict):
+        """Select columns from table with a where condition
+
+        Args:
+            table_name (str): Name of table to select from
+            col_name (str): Name of column to select
+            where (dict): Single-entry mapping of column name to value; compiles to WHERE column = value
+
+        Returns:
+            value (rows): Rows returned from sql statement
+        """
+        if self.inspector.has_table(table_name):
+            table = self.metadata.tables[table_name]
+            for key, val in where.items():
+                where_col = key
+                where_val = val
+            stmt = select(table.c[col_name]).where(table.c[where_col] == where_val)
+            with self.engine.connect() as conn:
+                value = conn.execute(stmt).fetchall()
+                conn.rollback()
+            return value
+
+    def GetTableAsDf(self, table_name: str) -> pd.DataFrame | str:
+        """Get database table by name as pandas DataFrame
+
+        Args:
+            table_name (str): Name of table to view
+
+        Returns:
+            pd.DataFrame: Database table
+            str: If table not found by name, return err string
+        """
+        if self.inspector.has_table(table_name):
+            return pd.read_sql_table(table_name, self.engine)
+        else:
+            return f"No table found named {table_name} in database. Check log file for details."
+
+    def GetTableSchema(self, table_name: str) -> str:
+        """Get schema of given table. Equivalent to SQL statement: DESC table;
+
+        Args:
+            table_name (str): Name of db table
+
+        Returns:
+            str: Description of db table schema if found, or error string on failure
+        """
+        if self.inspector.has_table(table_name):
+            metadata_obj = MetaData()
+            table = Table(table_name, metadata_obj, autoload_with=self.engine)
+            return str(CreateTable(table))
+        else:
+            return f"Table - {table_name} not found."
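+
+    # e.g. db.GetTableSchema('sc_datasets') returns the reflected CREATE TABLE DDL
+    # as a string, while db.GetTableAsDf('sc_datasets') loads the whole table into
+    # a DataFrame.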
+
+    def CreateNewTable(self, table_name: str, data: pd.DataFrame) -> bool:
+        """Create new table in database
+
+        Args:
+            table_name (str): new table name
+            data (pd.DataFrame): dataframe used to define columns and initialize data
+
+        Returns:
+            bool: True if table created and data is inserted; False otherwise.
+        """
+        if self.inspector.has_table(table_name):
+            print(f"Table - {table_name} already exists")
+            return False
+        columns = []
+        for col_name, col_type in zip(data.columns, data.dtypes):
+            # TODO check csv files (as dataframes) to see their types so we can
+            # properly map them to the NUMBER type using types.Numeric()
+            # A 'comid' column becomes the primary key; if no comid column exists,
+            # the first column should be used as the primary key.
+            if (col_type.name == 'int64' or col_type.name == 'float64') and col_name.lower() == 'comid':
+                col = Column(col_name, types.Numeric(), primary_key=True)
+            elif col_type.name == 'int64' or col_type.name == 'float64':
+                col = Column(col_name, types.Numeric())
+            elif col_type.name == 'bool':
+                col = Column(col_name, types.Boolean())
+            elif col_type.name == 'datetime64[ns]':
+                col = Column(col_name, types.DateTime())
+            else:
+                col = Column(col_name, types.VARCHAR())
+            # TODO consider making the columns nullable=True; this mapping is meant
+            # for dataset tables only - non-dataset tables need their own function
+            columns.append(col)
+
+        new_table = Table(table_name, self.metadata, *columns)
+        new_table.create(self.engine, checkfirst=True)
+        result = self.BulkInsert(table_name, data.to_dict(orient='records'))
+        if result:
+            return True
+        return False
+
+    def CreateTableFromFile(self, table_name, file_path):
+        df = pd.read_csv(file_path)
+        result = self.CreateNewTable(table_name, df)
+        if result:
+            return True
+        return False
+
+    def getMaxDsid(self, partition: str):
+        """Get highest dsid in db
+
+        Args:
+            partition (str): Either 'lakecat' or 'streamcat' so we know which datasets table to query for the maximum
+
+        Returns:
+            max_dsid (int): largest dsid for given partition
+        """
+        table_name = 'lc_datasets' if partition == 'lakecat' else 'sc_datasets'
+        ds_table = Table(table_name, self.metadata, autoload_with=self.engine)
+        with self.engine.connect() as conn:
+            max_dsid = conn.execute(select(func.max(ds_table.c.dsid))).scalar()
+            conn.rollback()
+        return max_dsid
+
+    def InsertRow(self, table_name: str, values: dict):
+        """Insert row into db table
+
+        Args:
+            table_name (str): Name of table to insert into
+            values (dict): dictionary with items key = column_name : value = new_value
+        """
+        if self.inspector.has_table(table_name):
+            table = self.metadata.tables[table_name]
+            query = insert(table).values(values).returning(*table.c)
+            result, executed = self.RunQuery(query)
+            if executed:
+                return result.fetchall()
+            else:
+                return result
+
+    # TODO finish dynamic bindings
+    # change old value and new_value to pd.Series or sqlalchemy Column
+    # Update all items in these series
+    def UpdateRow(self, table_name: str, column: str, id_column: str, id_val: str, new_value: str):
+        """Update row in database
+
+        Args:
+            table_name (str): Name of database table
+            column (str): Name of column to update
+            id_column (str): Name of the identifier column
+            id_val (str): Identifier value for the row to update
+            new_value (str): New value to set the column to where the identifier is found
+
+        Returns:
+            result (Result): result of the update query
+        """
+        # if has_table is false call create table
+        if self.inspector.has_table(table_name):
+            table = self.metadata.tables[table_name]
+            #col = table.c.get(column)
+            # if col == None:
+            #     return f"No Column named {column} in Table {table_name}"
+            id_col = table.c.get(id_column)
+            if id_col is None:
+                return f"No Column named {id_column} in Table {table_name}"
+            #query = update(table).where(col == bindparam("id")).values(new_value=bindparam("new_value"))
+            query = update(table).where(id_col == id_val).values({column: new_value})
+            result, executed = self.RunQuery(query)
+            if executed:
+                return result.fetchall()
+            else:
+                return result
+
+    # TODO add confirmation
+    def DeleteRow(self, table_name, id_column, id_val):
+        if self.inspector.has_table(table_name):
+            table = self.metadata.tables[table_name]
+            # Match the row on the given identifier column rather than comparing a
+            # Python value to a bindparam (the original clause was always invalid)
+            query = delete(table).where(table.c[id_column] == bindparam("id_val"))  # .returning(id)
+            params = {"id_val": id_val}
+            result, executed = self.RunQuery(query, params)
+            if executed:
+                return result.fetchall()
+            else:
+                return result
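+
+    # Usage sketch (hypothetical values):
+    #     db.InsertRow('sc_datasets', {'dsid': 99, 'dsname': 'Demo', 'tablename': 'sc_ds_99', 'active': 0})
+    #     db.UpdateRow('sc_metrics_tg', 'source_url', 'metric_name', 'PctUrb[AOI]', 'https://example.org/nlcd')
+    # Each call returns either the inserted/updated rows (execute=True) or the
+    # compiled SQL string (execute=False).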
+
+    def BulkInsert(self, table_name, data):
+        """Bulk insert multiple rows of data into database table
+
+        Args:
+            table_name (str): Name of table to insert data into
+            data (list[dict]): list of dictionary items where each item is defined as key = column_name : value = new_value
+
+        Returns:
+            results: compiled queries or execution results
+        """
+        if self.inspector.has_table(table_name):
+            table = self.metadata.tables[table_name]
+            results = []
+            for row_data in data:
+                insert_query = (
+                    table.insert().values(row_data).returning(*table.c)
+                )
+                result, executed = self.RunQuery(insert_query)
+
+                if executed:
+                    results.extend(result.fetchall())
+                else:
+                    results.append(result)
+
+            return results
+
+    def BulkInsertFromFile(self, table_name, file_path):
+        # If every table column is present in the file, do a bulk INSERT;
+        # otherwise treat the file as partial rows and do a bulk UPDATE
+        df = pd.read_csv(file_path)
+        if self.inspector.has_table(table_name):
+            table = self.metadata.tables[table_name]
+            data = df.to_dict(orient='records')
+            if all(col.name in df.columns for col in table.c):
+                results = self.BulkInsert(table_name, data)
+            else:
+                results = self.BulkUpdate(table_name, data)
+
+            if results:
+                return results
+
+    def BulkUpdateDataset(self, table_name, data):
+        """Bulk update multiple rows of data in a database table.
+
+        Args:
+            table_name (str): Name of table to update data in.
+            data (list[dict]): List of dictionary items where each item is defined as
+                key = column_name : value = new_value, including the primary key 'comid'.
+
+        Returns:
+            results: List of compiled queries or execution results.
+        """
+        if self.inspector.has_table(table_name):
+            table = self.metadata.tables[table_name]
+
+            results = []
+            for row_data in data:
+                # Ensure 'comid' exists in the row_data
+                if 'comid' not in row_data:
+                    raise ValueError("Each row must include the primary key 'comid'")
+
+                # Pull out the comid to use in the WHERE clause
+                comid = row_data.get('comid')
+
+                # Create an update statement
+                update_query = (
+                    table.update()
+                    .where(table.c.comid == comid)
+                    .values(row_data)
+                    # .returning(*table.c)
+                )
+
+                # Compile the query and execute or return the compiled query based on self.execute
+                result, executed = self.RunQuery(update_query)
+
+                if executed:
+                    results.extend(result.fetchall())
+                else:
+                    results.append(str(update_query.compile(dialect=self.engine.dialect)))
+
+            return results
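+
+    # BulkUpdateDataset expects one dict per row, keyed by column name and carrying
+    # the primary key, e.g. (hypothetical metric column):
+    #     data = [{'comid': 12345, 'pcturb2019cat': 3.7},
+    #             {'comid': 12346, 'pcturb2019cat': 0.0}]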
+ """ + + """data example + data = [ + { + 'update_values': {'column1': 'new_value1', 'column2': 'new_value2'}, + 'conditions': {'metricname': oldname, 'status': 'active'} + }, + { + 'update_values': {'column3': 'new_value3'}, + 'conditions': {'id': 2} + } + ] + """ + if self.inspector.has_table(table_name): + table = self.metadata.tables[table_name] + + results = [] + for row_data in data: + # Ensure 'update_values' and 'conditions' exist in the row_data + if 'update_values' not in row_data or 'conditions' not in row_data: + raise ValueError("Each row must include 'update_values' and 'conditions'") + + update_values = row_data['update_values'] + conditions = row_data['conditions'] + + # Create a WHERE clause from the conditions + where_clause = and_(*[getattr(table.c, col) == val for col, val in conditions.items()]) + + # Create an update statement + update_query = ( + table.update() + .where(where_clause) + .values(update_values) + # .returning(*table.c) + ) + + # Compile the query and execute or return the compiled query based on self.execute + result, executed = self.RunQuery(update_query) + + if executed: + results.extend(result.fetchall()) + else: + results.append(result) # str(update_query.compile(dialect=self.engine.dialect)) + + return results + + + def CreateDataset(self, partition: str, df: pd.DataFrame, dsname: str, active: int = 0): + """Create new dataset table from pandas dataframe. This will also insert the new metrics into our metric informatio tables, _metrics, _metrics_display_names and _metrics_tg. + + Args: + partition (str): IMPORTANT: this needs to be either 'streamcat' or 'lakecat'. This is how we will decide what part of the database to create new data in. + df (pd.DataFrame): Dataframe to upload to database as table + dsname (str): New dataset name, defaults to csv name. + active (int): binary int 1 if dataset will be published and displayed upon creation. 0 if not. Default is 0 + + + Returns: + ds_result (tuple): new dataset table name and dataset name inserted into + metric_result: Rows inserted into _metrics table + display_result: Rows inserted into _display_names table + """ + + if partition.lower() == 'streamcat': + prefix = 'sc_' + elif partition.lower() == 'lakecat': + prefix = 'lc_' + else: + ValueError("Invalid partition! 
+
+    def CreateDataset(self, partition: str, df: pd.DataFrame, dsname: str, active: int = 0):
+        """Create new dataset table from pandas dataframe. This will also insert the new metrics into our metric information tables: _metrics, _metrics_display_names and _metrics_tg.
+
+        Args:
+            partition (str): IMPORTANT: this needs to be either 'streamcat' or 'lakecat'. This is how we will decide what part of the database to create new data in.
+            df (pd.DataFrame): Dataframe to upload to database as table
+            dsname (str): New dataset name, defaults to csv name.
+            active (int): binary int 1 if dataset will be published and displayed upon creation. 0 if not. Default is 0
+
+        Returns:
+            ds_result (tuple): new dataset table name and dataset name inserted into
+            metric_result: Rows inserted into _metrics table
+            display_result: Rows inserted into _display_names table
+        """
+        if partition.lower() == 'streamcat':
+            prefix = 'sc_'
+        elif partition.lower() == 'lakecat':
+            prefix = 'lc_'
+        else:
+            raise ValueError("Invalid partition! Needs to be either streamcat or lakecat")
+
+        dsid = self.getMaxDsid(partition) + 1
+        table_name = prefix + 'ds_' + str(dsid)
+        if self.execute:
+            # TODO change this to the sqlalchemy CreateTable path (self.CreateNewTable)
+            #self.CreateNewTable(table_name, df)
+            revert_columns = {}
+            new_col_names = {}
+            dtypes = {}
+            for col_name in df.columns:
+                dtypes[col_name.upper()] = NUMBER
+                new_col_names[col_name] = col_name.upper()
+                revert_columns[col_name.upper()] = col_name
+
+            df.rename(columns=new_col_names, inplace=True)
+            df.to_sql(table_name, self.engine, if_exists='replace', chunksize=10000, dtype=dtypes, index=False)
+            df.rename(columns=revert_columns, inplace=True)
+        else:
+            # If execute is false then we just write the raw sql queries to a file
+            lines = []
+            column_names = ', '.join(df.columns)
+            base_query = f"INSERT INTO {table_name} ({column_names})"
+            for idx, row in df.iterrows():
+                values = ', '.join([f"{str(value)}" for value in row])
+                line = base_query + f" VALUES ({values});\n"
+                lines.append(line)
+            with open(f"create_{table_name}.sql", 'w') as f:
+                f.writelines(lines)
+
+        # Insert dataset info into sc / lc datasets
+        ds_result = self.InsertRow(f'{prefix}datasets', {'dsid': dsid, 'dsname': dsname, 'tablename': table_name, 'active': active})
+
+        display_names = set()
+        metric_data = []
+        display_params = []
+        metrics_table_name = prefix + 'metrics'
+        for metric in df.columns:
+            if metric not in ['COMID', 'CatAreaSqKm', 'WsAreaSqKm', 'CatPctFull', 'WsPctFull', 'inStreamCat']:
+
+                # get list of params to pass to one query
+                params = {"dsname": dsname, "metricname": metric, "dsid": dsid}
+                metric_data.append(params)
+
+                # Add to metrics display names: strip the AOI suffix so each metric
+                # family gets a single alias
+                metric_name = metric.lower()
+                if metric_name.endswith("rp100"):
+                    metric_name = metric_name.removesuffix("rp100")
+                if metric_name.endswith("cat"):
+                    metric_name = metric_name.removesuffix("cat")
+                if metric_name.endswith("ws"):
+                    metric_name = metric_name.removesuffix("ws")
+                display_names.add(metric_name)
+
+        # Insert into sc/lc _metrics
+        metric_result = self.BulkInsert(metrics_table_name, metric_data)
+
+        display_table_name = prefix + 'metrics_display_names'
+        for alias in display_names:
+            # get list of params to pass to one query
+            display_params.append({"metric_alias": alias, "dsid": dsid})
+
+        # Insert into sc/lc _metrics_display_names
+        display_result = self.BulkInsert(display_table_name, display_params)
+
+        return ds_result, metric_result, display_result
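+
+    # The display-name loop above collapses each metric family to one alias, e.g.
+    # 'pcturb2019cat', 'pcturb2019ws' and 'pcturb2019catrp100' (hypothetical
+    # columns) all reduce to 'pcturb2019'.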
+
+    def CreateDatasetFromFiles(self, partition: str, dataset_name: str, files: list | str, active: int = 0):
+        """Create new dataset in given partition from a list of files
+
+        Args:
+            partition (str): IMPORTANT: this needs to be either 'streamcat' or 'lakecat'. This is how we will decide what part of the database to create new data in.
+            dataset_name (str): Name of the dataset. This will be used in the sc/lc_datasets table and is also the final_table name in the TG table (metric variable info page)
+            files (list | str): list of paths to files
+            active (int): binary int 1 if dataset will be published and displayed upon creation. 0 if not. Default is 0
+
+        Returns:
+            tuple(dataset_result, metric_result, display_result): see function CreateDataset
+        """
+        # Accept a comma separated string of paths as well as a list
+        if isinstance(files, str) and ',' in files:
+            files = files.split(', ')
+
+        if isinstance(files, list):
+            dfs = [pd.read_csv(path) for path in files]
+            df = pd.concat(dfs)
+            # dsname = files[0].split('/')[-1].removesuffix('.csv')
+        else:
+            df = pd.read_csv(files)
+            # dsname = files.split('/')[-1].removesuffix('.csv')
+
+        # if '_' in dsname:
+        #     dsname = dsname.split('_')[0]
+
+        df.fillna(0, inplace=True)
+
+        ds_result, metric_result, display_result = self.CreateDataset(partition, df, dataset_name, active)
+        return ds_result, metric_result, display_result
+
+    def FindAllMetrics(self, partition: str) -> list:
+        """Get all metrics in given database partition
+
+        Args:
+            partition (str): IMPORTANT: this needs to be either 'streamcat' or 'lakecat'. This is how we will decide which part of the database to read from.
+
+        Returns:
+            list: all metrics found in dataset tables
+        """
+        full_available_metrics = []
+        if partition.lower() == 'streamcat':
+            prefix = 'sc_ds'
+        elif partition.lower() == 'lakecat':
+            prefix = 'lc_ds'
+        else:
+            raise ValueError("Invalid partition, needs to be either streamcat or lakecat")
+
+        for table in self.metadata.sorted_tables:
+            if prefix in table.name:
+                columns = table.columns
+                # print(columns.keys())
+                for col in columns.keys():
+                    col_name = col.lower()
+                    full_available_metrics.append(col_name)
+        return full_available_metrics
+
+    def FindMissingMetrics(self, partition: str):
+        full_available_metrics = self.FindAllMetrics(partition)
+        names = pd.read_sql('SELECT metricname FROM SC_METRICS', con=self.engine)
+        current_metric_names = names['metricname'].apply(lambda x: str(x).lower())
+        missing_from_sc_metrics = set(full_available_metrics) - set(current_metric_names)
+        return ', '.join(missing_from_sc_metrics)  # could return just the set as well
+
+    def GetMetricsInTG(self) -> pd.Series:
+        def get_combinations(row):
+            combinations = []
+            # If metric name contains [Year] or [AOI], replace with row['year'] or row['aoi']
+            if row['year'] is not None:
+                combinations = [(y, a) for y in row['year'] for a in row['aoi']]
+            return combinations
+
+        def get_full_metric_list(row):
+            metrics = []
+            if row['year'] is None:
+                for aoi in row['aoi']:
+                    name = str(row['metric_name'])
+                    new_name = name.replace("[AOI]", aoi)
+                    # find = name.find("[AOI]")
+                    # print(name, find)
+                    metrics.append(new_name)
+            if row['aoi'] is None:
+                for year in row['year']:
+                    name = row['metric_name']
+                    new_name = name.replace('[Year]', year)
+                    metrics.append(new_name)
+            if len(row['combinations']) > 0:
+                for combo in row['combinations']:
+                    name = row['metric_name']
+                    new_name = name.replace('[Year]', combo[0]).replace('[AOI]', combo[1])
+                    metrics.append(new_name)
+            return metrics
+
+        # RunQuery returns a (statement, executed) tuple, so read the TG table
+        # into a DataFrame directly
+        df = pd.read_sql("SELECT metric_name, aoi, year FROM SC_METRICS_TG WHERE indicator_category <> 'Base' ORDER BY metric_name ASC", con=self.engine)
+
+        # Extract individual years and AOIs
+        df['year'] = df['year'].str.split(', ')
+        df['aoi'] = df['aoi'].str.split(', ')
+
+        # Generate all combinations of year and AOI
+        # combinations = [(y, a) for y in df['year'].iloc[0] for a in df['aoi'].iloc[0]]
+        df['combinations'] = df.apply(get_combinations, axis=1)
+
+        # Create a new column with the desired format
+        df['full_list'] = df.apply(get_full_metric_list, axis=1)
+
+        # Turn column into a series
+        full_tg_list = df['full_list'].explode()
+        full_tg_list = full_tg_list.apply(lambda x: str(x).lower())
+
+        return full_tg_list
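+
+    # Expansion example (hypothetical row): metric_name 'PctUrb[Year][AOI]' with
+    # year '2001, 2006' and aoi 'Cat, Ws' expands to pcturb2001cat, pcturb2001ws,
+    # pcturb2006cat and pcturb2006ws.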
+
+    # def RemoveAoiFromRow(row):
+    #     aois = ('cat', 'ws', 'catrp100', 'wsrp100')
+    #     for aoi in aois:
+    #         if row.endswith(aoi):
+    #             return row[:-len(aoi)].strip()
+    #     return row
+
+    def GetAllDatasetNames(self):
+
+        sc_dsnames = []
+        lc_dsnames = []
+        sc_select_stmt = text("SELECT dsname FROM sc_datasets")
+        sc_datasets_res = self.TextSelect(sc_select_stmt)
+        for row in sc_datasets_res:
+            sc_dsnames.append(row._t[0])
+
+        lc_select_stmt = text("SELECT dsname FROM lc_datasets")
+        lc_datasets_res = self.TextSelect(lc_select_stmt)
+        for row in lc_datasets_res:
+            lc_dsnames.append(row._t[0])
+
+        return sc_dsnames + lc_dsnames
+
+    def UpdateMetricName(self, partition, old_name, new_name):
+        # call this in the edit metric info function if the name is updated.
+        prefix = 'sc' if partition == 'streamcat' else 'lc'
+        old_name_prefix = old_name.split('[')[0]
+        new_name_prefix = new_name.split('[')[0]
+        tg_query_result = self.TextSelect(text(f"SELECT metric_name, aoi, year, dsid FROM {prefix}_metrics_tg WHERE metric_name = '{old_name}'"))
+        tg_info = {}
+        for row in tg_query_result:
+            tg_info = row._asdict()
+        tg_info['name_prefix'] = tg_info['metric_name'].split('[')[0]
+        tg_info['aoi'] = tg_info['aoi'].split(', ') if ',' in tg_info['aoi'] else [tg_info['aoi']]
+        if tg_info['year'] is not None:
+            tg_info['year'] = tg_info['year'].split(', ') if ',' in tg_info['year'] else [tg_info['year']]
+        else:
+            tg_info['year'] = ['']
+
+        # sc/lc_metrics_tg update
+        tg_result = self.UpdateRow(f'{prefix}_metrics_tg', 'metric_name', 'metric_name', old_name, new_name)
+
+        # sc/lc_metrics update
+        # concat name_prefix and all the aois
+        metric_result = []
+        for year in tg_info['year']:
+            for aoi in tg_info['aoi']:
+                if '[Year]' in old_name:
+                    old_metric_name = old_name_prefix + year + aoi
+                else:
+                    old_metric_name = old_name_prefix + aoi
+
+                if '[Year]' in new_name:
+                    new_metric_name = new_name_prefix + year + aoi
+                else:
+                    new_metric_name = new_name_prefix + aoi
+
+                metric_res = self.UpdateRow(f'{prefix}_metrics', 'metricname', 'metricname', old_metric_name, new_metric_name)
+                metric_result.append(metric_res)
+
+        # sc/lc_metrics_display_names update
+        display_query = self.TextSelect(text(f"SELECT metric_alias FROM {prefix}_metrics_display_names WHERE metric_alias LIKE '%{old_name_prefix.lower()}%'"))
+        display_result = []
+        for row, year in zip(display_query, tg_info['year']):
+            old_display_name = row._t[0].lower()
+            new_display_name = new_name_prefix.lower() + year
+            display_update = self.UpdateRow(f'{prefix}_metrics_display_names', 'metric_alias', 'metric_alias', old_display_name, new_display_name)
+            display_result.append(display_update)
+
+        # Build the full metric column names, then rename the matching columns in
+        # the dataset table
+        dataset_table_name = f"{prefix}_ds_{tg_info['dsid']}"
+        dataset_table = self.metadata.tables[dataset_table_name]
+        alter_table_results = []
+        for year in tg_info['year']:
+            for aoi in tg_info['aoi']:
+                if '[Year]' in old_name:
+                    old_col_name = old_name_prefix + year + aoi
+                else:
+                    old_col_name = old_name_prefix + aoi
+                if '[Year]' in new_name:
+                    new_col_name = new_name_prefix + year + aoi
+                else:
+                    new_col_name = new_name_prefix + aoi
+                if old_col_name.lower() in dataset_table.columns.keys():
+                    print(f"Need to update dataset {dataset_table_name}")
+                    alter_table_results.append(self.UpdateColumnName(dataset_table_name, old_col_name.lower(), new_col_name.lower()))
+
+        return metric_result, display_result, tg_result, alter_table_results
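+
+    # e.g. UpdateMetricName('streamcat', 'PctImp[AOI]', 'PctImperv[AOI]')
+    # (hypothetical names) rewrites the tg row, each per-AOI row in sc_metrics,
+    # the display alias, and renames the matching columns in the sc_ds_<dsid> table.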
+
+    def UpdateColumnName(self, table_name, old_col, new_col):
+        if self.inspector.has_table(table_name):
+            alter_stmt = f'ALTER TABLE {table_name} RENAME COLUMN "{old_col}" TO "{new_col}"'
+            alter_query = text(alter_stmt)
+            result = self.RunQuery(alter_query)
+            return result[0]
+        else:
+            return f"No table named {table_name} found."
+
+    def UpdateDatasetColumn(self, table_name: str, col_name: str, values: list[dict]):
+        """Update one column of a dataset table, row by row.
+
+        values should be a list of dictionaries mapping comid to the new value, e.g.
+        [{"comid": comid_1, "new_value": new_value_1}, {"comid": comid_2, "new_value": new_value_2}]
+
+        query: UPDATE table_name SET col_name = :new_value WHERE comid = :comid
+        """
+        if self.inspector.has_table(table_name):
+            query = text(f"UPDATE {table_name} SET {col_name} = :new_value WHERE comid = :comid")
+
+            exec = self.RunQuery(query, values)
+            return exec
+
+    def UpdateActiveDataset(self, dsname, partition):
+        if partition == 'both':
+            # TODO unfinished: perform join + 2 updates
+            sql = text(f"SELECT l.active, s.active FROM sc_datasets s JOIN lc_datasets l ON s.dsid = l.dsid WHERE s.dsname = '{dsname}'")
+            res = self.TextSelect(sql)
+            for row in res:
+                new_val = 1 if 0 in row._t else 0
+
+        else:
+            prefix = 'sc' if partition == 'streamcat' else 'lc'
+            ds_table_name = f'{prefix}_datasets'
+            sql = text(f"SELECT active, tablename FROM {ds_table_name} WHERE dsname = '{dsname}'")
+            res = self.TextSelect(sql)
+            print(res[0]._t)
+            active = res[0]._t[0]
+
+            new_val = 0 if active == 1 else 1
+            if new_val == 1:
+                # check dataset for null values before activating
+                dataset_table = res[0]._t[1]
+                table_df = self.GetTableAsDf(dataset_table)
+                if table_df.isnull().any().any():
+                    return f"Found null values in dataset {dsname}. Fix this and try again."
+            # UpdateRow already runs (or compiles) the statement via RunQuery
+            update_res = self.UpdateRow(ds_table_name, 'active', 'dsname', dsname, new_val)
+            return update_res
+
+    def getVersionNumber(self, partition):
+        table_name = 'lc_info' if partition == 'lakecat' else 'sc_info'
+        info_table = Table(table_name, self.metadata, autoload_with=self.engine)
+        with self.engine.connect() as conn:
+            max_version = conn.execute(select(func.max(info_table.c.version))).scalar()
+            conn.rollback()
+        return max_version
+
+    def newChangelogRow(self, partition, public_desc, change_desc):
+        table_name = 'sc_info' if partition == 'streamcat' else 'lc_info'
+        # stmt = f"INSERT INTO {table_name} (version, public_description) VALUES ((SELECT MAX(version)+1 FROM lc_info), '{public_desc}');"
+        new_version_num = self.getVersionNumber(partition) + 1
+        values = {
+            "version": new_version_num,
+            "public_description": public_desc,
+            "change_description": change_desc
+        }
+        result = self.InsertRow(table_name, values)
+        return result
\ No newline at end of file
diff --git a/gui_requirements.txt b/gui_requirements.txt
new file mode 100644
index 0000000..3e137e8
--- /dev/null
+++ b/gui_requirements.txt
@@ -0,0 +1,4 @@
+pandas
+SQLAlchemy
+customtkinter
+CTkListbox
\ No newline at end of file