-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathvalidation.py
951 lines (772 loc) · 43.6 KB
/
validation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
"""
Functions for testing neural network performance, using both synthetic data and real OVIRS data of Bennu.
"""
import time
from pathlib import Path
import os
from contextlib import redirect_stdout # For saving keras prints into text files
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import sklearn.utils
import constants as C
import file_handling
import neural_network as NN
import radiance_data as rad
import file_handling as FH
def cosine_similarity(s1, s2):
    """Cosine of angle between two vectors s1 and s2.

    :param s1:
        First vector (array-like)
    :param s2:
        Second vector, same length as s1
    :return:
        Cosine similarity; 1 for parallel, 0 for orthogonal, -1 for opposite vectors
    """
    s1 = np.asarray(s1)
    s2 = np.asarray(s2)
    # np.linalg.norm is the idiomatic (and numerically more robust) way to get
    # the Euclidean norm, replacing the hand-rolled sqrt(dot(x, x))
    cosangle = np.dot(s1, s2) / (np.linalg.norm(s1) * np.linalg.norm(s2))
    return cosangle
def spectral_angle(s1, s2):
    """Spectral angle between two spectra s1 and s2, in radians.

    :param s1:
        First spectrum (array-like)
    :param s2:
        Second spectrum, same length as s1
    :return:
        Angle between the two spectra, in radians
    """
    cos_similarity = cosine_similarity(s1, s2)
    # BUGFIX: floating point rounding can push the cosine marginally outside
    # [-1, 1] for (nearly) parallel spectra, which would make arccos return NaN.
    # Clip to the valid domain before taking the arc cosine.
    angle = np.arccos(np.clip(cos_similarity, -1.0, 1.0))
    return angle
def MAE(s1, s2):
    """Mean absolute error between two vectors s1 and s2.

    :param s1:
        First vector (array-like)
    :param s2:
        Second vector, same length as s1
    :return:
        Mean absolute error as a scalar
    """
    s1 = np.asarray(s1)
    s2 = np.asarray(s2)
    # Vectorized np.mean(np.abs(...)) replaces the builtin sum/len combination:
    # same result, clearer intent, and faster on large arrays
    return np.mean(np.abs(s1 - s2))
def MAPE(ground, pred):
    """Mean absolute percentage error between two vectors: ground truth and prediction.

    :param ground:
        Ground truth vector (array-like); must not contain zeros, since values divide the error
    :param pred:
        Prediction vector, same length as ground
    :return:
        Mean absolute percentage error as a scalar (in percent)
    """
    ground = np.asarray(ground)
    pred = np.asarray(pred)
    # Vectorized np.mean replaces builtin sum/len; result is identical
    return 100 * np.mean(np.abs((ground - pred) / ground))
def test_model(x_test, y_test, model, thermal_radiances, savefolder):
    """
    Testing a trained Keras model with data given as parameters. Saving results of test as a toml and as several plots.

    :param x_test:
        Test data, one spectral radiance per row
    :param y_test:
        Ground truth for test data: first column temperature, second column emissivity
    :param model:
        A trained Keras model with weights loaded
    :param thermal_radiances:
        Thermal spectral radiances for all test data, one spectrum per row
    :param savefolder:
        Path to folder where results and plots will be saved
    :return:
        Dictionary containing the calculated errors
    """
    # Run test for the model with Keras, take test result and elapsed time into variables to save them later
    time_start = time.perf_counter_ns()
    test_result = model.evaluate(x_test, y_test, verbose=0)
    time_stop = time.perf_counter_ns()
    elapsed_time_s = (time_stop - time_start) / 1e9
    print(f'Elapsed prediction time for {len(x_test[:, 0])} samples was {elapsed_time_s} seconds')
    print(f'Test with Keras resulted in a loss of {test_result}')

    # Calculate some differences between ground truth and prediction
    # Lists for storing the results of calculations
    reflrad_mae_corrected = []
    reflrad_cos_corrected = []
    reflrad_mae_uncorrected = []
    reflrad_cos_uncorrected = []
    reflectance_mae_corrected = []
    reflectance_cos_corrected = []
    reflectance_mae_uncorrected = []
    reflectance_cos_uncorrected = []
    thermrad_mae = []
    thermrad_cos = []
    temperature_ground = []
    temperature_pred = []
    uncorrected_reflectances = []
    corrected_reflectances = []
    ground_reflectances = []

    indices = range(len(x_test[:, 0]))
    plot_indices = np.random.randint(0, len(x_test[:, 0]), 10)  # Choose 10 random data points for plotting

    for i in indices:
        test_sample = np.expand_dims(x_test[i, :], axis=0)
        prediction = model.predict(test_sample).squeeze()
        # BUGFIX: np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
        # float() converts the 0-d prediction array to a Python scalar instead
        pred_temperature = float(prediction)
        temperature_pred.append(pred_temperature)
        ground_temperature = y_test[i, 0]
        temperature_ground.append(ground_temperature)
        ground_emissivity = y_test[i, 1]
        print(f'Ground temperature: {ground_temperature}')
        print(f'Prediction temperature: {pred_temperature}')

        # Calculate thermal spectral radiance from predicted temperature and constant emissivity using Planck's law
        eps = (C.emissivity_max + C.emissivity_min) / 2  # Mean emissivity in training data
        pred_radiance = rad.thermal_radiance(pred_temperature, eps, C.wavelengths)
        pred_therm = pred_radiance[:, 1]
        ground_therm = thermal_radiances[i, :]

        # Reflected radiance = sum radiance (input) - thermal radiance (from temperature)
        pred_refl = test_sample.squeeze() - pred_therm
        ground_refl = test_sample.squeeze() - ground_therm
        uncorrected_refl = test_sample.squeeze()

        # Plot some results for closer inspection from randomly chosen test spectra
        if i in plot_indices:
            temperature_error = abs(pred_temperature - ground_temperature)
            emissivity_error = abs(eps - ground_emissivity)
            plot_val_test_results(test_sample, ground_refl, ground_therm, pred_refl, pred_therm, savefolder, i + 1, temperature_error, emissivity_error)

        # Calculate normalized reflectance from uncorrected, NN-corrected, and ground truth reflected radiances
        reflectance_ground = rad.radiance2norm_reflectance(ground_refl)
        ground_reflectances.append(reflectance_ground)
        reflectance_uncorrected = rad.radiance2norm_reflectance(uncorrected_refl)
        uncorrected_reflectances.append(reflectance_uncorrected)
        reflectance_corrected = rad.radiance2norm_reflectance(pred_refl)
        corrected_reflectances.append(reflectance_corrected)

        # Mean absolute errors from radiance
        reflrad_mae_corrected.append(MAE(pred_refl, ground_refl))
        reflrad_mae_uncorrected.append(MAE(uncorrected_refl, ground_refl))
        thermrad_mae.append(MAE(pred_therm, ground_therm))

        # Spectral angles from radiances
        reflrad_cos_corrected.append(spectral_angle(pred_refl, ground_refl))
        reflrad_cos_uncorrected.append(spectral_angle(uncorrected_refl, ground_refl))
        thermrad_cos.append(spectral_angle(pred_therm, ground_therm))

        # Mean absolute errors from reflectances
        reflectance_mae_corrected.append(MAE(reflectance_corrected, reflectance_ground))
        reflectance_mae_uncorrected.append(MAE(reflectance_uncorrected, reflectance_ground))

        # Spectral angles from reflectances
        reflectance_cos_corrected.append(spectral_angle(reflectance_corrected, reflectance_ground))
        reflectance_cos_uncorrected.append(spectral_angle(reflectance_uncorrected, reflectance_ground))

        print(f'Calculated sample {i} / {len(indices)}')

    # Normalized Root Mean Square Error (NRMSE) from temperature predictions
    temperature_errors = np.asarray(temperature_ground) - np.asarray(temperature_pred)
    temperature_NRMSE = np.sqrt((sum(temperature_errors ** 2)) / len(temperature_ground)) / np.mean(temperature_ground)

    # Calculate mean and std of temperature predictions as function of ground temperature
    temperature_pred_mean_std = _calculate_temperature_pred_mean_and_std(temperature_ground, temperature_pred)

    # Mean reflectance spectra: ground, uncorrected, corrected
    mean_reflectance_ground = np.mean(np.asarray(ground_reflectances), axis=0)
    mean_reflectance_uncorrected = np.mean(np.asarray(uncorrected_reflectances), axis=0)
    mean_reflectance_corrected = np.mean(np.asarray(corrected_reflectances), axis=0)

    # Gather all calculated errors in a single dictionary and save to disc as toml
    mean_dict = {}
    # BUGFIX: use len(indices) instead of the loop variable i, which would be
    # unbound if the test set were empty (i+1 == len(indices) otherwise)
    mean_dict['samples'] = len(indices)
    mean_dict['NN_test_result'] = test_result
    mean_dict['elapsed_prediction_time_s'] = elapsed_time_s
    mean_dict['temperature_NRMSE'] = temperature_NRMSE
    mean_dict['mean_reflected_MAE'] = np.mean(reflrad_mae_corrected)
    mean_dict['mean_thermal_MAE'] = np.mean(thermrad_mae)
    mean_dict['mean_reflected_SAM'] = np.mean(reflrad_cos_corrected)
    mean_dict['mean_thermal_SAM'] = np.mean(thermrad_cos)
    mean_dict['mean_ground_reflectance'] = mean_reflectance_ground
    mean_dict['mean_uncorrected_reflectance'] = mean_reflectance_uncorrected
    mean_dict['mean_corrected_reflectance'] = mean_reflectance_corrected

    temperature_dict = {}
    temperature_dict['ground_temperature'] = temperature_ground
    temperature_dict['predicted_temperature'] = temperature_pred
    temperature_dict['predicted_temperature_mean_and_std'] = temperature_pred_mean_std

    MAE_dict = {}
    MAE_dict['reflected_MAE'] = reflrad_mae_corrected
    MAE_dict['reflected_MAE_uncorrected'] = reflrad_mae_uncorrected
    MAE_dict['thermal_MAE'] = thermrad_mae
    MAE_dict['reflectance_MAE'] = reflectance_mae_corrected
    MAE_dict['reflectance_MAE_uncorrected'] = reflectance_mae_uncorrected

    SAM_dict = {}
    SAM_dict['reflected_SAM'] = reflrad_cos_corrected
    SAM_dict['reflected_SAM_uncorrected'] = reflrad_cos_uncorrected
    SAM_dict['thermal_SAM'] = thermrad_cos
    SAM_dict['reflectance_SAM'] = reflectance_cos_corrected
    SAM_dict['reflectance_SAM_uncorrected'] = reflectance_cos_uncorrected

    error_dict = {}
    error_dict['mean'] = mean_dict
    error_dict['temperature'] = temperature_dict
    error_dict['MAE'] = MAE_dict
    error_dict['SAM'] = SAM_dict

    FH.save_toml(error_dict, Path(savefolder, 'errors.toml'))
    error_plots(savefolder)

    # Return the dictionary containing calculated errors, in addition to saving it on disc
    return error_dict
def _calculate_temperature_pred_mean_and_std(temperature_ground: list, temperature_pred: list):
"""
Takes a list of ground truth temperatures and a list of corresponding predicted temperatures.
Calculates mean and std of all predicted temperatures corresponding to one ground truth temperature.
:param temperature_ground:
List of ground truth temperatures
:param temperature_pred:
List of predicted temperatures corresponding to the ground truth temperatures
:return:
ndarray where first column is unique ground truth temperatures, second is mean predicted temperatures, and
third is std of predictions
"""
# Mean predicted temperature and its std for each unique value of ground temperature
temperature_ground = np.asarray(temperature_ground)
temperature_pred = np.asarray(temperature_pred)
unique_ground_temps = np.unique(temperature_ground)
temperature_pred_mean_std = np.zeros((3, len(unique_ground_temps)))
saveindex = 0
for temp in unique_ground_temps:
index = np.where(temperature_ground == temp) # Indices where ground temperature is the one to be calculated
mean = np.mean(temperature_pred[index])
std = np.std(temperature_pred[index])
temperature_pred_mean_std[:, saveindex] = [temp, mean, std]
saveindex = saveindex + 1
return temperature_pred_mean_std
def plot_val_test_results(test_sample, ground1, ground2, pred1, pred2, savefolder, index, temp_error, eps_error):
    """
    Plotting some results for test with one sample radiance, and saving plots on disc.

    :param test_sample:
        Uncorrected spectral radiance
    :param ground1:
        Ground truth spectrum 1: reflected radiance
    :param ground2:
        Ground truth spectrum 2: thermally emitted radiance
    :param pred1:
        Prediction 1: reflected radiance
    :param pred2:
        Prediction 2: thermally emitted radiance
    :param savefolder:
        Path to folder where plots will be saved
    :param index:
        Index of the plotted sample, used in filename of plots
    :param temp_error:
        Error of temperature prediction, in Kelvin
    :param eps_error:
        Error of emissivity when predicting thermal radiance
    """
    wavelengths = C.wavelengths

    def _save_and_close(figure, filename):
        # Save the active figure into savefolder and free its memory
        plt.savefig(Path(savefolder, filename))
        plt.close(figure)

    # Figure 1: ground truth and predicted spectral radiances, reflected and thermal together
    figure = plt.figure()
    plt.plot(wavelengths, ground1)
    plt.plot(wavelengths, ground2)
    plt.plot(wavelengths, pred1.squeeze(), linestyle='--')
    plt.plot(wavelengths, pred2.squeeze(), linestyle='--')
    plt.xlabel('Wavelength [µm]')
    plt.ylabel('Radiance [W / m² / sr / µm]')
    plt.legend(('ground 1', 'ground 2', 'prediction 1', 'prediction 2'))
    _save_and_close(figure, C.training_run_name + f'_test_{index + 1}_radiance.png')

    # Figure 2: reflectances from uncorrected (test_sample), ground truth (ground1), and NN corrected (pred1)
    refl_uncorrected = rad.radiance2norm_reflectance(test_sample).squeeze()
    refl_ground = rad.radiance2norm_reflectance(ground1)
    refl_corrected = rad.radiance2norm_reflectance(pred1)

    # Errors between ground truth and corrected reflectance, rounded for use in the filename
    reflectanceMAE = round(MAE(refl_ground, refl_corrected), 5)
    reflectanceSAM = round(spectral_angle(refl_ground, refl_corrected), 7)
    temp_error = round(temp_error, 2)
    eps_error = round(eps_error, 2)

    figure = plt.figure()
    plt.plot(wavelengths, refl_uncorrected, color=C.uncor_plot_color, linestyle=C.uncor_plot_linestyle)
    plt.plot(wavelengths, refl_ground, color=C.ground_plot_color, linestyle=C.ground_plot_linestyle)
    plt.plot(wavelengths, refl_corrected, color=C.NNcor_plot_color, linestyle=C.NNcor_plot_linestyle)
    plt.xlabel('Wavelength [µm]')
    plt.ylabel('Normalized reflectance')
    plt.legend(('Uncorrected', 'Ground truth', 'NN-corrected'))
    _save_and_close(
        figure,
        C.training_run_name
        + f'_test_{index + 1}_reflectance_temp-error_{temp_error}K_emiss-error_{eps_error}_SAM_{reflectanceSAM}_MAE_{reflectanceMAE}.png')

    # Figure 3: thermal radiances, ground truth (ground2) and NN-result (pred2)
    figure = plt.figure()
    plt.plot(wavelengths, ground2, color=C.ground_plot_color)
    plt.plot(wavelengths, pred2, color=C.NNcor_plot_color)
    plt.xlabel('Wavelength [µm]')
    plt.ylabel('Thermal radiance [W / m² / sr / µm]')
    plt.legend(('Ground truth', 'NN-corrected'))
    _save_and_close(figure, C.training_run_name + f'_test_{index + 1}_thermal.png')
def validate_synthetic(model, validation_run_folder: Path):
    """
    Run tests for trained model with synthetic data similar to training data.

    :param model:
        Trained Keras model with weights loaded
    :param validation_run_folder:
        Path to folder where results will be saved. Function will create a sub-folder inside for results from synthetic.
    :return:
        Dictionary of errors calculated by test_model
    """
    # Load test radiances from one file as dicts, separate ground truth and test samples
    rad_bunch_test = FH.load_pickle(C.rad_bunch_test_path)
    x_test = rad_bunch_test['radiances']
    y_test = rad_bunch_test['parameters']

    # Limiting test sample temperatures to stay between the min and max temperatures of Bennu, to get comparable errors
    min_temperature = 303
    max_temperature = 351
    min_index = np.where(y_test[:, 0] == min_temperature)[0][0]
    max_index = np.where(y_test[:, 0] == max_temperature)[0][-1]
    x_test = x_test[min_index:max_index, :]
    y_test = y_test[min_index:max_index, :]

    # Shuffle the data (this is probably unnecessary at this stage)
    x_test, y_test = sklearn.utils.shuffle(x_test, y_test, random_state=0)

    sample_percentage = 100  # percentage of validation data samples used for error calculation (adjust for tests)
    sample_count = int(len(x_test[:, 0]) * (sample_percentage * 0.01))
    x_test = x_test[:sample_count]
    y_test = y_test[:sample_count]

    # Calculate thermal spectral radiance for each (temperature, emissivity) parameter pair
    thermal_radiances = np.zeros((len(y_test), len(C.wavelengths)))
    for i in range(len(y_test)):
        thermal_radiance = rad.thermal_radiance(y_test[i, 0], y_test[i, 1], C.wavelengths)
        thermal_radiances[i, :] = thermal_radiance[:, 1]

    validation_plots_synthetic_path = Path(validation_run_folder, 'synthetic_validation')
    # Path.mkdir(exist_ok=True) replaces the os.path.isdir(...) == False check
    validation_plots_synthetic_path.mkdir(exist_ok=True)

    # Return the error dictionary (previously computed but discarded)
    return test_model(x_test, y_test, model, thermal_radiances, validation_plots_synthetic_path)
def bennu_refine(fitslist: list, time: int, discard_indices, plots=False):
    """
    Refine Bennu data to suit testing. Discard measurements where the instrument did not point to Bennu, but to
    the surrounding void of space. Interpolate spectra to match the wavelengths of the training data. Convert radiance
    values to match the units of training data, from [W/cm²/sr/µm] to [W/m²/sr/µm].
    Discard data-points where the integration of different wavelength range sensors was apparently erroneous: these
    data were picked out by hand based on how the radiance plots looked.

    NOTE: the ``time`` parameter shadows the module-level ``time`` import inside this function
    (kept for interface compatibility with existing callers).

    :param fitslist:
        List of spectral measurements in FITS format: first element uncorrected, second corrected
    :param time:
        Local time on Bennu where the measurement was taken, can be 1000, 1230, or 1500. Affects save locations
    :param discard_indices:
        Indices of datapoints that will be discarded as erroneous
    :param plots:
        Whether plots will be made and saved
    :return: uncorrected_Bennu, corrected_Bennu, thermal_tail_Bennu, temperature, emissivity:
        Spectral radiances from Bennu (with bad datapoints removed, conforming to rest of the data), plus
        NASA's temperature and emissivity predictions for the retained datapoints
    """
    uncorrected_fits = fitslist[0]
    corrected_fits = fitslist[1]
    # # Handy info print of what the fits file includes:
    # corrected_fits.info()

    wavelengths = uncorrected_fits[1].data
    uncorrected_rad = uncorrected_fits[0].data[:, 0, :]
    corrected_rad = corrected_fits[0].data[:, 0, :]
    thermal_tail_rad = corrected_fits[2].data[:, 0, :]
    # Radiances summed over wavelengths; used to detect when the FOV was on the asteroid.
    # (The unused sum of corrected radiances was removed.)
    uncor_sum_rad = np.sum(uncorrected_rad, 1)

    # Data is from several scans over Bennu's surface, each scan beginning and ending off-asteroid.
    # Keep the indices where summed uncorrected radiance exceeds 0.02 (threshold read off from plots):
    # these are the datapoints where the FOV was on Bennu
    Bennu_indices = [i for i, sum_rad in enumerate(uncor_sum_rad) if sum_rad > 0.02]

    # Some data were marked for discarding due to weird looking spectra with very sharp drops and rises.
    # Must loop through the list backwards to not modify indices of upcoming elements
    for i in sorted(discard_indices, reverse=True):
        del Bennu_indices[i]

    # Pick out the spectra where sum radiance was over threshold value and data was not marked for discarding
    uncorrected_Bennu = uncorrected_rad[Bennu_indices, :]
    corrected_Bennu = corrected_rad[Bennu_indices, :]
    thermal_tail_Bennu = thermal_tail_rad[Bennu_indices, :]

    def bennu_rad_interpolation(data, waves):
        # Interpolate the Bennu data to match the wavelength vector used in training
        interp_data = np.zeros((len(data[:, 0]), len(C.wavelengths)))
        for i in range(len(data[:, 0])):
            interp_data[i] = np.interp(C.wavelengths, waves, data[i, :])
        return interp_data

    uncorrected_Bennu = bennu_rad_interpolation(uncorrected_Bennu, wavelengths)
    corrected_Bennu = bennu_rad_interpolation(corrected_Bennu, wavelengths)
    thermal_tail_Bennu = bennu_rad_interpolation(thermal_tail_Bennu, wavelengths)

    # Convert from NASA's radiance unit [W/cm²/sr/µm] to [W/m²/sr/µm]: 1 cm² = 1e-4 m²
    uncorrected_Bennu = uncorrected_Bennu * 10000
    corrected_Bennu = corrected_Bennu * 10000
    thermal_tail_Bennu = thermal_tail_Bennu * 10000

    # Fetch NASA's temperature and emissivity predictions from FITS file, for the retained datapoints only
    temperature = corrected_fits[3].data[:, 0][Bennu_indices]
    emissivity = corrected_fits[4].data[Bennu_indices]

    if plots:  # idiomatic truth test replaces 'plots==True'
        plotpath = Path(C.bennu_plots_path, str(time))
        for i in range(len(Bennu_indices)):
            fig = plt.figure()
            plt.plot(C.wavelengths, uncorrected_Bennu[i, :])
            plt.plot(C.wavelengths, corrected_Bennu[i, :])
            plt.plot(C.wavelengths, thermal_tail_Bennu[i, :])
            plt.legend(('Uncorrected', 'Corrected', 'Thermal tail'))
            plt.xlabel('Wavelength [µm]')
            plt.ylabel('Radiance [W / m² / sr / µm]')
            plt.xlim((2, 2.45))
            plt.ylim((0, 0.4))
            plt.savefig(Path(plotpath, f'bennurads_{time}_{i}.png'))
            print(f'Saved figure as bennurads_{time}_{i}.png')
            plt.close(fig)

    return uncorrected_Bennu, corrected_Bennu, thermal_tail_Bennu, temperature, emissivity
def validate_bennu(model, validation_run_folder):
    """
    Test model using OVIRS data of Bennu from three local times. Load data and refine it to match the training and
    validation data. Saves the results of testing as toml file and some plots drawn from the results.

    :param model:
        Trained Keras model with weights loaded
    :param validation_run_folder:
        Path to the folder where results will be saved
    """
    # Opening OVIRS spectra measured from Bennu
    Bennu_path = Path(C.spectral_path, 'Bennu_OVIRS')
    file_list = os.listdir(Bennu_path)

    # Group files by time of day: 20190425 is 3pm data, 20190509 is 12:30 pm data, 20190516 is 10 am data
    # Two-element lists for holding the fits data: [uncorrected, corrected]
    Bennu_1000 = [None] * 2
    Bennu_1230 = [None] * 2
    Bennu_1500 = [None] * 2
    # Dispatch table mapping the measurement date in the filename to the target list
    date_to_list = {'20190425': Bennu_1500, '20190509': Bennu_1230, '20190516': Bennu_1000}

    for filename in file_list:
        # 'A' is uncorrected radiance, 'B' is thermal tail removed radiance: uncorrected goes first
        if 'A' in filename:
            index = 0
        elif 'B' in filename:
            index = 1
        else:
            # BUGFIX: previously a file without an A/B marker reused a stale index from an
            # earlier iteration (or raised NameError on the first one); skip such files instead
            continue
        for date, target in date_to_list.items():
            if date in filename:
                # Open .fits file with astropy, store according to local time and correction
                target[index] = fits.open(Path(Bennu_path, filename))
                break

    # Load indices that mark data which will be discarded
    discard_1000 = FH.load_csv(Path(Bennu_path, '1000_discard_indices'))
    discard_1230 = FH.load_csv(Path(Bennu_path, '1230_discard_indices'))
    discard_1500 = FH.load_csv(Path(Bennu_path, '1500_discard_indices'))

    # Interpolate to match the wl vector used for training data, convert the readings to another radiance unit
    uncorrected_1500, corrected_1500, thermal_tail_1500, temperatures_1500, emissivities_1500 = bennu_refine(Bennu_1500, 1500, discard_1500, plots=False)
    uncorrected_1230, corrected_1230, thermal_tail_1230, temperatures_1230, emissivities_1230 = bennu_refine(Bennu_1230, 1230, discard_1230, plots=False)
    uncorrected_1000, corrected_1000, thermal_tail_1000, temperatures_1000, emissivities_1000 = bennu_refine(Bennu_1000, 1000, discard_1000, plots=False)

    # # Finding min and max temperatures from the data. 10:00 is the coldest time, 12:30 is the hottest
    # Bennu_min_temperature = min(temperatures_1000)
    # Bennu_max_temperature = max(temperatures_1230)

    def test_model_Bennu(X_Bennu, temperatures, emissivities, thermal, time: str, validation_plots_Bennu_path):
        # Run the test function for a dataset from one local time
        # Organize ground truth data to match what the ML model expects: columns (temperature, emissivity)
        y_Bennu = np.zeros((len(X_Bennu[:, 0]), 2))
        y_Bennu[:, 0] = temperatures
        y_Bennu[:, 1] = emissivities
        savepath = Path(validation_plots_Bennu_path, time)
        # Path.mkdir(exist_ok=True) replaces the os.path.isdir(...) == False check
        savepath.mkdir(exist_ok=True)
        return test_model(X_Bennu, y_Bennu, model, thermal, savepath)

    # Save location of plots from validating with Bennu data
    validation_plots_Bennu_path = Path(validation_run_folder, 'bennu_validation')
    validation_plots_Bennu_path.mkdir(exist_ok=True)

    print('Testing with Bennu data, local time 15:00')
    errors_1500 = test_model_Bennu(uncorrected_1500, temperatures_1500, emissivities_1500, thermal_tail_1500, str(1500), validation_plots_Bennu_path)
    print('Testing with Bennu data, local time 12:30')
    errors_1230 = test_model_Bennu(uncorrected_1230, temperatures_1230, emissivities_1230, thermal_tail_1230, str(1230), validation_plots_Bennu_path)
    print('Testing with Bennu data, local time 10:00')
    errors_1000 = test_model_Bennu(uncorrected_1000, temperatures_1000, emissivities_1000, thermal_tail_1000, str(1000), validation_plots_Bennu_path)

    # Collecting calculated results into one dictionary and saving it as toml
    errors_Bennu = {
        'errors_1000': errors_1000,
        'errors_1230': errors_1230,
        'errors_1500': errors_1500,
    }
    FH.save_toml(errors_Bennu, Path(validation_plots_Bennu_path, 'errors_Bennu.toml'))

    # Plotting errors from all three local times. Plots for individual times are made in the test_model -function
    error_plots_bennu(validation_plots_Bennu_path)
def validate_and_test(last_epoch):
    """
    Run validation with synthetic data and testing with real data, for a pre-trained model.

    :param last_epoch:
        Number of the last training epoch; used to pick the weight file that is loaded
        into the model, and included in the name of the results folder
    """
    # Generate a unique folder name for results of test based on time the test was run
    timestr = time.strftime("%Y%m%d-%H%M%S")
    # timestr = 'test'  # Folder name for test runs, otherwise a new folder is always created

    # Create folder for results; Path.mkdir(exist_ok=True) replaces the os.path.isdir check
    validation_run_folder = Path(C.val_and_test_path, f'validation-run_epoch-{last_epoch}_time-{timestr}')
    validation_run_folder.mkdir(exist_ok=True)

    # Build a model and load pre-trained weights
    model = NN.create_model(
        conv_filters=C.conv_filters,
        conv_kernel=C.conv_kernel,
        encoder_start=C.encoder_start,
        encoder_node_relation=C.encoder_node_relation,
        encoder_stop=C.encoder_stop,
        lr=C.learning_rate
    )
    weight_path = Path(C.weights_path, f'weights_{str(last_epoch)}.hdf5')
    model.load_weights(weight_path)

    # Print summary of model architecture into file
    with open(Path(validation_run_folder, 'modelsummary.txt'), 'w') as f:
        with redirect_stdout(f):
            model.summary()

    # Validation with synthetic data similar to training data
    validate_synthetic(model, validation_run_folder)
    # Testing with real asteroid data
    validate_bennu(model, validation_run_folder)
def _plot_with_shadow(ax_obj, x_data, y_data, y_data_std, color, label, ls='-') -> None:
""" Function by K.A. Riihiaho, copied (with permission) from
https://github.com/silmae/HyperBlend/blob/master/src/plotter.py
Plot data with standard deviation as shadow.
Data must be sorted to show correctly.
:param ax_obj:
Pyplot axes object to plot to.
:param x_data:
Data x values (wavelengths).
:param y_data:
Data y values as numpy.array.
:param y_data_std:
Standard deviation as numpy array. The shadow is drawn as +- std/2.
:param color:
Color of the plot and shadow.
:param label:
Label of the value.
:param ls:
Line style. See pyplot linestyle documentation.
"""
ax_obj.fill_between(x_data, y_data - (y_data_std / 2), y_data + (y_data_std / 2), alpha=0.2,
color=color)
ax_obj.plot(x_data, y_data, color=color, ls=ls, label=label)
def error_plots(folderpath):
"""
Load a dictionary containing calculated errors from a toml file, make plots of the errors, and save the plots in the
folder where the toml file was located.
:param folderpath:
Path the to folder where the toml file of errors is and where the plots will be saved
"""
# Load dictionary of errors, saved as a toml
errordict = FH.load_toml(Path(folderpath, 'errors.toml'))
# Extract the results of test calculations saved in the dict
temperature_dict = errordict['temperature']
temperature_ground = np.asarray(temperature_dict['ground_temperature'])
temperature_pred = np.asarray(temperature_dict['predicted_temperature'])
temperature_pred_mean_std = temperature_dict['predicted_temperature_mean_and_std']
temperature_error = temperature_pred - temperature_ground
mean_dict = errordict['mean']
mean_ground_reflectance = mean_dict['mean_ground_reflectance']
mean_uncorrected_reflectance = mean_dict['mean_uncorrected_reflectance']
mean_corrected_reflectance = mean_dict['mean_corrected_reflectance']
MAE_dict = errordict['MAE']
thermrad_mae = MAE_dict['thermal_MAE']
reflrad_mae_corrected = MAE_dict['reflected_MAE']
reflrad_mae_uncorrected = MAE_dict['reflected_MAE_uncorrected']
reflectance_mae_corrected = MAE_dict['reflectance_MAE']
reflectance_mae_uncorrected = MAE_dict['reflectance_MAE_uncorrected']
SAM_dict = errordict['SAM']
thermrad_cos = SAM_dict['thermal_SAM']
reflrad_cos_corrected = SAM_dict['reflected_SAM']
reflrad_cos_uncorrected = SAM_dict['reflected_SAM_uncorrected']
reflectance_cos_corrected = SAM_dict['reflectance_SAM']
reflectance_cos_uncorrected = SAM_dict['reflectance_SAM_uncorrected']
# Mean predicted temperature and its std for each unique value of ground temperature
unique_ground_temps = np.unique(temperature_ground)
temperature_pred_mean_std = np.zeros((3, len(unique_ground_temps)))
saveindex = 0
for temp in unique_ground_temps:
index = np.where(temperature_ground == temp) # Indices where ground temperature is the one to be calculated
mean = np.mean(temperature_pred[index])
std = np.std(temperature_pred[index])
temperature_pred_mean_std[:, saveindex] = [temp, mean, std]
saveindex = saveindex + 1
# Min and max temperatures, used to plot a reference line corresponding to ideal result
min_temperature = int(min(temperature_ground))
max_temperature = int(max(temperature_ground))
# Predicted temperature as function of ground truth temperature
fig = plt.figure()
plt.scatter(temperature_ground, temperature_pred, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.NNcor_plot_color)
plt.xlabel('Ground truth temperature [K]')
plt.ylabel('Predicted temperature [K]')
# Plot a reference line with slope 1: ideal result
plt.plot(range(min_temperature, max_temperature+1), range(min_temperature, max_temperature+1), color=C.ideal_result_line_color)
_plot_with_shadow(plt, temperature_pred_mean_std[0, :], temperature_pred_mean_std[1, :], temperature_pred_mean_std[2, :], color=C.mean_std_temp_color, label='Mean and std')
if not C.temperature_plot_ylim == (0, 0):
plt.ylim(C.temperature_plot_ylim)
plt.savefig(Path(folderpath, 'predtemp-groundtemp.png'))
plt.close(fig)
# Mean reflectances of ground truth, uncorrected, and NN corrected
fig = plt.figure()
plt.plot(C.wavelengths, mean_uncorrected_reflectance, color=C.uncor_plot_color, linestyle=C.uncor_plot_linestyle, label='Uncorrected')
plt.plot(C.wavelengths, mean_ground_reflectance, color=C.ground_plot_color, linestyle=C.ground_plot_linestyle ,label='Ground')
plt.plot(C.wavelengths, mean_corrected_reflectance, color=C.NNcor_plot_color, linestyle=C.NNcor_plot_linestyle, label='NN-corrected')
plt.legend()
plt.xlabel('Wavelength [µm]')
plt.ylabel('Normalized reflectance')
plt.savefig(Path(folderpath, 'mean_reflectances.png'))
plt.close(fig)
def double_plot(uncorrected, corrected, label, filename, limit=(0,0)):
    """
    Scatter an error metric of both uncorrected and NN-corrected spectra against
    ground truth temperature in a single figure, and save the figure as png.

    :param uncorrected:
        Metric values computed from the uncorrected spectra
    :param corrected:
        Metric values computed from the NN-corrected spectra
    :param label:
        Y-axis label describing the plotted metric
    :param filename:
        Name of the saved image file, without the '.png' extension
    :param limit:
        Y-axis limits as (min, max); the default (0, 0) means autoscale
    """
    fig = plt.figure()
    plt.scatter(temperature_ground, uncorrected, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.uncor_plot_color)
    plt.scatter(temperature_ground, corrected, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.NNcor_plot_color)
    plt.xlabel('Ground truth temperature [K]')
    plt.ylabel(label)
    if limit != (0, 0):
        plt.ylim(limit)
    leg = plt.legend(('Uncorrected', 'NN-corrected'))
    # Legend markers at full opacity, even though the scatter points are translucent
    for lh in leg.legendHandles:
        lh.set_alpha(1)
    # Bug fix: the filename argument was previously ignored, so every call
    # saved to the same (garbled) file name, overwriting earlier plots.
    plt.savefig(Path(folderpath, f'{filename}.png'))
    plt.close(fig)
# Spectral angle of reflected radiance from ideally corrected result as function of ground truth temperature,
# for corrected and uncorrected
double_plot(reflrad_cos_uncorrected, reflrad_cos_corrected, 'Reflected radiance spectral angle', 'reflrad_SAM_groundtemp', limit=C.reflrad_sam_plot_ylim)
# The same as above, but from reflectance instead of reflected radiance
double_plot(reflectance_cos_uncorrected, reflectance_cos_corrected, 'Reflectance spectral angle', 'reflectance_SAM_groundtemp', limit=C.reflectance_sam_plot_ylim)
# Mean absolute error of reflected radiance from both corrected and uncorrected, as function of ground truth temp
double_plot(reflrad_mae_uncorrected, reflrad_mae_corrected, 'Reflected radiance MAE', 'reflrad_MAE_groundtemp', limit=C.reflrad_mae_plot_ylim)
# Same as above, but from reflectance
double_plot(reflectance_mae_uncorrected, reflectance_mae_corrected, 'Reflectance MAE', 'reflectance_MAE_groundtemp', limit=C.reflectance_mae_plot_ylim)
# Spectral angle of estimated thermal radiance from ideal result, as function of ground truth temperature.
# Thermal metrics have no uncorrected counterpart, so these are single-series scatter plots.
fig = plt.figure()
plt.scatter(temperature_ground, thermrad_cos, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.NNcor_plot_color)
plt.xlabel('Ground truth temperature [K]')
plt.ylabel('Thermal spectral angle')
plt.savefig(Path(folderpath, 'thermSAM_groundtemp.png'))
plt.close(fig)
# Same as above, but mean absolute error instead of angle
fig = plt.figure()
plt.scatter(temperature_ground, thermrad_mae, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.NNcor_plot_color)
plt.xlabel('Ground truth temperature [K]')
plt.ylabel('Thermal MAE')
plt.savefig(Path(folderpath, 'thermMAE_groundtemp.png'))
plt.close(fig)
def error_plots_bennu(folderpath):
    """
    Plot errors using test results from all three local times on Bennu. Loads a dictionary of errors from a toml file,
    and saves the plots in the folder where the toml is located.

    :param folderpath:
        Path to the folder where the error dictionary is, and where the plots will be saved
    """
    # Load error dictionary and pick out the three dictionaries of errors for different local times
    errordict = file_handling.load_toml(Path(folderpath, 'errors_Bennu.toml'))
    errors_1000 = errordict['errors_1000']
    errors_1230 = errordict['errors_1230']
    errors_1500 = errordict['errors_1500']

    # Plotting and saving results for all three datasets
    def Bennuplot(errors_1000, errors_1230, errors_1500, data_name, label, savefolder):
        """Scatter one error metric against ground truth temperature for all three local times, and save the figure."""

        def fetch_data(errordict, data_name):
            """Return (ground truth temperatures, requested metric) from one local time's error dictionary."""
            temperature_dict = errordict['temperature']
            ground_temps = np.asarray(temperature_dict['ground_temperature'])
            # The metric is stored in a sub-dictionary named after its type
            if 'MAE' in data_name:
                data_dict = errordict['MAE']
                data = np.asarray(data_dict[data_name])
            elif 'SAM' in data_name:
                data_dict = errordict['SAM']
                data = data_dict[data_name]
            elif 'temperature' in data_name:
                data_dict = errordict['temperature']
                data = data_dict[data_name]
            else:
                # Fail clearly instead of the UnboundLocalError the implicit fall-through would raise
                raise ValueError(f'Unknown data name: {data_name}')
            return ground_temps, data

        ground_temps_1000, data_1000 = fetch_data(errors_1000, data_name)
        ground_temps_1230, data_1230 = fetch_data(errors_1230, data_name)
        ground_temps_1500, data_1500 = fetch_data(errors_1500, data_name)

        fig = plt.figure()
        plt.scatter(ground_temps_1000, data_1000, alpha=C.scatter_alpha, marker=C.scatter_marker)
        plt.scatter(ground_temps_1230, data_1230, alpha=C.scatter_alpha, marker=C.scatter_marker)
        plt.scatter(ground_temps_1500, data_1500, alpha=C.scatter_alpha, marker=C.scatter_marker)
        plt.xlabel('Ground truth temperature [K]')
        plt.ylabel(label)
        leg = plt.legend(('10:00', '12:30', '15:00'), title='Local time on Bennu')
        # Legend markers at full opacity, even though the scatter points are translucent
        for lh in leg.legendHandles:
            lh.set_alpha(1)

        if data_name == 'predicted_temperature':
            # For the temperature plot, also draw an ideal-result reference line and the prediction
            # mean/std band, computed over the pooled samples of all three local times
            ground_temps = errors_1000['temperature']['ground_temperature'] + errors_1230['temperature']['ground_temperature'] + errors_1500['temperature']['ground_temperature']
            pred_temps = errors_1000['temperature']['predicted_temperature'] + errors_1230['temperature']['predicted_temperature'] + errors_1500['temperature']['predicted_temperature']
            # Min and max temperatures, used to plot a reference line corresponding to ideal result
            min_temperature = int(min(ground_temps))
            max_temperature = int(max(ground_temps))
            plt.plot(range(min_temperature, max_temperature+1), range(min_temperature, max_temperature+1),
                     color=C.ideal_result_line_color)  # Plot a reference line with slope 1: ideal result
            mean_and_std = _calculate_temperature_pred_mean_and_std(ground_temps, pred_temps)
            _plot_with_shadow(plt, mean_and_std[0, :], mean_and_std[1, :], mean_and_std[2, :], color=C.mean_std_temp_color, label='Prediction mean and std')
            # A (0, 0) y-limit configured in constants means "autoscale"
            if not C.temperature_plot_ylim == (0, 0):
                plt.ylim(C.temperature_plot_ylim)

        plt.savefig(Path(savefolder, f'{data_name}_Bennu.png'))
        plt.close(fig)

    savefolder = folderpath
    Bennuplot(errors_1000, errors_1230, errors_1500, 'predicted_temperature', 'Predicted temperature [K]', savefolder)
    Bennuplot(errors_1000, errors_1230, errors_1500, 'reflected_MAE', 'Reflected MAE', savefolder)
    Bennuplot(errors_1000, errors_1230, errors_1500, 'thermal_MAE', 'Thermal MAE', savefolder)
    Bennuplot(errors_1000, errors_1230, errors_1500, 'reflected_SAM', 'Reflected SAM', savefolder)
    Bennuplot(errors_1000, errors_1230, errors_1500, 'thermal_SAM', 'Thermal SAM', savefolder)

    # Plots where error of the uncorrected results is shown alongside the corrected
    def Bennu_comparison_plots(corrected_name, uncorrected_name, label, lim=(0, 0)):
        """Scatter corrected and uncorrected versions of one metric against ground truth temperature, and save."""
        temp_dict_1000 = errors_1000['temperature']
        ground_temps_1000 = np.asarray(temp_dict_1000['ground_temperature'])
        temp_dict_1230 = errors_1230['temperature']
        ground_temps_1230 = np.asarray(temp_dict_1230['ground_temperature'])
        temp_dict_1500 = errors_1500['temperature']
        ground_temps_1500 = np.asarray(temp_dict_1500['ground_temperature'])
        # Pick the sub-dictionary the metric lives in, based on its name
        if 'MAE' in corrected_name:
            dict_name = 'MAE'
        elif 'SAM' in corrected_name:
            dict_name = 'SAM'
        else:
            # Fail clearly instead of the UnboundLocalError the implicit fall-through would raise
            raise ValueError(f'Unknown metric name: {corrected_name}')
        dict_1000 = errors_1000[dict_name]
        corrected_1000 = dict_1000[corrected_name]
        uncorrected_1000 = dict_1000[uncorrected_name]
        dict_1230 = errors_1230[dict_name]
        corrected_1230 = dict_1230[corrected_name]
        uncorrected_1230 = dict_1230[uncorrected_name]
        dict_1500 = errors_1500[dict_name]
        corrected_1500 = dict_1500[corrected_name]
        uncorrected_1500 = dict_1500[uncorrected_name]
        fig = plt.figure()
        # Use one color for uncorrected and other for corrected, colors determined in constants.py
        uncor_scatter1 = plt.scatter(ground_temps_1000, uncorrected_1000, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.uncor_plot_color)
        uncor_scatter2 = plt.scatter(ground_temps_1230, uncorrected_1230, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.uncor_plot_color)
        uncor_scatter3 = plt.scatter(ground_temps_1500, uncorrected_1500, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.uncor_plot_color)
        cor_scatter1 = plt.scatter(ground_temps_1000, corrected_1000, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.NNcor_plot_color)
        cor_scatter2 = plt.scatter(ground_temps_1230, corrected_1230, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.NNcor_plot_color)
        cor_scatter3 = plt.scatter(ground_temps_1500, corrected_1500, alpha=C.scatter_alpha, marker=C.scatter_marker, color=C.NNcor_plot_color)
        # If a limit other than (0,0) is given in arguments, use it for limiting the shown y-axis values
        if lim != (0, 0):
            plt.ylim(lim)
        leg = plt.legend([uncor_scatter1, cor_scatter1], ['Uncorrected', 'NN-corrected'])
        for lh in leg.legendHandles:
            lh.set_alpha(1)
        plt.xlabel('Ground truth temperature [K]')
        plt.ylabel(label)
        plt.savefig(Path(savefolder, f'{corrected_name}_{uncorrected_name}_Bennu.png'))
        plt.close(fig)

    Bennu_comparison_plots('reflected_MAE', 'reflected_MAE_uncorrected',
                           'Reflected radiance MAE', lim=C.reflrad_mae_plot_ylim)
    Bennu_comparison_plots('reflected_SAM', 'reflected_SAM_uncorrected',
                           'Reflected radiance spectral angle', lim=C.reflrad_sam_plot_ylim)
    Bennu_comparison_plots('reflectance_MAE', 'reflectance_MAE_uncorrected',
                           'Reflectance MAE', lim=C.reflectance_mae_plot_ylim)
    Bennu_comparison_plots('reflectance_SAM', 'reflectance_SAM_uncorrected',
                           'Reflectance spectral angle', lim=C.reflectance_sam_plot_ylim)

    # Plot of mean reflectances: average the per-local-time mean spectra into one mean spectrum each
    mean_ground_reflectance_1000 = errors_1000['mean']['mean_ground_reflectance']
    mean_ground_reflectance_1230 = errors_1230['mean']['mean_ground_reflectance']
    mean_ground_reflectance_1500 = errors_1500['mean']['mean_ground_reflectance']
    mean_ground_reflectance = np.mean(np.asarray([mean_ground_reflectance_1000, mean_ground_reflectance_1230, mean_ground_reflectance_1500]), axis=0)
    mean_uncorrected_reflectance_1000 = errors_1000['mean']['mean_uncorrected_reflectance']
    mean_uncorrected_reflectance_1230 = errors_1230['mean']['mean_uncorrected_reflectance']
    mean_uncorrected_reflectance_1500 = errors_1500['mean']['mean_uncorrected_reflectance']
    mean_uncorrected_reflectance = np.mean(np.asarray([mean_uncorrected_reflectance_1000, mean_uncorrected_reflectance_1230, mean_uncorrected_reflectance_1500]), axis=0)
    mean_corrected_reflectance_1000 = errors_1000['mean']['mean_corrected_reflectance']
    mean_corrected_reflectance_1230 = errors_1230['mean']['mean_corrected_reflectance']
    mean_corrected_reflectance_1500 = errors_1500['mean']['mean_corrected_reflectance']
    mean_corrected_reflectance = np.mean(np.asarray([mean_corrected_reflectance_1000, mean_corrected_reflectance_1230, mean_corrected_reflectance_1500]), axis=0)
    # Mean spectral reflectance over all samples, from ground truth, uncorrected and corrected reflectances
    fig = plt.figure()
    plt.plot(C.wavelengths, mean_uncorrected_reflectance, color=C.uncor_plot_color, linestyle=C.uncor_plot_linestyle, label='Uncorrected')
    plt.plot(C.wavelengths, mean_ground_reflectance, color=C.ground_plot_color, linestyle=C.ground_plot_linestyle, label='Ground')
    plt.plot(C.wavelengths, mean_corrected_reflectance, color=C.NNcor_plot_color, linestyle=C.NNcor_plot_linestyle, label='NN-corrected')
    plt.legend()
    plt.xlabel('Wavelength [µm]')
    plt.ylabel('Normalized reflectance')
    plt.savefig(Path(savefolder, 'mean_reflectances_Bennu.png'))
    plt.close(fig)
    # Removed leftover debug statement: print('test')