/
pplib.py
4095 lines (3775 loc) · 165 KB
/
pplib.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#########
# pplib #
#########
#pplib contains most of the necessary functions and definitions for the
# other scripts in the PulsePortraiture package. See pptoaslib for specific
# functions for the latest version of pptoas.
#Written by Timothy T. Pennucci (TTP; tim.pennucci@nanograv.org).
#Contributions by Scott M. Ransom (SMR), Paul B. Demorest (PBD), and Emmanuel
# Fonseca (EF).
###########
# imports #
###########
#Unfortunately, this has to come first in the event nodes = True
nodes = False #Used when needing parallelized operation
if nodes:
import matplotlib
matplotlib.use('Agg')
import sys
import time
import pickle
import numpy as np
import numpy.fft as fft
import scipy.interpolate as si
import scipy.optimize as opt
import scipy.signal as ss
try: import lmfit as lm
except ImportError: print "No lmfit found. You will not be able to use ppgauss.py or fit_powlaw()."
try: import pywt as pw
except ImportError: print "No pywt found. You will not be able to use wavelet_smooth() and will have limited, no-smoothing functionality in ppspline.py."
import psrchive as pr
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
from telescope_codes import telescope_code_dict
############
# settings #
############
#Exact dispersion constant (e**2/(2*pi*m_e*c)) (used by PRESTO).
Dconst_exact = 4.148808e3 #[MHz**2 cm**3 pc**-1 s]
#"Traditional" dispersion constant (used by PSRCHIVE,TEMPO,PINT).
Dconst_trad = 0.000241**-1 #[MHz**2 cm**3 pc**-1 s]
#Fitted DM values will depend on this choice. Choose wisely.
Dconst = Dconst_trad
#Power-law index for the scattering law (tau ~ nu**scattering_alpha).
scattering_alpha = -4.0
#Use get_noise and default_noise_method for noise levels instead of PSRCHIVE;
# see load_data.
use_get_noise = True
#Default get_noise method (see functions get_noise_*).
#_To_be_improved_.
default_noise_method = 'PS'
#Ignore 0-frequency (sum) component in Fourier fit if F0_fact == 0, else set
#F0_fact == 1.
F0_fact = 0
#Upper limit on the width of a Gaussian component to "help" in fitting.
#Should be either None or > 0.0.
wid_max = 0.25
#default_model is the default model_code used for generating Gaussian models.
#The value of a digit labels an evolutionary function that has one evolutionary
#parameter, in addition to the reference value. 0 = power-law, 1 = linear,
#...add your own below (see evolve_parameter function). The order of the
#digits corresponds to the Gaussian model parameter (loc,wid,amp). Unless
#otherwise specified in the model file after CODE, this set of evolutionary
#functions will be used. This will eventually be overhauled...!!!
default_model = '000'
# binshift is a fudge factor for scattering portrait functions; was -1;
# currently not used.
binshift = 1.0
###########
# display #
###########
#Set colormap preference
#Decent sequential colormaps: gist_heat, pink, copper, Oranges_r, gray, bone,
# Blues_r, cubehelix, terrain, YlOrBr_r
#see plt.cm for list of available colormaps.
default_colormap = 'gist_heat'
#Fall back to 'YlOrBr_r' if the preferred colormap is not registered.
if hasattr(plt.cm, default_colormap):
    plt.rc('image', cmap=default_colormap)
else:
    plt.rc('image', cmap='YlOrBr_r')
#List of colors; can do this better...
#(The basic cycle is repeated four times so long plots don't exhaust it.)
cols = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'orange', 'brown', 'purple', 'pink',
        'b', 'g', 'r', 'c', 'm', 'y', 'k', 'orange', 'brown', 'purple', 'pink',
        'b', 'g', 'r', 'c', 'm', 'y', 'k', 'orange', 'brown', 'purple', 'pink',
        'b', 'g', 'r', 'c', 'm', 'y', 'k', 'orange', 'brown', 'purple', 'pink']
########
# misc #
########
#RCSTRINGS dictionary, for the return codes given by scipy.optimize.fmin_tnc.
#These are only needed for debugging.
RCSTRINGS = {'-1':'INFEASIBLE: Infeasible (low > up).',
             '0':'LOCALMINIMUM: Local minima reach (|pg| ~= 0).',
             '1':'FCONVERGED: Converged (|f_n-f_(n-1)| ~= 0.)',
             '2':'XCONVERGED: Converged (|x_n-x_(n-1)| ~= 0.)',
             '3':'MAXFUN: Max. number of function evaluations reach.',
             '4':'LSFAIL: Linear search failed.',
             '5':'CONSTANT: All lower bounds are equal to the upper bounds.',
             '6':'NOPROGRESS: Unable to progress.',
             '7':'USERABORT: User requested end of minimization.'}
###########
# classes #
###########
class DataBunch(dict):
    """
    A dictionary whose entries double as attributes.

    db = DataBunch(a=1, b=2, ...) exposes each keyword both as db.a and as
    db['a']; updates through either syntax stay in sync because the
    instance's attribute dictionary *is* the dict itself.
    """
    def __init__(self, **kwds):
        super(DataBunch, self).__init__(**kwds)
        #Alias the attribute namespace to the mapping itself.
        self.__dict__ = self
class DataPortrait(object):
    """
    DataPortrait is a class that contains the data to which a model is fit.

    This class is also useful for the quick examining of PSRCHIVE archives in
    an interactive python environment.
    """

    def __init__(self, datafile=None, joinfile=None, quiet=False,
            **load_data_kwargs):
        """
        Unpack all of the data and set initial attributes.

        datafile is a PSRCHIVE archive, or a metafile (ASCII list) of
            archives.  If datafile is a metafile, "join" attributes are
            set, which are used to align the archives.  A large (>3) number
            of archives significantly slows the fitting process, and it has
            only been tested for the case that each archive originates from
            a unique receiver.
        joinfile is a file like that which is output by
            write_join_parameters, containing optional parameters to align
            the multiple archives.
        quiet=True suppresses output.
        **load_data_kwargs are passed through to load_data(...).
        """
        self.init_params = []
        self.joinfile = joinfile
        if file_is_type(datafile, "ASCII"):
            #Metafile branch: read a list of archives and build per-archive
            # "join" parameters (phase offset & delta-DM) used to align
            # them into one portrait.
            self.join_params = []
            self.join_fit_flags = []
            self.join_nchans = [0]
            self.join_nchanxs = [0]
            self.join_ichans = []
            self.join_ichanxs = []
            self.nchans = []
            self.metafile = self.datafile = datafile
            self.datafiles = open(datafile, "r").readlines()
            #Strip the trailing newline from each listed filename.
            self.datafiles = [self.datafiles[ifile][:-1] for ifile in
                    range(len(self.datafiles))]
            self.njoin = len(self.datafiles)
            self.Ps = 0.0
            self.nchan = 0
            self.nchanx = 0
            #self.nu0s = []
            self.lofreq = np.inf
            self.hifreq = 0.0
            self.freqs = []
            self.freqsxs = []
            self.port = []
            self.portx = []
            self.flux_prof = []
            self.flux_profx = []
            self.noise_stds = []
            self.noise_stdsxs = []
            self.SNRs = []
            self.SNRsxs = []
            self.weights = []
            self.weightsxs = []
            self.masks = []
            for ifile in range(len(self.datafiles)):
                datafile = self.datafiles[ifile]
                data = load_data(datafile, dedisperse=True, tscrunch=True,
                        pscrunch=True, fscrunch=False, flux_prof=True,
                        return_arch=True, quiet=quiet, **load_data_kwargs)
                self.nchan += data.nchan
                self.nchanx += len(data.ok_ichans[0])
                if ifile == 0:
                    #The first archive defines the phase reference profile.
                    self.join_nchans.append(self.nchan)
                    self.join_nchanxs.append(self.nchanx)
                    self.join_params.append(0.0)
                    self.join_fit_flags.append(0)
                    self.join_params.append(data.DM*0.0)
                    #Change the below to 0 to not fit for first DM
                    #Also see fit_gaussian_portrait(...) to fit for single DM
                    self.join_fit_flags.append(1)
                    self.nbin = data.nbin
                    self.phases = data.phases
                    refprof = data.prof
                    self.source = data.source
                else:
                    self.join_nchans.append(self.nchan)
                    self.join_nchanxs.append(self.nchanx)
                    prof = data.prof
                    #Initial phase offset w.r.t. the reference profile.
                    phi = -fit_phase_shift(prof, refprof, Ns=self.nbin).phase
                    self.join_params.append(phi) # Multiply by 0 to fix phase
                    #Change the below to 0 to not fit for phase
                    self.join_fit_flags.append(1)
                    self.join_params.append(data.DM*0.0)
                    self.join_fit_flags.append(1)
                self.Ps += data.Ps.mean()
                #Track the total frequency extent (band edges) over all
                # archives.
                lf = data.freqs.min() - (abs(data.bw) / (2*data.nchan))
                if lf < self.lofreq:
                    self.lofreq = lf
                hf = data.freqs.max() + (abs(data.bw) / (2*data.nchan))
                if hf > self.hifreq:
                    self.hifreq = hf
                #Accumulate per-channel quantities; "x" versions contain
                # only the unzapped (ok) channels.
                self.freqs.extend(data.freqs[0])
                self.freqsxs.extend(data.freqs[0, data.ok_ichans[0]])
                self.masks.extend(data.masks[0,0])
                self.port.extend(data.subints[0,0] * data.masks[0,0])
                self.portx.extend(data.subints[0,0, data.ok_ichans[0]])
                self.flux_prof.extend(data.flux_prof)
                self.flux_profx.extend(data.flux_prof[data.ok_ichans[0]])
                self.noise_stds.extend(data.noise_stds[0,0])
                self.noise_stdsxs.extend(
                        data.noise_stds[0,0][data.ok_ichans[0]])
                self.SNRs.extend(data.SNRs[0,0])
                self.SNRsxs.extend(data.SNRs[0,0][data.ok_ichans[0]])
                self.weights.extend(data.weights[0])
                self.weightsxs.extend(data.weights[0,data.ok_ichans[0]])
            #Average period over the archives.
            self.Ps /= len(self.datafiles)
            self.Ps = [self.Ps] #This line is a toy
            self.bw = self.hifreq - self.lofreq
            self.freqs = np.array(self.freqs)
            self.freqsxs = np.array(self.freqsxs)
            self.nu0 = self.freqs.mean()
            #Sort every channel-indexed quantity by frequency.
            self.isort = np.argsort(self.freqs)
            self.isortx = np.argsort(self.freqsxs)
            for ijoin in range(self.njoin):
                #Indices (within the sorted arrays) of the channels that
                # came from archive ijoin.
                join_ichans = np.intersect1d(np.where(self.isort >=
                    self.join_nchans[ijoin])[0], np.where(self.isort <
                        self.join_nchans[ijoin+1])[0])
                self.join_ichans.append(join_ichans)
                join_ichanxs = np.intersect1d(np.where(self.isortx >=
                    self.join_nchanxs[ijoin])[0], np.where(self.isortx <
                        self.join_nchanxs[ijoin+1])[0])
                self.join_ichanxs.append(join_ichanxs)
            self.masks = np.array(self.masks)[self.isort]
            self.masks = np.array([[self.masks]])
            self.port = np.array(self.port)[self.isort]
            self.portx = np.array(self.portx)[self.isortx]
            self.flux_prof = np.array(self.flux_prof)[self.isort]
            self.flux_profx = np.array(self.flux_profx)[self.isortx]
            self.noise_stds = np.array(self.noise_stds)[self.isort]
            self.noise_stds = np.array([[self.noise_stds]]) #For consistency
            self.noise_stdsxs = np.array(self.noise_stdsxs)[self.isortx]
            self.SNRs = np.array(self.SNRs)[self.isort]
            self.SNRsxs = np.array(self.SNRsxs)[self.isortx]
            self.weights = np.array([np.array(self.weights)[self.isort]])
            self.weightsxs = np.array([np.array(self.weightsxs)[self.isortx]])
            self.freqs.sort()
            self.freqsxs.sort()
            self.freqs = np.array([self.freqs])
            self.freqsxs = [self.freqsxs]
            self.join_params = np.array(self.join_params)
            self.join_fit_flags = np.array(self.join_fit_flags)
            if self.joinfile: #Read joinfile
                #Only the last len(datafiles) lines are used, i.e. the most
                # recently appended set of parameters.
                joinfile_lines = open(self.joinfile, "r").readlines()[-len(
                    self.datafiles):]
                joinfile_lines = [line.split() for line in joinfile_lines]
                try:
                    for ifile in range(len(joinfile_lines)):
                        ijoin = self.datafiles.index(joinfile_lines[ifile][0])
                        phi = np.double(joinfile_lines[ifile][1])
                        #NOTE(review): np.float is a deprecated alias for
                        # the builtin float in modern NumPy.
                        if len(joinfile_lines[ifile]) > 3: #New joinfiles...
                            DM = np.float(joinfile_lines[ifile][3])
                        else: #Old joinfiles...
                            DM = np.float(joinfile_lines[ifile][2])
                        self.join_params[ijoin*2] = phi
                        self.join_params[ijoin*2+1] = DM
                #NOTE(review): bare except silently discards any parse
                # failure; only a warning is printed.
                except:
                    print "Bad join file."
            self.all_join_params = [self.join_ichanxs, self.join_params,
                    self.join_fit_flags]
            if len(self.datafiles) == 1:
                self.data = data
                #Unpack the data dictionary into the local namespace;
                #see load_data for dictionary keys.
                for key in self.data.keys():
                    exec("self." + key + " = self.data['" + key + "']")
        else:
            #Single-archive branch: no join parameters needed.
            self.njoin = 0
            self.join_params = []
            self.join_ichans = []
            self.all_join_params = []
            self.datafile = datafile
            self.datafiles = [datafile]
            self.data = load_data(datafile, dedisperse=True,
                    dededisperse=False, tscrunch=True, pscrunch=True,
                    fscrunch=False, flux_prof=True, refresh_arch=True,
                    return_arch=True, quiet=quiet, **load_data_kwargs)
            #Unpack the data dictionary into the local namespace;
            #see load_data for dictionary keys.
            for key in self.data.keys():
                exec("self." + key + " = self.data['" + key + "']")
            if self.source is None: self.source = "noname"
            #Masked full portrait and "x" (ok-channels-only) versions.
            self.port = (self.masks * self.subints)[0,0]
            self.portx = self.port[self.ok_ichans[0]]
            self.flux_profx = self.flux_prof[self.ok_ichans[0]]
            self.freqsxs = [self.freqs[0,self.ok_ichans[0]]]
            self.noise_stdsxs = self.noise_stds[0,0,self.ok_ichans[0]]
            self.SNRsxs = self.SNRs[0,0,self.ok_ichans[0]]

    def apply_joinfile(self, nu_ref, undo=False):
        """
        Apply parameters in joinfile.

        nu_ref is the reference frequency [MHz], which should be the fitted
            model's reference frequency.
        undo=True rotates the data the other way.
        """
        #undo becomes +1 (apply) or -1 (undo) as a multiplicative sign.
        undo = (-1)**(int(undo))
        for ii in range(self.njoin):
            jic = self.join_ichans[ii]
            #join_params alternate [phase, DM, phase, DM, ...] per archive.
            self.port[jic] = rotate_data(self.port[jic],
                    -self.join_params[0::2][ii]*undo,
                    -self.join_params[1::2][ii]*undo, self.Ps[0],
                    self.freqs[0,jic], nu_ref)
            jicx = self.join_ichanxs[ii]
            self.portx[jicx] = rotate_data(self.portx[jicx],
                    -self.join_params[0::2][ii]*undo,
                    -self.join_params[1::2][ii]*undo, self.Ps[0],
                    self.freqsxs[0][jicx], nu_ref)
        #    self.model[jic] = rotate_data(self.model[jic],
        #            -self.join_params[0::2][ii]*undo,
        #            -self.join_params[1::2][ii]*undo, self.Ps[0],
        #            self.freqs[0,jic], nu_ref)
        #self.model_masked = self.model * self.masks[0,0]
        #self.modelx = np.compress(self.masks[0,0].mean(axis=1), self.model,
        #        axis=0)

    def normalize_portrait(self, method="rms"):
        """
        Normalize each channel's profile using normalize_portrait(...).

        method is one of "mean", "max", "prof", "rms", or "abs"; see the
            module-level function normalize_portrait for their meanings.
        NB: currently only works properly when nsub = 1.
        """
        if method not in ("mean", "max", "prof", "rms", "abs"):
            print "Unknown method for normalize_portrait(...), '%s'."%method
        else:
            if method == "prof":
                weights = self.weights[0]
                weightsx = self.weights[self.weights > 0]
            else:
                weights = weightsx = None
            #Full portrait; keep the pre-normalization noise levels so
            # unnormalize_portrait can restore them.
            self.unnorm_noise_stds = np.copy(self.noise_stds)
            self.port, self.norm_values = normalize_portrait(self.port,
                    method, weights=weights, return_norms=True)
            self.noise_stds[0,0] = get_noise(self.port, chans=True)
            self.flux_prof = self.port.mean(axis=1)
            #Condensed portrait
            self.unnorm_noise_stdsxs = np.copy(self.noise_stdsxs)
            self.portx = normalize_portrait(self.portx, method,
                    weights=weightsx, return_norms=False)
            self.noise_stdsxs = get_noise(self.portx, chans=True)
            self.flux_profx = self.portx.mean(axis=1)

    def unnormalize_portrait(self):
        """
        Undo normalize_portrait.

        Restores the portraits by multiplying back the stored norm_values
        and reinstates the saved pre-normalization noise levels.  No-op if
        normalize_portrait was never called.
        """
        if hasattr(self, 'unnorm_noise_stds'):
            self.port = (self.norm_values * self.port.transpose()).transpose()
            self.noise_stds = np.copy(self.unnorm_noise_stds)
            del(self.unnorm_noise_stds)
            self.flux_prof = self.port.mean(axis=1)
            self.portx = (self.norm_values[self.ok_ichans[0]] * \
                    self.portx.transpose()).transpose()
            self.noise_stdsxs = np.copy(self.unnorm_noise_stdsxs)
            del(self.unnorm_noise_stdsxs)
            self.flux_profx = self.portx.mean(axis=1)
            self.norm_values = np.ones(len(self.port))

    def smooth_portrait(self, smart=False, **kwargs):
        """
        Smooth portrait data using default settings from wavelet_smooth.

        smart=True uses smart_smooth(...).
        **kwargs get passed to wavelet_smooth and/or smart_smooth.
        """
        #Full portrait
        if smart:
            self.port = smart_smooth(self.port, try_nlevels=min(8,
                int(np.log2(self.nbin))), **kwargs)
        else:
            self.port = wavelet_smooth(self.port, **kwargs)
        #Re-estimate per-channel noise after smoothing.
        for ichan in range(len(self.port)):
            self.noise_stds[0,0,ichan] = get_noise(self.port[ichan])
        self.flux_prof = self.port.mean(axis=1)
        #Condensed portrait
        if smart:
            self.portx = smart_smooth(self.portx, try_nlevels=min(8,
                int(np.log2(self.nbin))), **kwargs)
        else:
            self.portx = wavelet_smooth(self.portx, **kwargs)
        for ichanx in range(len(self.portx)):
            self.noise_stdsxs[ichanx] = get_noise(self.portx[ichanx])
        self.flux_profx = self.portx.mean(axis=1)

    def fit_flux_profile(self, channel_errs=None, nu_ref=None, guessA=1.0,
            guessalpha=0.0, plot=True, savefig=False, quiet=False):
        """
        Fit a power-law to the phase-averaged flux spectrum of the data.

        Fitted parameters and uncertainties are added as class attributes.
        channel_errs are per-channel flux uncertainties; default is ones
            (unweighted fit).
        nu_ref is the reference frequency [MHz]; defaults to self.nu0.
        guessA is the initial amplitude parameter.
        guessalpha is the initial spectral index parameter.
        plot=True shows the fit results.
        savefig specifies a string for a saved figure; will not show the plot.
        quiet=True suppresses output.
        """
        if nu_ref is None: nu_ref = self.nu0
        #Noise level below may be off
        if channel_errs is None: channel_errs = np.ones(len(self.freqsxs[0]))
        fp = fit_powlaw(self.flux_profx, np.array([guessA,guessalpha]),
                channel_errs, self.freqsxs[0], nu_ref)
        if not quiet:
            print ""
            print "Flux-density power-law fit"
            print "----------------------------------"
            print "residual mean = %.2f"%fp.residuals.mean()
            print "residual std. = %.2f"%fp.residuals.std()
            print "reduced chi-squared = %.2f"%(fp.chi2 / fp.dof)
            print "A = %.3f +/- %.3f (flux at %.2f MHz)"%(fp.amp,
                    fp.amp_err, fp.nu_ref)
            print "alpha = %.3f +/- %.3f"%(fp.alpha, fp.alpha_err)
        if plot or savefig:
            #ax1 (residuals) is placed as the lower panel, ax2 (fit) as the
            # upper panel, per the explicit axes positions.
            ax1 = plt.subplot(211, position=(0.1,0.1,0.8,0.4))
            ax2 = plt.subplot(212, position=(0.1,0.5,0.8,0.4))
            ax1.errorbar(self.freqsxs[0], fp.residuals, channel_errs,
                    fmt='r+')
            plot_freqs = np.linspace(self.freqs[0].min(),
                    self.freqs[0].max(), 1000)
            ax2.plot(plot_freqs, powlaw(plot_freqs, fp.nu_ref, fp.amp,
                fp.alpha), 'k-')
            ax2.errorbar(self.freqsxs[0], self.flux_profx, channel_errs,
                    fmt='r+')
            ax1.set_xlim(self.freqs[0].min(), self.freqs[0].max())
            ax2.set_xlim(ax1.get_xlim())
            ax2.set_xticklabels([])
            ax1.set_yticks(ax1.get_yticks()[1:-1])
            ax2.set_yticks(ax2.get_yticks()[1:-1])
            ax2.text(0.05, 0.1, r"A$_{\nu_0}$ = %.2f $\pm$ %.2f"%(
                fp.amp, fp.amp_err) + "\n" + r"$\alpha$ = %.2f $\pm$ %.2f"%(
                    fp.alpha, fp.alpha_err), ha="left", va="bottom",
                transform=ax2.transAxes)
            ax1.set_xlabel("Frequency [MHz]")
            ax1.set_ylabel("Residual")
            ax2.set_ylabel("Flux")
            ax2.set_title("Average Flux Profile for %s"%self.source)
            if savefig: plt.savefig(savefig)
            if plot: plt.show()
        #Store the fit results as attributes.
        self.flux_fit = fp
        self.spect_A = fp.amp
        self.spect_A_err = fp.amp_err
        self.spect_A_ref = fp.nu_ref
        self.spect_index = fp.alpha
        self.spect_index_err = fp.alpha_err

    def write_join_parameters(self):
        """
        Write (append) the JOIN parameters to file.

        The output file is self.joinfile if provided, otherwise
        self.model_name + ".join".
        This function is a hack until something better is developed for how
        to deal with these alignment parameters.
        NB: The join parameters are "opposite" of how they are used to rotate
            the data with e.g. rotate_data; use a negative!
        ALSO NB: In order to get "proper barycentic DMs" from these delta-DMs,
            one must do something like DM = (DM0 + delta-DM)*df, where df
            is the doppler factor and DM0 is the nominal dispersion
            measure, both obtained from the archive via PSRCHIVE:
            e.g. df = archive.get_Integration(0).get_doppler_factor and
            DM0 = archive.get_dispersion_measure().
        """
        #print "Beware: JOIN Parameters should be negated!"
        #print "Beware: JOIN DMs are offsets and not doppler corrected!"
        if self.joinfile is not None:
            joinfile = self.joinfile
        else:
            #NOTE(review): assumes self.model_name was set by a fitting
            # routine before this is called.
            joinfile = self.model_name + ".join"
        jf = open(joinfile, "a")
        header = "# archive name" + " "*32 + "-phase offset & err [rot]" + \
                " "*2 + "-delta-DM & err [cm**-3 pc]\n"
        jf.write(header)
        for ifile in range(len(self.datafiles)):
            datafile = self.datafiles[ifile]
            phase = self.join_params[ifile*2]
            dm = self.join_params[ifile*2 + 1]
            #NOTE(review): assumes self.join_param_errs has been set by the
            # fitting routine before this is called.
            line = datafile + " "*abs(45-len(datafile)) + "% .10f"%phase + \
                    " "*1 + "%.10f"%self.join_param_errs[::2][ifile] + \
                    " "*2 + "% .6f"%dm + " "*1 + \
                    "%.6f"%self.join_param_errs[1::2][ifile] + "\n"
            jf.write(line)
        jf.close()

    def rotate_stuff(self, phase=0.0, DM=0.0, ichans=None, ichanxs=None,
            nu_ref=None, model=False):
        """
        Rotates the data or model portraits according to rotate_portrait(...).

        phase is a value specifying the amount of achromatic rotation [rot].
        DM is a value specifying the amount of rotation based on the
            cold-plasma dispersion law [cm**-3 pc].
        ichans is an array specifying the channel indices to be rotated; in
            this way, disparate bands can be aligned.  ichans=None defaults
            to all channels.
        ichanxs is the same as ichans, but for the condensed portrait.
        nu_ref is the reference frequency [MHz] that has zero dispersive
            delay.  If nu_ref=None, defaults to self.nu0.
        model=True applies the rotation to the model instead of the data.
        """
        P = self.Ps[0]
        if nu_ref is None: nu_ref = self.nu0
        if ichans is None: ichans = np.arange(len(self.freqs[0]))
        if ichanxs is None: ichanxs = np.arange(len(self.freqsxs[0]))
        freqs = self.freqs[0][ichans]
        freqsxs = self.freqsxs[0][ichanxs]
        if not model:
            #Rotate the data portraits and any profile-like attributes.
            self.port[ichans] = rotate_portrait(self.port[ichans], phase, DM,
                    P, freqs, nu_ref)
            self.portx[ichanxs] = rotate_portrait(self.portx[ichanxs], phase,
                    DM, P, freqsxs, nu_ref)
            if hasattr(self, 'prof'):
                self.prof = rotate_portrait([self.prof], phase)[0]
            if hasattr(self, 'mean_prof'):
                self.mean_prof = rotate_portrait([self.mean_prof], phase)[0]
            if hasattr(self, 'eigvec'):
                self.eigvec = rotate_portrait(self.eigvec.T, phase).T
        if model and hasattr(self, 'model'):
            #Rotate the model portraits and smooth model components.
            self.model[ichans] = rotate_portrait(self.model[ichans], phase,
                    DM, P, freqs, nu_ref)
            self.modelx[ichanxs] = rotate_portrait(self.modelx[ichanxs],
                    phase, DM, P, freqsxs, nu_ref)
            self.model_masked[ichans] = rotate_portrait(
                    self.model_masked[ichans], phase, DM, P, freqs, nu_ref)
            if hasattr(self, 'smooth_mean_prof'):
                self.smooth_mean_prof = rotate_portrait(
                        [self.smooth_mean_prof], phase)[0]
            if hasattr(self, 'smooth_eigvec'):
                self.smooth_eigvec = rotate_portrait(self.smooth_eigvec.T,
                        phase).T

    def unload_archive(self, outfile=None, quiet=False):
        """
        Unload a PSRCHIVE archive to outfile containing the same data.

        outfile=None overwrites the input archive.
        quiet=True suppresses output.
        Only works if input datafile is a PSRCHIVE archive.  The written
        archive will have the same weighted channels as the input datafile.
        """
        if hasattr(self, 'arch'):
            if outfile is None: outfile = self.datafile
            shape = self.arch.get_data().shape
            nsub,npol,nchan,nbin = shape
            data = np.zeros(shape)
            #Broadcast the (possibly modified) portrait into every
            # subintegration and polarization.
            for isub in range(nsub):
                for ipol in range(npol):
                    data[isub,ipol] = self.port
            unload_new_archive(data, self.arch, outfile,
                    DM=self.arch.get_dispersion_measure(),
                    #dmc=int(self.arch.get_dedispersed()), weights=self.weights,
                    dmc=self.dmc, weights=self.weights,
                    quiet=quiet)

    def write_model_archive(self, outfile, quiet=False):
        """
        Write a PSRCHIVE archive to outfile containing the model.

        outfile is the name of the written archive.
        quiet=True suppresses output.
        Only works if input datafile is a PSRCHIVE archive.  The written
        archive will have the same weighted channels as the input datafile.
        """
        if hasattr(self, 'model') and hasattr(self, 'arch'):
            shape = self.arch.get_data().shape
            nsub,npol,nchan,nbin = shape
            model_data = np.zeros(shape)
            for isub in range(nsub):
                for ipol in range(npol):
                    model_data[isub,ipol] = self.model
            #DM=0 / dmc=0: the model is written without dispersion applied.
            unload_new_archive(model_data, self.arch, outfile, DM=0.0, dmc=0,
                    weights=self.weights, quiet=quiet)

    def show_data_portrait(self, **kwargs):
        """
        Show the data portrait.

        See show_portrait(...)
        """
        title = "%s Data Portrait"%self.source
        show_portrait(self.port * self.masks[0,0], self.phases, self.freqs[0],
                title, True, True, bool(self.bw < 0), **kwargs)

    def show_model_portrait(self, **kwargs):
        """
        Show the masked model portrait.

        See show_portrait(...)
        """
        if not hasattr(self, 'model'): return None
        title = "%s Model Portrait"%self.source
        show_portrait(self.model * self.masks[0,0], self.phases,
                self.freqs[0], title, True, True, bool(self.bw < 0), **kwargs)

    def show_model_fit(self, **kwargs):
        """
        Show the model, data, and residuals.

        See show_residual_plot(...)
        """
        if not hasattr(self, 'model'): return None
        resids = self.port - self.model_masked
        titles = ("%s"%self.datafile, "%s"%self.model_name, "Residuals")
        show_residual_plot(self.port, self.model, resids, self.phases,
                self.freqs[0], self.noise_stds[0,0], 0, titles,
                bool(self.bw < 0), **kwargs)
#############
# functions #
#############
def set_colormap(colormap):
    """
    Set the default colormap to colormap and apply to current image if any.

    colormap is the name of a registered matplotlib colormap.
    See help(colormaps) for more information.
    Adapted from matplotlib.pyplot: plt.pink().
    """
    plt.rc("image", cmap=colormap)
    im = plt.gci()
    if im is not None:
        #set_cmap resolves a registered colormap name directly; no need for
        # the previous exec(...) string-evaluation hack.
        im.set_cmap(plt.get_cmap(colormap))
    plt.draw_if_interactive()
def get_bin_centers(nbin, lo=0.0, hi=1.0):
    """
    Return nbin bin centers with extremities at lo and hi.

    nbin is the number of bins to return the centers of.
    lo is the value of the ``left-edge'' of the first bin.
    hi is the value of the ``right-edge'' of the last bin.
    """
    lo = np.double(lo)
    hi = np.double(hi)
    #Each center sits half a bin-width in from the corresponding edge.
    half_bin = (hi - lo) / (nbin * 2)
    centers = np.linspace(lo + half_bin, hi - half_bin, nbin)
    return np.double(centers)
def count_crossings(x, x0):
    """
    Return number of crossings in the 1-d array x across threshold x0.

    x is the 1-D array of values.
    x0 is the threshold across which to count crossings.
    """
    offset = x - x0
    #Sign changes between consecutive samples mark crossings; samples that
    # sit exactly on the threshold are subtracted off.
    sign_changes = (np.diff(np.sign(offset)) != 0).sum()
    on_threshold = (offset == 0).sum()
    return sign_changes - on_threshold
def weighted_mean(data, errs=1.0):
    """
    Return the weighted mean and its standard error.

    data is a 1-D array of data values.
    errs is a 1-D array of data errors a la 1-sigma uncertainties (errs**-2
        are the weights in the weighted mean); a scalar errs (the default)
        gives all points equal weight.
    Entries with errs <= 0 are excluded from the mean and its error.
    """
    #The old scalar check, hasattr(errs, 'is_integer'), only caught Python
    # floats; np.isscalar also handles ints and numpy scalars.
    if np.isscalar(errs):
        errs = np.ones(len(data))
    else:
        errs = np.asarray(errs)
    iis = np.where(errs > 0.0)[0]
    weights = errs[iis]**-2.0
    mean = (data[iis]*weights).sum() / weights.sum()
    mean_std_err = weights.sum()**-0.5
    return mean, mean_std_err
def get_WRMS(data, errs=1.0):
    """
    Return the weighted root-mean-square value.  Mostly untested.

    data is a 1-D array of data values.
    errs is a 1-D array of data errors a la 1-sigma uncertainties (errs**-2
        are the weights); a scalar errs (the default) gives all points equal
        weight.
    Entries with errs <= 0 are excluded.
    """
    #The old scalar check, hasattr(errs, 'is_integer'), only caught Python
    # floats; np.isscalar also handles ints and numpy scalars.
    if np.isscalar(errs):
        errs = np.ones(len(data))
    else:
        errs = np.asarray(errs)
    iis = np.where(errs > 0.0)[0]
    #RMS is taken about the weighted mean.
    w_mean = weighted_mean(data, errs)[0]
    weights = errs[iis]**-2.0
    d_sum = ((data[iis] - w_mean)**2.0 * weights).sum()
    w_sum = weights.sum()
    return (d_sum / w_sum)**0.5
def get_red_chi2(data, model, errs=None, dof=None):
"""
Return reduced chi-squared given input data and model.
data is a 1- or 2-D array of data values.
model is a 1- or 2-D array of model values.
errs is a value or 1-D array of '1-sigma' uncertainties on the data. If
None, errs is estimated using get_noise(data).
dof is an integer number of degrees of freedom. If None, dof =
sum(data.shape).
"""
resids = data - model
if errs is None:
if len(data.shape) == 1: errs = get_noise(data)
elif len(data.shape) == 2: errs = get_noise(data, chans=True)
else:
print "Can only handle 1- or 2-D input."
if dof is None: dof = sum(data.shape)
if len(data.shape) == 1:
red_chi2 = np.sum((resids/errs)**2.0) / dof
else:
red_chi2 = np.array([(resids[ii]/errs[ii])**2.0 for ii in
range(len(resids))]).sum() / dof
return red_chi2
def gaussian_function(xs, loc, wid, norm=False):
    """
    Evaluate a Gaussian with center loc and FWHM wid at the values xs.

    xs is the array of values that are evaluated in the function.
    loc is the pulse phase location (0-1) [rot].
    wid is the Gaussian pulse's full width at half-max (FWHM) [rot].
    norm=True returns the profile such that the integrated density = 1.
    """
    #FWHM = 2*sqrt(2*ln(2)) * sigma
    sigma = wid / (2 * np.sqrt(2 * np.log(2)))
    deviates = (xs - loc) / sigma
    values = np.exp(-0.5 * deviates**2)
    if norm:
        #Unit-area normalization factor, 1/(sigma*sqrt(2*pi)).
        values = values * (sigma**2.0 * 2.0 * np.pi)**-0.5
    return values
def gaussian_profile(nbin, loc, wid, norm=False, abs_wid=False, zeroout=True):
    """
    Return a Gaussian pulse profile with nbin bins and peak amplitude of 1.

    nbin is the number of bins in the profile.
    loc is the pulse phase location (0-1) [rot].
    wid is the Gaussian pulse's full width at half-max (FWHM) [rot].
    norm=True returns the profile such that the integrated density = 1.
    abs_wid=True, will use abs(wid).
    zeroout=True and wid <= 0, return a zero array.
    Note: The FWHM of a Gaussian is approx 2.35482 "sigma", or exactly
          2*sqrt(2*ln(2)).
    Taken and tweaked from SMR's pygaussfit.py
    """
    #Maybe should move these checks to gen_gaussian_portrait?
    if abs_wid:
        wid = abs(wid)
    if wid > 0.0:
        pass
    elif wid == 0.0:
        return np.zeros(nbin, 'd')
    elif wid < 0.0 and zeroout:
        return np.zeros(nbin, 'd')
    elif wid < 0.0 and not zeroout:
        #Negative width is allowed through (caller's responsibility).
        pass
    else:
        return 0
    sigma = wid / (2 * np.sqrt(2 * np.log(2)))
    mean = loc % 1.0
    locval = get_bin_centers(nbin, lo=0.0, hi=1.0)
    #Wrap the bin centers onto the half-rotation nearest the pulse location
    # so that peaks near phase 0/1 are evaluated correctly.
    if (mean < 0.5):
        locval = np.where(np.greater(locval, mean + 0.5), locval - 1.0, locval)
    else:
        locval = np.where(np.less(locval, mean - 0.5), locval + 1.0, locval)
    try:
        zs = (locval - mean) / sigma
        #Only evaluate the exponential within 20 sigma of the peak; beyond
        # that the value underflows to ~0 anyway.
        okzinds = np.compress(np.fabs(zs) < 20.0, np.arange(nbin)) #Why 20?
        okzs = np.take(zs, okzinds)
        retval = np.zeros(nbin, 'd')
        np.put(retval, okzinds, np.exp(-0.5 * (okzs)**2.0)/(sigma * np.sqrt(2 *
            np.pi)))
        if norm:
            return retval
        else:
            if np.max(abs(retval)) == 0.0:
                return retval
            else:
                #Rescale so that amplitude 1.0 corresponds to the true peak
                # at loc, which may fall between bin centers.
                z = (locval[retval.argmax()] - loc) / sigma
                fact = np.exp(-0.5 * z**2.0) / retval[retval.argmax()]
                return fact * retval
    except OverflowError:
        print "Problem in gaussian_profile: mean = %f sigma = %f" %(mean,
            sigma)
        return np.zeros(nbin, 'd')
def gen_gaussian_profile(params, nbin):
    """
    Return a model profile with ngauss Gaussian components.

    params is a sequence of 2 + (ngauss*3) values where the first value is
        the DC component, the second value is the scattering timescale [bin]
        and each remaining group of three represents the Gaussians' loc
        (0-1), wid (i.e. FWHM) (0-1), and amplitude (>0.0).
    nbin is the number of bins in the model.
    Taken and tweaked from SMR's pygaussfit.py
    """
    #Floor division keeps ngauss an integer; identical to the old '/' under
    # Python 2 integer division, and correct under true division too.
    ngauss = (len(params) - 2) // 3
    model = np.zeros(nbin, dtype='d') + params[0]
    for igauss in range(ngauss):
        loc, wid, amp = params[(2 + igauss*3):(5 + igauss*3)]
        model += amp * gaussian_profile(nbin, loc, wid)
    if params[1] != 0.0:
        #Apply scattering by convolution with the scattering kernel in the
        # Fourier domain; the timescale is converted from [bin] to [rot].
        sp_FT = scattering_profile_FT(float(params[1])/nbin, nbin)
        model = np.fft.irfft(sp_FT * np.fft.rfft(model))
    return model
def gen_gaussian_portrait(model_code, params, scattering_index, phases, freqs,
        nu_ref, join_ichans=[], P=None):
    """
    Return a Gaussian-component model portrait based on input parameters.

    model_code is a three digit string specifying the evolutionary functions
        to be used for the three Gaussian parameters (loc,wid,amp); see
        pplib.py header for details.
    params is an array of 2 + (ngauss*6) + 2*len(join_ichans) values.
        The first value is the DC component, and the second value is the
        scattering timescale [bin]. The next ngauss*6 values represent the
        Gaussians' loc (0-1), evolution parameter in loc, wid (i.e. FWHM)
        (0-1), evolution parameter in wid, amplitude (>0,0), and spectral
        index alpha (no implicit negative). The remaining 2*len(join_ichans)
        parameters are pairs of phase and DM. The iith list of channels in
        join_ichans gets rotated in the generated model by the iith pair of
        phase and DM.
    scattering_index is the power-law index of the scattering law; the default
        is set in the header lines of pplib.py.
    phases is the array of phase values (will pass nbin to
        gen_gaussian_profile).
    freqs is the array of frequencies at which to calculate the model.
    nu_ref is the frequency to which the locs, wids, and amps reference.
    join_ichans is used only in ppgauss, in which case the period P [sec] needs
        to be provided.
    The units of the evolution parameters and the frequencies need to match
        appropriately.
    """
    njoin = len(join_ichans)
    if njoin:
        #The trailing 2*njoin values are (phase, DM) pairs for the joins;
        #strip them off so the remaining params are purely Gaussian terms.
        join_params = params[-njoin*2:]
        params = params[:-njoin*2]
    #Below, params[1] is multiplied by 0 so that scattering is taken care of
    #outside of gen_gaussian_profile
    #refparams holds the nu_ref values: [DC, 0, loc1, wid1, amp1, loc2, ...]
    refparams = np.array([params[0]] + [params[1]*0.0] + list(params[2::2]))
    tau = params[1]
    #Evolution parameters are interleaved with the reference values in params
    locparams = params[3::6]
    widparams = params[5::6]
    ampparams = params[7::6]
    ngauss = len(refparams[2::3])
    nbin = len(phases)
    nchan = len(freqs)
    gport = np.empty([nchan, nbin])
    #gparams: one gen_gaussian_profile parameter vector per channel
    gparams = np.empty([nchan, len(refparams)])
    #DC term
    gparams[:,0] = refparams[0]
    #Scattering term - first make unscattered portrait
    gparams[:,1] = refparams[1]
    #Locs
    gparams[:,2::3] = evolve_parameter(freqs, nu_ref, refparams[2::3],
            locparams, model_code[0])
    #Wids
    gparams[:,3::3] = evolve_parameter(freqs, nu_ref, refparams[3::3],
            widparams, model_code[1])
    #Amps
    gparams[:,4::3] = evolve_parameter(freqs, nu_ref, refparams[4::3],
            ampparams, model_code[2])
    for ichan in range(nchan):
        #Need to constrain so values don't go negative, etc., which is
        #currently taken care of in gaussian_profile
        gport[ichan] = gen_gaussian_profile(gparams[ichan], nbin)
    if tau != 0.0:
        #sk = scattering_kernel(tau, nu_ref, freqs, np.arange(nbin), 1.0,
        #        alpha=scattering_index)
        #gport = add_scattering(gport, sk, repeat=3)
        #Scatter all channels at once via multiplication in the Fourier
        #domain; tau is converted from bins to phase before evolving it
        #across frequency with the scattering power law.
        taus = scattering_times(float(tau)/nbin, scattering_index, freqs,
                nu_ref)
        sp_FT = scattering_portrait_FT(taus, nbin)
        gport = np.fft.irfft(sp_FT * np.fft.rfft(gport, axis=-1), axis=-1)
    if njoin:
        #Rotate each joined group of channels by its fitted phase/DM pair
        for ij in range(njoin):
            join_ichan = join_ichans[ij]
            phi = join_params[0::2][ij]
            DM = join_params[1::2][ij]
            gport[join_ichan] = rotate_data(gport[join_ichan], phi,
                    DM, P, freqs[join_ichan], nu_ref)
    return gport
def gen_spline_portrait(mean_prof, freqs, eigvec, tck, nbin=None):
    """
    Generate a model portrait from make_spline_model(...) output.

    mean_prof is the mean profile.
    freqs are the frequencies at which to build the model.
    eigvec are the eigenvectors providing the basis for the B-spline curve.
    tck is a tuple containing knot locations, B-spline coefficients, and
        spline degree (output of si.splprep(...)).
    nbin is the number of phase bins to use in the model; if different from
        len(mean_prof), a resampling function is used.
    """
    nchan = len(freqs)
    nbin_prof = len(mean_prof)
    if eigvec.shape[1]:
        #Evaluate the spline curve at freqs, project back through the
        #eigenbasis, and add the deviations to the mean profile.
        proj = np.array(si.splev(freqs, tck, der=0, ext=0)).T
        port = np.dot(proj, eigvec.T) + mean_prof
    else:
        #No eigenvectors: every channel is just the mean profile
        port = np.tile(mean_prof, nchan).reshape(nchan, nbin_prof)
    if nbin is not None and nbin_prof != nbin:
        #ss.resample introduces a phase shift; rotate to compensate
        shift = 0.5 * (nbin**-1 - nbin_prof**-1)
        port = rotate_portrait(ss.resample(port, nbin, axis=1), shift)
    return port
def make_constant_portrait(archive, outfile, profile=None, DM=0.0, dmc=False,
        weights=None, quiet=False):
    """
    Fill an archive with one profile.

    archive is the name of the dummy PSRCHIVE archive that will be filled with
        profile.
    outfile is the name of the unloaded, output archive, which will have the
        same nsub, npol, nchan, nbin, frequencies, etc., as archive.
    profile is the array containing the profile to be used; it must have the
        same number of phase bins nbin as archive. If None, archive will be
        T-, P-, and F-scrunched and the resulting average profile will be
        used.
    DM is the header DM to store in the unloaded archive.
    dmc=False stores the unloaded archive in the dispersed state.
    weights is an nsub x nchan array of channel weights; if None, ones are
        used as weights.
    quiet=True suppresses output.
    """
    arch = pr.Archive_load(archive)
    nsub,npol,nchan,nbin = arch.get_data().shape
    if profile is None:
        #Scrunch everything down to a single average profile to replicate
        arch.tscrunch()
        arch.pscrunch()
        arch.fscrunch()
        profile = arch.get_data()[0,0,0]
        arch.refresh()
    nbin_check_output = "len(profile) != number of bins in dummy archive"
    assert (len(profile) == nbin), nbin_check_output
    if weights is None:
        weights = np.ones([nsub,nchan])
    #Broadcast the single profile across all subints, pols, and channels in
    #one vectorized assignment (replaces a triple Python loop); np.zeros
    #keeps the output float64 as before.
    data = np.zeros([nsub,npol,nchan,nbin])
    data[...] = profile
    unload_new_archive(data, arch, outfile, DM=DM, dmc=dmc, weights=weights,
            quiet=quiet)
def power_law_evolution(freqs, nu_ref, parameter, index):
"""
Evolve the parameter over freqs by a power-law with index = index.
F(freq) = parameter * (freq/nu_ref)**index