
Commit

Merge pull request #8 from rcjackson/master
Sync changes
rcjackson committed Sep 6, 2023
2 parents 07cc55b + e2d3513 commit 56aa8aa
Showing 12 changed files with 128 additions and 140 deletions.
8 changes: 4 additions & 4 deletions doc/source/conf.py
@@ -13,7 +13,7 @@
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

import sphinx_theme

# -- Project information -----------------------------------------------------

@@ -46,7 +46,7 @@
'gallery_dirs': 'source/auto_examples'
}

#Add any paths that contain templates here, relative to this directory.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
@@ -67,9 +67,9 @@
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme = 'neo_rtd_theme'
import sphinx_theme

html_theme_path = [sphinx_theme.get_html_theme_path()]

# Numpy autodoc attributes
numpydoc_show_class_members = True
autosummary_generate = True
autosummary_generate = True
2 changes: 1 addition & 1 deletion examples/plot_moments.py
@@ -25,4 +25,4 @@
my_ds["doppler_velocity_max_peak"].plot(ax=ax[1], y='range')
plt.show()

test_file.close()
test_file.close()
6 changes: 3 additions & 3 deletions examples/plot_power_spectra.py
@@ -17,8 +17,8 @@
# Plot the power spectra for a given time and height
my_time = datetime(2017, 8, 4, 0, 40, 59)
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
my_ds["power_spectra_normed_interp"].sel(time=my_time, range=350., method='nearest').plot(ax=ax[0])
my_ds["power_spectra_normed_interp"].sel(time=my_time, range=950., method='nearest').plot(ax=ax[1])
out_ds["power_spectral_density"].sel(time=my_time, range=350., method='nearest').plot(ax=ax[0])
out_ds["power_spectral_density"].sel(time=my_time, range=950., method='nearest').plot(ax=ax[1])
plt.show()

test_file.close()
test_file.close()
7 changes: 6 additions & 1 deletion examples/read_00_file.py
@@ -1,6 +1,11 @@
"""
Example on reading raw Halo Photonics data
------------------------------------------
"""
import highiq

ds = highiq.io.read_00_data('sgpdlacfC1.00.20220107.141001.raw.aet_Stare_107_20220107_14.raw',
'sgpdlprofcalC1.home_point')
my_spectra = highiq.calc.get_psd(ds)
print(ds)
print(ds)
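
The updated example stops at the power spectra; a minimal sketch of continuing the pipeline through the moment retrieval follows. The snr_thresh value and moment list are arbitrary illustrations, and it is assumed that get_lidar_moments returns the dataset with the requested moment variables added (highiq is already imported above).

# Sketch (not part of the committed example): continue from my_spectra above.
# get_lidar_moments requires that get_psd has already been run, since it looks
# for the 'power_spectral_density' variable added by get_psd.
moments = highiq.calc.get_lidar_moments(my_spectra, snr_thresh=1.0,
                                        which_moments=['snr', 'doppler_velocity'])
print(moments)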
2 changes: 1 addition & 1 deletion highiq/__init__.py
@@ -1,4 +1,4 @@
__version__ = "1.0.1"
__version__ = "1.1"
from . import calc
from . import io
from . import vis
51 changes: 14 additions & 37 deletions highiq/calc/moments.py
@@ -5,7 +5,6 @@

def _gpu_calc_power(psd, dV, block_size=200, normed=True):
shp = psd.shape
times = shp[0]
power = np.zeros((shp[0], shp[1]))
if len(shp) == 3:
gpu_array = tf.constant(psd, dtype=tf.float32)
@@ -25,7 +24,6 @@ def _gpu_calc_power(psd, dV, block_size=200, normed=True):

def _gpu_calc_velocity(psd, power, vel_bins, dV):
shp = psd.shape
times = shp[0]
gpu_array = tf.constant(psd, dtype=tf.float32)
power_array = tf.constant(power, dtype=tf.float32)
vel_bins_tiled = np.tile(vel_bins, (shp[0], shp[1], 1))
@@ -35,7 +33,6 @@ def _gpu_calc_velocity(psd, power, vel_bins, dV):


def _gpu_calc_velocity_dumb(psd, vel_bins):
shp = psd.shape
dV = np.diff(vel_bins)[0]
vel_min = vel_bins.min()
gpu_array = tf.constant(psd)
@@ -55,22 +52,12 @@ def _gpu_calc_spectral_width(psd, power, vel_bins, velocity, dV):

velocity_array = tf.transpose(np.tile(velocity, (shp[2], 1, 1)), [1, 2, 0])
vel_bins_tiled = np.tile(vel_bins, (times, shp[1], 1))
gpu_array = tf.math.sqrt(1 / power_array *
tf.math.reduce_sum(
(vel_bins_tiled - velocity_array)**2 * gpu_array, axis=2))
gpu_array = tf.math.sqrt(1 / power_array * tf.math.reduce_sum(
(vel_bins_tiled - velocity_array)**2 * gpu_array, axis=2))
specwidth = gpu_array.numpy()
return specwidth


def _gpu_snr(power, noise):
shp = power.shape
power_array = tf.constant(power, dtype=tf.float32)
gpu_noise = tf.constant(noise, dtype=tf.float32)
power_array = 10. * (tf.math.log(power_array / gpu_noise) / tf.math.log(10.))
snr = power_array.numpy()
return snr


def _gpu_calc_skewness(psd, power, vel_bins, velocity, spec_width, dV):
shp = psd.shape
times = shp[0]
@@ -82,16 +69,14 @@ def _gpu_calc_skewness(psd, power, vel_bins, velocity, spec_width, dV):
velocity_array = tf.transpose(np.tile(velocity, (shp[2], 1, 1)), [1, 2, 0])
vel_bins_tiled = np.tile(vel_bins, (times, shp[1], 1))
gpu_array = 1 / power_array * tf.math.reduce_sum(
(vel_bins_tiled - velocity_array)**3 * gpu_array, axis=2)
(vel_bins_tiled - velocity_array)**3 * gpu_array, axis=2)
skewness = gpu_array.numpy()
return skewness


def _gpu_calc_kurtosis(psd, power, vel_bins, velocity, spec_width, dV):
shp = psd.shape
times = shp[0]
kurtosis = np.zeros((shp[0], shp[1]))

gpu_array = tf.constant(psd.values, dtype=tf.float32)
power_array = tf.constant(power, dtype=tf.float32)
spec_width_array = tf.constant(spec_width, dtype=tf.float32)
@@ -129,6 +114,8 @@ def get_lidar_moments(spectra, snr_thresh=0, block_size_ratio=1.0, which_moments
spectra: ACT Dataset
The database with the Doppler lidar moments.
"""
if 'power_spectral_density' not in spectra.variables.keys():
raise ValueError("You must calculate the power spectra before calculating moments!")
if which_moments is None:
which_moments = ['snr', 'doppler_velocity', 'spectral_width',
'skewness', 'kurtosis']
@@ -139,33 +126,23 @@
raise ValueError("block_size_ratio must be a positive floating point number!")

dV = np.diff(spectra['vel_bins'])[0]
linear_psd = spectra['power_spectral_density']
linear_psd = spectra['power_spectral_density'] - 1
linear_psd_0filled = linear_psd.fillna(0).values
power = _gpu_calc_power(
linear_psd_0filled, dV)
if ('doppler_velocity' in which_moments or 'spectral_width' in which_moments or
'skewness' in which_moments or 'kurtosis' in which_moments):
velocity = _gpu_calc_velocity(
linear_psd_0filled, power,
spectra['vel_bins'].values, dV)
velocity = _gpu_calc_velocity(linear_psd_0filled, power, spectra['vel_bins'].values, dV)

spectra['noise'] = spectra['power_bkg'].sum(axis=-1)
if 'snr' in which_moments:
power_with_noise = _gpu_calc_power(
spectra['power'].values, dV,
normed=False)
power_with_noise = xr.DataArray(power_with_noise, dims=('time', 'range'))
spectra['snr'] = xr.DataArray(
_gpu_snr(power_with_noise.values, spectra['noise'].values),
dims=('time', 'range'))
power_with_noise = dV * spectra['power_spectral_density'].sum(dim='vel_bins')
spectra['snr'] = power_with_noise / (dV * len(spectra['vel_bins']))
spectra['snr'].attrs['long_name'] = "Signal to Noise Ratio"
spectra['snr'].attrs['units'] = "dB"
spectra['intensity'] = spectra['snr'] + 1.
spectra['intensity'].attrs['long_name'] = "Intensity (SNR + 1)"
spectra['intensity'].attrs['units'] = "dB"
spectra['snr'].attrs['units'] = ""
spectra['intensity'] = spectra['snr'] - 1.
spectra['intensity'].attrs['long_name'] = "Intensity (SNR - 1)"
spectra['intensity'].attrs['units'] = ""
spectra['intensity'] = \
spectra['intensity'].where(spectra.snr > snr_thresh)
spectra.attrs['snr_mask'] = "%f dB" % snr_thresh
spectra.attrs['snr_mask'] = "%f" % snr_thresh

if 'doppler_velocity' in which_moments:
velocity_dumb = _gpu_calc_velocity_dumb(
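
Note on the moments.py changes above: the removed _gpu_snr helper reported SNR in dB, while the new code derives a unitless ratio directly from the background-normalized power spectral density, with intensity defined as that ratio minus the unit noise floor. A standalone NumPy sketch of the same arithmetic on a synthetic spectrum (all values below are made up for illustration):

import numpy as np

# One synthetic background-normalized spectrum: a unit noise floor plus one peak.
vel_bins = np.linspace(-19.0, 19.0, 256)
dV = np.diff(vel_bins)[0]
psd = 1.0 + 5.0 * np.exp(-0.5 * ((vel_bins - 2.0) / 0.5) ** 2)

# Mirrors the committed lines: dV * psd.sum(dim='vel_bins') / (dV * len(vel_bins)).
power_with_noise = dV * psd.sum()
snr = power_with_noise / (dV * len(vel_bins))   # mean of the normalized spectrum
intensity = snr - 1.0                           # subtract the unit noise floor
print(snr, intensity)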
128 changes: 70 additions & 58 deletions highiq/calc/spectra.py
@@ -9,13 +9,12 @@

def _fast_expand(complex_array, factor, num_per_block=200):
shp = complex_array.shape
times = shp[0]
expanded_array = np.zeros((shp[0], shp[1], shp[2] * factor))
weights = np.tile(np.arange(0, factor) / factor, (shp[0], shp[1], 1))
for l in range(shp[2]):
gpu_array = tf.constant(complex_array[:, :, l], dtype=tf.float32)
if l < shp[2] - 1:
gpu_array2 = tf.constant(complex_array[:, :, l + 1], dtype=tf.float32)
for i in range(shp[2]):
gpu_array = tf.constant(complex_array[:, :, i], dtype=tf.float32)
if i < shp[2] - 1:
gpu_array2 = tf.constant(complex_array[:, :, i + 1], dtype=tf.float32)
diff_array = gpu_array2 - gpu_array
else:
diff_array = tf.zeros((shp[0], shp[1]))
@@ -25,10 +24,11 @@ def _fast_expand(complex_array, factor, num_per_block=200):
diff_array = tf.transpose(
np.tile(diff_array, (factor, 1, 1)), [1, 2, 0])
temp_array = diff_array * weights + rep_array
expanded_array[:, :, factor * l:factor * (l + 1)] = temp_array.numpy()
expanded_array[:, :, factor * i:factor * (i + 1)] = temp_array.numpy()
return expanded_array

def get_psd(spectra, gate_resolution=30., wavelength=None, fs=None, nfft=1024,

def get_psd(spectra, gate_resolution=60., wavelength=None, fs=None, nfft=1024, time_window=None,
acf_name='acf', acf_bkg_name='acf_bkg', block_size_ratio=1.0):
"""
This function will get the power spectral density from the autocorrelation function.
@@ -50,6 +50,8 @@ def get_psd(spectra, gate_resolution=30., wavelength=None, fs=None, nfft=1024,
with the magnitude and units of the sample rate.
nfft: int
The number of points to include in the FFT.
time_window: int
The number of time points to include in the rolling window.
acf_name: str
The name of the autocorrelation function field.
acf_bkg_name: str
@@ -66,33 +68,37 @@ def get_psd(spectra, gate_resolution=30., wavelength=None, fs=None, nfft=1024,

Q_ = UnitRegistry().Quantity
if fs is None:
if not "sample_rate" in spectra.attrs:
raise KeyError("If sample frequency is not specified, then ACT Dataset must contain a sample_rate " +
"attribute!")
if "sample_rate" not in spectra.attrs:
raise KeyError("If sample frequency is not specified, then ACT Dataset must contain a sample_rate attribute!")

fs_pint = Q_(spectra.attrs["sample_rate"])
fs_pint = fs_pint.to("Hz")
fs = fs_pint.magnitude


if wavelength is None:
if not "wavelength" in spectra.attrs:
if "wavelength" not in spectra.attrs:
raise KeyError("If wavelength is not specified, then the dataset must contain a wavelength attribute!")
fs_pint = Q_(spectra.attrs["wavelength"])
fs_pint = fs_pint.to("m")
wavelength = fs_pint.magnitude

if time_window is not None:
spectra = spectra.resample(time='%ds' % int(time_window)).mean()
else:
spectra[acf_bkg_name] = xr.DataArray(np.ones(spectra[acf_name].shape),
dims=spectra[acf_name].dims) * spectra[acf_bkg_name]

num_gates = int(gate_resolution / 3)
complex_coeff = spectra[acf_name].isel(complex=0).values +\
spectra[acf_name].isel(complex=1).values * 1j
complex_coeff = np.reshape(complex_coeff,
(complex_coeff.shape[0],
int(complex_coeff.shape[1] / (num_gates)),
int(complex_coeff.shape[2] * num_gates)))
ntimes = complex_coeff.shape[0]
complex_coeff_in = spectra[acf_name].isel(complex=0).values + \
spectra[acf_name].isel(complex=1).values * 1j

complex_coeff = np.zeros(
(complex_coeff_in.shape[0], int(complex_coeff_in.shape[1] / num_gates),
complex_coeff_in.shape[2]), dtype=np.complex128)
for i in range(complex_coeff.shape[1]):
complex_coeff[:, i, :] = np.mean(complex_coeff_in[:, (num_gates * i):(num_gates * i + 1), :], axis=1)
del complex_coeff_in
freq = np.fft.fftfreq(nfft) * fs

spectra.attrs['nyquist_velocity'] = "%f m s-1" % (wavelength / (4 * 1 / fs))
spectra['freq_bins'] = xr.DataArray(freq, dims=['freq'])
spectra['freq_bins'].attrs["long_name"] = "Doppler spectra bins in frequency units"
@@ -104,47 +110,38 @@
spectra['vel_bins'] = xr.DataArray(vel_bins, dims=('vel_bins'), attrs=attrs_dict)
spectra['freq_bins'] = spectra['freq_bins'][inds_sorted]


complex_coeff_bkg = (spectra[acf_bkg_name].isel(complex=0).values +
spectra[acf_bkg_name].isel(complex=1).values * 1j)
complex_coeff_bkg = np.reshape(complex_coeff_bkg,
(int(complex_coeff_bkg.shape[0] / num_gates),
int(complex_coeff_bkg.shape[1] * num_gates)))
frames = tf.signal.frame(complex_coeff,
frame_length=int(nfft),
frame_step=int(nfft / 2), pad_end=True)
window = tf.signal.hann_window(nfft).numpy()
multiples = (frames.shape[0], frames.shape[1], frames.shape[2], 1)
window = np.tile(window, multiples)
power = np.square(tf.math.abs(tf.signal.fft(frames * window)))
power = power.mean(axis=2)
complex_coeff_bkg_in = (spectra[acf_bkg_name].isel(complex=0).values + spectra[acf_bkg_name].isel(complex=1).values * 1j)

complex_coeff_bkg = np.zeros(
(complex_coeff_bkg_in.shape[0], int(complex_coeff_bkg_in.shape[1] / num_gates),
complex_coeff_bkg_in.shape[2]), dtype=np.complex128)
for i in range(complex_coeff.shape[1]):
complex_coeff_bkg[:, i, :] = np.mean(complex_coeff_bkg_in[:, (num_gates * i):(num_gates * i + 1), :], axis=1)
num_lags = complex_coeff_bkg_in.shape[2]
if nfft < num_lags:
raise RuntimeError("Number of points in FFT < number of lags in sample!")
pad_after = int((nfft - num_lags))
pad_before = 0
pad_lengths = tf.constant([[0, 0], [0, 0], [pad_before, pad_after]])
frames = tf.pad(complex_coeff, pad_lengths, mode='CONSTANT', constant_values=0)

power = np.abs(tf.signal.fft(frames).numpy())
attrs_dict = {'long_name': 'Range', 'units': 'm'}
spectra['range'] = xr.DataArray(
gate_resolution * np.arange(int(frames.shape[1])),
dims=('range'), attrs=attrs_dict)

spectra['power'] = xr.DataArray(
power[:, :, inds_sorted], dims=(('time', 'range', 'vel_bins')))
frames = tf.signal.frame(complex_coeff_bkg, frame_length=int(nfft),
frame_step=int(nfft / 2), pad_end=True)
window = tf.signal.hann_window(nfft).numpy()
multiples = (frames.shape[0], frames.shape[1], 1)
window = np.tile(window, multiples)
power = np.square(tf.abs(tf.signal.fft(frames * window)))
power = power.mean(axis=1)
frames = tf.pad(complex_coeff_bkg, pad_lengths, mode='CONSTANT', constant_values=0)
power = np.abs(tf.signal.fft(frames).numpy())

spectra['power_bkg'] = xr.DataArray(
power[:, inds_sorted], dims=('range', 'vel_bins'))
power[:, :, inds_sorted], dims=('time', 'range', 'vel_bins'))

# Subtract background noise
spectra['power_spectral_density'] = spectra['power'] / \
np.tile(spectra['power_bkg'], (ntimes, 1, 1))
spectra['power_spectral_density'] = spectra['power_spectral_density'].where(
spectra['power_spectral_density'] > 0, 0)
spectra['power_spectral_density'] = spectra['power'] / spectra['power_bkg']
spectra['power_spectral_density'].attrs["long_name"] = "Power spectral density"

spectra['power_spectral_density'].attrs["units"] = ''


spectra['range'].attrs['long_name'] = "Range"
spectra['range'].attrs['units'] = 'm'
spectra['vel_bins'].attrs['long_name'] = "Doppler velocity"
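
The spectral estimator in get_psd changes from Hann-windowed, half-overlapping frames of the interpolated ACF to a zero-padded FFT of the gate-averaged autocorrelation function, applied to both the signal and the background. A standalone NumPy sketch of the new estimator (array sizes and sample rate are made up; the committed code uses tf.pad and tf.signal.fft instead):

import numpy as np

ntimes, ngates, num_lags, nfft, fs = 4, 10, 128, 1024, 50e6
acf = (np.random.randn(ntimes, ngates, num_lags)
       + 1j * np.random.randn(ntimes, ngates, num_lags))   # complex ACF lags

# Zero-pad each lag series out to nfft points and take the FFT magnitude.
padded = np.zeros((ntimes, ngates, nfft), dtype=np.complex128)
padded[:, :, :num_lags] = acf
power = np.abs(np.fft.fft(padded, axis=-1))

# Reorder so the velocity (frequency) bins increase monotonically, matching
# the inds_sorted reindexing in the committed code.
freq = np.fft.fftfreq(nfft) * fs
inds_sorted = np.argsort(freq)
power = power[:, :, inds_sorted]
print(power.shape)   # (ntimes, ngates, nfft)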
@@ -175,23 +172,38 @@ def calc_num_peaks(my_spectra, **kwargs):
my_array = spectra.fillna(0).values
shp = my_array.shape
num_peaks = np.zeros((shp[0], shp[1]))
peak_loc = np.nan * np.ones((shp[0], shp[1], 5))

vel_bins = my_spectra['vel_bins'].values

if not 'height' in kwargs.keys():
height = 3
if 'prominence' not in kwargs.keys():
prominence = 0.01
else:
height = kwargs.pop('height')
prominence = kwargs.pop('prominence')

if not 'width' in kwargs.keys():
if 'width' not in kwargs.keys():
width = 8
else:
width = kwargs.pop('height')
width = kwargs.pop('width')

if 'height' not in kwargs.keys():
height = 1.5
else:
height = kwargs.pop('height')

for i in range(shp[0]):
for j in range(shp[1]):
num_peaks[i, j] = len(
find_peaks(my_array[i,j], height=height, width=width, **kwargs)[0])
peaks = find_peaks(my_array[i, j], prominence=prominence, width=width, height=height, **kwargs)[0]
num_peaks[i, j] = len(peaks)
for k in range(len(peaks)):
if k > 4:
continue
peak_loc[i, j, k] = vel_bins[peaks[k]]

my_spectra['npeaks'] = xr.DataArray(num_peaks, dims=('time', 'range'))
my_spectra['npeaks'].attrs['long_name'] = "Number of peaks in Doppler spectra"
my_spectra['npeaks'].attrs['units'] = "1"

my_spectra['peak_velocities'] = xr.DataArray(peak_loc, dims=('time', 'range', 'peak_no'))
my_spectra['peak_velocities'].attrs['long_name'] = "Dopper velocity peaks"
my_spectra['peak_velocities'].attrs['units'] = 'm s-1'
return my_spectra
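
calc_num_peaks now records up to five peak locations per gate in a new peak_velocities variable and accepts prominence, width, and height thresholds for scipy's find_peaks. A short usage sketch, assuming calc_num_peaks is exposed under highiq.calc like the other routines and that my_spectra is the spectra dataset produced earlier in the pipeline (the threshold values shown are simply the new defaults):

# Sketch: count spectral peaks per gate and read back their Doppler velocities.
my_spectra = highiq.calc.calc_num_peaks(my_spectra, prominence=0.01, width=8, height=1.5)
print(my_spectra['npeaks'])            # number of peaks per (time, range) gate
print(my_spectra['peak_velocities'])   # up to five peak velocities [m s-1] per gate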
