Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Subset dev #14

Open
wants to merge 14 commits into
base: master
Choose a base branch
from
Binary file added ASCAM instructions.pdf
Binary file not shown.
6 changes: 4 additions & 2 deletions README.md
Expand Up @@ -36,8 +36,10 @@ A straightforward installation can be achieved by first installing Anaconda or m

After successful installation of Anaconda, if you have Git installed, you can clone the ASCAM directory from Github onto your machine with the following command in the Terminal: *git clone https://github.com/AGPlested/ASCAM*. But if you had Git installed, you almost certainly knew that already.

20-03-01: Note, with the migration to Qt, some problems may be encountered on the Mac if you already have installations of Qt4+. A fresh environment (e.g. a new conda environment) can help.
21-05-25: Update to Big Sur - Pyqtgraph and PyQt need Python 3.8, PySide2 5.15 and the command export QT_MAC_WANTS_LAYER=1 must be issued in the Terminal.
20-03-01: With the migration to Qt, some problems may be encountered on the Mac if you already have installations of Qt4+. A fresh environment can help.

21-05-25: Running under macOS Big Sur - Pyqtgraph and PyQt need Python 3.8, PySide2 5.15 and the command `export QT_MAC_WANTS_LAYER=1` must be issued in the Terminal.


## Running ASCAM

Expand Down
13 changes: 13 additions & 0 deletions src/core/filtering.py
@@ -1,3 +1,8 @@
<<<<<<< HEAD
=======
import logging

>>>>>>> subsets
import numpy as np


Expand Down Expand Up @@ -149,7 +154,11 @@ def predict_forward(self, data, window_width):
forward_prediction /= window_width
else:
raise ValueError(
<<<<<<< HEAD
f"Mode {self.mode} is an unknown method for dealing\
=======
f"Mode {mode} is an unknown method for dealing\
>>>>>>> subsets
with edges"
)
return forward_prediction
Expand Down Expand Up @@ -189,7 +198,11 @@ def predict_backward(self, data, window_width):
backward_prediction /= window_width
else:
raise ValueError(
<<<<<<< HEAD
f"Mode {self.mode} is an unknown method for dealing\
=======
f"Mode {mode} is an unknown method for dealing\
>>>>>>> subsets
with edges"
)
return backward_prediction
Expand Down
20 changes: 20 additions & 0 deletions src/core/idealization.py
Expand Up @@ -104,6 +104,7 @@ def idealize_series(self):
self.idealize_episode(i)

def get_events(self, time_unit="s", trace_unit="A"):
<<<<<<< HEAD
if self.all_ep_inds != self.ind_idealized:
self.idealize_series()
event_array = np.zeros((0, 5)).astype(object)
Expand All @@ -118,6 +119,25 @@ def get_events(self, time_unit="s", trace_unit="A"):
(episode_number[:, np.newaxis], ep_events), axis=1
)
event_array = np.concatenate((event_array, ep_events), axis=0)
=======
# idealizing every trace, every time is killing us
# now we only get events from traces that were idealized
##if self.all_ep_inds != self.ind_idealized:
## self.idealize_series()
event_array = np.zeros((0, 5)).astype(object)
for episode in self.data.series:
if self.idealization(episode.n_episode) is not None:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the same as if self.idealization(episode.n_episode): so you don't have to use is not None at the end. Python treats None as well as empty list ([]) and empty string ('') as False in this context.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You are right. Maybe I put an explanatory comment.

# create a column containing the episode number
ep_events = Idealizer.extract_events(
self.idealization(episode.n_episode), self.time()
)
episode_number = episode.n_episode * np.ones(len(ep_events[:, 0]))
# glue that column to the event
ep_events = np.concatenate(
(episode_number[:, np.newaxis], ep_events), axis=1
)
event_array = np.concatenate((event_array, ep_events), axis=0)
>>>>>>> subsets
event_array[:, 1] *= CURRENT_UNIT_FACTORS[trace_unit]
event_array[:, 2:] *= TIME_UNIT_FACTORS[time_unit]
return event_array
Expand Down
9 changes: 9 additions & 0 deletions src/core/readdata.py
@@ -1,3 +1,7 @@
<<<<<<< HEAD
=======
import csv
>>>>>>> subsets
import pickle
import logging

Expand Down Expand Up @@ -26,8 +30,13 @@ def load(filename, filetype=False, dtype=None, headerlength=None, fs=None):
output = load_matlab(filename)
elif filetype == "bin":
output = load_binary(filename, dtype, headerlength, fs)
<<<<<<< HEAD
# elif filetype == "tdt":
# output = load_tdt(filename)
=======
elif filetype == "tdt":
output = load_tdt(filename)
>>>>>>> subsets
else:
print("Filetype not supported.")
output = False
Expand Down
102 changes: 102 additions & 0 deletions src/core/recording.py
Expand Up @@ -77,7 +77,11 @@ def from_file(
else:
raise ValueError(f"Cannot load from filetype {filetype}.")

<<<<<<< HEAD
recording.lists = {"All": (list(range(len(recording["raw_"]))), None)}
=======
recording.subsets = {"All": (list(range(len(recording["raw_"]))), None)}
>>>>>>> subsets

return recording

Expand All @@ -93,6 +97,7 @@ def __init__(self, filename="", sampling_rate=4e4):
# attributes for storing and managing the data
self["raw_"] = []
self.current_datakey = "raw_"
<<<<<<< HEAD
self.current_ep_ind = 0

# variables for user created lists of episodes
Expand Down Expand Up @@ -123,6 +128,43 @@ def episodes_in_lists(self, names):
debug_logger.debug(f"Selected episodes: {indices}")
return np.array(self.series)[indices]

=======
self.current_subsets = ["All"]
self.current_ep_ind = 0

# variables for user-created subsets of episodes
# `subsets` stores the indices of the episodes in the subset in the first
# element and the associated keyboard key as the second:
# subsets[name] = ([indices], key)
self.subsets = dict()

def select_episodes(self, datakey=None, subsets=None):
if datakey is None:
datakey = self.current_datakey
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Another unimportant tip/comment: you can do this shorter like datakey = datakey or self.current_datakey.

if subsets is None:
subsets = self.current_subsets
indices = list()
for subsetname in subsets:
indices.extend(self.subsets[subsetname][0])
indices = np.array(list(set(indices)))
return np.array(self[datakey])[indices]

def episodes_in_subsets(self, subset_names):
if isinstance(subset_names, str): #AP corrected here 291121
subset_names = [subset_names]
indices = list()
for subsetname in subset_names:
indices.extend(self.subsets[subsetname][0])
# remove duplicate indices
indices = np.array(list(set(indices)))
print (indices)
aplested marked this conversation as resolved.
Show resolved Hide resolved
debug_logger.debug(f"Selected episodes: {indices}")
if indices != []:
return np.array(self.series)[indices]
else:
return None

>>>>>>> subsets
@property
def series(self):
return self[self.current_datakey]
Expand Down Expand Up @@ -289,7 +331,11 @@ def series_hist(
"""Create a histogram of all episodes in the presently selected series
"""
debug_logger.debug(f"series_hist")
<<<<<<< HEAD
# put all piezo traces and all current traces in lists
=======
# put all piezo traces and all current traces in subsets
>>>>>>> subsets
piezos = [episode.piezo for episode in self.series]
traces = [episode.trace for episode in self.series]
trace_list = []
Expand Down Expand Up @@ -397,7 +443,11 @@ def _load_from_pickle(recording):
def export_idealization(
self,
filepath,
<<<<<<< HEAD
lists_to_save,
=======
subsets_to_save,
>>>>>>> subsets
time_unit,
trace_unit,
amplitudes,
Expand All @@ -410,7 +460,11 @@ def export_idealization(
if not filepath.endswith(".csv"):
filepath += ".csv"

<<<<<<< HEAD
episodes = self.select_episodes(lists=lists_to_save)
=======
episodes = self.select_episodes(subsets=subsets_to_save)
>>>>>>> subsets

export_array = np.zeros(
shape=(len(episodes) + 1, episodes[0].idealization.size)
Expand All @@ -437,19 +491,31 @@ def export_matlab(
self,
filepath,
datakey,
<<<<<<< HEAD
lists_to_save,
=======
subsets_to_save,
>>>>>>> subsets
save_piezo,
save_command,
time_unit="s",
trace_unit="A",
piezo_unit="V",
command_unit="V",
):
<<<<<<< HEAD
"""Export all the episodes in the givens list(s) from the given series
(only one) to a matlab file."""
debug_logger.debug(
f"export_matlab:\n"
f"saving the lists: {lists_to_save}\n"
=======
"""Export all the episodes in the givens subset(s) from the given series
(only one) to a matlab file."""
debug_logger.debug(
f"export_matlab:\n"
f"saving the subsets: {subsets_to_save}\n"
>>>>>>> subsets
f"of series {datakey}\n"
f"save piezo: {save_piezo}; "
"save command: {save_command}\n"
Expand All @@ -464,7 +530,11 @@ def export_matlab(
export_dict = dict()
export_dict["time"] = self["raw_"][0].time * TIME_UNIT_FACTORS[time_unit]
fill_length = len(str(len(self[datakey])))
<<<<<<< HEAD
episodes = self.select_episodes(datakey, lists_to_save)
=======
episodes = self.select_episodes(datakey, subsets_to_save)
>>>>>>> subsets
# # get the episodes we want to save
# indices = list()
# for listname in lists_to_save:
Expand All @@ -484,19 +554,31 @@ def export_matlab(
)
io.savemat(filepath, export_dict)

<<<<<<< HEAD
def export_axo(self, filepath, datakey, lists_to_save, save_piezo, save_command):
=======
def export_axo(self, filepath, datakey, subsets_to_save, save_piezo, save_command):
>>>>>>> subsets
"""Export data to an axograph file.

Argument:
filepath - location where the file is to be stored
datakey - series that should be exported
<<<<<<< HEAD
lists_to_save - the user-created lists of episodes that should be
=======
subsets_to_save - the user-created subsets of episodes that should be
>>>>>>> subsets
includes
save_piezo - if true piezo data will be exported
save_command - if true command voltage data will be exported"""
debug_logger.debug(
f"export_axo:\n"
<<<<<<< HEAD
f"saving the lists: {lists_to_save}\n"
=======
f"saving the subsets: {subsets_to_save}\n"
>>>>>>> subsets
f"of series {datakey}\n"
f"save piezo: {save_piezo}; save command: {save_command}\n"
f"saving to destination: {filepath}"
Expand All @@ -515,7 +597,11 @@ def export_axo(self, filepath, datakey, lists_to_save, save_piezo, save_command)
data_list = [self.episode().time]

# get the episodes we want to save
<<<<<<< HEAD
episodes = self.select_episodes(datakey, lists_to_save)
=======
episodes = self.select_episodes(datakey, subsets_to_save)
>>>>>>> subsets

for episode in episodes:
data_list.append(np.array(episode.trace))
Expand All @@ -530,7 +616,11 @@ def export_axo(self, filepath, datakey, lists_to_save, save_piezo, save_command)
file.write(filepath)

def create_first_activation_table(
<<<<<<< HEAD
self, datakey=None, time_unit="ms", lists_to_save=None, trace_unit="pA"
=======
self, datakey=None, time_unit="ms", subsets_to_save=None, trace_unit="pA"
>>>>>>> subsets
):
if datakey is None:
datakey = self.current_datakey
Expand All @@ -544,7 +634,11 @@ def create_first_activation_table(
episode.first_activation_amplitude
* CURRENT_UNIT_FACTORS[trace_unit],
)
<<<<<<< HEAD
for episode in self.select_episodes(datakey, lists_to_save)
=======
for episode in self.select_episodes(datakey, subsets_to_save)
>>>>>>> subsets
]
)
return export_array.astype(object)
Expand All @@ -554,12 +648,20 @@ def export_first_activation(
filepath,
datakey=None,
time_unit="ms",
<<<<<<< HEAD
lists_to_save=None,
=======
subsets_to_save=None,
>>>>>>> subsets
trace_unit="pA",
):
"""Export csv file of first activation times."""
export_array = self.create_first_activation_table(
<<<<<<< HEAD
datakey, time_unit, lists_to_save, trace_unit
=======
datakey, time_unit, subsets_to_save, trace_unit
>>>>>>> subsets
)
header = [
"Episode Number",
Expand Down
10 changes: 10 additions & 0 deletions src/core/savedata.py
@@ -1,6 +1,11 @@
from scipy import io
import os
import json
<<<<<<< HEAD
=======
import pickle
from ..utils.tools import parse_filename
>>>>>>> subsets


def save_metadata(data, filename):
Expand Down Expand Up @@ -96,8 +101,13 @@ def save_data(data, filename="", filetype="mat", save_piezo=True, save_command=T
save_piezo=save_piezo,
save_command=save_command,
)
<<<<<<< HEAD
# elif filetype == "pkl":
# return_status = save_pickle(data=data, filepath=filepath)
=======
elif filetype == "pkl":
return_status = save_pickle(data=data, filepath=filepath)
>>>>>>> subsets
else:
print('Can only save as ".mat"!')
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Or as ".pkl" now :).

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh wait, actually no. The save_pickle function this is referring to is still commented out. Nevermind!

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I removed it again


Expand Down