Skip to content

Commit

Permalink
Merge branch 'v1.2.2'
Browse files Browse the repository at this point in the history
  • Loading branch information
castillohair committed Oct 19, 2019
2 parents 031a7af + 9cd83ef commit 5e187b0
Show file tree
Hide file tree
Showing 20 changed files with 436 additions and 140 deletions.
2 changes: 2 additions & 0 deletions .gitattributes
@@ -0,0 +1,2 @@
"Install FlowCal (macOS)" eol=lf
"Run FlowCal (macOS)" eol=lf
2 changes: 2 additions & 0 deletions .gitignore
Expand Up @@ -3,3 +3,5 @@ dist
FlowCal.egg-info
*.pyc
*.ipynb_checkpoints
.DS_Store
*/.DS_Store
25 changes: 25 additions & 0 deletions .readthedocs.yml
@@ -0,0 +1,25 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: doc/conf.py

# Build documentation with MkDocs
#mkdocs:
# configuration: mkdocs.yml

# Optionally build your docs in additional formats such as PDF and ePub
formats: all

# Optionally set the version of Python and requirements required to build your docs
python:
version: 3.7
install:
- requirements: doc/requirements.txt
- method: setuptools
path: .
2 changes: 1 addition & 1 deletion FlowCal/__init__.py
Expand Up @@ -6,7 +6,7 @@
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
__version__ = '1.2.1'
__version__ = '1.2.2'

from . import io
from . import excel_ui
Expand Down
28 changes: 13 additions & 15 deletions FlowCal/excel_ui.py
Expand Up @@ -1454,20 +1454,16 @@ def show_open_file_dialog(filetypes):
was chosen.
"""
# The following line is used to ensure Tk's main window is not shown
Tk().withdraw()

# OSX ONLY: Call bash script to prevent file select window from sticking
# after use.
if platform.system() == 'Darwin':
subprocess.call("defaults write org.python.python " +
"ApplePersistenceIgnoreState YES", shell=True)
filename = askopenfilename(filetypes=filetypes)
subprocess.call("defaults write org.python.python " +
"ApplePersistenceIgnoreState NO", shell=True)
else:
filename = askopenfilename(filetypes=filetypes)

# initialize tkinter root window
root = Tk()
# remove main root window (will cause kernel panic on OSX if not present)
root.withdraw()
# link askopenfilename window to root window
filename = askopenfilename(parent = root, filetypes=filetypes)
# refresh root window to remove askopenfilename window
root.update()

return filename

def run(input_path=None,
Expand Down Expand Up @@ -1613,9 +1609,11 @@ def run_command_line(args=None):
from ``sys.argv``.
See Also
----------
FlowCal.excel_ui.run()
--------
FlowCal.excel_ui.run
References
----------
http://amir.rachum.com/blog/2017/07/28/python-entry-points/
"""
Expand Down
144 changes: 136 additions & 8 deletions FlowCal/io.py
Expand Up @@ -680,7 +680,7 @@ class FCSFile(object):
If more than one data set is detected in the same file.
Warning
If the ANALYSIS segment was not successfully parsed.
Notes
-----
The Flow Cytometry Standard (FCS) describes the de facto standard
Expand Down Expand Up @@ -730,7 +730,7 @@ class FCSFile(object):
"""
def __init__(self, infile):

self._infile = infile

if isinstance(infile, six.string_types):
Expand Down Expand Up @@ -832,7 +832,7 @@ def __init__(self, infile):
self._analysis = {}
else:
self._analysis = {}

# Import DATA segment
param_ranges = [float(self._text['$P{0}R'.format(p)])
for p in range(1,D+1)]
Expand Down Expand Up @@ -947,6 +947,22 @@ def __hash__(self):
def __repr__(self):
return str(self.infile)

_FCSDataPickleState = collections.namedtuple(
typename='_FCSDataPickleState',
field_names=['infile',
'text',
'analysis',
'data_type',
'time_step',
'acquisition_start_time',
'acquisition_end_time',
'channels',
'amplification_type',
'detector_voltage',
'amplifier_gain',
'range',
'resolution'])

class FCSData(np.ndarray):
"""
Object containing events data from a flow cytometry sample.
Expand All @@ -962,7 +978,7 @@ class FCSData(np.ndarray):
the detector and the amplifiers are parsed from the TEXT segment of the
FCS file and exposed as attributes. The TEXT and ANALYSIS segments are
also exposed as attributes.
Parameters
----------
infile : str or file-like
Expand Down Expand Up @@ -1760,6 +1776,118 @@ def __array_finalize__(self, obj):
if hasattr(obj, '_resolution'):
self._resolution = copy.deepcopy(obj._resolution)

def __reduce__(self):
"""
For pickling.
Call NDArray's __reduce__ and append FCSData state information. Per
the pickle/cPickle documentation, __reduce__ should return a tuple
between two and five elements long. The first three elements are
described below. FCSData augments the `state` element but otherwise
relies on NDArray to populate the rest of the tuple.
Returns
-------
init : callable
Callable used to recreate the inital instance
init_args : tuple
Arguments to be passed to `init` to reconstitute the initial
instance
state : tuple
Internal state data needed to fully reconstruct the instance. Data
is passed to self.__setstate__() to reconstruct the object during
deserialization. Contains the FCSData state information (packaged
in a _FCSDataPickleState namedtuple) as the first element and
(if used) the NDArray state information as the second element.
See Also
--------
__setstate__ : Sets the state data upon deserialization.
Notes
-----
Solution addresses Issue #277, based on StackOverflow solution:
https://stackoverflow.com/a/26599346
"""
# Collect FCSData state information
fcsdata_state = _FCSDataPickleState(
infile = self._infile,
text = self._text,
analysis = self._analysis,
data_type = self._data_type,
time_step = self._time_step,
acquisition_start_time = self._acquisition_start_time,
acquisition_end_time = self._acquisition_end_time,
channels = self._channels,
amplification_type = self._amplification_type,
detector_voltage = self._detector_voltage,
amplifier_gain = self._amplifier_gain,
range = self._range,
resolution = self._resolution)

# Call NDArray's __reduce__ to populate the initial reduce value
super_reduce_value = super(FCSData, self).__reduce__()

# Update state information
reduce_value = list(super_reduce_value)
if len(super_reduce_value) > 2:
super_state = super_reduce_value[2]
reduce_value[2] = (fcsdata_state, super_state)
else:
reduce_value.append((fcsdata_state,))
reduce_value = tuple(reduce_value)

return reduce_value

def __setstate__(self, state):
"""
For unpickling.
Call NDArray's __setstate__ with the NDArray state information if
provided and then populate the FCSData state information.
Parameters
----------
state : tuple
State data originally aggregated by __reduce__. Should contain
the FCSData state information (packaged in a _FCSDataPickleState
namedtuple) as the first element and (if used) the NDArray state
information as the second element.
See Also
---------
__reduce__ : Called during serialization to aggregate relevant state
data.
Notes
-----
Solution addresses Issue #277, based on StackOverflow solution:
https://stackoverflow.com/a/26599346
"""
# Unpackage state information (originally packaged by __reduce__)
fcsdata_state = state[0]
if len(state) > 1:
super_state = state[1]
# Call NDArray's __setstate__ with its state information
super(FCSData, self).__setstate__(super_state)

# Populate FCSData state information
self._infile = fcsdata_state.infile
self._text = fcsdata_state.text
self._analysis = fcsdata_state.analysis
self._data_type = fcsdata_state.data_type
self._time_step = fcsdata_state.time_step
self._acquisition_start_time = fcsdata_state.acquisition_start_time
self._acquisition_end_time = fcsdata_state.acquisition_end_time
self._channels = fcsdata_state.channels
self._amplification_type = fcsdata_state.amplification_type
self._detector_voltage = fcsdata_state.detector_voltage
self._amplifier_gain = fcsdata_state.amplifier_gain
self._range = fcsdata_state.range
self._resolution = fcsdata_state.resolution

# Helper functions
@staticmethod
def _parse_time_string(time_str):
Expand Down Expand Up @@ -1944,8 +2072,8 @@ def __getitem__(self, key):
slicing the `channel_info` attribute.
"""
# If key is a tuple with no None, decompose and interpret key[1] as
# the channel. If it contains Nones, pass directly to
# If key is a tuple with no None, decompose and interpret key[1] as
# the channel. If it contains Nones, pass directly to
# ndarray.__getitem__() and convert to np.ndarray. Otherwise, pass
# directly to ndarray.__getitem__().
if isinstance(key, tuple) and len(key) == 2 \
Expand Down Expand Up @@ -2030,8 +2158,8 @@ def __setitem__(self, key, item):
channel name when writing to a FCSData object.
"""
# If key is a tuple with no Nones, decompose and interpret key[1] as
# the channel. If it contains Nones, pass directly to
# If key is a tuple with no Nones, decompose and interpret key[1] as
# the channel. If it contains Nones, pass directly to
# ndarray.__setitem__().
if isinstance(key, tuple) and len(key) == 2 \
and key[0] is not None and key[1] is not None:
Expand Down
2 changes: 2 additions & 0 deletions FlowCal/mef.py
Expand Up @@ -917,6 +917,8 @@ def get_transform_fxn(data_beads,
else:
mef_channels = [mef_channels]
mef_values = [mef_values]
# Transform mef_values to numpy array
mef_values = np.array(mef_values)

# Ensure matching number of `mef_values` for all channels (this implies
# that the calibration beads have the same number of subpopulations for
Expand Down

0 comments on commit 5e187b0

Please sign in to comment.