Bump version for 0.10.8
gb119 committed Oct 18, 2023
1 parent 849d324 commit b55f6da
Showing 95 changed files with 645 additions and 647 deletions.
14 changes: 7 additions & 7 deletions Stoner/Analysis.py
@@ -193,7 +193,7 @@ def integrate(
bounds=lambda x, y: True,
**kargs,
):
"""Inegrate a column of data, optionally returning the cumulative integral.
"""Integrate a column of data, optionally returning the cumulative integral.
Args:
xcol (index):
@@ -214,7 +214,7 @@ def integrate(
bounds (callable):
A function that evaluates for each row to determine if the data should be integrated over.
**kargs:
- Other keyword arguements are fed direct to the scipy.integrate.cumtrapz method
+ Other keyword arguments are fed direct to the scipy.integrate.cumtrapz method
Returns:
(:py:class:`Stoner.Data`):
@@ -240,7 +240,7 @@ def integrate(
resultdata = cumtrapz(yd, xdat, **kargs)
resultdata = np.append(np.array([0]), resultdata)
if result is not None:
- header = header if header is not None else f"Intergral of {self.column_headers[_.ycol]}"
+ header = header if header is not None else f"Integral of {self.column_headers[_.ycol]}"
if isinstance(result, bool) and result:
self.add_column(resultdata, header=header, replace=False)
else:
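The integrate docstring above documents the call pattern; here is a minimal usage sketch (hypothetical data and column names, untested against this exact revision, and the array-plus-column_headers constructor is assumed from the wider Stoner API):

    import numpy as np
    from Stoner import Data

    x = np.linspace(0, 2 * np.pi, 101)
    d = Data(np.column_stack((x, np.sin(x))), column_headers=["t", "sin(t)"])
    # result=True takes the add_column branch shown above, appending the
    # cumulative integral (computed by scipy.integrate.cumtrapz) as a new column
    d.integrate("t", "sin(t)", result=True, header="Integral of sin(t)")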
@@ -267,7 +267,7 @@ def normalise(self, target=None, base=None, replace=True, header=None, scale=Non
Keyword Arguments:
base (index):
- The column to normalise to, can be an integer or string. **Depricated** can also be a tuple (low,
+ The column to normalise to, can be an integer or string. **Deprecated** can also be a tuple (low,
high) being the output range
replace (bool):
Set True (default) to overwrite the target data columns
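Based on the signature in the hunk header, a hedged sketch of normalise follows; that the tuple-valued scale gives the output range is an assumption, standing in for the deprecated tuple form of base:

    # Rescale the hypothetical "sin(t)" column in place to the range (0, 1)
    d.normalise("sin(t)", scale=(0.0, 1.0), replace=True, header="sin(t) (normalised)")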
@@ -461,7 +461,7 @@ def transform(set1, *p):
perr = np.sqrt(np.diagonal(pcov))
self.data[:, _.xcol], self.data[:, _.ycol] = func(self.data[:, _.xcol], self.data[:, _.ycol], *popt)
self["Stitching Coefficients"] = list(popt)
self["Stitching Coeffient Errors"] = list(perr)
self["Stitching Coefficient Errors"] = list(perr)
self["Stitching overlap"] = (lower, upper)
self["Stitching Window"] = num_pts

@@ -478,7 +478,7 @@ def threshold(self, threshold, **kargs):
col (index):
Column index to look for data in
rising (bool):
- look for case where the data is increasing in value (defaukt True)
+ look for case where the data is increasing in value (default True)
falling (bool):
look for case where data is falling in value (default False)
xcol (index, bool or None):
@@ -496,7 +496,7 @@ def threshold(self, threshold, **kargs):
Either a single fractional row index, or an interpolated x value
Note:
- If you don't sepcify a col value or set it to None, then the assigned columns via the
+ If you don't specify a col value or set it to None, then the assigned columns via the
:py:attr:`DataFile.setas` attribute will be used.
Warning:
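A sketch of threshold using only the keywords documented above, reusing the hypothetical d from the integrate sketch (untested):

    # Interpolated "t" value at which "sin(t)" first rises through 0.5;
    # with xcol omitted, a fractional row index would be returned instead
    crossing = d.threshold(0.5, col="sin(t)", rising=True, falling=False, xcol="t")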
38 changes: 19 additions & 19 deletions Stoner/Core.py
@@ -86,7 +86,7 @@ class DataFile(
The possible mime-types of data files represented by each matching filename pattern in
:py:attr:`Datafile.pattern`.
patterns (list):
- A list of filename extenion glob patterns that matrches the expected filename patterns for a DataFile
+ A list of filename extension glob patterns that matrches the expected filename patterns for a DataFile
(*.txt and *.dat")
priority (int):
Used to indicate the order in which subclasses of :py:class:`DataFile` are tried when loading data. A higher
@@ -96,11 +96,11 @@ class DataFile(
shape (tuple of integers):
Returns the shape of the data (rows,columns) - equivalent to self.data.shape.
records (numpy record array):
- Returns the data in the form of a list of yuples where each tuple maps to the columsn names.
+ Returns the data in the form of a list of yuples where each tuple maps to the columns names.
clone (DataFile):
Creates a deep copy of the :py:class:`DataFile` object.
dict_records (array of dictionaries):
- View the data as an array or dictionaries where each dictionary represnets one row with keys dervied
+ View the data as an array or dictionaries where each dictionary represents one row with keys derived
from column headers.
dims (int):
When data columns are set as x,y,z etc. returns the number of dimensions implied in the data set
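The attributes listed here can be exercised directly; a short sketch reusing the hypothetical d from the integrate example above:

    print(d.shape)         # (rows, columns) - equivalent to d.data.shape
    recs = d.records       # numpy record array mapping tuples to the column names
    rows = d.dict_records  # one dictionary per row, keys derived from column headers
    d2 = d.clone           # deep copy, independent of d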
@@ -257,7 +257,7 @@ def __init__(self, *args, **kargs):
# ============================================================================================

def _init_single(self, *args, **kargs):
"""Handle constructor with 1 arguement - called from __init__."""
"""Handle constructor with 1 argument - called from __init__."""
arg = args[0]
inits = {
path_types + (bool, bytes, io.IOBase): self._init_load,
@@ -396,7 +396,7 @@ def _init_list(self, arg, **kargs):
raise TypeError(f"Unable to construct DataFile from a {type(arg)}")

# ============================================================================================
- ############################ Speical Methods ###############################################
+ ############################ Special Methods ###############################################
# ============================================================================================

def __call__(self, *args, **kargs):
@@ -448,7 +448,7 @@ def __deepcopy__(self, memo):
return result

def __dir__(self):
"""Reeturns the attributes of the current object.
"""Returns the attributes of the current object.
Augmenting the keys of self.__dict__ with the attributes that __getattr__ will handle."""
attr = dir(type(self))
@@ -638,7 +638,7 @@ def _load(self, filename, *args, **kargs):
Raised if the first row does not start with 'TDI Format 1.5' or 'TDI Format=1.0'.
Note:
- The *_load* methods shouldbe overidden in each child class to handle the process of loading data from
+ The *_load* methods shouldbe overridden in each child class to handle the process of loading data from
disc. If they encounter unexpected data, then they should raise StonerLoadError to signal this, so that
the loading class can try a different sub-class instead.
"""
@@ -916,7 +916,7 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla
Returns:
self:
- The :py:class:`DataFile` instance with the additonal column inserted.
+ The :py:class:`DataFile` instance with the additional column inserted.
Note:
Like most :py:class:`DataFile` methods, this method operates in-place in that it also modifies
@@ -991,7 +991,7 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla

# If not replacing, then add extra columns to existing data.
if not replace:
- colums = copy.copy(self.column_headers)
+ columns = copy.copy(self.column_headers)
old_setas = self.setas.clone
if index == self.data.shape[1]: # appending column
self.data = DataArray(np.append(self.data, np_data, axis=1), setas=self.setas.clone)
@@ -1003,10 +1003,10 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla
setas=self.setas.clone,
)
for ix in range(0, index):
- self.column_headers[ix] = colums[ix]
+ self.column_headers[ix] = columns[ix]
self.setas[ix] = old_setas[ix]
for ix in range(index, dc):
- self.column_headers[ix + cw] = colums[ix]
+ self.column_headers[ix + cw] = columns[ix]
self.setas[ix + cw] = old_setas[ix]
# Check that we don't need to expand to overwrite with the new data
if index + cw > self.shape[1]:
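A sketch of the add_column behaviour the code above implements, reusing the hypothetical d and x from the integrate example (that a missing index appends at the end is inferred from the "appending column" branch shown above):

    d.add_column(np.cos(x), header="cos(t)", index=1)  # insert before current column 1
    d.add_column(2 * x, header="2t")                   # no index: append as last column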
@@ -1069,7 +1069,7 @@ def del_column(self, col=None, duplicates=False):
- If duplicates is True and col is None then all duplicate columns are removed,
- if col is not None and duplicates is True then all duplicates of the specified column are removed.
- If duplicates is False and *col* is either None or False then all masked columns are deleted. If
- *col* is True, then all columns that are not set i the :py:attr:`setas` attrobute are delted.
+ *col* is True, then all columns that are not set i the :py:attr:`setas` attrobute are deleted.
- If col is a list (duplicates should not be None) then all the matching columns are found.
- If col is an iterable of booleans, then all columns whose elements are False are deleted.
- If col is None and duplicates is None, then all columns with at least one element masked
@@ -1142,7 +1142,7 @@ def del_nan(self, col=None, clone=False):
else: # Not cloning so ret is self
ret = self

- if col is None: # If col is still None, use all columsn that are set to any value in self.setas
+ if col is None: # If col is still None, use all columns that are set to any value in self.setas
col = [ix for ix, col in enumerate(self.setas) if col != "."]
if not isLikeList(col): # If col isn't a list, make it one now
col = [col]
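The branch above shows the col=None default in action; a short sketch against the hypothetical d:

    # col=None: scan every column assigned via setas; clone=True would instead
    # return a cleaned copy and leave d untouched
    d.del_nan(col=None, clone=False)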
@@ -1163,13 +1163,13 @@ def del_rows(self, col=None, val=None, invert=False):
Args:
col (list,slice,int,string, re, callable or None):
- Column containg values to search for.
+ Column containing values to search for.
val (float or callable):
Specifies rows to delete. May be:
- None - in which case the *col* argument is used to identify rows to be deleted,
- a float in which case rows whose column col = val are deleted
- or a function - in which case rows where the function evaluates to be true are deleted.
- - a tuple, in which case rows where column col takes value between the minium and maximum of
+ - a tuple, in which case rows where column col takes value between the minimum and maximum of
the tuple are deleted.
Keyword Arguments:
@@ -1247,7 +1247,7 @@ def del_rows(self, col=None, val=None, invert=False):
return self
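A sketch of del_rows with the tuple form documented above (band edges hypothetical; that invert flips the selection is an assumption taken from the signature):

    d.del_rows("sin(t)", (-0.1, 0.1))               # drop rows with sin(t) inside the band
    d.del_rows("sin(t)", (-0.1, 0.1), invert=True)  # assumed: drop rows outside it instead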

def dir(self, pattern=None):
"""Return a list of keys in the metadata, filtering wiht a regular expression if necessary.
"""Return a list of keys in the metadata, filtering with a regular expression if necessary.
Keyword Arguments:
pattern (string or re):
@@ -1271,7 +1271,7 @@ def filter(self, func=None, cols=None, reset=True):
Args:
func (callable):
- is a callable object that should take a single list as a p[arameter representing one row.
+ is a callable object that should take a single list as a p[parameter representing one row.
cols (list):
a list of column indices that are used to form the list of values passed to func.
reset (bool):
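A sketch of filter based on the argument descriptions above; that rows for which the callable returns False end up masked is an assumption:

    # Pass each row's column-0 value to the callable; reset=True clears any prior mask
    d.filter(lambda row: row[0] > 0, cols=[0], reset=True)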
@@ -1357,7 +1357,7 @@ def load(cls, *args, **kargs):
Each subclass is scanned in turn for a class attribute priority which governs the order in which they
are tried. Subclasses which can make an early positive determination that a file has the correct format
- can have higher priority levels. Classes should return a suitable expcetion if they fail to load the file.
+ can have higher priority levels. Classes should return a suitable exception if they fail to load the file.
If no class can load a file successfully then a StonerUnrecognisedFormat exception is raised.
"""
@@ -1569,7 +1569,7 @@ def to_pandas(self):
Notes:
In addition to transferring the numerical data, the DataFrame's columns are set to
a multi-level index of the :py:attr:`Stoner.Data.column_headers` and :py:attr:`Stoner.Data.setas`
- calues. A pandas DataFrame extension attribute, *metadata* is registered and is used to store
+ values. A pandas DataFrame extension attribute, *metadata* is registered and is used to store
the metadata from the :py:class:`Stoner.Data` object. This pandas extension attribute is in fact a trivial
subclass of the :py:class:`Stoner.core.typeHintedDict`.
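A sketch of the conversion this note describes (pandas must be installed; the metadata extension attribute is taken from the note above):

    df = d.to_pandas()  # columns become a multi-level index of column_headers and setas
    meta = df.metadata  # registered extension attribute carrying the Stoner metadata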
2 changes: 1 addition & 1 deletion Stoner/FileFormats.py
@@ -58,6 +58,6 @@

warn(
"*" * 80
+ "\nStoner.FileFormats is a depricated module - use Stoner.formats and it's sub-modules now!\n"
+ "\nStoner.FileFormats is a deprecated module - use Stoner.formats and it's sub-modules now!\n"
+ "*" * 80
)
2 changes: 1 addition & 1 deletion Stoner/Folders.py
@@ -14,7 +14,7 @@ class DataFolder(DataMethodsMixin, DiskBasedFolderMixin, baseFolder):

"""Provide an interface to manipulating lots of data files stored within a directory structure on disc.
- By default, the members of the DataFolder are isntances of :class:`Stoner.Data`. The DataFolder emplys a lazy
+ By default, the members of the DataFolder are instances of :class:`Stoner.Data`. The DataFolder emplys a lazy
open strategy, so that files are only read in from disc when actually needed.
.. inheritance-diagram:: DataFolder
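A sketch of the lazy-open behaviour described above (directory path hypothetical):

    from Stoner.Folders import DataFolder

    folder = DataFolder("./results")  # members are Stoner.Data, read only when accessed
    for d in folder:
        print(d.filename, d.shape)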
14 changes: 7 additions & 7 deletions Stoner/HDF5.py
@@ -41,7 +41,7 @@ def get_hdf_loader(f, default_loader=lambda *args, **kargs: None):
typ = bytes2str(f.attrs.get("type", ""))
if (typ not in globals() or not isinstance(globals()[typ], type)) and "module" not in f.attrs:
raise StonerLoadError(
"HDF5 Group does not speicify a recongized type and does not specify a module to use to load."
"HDF5 Group does not specify a recognized type and does not specify a module to use to load."
)

if "module" in f.attrs:
@@ -121,13 +121,13 @@ class HDF5File(DataFile):
kargs (dict):
Dictionary of keyword arguments
- If the first non-keyword arguement is not an h5py File or Group then
+ If the first non-keyword argument is not an h5py File or Group then
initialises with a blank parent constructor and then loads data, otherwise,
calls parent constructor.
Data layout is dead simple, the numerical data is in a dataset called *data*,
- metadata are attribtutes of a group called *metadata* with the keynames being the
- full name + typehint of the stanard DataFile metadata dictionary
+ metadata are attributes of a group called *metadata* with the keynames being the
+ full name + typehint of the standard DataFile metadata dictionary
*column_headers* are an attribute of the root file/group
*filename* is set from either an attribute called filename, or from the
group name or from the hdf5 filename.
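A sketch of loading a file laid out as described (filename hypothetical; the filename-based constructor path follows the note above about non-h5py first arguments):

    from Stoner.HDF5 import HDF5File

    f = HDF5File("scan.hdf5")  # numerical data read from the *data* dataset
    print(f.column_headers)    # restored from the root file/group attribute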
@@ -392,7 +392,7 @@ def __getter__(self, name, instantiate=True):
the baseFolder class uses a :py:class:`regexpDict` to store objects in.
Keyword Arguments:
- instatiate (bool):
+ instantiate (bool):
If True (default) then always return a :py:class:`Stoner.Core.Data` object. If False,
the __getter__ method may return a key that can be used by it later to actually get the
:py:class:`Stoner.Core.Data` object.
@@ -649,7 +649,7 @@ def _load(self, filename, *args, **kargs):
return self

def scan_meta(self, group):
"""Scan the HDF5 Group for atributes and datasets and sub groups and recursively add them to the metadata."""
"""Scan the HDF5 Group for attributes and datasets and sub groups and recursively add them to the metadata."""
root = ".".join(group.name.split("/")[2:])
for name, thing in group.items():
parts = thing.name.split("/")
@@ -684,7 +684,7 @@ def __init__(self, *args, **kargs):
Keyword Args:
regrid (bool):
- If set True, the gridimage() method is automatically called to re-grid the image to known co-ordinates.
+ If set True, the gridimage() method is automatically called to re-grid the image to known coordinates.
"""
regrid = kargs.pop("regrid", False)
bcn = kargs.pop("bcn", False)
