diff --git a/Stoner/Analysis.py b/Stoner/Analysis.py
index 5cbaf23d8..512a04406 100755
--- a/Stoner/Analysis.py
+++ b/Stoner/Analysis.py
@@ -193,7 +193,7 @@ def integrate(
     bounds=lambda x, y: True,
     **kargs,
 ):
-    """Inegrate a column of data, optionally returning the cumulative integral.
+    """Integrate a column of data, optionally returning the cumulative integral.
 
     Args:
         xcol (index):
@@ -214,7 +214,7 @@ def integrate(
         bounds (callable):
             A function that evaluates for each row to determine if the data should be integrated over.
         **kargs:
-            Other keyword arguements are fed direct to the scipy.integrate.cumtrapz method
+            Other keyword arguments are fed directly to the scipy.integrate.cumtrapz method
 
     Returns:
         (:py:class:`Stoner.Data`):
@@ -240,7 +240,7 @@ def integrate(
         resultdata = cumtrapz(yd, xdat, **kargs)
         resultdata = np.append(np.array([0]), resultdata)
     if result is not None:
-        header = header if header is not None else f"Intergral of {self.column_headers[_.ycol]}"
+        header = header if header is not None else f"Integral of {self.column_headers[_.ycol]}"
         if isinstance(result, bool) and result:
             self.add_column(resultdata, header=header, replace=False)
         else:
@@ -267,7 +267,7 @@ def normalise(self, target=None, base=None, replace=True, header=None, scale=Non
 
     Keyword Arguments:
         base (index):
-            The column to normalise to, can be an integer or string. **Depricated** can also be a tuple (low,
+            The column to normalise to, can be an integer or string. **Deprecated** can also be a tuple (low,
            high) being the output range
         replace (bool):
            Set True(default) to overwrite the target data columns
@@ -461,7 +461,7 @@ def transform(set1, *p):
     perr = np.sqrt(np.diagonal(pcov))
     self.data[:, _.xcol], self.data[:, _.ycol] = func(self.data[:, _.xcol], self.data[:, _.ycol], *popt)
     self["Stitching Coefficients"] = list(popt)
-    self["Stitching Coeffient Errors"] = list(perr)
+    self["Stitching Coefficient Errors"] = list(perr)
     self["Stitching overlap"] = (lower, upper)
     self["Stitching Window"] = num_pts
 
@@ -478,7 +478,7 @@ def threshold(self, threshold, **kargs):
         col (index):
             Column index to look for data in
         rising (bool):
-            look for case where the data is increasing in value (defaukt True)
+            look for case where the data is increasing in value (default True)
         falling (bool):
             look for case where data is fallinh in value (default False)
         xcol (index, bool or None):
@@ -496,7 +496,7 @@ def threshold(self, threshold, **kargs):
            Either a sing;le fractional row index, or an in terpolated x value
 
     Note:
-        If you don't sepcify a col value or set it to None, then the assigned columns via the
+        If you don't specify a col value or set it to None, then the assigned columns via the
         :py:attr:`DataFile.setas` attribute will be used.
 
     Warning:
diff --git a/Stoner/Core.py b/Stoner/Core.py
index ed3c82ace..99f54fc6c 100755
--- a/Stoner/Core.py
+++ b/Stoner/Core.py
@@ -86,7 +86,7 @@ class DataFile(
             The possible mime-types of data files represented by each matching filename pattern in
             :py:attr:`Datafile.pattern`.
         patterns (list):
-            A list of filename extenion glob patterns that matrches the expected filename patterns for a DataFile
+            A list of filename extension glob patterns that matches the expected filename patterns for a DataFile
             (*.txt and *.dat")
         priority (int):
             Used to indicathe order in which subclasses of :py:class:`DataFile` are tried when loading data. A higher
@@ -96,11 +96,11 @@ class DataFile(
         shape (tuple of integers):
             Returns the shape of the data (rows,columns) - equivalent to self.data.shape.
         records (numpy record array):
-            Returns the data in the form of a list of yuples where each tuple maps to the columsn names.
+            Returns the data in the form of a list of tuples where each tuple maps to the column names.
         clone (DataFile):
             Creates a deep copy of the :py:class`DataFile` object.
         dict_records (array of dictionaries):
-            View the data as an array or dictionaries where each dictionary represnets one row with keys dervied
+            View the data as an array of dictionaries where each dictionary represents one row with keys derived
             from column headers.
         dims (int):
             When data columns are set as x,y,z etc. returns the number of dimensions implied in the data set
@@ -257,7 +257,7 @@ def __init__(self, *args, **kargs):
     # ============================================================================================
 
     def _init_single(self, *args, **kargs):
-        """Handle constructor with 1 arguement - called from __init__."""
+        """Handle constructor with 1 argument - called from __init__."""
         arg = args[0]
         inits = {
             path_types + (bool, bytes, io.IOBase): self._init_load,
@@ -396,7 +396,7 @@ def _init_list(self, arg, **kargs):
             raise TypeError(f"Unable to construct DataFile from a {type(arg)}")
 
     # ============================================================================================
-    ############################ Speical Methods ###############################################
+    ############################ Special Methods ###############################################
     # ============================================================================================
 
     def __call__(self, *args, **kargs):
@@ -448,7 +448,7 @@ def __deepcopy__(self, memo):
         return result
 
     def __dir__(self):
-        """Reeturns the attributes of the current object.
+        """Returns the attributes of the current object.
 
        Augmenting the keys of self.__dict__ with the attributes that __getattr__ will handle."""
         attr = dir(type(self))
@@ -638,7 +638,7 @@ def _load(self, filename, *args, **kargs):
                 Raised if the first row does not start with 'TDI Format 1.5' or 'TDI Format=1.0'.
 
         Note:
-            The *_load* methods shouldbe overidden in each child class to handle the process of loading data from
+            The *_load* methods should be overridden in each child class to handle the process of loading data from
             disc. If they encounter unexpected data, then they should raise StonerLoadError to signal this, so
             that the loading class can try a different sub-class instead.
         """
@@ -916,7 +916,7 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla
         Returns:
             self:
-                The :py:class:`DataFile` instance with the additonal column inserted.
+                The :py:class:`DataFile` instance with the additional column inserted.
 
         Note:
             Like most :py:class:`DataFile` methods, this method operates in-place in that it also modifies
@@ -991,7 +991,7 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla
         # If not replacing, then add extra columns to existing data.
         if not replace:
-            colums = copy.copy(self.column_headers)
+            columns = copy.copy(self.column_headers)
             old_setas = self.setas.clone
             if index == self.data.shape[1]:  # appending column
                 self.data = DataArray(np.append(self.data, np_data, axis=1), setas=self.setas.clone)
@@ -1003,10 +1003,10 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla
                     setas=self.setas.clone,
                 )
             for ix in range(0, index):
-                self.column_headers[ix] = colums[ix]
+                self.column_headers[ix] = columns[ix]
                 self.setas[ix] = old_setas[ix]
             for ix in range(index, dc):
-                self.column_headers[ix + cw] = colums[ix]
+                self.column_headers[ix + cw] = columns[ix]
                 self.setas[ix + cw] = old_setas[ix]
         # Check that we don't need to expand to overwrite with the new data
         if index + cw > self.shape[1]:
@@ -1069,7 +1069,7 @@ def del_column(self, col=None, duplicates=False):
             - If duplicates is True and col is None then all duplicate columns are removed,
             - if col is not None and duplicates is True then all duplicates of the specified column are removed.
             - If duplicates is False and *col* is either None or False then all masked coplumns are deleeted. If
-              *col* is True, then all columns that are not set i the :py:attr:`setas` attrobute are delted.
+              *col* is True, then all columns that are not set in the :py:attr:`setas` attribute are deleted.
             - If col is a list (duplicates should not be None) then the all the matching columns are found.
             - If col is an iterable of booleans, then all columns whose elements are False are deleted.
             - If col is None and duplicates is None, then all columns with at least one elelemtn masked
@@ -1142,7 +1142,7 @@ def del_nan(self, col=None, clone=False):
         else:  # Not cloning so ret is self
             ret = self
-        if col is None:  # If col is still None, use all columsn that are set to any value in self.setas
+        if col is None:  # If col is still None, use all columns that are set to any value in self.setas
             col = [ix for ix, col in enumerate(self.setas) if col != "."]
         if not isLikeList(col):  # If col isn't a list, make it one now
             col = [col]
@@ -1163,13 +1163,13 @@ def del_rows(self, col=None, val=None, invert=False):
 
         Args:
             col (list,slice,int,string, re, callable or None):
-                Column containg values to search for.
+                Column containing values to search for.
             val (float or callable):
                 Specifies rows to delete. Maybe:
                     - None - in which case the *col* argument is used to identify rows to be deleted,
                     - a float in which case rows whose columncol = val are deleted
                     - or a function - in which case rows where the function evaluates to be true are deleted.
-                    - a tuple, in which case rows where column col takes value between the minium and maximum of
+                    - a tuple, in which case rows where column col takes value between the minimum and maximum of
                       the tuple are deleted.
 
         Keyword Arguments:
@@ -1247,7 +1247,7 @@ def del_rows(self, col=None, val=None, invert=False):
         return self
 
     def dir(self, pattern=None):
-        """Return a list of keys in the metadata, filtering wiht a regular expression if necessary.
+        """Return a list of keys in the metadata, filtering with a regular expression if necessary.
 
         Keyword Arguments:
             pattern (string or re):
@@ -1271,7 +1271,7 @@ def filter(self, func=None, cols=None, reset=True):
 
         Args:
             func (callable):
-                is a callable object that should take a single list as a p[arameter representing one row.
+                is a callable object that should take a single list as a parameter representing one row.
             cols (list):
                 a list of column indices that are used to form the list of values passed to func.
             reset (bool):
@@ -1357,7 +1357,7 @@ def load(cls, *args, **kargs):
             Each subclass is scanned in turn for a class attribute priority which governs the order in which they
             are tried. Subclasses which can make an early positive determination that a file has the correct format
-            can have higher priority levels. Classes should return a suitable expcetion if they fail to load the file.
+            can have higher priority levels. Classes should return a suitable exception if they fail to load the file.
 
             If no class can load a file successfully then a StonerUnrecognisedFormat exception is raised.
         """
@@ -1569,7 +1569,7 @@ def to_pandas(self):
         Notes:
             In addition to transferring the numerical data, the DataFrame's columns are set to a multi-level
             index of the :py:attr:`Stoner.Data.column_headers` and :py:attr:`Stoner.Data.setas`
-            calues. A pandas DataFrame extension attribute, *metadata* is registered and is used to store
+            values. A pandas DataFrame extension attribute, *metadata* is registered and is used to store
             the metada from the :py:class:1Stoner.Data` object. This pandas extension attribute is in fact a
             trivial subclass of the :py:class:`Stoner.core.typeHintedDict`.
diff --git a/Stoner/FileFormats.py b/Stoner/FileFormats.py
index 516cc3a5d..223c17a59 100755
--- a/Stoner/FileFormats.py
+++ b/Stoner/FileFormats.py
@@ -58,6 +58,6 @@
 warn(
     "*" * 80
-    + "\nStoner.FileFormats is a depricated module - use Stoner.formats and it's sub-modules now!\n"
+    + "\nStoner.FileFormats is a deprecated module - use Stoner.formats and its sub-modules now!\n"
     + "*" * 80
 )
diff --git a/Stoner/Folders.py b/Stoner/Folders.py
index 67c36f638..8d3773b11 100755
--- a/Stoner/Folders.py
+++ b/Stoner/Folders.py
@@ -14,7 +14,7 @@ class DataFolder(DataMethodsMixin, DiskBasedFolderMixin, baseFolder):
     """Provide an interface to manipulating lots of data files stored within a directory structure on disc.
 
-    By default, the members of the DataFolder are isntances of :class:`Stoner.Data`. The DataFolder emplys a lazy
+    By default, the members of the DataFolder are instances of :class:`Stoner.Data`. The DataFolder employs a lazy
     open strategy, so that files are only read in from disc when actually needed.
 
     .. inheritance-diagram:: DataFolder
diff --git a/Stoner/HDF5.py b/Stoner/HDF5.py
index 8031c5ddf..db52c2e07 100755
--- a/Stoner/HDF5.py
+++ b/Stoner/HDF5.py
@@ -41,7 +41,7 @@ def get_hdf_loader(f, default_loader=lambda *args, **kargs: None):
     typ = bytes2str(f.attrs.get("type", ""))
     if (typ not in globals() or not isinstance(globals()[typ], type)) and "module" not in f.attrs:
         raise StonerLoadError(
-            "HDF5 Group does not speicify a recongized type and does not specify a module to use to load."
+            "HDF5 Group does not specify a recognized type and does not specify a module to use to load."
         )
 
     if "module" in f.attrs:
@@ -121,13 +121,13 @@ class HDF5File(DataFile):
         kargs (dict):
             Dictionary of keyword arguments
 
-    If the first non-keyword arguement is not an h5py File or Group then
+    If the first non-keyword argument is not an h5py File or Group then
     initialises with a blank parent constructor and then loads data, otherwise,
     calls parent constructor.
     Datalayout is dead simple, the numerical data is in a dataset called *data*,
-    metadata are attribtutes of a group called *metadata* with the keynames being the
-    full name + typehint of the stanard DataFile metadata dictionary
+    metadata are attributes of a group called *metadata* with the keynames being the
+    full name + typehint of the standard DataFile metadata dictionary
     *column_headers* are an attribute of the root file/group
     *filename* is set from either an attribute called filename, or from the group
     name or from the hdf5 filename.
@@ -392,7 +392,7 @@ def __getter__(self, name, instantiate=True):
             the baseFolder class uses a :py:class:`regexpDict` to store objects in.
 
         Keyword Arguments:
-            instatiate (bool):
+            instantiate (bool):
                 If True (default) then always return a :py:class:`Stoner.Core.Data` object. If False,
                 the __getter__ method may return a key that can be used by it later to actually get the
                 :py:class:`Stoner.Core.Data` object.
@@ -649,7 +649,7 @@ def _load(self, filename, *args, **kargs):
         return self
 
     def scan_meta(self, group):
-        """Scan the HDF5 Group for atributes and datasets and sub groups and recursively add them to the metadata."""
+        """Scan the HDF5 Group for attributes and datasets and sub groups and recursively add them to the metadata."""
         root = ".".join(group.name.split("/")[2:])
         for name, thing in group.items():
             parts = thing.name.split("/")
@@ -684,7 +684,7 @@ def __init__(self, *args, **kargs):
 
         Keyword Args:
             regrid (bool):
-                If set True, the gridimage() method is automatically called to re-grid the image to known co-ordinates.
+                If set True, the gridimage() method is automatically called to re-grid the image to known coordinates.
         """
         regrid = kargs.pop("regrid", False)
         bcn = kargs.pop("bcn", False)
diff --git a/Stoner/Image/attrs.py b/Stoner/Image/attrs.py
index f3147fc60..d6d4e864b 100755
--- a/Stoner/Image/attrs.py
+++ b/Stoner/Image/attrs.py
@@ -53,7 +53,7 @@ class DrawProxy:
     This class allows access the user to draw simply shapes on an image (or its mask) by specifying the desired shape
     and geometry (centre, length/width etc). Mostly this implemented by pass throughs to the :py:mod:`skimage.draw`
     module, but methods are provided for an annulus, rectangle (and square) and rectangle-perimeter meothdds- the
-    latter offering rotation about the centre pooint in contrast to the :py:mod:`skimage.draw` equivalents.
+    latter offering rotation about the centre point in contrast to the :py:mod:`skimage.draw` equivalents.
 
     No state data is stored with this class so the attribute does not need to be serialised when the parent
     ImageFile is saved.
@@ -68,11 +68,11 @@ def annulus(self, r, c, radius1, radius2, shape=None, value=1.0):
         """Use a combination of two circles to draw and annulus.
 
         Args:
-            r,c (float): Centre co-ordinates
+            r,c (float): Centre coordinates
             radius1,radius2 (float): Inner and outer radius.
 
         Keyword Arguments:
-            shape (2-tuple, None): Confine the co-ordinates to staywith shape
+            shape (2-tuple, None): Confine the coordinates to stay within shape
             value (float): value to draw with
         Returns:
             A copy of the image with the annulus drawn on it.
@@ -106,7 +106,7 @@ def circle(self, r, c, radius, shape=None, value=1.0):
         """ "Generate coordinates of pixels within circle.
 
         Args:
-            r,c (int): co-ordinates of the centre of the circle to be drawn.
+            r,c (int): coordinates of the centre of the circle to be drawn.
             radius (float): Radius of the circle
 
         Keyword arguments:
@@ -126,12 +126,12 @@ def rectangle(self, r, c, w, h, angle=0.0, shape=None, value=1.0):
         """Draw a rectangle on an image.
 
         Args:
-            r,c (float): Centre co-ordinates
+            r,c (float): Centre coordinates
             w,h (float): Lengths of the two sides of the rectangle
 
         Keyword Arguments:
             angle (float): Angle to rotate the rectangle about
-            shape (2-tuple or None): Confine the co-ordinates to this shape.
+            shape (2-tuple or None): Confine the coordinates to this shape.
             value (float): The value to draw with.
 
         Returns:
@@ -158,12 +158,12 @@ def rectangle_perimeter(self, r, c, w, h, angle=0.0, shape=None, value=1.0):
         """Draw the perimter of a rectangle on an image.
 
         Args:
-            r,c (float): Centre co-ordinates
+            r,c (float): Centre coordinates
             w,h (float): Lengths of the two sides of the rectangle
 
         Keyword Arguments:
             angle (float): Angle to rotate the rectangle about
-            shape (2-tuple or None): Confine the co-ordinates to this shape.
+            shape (2-tuple or None): Confine the coordinates to this shape.
             value (float): The value to draw with.
 
         Returns:
@@ -190,12 +190,12 @@ def square(self, r, c, w, angle=0.0, shape=None, value=1.0):
         """Draw a square on an image.
 
         Args:
-            r,c (float): Centre co-ordinates
+            r,c (float): Centre coordinates
             w (float): Length of the side of the square
 
         Keyword Arguments:
             angle (float): Angle to rotate the rectangle about
-            shape (2-tuple or None): Confine the co-ordinates to this shape.
+            shape (2-tuple or None): Confine the coordinates to this shape.
             value (float): The value to draw with.
 
         Returns:
@@ -274,7 +274,7 @@ def image(self):
 
     @property
     def draw(self):
-        """Access the draw proxy opbject."""
+        """Access the draw proxy object."""
         return DrawProxy(self._mask, self._imagefolder)
 
     def __init__(self, *args):
@@ -290,7 +290,7 @@ def __setitem__(self, index, value):
         self._imagearray.mask.__setitem__(index, value)
 
     def __delitem__(self, index):
-        """Proxy through to underyling mask."""
+        """Proxy through to underlying mask."""
         self._imagearray.mask.__delitem__(index)
 
     def __getattr__(self, name):
@@ -362,7 +362,7 @@ def select(self, **kargs):
         """Interactive selection mode.
 
         This method allows the user to interactively choose a mask region on the image. It will require the
-        Matplotlib backen to be set to Qt or other non-inline backend that suppports a user vent loop.
+        Matplotlib backend to be set to Qt or other non-inline backend that supports a user event loop.
 
         The image is displayed in the window and athe user can interact with it with the mouse and keyboard.
diff --git a/Stoner/Image/core.py b/Stoner/Image/core.py
index 7285ffdaf..74f7de577 100755
--- a/Stoner/Image/core.py
+++ b/Stoner/Image/core.py
@@ -115,7 +115,7 @@ def copy_into(source: "ImageFile", dest: "ImageFile") -> "ImageFile":
 
     Args:
         source(ImageFile): The ImageFile object to be copied from
-        dest (ImageFile): The ImageFile objrct to be changed by recieving the copiued data.
+        dest (ImageFile): The ImageFile object to be changed by receiving the copied data.
 
     Returns:
         The modified *dest* ImageFile.
@@ -169,14 +169,14 @@ class ImageArray(np.ma.MaskedArray, metadataObject):
         metadata (:py:class:`Stoner.core.regexpDict`):
             A dictionary of metadata items associated with this image.
         filename (str):
-            The name of the file from which this image was laoded.
+            The name of the file from which this image was loaded.
         title (str):
             The title of the image (defaults to the filename).
         mask (:py:class:`numpy.ndarray of bool`):
             The underlying mask data of the image. Masked elements (i.e.
             where mask=True) are ignored for many image operations. Indexing them will return the mask fill
             value (typically NaN, ot -1 or -MAXINT)
         draw (:py:class:`Stoner.Image.attrs.DrawProxy`):
-            A sepcial object that allows the user to manipulate the image data by making use of
+            A special object that allows the user to manipulate the image data by making use of
             :py:mod:`skimage.draw` functions as well as some additional drawing functions.
         clone (:py:class:`Stoner.ImageArry`):
             Return a duplicate copy of the current image - this allows subsequent methods to
@@ -186,7 +186,7 @@ class ImageArray(np.ma.MaskedArray, metadataObject):
         aspect (float):
             The aspect ratio (width/height) of the image.
         max_box (tuple (0,x-size,0-y-size)):
-            The extent of the iamge size in a form suitable for use in defining a box.
+            The extent of the image size in a form suitable for use in defining a box.
         flip_h (:py:class:`ImageArray`):
             Clone the current image and then flip it horizontally (left-right).
         flip_v (:py:class:`ImageArray`):
@@ -228,7 +228,7 @@ class ImageArray(np.ma.MaskedArray, metadataObject):
         I would call im=im.translate((4,10))
     """
 
-    # Proxy attributess for storing imported functions. Only do the import when needed
+    # Proxy attributes for storing imported functions. Only do the import when needed
     _func_proxy = None
 
     # extra attributes for class beyond standard numpy ones
@@ -238,7 +238,7 @@ class ImageArray(np.ma.MaskedArray, metadataObject):
 
     fmts = ["png", "npy", "tiff", "tif"]
 
-    # These will be overriden with isntance attributes, but setting here allows ImageFile properties to be defined.
+    # These will be overridden with instance attributes, but setting here allows ImageFile properties to be defined.
     debug = False
     filename = ""
 
@@ -305,7 +305,7 @@ def __new__(cls, *args, **kargs):
 
         asfloat = kargs.pop("asfloat", False) or kargs.pop(
             "convert_float", False
-        )  # convert_float for back compatability
+        )  # convert_float for back compatibility
         if asfloat and ret.dtype.kind != "f":  # convert to float type in place
             ret = ret.convert(np.float64)
 
@@ -473,7 +473,7 @@ def _box(self, *args, **kargs):
             - (iterable of length 4) - assumed to give 4 integers to describe a specific box
         """
         if len(args) == 0 and "box" in kargs.keys():
-            args = [kargs["box"]]  # back compatability
+            args = [kargs["box"]]  # back compatibility
         elif len(args) not in (0, 1, 4):
             raise ValueError("box accepts 1 or 4 arguments, {len(args)} given.")
         if len(args) == 0 or (len(args) == 1 and args[0] is None):
@@ -602,7 +602,7 @@ def CCW(self):
 
     @property
     def draw(self):
-        """Access the DrawProxy opbject for accessing the skimage draw sub module."""
+        """Access the DrawProxy object for accessing the skimage draw sub module."""
         return DrawProxy(self, self)
 
     # ==============================================================================
@@ -633,7 +633,7 @@ def __setattr__(self, name, value):
         """Set an attribute on the object."""
         super().__setattr__(name, value)
         # add attribute to those for copying in array_finalize. use value as
-        # defualt.
+        # default.
         circ = ["_optinfo", "mask", "__dict__"]  # circular references
         proxy = ["_funcs"]  # can be reloaded for cloned arrays
         if name in circ + proxy:
@@ -698,7 +698,7 @@ def __delitem__(self, index):
 
 @class_wrapper(target=ImageArray, exclude_below=metadataObject)
 class ImageFile(metadataObject):
-    """An Image file type that is analagous to :py:class:`Stoner.Data`.
+    """An Image file type that is analogous to :py:class:`Stoner.Data`.
     This contains metadata and an image attribute which is an :py:class:`Stoner.Image.ImageArray`
     type which subclasses numpy ndarray and
@@ -710,14 +710,14 @@ class ImageFile(metadataObject):
         metadata (:py:class:`Stoner.core.regexpDict`):
             A dictionary of metadata items associated with this image.
         filename (str):
-            The name of the file from which this image was laoded.
+            The name of the file from which this image was loaded.
         title (str):
             The title of the image (defaults to the filename).
         mask (:py:class:`Stoner.Image.attrs.MaskProxy`):
             A special object that allows manipulation of the image's mask - thius allows the user to
             selectively disable regions of the image from rpocessing functions.
         draw (:py:class:`Stoner.Image.attrs.DrawProxy`):
-            A sepcial object that allows the user to manipulate the image data by making use of
+            A special object that allows the user to manipulate the image data by making use of
             :py:mod:`skimage.draw` functions as well as some additional drawing functions.
         clone (:py:class:`Stoner.ImageFile`):
             Return a duplicate copy of the current image - this allows subsequent methods to
@@ -727,7 +727,7 @@ class ImageFile(metadataObject):
         aspect (float):
             The aspect ratio (width/height) of the image.
         max_box (tuple (0,x-size,0-y-size)):
-            The extent of the iamge size in a form suitable for use in defining a box.
+            The extent of the image size in a form suitable for use in defining a box.
         flip_h (ImageFile):
             Clone the current image and then flip it horizontally (left-right).
         flip_v (ImageFile):
@@ -781,7 +781,7 @@ def __init__(self, *args, **kargs):
         through to image attribute.
 
         There is one special case of creating an ImageFile from a :py:class:`Stoner.Core.DataFile`. In this case the
-        the DataFile is assummed to contain (x,y,z) data that should be converted to a map of
+        the DataFile is assumed to contain (x,y,z) data that should be converted to a map of
         z on a regular grid of x,y. The columns for the x,y,z data can be taken from the DataFile's
         :py:attr:`Stoner.Core.DataFile.setas` attribute or overridden by providing xcol, ycol and zcol keyword
         arguments. A further *shape* keyword can spewcify the shape as a tuple or "unique" to use the unique values of
@@ -849,7 +849,7 @@ def data(self, value):
 
     @property
     def draw(self):
-        """Access the DrawProxy opbject for accessing the skimage draw sub module."""
+        """Access the DrawProxy object for accessing the skimage draw sub module."""
         return DrawProxy(self.image, self)
 
     @property
@@ -979,7 +979,7 @@ def __floordiv__(self, other):
         if self.image.dtype != other.image.dtype:
             raise ValueError(
                 "Only ImageFiles with the same type of underlying image data can be used to calculate an"
-                + "XMCD ratio.Mimatch is {self.image.dtype} vs {other.image.dtype}"
+                + f" XMCD ratio. Mismatch is {self.image.dtype} vs {other.image.dtype}"
             )
         if self.image.dtype.kind != "f":
             ret = self.clone.convert(float)
@@ -1057,11 +1057,11 @@ def __repr__(self):
 
     ############################# Private methods #####################################################################
 
     def _init_from_datafile(self, *args, **kargs):
-        """Initialise ImageFile from DataFile defining x,y,z co-ordinates.
+        """Initialise ImageFile from DataFile defining x,y,z coordinates.
 
         Args:
             args[0] (DataFile):
-                A :py:class:`Stoner.Core.DataFile` instance that defines x,y,z co-ordinates or has columns specified
+                A :py:class:`Stoner.Core.DataFile` instance that defines x,y,z coordinates or has columns specified
                 in keywords.
         Keyword Args:
@@ -1169,7 +1169,7 @@ def load(cls, *args, **kargs):
             Each subclass is scanned in turn for a class attribute :py:attr:`Stoner.ImnageFile.priority` which
             governs the order in which they are tried. Subclasses which can make an early positive determination that a
-            file has the correct format can have higher priority levels. Classes should return a suitable expcetion
+            file has the correct format can have higher priority levels. Classes should return a suitable exception
             if they fail to load the file.
 
             If no class can load a file successfully then a RunttimeError exception is raised.
diff --git a/Stoner/Image/folders.py b/Stoner/Image/folders.py
index b58ba8a33..32ba64acc 100755
--- a/Stoner/Image/folders.py
+++ b/Stoner/Image/folders.py
@@ -27,7 +27,7 @@ class ImageFolderMixin:
 
     Attributes:
         type (:py:class:`Stoner.Image.core.ImageArray`):
-            the type ob object to sotre in the folder (defaults to :py:class:`Stoner.Cire.Data`)
+            the type of object to store in the folder (defaults to :py:class:`Stoner.Core.Data`)
         extra_args (dict):
             Extra arguments to use when instantiatoing the contents of the folder from a file on disk.
         pattern (str or regexp):
@@ -35,13 +35,13 @@ class ImageFolderMixin:
             provided then any named groups are used to construct additional metadata entryies from the filename.
             Default is *.* to match all files with an extension.
         read_means (bool):
-            If true, additional metatdata keys are added that return the mean value of each column of the data.
+            If true, additional metadata keys are added that return the mean value of each column of the data.
             This can hep in grouping files where one column of data contains a constant value for the experimental
             state. Default is False
         recursive (bool):
-            Specifies whether to search recurisvely in a whole directory tree. Default is True.
+            Specifies whether to search recursively in a whole directory tree. Default is True.
         flatten (bool):
-            Specify where to present subdirectories as spearate groups in the folder (False) or as a single group
+            Specify whether to present subdirectories as separate groups in the folder (False) or as a single group
             (True). Default is False. The :py:meth:`DiskBasedFolderMixin.flatten` method has the equivalent effect
             and :py:meth:`DiskBasedFolderMixin.unflatten` reverses it.
         directory (str):
@@ -120,7 +120,7 @@ def align(self, *args, **kargs):
 
         Keyword Arguments:
             method (str):
-                The mthod is passed to the :py:class:`Stone.Image.ImageArray.align` method to control how the image
+                The method is passed to the :py:class:`Stoner.Image.ImageArray.align` method to control how the image
                 alignment is done. By default the 'Scharr' method is used.
             box (int, float, tuple of ints or floats):
                 Specifies a subset of the images to be used to calculate the alignment with.
@@ -178,7 +178,7 @@ def apply_all(self, func, *args, **kargs):
         Note:
             Further args, kargs are passed through to the function
         """
-        warn("apply_all is depricated and will be removed in a future version. Use ImageFolder.each() instead")
+        warn("apply_all is deprecated and will be removed in a future version. Use ImageFolder.each() instead")
Use ImageFolder.each() instead") return self.each(func, *args, **kargs) def average(self, weights=None, _box=False, _metadata="first"): @@ -489,7 +489,7 @@ class ImageFolder(ImageFolderMixin, DiskBasedFolderMixin, baseFolder): Attributes: type (:py:class:`Stoner.Image.core.ImageArray`): - the type ob object to sotre in the folder (defaults to :py:class:`Stoner.Cire.Data`) + the type ob object to store in the folder (defaults to :py:class:`Stoner.Cire.Data`) extra_args (dict): Extra arguments to use when instantiatoing the contents of the folder from a file on disk. pattern (str or regexp): @@ -497,13 +497,13 @@ class ImageFolder(ImageFolderMixin, DiskBasedFolderMixin, baseFolder): then any named groups are used to construct additional metadata entryies from the filename. Default is *.* to match all files with an extension. read_means (bool): - If true, additional metatdata keys are added that return the mean value of each column of the data. + If true, additional metadata keys are added that return the mean value of each column of the data. This can hep in grouping files where one column of data contains a constant value for the experimental state. Default is False recursive (bool): - Specifies whether to search recurisvely in a whole directory tree. Default is True. + Specifies whether to search recursively in a whole directory tree. Default is True. flatten (bool): - Specify where to present subdirectories as spearate groups in the folder (False) or as a single group + Specify where to present subdirectories as separate groups in the folder (False) or as a single group (True). Default is False. The :py:meth:`DiskBasedFolderMixin.flatten` method has the equivalent effect and :py:meth:`DiskBasedFolderMixin.unflatten` reverses it. directory (str): diff --git a/Stoner/Image/imagefuncs.py b/Stoner/Image/imagefuncs.py index 2c991861e..3e8ca2a35 100755 --- a/Stoner/Image/imagefuncs.py +++ b/Stoner/Image/imagefuncs.py @@ -96,7 +96,7 @@ def _scale(coord, scale=1.0, to_pixel=True): - """Convert pixel cordinates to scaled co-ordinates or visa versa. + """Convert pixel coordinates to scaled coordinates or visa versa. Args: coord(int,float or iterable): Coordinates to be scaled @@ -106,7 +106,7 @@ def _scale(coord, scale=1.0, to_pixel=True): to_pixel(bool): Force the conversion to be to pixels Returns: - scaled co-ordinates. + scaled coordinates. """ if isinstance(coord, int): if not to_pixel: @@ -229,12 +229,12 @@ def align(im, ref, method="scharr", **kargs): up-sampling of the fourier transform for sub-pixel alignment. The metadata key *chi2_shift* contains the translation vector and errors. - imreg_dft module's similarity function. This implements a full scale, rotation, translation - algorithm (by default cosntrained for just translation). It's unclear how much sub-pixel translation - is accomodated. + algorithm (by default constrained for just translation). It's unclear how much sub-pixel translation + is accommodated. - cv2 module based affine transform on a gray scale image. from: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/ """ - # To be consistent with x-y co-ordinate systems + # To be consistent with x-y coordinate systems align_methods = { "scharr": (_align_scharr, imreg_dft), "chi2_shift": (_align_chi2_shift, chi2_shift), @@ -328,7 +328,7 @@ def convert(image, dtype, force_copy=False, uniform=False, normalise=True): http://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx (2) Data Conversions. 
In "OpenGL ES 2.0 Specification v2.0.25", pp 7-8. Khronos Group, 2010. - (3) Proper treatment of pixels as integers. A.W. Paeth. + (3) Proper treatment of pixels as integers. A.W. Path. In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990. (4) Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels", pp 47-57. Morgan Kaufmann, 1998. @@ -470,7 +470,7 @@ def correct_drift(im, ref, **kargs): do_shift (bool): Shift the image, or just calculate the drift and store in metadata (default True, shit) Returns: - A shifted iamge with the image shift added to the metadata as 'correct drift'. + A shifted image with the image shift added to the metadata as 'correct drift'. Detects common features on the images and tracks them moving. Adds 'drift_shift' to the metadata as the (x,y) vector that translated the @@ -555,26 +555,26 @@ def filter_image(im, sigma=2): def gridimage(im, points=None, xi=None, method="linear", fill_value=None, rescale=False): - """Use :py:func:`scipy.interpolate.griddata` to shift the image to a regular grid of co-ordinates. + """Use :py:func:`scipy.interpolate.griddata` to shift the image to a regular grid of coordinates. Args: points (tuple of (x-co-ords,yco-ordsa)): - The actual sampled data co-ordinates + The actual sampled data coordinates xi (tupe of (2D array,2D array)): - The regular grid co-ordinates (as generated by e.g. :py:func:`np.meshgrid`) + The regular grid coordinates (as generated by e.g. :py:func:`np.meshgrid`) Keyword Arguments: method ("linear","cubic","nearest"): how to interpolate, default is linear fill_value (folat, Callable, None): - What to put when the co-ordinates go out of range (default is None). May be a callable + What to put when the coordinates go out of range (default is None). May be a callable in which case the initial image is presented as the only argument. If None, use the mean value. rescale (bool): - If the x and y co-ordinates are very different in scale, set this to True. + If the x and y coordinates are very different in scale, set this to True. Returns: A copy of the modified image. The image data is interpolated and metadata kets "actual_x","actual_y","sample_ - x","samp[le_y" are set to give co-ordinates of new grid. + x","samp[le_y" are set to give coordinates of new grid. Notes: If points and or xi are missed out then we try to construct them from the metadata. For points, the metadata @@ -702,7 +702,7 @@ def add_figure_to_clipboard(event): def level_image(im, poly_vert=1, poly_horiz=1, box=None, poly=None, mode="clip"): """Subtract a polynomial background from image. - Keword Arguments: + Keyword Arguments: poly_vert (int): fit a polynomial in the vertical direction for the image of order given. If 0 do not fit or subtract in the vertical direction poly_horiz (int): fit a polynomial of order poly_horiz to the image. If 0 given @@ -763,7 +763,7 @@ def level_image(im, poly_vert=1, poly_horiz=1, box=None, poly=None, mode="clip") def normalise(im, scale=None, sample=False, limits=(0.0, 1.0), scale_masked=False): """Norm alise the data to a fixed scale. - Keyword Arguements: + Keyword Arguments: scale (2-tuple): The range to scale the image to, defaults to -1 to 1. saple (box): @@ -836,9 +836,9 @@ def profile_line(img, src=None, dst=None, linewidth=1, order=1, mode="constant", Keyword Parameters: src, dst (2-tuple of int or float): - start and end of line profile. 
-            are given as intergers then they are assumed to be pxiel co-ordinates, floats are
-            assumed to be real-space co-ordinates using the embedded metadata.
+            start and end of line profile. If the coordinates
+            are given as integers then they are assumed to be pixel coordinates, floats are
+            assumed to be real-space coordinates using the embedded metadata.
         linewidth (int):
             the wideth of the profile to be taken.
         order (int 1-3):
@@ -877,9 +877,9 @@ def profile_line(img, src=None, dst=None, linewidth=1, order=1, mode="constant",
         dst = _scale(dst, scale)
         src = _scale(src, scale)
     if not isTuple(src, int, int):
-        raise ValueError("src co-ordinates are not a 2-tuple of ints.")
+        raise ValueError("src coordinates are not a 2-tuple of ints.")
     if not isTuple(dst, int, int):
-        raise ValueError("dst co-ordinates are not a 2-tuple of ints.")
+        raise ValueError("dst coordinates are not a 2-tuple of ints.")
 
     if constrain:
         fix = lambda x, mx: int(round(sorted([0, x, mx])[1]))
@@ -913,11 +913,11 @@ def profile_line(img, src=None, dst=None, linewidth=1, order=1, mode="constant",
 
 def radial_coordinates(im, centre=(None, None), pixel_size=(1, 1), angle=False):
-    """Rerurn a map of the radial co-ordinates of an image from a given centre, with adjustments for pixel size.
+    """Return a map of the radial coordinates of an image from a given centre, with adjustments for pixel size.
 
     Keyword Arguments:
         centre (2-tuple):
-            Co-ordinates of centre point in terms of the orginal pixels. Defaults to(None,None) for the middle of the
+            Coordinates of centre point in terms of the original pixels. Defaults to (None,None) for the middle of the
             image.
         pixel_size (2-tuple):
             The size of one pixel in (dx by dy) - defaults to 1,1
@@ -925,7 +925,7 @@ def radial_coordinates(im, centre=(None, None), pixel_size=(1, 1), angle=False):
             Whether to return the angles (in radians, True), distances (False) o a complex number (None).
 
     Returns:
-        An array of the same class as the input, but with values corresponding to the radial co-ordinates.
+        An array of the same class as the input, but with values corresponding to the radial coordinates.
     """
     cx, cy = centre
     r, c = im.shape
@@ -950,7 +950,7 @@ def radial_coordinates(im, centre=(None, None), pixel_size=(1, 1), angle=False):
 
 def radial_profile(im, angle=None, r=None, centre=(None, None), pixel_size=(1, 1)):
     """Extract a radial profile line from an image.
 
-    Keyword Paramaters:
+    Keyword Parameters:
         angle (float, tuple, None):
             Select the radial angle to include:
                 - float selects a single angle
@@ -960,12 +960,12 @@ def radial_profile(im, angle=None, r=None, centre=(None, None), pixel_size=(1, 1
             Edges of the bins in the radual direction - will return r.size-1 points. Default is None which uses
             the minimum r value found on the edges of the image.
         centre (2-tuple):
-            Co-ordinates of centre point in terms of the orginal pixels. Defaults to(None,None) for the middle of the
+            Coordinates of centre point in terms of the original pixels. Defaults to (None,None) for the middle of the
             image.
         pixel_size (2-tuple):
             The size of one pixel in (dx by dy) - defaults to 1,1
 
-    Retunrs:
+    Returns:
         (Data):
             A py:class:`Stoner.Data` object with a column for r and columns for mean, std, and number of pixels.
@@ -1063,7 +1063,7 @@ def remove_outliers(im, percentiles=(0.01, 0.99), replace=None):
 
     Returns:
         (ndarray):
-            Tje modified array.
+            The modified array.
 
     Use this method if you have an image with a small number of pixels with extreme values that are out of range.
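Reviewer note: the profile and radial hunks above are easier to sanity-check against a concrete call. The sketch below is illustrative only — the file name is hypothetical and it assumes radial_profile() is exposed as an ImageFile method through Stoner's usual imagefuncs proxy:

    # Hypothetical usage of radial_profile() as documented above.
    from Stoner.Image import ImageFile

    im = ImageFile("rings.tiff")  # hypothetical image on disc
    profile = im.radial_profile(centre=(None, None), pixel_size=(1, 1))
    # Per the corrected docstring, the result is a Stoner.Data object with a
    # column for r plus columns for mean, std and number of pixels at each radius.
    print(profile.column_headers)
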
@@ -1154,7 +1154,7 @@ def sgolay2d(img, points=15, poly=1, derivative=None):
         points (int):
             The number of points in the window aperture. Must be an odd number. (default 15)
         poly (int):
-            Degree of polynomial to use in the filter. (defatult 1)
+            Degree of polynomial to use in the filter. (default 1)
         derivative (str or None):
             Type of defivative to calculate. Can be:
                 None - smooth only (default)
@@ -1276,7 +1276,7 @@ def translate(im, translation, add_metadata=False, order=3, mode="wrap", cval=No
         mode (str):
             How to handle points outside the original image. See :py:func:`skimage.transform.warp`. Defaults to "wrap"
         cval (float):
-            The value to fill with if *mode* is constant. If not speficied or None, defaults to the mean pixcel value.
+            The value to fill with if *mode* is constant. If not specified or None, defaults to the mean pixel value.
 
     Returns:
         im (ImageArray): translated image
@@ -1379,7 +1379,7 @@ def crop(self, *args, **kargs):
     but uses image x,y coords (x,y --> col,row)
 
     Returns a view according to the coords given. If box is None it will allow the user to
     select a rectangle. If a tuple is given with None
-    included then max extent is used for that coord (analagous to slice).
+    included then max extent is used for that coord (analogous to slice).
     If copy then return a copy of self with the cropped image.
 
     The box can be specified in a number of ways:
@@ -1388,15 +1388,15 @@ def crop(self, *args, **kargs):
        - (float 0.0-1.0):
            A border of the given fraction of the images height and width is ignored
        - (string):
-           A correspoinding item of metadata is located and used to specify the box
+           A corresponding item of metadata is located and used to specify the box
        - (tuple of 4 ints or floats):
-           For each item in the tuple it is interpreted as foloows:
+           For each item in the tuple it is interpreted as follows:
            - (int):
-               A pixel co-ordinate in either the x or y direction
+               A pixel coordinate in either the x or y direction
            - (float 0.0-1.0):
                A fraction of the width or height in from the left, right, top, bottom sides
            - (float > 1.0):
-               Is rounded to the nearest integer and used a pixel cordinate.
+               Is rounded to the nearest integer and used as a pixel coordinate.
            - None:
                The extent of the image is used.
@@ -1599,7 +1599,7 @@ def save_tiff(self, filename, forcetype=False):
 
     Keyword Args:
         forcetype(bool):
-            (depricated) if forcetype then preserve data type as best as possible on save.
+            (deprecated) if forcetype then preserve data type as best as possible on save.
             Otherwise we let the underlying pillow library choose the best data type.
 
     Note:
diff --git a/Stoner/Image/kerr.py b/Stoner/Image/kerr.py
index 5c8166c1c..2df2aacc8 100755
--- a/Stoner/Image/kerr.py
+++ b/Stoner/Image/kerr.py
@@ -356,7 +356,7 @@ def switch_index(self, saturation_end=True, saturation_value=True):
             True if the last image is closest to the fully saturated state.
             False if you want the first image
         saturation_value(bool):
-            if True then a pixel value True means that switching has occured
+            if True then a pixel value True means that switching has occurred
             (ie magnetic saturation would be all True)
 
     Returns:
diff --git a/Stoner/Image/kerrfuncs.py b/Stoner/Image/kerrfuncs.py
index 7cf5d8571..b297641be 100755
--- a/Stoner/Image/kerrfuncs.py
+++ b/Stoner/Image/kerrfuncs.py
@@ -27,7 +27,7 @@
     "Images to Average",
     "Lens",
     "Magnification",
-    "Substraction Std",
+    "Subtraction Std",
 ]
 
 _test_keys = ["X-B-2d", "field: units"]  # minimum keys in data to assert that it is a standard file output
@@ -62,7 +62,7 @@ def _parse_text(text, key=None):
             text = float(text)
         except ValueError:
             pass  # leave it as string
-    # print '{} after processsing: \'{}\''.format(key,data)
+    # print '{} after processing: \'{}\''.format(key,data)
     return text
@@ -100,8 +100,8 @@ def reduce_metadata(kerr_im):
         if key in kerr_im.keys():
             newmet[key] = kerr_im[key]
     newmet["field"] = newmet.pop("X-B-2d")  # rename
-    if "Substraction Std" in kerr_im.keys():
-        newmet["subtraction"] = newmet.pop("Substraction Std")
+    if "Subtraction Std" in kerr_im.keys():
+        newmet["subtraction"] = newmet.pop("Subtraction Std")
     if "Averaging" in kerr_im.keys():
         if kerr_im["Averaging"]:  # averaging was on
             newmet["Averaging"] = newmet.pop("Images to Average")
@@ -167,7 +167,7 @@ def get_scalebar(kerr_im):
     im = exposure.rescale_intensity(im, in_range=(0.49, 0.5))  # saturate black and white pixels
     im = exposure.rescale_intensity(im)  # make sure they're black and white
     im = np.diff(im[0])  # 1d numpy array, differences
-    lim = [np.where(im > 0.9)[0][0], np.where(im < -0.9)[0][0]]  # first occurance of both cases
+    lim = [np.where(im > 0.9)[0][0], np.where(im < -0.9)[0][0]]  # first occurrence of both cases
     assertion(len(lim) == 2, "Couldn't find scalebar")
     return lim[1] - lim[0]
diff --git a/Stoner/Image/stack.py b/Stoner/Image/stack.py
index 06d072777..8a54381e8 100755
--- a/Stoner/Image/stack.py
+++ b/Stoner/Image/stack.py
@@ -109,7 +109,7 @@ def __getter__(self, name, instantiate=True):
             the baseFolder class uses a :py:class:`regexpDict` to store objects in.
 
         Keyword Arguments:
-            instatiate (bool): If True (default) then always return a metadataObject. If False,
+            instantiate (bool): If True (default) then always return a metadataObject. If False,
                 the __getter__ method may return a key that can be used by it later to actually get the
                 metadataObject. If None, then will return whatever is helf in the object cache, either instance
                 or name.
@@ -254,7 +254,7 @@ def __floordiv__(self, other):
         if self._stack.dtype != other._stack.dtype:
             raise ValueError(
                 "Only ImageFiles with the same type of underlying image data can be used to calculate an XMCD ratio."
- + "Mimatch is {self._stack.dtype} vs {other._stack.dtype}" + + "Mismatch is {self._stack.dtype} vs {other._stack.dtype}" ) if self._stack.dtype.kind != "f": ret = self.clone.convert(float) @@ -295,7 +295,7 @@ def _instantiate(self, idx): def _resize_stack(self, new_size, dtype=None): """Create a new stack with a new size.""" old_size = self._stack.shape - assertion(isinstance(self._stack, ImageArray), f"Trying to resize a non-image aray {type(self._stack)}") + assertion(isinstance(self._stack, ImageArray), f"Trying to resize a non-image array {type(self._stack)}") if old_size == new_size: return new_size if dtype is None: @@ -373,14 +373,14 @@ def convert(self, dtype, force_copy=False, uniform=False, normalise=True): http://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx 2, Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25", pp 7-8. Khronos Group, 2010. - 3, Proper treatment of pixels as integers. A.W. Paeth. + 3, Proper treatment of pixels as integers. A.W. Path. In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990. 4, Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels", pp 47-57. Morgan Kaufmann, 1998. """ from .imagefuncs import convert - # Aactually this is just a pass through for the imagefuncs.convert routine + # Actually this is just a pass through for the imagefuncs.convert routine mask = self._stack.mask self._stack = convert(self._stack, dtype, force_copy=force_copy, uniform=uniform, normalise=normalise).view( type(self._stack) @@ -400,7 +400,7 @@ def asfloat(self, normalise=True, clip=False, clip_negative=False, **kargs): clip range further to 0,1 Notes: - Analagous behaviour to ImageFile.asfloat() + Analogous behaviour to ImageFile.asfloat() If currently an int type and normalise then floats will be normalised to the maximum allowed value of the int type. @@ -440,7 +440,7 @@ def dtype_limits(self, clip_negative=True): return ret ########################################################################### - ################### Depricated Compaibility methods ####################### + ################### Deprecated Compatibility methods ####################### def correct_drifts(self, refindex, threshold=0.005, upsample_factor=50, box=None): """Align images to correct for image drift. @@ -456,7 +456,7 @@ def correct_drifts(self, refindex, threshold=0.005, upsample_factor=50, box=None box: see ImageArray.correct_drift """ - warnings.warn("correct_drift is a depricated method for an image stack - consider using align.") + warnings.warn("correct_drift is a deprecated method for an image stack - consider using align.") ref = self[refindex] self.apply_all("correct_drift", ref, threshold=threshold, upsample_factor=upsample_factor, box=box) @@ -471,12 +471,12 @@ def crop_stack(self, box): (ImageStack): cropped images """ - warnings.warn("crop_stack is depricated - sam effect can be achieved with crop(box)") + warnings.warn("crop_stack is deprecated - sam effect can be achieved with crop(box)") self.each.crop(box) def show(self): """Pass through to :py:meth:`Stoner.Image.ImageFolder.view`.""" - warnings.warn("show() is depricated in favour of ImageFolder.view()") + warnings.warn("show() is deprecated in favour of ImageFolder.view()") return self.view() @@ -500,7 +500,7 @@ def subtract(self, background): The modified image stack. Notes: - Method cahnged for v0.10 to not normalise or clip the data. + Method changed for v0.10 to not normalise or clip the data. 
             The background image is scaled by the ratio of the mean pixel values of the unmasked
             region in the background image.
         """
diff --git a/Stoner/Image/widgets.py b/Stoner/Image/widgets.py
index 88e0bfedf..53c150d40 100755
--- a/Stoner/Image/widgets.py
+++ b/Stoner/Image/widgets.py
@@ -51,7 +51,7 @@ class LineSelect:
     def __init__(self):
         """Create the LineSelect object, display the image and register the hooks.
 
-        The constructor will wait until the finished co-ordinates are set and then return
+        The constructor will wait until the finished coordinates are set and then return
         [(x_start,y_start),(x_finish, y_finish)].
         """
         self.started = False
@@ -125,7 +125,7 @@ def on_click(self, event):
         Returns:
             None.
 
-        Records with the starting or finishing co-ordinates of the line.
+        Records with the starting or finishing coordinates of the line.
         """
         if self.mode == "x":
             y1, y2 = self.ax.get_ylim()
@@ -154,7 +154,7 @@ def draw_line(self, event):
         Returns:
             None.
 
-        Optiuonal line properties can be overriden by passing keyword parameters to the constructor.
+        Optional line properties can be overridden by passing keyword parameters to the constructor.
         """
         if not self.started:  # Do nothing until we start
             return
@@ -177,7 +177,7 @@ class RegionSelect:
     def __init__(self):
         """Create the LineSelect object, display the image and register the hooks.
 
-        The constructor will wait until the finished co-ordinates are set and then return
+        The constructor will wait until the finished coordinates are set and then return
         [(x_start,y_start),(x_finish, y_finish)].
         """
         self.p1 = False
@@ -263,7 +263,7 @@ class ShapeSelect:
     def __init__(self):
         """Create the LineSelect object, display the image and register the hooks.
 
-        The constructor will wait until the finished co-ordinates are set and then return
+        The constructor will wait until the finished coordinates are set and then return
         [(x_start,y_start),(x_finish, y_finish)].
         """
         self.invert = False
@@ -438,7 +438,7 @@ def draw_vertices(self, vertices):
 
     def draw_poly(self, vertices):
         """Draw a polygon method using the specified vertices.
 
-        Returns rr,cc co-ordinates.
+        Returns rr,cc coordinates.
""" if len(vertices) < 2: return ([], []) @@ -495,7 +495,7 @@ def draw_circle(self, vertices): draw_circle.instructions = "2 or 3 perimeter vertices to define a circle\n4 or more to define an ellipse" def draw_rectangle(self, vertices): - """Calculate the co-ordinates for a rectangle from the vertices.""" + """Calculate the coordinates for a rectangle from the vertices.""" if len(vertices) < 2: return ([], []) if len(vertices) == 2: diff --git a/Stoner/Util.py b/Stoner/Util.py index 10da06a20..c9a789bed 100755 --- a/Stoner/Util.py +++ b/Stoner/Util.py @@ -47,7 +47,7 @@ def _h_sat_linear(d, i, Ms_vals, Hsat_vals, h_sat_fraction): Ms, Ms_err, _ = Ms_vals Hsat, Hsat_err = Hsat_vals - # Fit a striaght line to the central fraction of the data + # Fit a straight line to the central fraction of the data if i == 1: bounds = lambda x, r: np.abs(r.y) < np.abs(Ms) * h_sat_fraction else: @@ -60,14 +60,14 @@ def _h_sat_linear(d, i, Ms_vals, Hsat_vals, h_sat_fraction): Ms_i = Ms if i == 0 else -Ms # Find the intercept Hsat[1 - i] = fsolve(lambda x: Linear().func(x, *popt) - Ms_i, 0)[0] - # Uncertainity is the sum of error in slope * found intercept and error in Ms times slope + # Uncertainty is the sum of error in slope * found intercept and error in Ms times slope Hsat_err[1 - i] = np.sqrt((Hsat[1 - i] * pferr[1]) ** 2 + (popt[1] * Ms_err) ** 2) return (Hsat, Hsat_err) def _h_sat_susceptibility(d, i, Ms_vals, Hsat_vals, h_sat_fraction): # pylint: disable=unused-argument - """Determine the saturation field from the change in local sysceptibility in the looop.""" + """Determine the saturation field from the change in local sysceptibility in the loop.""" Hsat, Hsat_err = Hsat_vals xi = d.SG_Filter(order=1)[0, :] m, h = d.SG_Filter() @@ -124,7 +124,7 @@ def _up_down(data): high = data.x > mid + 0.45 * span low = data.x < mid - 0.45 * span - # Locate points where we cross a threhold + # Locate points where we cross a threshold t = np.zeros((2, len(data) + 2), dtype=bool) t[0, 1:-1] = high t[1, 1:-1] = low @@ -228,7 +228,7 @@ def split_up_down(data, col=None, folder=None): def hysteresis_correct(data, **kargs): - """Peform corrections to a hysteresis loop. + """Perform corrections to a hysteresis loop. Args: data (Data): @@ -251,7 +251,7 @@ def hysteresis_correct(data, **kargs): and look at the intercept with the relevant saturation moment. - "delta_M": Look for a field where the moment has changed by *h_sat_fraction* times the error in M_s. - - "susceptibility" - Calcualte H_sat from where the susceptibility changes by 1% of the average + - "susceptibility" - Calculate H_sat from where the susceptibility changes by 1% of the average susceptibility h_sat_fraction (float): The central fraction of the saturation moment that is used for calculating the saturation field. Defaults diff --git a/Stoner/Zip.py b/Stoner/Zip.py index 3c162d78d..d491eeb13 100755 --- a/Stoner/Zip.py +++ b/Stoner/Zip.py @@ -57,7 +57,7 @@ class ZippedFile(DataFile): """A sub class of DataFile that sores itself in a zip file. - If the first non-keyword arguement is not an :py:class:`zipfile:ZipFile` then + If the first non-keyword argument is not an :py:class:`zipfile:ZipFile` then initialises with a blank parent constructor and then loads data, otherwise, calls parent constructor. 
@@ -122,14 +122,14 @@ def _extract(self, archive, member):
         return self
 
     def _load(self, filename=None, *args, **kargs):
-        """Load a file from the zip file, openining it as necessary."""
+        """Load a file from the zip file, opening it as necessary."""
         if filename is None or not filename:
             self.get_filename("r")
         else:
             self.filename = filename
         try:
             if isinstance(self.filename, zf.ZipFile):  # Loading from an ZipFile
-                if not self.filename.fp:  # Open zipfile if necessarry
+                if not self.filename.fp:  # Open zipfile if necessary
                     other = zf.ZipFile(self.filename.filename, "r")
                     close_me = True
                 else:  # Zip file is already open
@@ -392,7 +392,7 @@ def __getter__(self, name, instantiate=True):
             The canonical mapping key to construct the path from.
 
         Keyword Arguments:
-            instatiate (bool):
+            instantiate (bool):
                 IF True (default) then always return a :py:class:`Stoner.Core.Data` object. If False,
                 the __getter__ method may return a key that can be used by it later to actually get the
                 :py:class:`Stoner.Core.Data` object.
@@ -431,7 +431,7 @@ def __lookup__(self, name):
 
         Note:
             We try two things - first a direct lookup in the namelist if there is an exact match to the key and then
-            we preprend the ZipFolder's path to try for a match with just the final part of the filename.
+            we prepend the ZipFolder's path to try for a match with just the final part of the filename.
         """
         try:  # try to go back to the base to see if it's already loaded
             return self._storage_class.__lookup__(self, name)
@@ -441,7 +441,7 @@ def __lookup__(self, name):
         try:
             if isinstance(name, string_types):
                 name = name.replace(path.sep, "/")
-            # First try tthe direct lookup - will work if we have a full name
+            # First try the direct lookup - will work if we have a full name
             if name in self.File.namelist():
                 return name
             pth = path.normpath(path.join(self.full_key, name)).replace(path.sep, "/")
@@ -485,7 +485,7 @@ def _save(self, f, trail):
         Returns:
             The new filename of the saved DataFile.
 
-        ZipFiles are really a flat heirarchy, so concatentate the trail and save the data using
+        ZipFiles are really a flat hierarchy, so concatenate the trail and save the data using
         :py:meth:`Stoner.Zip.ZipFile.save`
 
        This routine is used by a walk_groups call - hence the prototype matches that required for
@@ -511,7 +511,7 @@ class ZipFolder(ZipFolderMixin, DiskBasedFolderMixin, baseFolder):
     """A sub class of DataFile that sores itself in a zip file.
 
-    If the first non-keyword arguement is not an :py:class:`zipfile:ZipFile` then
+    If the first non-keyword argument is not an :py:class:`zipfile:ZipFile` then
     initialises with a blank parent constructor and then loads data, otherwise,
     calls parent constructor.
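Reviewer note: for the ZippedFile/ZipFolder docstrings corrected above, the intended call pattern is roughly as below. The archive name and its contents are hypothetical:

    # Hypothetical: treat a zip archive of data files as a folder.
    from Stoner.Zip import ZipFolder

    folder = ZipFolder("results.zip")  # members load lazily as DataFile objects
    for data in folder:
        print(data.filename, data.shape)
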
diff --git a/Stoner/__init__.py b/Stoner/__init__.py
index 35eb07506..5ddc22542 100755
--- a/Stoner/__init__.py
+++ b/Stoner/__init__.py
@@ -35,7 +35,7 @@

 Options = _Options()

-__version_info__ = ("0", "10", "7")
+__version_info__ = ("0", "10", "8")
 __version__ = ".".join(__version_info__)

 __homepath__ = pathlib.Path(__file__).parent.resolve()
diff --git a/Stoner/analysis/columns.py b/Stoner/analysis/columns.py
index 285bbb2d1..2204e998c 100755
--- a/Stoner/analysis/columns.py
+++ b/Stoner/analysis/columns.py
@@ -50,7 +50,7 @@ def error_calc(adata, bdata):  # pylint: disable=function-redefined
                )

        else:
-            raise ValueError(f"Unknown error caclulation mode {error_type}")
+            raise ValueError(f"Unknown error calculation mode {error_type}")
        adata, aname = self.__get_math_val(col_a)
        bdata, bname = self.__get_math_val(col_b)

@@ -248,7 +248,7 @@ def mean(self, column=None, sigma=None, bounds=None):

        Keyword Arguments:
            sigma (column index or array):
-                The uncertainity noted for each value in the mean
+                The uncertainty noted for each value in the mean
            bounds (callable):
                col_a callable function that takes col_a single argument list of numbers representing one row, and
                returns True for all rows to search in.
@@ -362,7 +362,7 @@ def multiply(self, col_a, col_b, replace=False, header=None, index=None):
        return self

    def span(self, column=None, bounds=None):
-        """Return a tuple of the maximum and minumum values within the given column and bounds.
+        """Return a tuple of the maximum and minimum values within the given column and bounds.

        Args:
            column (index):
@@ -395,7 +395,7 @@ def std(self, column=None, sigma=None, bounds=None):

        Keyword Arguments:
            sigma (column index or array):
-                The uncertainity noted for each value in the mean
+                The uncertainty noted for each value in the mean
            bounds (callable):
                col_a callable function that takes col_a single argument list of numbers representing one row, and
                returns True for all rows to search in.
diff --git a/Stoner/analysis/features.py b/Stoner/analysis/features.py
index 134fc7fbd..05bef6437 100755
--- a/Stoner/analysis/features.py
+++ b/Stoner/analysis/features.py
@@ -24,7 +24,7 @@ def peaks(self, **kargs):
            ycol (index):
                the column name or index of the data in which to search for peaks
            width (int or float):
-                the expected minium halalf-width of a peak in terms of the number of data points (int) or distance
+                the expected minimum half-width of a peak in terms of the number of data points (int) or distance
                in x (float). This is used in the differnetiation code to find local maxima. Bigger equals less
                sensitive to experimental noise, smaller means better eable to see sharp peaks
            poly (int):
@@ -34,10 +34,10 @@ def peaks(self, **kargs):

        Keyword Arguments:
            significance (float):
                used to decide whether a local maxmima is a significant peak. Essentially just the curvature
-                of the data. Bigger means less sensistive, smaller means more likely to detect noise. Default is the
+                of the data. Bigger means less sensitive, smaller means more likely to detect noise. Default is the
                maximum curvature/(2*width)
            xcol (index or None):
-                name or index of data column that p[rovides the x-coordinate (default None)
+                name or index of data column that provides the x-coordinate (default None)
            peaks (bool):
                select whether to measure peaks in data (default True)
            troughs (bool):
@@ -55,7 +55,7 @@ def peaks(self, **kargs):
            (various):
                If *modify* is true, then returns a the AnalysisMixin with the data set to just the peaks/troughs. 
If *modify* is false (default), then the return value depends on *ycol* and *xcol*. If *ycol* is
-                not None and *xcol* is None, then returns conplete rows of data corresponding to the found
+                not None and *xcol* is None, then returns complete rows of data corresponding to the found
                peaks/troughs. If *xcol* is not None, or *ycol* is None and *xcol* is None, then returns a 1D array
                of the x positions of the peaks/troughs.

@@ -67,7 +67,7 @@ def peaks(self, **kargs):
        troughs = kargs.pop("troughs", False)
        poly = kargs.pop("poly", 2)
        assertion(
-            poly >= 2, "poly must be at least 2nd order in peaks for checking for significance of peak or trough"
+            poly >= 2, "poly must be at least 2nd order in peaks for checking the significance of a peak or trough"
        )

        sort = kargs.pop("sort", False)

@@ -137,7 +137,7 @@ def peaks(self, **kargs):
        return ret

    def find_peaks(self, **kargs):
-        """Interface to :py:func:`scipy.signal.find_peaks` for loacating peaks in data.
+        """Interface to :py:func:`scipy.signal.find_peaks` for locating peaks in data.

        Args:
            ycol (index):
@@ -203,7 +203,7 @@ def find_peaks(self, **kargs):
            (various):
                If *modify* is true, then returns a the AnalysisMixin with the data set to just the peaks/troughs.
                If *modify* is false (default), then the return value depends on *ycol* and *xcol*. If *ycol* is
-                not None and *xcol* is None, then returns conplete rows of data corresponding to the found
+                not None and *xcol* is None, then returns complete rows of data corresponding to the found
                peaks/troughs. If *xcol* is not None, or *ycol* is None and *xcol* is None, then returns a 1D array
                of the x positions of the peaks/troughs.
diff --git a/Stoner/analysis/filtering.py b/Stoner/analysis/filtering.py
index 6fff17bf5..bcee26010 100755
--- a/Stoner/analysis/filtering.py
+++ b/Stoner/analysis/filtering.py
@@ -55,11 +55,11 @@ def SG_Filter(

        Notes:
            If col is not specified or is None then the :py:attr:`DataFile.setas` column assignments are used
-            to set an x and y column. If col is a tuple, then it is assumed to secify and x-column and y-column
+            to set an x and y column. If col is a tuple, then it is assumed to specify an x-column and y-column
            for differentiating data.

            This is now a pass through to :py:func:`scipy.signal.savgol_filter`

            Padding can help stop wildly wrong artefacts in the data at the start and enf of the data, particularly
-            when the differntial order is >1.
+            when the differential order is >1.

        See Also:
            User guide section :ref:`smoothing_guide`
@@ -456,7 +456,7 @@ def make_bins(self, xcol, bins, mode="lin", **kargs):
            bin_centres = (bin_start + bin_stop) / 2.0
        elif mode.lower().startswith("log"):
            if not 0.0 < bins <= 1.0:
-                raise ValueError("Bin width must be between 0 ans 1 for log binning")
+                raise ValueError("Bin width must be between 0 and 1 for log binning")
            if xmin <= 0:
                raise ValueError("The start of the binning must be a positive value in log mode.")
            xp = xmin
@@ -494,12 +494,12 @@ def outlier_detection(

        Args:
            column(column index):
-                specifing column for outlier detection. If not set,
+                specifying column for outlier detection. If not set,
                defaults to the current y set column.

        Keyword Arguments:
            window(int):
-                data window for anomoly detection
+                data window for anomaly detection
            shape(str):
                The name of a :py:mod:`scipy.signal` windowing function to use when averaging the data.
                Defaults to 'boxcar' for a flat average. 
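The SG_Filter behaviour described above is used elsewhere in this patch (see the
Stoner/Util.py hunks); a minimal sketch, assuming *d* is a Data object with x and y
columns already assigned via setas::

    m, h = d.SG_Filter()             # smoothed data with the default settings
    xi = d.SG_Filter(order=1)[0, :]  # first derivative, as in _h_sat_susceptibility
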
@@ -520,7 +520,7 @@ def outlier_detection( action_args (tuple): if *action* is callable, then action_args can be used to pass extra arguments to the action callable action_kargs (dict): - If *action* is callable, then action_kargs can be useed to pass extra keyword arguments to the action + If *action* is callable, then action_kargs can be used to pass extra keyword arguments to the action callable. Returns: @@ -537,7 +537,7 @@ def outlier_detection( The outlier detection function has the signatrure:: - def outlier(row,column,window,certainity,**kargs) + def outlier(row,column,window,certainty,**kargs) #code return True # or False @@ -581,7 +581,7 @@ def action(i,column, data, *action_args, **action_kargs): index = np.zeros(len(self), dtype=bool) for i, t in enumerate(self.rolling_window(window, wrap=False, exclude_centre=width)): index[i] = func(self.data[i], t, metric=certainty, **kargs) - self["outliers"] = np.arange(len(self))[index] # add outlier indecies to metadata + self["outliers"] = np.arange(len(self))[index] # add outlier indices to metadata if action == "mask" or action == "mask row": if action == "mask": self.mask[index, column] = True @@ -599,7 +599,7 @@ def scale(self, other, xcol=None, ycol=None, **kargs): Args: other (DataFile): - The other isntance of a datafile to match to + The other instance of a datafile to match to Keyword Arguments: xcol (column index): @@ -619,7 +619,7 @@ def scale(self, other, xcol=None, ycol=None, **kargs): Specifies whether to estimate an initial transformation value or to use the provided one, or start with an identity transformation. replace (bool): - Whether to map the x,y data to the new co-ordinates and return a copy of this AnalysisMixin (true) + Whether to map the x,y data to the new coordinates and return a copy of this AnalysisMixin (true) or to just return the results of the scaling. headers (2-element list or tuple of strings): new column headers to use if replace is True. @@ -754,7 +754,7 @@ def smooth(self, window="boxcar", xcol=None, ycol=None, size=None, **kargs): """ _ = self._col_args(xcol=xcol, ycol=ycol) replace = kargs.pop("replace", True) - result = kargs.pop("result", True) # overwirte existing y column data + result = kargs.pop("result", True) # overwrite existing y column data header = kargs.pop("header", self.column_headers[_.ycol]) # Sort out window size @@ -825,7 +825,7 @@ def spline(self, xcol=None, ycol=None, sigma=None, **kargs): Depending on the value of *replace*, returns a copy of the AnalysisMixin, a 1D numpy array of data or an :[y:class:`scipy.interpolate.UniverateSpline` object. - This is really jsut a pass through to the scipy.interpolate.UnivariateSpline function. Also used in the + This is really just a pass through to the scipy.interpolate.UnivariateSpline function. Also used in the extrapolate function. 
""" _ = self._col_args(xcol=xcol, ycol=ycol) @@ -835,7 +835,7 @@ def spline(self, xcol=None, ycol=None, sigma=None, **kargs): else: sigma = np.ones(len(self)) replace = kargs.pop("replace", True) - result = kargs.pop("result", True) # overwirte existing y column data + result = kargs.pop("result", True) # overwrite existing y column data header = kargs.pop("header", self.column_headers[_.ycol]) k = kargs.pop("order", 3) s = kargs.pop("smoothing", None) diff --git a/Stoner/analysis/fitting/mixins.py b/Stoner/analysis/fitting/mixins.py index 60a56271d..bf445fbd3 100755 --- a/Stoner/analysis/fitting/mixins.py +++ b/Stoner/analysis/fitting/mixins.py @@ -130,7 +130,7 @@ def __init__(self, model, *args, **kargs): # pylint: disable=unused-argument params (lmfit:parameter or dict): Parameters used to fit model. **kargs (dict): - Keyword arguments to intialise the reuslt object/. + Keyword arguments to initialise the result object/. Raises: RuntimeError: @@ -185,7 +185,7 @@ def __init__(self, popt, pcov, infodict, mesg, ier): infodict (dict): Additional information from curve_fit. mesg (str): - Descriptive information frok curve_fit. + Descriptive information from curve_fit. ier (int): Numerical error message. """ @@ -208,7 +208,7 @@ def __init__(self, popt, pcov, infodict, mesg, ier): for k in infodict: setattr(self, k, infodict[k]) - # Following peroperties used to return desired information + # Following properties used to return desired information @property def name(self): @@ -481,9 +481,9 @@ def annotate_fit(self, model, x=None, y=None, z=None, text_only=False, **kargs): Keyword Parameters: x (float): - x co-ordinate of the label + x coordinate of the label y (float): - y co-ordinate of the label + y coordinate of the label z (float): z co-ordinbate of the label if the current axes are 3D prefix (str): @@ -629,7 +629,7 @@ def _assemnle_data_to_fit(self, xcol, ycol, sigma, bounds, scale_covar, sigma_x= ycol(index): Column with ydata in it sigma (index or array-like): - column of y-errors or uncertainity values. + column of y-errors or uncertainty values. bounds (callable): Used to select the data rows to fit scale_covar (bool,None): @@ -1009,7 +1009,7 @@ def curve_fit(self, func, xcol=None, ycol=None, sigma=None, **kargs): ycol (index, list of indices or array): The index of the y-column data to fit. If an array, then should be 1D and the same length as the data. If ycol is a list of indices then the columns are iterated over in - turn, fitting occuring for each one. In this case the return value is a list of what would be + turn, fitting occurring for each one. In this case the return value is a list of what would be returned for a single column fit. Keyword Arguments: @@ -1030,16 +1030,16 @@ def curve_fit(self, func, xcol=None, ycol=None, sigma=None, **kargs): If this is a string then it is used as the name of the fitted data. (default None) absolute_sigma (bool): If False, `sigma` denotes relative weights of the data points. The default True means that - the sigma parameter is the reciprocal of the absoluate standard deviation. + the sigma parameter is the reciprocal of the absolute standard deviation. output (str, default "fit"): - Specifiy what to return. + Specify what to return. Returns: (various): The return value is determined by the *output* parameter. Options are: * "fit" (tuple of popt,pcov) Optimal values of the fitting parameters p, and the variance-co-variance matrix for the fitting parameters. 
- * "row" just a one dimensional numpy array of the fit paraeters interleaved with their + * "row" just a one dimensional numpy array of the fit parameters interleaved with their uncertainties * "full" a tuple of (popt,pcov,dictionary of optional outputs, message, return code, row). * "data" a copy of the :py:class:`Stoner.Core.DataFile` object with fit recorded in the @@ -1146,7 +1146,7 @@ def _func(x, *beta): ) ) - if callable(p0): # Allow the user to suppy p0 as a callanble function + if callable(p0): # Allow the user to supply p0 as a callanble function if ydata.ndim != 1: yy = ydata.ravel() else: @@ -1238,7 +1238,7 @@ def differential_evolution(self, model, xcol=None, ycol=None, p0=None, sigma=Non scale_covar (bool) : whether to automatically scale covariance matrix (leastsq only) output (str, default "fit"): - Specifiy what to return. + Specify what to return. Returns: ( various ) : @@ -1246,13 +1246,13 @@ def differential_evolution(self, model, xcol=None, ycol=None, p0=None, sigma=Non The return value is determined by the *output* parameter. Options are - "fit" just the :py:class:`lmfit.model.ModelFit` instance that contains all relevant information about the fit. - - "row" just a one dimensional numpy array of the fit paraeters interleaved with their + - "row" just a one dimensional numpy array of the fit parameters interleaved with their uncertainties - "full" a tuple of the fit instance and the row. - "data" a copy of the :py:class:`Stoner.Core.DataFile` object with the fit recorded in the - emtadata and optinally as a column of data. + emtadata and optionally as a column of data. - This function is essentially a wrapper around the :py:func:`scipy.optimize.differential_evolution` funtion + This function is essentially a wrapper around the :py:func:`scipy.optimize.differential_evolution` function that presents the same interface as the other Stoner package curve fitting functions. The parent function, however, does not provide the variance-covariance matrix to estimate the fitting errors. To work around this, this function does the initial fit with the differential evolution, but then uses that to give a starting @@ -1373,18 +1373,18 @@ def lmfit(self, model, xcol=None, ycol=None, p0=None, sigma=None, **kargs): scale_covar (bool) : whether to automatically scale covariance matrix (leastsq only) output (str, default "fit"): - Specifiy what to return. + Specify what to return. Returns: ( various ) : The return value is determined by the *output* parameter. Options are - "fit" just the :py:class:`lmfit.model.ModelFit` instance that contains all relevant information about the fit. - - "row" just a one dimensional numpy array of the fit paraeters interleaved with their + - "row" just a one dimensional numpy array of the fit parameters interleaved with their uncertainties - "full" a tuple of the fit instance and the row. - "data" a copy of the :py:class:`Stoner.Core.DataFile` object with the fit recorded in the - emtadata and optinally as a column of data. + emtadata and optionally as a column of data. See Also: - :py:meth:`Stoner.Data.curve_fit` @@ -1513,7 +1513,7 @@ def polyfit( Note: If the x or y columns are not specified (or are None) the the setas attribute is used instead. 
- This method is depricated and may be removed in a future version in favour of the more general
+ This method is deprecated and may be removed in a future version in favour of the more general
           curve_fit
        """
        _ = self._col_args(xcol=xcol, ycol=ycol, scalar=False)
@@ -1547,7 +1547,7 @@ def odr(self, model, xcol=None, ycol=None, **kargs):

        Args:
            model (scipy.odr.Model, lmfit.models.Model or callable):
-                Tje model that describes the data. See below for more details.
+                The model that describes the data. See below for more details.
            xcol (index or None):
                Columns to be used for the x data for the fitting. If not givem defaults to the
                :py:attr:`Stoner.Core.DataFile.setas` x column
@@ -1575,17 +1575,17 @@ def odr(self, model, xcol=None, ycol=None, **kargs):
            header (string or None):
                If this is a string then it is used as the name of the fitted data. (default None)
            output (str, default "fit"):
-                Specifiy what to return.
+                Specify what to return.

        Returns:
            ( various ) :
                The return value is determined by the *output* parameter. Options are
                    - "fit"    just the :py:class:`scipy.odr.Output` instance (default)
-                    - "row"     just a one dimensional numpy array of the fit paraeters interleaved with their
+                    - "row"     just a one dimensional numpy array of the fit parameters interleaved with their
                     uncertainties
                   - "full"    a tuple of the fit instance and the row.
                   - "data"    a copy of the :py:class:`Stoner.Core.DataFile` object with the fit recorded in the
-                      emtadata and optinally
+                      metadata and optionally
                     as a column of data.

       Notes:
@@ -1596,9 +1596,9 @@ def odr(self, model, xcol=None, ycol=None, **kargs):
            -   A subclass or instance of an lmfit.models.Model: the :py:mod:`Stoner.analysis.fitting.models` package
                has a number of useful prebuilt lmfit models that can be used directly by this function.
            -   A callable function which should have a signature f(x,parameter1,parameter2...) and *not* the
-                scip.odr stadnard f(beta,x)
+                scipy.odr standard f(beta,x)

-        This function ois designed to be as compatible as possible with :py:meth:`AnalysisMixin.curve_fit` and
+        This function is designed to be as compatible as possible with :py:meth:`AnalysisMixin.curve_fit` and
        :py:meth:`AnalysisMixin.lmfit` to facilitate easy of switching between them.

        See Also:
diff --git a/Stoner/analysis/fitting/models/__init__.py b/Stoner/analysis/fitting/models/__init__.py
index 4e09038b9..137303171 100755
--- a/Stoner/analysis/fitting/models/__init__.py
+++ b/Stoner/analysis/fitting/models/__init__.py
@@ -43,7 +43,7 @@ def _get_model_(model):
          imported.
       -   A callable object. In this case the callable will be passed to the constructor of Model and a fresh
           Model instance is constructed
-        -   A subclass of lmfit.Model - in whcih case it is instantiated.
+        -   A subclass of lmfit.Model - in which case it is instantiated.
       -   A Model instance - in which case no further action is necessary.
   """
   if isinstance(model, string_types):  # model is a string, so we;ll try importing it now
@@ -85,7 +85,7 @@ def guesser_function(y_data,x=x_data,**kargs):
           return (param_1,param_2,....,pram_n)

    Similarly, the class provides a :py:meth:`_ModelDecorator.hinter` decorator which can be used to mark a
-    function as something that can generate prameter hints for the model.
+    function as something that can generate parameter hints for the model. 
In this case the function should take the form:: def hinter(**kwargs): @@ -163,7 +163,7 @@ def guesser(cls, func): @wraps(func) def guess_proxy(self, *args, **kargs): - """A magic proxy call around a function to guess initial prameters.""" + """A magic proxy call around a function to guess initial parameters.""" guesses = func(*args, **kargs) pars = {x: y for x, y in zip(self.param_names, guesses)} pars = self.make_params(**pars) @@ -183,7 +183,7 @@ def cfg_data_from_ini(inifile, filename=None, **kargs): Path to the ini file to be read. Keyword Arguments: - filename (strig,boolean or None): + filename (string,boolean or None): File to load that contains the data. **kargs: All other keywords are passed to the Data constructor @@ -196,7 +196,7 @@ def cfg_data_from_ini(inifile, filename=None, **kargs): - **filename (str or boolean):** optionally used if *filename* parameter is None. - **xcol (column index):** defines the x-column data for fitting. - **ycol (column index):** defines the y-column data for fitting. - - **yerr (column index):** Optional column with uncertainity values for the data + - **yerr (column index):** Optional column with uncertainty values for the data """ if SafeConfigParser is None: raise RuntimeError("Need to have ConfigParser module installed for this to work.") @@ -266,7 +266,7 @@ def cfg_model_from_ini(inifile, model=None, data=None): imported. - A callable object. In this case the callable will be passed to the constructor of Model and a fresh Model instance is constructed - - A subclass of lmfit.Model - in whcih case it is instantiated. + - A subclass of lmfit.Model - in which case it is instantiated. - A Model instance - in which case no further action is necessary. The returned model is configured with parameter hints for fitting with. The second return value is diff --git a/Stoner/analysis/fitting/models/generic.py b/Stoner/analysis/fitting/models/generic.py index 17dabde9a..45528d6bd 100755 --- a/Stoner/analysis/fitting/models/generic.py +++ b/Stoner/analysis/fitting/models/generic.py @@ -34,7 +34,7 @@ def quadratic(x, a, b, c): r"""Calculate a simple quadratic fitting function. Args: - x (aray): Input data + x (array): Input data a (float): Quadratic term co-efficient b (float): Linear term co-efficient c (float): Constant offset term @@ -58,7 +58,7 @@ def powerLaw(x, A, k): Args: x (array): Input data A (float): Prefactor - k (float): Pwoer + k (float): Power Return: Power law. @@ -95,7 +95,7 @@ def lorentzian_diff(x, A, sigma, mu): Args: x (array): x data - A (flaot): Peak amplitude + A (float): Peak amplitude sigma (float): peak wideth mu (float): peak location in x @@ -120,7 +120,7 @@ class Quadratic(_Quadratic): r"""A Simple quadratic fitting function. Args: - x (aray): Input data + x (array): Input data a (float): Quadratic term co-efficient b (float): Linear term co-efficient c (float): Constant offset term @@ -143,7 +143,7 @@ class PowerLaw(_PowerLaw): Args: x (array): Input data A (float): Prefactor - k (float): Pwoer + k (float): Power Return: Power law. 
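For orientation, a short sketch evaluating the simple power-law function above (the
numbers are arbitrary and the form :math:`Ax^k` is the conventional reading of the
prefactor and power documented here)::

    import numpy as np

    from Stoner.analysis.fitting.models.generic import powerLaw

    x = np.linspace(1.0, 10.0, 50)
    y = powerLaw(x, A=2.0, k=0.5)  # prefactor A, power k, per the docstring
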
@@ -200,7 +200,7 @@ class Lorentzian_diff(Model):

    Args:
        x (array): x data
-        A (flaot): Peak amplitude
+        A (float): Peak amplitude
        sigma (float): peak wideth
        mu (float): peak location in x

diff --git a/Stoner/analysis/fitting/models/superconductivity.py b/Stoner/analysis/fitting/models/superconductivity.py
index 913202fe9..28d96f355 100755
--- a/Stoner/analysis/fitting/models/superconductivity.py
+++ b/Stoner/analysis/fitting/models/superconductivity.py
@@ -160,7 +160,7 @@ def rsj_noiseless(I, Ic_p, Ic_n, Rn, V_offset):
        V_offset(float): Offset volage in measurement

    Returns:
-        (array) Calculated volatages
+        (array) Calculated voltages

    Notes:
        Impleemtns a simple form of the RSJ model for a Josephson Junction:
@@ -194,7 +194,7 @@ def rsj_simple(I, Ic, Rn, V_offset):

    Returns:
        (array):
-            Calculated volatages
+            Calculated voltages

    Notes:
        Impleemtns a simple form of the RSJ model for a Josephson Junction:
@@ -367,7 +367,7 @@ class RSJ_Noiseless(Model):
        V_offset(float): Offset volage in measurement

    Returns:
-        (array) Calculated volatages
+        (array) Calculated voltages

    Notes:
        Impleemtns a simple form of the RSJ model for a Josephson Junction:
@@ -419,7 +419,7 @@ class RSJ_Simple(Model):
        V_offset(float): Offset volage in measurement

    Returns:
-        (array) Calculated volatages
+        (array) Calculated voltages

    Notes:
        Impleemtns a simple form of the RSJ model for a Josephson Junction:
diff --git a/Stoner/analysis/fitting/models/thermal.py b/Stoner/analysis/fitting/models/thermal.py
index 4b58e9a4a..44b070395 100755
--- a/Stoner/analysis/fitting/models/thermal.py
+++ b/Stoner/analysis/fitting/models/thermal.py
@@ -25,7 +25,7 @@


 def arrhenius(x, A, DE):
-    r"""Arrhenius Equation without T dependendent prefactor.
+    r"""Arrhenius Equation without T dependent prefactor.

    Args:
        x (array): temperatyre data in K
@@ -49,7 +49,7 @@ def arrhenius(x, A, DE):


 def nDimArrhenius(x, A, DE, n):
-    r"""Arrhenius Equation without T dependendent prefactor for various dimensions.
+    r"""Arrhenius Equation without T dependent prefactor for various dimensions.

    Args:
        x (array): temperatyre data in K
@@ -95,19 +95,19 @@ def modArrhenius(x, A, DE, n):


 def vftEquation(x, A, DE, x_0):
-    r"""Vogel-Flucher-Tammann (VFT) Equation without T dependendent prefactor.
+    r"""Vogel-Fulcher-Tammann (VFT) Equation without T dependent prefactor.

    Args:
        x (float): Temperature in K
        A (float): Prefactror (not temperature dependent)
        DE (float): Energy barrier in eV
-        x_0 (float): Offset temeprature in K
+        x_0 (float): Offset temperature in K

    Return:
        Rates according the VFT equation.

    The VFT equation is defined as as :math:`\tau = A\exp\left(\frac{DE}{x-x_0}\right)` and represents
-    a modifed form of the Arrenhius distribution with a freezing point of :math:`x_0`.
+    a modified form of the Arrhenius distribution with a freezing point of :math:`x_0`.

    Example:
      .. plot:: samples/Fitting/vftEquation.py
@@ -122,7 +122,7 @@ def vftEquation(x, A, DE, x_0):


 class Arrhenius(Model):
-    r"""Arrhenius Equation without T dependendent prefactor.
+    r"""Arrhenius Equation without T dependent prefactor.

    Args:
        x (array): temperatyre data in K
@@ -159,7 +159,7 @@ def guess(self, data, x=None, **kwargs):


 class NDimArrhenius(Model):
-    r"""Arrhenius Equation without T dependendent prefactor for various dimensions.
+    r"""Arrhenius Equation without T dependent prefactor for various dimensions. 
Args:
        x (array): temperatyre data in K
@@ -235,19 +235,19 @@ def guess(self, data, x=None, **kwargs):


 class VFTEquation(Model):
-    r"""Vogel-Flucher-Tammann (VFT) Equation without T dependendent prefactor.
+    r"""Vogel-Fulcher-Tammann (VFT) Equation without T dependent prefactor.

    Args:
        x (array): Temperature in K
        A (float): Prefactror (not temperature dependent)
        DE (float): Energy barrier in eV
-        x_0 (float): Offset temeprature in K
+        x_0 (float): Offset temperature in K

    Return:
        Rates according the VFT equation.

    The VFT equation is defined as as :math:`\tau = A\exp\left(\frac{DE}{x-x_0}\right)` and represents
-    a modifed form of the Arrenhius distribution with a freezing point of :math:`x_0`.
+    a modified form of the Arrhenius distribution with a freezing point of :math:`x_0`.

    See :py:func:`vftEquation` for an example.

diff --git a/Stoner/analysis/utils.py b/Stoner/analysis/utils.py
index ff37f5454..5f7ae1add 100755
--- a/Stoner/analysis/utils.py
+++ b/Stoner/analysis/utils.py
@@ -146,7 +146,7 @@ def _twoD_fit(xy1, xy2, xmode="linear", ymode="linear", m0=None):
    represented as a 2 x 3 matrix of coordinates. The *xmode* and *ymode* parameters control the possible operations
    to align the data in x and y directions, in addition to which the *xmode* parameter can take the value 'affine'
    which allows a full affine transformation. The returned values are the affine transformation matrix, the
-    uncertainities in this and a function to map co-ordinates with the optimal affine transformation.
+    uncertainties in this and a function to map coordinates with the optimal affine transformation.

    Note:
        *m0* combines both giving an initial value and fixed values for the transformation. If *m0* is set, then it
@@ -155,9 +155,7 @@ def _twoD_fit(xy1, xy2, xmode="linear", ymode="linear", m0=None):
        fixed, however, no scaling is done at all.
    """
    if xy1.shape != xy2.shape or xy1.shape[1] != 2:
-        raise RuntimeError(
-            f"co-ordinate arrays must be equal length with two columns, not {xy1.shape} and {xy2.shape}"
-        )
+        raise RuntimeError(f"coordinate arrays must be equal length with two columns, not {xy1.shape} and {xy2.shape}")
    xvarp = {
        "affine": [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]],
        "linear": [[0, 0], [0, 2]],
@@ -236,7 +234,7 @@ def ApplyAffineTransform(xy, transform):

    Returns:
        (n x 2 array).
-            Transformed co-ordinates.
+            Transformed coordinates.
    """
    xyt = np.row_stack((xy.T, np.ones(len(xy))))
    xyt = np.dot(transform, xyt)
@@ -244,13 +242,13 @@ def ApplyAffineTransform(xy, transform):


 def GetAffineTransform(p, pd):
-    """Calculate an affine transofrm from 2 sets of three points.
+    """Calculate an affine transform from 2 sets of three points.

    Args:
        p (3x2 array):
            Coordinates of points to transform from.
        pd (3x2 array):
-            Cooridinates of points to transform to.
+            Coordinates of points to transform to.

    Returns:
        (2x3 array):
diff --git a/Stoner/compat.py b/Stoner/compat.py
index 912f1ac2c..e9bdcfc66 100755
--- a/Stoner/compat.py
+++ b/Stoner/compat.py
@@ -114,7 +114,7 @@ def bytes2str(data):


 def get_filedialog(what="file", **opts):
-    """Wrap around Tk file dialog to mange creating file dialogs in a cross platform way.
+    """Wrap around Tk file dialog to manage creating file dialogs in a cross platform way. 
Args:
        what (str):
            What sort of a dialog to create - options are 'file','directory','save','files'
diff --git a/Stoner/core/array.py b/Stoner/core/array.py
index 790fba3ae..3ec0f1bb8 100755
--- a/Stoner/core/array.py
+++ b/Stoner/core/array.py
@@ -31,16 +31,16 @@ class DataArray(ma.MaskedArray):
            When a column is declared to contain *x*, *y*, or *z* data, then these attributes access the
            corresponding columns. When written to, the attributes overwrite the existing column's data.
        d,e,f (1D DataArray):
-            Where a column is identified as containing uncertainities for *x*, *y* or *z* data, then these attributes
+            Where a column is identified as containing uncertainties for *x*, *y* or *z* data, then these attributes
            provide a quick access to them. When written to, the attributes overwrite the existing column's data.
        u,v,w (1D DataArray):
            Columns may be identieid as containing vectgor field information. These attributes provide quick
-            access to them, assuming that they are defined as cartesian co-ordinates. When written to, the attributes
+            access to them, assuming that they are defined as cartesian coordinates. When written to, the attributes
            overwrite the existing column's data.
        p,q,r (1D DataArray):
            These attributes access calculated columns that convert :math:`(x,y,z)` data or :math:`(u,v,w)`
-            into :math:`(\phi,\theta,r)` polar co-ordinates. If on *x* and *y* columns are defined, then 2D polar
-            co-ordinates are returned for *q* and *r*.
+            into :math:`(\phi,\theta,r)` polar coordinates. If only *x* and *y* columns are defined, then 2D polar
+            coordinates are returned for *q* and *r*.
        setas (list or string):
            Actually a proxy to a magic class that handles the assignment of columns to different axes and also
            tracks the names of columns (so that columns may be accessed as named items). 
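A short sketch of the named-axis access described above (the filename is hypothetical;
*setas* takes the same single-letter codes as the docstring)::

    from Stoner import Data

    d = Data("scan.txt")  # hypothetical two-column data file
    d.setas = "xy"        # declare the first two columns as x and y
    x_values = d.data.x   # named-axis access to the x column of the DataArray
    radii = d.data.r      # 2D polar radius, since only x and y are assigned
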
@@ -144,7 +144,7 @@ def isrow(self, value):

    @property
    def r(self):
-        r"""Calculate the radius :math:`\rho` co-ordinate if using spherical or polar co-ordinate systems."""
+        r"""Calculate the radius :math:`\rho` coordinate if using spherical or polar coordinate systems."""
        axes = int(self._setas.cols["axes"])
        m = [
            lambda d: None,
@@ -165,7 +165,7 @@ def r(self):

    @property
    def q(self):
-        r"""Calculate the azimuthal :math:`\theta` co-ordinate if using spherical or polar co-ordinates."""
+        r"""Calculate the azimuthal :math:`\theta` coordinate if using spherical or polar coordinates."""
        axes = int(self._setas.cols["axes"])
        m = [
            lambda d: None,
@@ -186,7 +186,7 @@ def q(self):

    @property
    def p(self):
-        r"""Calculate the inclination :math:`\phi` co-ordinate for spherical co-ordinate systems."""
+        r"""Calculate the inclination :math:`\phi` coordinate for spherical coordinate systems."""
        axes = int(self._setas.cols["axes"])
        m = [
            lambda d: None,
@@ -226,7 +226,7 @@ def i(self, value):
            self._ibase = np.array([value])
        elif self.ndim >= 1:
            r = self.shape[0]
-            if isiterable(value) and len(value) == r:  # Iterable and the correct length - assing straight
+            if isiterable(value) and len(value) == r:  # Iterable and the correct length - assign straight
                self._ibase = np.array(value)
            elif isiterable(value) and len(value) > 0:  # Iterable but not the correct length - count from min of value
                self._ibase = np.arange(min(value), min(value) + r)
@@ -265,7 +265,7 @@ def setas(self, value):
        setas(value)

    # ==============================================================================================================
-    ############################                  Speical Methods                ####################################
+    ############################                  Special Methods                ####################################
    # ==============================================================================================================

    def __reduce__(self):
@@ -341,12 +341,12 @@ def __getitem__(self, ix):
        plus the special operations where one columns are named.

        Warning:
-            Teh code almost certainly makes some assumptiuons that DataArray is one or 2D and
+            The code almost certainly makes some assumptions that DataArray is one or 2D and
            may blow up with 3D arrays ! On the other hand it has a special case exception for where
            you give a string as the first index element and assumes that you've forgotten that we're
            row major and tries to do the right thing.
        """
-        # Is this goign to be a single row ?
+        # Is this going to be a single row ?
        single_row = isinstance(ix, int_types) or (
            isinstance(ix, tuple) and len(ix) > 0 and isinstance(ix[0], int_types)
        )
@@ -387,7 +387,7 @@ def __getitem__(self, ix):
            if isinstance(ret, ma.MaskedArray):
                ret = ma.filled(ret)
                return ret.dtype.type(ret)
-        if not isinstance(ret, np.ndarray):  # bugout for scalar resturns
+        if not isinstance(ret, np.ndarray):  # bugout for scalar returns
            return ret
        if ret.ndim >= 2:  # Potentially 2D array here
            if ix[-1] is None:  # Special case for increasing an array dimension
diff --git a/Stoner/core/base.py b/Stoner/core/base.py
index 1fbc33efc..17a212081 100755
--- a/Stoner/core/base.py
+++ b/Stoner/core/base.py
@@ -54,7 +54,7 @@ def _parse_date(string: str) -> datetime.datetime:


 def literal_eval(string: str) -> Any:
-    """Use the asteval module to interpret arbitary strings slightly safely.
+    """Use the asteval module to interpret arbitrary strings slightly safely. 
Args:
        string (str):
@@ -84,7 +84,7 @@ def string_to_type(value: String_Types) -> Any:
    First of all the first character is checked to see if it is a [ or { which would suggest this is a list of
    dictionary. If the value looks like a common boolean value (i.e. Yes, No, True, Fale, On, Off) then it is
    assumed to be a boolean value.
-    Fianlly it interpretation as an int, float or string is tried.
+    Finally, interpretation as an int, float or string is tried.

    Args:
        value (string):
@@ -140,7 +140,7 @@ def __lookup__(

        Keyword Arguments:
            multiple (bool):
-                Return a singl entry ()default, False) or multiple entries
+                Return a single entry (default False) or multiple entries
            exact(bool):
                Do not do a regular expression search, match the exact string only.

@@ -179,7 +179,7 @@ def __lookup__(
        if ret is None or isiterable(ret) and not ret:
            raise KeyError(f"{name} is not a match to any key.")

-        if multiple:  # sort out returing multiple entries or not
+        if multiple:  # sort out returning multiple entries or not
            if not isinstance(ret, list):
                ret = [ret]
        else:
@@ -206,7 +206,7 @@ def __delitem__(self, name: Any) -> None:
        super().__delitem__(self.__lookup__(name))

    def __contains__(self, name: Any) -> bool:
-        """Return True if name either is an exact key or matches when interpreted as a regular experssion."""
+        """Return True if name either is an exact key or matches when interpreted as a regular expression."""
       try:
           name = self.__lookup__(name)
           return True
@@ -266,7 +266,7 @@ class typeHintedDict(regexpDict):
    The CM Physics Group at Leeds makes use of a standard file format that closely matches
    the :py:class:`DataFile` data structure. However, it is convenient for this file format
    to be ASCII text for ease of use with other programs. In order to represent metadata which
-    can have arbitary types, the LabVIEW code that generates the data file from our measurements
+    can have arbitrary types, the LabVIEW code that generates the data file from our measurements
    adds a type hint string. The Stoner Python code can then make use of this type hinting to
    choose the correct representation for the metadata. The type hinting information is retained
    so that files output from Python will retain type hints to permit them to be loaded into
@@ -278,7 +278,7 @@ class typeHintedDict(regexpDict):
        __regexGetType (re):
            Used to extract the type hint from a string
        __regexSignedInt (re):
-            matches type hint strings for signed intergers
+            matches type hint strings for signed integers
        __regexUnsignedInt (re):
            matches the type hint string for unsigned integers
        __regexFloat (re):
@@ -361,7 +361,7 @@ def __init__(self, *args: Any, **kargs: Any) -> None:
        """
        self._typehints = sorteddict()
        super().__init__(*args, **kargs)
-        for key in list(self.keys()):  # Chekc through all the keys and see if they contain
+        for key in list(self.keys()):  # Check through all the keys and see if they contain
            # type hints. 
If they do, move them to the # _typehint dict value = super().__getitem__(key) @@ -370,7 +370,7 @@ def __init__(self, *args: Any, **kargs: Any) -> None: @property def types(self) -> Dict: - """Return the dictrionary of value types.""" + """Return the dictionary of value types.""" return self._typehints def findtype(self, value: Any) -> str: @@ -695,7 +695,7 @@ def __init__(self, *args: Any, **kargs: Any) -> None: # pylint: disable=unused- @property def _public_attrs(self): - """Return a dictionary of attributes setable by keyword argument with thier types.""" + """Return a dictionary of attributes setable by keyword argument with their types.""" try: return self._public_attrs_real # pylint: disable=no-member except AttributeError: @@ -704,7 +704,7 @@ def _public_attrs(self): @_public_attrs.setter def _public_attrs(self, value): - """Privaye property to update the list of public attributes.""" + """Private property to update the list of public attributes.""" self._public_attrs_real.update(dict(value)) # pylint: disable=no-member @property diff --git a/Stoner/core/data.py b/Stoner/core/data.py index aa1056ce0..2ac088ab7 100755 --- a/Stoner/core/data.py +++ b/Stoner/core/data.py @@ -39,7 +39,7 @@ def format(self, key, **kargs): r"""Return the contents of key pretty formatted using :py:func:`format_error`. Args: - fmt (str): Specify the output format, opyions are: + fmt (str): Specify the output format, options are: * "text" - plain text output * "latex" - latex output @@ -51,7 +51,7 @@ def format(self, key, **kargs): mode (string): If "float" (default) the number is formatted as is, if "eng" the value and error is converted to the next samllest power of 1000 and the appropriate SI index appended. If mode is "sci" then a - scientifc, i.e. mantissa and exponent format is used. + scientific, i.e. mantissa and exponent format is used. units (string): A suffix providing the units of the value. If si mode is used, then appropriate si prefixes are prepended to the units string. In LaTeX mode, the units string is embedded in \mathrm diff --git a/Stoner/core/exceptions.py b/Stoner/core/exceptions.py index 999afe86f..47d72dab6 100755 --- a/Stoner/core/exceptions.py +++ b/Stoner/core/exceptions.py @@ -8,7 +8,7 @@ class StonerLoadError(Exception): """An exception thrown by the file loading routines in the Stoner Package. This special exception is thrown when one of the subclasses of :py:class:`Stoner.Core.DataFile` - attmpts and then fails to load some data from disk. Generally speaking this is not a real + attempts and then fails to load some data from disk. Generally speaking this is not a real error, but simply indicates that the file format is not recognised by that particular subclass, and thus another subclass should have a go instead. """ diff --git a/Stoner/core/interfaces.py b/Stoner/core/interfaces.py index 03516f45c..52e3299d4 100755 --- a/Stoner/core/interfaces.py +++ b/Stoner/core/interfaces.py @@ -54,7 +54,7 @@ def __getitem__(self, name): - If name is a string then the metadata dictionary item with the correspondoing key will be returned. - If name is a numpy array then the corresponding rows of the data are returned. - - If a tuple is supplied as the arguement then there are a number of possible behaviours. + - If a tuple is supplied as the argument then there are a number of possible behaviours. - If the first element of the tuple is a string, then it is assumed that it is the nth element of the named metadata is required. 
- Otherwise itis assumed that it is a particular element within a column determined by the second
diff --git a/Stoner/core/methods.py b/Stoner/core/methods.py
index 4b9d8eb5f..a575bd4e9 100755
--- a/Stoner/core/methods.py
+++ b/Stoner/core/methods.py
@@ -97,7 +97,7 @@ def column(self, col):
    def find_col(self, col, force_list=False):
        """Indexes the column headers in order to locate a column of data.shape.

-        Indexing can be by supplying an integer, a string, a regular experssion, a slice or a list of any of the above.
+        Indexing can be by supplying an integer, a string, a regular expression, a slice or a list of any of the above.

        -   Integer indices are simply checked to ensure that they are in range
        -   String indices are first checked for an exact match against a column header
@@ -156,9 +156,9 @@ def find_duplicates(self, xcol=None, delta=1e-8):
            row = np.atleast_1d(search_data[ix])
            if tuple(row) in results:
                continue
-            for iy, (value, delt) in enumerate(zip(row, delta)):
+            for iy, (value, tol) in enumerate(zip(row, delta)):
                # Modify all search data that is close to the current row
-                search_data[np.isclose(search_data[:, iy], value, atol=delt), iy] = value
+                search_data[np.isclose(search_data[:, iy], value, atol=tol), iy] = value
            matches = np.arange(search_data.shape[0])[np.all(search_data == row, axis=1)]
            results[tuple(row)] = matches.tolist()
        return results
@@ -287,7 +287,7 @@ def search(self, xcol=None, value=None, columns=None, accuracy=0.0):
            Uncertainty to accept when testing equalities

        Returns:
-            ndarray: numpy array of matching rows or column values depending on the arguements.
+            ndarray: numpy array of matching rows or column values depending on the arguments.

        Note:
            The value is interpreted as follows:
@@ -314,7 +314,7 @@ def search(self, xcol=None, value=None, columns=None, accuracy=0.0):
        return data

    def section(self, **kargs):
-        """Assuming data has x,y or x,y,z co-ordinates, return data from a section of the parameter space.
+        """Assuming data has x,y or x,y,z coordinates, return data from a section of the parameter space.

        Keyword Arguments:
            x (float, tuple, list or callable):
@@ -324,7 +324,7 @@ def section(self, **kargs):
            z (float, tuple,list or callable):
                z values ,atch this condition are included inth e section
            r (callable): a
-                function that takes a tuple (x,y,z) and returns True if the line is to be incluided in section
+                function that takes a tuple (x,y,z) and returns True if the line is to be included in section

        Returns:
            (DataFile):
@@ -334,7 +334,7 @@ def section(self, **kargs):
        Internally this function is calling :py:meth:`DataFile.search` to pull out matching sections of the data
        array. To extract a 2D section of the parameter space orthogonal to one axis you just specify a condition on
        that axis. Specifying conditions on two axes will return a line of points along the third axis. The final
-        keyword parameter allows you to select data points that lie in an arbitary plane or line. eg::
+        keyword parameter allows you to select data points that lie in an arbitrary plane or line. eg::

            d.section(r=lambda x,y,z:abs(2+3*x-2*y)<0.1 and z==2)

@@ -371,12 +371,12 @@ def select(self, *args, **kargs):
        A single positional argument if present is interpreted as follows:

        -   If a callable function is given, the entire row is presented to it. If it evaluates True then that
-            row is selected. This allows arbitary select operations
+            row is selected. 
This allows arbitrary select operations
        -   If a dict is given, then it and the kargs dictionary are merged and used to select the rows

        Keyword Arguments:
            kargs (various):
-                Arbitary keyword arguments are interpreted as requestion matches against the corresponding
+                Arbitrary keyword arguments are interpreted as requesting matches against the corresponding
                columns. The keyword argument may have an additional *__operator** appended to it which is
                interpreted as follows:

@@ -386,7 +386,7 @@ def select(self, *args, **kargs):
            -   *lt* value doe less than argument value
            -   *ge* value doe greater than or equal to argument value
            -   *le* value doe less than or equal to argument value
-            -   *between* value lies beween the minimum and maximum values of the arguement (the default test
+            -   *between* value lies between the minimum and maximum values of the argument (the default test
                for 2-length tuple arguments)
            -   *ibetween*,*ilbetween*,*iubetween* as above but include both,lower or upper values

@@ -503,7 +503,7 @@ def sort(self, *order, **kargs):
        return self

    def split(self, *args, final="files"):
-        """Recursively splits the current DataFile into a :py:class:`Stoner.Forlders.DataFolder`.
+        """Recursively splits the current DataFile into a :py:class:`Stoner.Folders.DataFolder`.

        Args:
            *args (column index or function):
diff --git a/Stoner/core/property.py b/Stoner/core/property.py
index d90cd09e5..a11f99134 100755
--- a/Stoner/core/property.py
+++ b/Stoner/core/property.py
@@ -28,7 +28,7 @@


 class DataFilePropertyMixin:
-    """Provide the proerties for DataFile Like Objects."""
+    """Provide the properties for DataFile Like Objects."""

    _subclasses = None

@@ -38,7 +38,7 @@ def _repr_html_(self):

        Raises:
            AttributeError:
-                If short representation options are selcted, raise an AttributeError.
+                If short representation options are selected, raise an AttributeError.

        Returns:
            str:
diff --git a/Stoner/core/setas.py b/Stoner/core/setas.py
index b25feec00..aef7acaea 100755
--- a/Stoner/core/setas.py
+++ b/Stoner/core/setas.py
@@ -206,7 +206,7 @@ def column_headers(self):

    @column_headers.setter
    def column_headers(self, value):
-        """Set the colum headers."""
+        """Set the column headers."""
        if isinstance(value, np.ndarray):  # Convert ndarray to list of strings
            value = value.astype(str).tolist()
        elif isinstance(value, string_types):  # Bare strings get turned into lists
@@ -463,7 +463,7 @@ def __repr__(self):
        return self.setas.__repr__()

    def __str__(self):
-        """Our string representation is just fromed by joing the assingments together."""
+        """Our string representation is just formed by joining the assignments together."""
        # Quick string conversion routine
        return "".join(self.setas)

@@ -560,7 +560,7 @@ def __isub__(self, other):
    def find_col(self, col, force_list=False):
        """Indexes the column headers in order to locate a column of data.shape.

-        Indexing can be by supplying an integer, a string, a regular experssion, a slice or a list of any of the above.
+        Indexing can be by supplying an integer, a string, a regular expression, a slice or a list of any of the above. 
- Integer indices are simply checked to ensure that they are in range
        -   String indices are first checked for an exact match against a column header
@@ -584,7 +584,7 @@ def find_col(self, col, force_list=False):
        """
        if isinstance(col, int_types):  # col is an int so pass on
            if col >= len(self.column_headers):
-                raise IndexError(f"Attempting to index a non - existant column {col}")
+                raise IndexError(f"Attempting to index a non-existent column {col}")
            if col < 0:
                col = col % len(self.column_headers)
        elif isinstance(col, string_types):  # Ok we have a string
@@ -624,7 +624,7 @@ def find_col(self, col, force_list=False):
        return col

    def clear(self):
-        """Clear the current setas attrbute.
+        """Clear the current setas attribute.

        Notes:
            Equivalent to doing :py:meth:`setas.unset` with no argument.
@@ -641,7 +641,7 @@ def get(self, key, default=None):  # pylint: disable=arguments-differ
            raise KeyError(f"{key} is not in setas and no default was given.") from err

    def keys(self):
-        """Acess mapping keys.
+        """Access mapping keys.

        Mapping keys are the same as iterating over the unique headers"""
        for c in self._unique_headers:
diff --git a/Stoner/folders/__init__.py b/Stoner/folders/__init__.py
index 7a0e01b59..65443185e 100755
--- a/Stoner/folders/__init__.py
+++ b/Stoner/folders/__init__.py
@@ -1,4 +1,4 @@
-"""Core support for wokring with collections of files in the :py:class:`Stoner.DataFolder`."""
+"""Core support for working with collections of files in the :py:class:`Stoner.DataFolder`."""

 __all__ = ["core", "each", "groups", "metadata", "mixins", "utils", "DataFolder", "PlotFolder"]
diff --git a/Stoner/folders/core.py b/Stoner/folders/core.py
index 9c4a0fe06..3e6253cbe 100755
--- a/Stoner/folders/core.py
+++ b/Stoner/folders/core.py
@@ -68,7 +68,7 @@ def _div_core_(result, other):


 def _sub_core_(result, other):
-    """Implemenet the core logic of the subtraction operator.
+    """Implement the core logic of the subtraction operator.

    Note:
        We're in the base class here, so we don't call super() if we can't handle this, then we're stuffed!
@@ -168,7 +168,7 @@ def _build_select_function(kargs, arg):
    if isinstance(kargs[arg], tuple) and len(kargs[arg] == 2):
        op = "between"  # Assume two length tuples are testing for range
    elif not isinstance(kargs[arg], string_types) and isiterable(kargs[arg]):
-        op = "in"  # Assume other iterables are testing for memebership
+        op = "in"  # Assume other iterables are testing for membership
    else:  # Everything else is exact matches
        op = "eq"
    func = operator[op]
@@ -195,7 +195,7 @@ class baseFolder(MutableSequence):
        depth (int):
            The maximum number of levels of nested groups in the folder
        files (list of str or metadataObject):
-            The indivdual objects or their names if they are not loaded
+            The individual objects or their names if they are not loaded
        instance (metadataObject):
            An empty instance of the data type stored in the folder
        loaded (generator of (str name, metadataObject value):
@@ -996,7 +996,7 @@ def _marshall(self, layout=None, data=None):

        Returns:
            (list or self):
-                If *layout* is defined then returns a copy of the baseFolder with the entires moved around as
+                If *layout* is defined then returns a copy of the baseFolder with the entries moved around as
                defined in the *layout*. If *layout* is None, then moves the contents into a flat list. 
""" if layout is None: @@ -1044,7 +1044,7 @@ def __walk_groups(self, walker, **kargs): replace_terminal (bool): If group is True and the walker function returns an instance of metadataObject then the return value is appended to the files and the group is removed from the current objectFolder. This will unwind - the group heirarchy by one level. + the group hierarchy by one level. only_terminal (bool): Only iterate over the files in the group if the group has no sub-groups. walker_args (dict): @@ -1168,7 +1168,7 @@ def count(self, value): # pylint: disable=arguments-differ def fetch(self): """Preload the contents of the baseFolder. - In the base class this is a NOP becuase the objects are all in memory anyway. + In the base class this is a NOP because the objects are all in memory anyway. """ return self @@ -1233,7 +1233,7 @@ def filter( Keyword Arguments: invert (bool): - Invert the sense of the filter (done by doing an XOR whith the filter condition + Invert the sense of the filter (done by doing an XOR with the filter condition copy (bool): If set True then the :py:class:`DataFolder` is copied before being filtered. \Default is False - work in place. @@ -1502,7 +1502,7 @@ def select(self, *args, **kargs): A single positional argument if present is interpreted as follows: * If a callable function is given, the entire metadataObject is presented to it. - If it evaluates True then that metadataObject is selected. This allows arbitary select operations + If it evaluates True then that metadataObject is selected. This allows arbitrary select operations * If a dict is given, then it and the kargs dictionary are merged and used to select the metadataObjects @@ -1510,7 +1510,7 @@ def select(self, *args, **kargs): recurse (bool): Also recursively slect through the sub groups kargs (varuous): - Arbitary keyword arguments are interpreted as requestion matches against the corresponding + Arbitrary keyword arguments are interpreted as requestion matches against the corresponding metadata values. The keyword argument may have an additional **__operator** appended to it which is interpreted as follows: @@ -1526,7 +1526,7 @@ def select(self, *args, **kargs): - *startswith* metadata value startswith argument value - *endswith* metadata value endwith argument value - *icontains*,*iin*, *istartswith*,*iendswith* as above but case insensitive - - *between* metadata value lies beween the minimum and maximum values of the arguement + - *between* metadata value lies between the minimum and maximum values of the argument (the default test for 2-length tuple arguments) - *ibetween*,*ilbetween*,*iubetween* as above but include both,lower or upper values @@ -1651,7 +1651,7 @@ def sort(self, key=None, reverse=False, recurse=True): Keyword Arguments: key (string, callable or None): Either a string or a callable function. If a string then this is interpreted as a - metadata key, if callable then it is assumed that this is a a function of one paramater x + metadata key, if callable then it is assumed that this is a a function of one parameter x that is a :py:class:`Stoner.Core.metadataObject` object and that returns a key value. If key is not specified (default), then a sort is performed on the filename reverse (bool): @@ -1736,7 +1736,7 @@ def values(self): return self.groups.values() def walk_groups(self, walker, **kargs): - """Walk through a heirarchy of groups and calls walker for each file. + """Walk through a hierarchy of groups and calls walker for each file. 
Args: walker (callable): @@ -1749,7 +1749,7 @@ def walk_groups(self, walker, **kargs): replace_terminal (bool): If group is True and the walker function returns an instance of metadataObject then the return value is appended to the files and the group is removed from the current objectFolder. This will unwind - the group heirarchy by one level. + the group hierarchy by one level. obly_terminal(bool): Only execute the walker function on groups that have no sub-groups inside them (i.e. are terminal groups) diff --git a/Stoner/folders/each.py b/Stoner/folders/each.py index 3bcea22a7..a7c158262 100755 --- a/Stoner/folders/each.py +++ b/Stoner/folders/each.py @@ -12,7 +12,7 @@ def _worker(d, **kwargs): - """Support function to run an arbitary function over a :py:class:`Stoner.Data` object.""" + """Support function to run an arbitrary function over a :py:class:`Stoner.Data` object.""" byname = kwargs.get("byname", False) func = kwargs.get("func", lambda x: x) if byname: @@ -27,7 +27,7 @@ def _worker(d, **kwargs): try: if byname: # Ut's an instance bound moethod ret = func(*args, **kargs) - else: # It's an arbitary function + else: # It's an arbitrary function ret = func(d, *args, **kargs) except Exception as e: # pylint: disable=W0703 # Ok to be broad as user func could do anything ret = e, format_exc() @@ -111,7 +111,7 @@ class Item: """Provides a proxy object for accessing methods on the inividual members of a Folder. Notes: - The pupose of this class is to allow it to be explicit that we're calling methods + The purpose of this class is to allow it to be explicit that we're calling methods on the members of the folder rather than a collective method. This allows us to work around nameclashes. """ @@ -153,7 +153,7 @@ def __call__(self, func, *args, **kargs): Notes: If *_return* is None and the return type of *func* is the same type as the :py:class:`baseFolder` is - storing, then the return value replaces trhe original :py:class:`Stoner.Core.metadataobject` in the + storing, then the return value replaces the original :py:class:`Stoner.Core.metadataobject` in the :py:class:`baseFolder`. If *_result* is True the return value is added to the :py:class:`Stoner.Core.metadataObject`'s metadata under the name of the function. If *_result* is a string. then return result is stored in the corresponding name. @@ -249,7 +249,7 @@ def __getattr__(self, name): def __setattr__(self, name, value): """Proxy call to set an attribute. - Setting the attrbute on .each sets it on all instantiated objects and in _object_attrs. + Setting the attribute on .each sets it on all instantiated objects and in _object_attrs. Args: name(str): Attribute to set @@ -288,7 +288,7 @@ def __getattr_proxy(self, item): item (string): Name of method of metadataObject class to be called Returns: - Either a modifed copy of this objectFolder or a list of return values + Either a modified copy of this objectFolder or a list of return values from evaluating the method for each file in the Folder. """ meth = getattr(self._folder.instance, item, None) @@ -350,7 +350,7 @@ def iter(self, func, *args, **kargs): Notes: If *_return* is None and the return type of *func* is the same type as the :py:class:`baseFolder` is - storing, then the return value replaces trhe original :py:class:`Stoner.Core.metadataobject` in the + storing, then the return value replaces the original :py:class:`Stoner.Core.metadataobject` in the :py:class:`baseFolder`. 
If *_result* is True the return value is added to the :py:class:`Stoner.Core.metadataObject`'s metadata under the name of the function. If *_result* is a string. then return result is stored in the corresponding name. diff --git a/Stoner/folders/groups.py b/Stoner/folders/groups.py index 8a0b999e9..2541aa7df 100755 --- a/Stoner/folders/groups.py +++ b/Stoner/folders/groups.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -"""Provides the classes and support functions for the :py:attr:`Stoner.DataFolder.grups` magic attribute.""" +"""Provides the classes and support functions for the :py:attr:`Stoner.DataFolder.groups` magic attribute.""" __all__ = ["GroupsDict"] @@ -47,7 +47,7 @@ def compress(self, base=None, key=".", keep_terminal=False): base.groups[nk].__deleter__(f) if len(base.groups[nk]) == 0 and len(base.groups[nk].groups) == 0: del base.groups[nk] - self.base.grups = self + self.base.groups = self return self.base def keep(self, name): @@ -74,7 +74,7 @@ def keep(self, name): g.groups.keep(name) if not len(g.groups): del self[grp] - self.base.grups = self + self.base.groups = self return self.base def prune(self, name=None): @@ -92,5 +92,5 @@ def prune(self, name=None): del self[grp] elif not len(g) and not len(g.groups): del self[grp] - self.base.grups = self + self.base.groups = self return self.base diff --git a/Stoner/folders/metadata.py b/Stoner/folders/metadata.py index 2f6f4885e..8834a4e03 100755 --- a/Stoner/folders/metadata.py +++ b/Stoner/folders/metadata.py @@ -184,14 +184,14 @@ def __len__(self): return len(self.common_keys) def __repr__(self): - """Give an informative dispaly of the metadata represenation.""" + """Give an informative display of the metadata representation.""" return ( f"The {type(self._folder).__name__} {self._folder.key} has" + f" {len(self)} common keys of metadata in {len(self._folder)} {self._folder.type.__name__} objects" ) def __delitem__(self, item): - """Attempt to delte item from all members of the folder.""" + """Attempt to delete item from all members of the folder.""" ok = False for entry in self._folder: try: diff --git a/Stoner/folders/mixins.py b/Stoner/folders/mixins.py index 7c5b3ac4f..66f8b3600 100755 --- a/Stoner/folders/mixins.py +++ b/Stoner/folders/mixins.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -"""mixin calsses for :py:class:`Stoner.folders.core.baseFoler`.""" +"""mixin classes for :py:class:`Stoner.folders.core.baseFolder`.""" from __future__ import division __all__ = ["DiskBasedFolderMixin", "DataMethodsMixin", "PlotMethodsMixin"] @@ -58,11 +58,11 @@ def _loader(name, loader=None, typ=None, directory=None): class DiskBasedFolderMixin: - """A Mixin class that implmenets reading metadataObjects from disc. + """A Mixin class that implements reading metadataObjects from disc. Attributes: type (:py:class:`Stoner.Core.metadataObject`): - the type ob object to sotre in the folder (defaults to :py:class:`Stoner.Core.Data`) + the type of object to store in the folder (defaults to :py:class:`Stoner.Core.Data`) extra_args (dict): - Extra arguments to use when instantiatoing the contents of the folder from a file on disk. + Extra arguments to use when instantiating the contents of the folder from a file on disk. pattern (str or regexp): A filename globbing pattern that matches files to load. Default is *.* to match all files with an extension exclude (str or regexp): A filename globbing pattern that matches files to exclude from the folder. Default is *.tdms_index to exclude all tdms index files. read_means (bool): - If true, additional metatdata keys are added that return the mean value of each column of the data. 
+ If true, additional metadata keys are added that return the mean value of each column of the data. This can hep in grouping files where one column of data contains a constant value for the experimental state. Default is False recursive (bool): - Specifies whether to search recurisvely in a whole directory tree. Default is True. + Specifies whether to search recursively in a whole directory tree. Default is True. flatten (bool): - Specify where to present subdirectories as spearate groups in the folder (False) or as a single group + Specify whether to present subdirectories as separate groups in the folder (False) or as a single group (True). Default is False. The :py:meth:`DiskBasedFolderMixin.flatten` method has the equivalent effect and :py:meth:`DiskBasedFolderMixin.unflatten` reverses it. discard_earlier (bool): @@ -211,7 +211,7 @@ def _save(self, grp, trail, root=None): return grp.filename def __lookup__(self, name): - """Addional logic for the looking up names.""" + """Additional logic for looking up names.""" if isinstance(name, string_types): if list(self.basenames).count(name) == 1: return self.__names__()[list(self.basenames).index(name)] @@ -227,7 +227,7 @@ def __getter__(self, name, instantiate=True): the baseFolder class uses a :py:class:`regexpDict` to store objects in. Keyword Arguments: - instatiate (bool): + instantiate (bool): If True (default) then always return a :py:class:`Stoner.Core.Data` object. If False, the __getter__ method may return a key that can be used by it later to actually get the :py:class:`Stoner.Core.Data` object. @@ -395,7 +395,7 @@ def getlist(self, **kargs): def keep_latest(self): """Filter out earlier revisions of files with the same name. - The CM group LabVIEW software will avoid overwirting files when measuring by inserting !#### where #### is an + The CM group LabVIEW software will avoid overwriting files when measuring by inserting !#### where #### is an integer revision number just before the filename extension. This method will look for instances of several files which differ in name only by the presence of the revision number and will kepp only the highest revision number. This is useful if several measurements of the same experiment have been carried out, but only the @@ -442,7 +442,7 @@ def save(self, root=None): Args: root (string): The root directory to start creating files and subdirectories under. If set to None or not specified, - the current folder's diretory attribute will be used. + the current folder's directory attribute will be used. Returns: A list of the saved files @@ -454,7 +454,7 @@ def unload(self, name=None): Args: name(string,int or None): - Specifies the entry to unload from memeory. If set to None all loaded entries are unloaded. + Specifies the entry to unload from memory. If set to None all loaded entries are unloaded. Returns: (DataFolder): returns a copy of itself. @@ -470,16 +470,16 @@ def unload(self, name=None): class DataMethodsMixin: - """Methods for wokring with :py:class:`Stner.Data` in py:class:`Stoner.DataFolder`s.""" + """Methods for working with :py:class:`Stoner.Data` in :py:class:`Stoner.DataFolder`s.""" def concatenate(self, sort=None, reverse=False): - """Concatentates all the files in a objectFolder into a single metadataObject like object. + """Concatenates all the files in an objectFolder into a single metadataObject-like object. 
Keyword Arguments: - sort (column index, None or bool, or clallable function): + sort (column index, None or bool, or callable function): Sort the resultant metadataObject by this column (if a column index), or by the *x* column if None or True, or not at all if False. *sort* is passed directly to the eponymous method as the - *order* paramter. + *order* parameter. reverse (bool): Reverse the order of the sort (defaults to False) @@ -651,7 +651,7 @@ class PlotMethodsMixin: _defaults = {"plots_per_page": 12, "fig_defaults": {"figsize": (8, 6)}} def figure(self, *args, **kargs): - """Pass through for :py:func:`matplotlib.pyplot.figure` but alos takes a note of the arguments for later.""" + """Pass through for :py:func:`matplotlib.pyplot.figure` but also takes a note of the arguments for later.""" self._fig_args = args self._fig_kargs = getattr(self, "fig_defaults", {}) @@ -743,7 +743,7 @@ class DataFolder(DataMethodsMixin, DiskBasedFolderMixin, baseFolder): """Provide an interface to manipulating lots of data files stored within a directory structure on disc. - By default, the members of the DataFolder are isntances of :class:`Stoner.Data`. The DataFolder emplys a lazy + By default, the members of the DataFolder are instances of :class:`Stoner.Data`. The DataFolder employs a lazy open strategy, so that files are only read in from disc when actually needed. .. inheritance-diagram:: DataFolder diff --git a/Stoner/formats/__init__.py b/Stoner/formats/__init__.py index 16be3c714..c23eacaea 100755 --- a/Stoner/formats/__init__.py +++ b/Stoner/formats/__init__.py @@ -12,7 +12,7 @@ positively identify that the file is one that they understand and throw a :py:exception:Stoner.cpre.exceptions.StonerLoadError` if not. -Classes may also provide :py:attr:`Stoner.Core.DataFile.patterns` atribute which is a list of filename glob patterns +Classes may also provide a :py:attr:`Stoner.Core.DataFile.patterns` attribute which is a list of filename glob patterns (e.g. ['*.data','*.txt']) which is used in the file dialog box to filter the list of files. Finally, classes can provide a :py:attr:`Stoner.Core.DataFile.mime_type` attribute which gives a list of mime types that this class might be able to open. This helps identify classes that could be use to load particular file types. diff --git a/Stoner/formats/attocube.py b/Stoner/formats/attocube.py index 76a256adc..9310f24d4 100755 --- a/Stoner/formats/attocube.py +++ b/Stoner/formats/attocube.py @@ -288,7 +288,7 @@ def regrid(self, **kargs): Keyword Parameters: x_range, y_range (tuple of start, stop, points): - Range of x-y co-rdinates to regrid the data to. Used as an argument to :py:func:`np.linspace` to - generate the co-ordinate + Range of x-y coordinates to regrid the data to. Used as an argument to :py:func:`np.linspace` to + generate the coordinate vector. in_place (bool): If True then replace the existing datasets with the regridded data, otherwise create a new copy diff --git a/Stoner/formats/facilities.py b/Stoner/formats/facilities.py index bab79cbc8..363a3c324 100755 --- a/Stoner/formats/facilities.py +++ b/Stoner/formats/facilities.py @@ -27,7 +27,7 @@ class BNLFile(Core.DataFile): Author RCT 12/2011 - The file from BNL must be split into seperate scan files before Stoner can use + The file from BNL must be split into separate scan files before Stoner can use them, a separate python script has been written for this and should be found in data/Python/PythonCode/scripts. 
""" @@ -304,7 +304,7 @@ def _load(self, filename=None, *args, **kargs): if line.startswith("["): # Look for a section header section = line.strip().strip("[]") - if section == "Data": # The Data section has one line of colum headers and then data + if section == "Data": # The Data section has one line of column headers and then data header = next(data)[2:].split("\t") column_headers = [h.strip() for h in header] self.data = np.genfromtxt(data) # we end by reading the raw data diff --git a/Stoner/formats/generic.py b/Stoner/formats/generic.py index a4d792eb8..572f4588f 100755 --- a/Stoner/formats/generic.py +++ b/Stoner/formats/generic.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -"""Implement DataFile classes for soem generic file formats.""" +"""Implement DataFile classes for some generic file formats.""" __all__ = ["CSVFile", "HyperSpyFile", "KermitPNGFile", "TDMSFile"] import csv import contextlib @@ -86,9 +86,9 @@ def _load(self, filename, *args, **kargs): Keyword Arguments: header_line (int): The line in the file that contains the column headers. - If None, then column headers are auotmatically generated. + If None, then column headers are automatically generated. data_line (int): The line on which the data starts - data_delim (string): Thge delimiter used for separating data values + data_delim (string): The delimiter used for separating data values header_delim (strong): The delimiter used for separating header values Returns: @@ -141,7 +141,7 @@ def save(self, filename=None, **kargs): """Override the save method to allow CSVFiles to be written out to disc (as a mininmalist output). Args: - filename (string): Fielname to save as (using the same rules as for the load routines) + filename (string): Filename to save as (using the same rules as for the load routines) Keyword Arguments: deliminator (string): Record deliniminator (defaults to a comma) diff --git a/Stoner/formats/instruments.py b/Stoner/formats/instruments.py index ddf509e1a..40f2575fa 100755 --- a/Stoner/formats/instruments.py +++ b/Stoner/formats/instruments.py @@ -484,7 +484,7 @@ def _read_loginfo(self, f): self._header[key] = value def _load(self, filename=None, *args, **kargs): - """Read a .scf file produced by the Renishaw Raman system (amongs others). + """Read a .scf file produced by the Renishaw Raman system (among others). Args: filename (string or bool): @@ -667,7 +667,7 @@ def __parse_VSM(self, header_line=3, data_line=3, header_delim=","): Keyword Arguments: header_line (int): - The line in the file that contains the column headers. If None, then column headers are auotmatically + The line in the file that contains the column headers. If None, then column headers are automatically generated. 
data_line (int): The line on which the data starts @@ -776,7 +776,7 @@ def _load(self, filename=None, *args, **kargs): self.filename = filename sh = re.compile(r"\[(.+)\]") # Regexp to grab section name with FileManager(self.filename, errors="ignore", encoding="utf-8") as f: # Read filename linewise - if f.readline().strip() != ";RAW4.00": # Check we have the corrrect fileformat + if f.readline().strip() != ";RAW4.00": # Check we have the correct file format raise Core.StonerLoadError("File Format Not Recognized !") drive = 0 for line in f: # for each line diff --git a/Stoner/formats/maximus.py b/Stoner/formats/maximus.py index 5a2d45c9e..8277c22a9 100755 --- a/Stoner/formats/maximus.py +++ b/Stoner/formats/maximus.py @@ -467,7 +467,7 @@ def _read_images(files, header): Returns: data (ndarray): 2D or 3D data. - dims (tuple of 1D arays): 2 or 3 1D arrays corresponding to the dimensions of data. + dims (tuple of 1D arrays): 2 or 3 1D arrays corresponding to the dimensions of data. """ xims = list(files) scandef = header["ScanDefinition"] @@ -497,7 +497,7 @@ def _read_pointscan(files, header): Returns: data (ndarray): 2D or 3D data. - dims (tuple of 1D arays): 2 or 3 1D arrays corresponding to the dimensions of data. + dims (tuple of 1D arrays): 2 or 3 1D arrays corresponding to the dimensions of data. """ xsps = list(files) scandef = header["ScanDefinition"] diff --git a/Stoner/formats/rigs.py b/Stoner/formats/rigs.py index 2681608da..0c440c27b 100755 --- a/Stoner/formats/rigs.py +++ b/Stoner/formats/rigs.py @@ -117,7 +117,7 @@ class MokeFile(Core.DataFile): # .. note:: # Subclasses with priority<=32 should make some positive identification that they have the right # file type before attempting to read data. - priotity = 16 + priority = 16 #: pattern (list of str): A list of file extensions that might contain this type of file. Used to construct # the file load/save dialog boxes. 
patterns = ["*.dat", "*.txt"] @@ -193,7 +193,7 @@ def _load(self, filename, *args, **kargs): raise Core.StonerLoadError("Not a Focussed MOKE file !") del label[0] for k, v in zip(label, value): - self.metadata[k] = v # Create metatdata from first 2 lines + self.metadata[k] = v # Create metadata from first 2 lines column_headers = [x.strip() for x in bytes2str(f.readline()).split("\t")] self.data = np.genfromtxt(f, dtype="float", delimiter="\t", invalid_raise=False) self.column_headers = column_headers diff --git a/Stoner/formats/simulations.py b/Stoner/formats/simulations.py index bfda84718..9ffb7c6e0 100755 --- a/Stoner/formats/simulations.py +++ b/Stoner/formats/simulations.py @@ -72,7 +72,7 @@ def _load(self, filename=None, *args, **kargs): raise StonerLoadError("Not a GenXFile") for ix, line in enumerate(datafile): line = line.strip() - if line in ["# Headers:", "# Column lables:"]: + if line in ["# Headers:", "# Column labels:", "# Column lables:"]: # keep accepting the misspelt header found in existing files line = next(datafile)[1:].strip() break else: @@ -141,7 +141,7 @@ def _load(self, filename=None, *args, **kargs): chk = numbers[0] if ( chk != [1234567.0, 123456789012345.0][self.metadata["representation size"] // 4 - 1] - ): # If we have a good check number we can carry on, otherwise try the other endianess + ): # If we have a good check number we can carry on, otherwise try the other endianness numbers = np.frombuffer(bin_data, dtype=f">f{self.metadata['representation size']}") chk = numbers[0] if chk != [1234567.0, 123456789012345.0][self.metadata["representation size"] // 4 - 1]: diff --git a/Stoner/plot/__init__.py b/Stoner/plot/__init__.py index d46f9b0cd..0eb240bba 100644 --- a/Stoner/plot/__init__.py +++ b/Stoner/plot/__init__.py @@ -1,7 +1,7 @@ -"""Stoner.plot sub-package - contains classes and functions for visuallising data. -Most of the plotting functionailty is provided by the :class:`.PlotMixin` mixin class which is available through the -:py:class:`Stoner.Data` classs. +"""Stoner.plot sub-package - contains classes and functions for visualising data. +Most of the plotting functionality is provided by the :class:`.PlotMixin` mixin class which is available through the +:py:class:`Stoner.Data` class. The :mod:`.formats` module provides a set of template classes for producing different plot styles and formats. The :py:mod:`Stoner.plot.util` module provides diff --git a/Stoner/plot/core.py b/Stoner/plot/core.py index 07cbd4cc8..6a30e61da 100755 --- a/Stoner/plot/core.py +++ b/Stoner/plot/core.py @@ -42,11 +42,11 @@ def __mpl3DQuiver(x_coord, y_coord, z_coord, u_comp, v_comp, w_comp, **kargs): Args: - x_coord_coord_coord (array): - x data co-ordinates + x_coord (array): + x data coordinates y_coord (array): - y data co-ordinates + y data coordinates z_coord (array): - z data co-ordinates + z data coordinates u_comp (array): u data vector field component v_comp (array): @@ -77,7 +77,7 @@ class PlotMixin: Args: args(tuple): - Arguements to pass to :py:meth:`Stoner.Core.DataFile.__init__` + Arguments to pass to :py:meth:`Stoner.Core.DataFile.__init__` kargs (dict): keyword arguments to pass to \b DataFile.__init__ @@ -211,7 +211,7 @@ def labels(self, value): def showfig(self): """Return either the current figure or self or None. 
- The return value depeds on whether the attribute is True or False or None.""" + The return value depends on whether the attribute is True or False or None.""" if self._showfig is None or get_option("no_figs"): return None if self._showfig: @@ -298,7 +298,7 @@ def _surface_plotter(self, x_coord, y_coord, z_coord, **kargs): Args: x_coord, y_coord, z_coord (array): - Data point co-ordinates + Data point coordinates kargs (dict): Other keywords to pass through @@ -347,7 +347,7 @@ def _vector_field_plotter(self, x_coord, y_coord, z_coord, u_comp, v_comp, w_com Args: x_coord, y_coord, z_coord (array): - Data point co-ordinates + Data point coordinates u_comp, v_comp, w_comp (array): U,V,W vector field component @@ -456,7 +456,7 @@ def _fix_kargs(self, function=None, defaults=None, otherkargs=None, **kargs): if k not in otherkargs and k not in defaults: del kargs[k] - # Defaults now a dictionary of default arugments overlaid with keyword argument values + # Defaults now a dictionary of default arguments overlaid with keyword argument values # Now inspect the plotting function to see what it takes. if function is None: function = defaults["plotter"] @@ -470,7 +470,7 @@ plt.close(plt.gcf()) (args, _, kwargs) = getargspec(function)[:3] - # Manually overide the list of arguments that the plotting function takes if it takes keyword dictionary + # Manually override the list of arguments that the plotting function takes if it takes keyword dictionary if isinstance(otherkargs, (list, tuple)) and kwargs is not None: args.extend(otherkargs) nonkargs = dict() @@ -501,7 +501,7 @@ def __getattr__(self, name): - xlim - the X axis limits - ylim - the Y axis limits - All other attrbiutes are passed over to the parent class + All other attributes are passed over to the parent class """ func = None o_name = name @@ -589,7 +589,7 @@ def __setattr__(self, name, value): Args: - name (string): The name of the attribute to set. The cuirrent attributes are supported: + name (string): The name of the attribute to set. The current attributes are supported: - fig - set the plt figure isntance to use + - fig - set the plt figure instance to use - xlabel - set the X axis label text - ylabel - set the Y axis label text - title - set the plot title @@ -671,7 +671,7 @@ def add_column(self, column_data, header=None, index=None, **kargs): Set the type of column (x,y,z data etc - see :py:attr:`Stoner.Core.DataFile.setas`) Returns: - A :py:class:`DataFile` instance with the additonal column inserted. + A :py:class:`DataFile` instance with the additional column inserted. Note: Like most :py:class:`DataFile` methods, this method operates in-place in that it also modifies @@ -832,11 +832,11 @@ def griddata( zlim (tuple): - The ylimits + The zlimits method (string): - Type of interploation to use, default is linear + Type of interpolation to use, default is linear - ReturnsL + Returns: (X,Y,Z) or (X,Y,Z,M): - three two dimensional arrays of the co-ordinates of the interpolated data or 4 three diemensional + three two dimensional arrays of the coordinates of the interpolated data or 4 three dimensional arrays of the interpolated data Notes: @@ -938,7 +938,7 @@ def image_plot(self, xcol=None, ycol=None, zcol=None, shape=None, xlim=None, yli ylim (tuple): The ylimits, defaults to automatically determined from data xlabel (string): - X axes label. Deafult is None - guess from xvals or metadata + X axes label. 
Default is None - guess from xvals or metadata ylabel (string): Y axes label, Default is None - guess from metadata zlabel (string): @@ -1130,9 +1130,9 @@ def plot_matrix( show_plot (bool): True Turns on interactive plot control title (string): - Optional parameter that specfies the plot title - otherwise the current DataFile filename is used + Optional parameter that specifies the plot title - otherwise the current DataFile filename is used xlabel (string): - X axes label. Deafult is None - guess from xvals or metadata + X axes label. Default is None - guess from xvals or metadata ylabel (string): Y axes label, Default is None - guess from metadata zlabel (string): Z axis label, Default is None - guess from metadata figure (matplotlib figure): False then a new figure is always used, otherwise it will default to using the last figure used by this DataFile object. plotter (callable): - Optional arguement that passes a plotting function into the routine. Sensible choices might be - plt.plot (default), py.semilogy, plt.semilogx + Optional argument that passes a plotting function into the routine. Sensible choices might be + plt.plot (default), plt.semilogy, plt.semilogx kwords (dict): A dictionary of other keyword arguments to pass into the plot function. @@ -1151,8 +1151,8 @@ - The matplotib figure with the data plotted + The matplotlib figure with the data plotted """ - # Sortout yvals values + # Sort out yvals values - if isinstance(yvals, int): # Int means we're sepcifying a data row - if rectang is None: # we need to intitialise the rectang + if isinstance(yvals, int): # Int means we're specifying a data row + if rectang is None: # we need to initialise the rectang rectang = (yvals + 1, 0) # We'll sort the column origin later elif ( isinstance(rectang, tuple) and rectang[1] <= yvals @@ -1170,7 +1170,7 @@ headers = self.column_headers[1:] else: headers = self.column_headers - yvals = np.array([float(x) for x in headers]) # Ok try to construct yvals aray + yvals = np.array([float(x) for x in headers]) # Ok try to construct yvals array else: - raise RuntimeError("uvals must be either an integer, list, tuple, numpy array or None") + raise RuntimeError("yvals must be either an integer, list, tuple, numpy array or None") - # Sort out xvls values + # Sort out xvals values @@ -1273,11 +1273,11 @@ def plot_xy(self, xcol=None, ycol=None, fmt=None, xerr=None, yerr=None, **kargs) xerr,yerr (index): - C olumns of data to get x and y errorbars from. Setting these turns the default plotter to plt.errorbar + Columns of data to get x and y errorbars from. Setting these turns the default plotter to plt.errorbar xlabel (string): - X axes label. Deafult is None - guess from xvals or metadata + X axes label. Default is None - guess from xvals or metadata ylabel (string): Y axes label, Default is None - guess from metadata title (string): - Optional parameter that specfies the plot title - otherwise the current DataFile filename is used + Optional parameter that specifies the plot title - otherwise the current DataFile filename is used plotter (function): Function to use to plot data. Defaults to plt.plot unless error bars are set show_plot (bool): @@ -1297,7 +1297,7 @@ def plot_xy(self, xcol=None, ycol=None, fmt=None, xerr=None, yerr=None, **kargs) Other arguments are passed on to the plotter. Returns: - A matplotlib.figure isntance + A matplotlib.figure instance """ c = self._fix_cols(xcol=xcol, ycol=ycol, xerr=xerr, yerr=yerr, scalar=False, **kargs) (kargs["xerr"], kargs["yerr"]) = (c.xerr, c.yerr) @@ -1523,7 +1523,7 @@ def plot_xyz(self, xcol=None, ycol=None, zcol=None, shape=None, xlim=None, ylim= Keyword Arguments: shape (tuple): - Defines the shape of the surface (i.e. the number of X and Y value. If not procided or None, then + Defines the shape of the surface (i.e. the number of X and Y values). 
If not provided or None, then the routine will attempt to calculate these from the data provided xlim (tuple): @@ -1535,7 +1535,7 @@ show_plot (bool): True Turns on interactive plot control title (string): - Optional parameter that specfies the plot title - otherwise the current DataFile filename is used + Optional parameter that specifies the plot title - otherwise the current DataFile filename is used save_filename (string): Filename used to save the plot figure (matplotlib figure): @@ -1543,15 +1543,15 @@ False then a new figure is always used, otherwise it will default to using the last figure used by this DataFile object. plotter (callable): - Optional arguement that passes a plotting function into the routine. Default is a 3d surface + Optional argument that passes a plotting function into the routine. Default is a 3d surface plotter, but contour plot and pcolormesh also work. projection (string or None): - Whether to use a 3D projection or regular 2D axes (deault is 3D) + Whether to use a 3D projection or regular 2D axes (default is 3D) **kargs (dict): A dictionary of other keyword arguments to pass into the plot function. Returns: - A matplotlib.figure isntance + A matplotlib.figure instance """ if not _3D: raise RuntimeError("3D plotting Not available. Install matplotlib toolkits") @@ -1637,7 +1637,7 @@ def plot_xyuv(self, xcol=None, ycol=None, ucol=None, vcol=None, wcol=None, **kar show_plot (bool): True Turns on interactive plot control title (string): - Optional parameter that specfies the plot title - otherwise the current DataFile filename is used + Optional parameter that specifies the plot title - otherwise the current DataFile filename is used save_filename (string): Filename used to save the plot figure (matplotlib figure): @@ -1647,7 +1647,7 @@ no_quiver (bool): Do not overlay quiver plot (in cases of dense meshes of points) plotter (callable): - Optional arguement that passes a plotting function into the routine. Default is a 3d surface plotter, + Optional argument that passes a plotting function into the routine. Default is a 3d surface plotter, but contour plot and pcolormesh also work. **kargs (dict): A dictionary of other keyword arguments to pass into the plot function. @@ -1705,7 +1705,7 @@ def plot_xyzuvw(self, xcol=None, ycol=None, zcol=None, ucol=None, vcol=None, wco figure is always used, otherwise it will default to using the last figure used by this DataFile object. plotter (callable): - Optional arguement that passes a plotting function into the routine. Sensible choices might be - plt.plot (default), py.semilogy, plt.semilogx + Optional argument that passes a plotting function into the routine. Sensible choices might be + plt.plot (default), plt.semilogy, plt.semilogx kargs (dict): A dictionary of other keyword arguments to pass into the plot function. @@ -1930,7 +1930,7 @@ def quiver_plot(self, xcol=None, ycol=None, ucol=None, vcol=None, **kargs): Keyword Arguments: xlabel (string): - X axes label. Deafult is None - guess from xvals or metadata + X axes label. Default is None - guess from xvals or metadata ylabel (string): Y axes label, Default is None - guess from metadata zlabel (string): @@ -2009,7 +2009,7 @@ def subplot(self, *args, **kargs): Args: rows (int): If this is the only argument, then a three digit number representing - the rows,columns,index arguments. 
If seperate rows, column and index are provided, + the rows,columns,index arguments. If separate rows, column and index are provided, then this is the number of rows of sub-plots in one figure. columns (int): The number of columns of sub-plots in one figure. diff --git a/Stoner/plot/formats.py b/Stoner/plot/formats.py index fcb13296c..74cedc135 100755 --- a/Stoner/plot/formats.py +++ b/Stoner/plot/formats.py @@ -52,7 +52,7 @@ def _remove_dots(key): class TexFormatter(Formatter): r"""An axis tick label formatter that emits Tex formula mode code. - Formating is set so that large numbers are registered as :math`\times 10^{power}` + Formatting is set so that large numbers are registered as :math:`\times 10^{power}` rather than using E notation.""" def __call__(self, value, pos=None): @@ -161,7 +161,7 @@ class DefaultPlotStyle(MutableMapping): show_title (bool): show the title show_legend (bool): show the legend stylename (string): Name of the matplotlib style to use - stylesheet (list): Calculated list of stylesheets found by traversing the class heirarchy + stylesheet (list): Calculated list of stylesheets found by traversing the class hierarchy Example .. plot:: samples/plotstyles/default.py @@ -241,7 +241,7 @@ def __call__(self, **kargs): self.update({_add_dots(k): v}) def __delitem__(self, name): - """Clear any setting that overides the default for *name*.""" + """Clear any setting that overrides the default for *name*.""" if hasattr(self, name): default = getattr(type(self)(), name) setattr(self, name, default) @@ -321,7 +321,7 @@ def _allowed_attr(self, x, template=False): @property def stylesheet(self): - """Horribly hacky method to traverse over the class heirarchy for style sheet names.""" + """Horribly hacky method to traverse over the class hierarchy for style sheet names.""" if ( self._stylesheet is not None and self._stylesheet[0] == self.stylename ): # Have we cached a copy of our stylesheets ? @@ -362,7 +362,7 @@ def stylesheet(self): @stylesheet.setter def stylesheet(self, value): # pylint: disable=r0201 """Just stop the stylesheet from being set.""" - raise AttributeError("Can't set the stylesheet value, this is dervied from the stylename aatribute.") + raise AttributeError("Can't set the stylesheet value, this is derived from the stylename attribute.") def clear(self): - """Reset everything back o defaults.""" + """Reset everything back to defaults.""" @@ -491,7 +491,7 @@ def customise(self): """Implement hook to customise plot. This method is supplied for sub classes to override to provide additional - plot customisation after the rc paramaters are updated from the class and + plot customisation after the rc parameters are updated from the class and instance attributes.""" def customise_axes(self, ax, plot): @@ -695,7 +695,7 @@ class SeabornPlotStyle(DefaultPlotStyle): Attributes: stylename (str): - The seaborn plot stlye to use - darkgrid, whitegrid, dark, white, or ticks + The seaborn plot style to use - darkgrid, whitegrid, dark, white, or ticks context (str): The seaborn plot context for scaling elements - paper,notebook,talk, or poster palette (str): diff --git a/Stoner/plot/utils.py b/Stoner/plot/utils.py index 8ceddd0af..a9b7a0bbd 100755 --- a/Stoner/plot/utils.py +++ b/Stoner/plot/utils.py @@ -223,7 +223,7 @@ def joy_division(x, y, z, **kargs): Parameters: x,y,z (1D arrays): - x y and z co-ordinates. data should be arranged so that z(x,y=constant) + x y and z coordinates. 
data should be arranged so that z(x,y=constant) Keyword Parameters: ax (matplotlib.Axes): @@ -244,7 +244,7 @@ Returns: None - Constructurs a mono-chromatic waterfall plot in the style of the Joy Division album cover of Pulsar signals. + Constructs a mono-chromatic waterfall plot in the style of the Joy Division album cover of Pulsar signals. """ ax = kargs.pop("ax", plt.gca()) y_shift = kargs.pop("y_shift", (z.max() - z.min()) / np.unique(y).size) diff --git a/Stoner/tools/classes.py b/Stoner/tools/classes.py index cf99735ea..bea0b0c0e 100755 --- a/Stoner/tools/classes.py +++ b/Stoner/tools/classes.py @@ -48,7 +48,7 @@ def __setattr__(self, name: str, value: Any) -> None: self[name] = value def __getattr__(self, name: str) -> Any: - """Get an attrbute (equivalent to getting an item).""" + """Get an attribute (equivalent to getting an item).""" try: return self[name] except KeyError as err: @@ -189,17 +189,17 @@ def __setitem__(self, name: Union[int, IterableType, slice], value: Any) -> None if isiterable(name) or isinstance(name, slice): if not isiterable(value) or not all_type(value, self._type): raise TypeError( - f"Elelements of this list should be of type {self._type} and must set " + f"Elements of this list should be of type {self._type} and must set " + "the correct number of elements" ) elif not isinstance(value, self._type): - raise TypeError(f"Elelements of this list should be of type {self._type}") + raise TypeError(f"Elements of this list should be of type {self._type}") self._store[name] = value def extend(self, values: IterableType) -> None: # pylint: disable=arguments-differ """Extend the list and do some type checking.""" if not isiterable(values) or not all_type(values, self._type): - raise TypeError(f"Elelements of this list should be of type {self._type}") + raise TypeError(f"Elements of this list should be of type {self._type}") self._store.extend(values) def index(self, value: Any, start: int = 0, end: Optional[int] = None) -> int: # pylint: disable=arguments-differ @@ -211,7 +211,7 @@ def index(self, value: Any, start: int = 0, end: Optional[int] = None) -> int: def insert(self, index: int, value: Any) -> None: # pylint: disable=arguments-differ """Insert an element and do some type checking.""" if not isinstance(value, self._type): - raise TypeError(f"Elelements of this list should be of type {self._type}") + raise TypeError(f"Elements of this list should be of type {self._type}") self._store.insert(index, value) @@ -294,7 +294,7 @@ def copy_into(source: "DataFile", dest: "DataFile") -> "DataFile": Args: source(DataFile): The DataFile object to be copied from - dest (DataFile): The DataFile objrct to be changed by recieving the copiued data. + dest (DataFile): The DataFile object to be changed by receiving the copied data. Returns: The modified *dest* DataFile. diff --git a/Stoner/tools/decorators.py b/Stoner/tools/decorators.py index f602e23e1..d0a412af7 100755 --- a/Stoner/tools/decorators.py +++ b/Stoner/tools/decorators.py @@ -158,7 +158,7 @@ def gen_func(self, *args, **kargs): def array_file_property(workingfunc): - """Wrap an arbitary callbable to make it a bound method of this class. + """Wrap an arbitrary callable to make it a bound method of this class. Args: workingfunc (callable): @@ -209,7 +209,7 @@ def deleter(self): def image_array_adaptor(workingfunc): - """Wrap an arbitary callbable to make it a bound method of this class. + """Wrap an arbitrary callable to make it a bound method of this class. 
Args: workingfunc (callable): @@ -251,7 +251,7 @@ def gen_func(self, *args, **kwargs): if isinstance(r, type(self)) and np.shares_memory(r, self): # Assume everything was inplace return r r = r.view(type(self)) - sm = self.metadata.copy() # Copy the currenty metadata + sm = self.metadata.copy() # Copy the current metadata sm.update(r.metadata) # merge in any new metadata from the call r.metadata = sm # and put the returned metadata as the merged data # NB we might not be returning an ndarray at all here ! @@ -341,7 +341,7 @@ def class_wrapper( attr_pass=array_file_attr, exclude_below=None, ): - """Create entries in the current class for all attrbutes of klass that are not already defined. + """Create entries in the current class for all attributes of klass that are not already defined. Keyword Arguments: target (type): adaptor (callable): A factory function to make methods to the the connection to the underlying attributes. - Reutrns: + Returns: class: Modified class definition. @@ -395,7 +395,7 @@ def keep_return_type(func): def clones(func): - """Mark the mthod as one that expects it's input to be cloned.""" + """Mark the method as one that expects its input to be cloned.""" func.clones = True return func @@ -417,7 +417,7 @@ def fix_signature(proxy_func, wrapped_func): def make_Data(*args, **kargs): - """Return an instance of Stoner.Data passig through constructor arguments. + """Return an instance of Stoner.Data passing through constructor arguments. - Calling make_Data(None) is a speical case to return the Data class ratther than an instance + Calling make_Data(None) is a special case to return the Data class rather than an instance """ if len(args) == 1 and args[0] is None: return import_module("Stoner.core.data").Data diff --git a/Stoner/tools/file.py b/Stoner/tools/file.py index 510a28bcc..193d7c289 100755 --- a/Stoner/tools/file.py +++ b/Stoner/tools/file.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -"""General fle related tools.""" +"""General file related tools.""" from importlib import import_module import io import os @@ -43,7 +43,7 @@ def file_dialog( mode (string): The mode of the file operation 'r' or 'w' filename (str, Path, bool): - Tje starting filename + The starting filename - filetype (submclass of metadataObject, string): The filetype to open with - used to selectr file patterns + filetype (subclass of metadataObject, string): The filetype to open with - used to select file patterns basclass (subclass of metadataObject): @@ -161,7 +161,7 @@ def auto_load_classes( if isinstance(test, metadataObject): test["Loaded as"] = cls_name if debug: - print(f"Test matadata: {test.metadata}") + print(f"Test metadata: {test.metadata}") break except StonerLoadError as e: @@ -197,7 +197,7 @@ def get_mime_type(filename: Union[pathlib.Path, str], debug: bool = False) -> Op class FileManager: - """Simple context manager that allows opening files or working with alreadt open string buffers.""" + """Simple context manager that allows opening files or working with already open string buffers.""" def __init__(self, filename, *args, **kargs): """Store the parameters passed to the context manager.""" diff --git a/Stoner/tools/formatting.py b/Stoner/tools/formatting.py index bb5149650..9a980042f 100755 --- a/Stoner/tools/formatting.py +++ b/Stoner/tools/formatting.py @@ -73,13 +73,13 @@ def format_error(value: Numeric, error: Optional[Numeric] = None, **kargs: Any) Args: value (float): - The value to be formated + The value to be formatted error (float): The uncertainty in the value Keyword Arguments: fmt (str): - Specify the output format, opyions are: + Specify the output format, options are: - "text" - plain text output - "latex" - 
latex output - "html" - html entities @@ -89,7 +89,7 @@ def format_error(value: Numeric, error: Optional[Numeric] = None, **kargs: Any) mode (string): If "float" (default) the number is formatted as is, if "eng" the value and error is converted - to the next samllest power of 1000 and the appropriate SI index appended. If mode is "sci" then a - scientifc, i.e. mantissa and exponent format is used. + to the next smallest power of 1000 and the appropriate SI index appended. If mode is "sci" then a + scientific, i.e. mantissa and exponent format is used. units (string): A suffix providing the units of the value. If si mode is used, then appropriate si prefixes are prepended to the units string. In LaTeX mode, the units string is embedded in \mathrm @@ -98,7 +98,7 @@ def format_error(value: Numeric, error: Optional[Numeric] = None, **kargs: Any) inside the math-mode markers, but not embedded in \mathrm. Returns: - String containing the formated number with the eorr to one s.f. and value to no more d.p. than the error. + String containing the formatted number with the error to one s.f. and value to no more d.p. than the error. """ mode = kargs.get("mode", "float") units = kargs.get("units", "") @@ -115,7 +115,7 @@ prefix = escape_func(prefix) units = escape_func(units) - # Sort out special fomatting for different modes + # Sort out special formatting for different modes if mode == "float": # Standard suffix_val = "" elif mode == "eng": # Use SI prefixes @@ -191,7 +191,7 @@ def format_val(value: Numeric, **kargs: Any) -> str: Keyword Arguments: fmt (str): - Specify the output format, opyions are: + Specify the output format, options are: - "text" - plain text output - "latex" - latex output - "html" - html entities @@ -201,7 +201,7 @@ mode (string): If "float" (default) the number is formatted as is, if "eng" the value and error is converted - to the next samllest power of 1000 and the appropriate SI index appended. If mode is "sci" then a - scientifc, i.e. mantissa and exponent format is used. + to the next smallest power of 1000 and the appropriate SI index appended. If mode is "sci" then a + scientific, i.e. mantissa and exponent format is used. units (string): A suffix providing the units of the value. If si mode is used, then appropriate si prefixes are prepended to the units string. In LaTeX mode, the units string is embedded in \mathrm @@ -289,7 +289,7 @@ def quantize(number: NumericArray, quantum: Numeric) -> NumericArray: def tex_escape(text: str) -> str: - """Escapes spacecial text charcters in a string. + """Escape special text characters in a string. Parameters: text (str): diff --git a/Stoner/tools/tests.py b/Stoner/tools/tests.py index ffe88f43e..7690cfd33 100755 --- a/Stoner/tools/tests.py +++ b/Stoner/tools/tests.py @@ -114,7 +114,7 @@ def isiterable(value: Any) -> bool: Args: value : - Entitiy to check if it is iterable + Entity to check if it is iterable Returns: (bool): @@ -165,7 +165,7 @@ def isproperty(obj: Any, name: str) -> bool: obj (instance or class): Thing that has the attribute to check name (str): - Name of the attrbiute that might be a property + Name of the attribute that might be a property Returns: (bool): @@ -185,7 +185,7 @@ def isTuple(obj: Any, *args: type, strict: bool = True) -> bool: obj(object): The object to check *args(type): - Each of the suceeding arguments are used to determine the expected type of each element. + Each of the succeeding arguments are used to determine the expected type of each element. 
- Keywoprd Arguments: + Keyword Arguments: strict(bool): diff --git a/Stoner/tools/widgets.py b/Stoner/tools/widgets.py index f49c9ab15..93ae24d3e 100755 --- a/Stoner/tools/widgets.py +++ b/Stoner/tools/widgets.py @@ -189,7 +189,7 @@ def __call__(self, data, xcol, accuracy, invert=False): fig.canvas.mpl_connect("key_press_event", self.keypress) while not self.finished: plt.pause(0.1) - # Clean up and resotre the figure settings + # Clean up and restore the figure settings plt.close(self.data.fig.number) if fig_tmp[0] is not None and fig_tmp[0] in plt.get_fignums(): self.data.fig = fig_tmp[0] diff --git a/doc/UserGuide/Users_Guide.tex b/doc/UserGuide/Users_Guide.tex index 3eb2aa795..632d39834 100755 --- a/doc/UserGuide/Users_Guide.tex +++ b/doc/UserGuide/Users_Guide.tex @@ -46,7 +46,7 @@ \section{Introduction} -This manual provides a user guide and reference for the Stoner python pacakage. +This manual provides a user guide and reference for the Stoner python package. The Stoner python package provides a set of python classes and functions for reading, manipulating and plotting data acquired with the lab equipment in the Condensed Matter Physics Group at the University of Leeds. @@ -246,7 +246,7 @@ \subsubsection{Masked Data and Why You Care}\label{(maskeddata)} d.mask=lambda x:[y<50 for y in x] \end{lstlisting} -The first line is simply the import statement for the numpy masked arrays in order to get the \textit{nomask} symbol. The second line will simply print the current mask. The next two examples will unmask all the data \ie make the values visible and useable. The next example illustrates using a numpy array of booleans to set the mask - every element in the mask array that evaluates as a boolean True will be masked and every False value unmasked. So far the semantics here are the same as if one had accessed the mask directly on the data via \verb'd.data.mask' but the final two examples illustrate an extension that setting the \textbf{DataFile} mask attribute allows. If you pass a callable object to the mask attribute it will be executed, passing each row of the data array to the user supplied function as a numpy array. The user supplied function can then either return a single boolean value -- in which case it will be used to mask the entire row -- or a list of booleans to mask individual cells in the current row. +The first line is simply the import statement for the numpy masked arrays in order to get the \textit{nomask} symbol. The second line will simply print the current mask. The next two examples will unmask all the data \ie make the values visible and usable. The next example illustrates using a numpy array of booleans to set the mask - every element in the mask array that evaluates as a boolean True will be masked and every False value unmasked. So far the semantics here are the same as if one had accessed the mask directly on the data via \verb'd.data.mask' but the final two examples illustrate an extension that setting the \textbf{DataFile} mask attribute allows. If you pass a callable object to the mask attribute it will be executed, passing each row of the data array to the user supplied function as a numpy array. The user supplied function can then either return a single boolean value -- in which case it will be used to mask the entire row -- or a list of booleans to mask individual cells in the current row. By default when the \textbf{DataFile} object is printed or saved, data values that have been masked are replaced with a ``fill'' value of $10^{20}$. 
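To make the mask semantics just described concrete, here is a minimal sketch (the filename and threshold are invented for illustration; \textit{nomask} comes from the numpy masked-array module as noted above):

\begin{lstlisting}
from numpy.ma import nomask
from Stoner import Data

d = Data("sample.txt")             # hypothetical data file
d.mask = lambda row: row[0] > 250  # one boolean per row masks the whole row
print(d)                           # masked cells print as the 1e20 fill value
d.mask = nomask                    # make every value visible again
\end{lstlisting}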
@@ -459,9 +459,9 @@ \subsubsection{Selecting Individual rows and columns of data} d.filter(lambda r:r[0]>5,['Temp']) \end{lstlisting} -With jsut a single argument, the filter method takes a complete row at a time and passes it +With just a single argument, the filter method takes a complete row at a time and passes it -to the first argument, expecting to get a boolean response (or list olf booleans equal in length +to the first argument, expecting to get a boolean response (or list of booleans equal in length -to the number of columns). With a second argument as in the second example, you can sepcify which +to the number of columns). With a second argument as in the second example, you can specify which columns are passed to the filtering function in what order. The second argument must be a list of things which can be used to index a column (\ie strings, integers, regular expressions). @@ -484,9 +484,9 @@ \subsubsection{Find out more about the data} \subsubsection{Copying Data} -One of the characterisitics of Python that can confuse those used to other +One of the characteristics of Python that can confuse those used to other programming languages is that assignments and argument passing is by reference -and not by value. This can lead to unexcted results as you can end up modifying variables you were not expecting ! To help with creating genuine copies of data Python provides the copy module. Whilst this works with DataFile objects, for convenience, the \textbf{DataFile.clone} atribute is provided to make a deep copy of a DataFile object. +and not by value. This can lead to unexpected results as you can end up modifying variables you were not expecting ! To help with creating genuine copies of data Python provides the copy module. Whilst this works with DataFile objects, for convenience, the \textbf{DataFile.clone} attribute is provided to make a deep copy of a DataFile object. \keypoint{This is an attribute not a method, so there are no brackets here !} @@ -536,7 +536,7 @@ \subsubsection{Appending data} In the first example above, we add a single row of data to \textit{d}. This assumes that the number of elements in the array matches the number of columns in the data file. The second example is similar but this time appends a 2 -dimensional numpy array to the data. The third example demonstrates adding data from a dictioary. In this case +dimensional numpy array to the data. The third example demonstrates adding data from a dictionary. In this case the keys of the dictionary are used to determine which column the values are added to. If their columns that don't match one of the dictionary keys, then a \textit{NaN} is inserted. If their are keys that don't match columns labels, then new columns are added to the data set, filled with \textit{NaN}. In the fourth example, each element
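A rough sketch of the dictionary-append behaviour described in that hunk (the column names and values are purely illustrative, and we assume the \textbf{Data} constructor accepts a numpy array plus a \textit{column\_headers} keyword, as used elsewhere in this guide):

\begin{lstlisting}
import numpy as np
from Stoner import Data

d = Data(np.zeros((2, 2)), column_headers=["Temp", "Resistance"])
d += np.array([4.2, 102.5])            # a single row matching the column count
d += {"Temp": 77.0, "Moment": 1.5e-3}  # the Resistance cell becomes NaN and a
                                       # new "Moment" column is created
\end{lstlisting}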
This is to allow the metod to be chained +itself as well as modifying the object. This is to allow the method to be chained up with other methods for more compact code writing. \subsubsection{Deleting Rows of Data} @@ -692,8 +692,8 @@ \subsection{Plotting 2D data} The examples above demonstrate several use cases of the \textbf{plot\_xy} method. The first parameter is always the x column that contains the data, the second is the y-data either as a single column or list of columns. The third parameter is the style of the plot (lines, points, colours \etc) and can either be a list if the y-column data is a list or a single string. Finally additional parameters can be given to specify a title and to control which figure is used for the plot. All matplotlib keyword parameters can be specified as additional keyword arguments and are passed through to the relevant plotting function. The final example illustrates a convenient way to produce log-linear and log-log plots. By default, \textbf{plotxy} uses the \textbf{pyplot.plot} function to produce linear scaler plots. There are a number of useful plotter functions that will work like this: \begin{description} \item[pyplot.semilogx,pyplot.semilogy] These two plotting functions will produce log-linear plots, with semilogx making the x-axes the log one and semilogy the y-axis. - \item[pyplot.loglog] Liek the semi-log plots, this will produce a log-log plot. - \item[pyplot.errorbar] this particularly useful plotting function will draw error bars. The values for the error bars are passed as keyword arguments, \textit{xerr} or \textit{yerr}. In standard matplotlib, these can be numpy arrays or constants. \textbf{PlotFile.plot\_xy} extends this by intercepting these arguements and offering some short cuts: + \item[pyplot.loglog] Like the semi-log plots, this will produce a log-log plot. + \item[pyplot.errorbar] this particularly useful plotting function will draw error bars. The values for the error bars are passed as keyword arguments, \textit{xerr} or \textit{yerr}. In standard matplotlib, these can be numpy arrays or constants. \textbf{PlotFile.plot\_xy} extends this by intercepting these arguments and offering some short cuts: \begin{lstlisting} p.plot_xy(x,y,plotter=errorbar,yerr='dResistance',xerr=[5,'dTemp+']) \end{lstlisting} @@ -717,7 +717,7 @@ \subsection{Plotting 3D Data} p.plot_xyz(col_x,col_y,col_z,xlim=(-10,10,100),ylim=(-10,10,100)) \end{lstlisting} - By default the plot\_xyz will produce a 3D surface plot with the z-axis coded with a rainbow colourmap (specifically, the matplotlib provided \textit{matplotlib.cm.jet} colourmap. This can be overriden with the \textit{cmap} keyword parameter. If a simple 2D surface plot is required, then the \textit{plotter} parameter should be set to a suitable function such as \textbf{pyplot.pcolor}. + By default the plot\_xyz will produce a 3D surface plot with the z-axis coded with a rainbow colourmap (specifically, the matplotlib provided \textit{matplotlib.cm.jet} colourmap. This can be overridden with the \textit{cmap} keyword parameter. If a simple 2D surface plot is required, then the \textit{plotter} parameter should be set to a suitable function such as \textbf{pyplot.pcolor}. Like \textbf{plot\_xy}, a \textit{figure} parameter can be used to control the figure being used and any additional keywords are passed through to the plotting function. The axes labels are set from the corresponding column labels. 
@@ -745,7 +745,7 @@ \subsection{Plotting 3D Data} p.plot_matrix(plotter=pyplot.pcolor,figure=False) \end{lstlisting} - The first example just uses all the default values, in which case the matrix is assumed to run from the 2nd column in the file to the last and over all of the rows. The x values for each row are found from the contents of the first column, and the y values for each column are found from the column headers interpreted as a floating pint number. The colourmap defaults to the built in `jet' theme. The x axis label is set to be the column header for the first column, the y axis label is set either from the meta data item ``ylabel'' or to ``Y Data''. Likewise the z axis label is set from the corresponding metadata item or defaults to ``Z Data''. In the second form these parameters are all set explicitly. The \textit{xvals} parameter can be either a column index (integer or sring) or a list, tuple or numpy array. The \textit{yvals} parameter can be either an row number (integer) or list,tuple or numpy array. Other parameters (including \textit{plotter}, \textit{figure} \etc) work as for the \textbf{PlotFile.plot\_xyz} method. The \textit{rectang} parameter is used to select only part of the data array to use as the matrix. It may be 2-tuple in which case it specifies just the origin as (row,column) or a 4-tuple in which case the third and forth elements are the number of rows and columns to include. If \textit{xvals} or \textit{yvals} specify particular column or rows then the origin of the matrix is moved to be one column further over and one row further down (\ie the matrix is to the right and below the columns and rows used to generate the x and y data values). The final example illustrates how to generate a new 2D surface plot in a new window using default matrix setup. + The first example just uses all the default values, in which case the matrix is assumed to run from the 2nd column in the file to the last and over all of the rows. The x values for each row are found from the contents of the first column, and the y values for each column are found from the column headers interpreted as a floating point number. The colourmap defaults to the built in `jet' theme. The x axis label is set to be the column header for the first column, the y axis label is set either from the meta data item ``ylabel'' or to ``Y Data''. Likewise the z axis label is set from the corresponding metadata item or defaults to ``Z Data''. In the second form these parameters are all set explicitly. The \textit{xvals} parameter can be either a column index (integer or string) or a list, tuple or numpy array. The \textit{yvals} parameter can be either a row number (integer) or list,tuple or numpy array. Other parameters (including \textit{plotter}, \textit{figure} \etc) work as for the \textbf{PlotFile.plot\_xyz} method. The \textit{rectang} parameter is used to select only part of the data array to use as the matrix. It may be a 2-tuple in which case it specifies just the origin as (row,column) or a 4-tuple in which case the third and fourth elements are the number of rows and columns to include. If \textit{xvals} or \textit{yvals} specify particular column or rows then the origin of the matrix is moved to be one column further over and one row further down (\ie the matrix is to the right and below the columns and rows used to generate the x and y data values). The final example illustrates how to generate a new 2D surface plot in a new window using default matrix setup.
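Putting those plot\_matrix parameters together in one sketch (the origin and size passed to \textit{rectang} are invented purely for illustration):

\begin{lstlisting}
import matplotlib.pyplot as plt

p.plot_matrix()                       # all defaults: x from column 0, y from headers
p.plot_matrix(xvals=0, yvals=2,       # x data in column 0, y values taken from row 2
              rectang=(3, 1, 10, 5),  # origin at (row 3, col 1), 10 rows by 5 columns
              plotter=plt.pcolor, figure=False)
\end{lstlisting}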
\subsection{Getting More Control on the Figure} @@ -837,7 +837,7 @@ \subsection{Manipulating Data} and a column `Temperature' that contains numbers above and below 100. The first example would return a \textbf{DataFoder} object (see \ref{DataFolder}) containing separate \textbf{AnalyseFile} which -would each contain the rows from the orginal data that had each unique value of the polarisation data. The second example would +would each contain the rows from the original data that had each unique value of the polarisation data. The second example would produce a \textbf{DataFolder} object containing two \textbf{AnalyseFile} objects for the rows with temperature abobe and below 100. The final example will result in a \textbf{DataFolder} object that has two groups each of which contains \textbf{AnalyseFile} objects for each polarisation value. @@ -881,7 +881,7 @@ \subsubsection{Simple function fitting} parameters, the default value is 1. \textit{xcol} and \textit{ycol} are the x and y columns to fit. This method cannot handle multiple y columns. \textit{sigma}, if present, provides the weightings for each datapoint and so -should also be an array of the same length as the x and y data. Fianlly, the +should also be an array of the same length as the x and y data. Finally, the bounds function can be used to restrict the fitting to only a subset of the rows of data. @@ -969,7 +969,7 @@ \subsubsection{Fitting with limits} \subsection{More AnalyseFile Functions} -\subsubsection{Applying an arbitary function through the data} +\subsubsection{Applying an arbitrary function through the data} \begin{lstlisting} a.apply(func, col, replace = True, header = None) @@ -1000,7 +1000,7 @@ \subsubsection{Basic Data Inspection} The span method simply returns a tuple of minimum and maximum values within either the whole column or bounded data. Internally this is just calling the \textbf{max} and \textbf{min} methods. The \textbf{clip} method deletes rows for which the specified column as a value that is either larger or smaller than the maximum or minimum value within the second argument. This allows one to specify either a tuple -- \eg the result of the \textbf{span} method, or a complete list as in the last example above. Specifying a single float would have the effect of removing all rows where the column didn't equal the float value. This is probably not a good idea... -It is worth pointing out that these functions will respect the existing mask on the data unless the bounds parameter is set, in which case the mask is temproarily discarded in favour of one generated from the bounds expression. This can be worked around, however, as the parameter passed to the bounds function is itself a masked array and thus one can include a test of the mask in the bounds function: +It is worth pointing out that these functions will respect the existing mask on the data unless the bounds parameter is set, in which case the mask is temporarily discarded in favour of one generated from the bounds expression. This can be worked around, however, as the parameter passed to the bounds function is itself a masked array and thus one can include a test of the mask in the bounds function: \begin{lstlisting} a.span(column,bounds=lambda x,y:y[2]>10 or not numpy.any(y.mask)) @@ -1084,12 +1084,12 @@ \subsection{Getting a List of Files} The current root directory and pattern are stored in the \textit{directory} and \textit{pattern} keywords and the \textbf{getlist} method can be used to force a new listing of files. 
\begin{lstlisting}
-	f.dirctory='/home/phygbu/Data'
+	f.directory='/home/phygbu/Data'
 	f.pattern='*.txt'
 	f.getlist()
\end{lstlisting}
-Sometimes a more complex filename matching mechanism than simple ``globbing'' is useful. The \textit{patter} keyword can also be a compiled regular expression:
+Sometimes a more complex filename matching mechanism than simple ``globbing'' is useful. The \textit{pattern} keyword can also be a compiled regular expression:
\begin{lstlisting}
import re
@@ -1143,7 +1143,7 @@ \subsection{Doing Something With Each File}
\subsection{Sorting, Filtering and Grouping Data Files}\label{groups}
-The order of the files in a \textbf{DataFolder} is arbitary. If it is important to process them in a given order then the \textit{sort} method can be used:
+The order of the files in a \textbf{DataFolder} is arbitrary. If it is important to process them in a given order then the \textit{sort} method can be used:
\begin{lstlisting}
f.sort()
@@ -1175,7 +1175,7 @@ \subsection{Sorting, Filtering and Grouping Data Files}\label{groups}
f.groups
\end{lstlisting}
-The \textbf{group} method splits the files in the \textbf{DataFolder} into several groups each of which share a common value of the arguement supplied to the \textbf{group} method. A group is itself another isntanceinstance of the \textbf{DataFolder} class. Each \textbf{DataFolder} object maintains a dictionary called \textit{groups} whose keys are the distinct values of the argument of the \textbf{group} methods and whose values are \textbf{DataFolder} objects. So, if our \textbf{DataFolder} f contained files measured at 4.2, 77 and 300K and at fields of 1T and -1T then the first variant would create 3 groups: 4.2, 77 and 300 each one of which would be a \textbf{DataFolder} object containg the files measured at those temperatures. The second varaint would produce 2 groups -- ``postive'' containing the files measured with magnetic field of 1T and ``negative'' containing the files measured at -1T. The third variant then goes one stage further and would produce 3 groups, each of which in turn had 2 groups. The groups are accessed via the \textit{group} attribute:
+The \textbf{group} method splits the files in the \textbf{DataFolder} into several groups, each of which shares a common value of the argument supplied to the \textbf{group} method. A group is itself another instance of the \textbf{DataFolder} class. Each \textbf{DataFolder} object maintains a dictionary called \textit{groups} whose keys are the distinct values of the argument of the \textbf{group} method and whose values are \textbf{DataFolder} objects. So, if our \textbf{DataFolder} f contained files measured at 4.2, 77 and 300K and at fields of 1T and -1T then the first variant would create 3 groups: 4.2, 77 and 300 each one of which would be a \textbf{DataFolder} object containing the files measured at those temperatures. The second variant would produce 2 groups -- ``positive'' containing the files measured with magnetic field of 1T and ``negative'' containing the files measured at -1T. The third variant then goes one stage further and would produce 3 groups, each of which in turn had 2 groups.
The groups are accessed via the \textit{group} attribute:
\begin{lstlisting}
f.groups[4.2].groups["positive"].files
diff --git a/doc/UserGuide/analysisfile.rst b/doc/UserGuide/analysisfile.rst
index 34c2ea112..d558cd964 100755
--- a/doc/UserGuide/analysisfile.rst
+++ b/doc/UserGuide/analysisfile.rst
@@ -53,8 +53,8 @@ This is typically used to calculate asymmetry parameters.::
    a.diffsum('I+','I-')
Of course, these elementary operations might look rather pointless given how easy it is to extract single columns of data and then add them to a
-:py:class:`Stoner.Core.Data` object, however if the cahnnels are specified as a **tuple** of two elements, then it is taken as a channel of data and a
-second channel of uncertainities. The uncertainity calculation is then propagated correctly for the maths operations. This is particularly useful for the
+:py:class:`Stoner.Core.Data` object, however if the channels are specified as a **tuple** of two elements, then it is taken as a channel of data and a
+second channel of uncertainties. The uncertainty calculation is then propagated correctly for the maths operations. This is particularly useful for the
:py:meth:`AnalysisMixin.diffsum` method where the error propagation is not entirely trivial.
.. plot:: samples/channel_math.py
@@ -75,7 +75,7 @@ each row belongs in. The :py:meth:`AnalysisMixin.split` method is useful for thi
In these examples we assume the :py:class:`AnalysisMixin` has a data column 'Polarisation' that takes two (or more) discrete
values and a column 'Temperature' that contains numbers above and below 100.
-The first example would return a :py:class:`Stoner.Folders.DataFolder` object containing two separate isntances of :py:class:`AnalysisMixin` which
+The first example would return a :py:class:`Stoner.Folders.DataFolder` object containing two separate instances of :py:class:`AnalysisMixin` which
would each contain the rows from the original data that had each unique value of the polarisation data. The second example would
produce a :py:class:`Stoner.Folders.DataFolder` object containing two :py:class:`AnalysisMixin` objects for the rows with temperature
above and below 100.
The final example will result in a :py:class:`Stoner.Folders.DataFolder` object that has two groups each of which contains
@@ -84,7 +84,7 @@ The final example will result in a :py:class:`Stoner.Folders.DataFolder` object
More AnalysisMixin Functions
============================
-Applying an arbitary function through the data
+Applying an arbitrary function through the data
-----------------------------------------------
:py:meth:`AnalysisMixin.apply`::
@@ -125,7 +125,7 @@ eg the result of the :py:meth:`AnalysisMixin.span` method, or a complete list as
float would have the effect of removing all rows where the column didn't equal the float value. This is probably not a good idea...
It is worth pointing out that these functions will respect the existing mask on the data unless the bounds parameter is set,
-in which case the mask is temproarily discarded in favour of one generated from the bounds expression. This can be worked around,
+in which case the mask is temporarily discarded in favour of one generated from the bounds expression.
This can be worked around,
however, as the parameter passed to the bounds function is itself a masked array and thus one can include a test of the mask in the
bounds function::
@@ -161,7 +161,7 @@ error bar on the bin becomes the quadrature sum of the individual error bars on
is the standard error in the y points in the bin and the y value is the simple mean of the data.
If **xcol** and/or **ycol** are not specified, then they are looked up from the :py:attr:`Stoner.Core.DataFile.setas` attribute. In this case, the **yerr**
-is also taken from this attribute if not specified spearately.
+is also taken from this attribute if not specified separately.
If the keyword *clone* is supplied and is False, four 1D numpy arrays are returned, representing the x, y and y-errors for the new bins
and the number of points averaged into each bin. If *clone* is True or not provided, :py:meth:`AnalysisMixin.bin` returns a clone of the current data file with its data
@@ -191,9 +191,9 @@ underlying trends. The Stoner package offers a number of approaches to filterin
    In both these examples, the data to be smoothed is determined from the :py:attr:`Stoner.Core.DataFile.setas` attribute.
    The first argument is passed to :py:func:`scipy.signal.get_window` to define the window function. The *size* argument
-    can either be an integer to sepcify the number of rows in the window, or a float to specify the size of the window in
+    can either be an integer to specify the number of rows in the window, or a float to specify the size of the window in
    terms of the x data. In the latter case, the data is first reinterpolated to an evenly spaced set in terms of the x-column
-    and then smoothed and then reinterpolated back to the original x data co-ordinates.
+    and then smoothed and then reinterpolated back to the original x data coordinates.
.. warning::
@@ -215,12 +215,12 @@ underlying trends. The Stoner package offers a number of approaches to filterin
    An alternative approach is to use a smoothing spline to fit the data locally. Depending on the spline smoothing setting
    this will create a function that is continuous in both value and derivative that approaches the data. Unlike Savitzky-
-    Golay fitlering it cannot be used to calculate a derivative easily, but it can handle y data with uncertainities. It is
+    Golay filtering it cannot be used to calculate a derivative easily, but it can handle y data with uncertainties. It is
    implemented as the :py:meth:`AnalysisMixin.spline` method.
- Rebinning
-    As ullustrated above, rebinning the data is a common way fof reducing noise by combining several data points. This is simple
+    As illustrated above, rebinning the data is a common way for reducing noise by combining several data points. This is simple
    and effective, but does reduce the length of the data! All three approaches are illustrated in the example below:
@@ -258,7 +258,7 @@ possible match to *scan1*. If *xcol* or *ycol* are not given then the default x
:py:attr:`Stoner.Core.DataFile.setas` attribute.
The *mode* keyword can be used to specify the types of scaling operations that are
-to be allowed. Teh defaults are to shoft both x and y by an offset value and to rescale
+to be allowed. The defaults are to shift both x and y by an offset value and to rescale
the y data by some factor. If even more control of the transformation is needed, the
*func* keyword and *p0* keyword can be used to provide an alternative transformation
function and initial guesses to its parameters.
The prototype for the transformation
@@ -269,7 +269,7 @@ function should be::
        return (mapped_x,mapped_y)
In addition to changing the X and Y data in the current :py:class:`AnalysisMixin`
-instance, two new metadata keys, *Stitching Coefficient* and *Stitching Coeffient Errors*,
+instance, two new metadata keys, *Stitching Coefficient* and *Stitching Coefficient Errors*,
are created with the coefficients used to modify the scan data.
Thresholding, Interpolating and Extrapolation of Data
@@ -291,11 +291,11 @@ scipy routine :py:func:`scipy.optimize.interp1d`::
    a.interpolate(newX,kind='linear', xcol="X-Data")
-The new values of X are set from the mandetory first argument. **kind** can be either "linear" or "cubic" whilst the xcol data can be omitted in which case the
+The new values of X are set from the mandatory first argument. **kind** can be either "linear" or "cubic" whilst the xcol data can be omitted in which case the
:py:attr:`Stoner.Core.DataFile.setas` attribute is used. The method will return a new set of data where all columns are interpolated against the new values of X.
The :py:meth:`AnalysisMixin.interpolate` method will return values that are obtained from 'joining the dots' - which is
-appropriate if the uncertainities (and hence scatter) in the data is small. With more scatter in the data, it is better to
+appropriate if the uncertainties (and hence scatter) in the data are small. With more scatter in the data, it is better to
use some locally fitted spline function to interpolate with. The :py:meth:`AnalysisMixin.spline` function can be used for this.::
    d.spline("X-Data","Y-Data",header="Spline Data",order=3,smoothing=2.0,replace=True)
@@ -305,18 +305,18 @@ use some locally fitted spline function to interpolate with. The :py:meth:`Analy
The *order* keyword gives the polynomial order of the spline function being fitted. The *smoothing* factor determines
how closely the spline follows the data points, with a *smoothing*=0.0 being a strict interpolation. The *replace* argument
-controls what the return value from the :py:meth:`AnalysisMixin.spline` method reutrns. IF *replace* is True or a column
+controls what the :py:meth:`AnalysisMixin.spline` method returns. If *replace* is True or a column
index, then the new data is added as a column of the Data, possibly replacing the current y-data. If *replace* is False,
then the new y-data is returned, but the existing data is unmodified. Finally, if *replace* is None, then the
:py:meth:`AnalysisMixin.spline` method returns a :py:class:`scipy.interpolate.UnivariateSpline` object that can be used to
-evaluate the spline at arbitary locations, including extrapolating outside the range of the original x data.
+evaluate the spline at arbitrary locations, including extrapolating outside the range of the original x data.
Extrapolation is, of course, a dangerous operation when applied to data as it is essentially 'inventing' new data.
Extrapolating from the spline function, whilst possible, is a little tricky and in many cases the
:py:meth:`AnalysisMixin.extrapolate` method is likely to be more successful. :py:meth:`AnalysisMixin.extrapolate` works by fitting a function over a window in
the data and using the fit function to predict nearby values. Where the new values lie within the range of data, this is strictly a
form of interpolation and the window of data fitted to the extrapolation function is centred around the new x-data point.
As
-the new x-data point approaches and passes the limits of the exisiting data, the window used to provide the fit function
+the new x-data point approaches and passes the limits of the existing data, the window used to provide the fit function
runs up to and stops at the limit of the supplied data. Thus when extrapolating, the new values of data are increasingly less certain
as one moves further from the end of the data.
@@ -325,8 +325,8 @@ as one moves further from the end of the data.
    :outname: extrapolate
-Extrapolation is of course most succesful if one has a physical model that should describe the data.
-To allow for this, you can pass an arbitary fitting function as the *kind* parameter.
+Extrapolation is of course most successful if one has a physical model that should describe the data.
+To allow for this, you can pass an arbitrary fitting function as the *kind* parameter.
Whilst interpolation will tell you the
@@ -339,11 +339,11 @@ Whilst not the most sophisticated algorithm it is reasonably easy to implement a
    SG_Filter(col=("X","Y"), points=15, poly=1, order=0,result=None, replace=False, header=None)
-If col is a tuple then it is taken to be specifing both x and y data column indices. Otherwise the data
-indexed by col is differentiated with repsect to the row. *order* specifies the order of differentiation, where 0
+If col is a tuple then it is taken to be specifying both x and y data column indices. Otherwise the data
+indexed by col is differentiated with respect to the row. *order* specifies the order of differentiation, where 0
means simply smoothing the data. The algorithm works by locally fitting a polynomial over a certain window of points.
The parameters for this fitting are controlled by the *points* and *poly* parameters. *points*>*poly*>*order* for
-the algorithm to work. *resul;t*, *replace* and *header* specify that the calculated data should also be added to
+the algorithm to work. *result*, *replace* and *header* specify that the calculated data should also be added to
the :py:class:`AnalysisMixin` instance, optionally replacing an existing column indexed by *result* and given a new
header *header*. The nature of the local fitting means that the first and last *poly*/2 points are not valid.
@@ -366,8 +366,8 @@ The algorithm used is to differentiate the data with a Savitsky-Golay filter - w
Zero crossing values in the derivative are located and then the second derivative is found for these points and are used
to identify peaks and troughs in the data. The *width* and *poly* keywords are used to control the order of polynomial and the width of
the window used for calculating the derivative - a lower order of polynomial and wider width will make the algorithm less sensitive to narrow peaks.
-The *significance* parameter controls which peaks and troughs are returned. If *signficance* is a float, then only peaks and troughs whose
-second derivatives are larger than *significance* are returned. If *significance* is an integer, then maxmium snd derivative in the data is divided
+The *significance* parameter controls which peaks and troughs are returned. If *significance* is a float, then only peaks and troughs whose
+second derivatives are larger than *significance* are returned. If *significance* is an integer, then the maximum second derivative in the data is divided
by the supplied significance and used as the threshold on which peaks and troughs to return. If *significance* is not provided then a value of 20 is used.
Finally, if *sort* is True, the peaks are returned in order of their significance. *peaks* and *troughs* select whether to return peaks or troughs.
diff --git a/doc/UserGuide/cookbook.rst b/doc/UserGuide/cookbook.rst
index 6d7c0688b..ddd97f25a 100755
--- a/doc/UserGuide/cookbook.rst
+++ b/doc/UserGuide/cookbook.rst
@@ -58,13 +58,13 @@ and any offset in H (e.g. due to trapped flux in the magnetometer). The latter o
also remove the effect of any exchange bias that moves the coercive field.
As well as performing the corrections, the code will add metadata items for:
-    * Background susceptibility (from fitting striaght lines to the out part of the data)
+    * Background susceptibility (from fitting straight lines to the outer part of the data)
    * Saturation magnetisation and uncertainty (also from fitting lines to the outer part of the data)
    * Coercive Fields (H for zero M)
    * Remanence (M for zero H)
    * Saturation Fields (H where M deviates by the standard error from saturation)
    * Maximum BH product (the point where -H * M is maximum)
-    * Loop Area (from integrating the area inside the hysteresis loop - only valide for complete loops)
+    * Loop Area (from integrating the area inside the hysteresis loop - only valid for complete loops)
Some of these parameters are determined by fitting a straight line to the outer portions of the data (i.e. at the
extrema in H). The keyword parameter *saturation_fraction* controls the extent of the data assumed to be saturated.
@@ -74,7 +74,7 @@ Formatting Error Values
-----------------------
In experimental physics, the usual practice (unless one has good reason to do otherwise) is to quote uncertainties in
-a measurement to one signficant figure, and then quote the value to the same number of decimal places. Whilst doing this
+a measurement to one significant figure, and then quote the value to the same number of decimal places. Whilst doing this
might sound simple, actually doing it seems something that many students find difficult. To help with this task, the
:py:mod:`Stoner.Util` module provides the :py:func:`Stoner.Util.format_error` function.::
@@ -124,7 +124,7 @@ For example:
    :outname: curvefit_sphere_2
-Other Recipies
+Other Recipes
==============
Extract X-Y(Z) from X-Y-Z data
@@ -206,7 +206,7 @@ to quickly examine the output data::
    import Stoner.plot
    p=SP.PlotFile('my_simulation.ovf')
    p.setas="xyzuvw"
-    p=p.section(z=10.5) # Take a slice inthe xy plane where z is 10.5 nm
+    p=p.section(z=10.5) # Take a slice in the xy plane where z is 10.5 nm
    p.plot() # A 3D plot with cones
    p.setas="xy.uvw"
    p.plot() # a 2D colour wheel plot with triangular glyphs showing vector direction.
diff --git a/doc/UserGuide/credits.rst b/doc/UserGuide/credits.rst
index 26a4ad05d..86dfde6a7 100644
--- a/doc/UserGuide/credits.rst
+++ b/doc/UserGuide/credits.rst
@@ -5,7 +5,7 @@ Contributors and Credits
A number of current and former members of the Condensed Matter Physics Group at the University of Leeds have contributed
to this code in various forms.
-The original versions were developped by Dr Matthew Newman and Dr Chris Allen.
+The original versions were developed by Dr Matthew Newman and Dr Chris Allen.
Dr Rowan Temple, Dr Sophie Morely and Dr Joe Batley have all contributed file formats and plot styles.
They, and Dr May Wheeler, Dr Philippa Shepley and Nathan Satchell have found bugs and made helpful suggestions.
diff --git a/doc/UserGuide/curve_fitting.rst b/doc/UserGuide/curve_fitting.rst
index df6ede315..a2aced5c7 100755
--- a/doc/UserGuide/curve_fitting.rst
+++ b/doc/UserGuide/curve_fitting.rst
@@ -1,7 +1,7 @@
.. _curve_fit_guide:
***********************************
-Curve Fitting in the Stoner Pacakge
+Curve Fitting in the Stoner Package
***********************************
.. currentmodule:: Stoner
@@ -20,19 +20,19 @@ In order of increasing complexity, the Stoner package supports the following:
- `Simple polynomial fits`_
-    If the model is simply a polynomial function and there are no uncerainties in the data and no constraints on the parameters, then this
+    If the model is simply a polynomial function and there are no uncertainties in the data and no constraints on the parameters, then this
    is the simplest and easiest to use. This makes use of the :py:meth:`Data.polyfit` method.
- `Simple function fitting`_
-    If you need to fit to an arbitary function, have no contraints on the values of the fitting parameters, and have uncertainities in the *y*
-    co-ordinates but not in the *x*, then the simple function fitting is probably the best option. The Stoner package provides a wrapper around
+    If you need to fit to an arbitrary function, have no constraints on the values of the fitting parameters, and have uncertainties in the *y*
+    coordinates but not in the *x*, then the simple function fitting is probably the best option. The Stoner package provides a wrapper around
    the standard :py:func:`scipy.optimize.curve_fit` function in the form of the :py:meth:`Data.curve_fit` method.
- `Fitting with limits`_
-    If your problem has constrained parameters - that is there are physical reasons why the paramters in your model cannot take certain values,
-    the you probably want to use the :py:meth:`Data.lmfit` method. This works well when your data has uncertainities in the *y* values but
+    If your problem has constrained parameters - that is there are physical reasons why the parameters in your model cannot take certain values,
+    then you probably want to use the :py:meth:`Data.lmfit` method. This works well when your data has uncertainties in the *y* values but
    not in *x*.
- `Orthogonal distance regression`_
@@ -45,21 +45,21 @@ In order of increasing complexity, the Stoner package supports the following:
    Differential evolution algorithms attempt to find optimal fits by evaluating a population of possible solutions and then combining those
    that were scored by some costing function to be the best fits - thereby creating a new population of possible (hopefully better) solutions.
    In general some level of random fluctuation is permitted to stop the minimizer getting stuck in local minima. These algorithms can be effective when there are a
-    karge number of parametgers to search or the cost is not a smooth function of the parmaeters and thus cannot be differentiated. The algorithm here
+    large number of parameters to search or the cost is not a smooth function of the parameters and thus cannot be differentiated. The algorithm here
    uses a standard weighted variance as the cost function - like *lmfit* and *curve_fit* do.
Why Use the Stoner Package Fitting Wrappers?
--------------------------------------------
-There are a number of advantages to using the Stoner pakcage wrappers around the the vartious fitting algorithms rather than using them as
-standalone fitting functios:
+There are a number of advantages to using the Stoner package wrappers around the various fitting algorithms rather than using them as
+standalone fitting functions:
-    #. They provide a consisten way of defining the model to be fitted. All of the Stoner package functions accept a model function of the form:
+    #. They provide a consistent way of defining the model to be fitted. All of the Stoner package functions accept a model function of the form:
       f(x,p1,p2,p3), constructing the necessary intermediate model class as necessary - similarly they can all take an :py:class:`lmfit.model.Model`
       class or instance and adapt that as necessary.
-    #. They provide a consisten parameter order and keyword argument names as far as possible within the limits of the underlying algorithms.
+    #. They provide a consistent parameter order and keyword argument names as far as possible within the limits of the underlying algorithms.
       Generally these follow the :py:func:`scipy.optimize.curve_fit` conventions.
-    #. They make use of the :py:attr:`Data.setas` attribute to identify data columns containing *x*, *y* and associated uncertainities. They
+    #. They make use of the :py:attr:`Data.setas` attribute to identify data columns containing *x*, *y* and associated uncertainties. They
       also provide a common way to select a subset of data to use for the fitting through the *bounds* keyword argument.
    #. They provide a consistent way to add the best fit data as a column(s) to the :py:class:`Data` object and to store the best-fit
       parameters in the metadata for retrieval later. Since this is done in a consistent fashion, the package also can provide a
@@ -86,16 +86,16 @@ method returns a list of coefficients with the highest power first. If
If *result* is specified then a new column with the header given by the *result* parameter will be created and the fitted
polynomial evaluated at each point.
-Fitting Arbitary Functions
+Fitting Arbitrary Functions
===========================
Common features of the Function Fitting Methods
-----------------------------------------------
-he output of the three methods used to fit arbitary functions depend on the keyword parameters *output*, *result* *replace* and *header* in the method call.
+The output of the three methods used to fit arbitrary functions depends on the keyword parameters *output*, *result*, *replace* and *header* in the method call.
- *output="fit"*
-    The optimal parameters and a variance-covariance matrix are retured
+    The optimal parameters and a variance-covariance matrix are returned
- *output="row"*
    A 1D numpy array of optimal values and standard errors interleaved (i.e. p_opt[0],p_error[0],p_opt[1],p_error[1]....) is returned. This is useful
    when analysing a large number of similar data sets in order to build a table of fitting results.
@@ -104,7 +104,7 @@ he output of the three methods used to fit arbitary functions depend on the key
- *output="data"*
    A copy of the data file itself is returned - this is most useful when used in conjunction with :py:class:`Stoner.DataFolder`
- *output="full"*
-    As much infromation about the fit as can be extracted from the fitting algorithm is returned.
+    As much information about the fit as can be extracted from the fitting algorithm is returned.
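As a rough sketch of how the *output* keyword changes what is returned (the model function and column names below are invented for illustration)::

    def linear(x, m, c):  # simple straight-line model y = m*x + c
        return m * x + c

    popt, pcov = d.curve_fit(linear, xcol="X", ycol="Y", output="fit")  # optimal parameters and covariance matrix
    row = d.curve_fit(linear, xcol="X", ycol="Y", output="row")         # values and errors interleaved in one array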
If *result* is not None, then the best fit data points are calculated and also the fitting parameters, errors and :math:`\chi^2` value
is calculated and added to the metadata of the :py:class:`Data` object. To distinguish between multiple fits, a *prefix* keyword can be given, otherwise
@@ -127,7 +127,7 @@ for adding appropriately formatted details of the fit to the plot (in this case
    :outname: curve_fit_line
The *bounds* function can be used to restrict the fitting to only a subset of the rows
-of data. Any callable object which will take a float and an array of floats, representing the one x-value and one complete row and rturn
+of data. Any callable object which takes a float and an array of floats, representing one x-value and one complete row, and returns
True if the row is to be included in the fit and False if not. e.g.::
    def bounds_func(x,row):
@@ -150,7 +150,7 @@ the same name.::
The first parameter is the fitting function. This should have prototype
``y=func(x,p[0],p[1],p[2]...)``: where *p* is a list of fitting parameters.
-Alternatively a subclass of, or instance of, a :py:class:`lmfit.model.Model` can also be passed and it's function will be used to provide infromation to
+Alternatively a subclass of, or instance of, a :py:class:`lmfit.model.Model` can also be passed and its function will be used to provide information to
:py:meth:`Data.curve_fit`.
The *p0* parameter contains the initial guesses at the fitting
@@ -158,11 +158,11 @@ parameters, the default value is 1. *xcol* and *ycol* are the x
and y columns to fit. If *xcol* and *ycol* are not given, then the :py:attr:`Data.setas` attribute is used
to determine which columns to fit.
-*sigma*, *absolute_sigma* and *scale_covar* determine how the fitting process takes account of uncertainities in the *y* data.
-*sigma*, if present, provides the weightings or stadnard deviations for each datapoint and so
+*sigma*, *absolute_sigma* and *scale_covar* determine how the fitting process takes account of uncertainties in the *y* data.
+*sigma*, if present, provides the weightings or standard deviations for each datapoint and so
should also be an array of the same length as the x and y data. If *sigma* is not given and a column is identified in the :py:attr:`Data.setas`
attribute as containing *e* values, then that is used instead.
-If *absolute_sigma* is given then if this is True, the *sigma* values are interpreted as absolute uncertainities in the data points,
+If *absolute_sigma* is given and is True, the *sigma* values are interpreted as absolute uncertainties in the data points,
if it is False, then they are relative weightings. If *absolute_sigma* is not given, a *scale_covar* parameter will have the same effect,
but inverted, so that True equates to relative weights and False to absolute uncertainties. Finally if neither is given then any *sigma*
values are assumed to be absolute.
@@ -201,8 +201,8 @@ The operation of :py:meth:`Data.lmfit` is very similar to that of :py:meth:`Data
    print(fit.fit_report())
    print(a["Arrhenius:A"],a["Arrhenius:A err"],a["chi^2"],a["nfev"])
-In this example we would be fitting an Arrehenius model to data contained inthe 'Temp' and 'Cond' columns. The resulting
-fit would be added as an additional colum called fit. In addition, details of the fit are added as metadata to the current :py:class:`Data`.
+In this example we would be fitting an Arrhenius model to data contained in the 'Temp' and 'Cond' columns. The resulting
+fit would be added as an additional column called fit.
In addition, details of the fit are added as metadata to the current :py:class:`Data`.
The *model* argument to :py:meth:`Data.lmfit` can be either an instance of the model class, or just the class itself (in which case it will be
instantiated as required), or just a bare callable, in which case a model class will be created around it. The latter is approximately equivalent to
a simple call to :py:meth:`Data.curve_fit`.
@@ -211,7 +211,7 @@ The return value from :py:meth:`Data.lmfit` is controlled by the *output* keyword parameter. By default it is the
:py:class:`lmfit.model.ModelFit` instance. This contains all the information about the fit and fitting process.
-You can pass the model as a subclass of model, if you don't pass initial values either via the *p0* parameter or as keyword arguements, then the model's
+You can pass the model as a subclass of Model; if you don't pass initial values either via the *p0* parameter or as keyword arguments, then the model's
*guess* method is called (e.g. :py:meth:`Stoner.analysis.fitting.models.thermal.Arrhenius.guess`) to determine parameters from the data. For example:
.. plot:: samples/lmfit_example.py
@@ -222,8 +222,8 @@ Orthogonal distance regression
------------------------------
:py:meth:`Data.curve_fit` and :py:meth:`Data.lmfit` are both essentially based on a Levenberg-Marquardt fitting algorithm which is a non-linear least squares
-routine. The essential point is that it seeks to minimize the **vertical** distance between the model and the data points, taking into account the uncertainity
-in the vertical poisition (ie. *y* co-ordinate) only. If your data has peaks that may change position and/or uncertanities in the horizontal (*x*) position of
+routine. The essential point is that it seeks to minimize the **vertical** distance between the model and the data points, taking into account the uncertainty
+in the vertical position (i.e. *y* coordinate) only. If your data has peaks that may change position and/or uncertainties in the horizontal (*x*) position of
the data points, you may be better off using an orthogonal distance regression.
The :py:meth:`Data.odr` method wraps the :py:mod:`scipy.odr` module and tries to make it function as much like :py:mod:`lmfit` as possible. In fact, in most
@@ -233,8 +233,8 @@ cases it can be used as a drop-in replacement:
    :include-source:
    :outname: odrfit2
-The :py:meth:`Data.odr` method allows uncertainities in *x* and *y* to be specified via the *sigma_x* and *sigma_y* parameters. If either are not specified, and
-a *sigma* parameter is given, then that is used instead. If they are either explictly set to **None** or not given, then the :py:attr:`Data.setas` attribute is
+The :py:meth:`Data.odr` method allows uncertainties in *x* and *y* to be specified via the *sigma_x* and *sigma_y* parameters. If either are not specified, and
+a *sigma* parameter is given, then that is used instead. If they are either explicitly set to **None** or not given, then the :py:attr:`Data.setas` attribute is
used instead.
Differential Evolution Algorithm
@@ -242,17 +242,17 @@
When the number of parameters gets large it can get increasingly difficult to get fits using the techniques above. In these situations, the
differential evolution approach may be valuable.
The :py:meth:`Stoner.Data.differential_evolution` method provides a wrapper around the :py:func:`scipy.optimize.differential_evolution`
-minimizer with the advantage that the model sepcification, and calling signatures are essentially the same as for the other fitting functions and thus there is
+minimizer with the advantage that the model specification and calling signatures are essentially the same as for the other fitting functions and thus there is
little programmer overhead to switching to it:
.. plot:: samples/differential_evolution_simple.py
    :include-source:
    :outname: diffev2
-Intrinsically, the differential evolution algorithm does not caculate a variance-covariance matrix since it never needs to find the gradient of the :math:`\chi^2`
+Intrinsically, the differential evolution algorithm does not calculate a variance-covariance matrix since it never needs to find the gradient of the :math:`\chi^2`
of the fit with the parameters. In order to provide such an estimate, the :py:meth:`Stoner.Data.differential_evolution` method carries out a standard
least-squares non-linear fit (using :py:func:`scipy.optimize.curve_fit`) as a second stage once :py:func:`scipy.optimize.differential_evolution` has found a likely good fitting
-set of parameters. This hybrid approach allows a good fit localtion to be identified, but also the physically useful fitting errors to be estimated.
+set of parameters. This hybrid approach allows a good fit location to be identified, but also the physically useful fitting errors to be estimated.
Included Fitting Models
@@ -264,7 +264,7 @@ The :py:mod:`Stoner.analysis.fitting.models` module provides a number of standar
Elementary Models
-----------------
-Amongst the incluided models are very generic model functions (in :py:mod:`Stoner.analysis.fitting.models.generic`) including:
+Amongst the included models are very generic model functions (in :py:mod:`Stoner.analysis.fitting.models.generic`) including:
.. currentmodule:: Stoner.analysis.fitting.models.generic
 - :py:class:`Linear` - straight line fit :math:`y=mx+c`
@@ -299,7 +299,7 @@ We also have a number of models for electron tunnelling processes built into the
    is comparable to the junction bias.
 - :py:class:`BDR` - this model introduces a trapezoidal barrier where the barrier height is different between the two electrodes -
    e.g. where the electrodes are composed of different materials.
-  - :py:class:`FowlerNordheim` - this is another simplified model of electron tunneling that has a single barrier heigt and width parameters.
+  - :py:class:`FowlerNordheim` - this is another simplified model of electron tunneling that has single barrier height and width parameters.
 - :py:class:`Stoner.Fit.TersoffHammann` - this model just treats tunneling as a linear I-V process and is applicable when the barrier
    height is large compared to the bias across the tunnel barrier.
@@ -309,11 +309,11 @@ Magnetism Related Models
The :py:mod:`Stoner.analysis.fitting.models.magnetism` includes models related to magnetism and magnetic materials.
.. currentmodule:: Stoner.analysis.fitting.models.magnetism
-  - :py:class:`Langevin` model is used to describe the magnetic momement versus field of a paramagnet.
+  - :py:class:`Langevin` model is used to describe the magnetic moment versus field of a paramagnet.
 - :py:class:`KittelEquation` and :py:class:`Stoner.Fit.Inverse_Kittel` - the Kittel equation is used to describe the magnetic field and frequency
-    reponse of the ferromagnetic resonance peak.
+    response of the ferromagnetic resonance peak.
Peak Models
@@ -322,7 +322,7 @@
The :py:mod:`lmfit` package comes with several common peak function models built in which can be used directly. The Stoner package adds a couple
more to the selection - these are particularly useful for fitting ferromagnetic resonance data:
-  - :py:class:`Lorentzian_diff` - the :py:mod:`lmfit` module incluides built in classes for Lorentzian peaks - but this model is the differential
+  - :py:class:`Lorentzian_diff` - the :py:mod:`lmfit` module includes built-in classes for Lorentzian peaks - but this model is the differential
    of a Lorentzian peak.
 - :py:class:`FMR_Power` - although this model is usually used specifically for calculating the absorption spectrum for a Ferromagnetic Resonance
    process, it is in fact a generic combination of both Lorentzian peak and differential forms.
@@ -372,7 +372,7 @@ to guess parameter values::
    def model_guess(y_data,x=x_data):
        return [param1_guess,param2_guess]
-In the same vein, the class provides a decorator to use a function to generate hints about the parameter, such as bouding values::
+In the same vein, the class provides a decorator to use a function to generate hints about the parameter, such as bounding values::
    @model_func.hinter
    def model_parameter_hints():
@@ -397,7 +397,7 @@ points in x,y,z space are fitted to a plane.
    :include-source:
    :outname: curvefit_plane
-Finally, by you can sepcify the *y-data* to fit to as a numpy array. This can be used to fit functions that
+Finally, you can specify the *y-data* to fit to as a numpy array. This can be used to fit functions that
don't themselves return values that can be matched up to existing data. An example of doing this is fitting a sphere to a set of
:math:`(x,y,z)` data points. In this case the fitting parameters are :math:`(x_0,y_0,z_0)` for the centre of the sphere, :math:`r`
for the radius and the fitting equation is :math:`(x-x_0)^2+(y-y_0)^2+(z-z_0)^2-r^2=0` and so we pass an array
@@ -413,7 +413,7 @@ See also :ref:`Fitting_tricks`
Non-linear curve fitting with initialisation file
-------------------------------------------------
-For writing general purpose fitting codes, it can be useful to drive the fitting code from a separate intialisation file so that users do not have to
+For writing general purpose fitting codes, it can be useful to drive the fitting code from a separate initialisation file so that users do not have to
edit the source code. :py:meth:`Data.lmfit` and :py:meth:`Data.odr` combined with :py:mod:`Stoner.Fit` provide some mechanisms to enable this.
Firstly, the initialisation file should take the following form.
diff --git a/doc/UserGuide/datafile.rst b/doc/UserGuide/datafile.rst
index d98cf6d35..2e134805c 100755
--- a/doc/UserGuide/datafile.rst
+++ b/doc/UserGuide/datafile.rst
@@ -22,12 +22,12 @@ class which in turn is a superset of many of the classes in the Stoner package.
detect the format of many of the measurement files that we use in our research.
The native file format for the Stoner package is known as the *TDI 1.5* format - a tab delimited text file
-that stores arbitary metadata and a single 2D data set. It closely matches the :py:class:`DataFile` class of the
+that stores arbitrary metadata and a single 2D data set.
It closely matches the :py:class:`DataFile` class of the
:py:mod:`Stoner.Core` module.
.. note::
    :py:class:`Data` will also read a related text format where the first column of the first line contains the string
-    *TDI Fromat=Text 1.0* which are produced by some of the LabVIEW rigs used by the Device Materials Group in
+    *TDI Format=Text 1.0* which is produced by some of the LabVIEW rigs used by the Device Materials Group in
    Cambridge.
@@ -64,7 +64,7 @@ Base Classes and Generic Formats
:py:class:`Stoner.formats.simulations.OVFFile`
    OVF files are output by a variety of micromagnetics simulators. The standard was designed for the OOMMF code. This class will handle rectangular mesh files with text or binary formats, versions 1.0 and 2.0
-Classes for Specifc Instruments (Mainly ones owned by the CM Physics Group in Leeds)
+Classes for Specific Instruments (Mainly ones owned by the CM Physics Group in Leeds)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:py:class:`Stoner.formats.instruments.VSMFile`
@@ -295,7 +295,7 @@ so that 'e' data is the error in the y data). :py:class:`DataFile` has a concept
will then use these by default in many methods when appropriate to have 'x' and 'y' data.
In addition to identifying columns as 'x','y', or 'z', for data that describes a vector field, you can mark the columns as containing
-'u', 'v', 'w' data where (u,v,w) is the vector value at the point (x,y,z). There's no support at present for uncertainities in (u,v,w) being marked.
+'u', 'v', 'w' data where (u,v,w) is the vector value at the point (x,y,z). There's no support at present for uncertainties in (u,v,w) being marked.
.. image:: https://i.imgur.com/vwBUO25.png
    :target: https://www.youtube.com/watch?v=LbSIqxTD9Xc
@@ -406,7 +406,7 @@ Number of Columns Assignments
Swapping and Rotating Column Assignments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Finally, if the :py:attr:`DataFile.setas` attribute has been set with *x*, *y* (and *z*) columns then these assingments can be
+Finally, if the :py:attr:`DataFile.setas` attribute has been set with *x*, *y* (and *z*) columns then these assignments can be
swapped around by the **invert** operator **~**. This either swaps *x* and *y* with their associated errorbars for 2-D datasets,
or rotates *x* to *y*, *y* to *z* and *z* to *x* (again with their associated error bars).::
@@ -534,10 +534,10 @@ rows, or directly pull out the last fews rows in the file.
Special Magic When Working with Subsets of Data
-----------------------------------------------
-As mentioned above, the data in a :py:class:`DataFile` is a sepcial siubclass of numpy's Masked Array - :py:class:`DataArray`.
+As mentioned above, the data in a :py:class:`DataFile` is a special subclass of numpy's Masked Array - :py:class:`DataArray`.
A DataArray understands that columns can have names and can be assigned to hold specific types of data - x,y,z values etc.
In fact, the logic used for the column names and setas attribute in a :py:class:`DataFile` is actually supplied by the
-:py:class:`DataArray`. Whn you index a DataFile or it's data, the resulting data remembers it's column names and assignments
+:py:class:`DataArray`. When you index a DataFile or its data, the resulting data remembers its column names and assignments
and these can be used directly::
    r=d[1:4]
@@ -551,7 +551,7 @@ the *i* attribute.::
    r.i # 10
    r.column_headers
-You can reset the row numbers by assiging a value to the *i* attribute.
+You can reset the row numbers by assigning a value to the *i* attribute.
A single column of data also gains a *.name* attribute that matches its column_header::
@@ -738,7 +738,7 @@ To select which rows of data have been masked off, use the :py:meth:`DataFile.fi
With just a single argument, the filter method takes a complete row at a time and passes it to the
first argument, expecting to get a boolean response (or a list of booleans equal in length
-to the number of columns). With a second argument as in the second example, you can sepcify which
+to the number of columns). With a second argument as in the second example, you can specify which
columns are passed to the filtering function in what order. The second argument must be a list of
things which can be used to index a column (i.e. strings, integers, regular expressions).
@@ -756,7 +756,7 @@ The general form is to provide keyword arguments that are something that can be
underscore, followed by an operator. Where more than one keyword argument is supplied, the results of testing each row are logically ORed. The result of
chaining together two separate calls to select will, however, logically AND the two tests. So, in the examples above, the first line will
assume an implicit equality test and give only those rows with a column *Temp* equal to 250. The second line gives an
-explicit greather than or equal to test for the same column. The third line will select first those rows that have column T1 less than 4.2 *or*
+explicit greater than or equal to test for the same column. The third line will select first those rows that have column T1 less than 4.2 *or*
column T2 less than 5 and then from those select those rows which have a column Res between 100 and 200. The full list of
operators is given in :py:meth:`Stoner.Folders.baseFolder.select`.
@@ -771,7 +771,7 @@ points. For this case the :py:meth:`DataFile.section` method can be used::
    slab=d.section(x=5.2)
    line=d.section(x=4.7,z=2)
    thick_slab=d.section(z=(5.0,6.0))
-    arbitary=d.section(r=lambda x,y,z:3*x-2*y+z-4==0)
+    arbitrary=d.section(r=lambda x,y,z:3*x-2*y+z-4==0)
After the x, y, z data columns are identified, the :py:meth:`DataFile.section` method works
with 'x', 'y' and 'z' keyword arguments which are then used to search for matching data rows (the arguments to
@@ -809,7 +809,7 @@ One of the characteristics of Python that can confuse those used to other
programming languages is that assignments and argument passing is by reference and not
by value. This can lead to unexpected results as you can end up modifying variables you
were not expecting! To help with creating genuine copies of data Python provides the copy module.
-Whilst this works with DataFile objects, for convenience, the :py:attr:`DataFile.clone` atribute is
+Whilst this works with DataFile objects, for convenience, the :py:attr:`DataFile.clone` attribute is
provided to make a deep copy of a DataFile object.
.. note::
@@ -863,7 +863,7 @@ arrays to the DataFile object to append additional data.::
In the first example above, we add a single row of data to *d*. This assumes
that the number of elements in the array matches the number of columns in the
data file. The second example is similar but this time appends a 2
-dimensional numpy array to the data. The third example demonstrates adding data from a dictioary.
+dimensional numpy array to the data. The third example demonstrates adding data from a dictionary.
If their columns that don't match one of the dictionary keys, then a ``NaN`` is inserted. If there are keys that don't match columns labels, then new columns are added to the data set, filled with ``NaN``. @@ -896,15 +896,15 @@ When writing to the column attriobutes you must supply a numpy array with the co try to spot and correct if the array needs to be transposed first). If you specify more than one column has a particular type then you should supply a 2D array with the corresponding number of columns of data setting the attribute. -In order to preserve the behaviour that allows you to set the column assingments by setting the attribute to an index type, the +In order to preserve the behaviour that allows you to set the column assignments by setting the attribute to an index type, the :py:class:`DataFile` checks to see if you are setting something that might be a column index or a numpy array. Thus the following also works:: d.x="Temp" # Set the Temp column to be x data d.x=np.linspace(0,300,len(d)) # And set the column to contain a linear array of values from 0 to 300. -You cannot set the p,q, or r attributes like this as they are calculated on the fly from the cartesian co-ordinates. -On the otherhand you can do an efficient conversion to polar co-ordinates with:: +You cannot set the p,q, or r attributes like this as they are calculated on the fly from the cartesian coordinates. +On the otherhand you can do an efficient conversion to polar coordinates with:: d.setas="xyz" (d.x,d.y,d.z)=(d.p,d.q,d.r) @@ -958,7 +958,7 @@ row as a 1D numpy array and any of the keyword, argument pairs passed in a dictionary to the optional *func_args* argument. The :py:meth:`DataFile.add_column` method returns a copy of the DataFile object -itself as well as modifying the object. This is to allow the metod to be chained +itself as well as modifying the object. This is to allow the method to be chained up with other methods for more compact code writing. Deleting Rows of Data @@ -993,13 +993,13 @@ The final two variants above, use a tuple to select the data. The final example use of the *invert* keyword argument to reverse the sense used to selkect tows. In both cases rows are deleted(kept for *invert* = True) if the specified column lies between the maximum and minimum values of the tuple. The test is done inclusively. Any length two iterable object can be used -for specifying the bounds. Fianlly, if you call :py:meth:`DataFile.del_rows` with no arguments at all, then +for specifying the bounds. Finally, if you call :py:meth:`DataFile.del_rows` with no arguments at all, then it removes all rows where at least one column of data is masked out.:: d.filter(lambda r:r[0]>50) # Mask all rows where the first column is greater than 50 d.del_rows() # Then delete them. -For simple caases where the row to be delted can be expressed as an integer or list of integers, +For simple caases where the row to be deleted can be expressed as an integer or list of integers, the subtration operator can be used.:: e=d-2 @@ -1080,5 +1080,5 @@ as a two level index of column headers and column assignments. The Stoner librar *metadata* extension attribute for DataFrames that provides thin sub-class wrapper around the same regular expression based and type hinting dictionary that is used to store metadata in :py:attr:`Stoner.Data.metadata`. 
-The pandas.DataFrame produced by the :py:meth:`Stoner.Data.to_pandas` method is reversibly convertable back to an identical
+The pandas.DataFrame produced by the :py:meth:`Stoner.Data.to_pandas` method is reversibly convertible back to an identical
:py:class:`Stoner.Data` object by passing the DataFrame into the constructor of the :py:class:`Stoner.Data` object.
diff --git a/doc/UserGuide/datafolder.rst b/doc/UserGuide/datafolder.rst
index ff53e1594..a6192cb23 100755
--- a/doc/UserGuide/datafolder.rst
+++ b/doc/UserGuide/datafolder.rst
@@ -14,7 +14,7 @@ Like :py:class:`Stoner.Core.Data`, :py:class:`Stoner.Folders.DataFolder` is expo
:py:class:`Stoner.Image.ImageFolder` is exported from the :py:mod:`Stoner.Image` sub-package.
:py:class:`DataFolder` and its friends are essentially containers for :py:class:`Stoner.Data` (or similar classes from the
-:py:mod:`Stoner.Image` package) and for other instances of :py:class:`DataFolder` to alow a nested heirarchy to be built up.
+:py:mod:`Stoner.Image` package) and for other instances of :py:class:`DataFolder` to allow a nested hierarchy to be built up.
The :py:class:`DataFolder` supports both sequence-like and mapping-like interfaces to both the :py:class:`Stoner.Core.Data` objects and the
'sub'-:py:class:`DataFolder` objects (meaning that they work like both a list or a dictionary). :py:class:`DataFolder` is also lazy about
loading files from disc - if an operation doesn't need to load a file it generally won't bother to keep memory usage
@@ -124,7 +124,7 @@ More Goodies for :py:class:`DataFolder` s
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Since a :py:class:`Stoner.Data` represents data in named columns, the :py:class:`DataFolder` offers a couple of additional options for actions to take
-when reading the files in from disk. It is possible to have the mean and satandard deviation of each column of data to be calculated and added as
+when reading the files in from disk. It is possible to have the mean and standard deviation of each column of data calculated and added as
metadata as each file is loaded. The *read_means* boolean parameter can enable this.
Other Options
@@ -140,15 +140,15 @@ for example, can allow one to set the default :py:attr:`Stoner.Data.setas` attri
    A particularly useful parameter to set in the DataFolder constructor is the *setas* parameter - this will ensure that the
    :py:attr:`Stoner.Data.setas` attribute is set to identify columns of data as x, y etc. as the data files are loaded into the folder - thus allowing subsequent calls to
-    :py:class:`Stoner.Data` methods to run without needing to explictly set the columns each time.
+    :py:class:`Stoner.Data` methods to run without needing to explicitly set the columns each time.
All of these keywords to the constructor will set corresponding attributes on the created :py:class:`DataFolder`, so it is possible to redo the
-process of reading the list of files from disk by directly manipulating these attrbutes.
+process of reading the list of files from disk by directly manipulating these attributes.
The current root directory and pattern are set in the *directory* and *pattern* keywords and stored in the similarly named attributes.
The :py:meth:`DataFolder.getlist` method can be used to force a new listing of files.::
-    f.dirctory='/home/phygbu/Data'
+    f.directory='/home/phygbu/Data'
    f.pattern='*.txt'
    f.getlist()
@@ -377,15 +377,15 @@ is the :py:class:`Stoner.Data` to be analysed, then the following syntax can be
    (my_analysis@f)(arg1,arg2,karg=False)
-*(my_analysis@f)* creates the callable object that iterates *my_analysis* over f, the second set of parenthesis above jsut calls this iterating object.
+*(my_analysis@f)* creates the callable object that iterates *my_analysis* over f, the second set of parentheses above just calls this iterating object.
If the return value of the function is another instance of :py:class:`Stoner.Data` (or whatever is being stored as the items in the
:py:class:`DataFolder`) then it will replace the items inside the :py:class:`DataFolder`. The call to :py:attr:`DataFolder.each` will also return a
-simple list of the return values. If the function retuns something else, then you can have it added to the metadata of each item in the
+simple list of the return values. If the function returns something else, then you can have it added to the metadata of each item in the
:py:class:`DataFolder` by adding a *_return* keyword that can either be True to use the function name as the metadata name or a string to
specify the name of the metadata to store the return value explicitly.
-Thus, if your analysis function calcualtes some parameter that you want to call *beta* you might use the following::
+Thus, if your analysis function calculates some parameter that you want to call *beta* you might use the following::
    f=DataFolder(".",pattern="*.txt")
    f.each(my_analysis,arg1,arg2,karg=False,_return="beta")
@@ -426,7 +426,7 @@ Writing to the contents of the :py:attr:`DataFolder.metadata` will simple set th
    f.metadata["test"]
    >>> array([12.56, 12.56, 12.56, 12.56])
-The :py:meth:`combined_metadata_proxy.slice" method procides more control over how the metadata stored in the data folder can be returned.::
+The :py:meth:`combined_metadata_proxy.slice` method provides more control over how the metadata stored in the data folder can be returned.::
    f.metadata.slice("Startupaxis-X")
    >>> [{'Startupaxis-X': 2},
@@ -562,8 +562,8 @@ based on metadata values.::
    f.select({"temp:T1":4.2})
The basic pattern of the :py:meth:`DataFolder.select` method is that each keyword argument determines both the name of the metadata to use
-as the asis of the selection and also the operation to be performed. The value of the keyword argument is the value use to check. The oepration is
-seperated from the column name by a double underscore.
+as the basis of the selection and also the operation to be performed. The value of the keyword argument is the value used to check. The operation is
+separated from the column name by a double underscore.
In the first example, only those files with a metadata value "temperature_T1" which is 4.2 will be selected; here there is no operator
specified, so for a single scalar value it is assumed to be ''__eq'' for equals. For a tuple it would be ''__between'' and for a longer list ''__in''.
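As a short hedged sketch of those operator suffixes (the metadata names here are invented for illustration)::

    f.select(temperature=4.2)                 # no suffix: an implicit __eq test
    f.select(temperature__between=(77, 300))  # tuple value with an explicit operator suffix
    f.select(field__in=[-1, 0, 1])            # membership test against a list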
@@ -590,7 +590,7 @@ data curves taken at a variety of temperatures and with three different magnetic
     f.groups
 
 The :py:meth:`DataFolder.group` method splits the files in the :py:class:`DataFolder` into several
-groups each of which share a common value of the arguement supplied to the :py:meth:`DataFolder.group`
+groups, each of which shares a common value of the argument supplied to the :py:meth:`DataFolder.group`
 method. A group is itself another instance of the :py:class:`DataFolder` class. As explained above, each
 :py:class:`DataFolder` object maintains a dictionary called :py:attr:`DataFolder.groups` whose keys are
 the distinct values of the argument of the :py:meth:`DataFolder.group` method and whose values are
@@ -648,7 +648,7 @@ to a set of metadata values from each file in the :py:class:`DataFolder`. This c
 
     f.extract(["Temperature","Angle","Other_metadata"])
 
 The argument to the :py:meth:`DataFolder.extract` method is a list of metadata values to be extracted from each file. The
-metadata should be convertable to an array type so that it can be included in the final result matrix. Any metadata that doesn't
+metadata should be convertible to an array type so that it can be included in the final result matrix. Any metadata that doesn't
 appear to be so convertible in the first file in the :py:class:`DataFolder` is ignored. The column headings of the final results
 table are the names of the metadata that were used in the extraction.
diff --git a/doc/UserGuide/developer.rst b/doc/UserGuide/developer.rst
index 81470affa..62c33e7c0 100644
--- a/doc/UserGuide/developer.rst
+++ b/doc/UserGuide/developer.rst
@@ -15,17 +15,17 @@ From the end-user perspective, the :py:class:`DataFile` class contains several a
 
 - data
     Although this is essentially a numpy array, it is in fact a subclass of a :py:class:`numpy.ma.MaskedArray`, :py:class:`DataArray`, with an
     extra attribute :py:attr:`DataArray.setas` that is used to label the columns of data and to track assignments to types of columns (x,y,z etc). As
-    :py:class:`DataArray` can thus track what columns mean in cartersian co-ordinate systems, it can provide an on-the-fly conversion to polar
-    co-ordinate sustems. :py:class:`DataArray` also keeps track of the row numbers of the array and can label its rows with the :py:attr:`DataArray.i`
+    :py:class:`DataArray` can thus track what columns mean in cartesian coordinate systems, it can provide an on-the-fly conversion to polar
+    coordinate systems. :py:class:`DataArray` also keeps track of the row numbers of the array and can label its rows with the :py:attr:`DataArray.i`
     attribute.
 - metadata
-    In many respects this looks like a standard dictionary, albeit one that sorts its keys in alpahabetical order. Actually it is a subclass of a
+    In many respects this looks like a standard dictionary, albeit one that sorts its keys in alphabetical order. Actually it is a subclass of a
     blist.sortedDict that maintains a second dictionary with matching keys that stores information about the data type of the values in the metadata
     in order to support export to non-duck typed environments.
 - column_headers
-    This appears to be a stadnard list of strings, except that it is kept in sync with the size of the numerical data array, adding extra entries or
+    This appears to be a standard list of strings, except that it is kept in sync with the size of the numerical data array, adding extra entries or
     losing them as the data array gains or loses columns.
Actually this is proxied through to the :py:attr:`DataArray.setas` attribute
 - setas
diff --git a/doc/UserGuide/extras/gb_custom.sty b/doc/UserGuide/extras/gb_custom.sty
index 5b8726f5b..bbca3e4f7 100644
--- a/doc/UserGuide/extras/gb_custom.sty
+++ b/doc/UserGuide/extras/gb_custom.sty
@@ -58,7 +58,7 @@
 \newenvironment{multipleguess}[1]{
 \begin{samepage}
 \item #1
-\begin{enumerate}[label={\bf \Alph*},itemsep=-1ex]
+\begin{enumerate}[label=\textbf{\Alph*},itemsep=-1ex]
 }{
 \end{enumerate}
 \end{samepage}
diff --git a/doc/UserGuide/image.rst b/doc/UserGuide/image.rst
index 2ff3a81d5..8ee4c48ef 100755
--- a/doc/UserGuide/image.rst
+++ b/doc/UserGuide/image.rst
@@ -24,7 +24,7 @@ allowing a sequence of operations to be completed by chaining the method calls t
 The array of methods available to a :class:`ImageFile` is particularly rich as the class automatically wraps functions
 from the popular scikit-image package (:py:mod:`skimage`) as well as the :py:mod:`scipy.ndimage` module. In general such functions take an image as their first argument
-and :class:`ImageFile` passes its own image data as that argument to whatever funcion is being wrapped. If the function Returns
+and :class:`ImageFile` passes its own image data as that argument to whatever function is being wrapped. If the function returns
 image data of the same size as the original image, then the wrapper replaces the :class:`ImageFile`'s image data with
 the new image data.
 
@@ -68,12 +68,12 @@ The key attributes and properties of the :class:`ImageFile` are:
 
 - :py:attr:`ImageFile.image`:
     This is the actual numpy array of data that is the image.
 - :attr:`ImageFile.metadata`:
-    This is the dictionary that contains the metadata assoicated with the image. This is normally parameters and
+    This is the dictionary that contains the metadata associated with the image. This is normally parameters and
     information about the measurement or data that is encoded within the measurement file, but can be supplemented
     by information added by the user directly or by methods and functions. Each item of metadata is normally referred
     to by a string *key*.
 - :attr:`ImageFile.filename`:
     This is the name of the file from which the image was loaded (as a string). It is also used as a default title when
-    displaying the iamge.
+    displaying the image.
 - :attr:`ImageFile.mask`:
     This represents the mask data for an image file. By default it is entirely False, meaning all parts of the image
     are visible. It supports a variety of different ways of manipulating which parts of the image are masked - see the section
@@ -96,7 +96,7 @@ formats as required.
 
 There are a number of other attributes that can tell you information about the :class:`ImageFile` such as:
 
 - :attr:`ImageFile.centre`:
-    the co-ordinates of the centre of the image
+    the coordinates of the centre of the image
 - :attr:`ImageFile.aspect`:
     the image aspect ratio
 - :attr:`ImageFile.shape`:
@@ -108,7 +108,7 @@ copies of the :class:`ImageFile` that are rotate 90 degrees clockwise and counte
 In many cases the default behaviour of modifying the image data in place may not be desired - to get a copy of the
 :class:`ImageFile` you use the :attr:`ImageFile.clone` attribute::
 
-    new_im = im.clone.gaussian(4.0) # create a copy of im and then apply a guassian blur
+    new_im = im.clone.gaussian(4.0) # create a copy of im and then apply a gaussian blur
 
 .. image:: figures/kermit-blurred.png
 
@@ -125,7 +125,7 @@ of the original image data.::
 
 ..
 image:: figures/kermit_flipped.png
 
-Indexing an :class:`ImageFile` with a string will instead acces the metadata stored with the object.::
+Indexing an :class:`ImageFile` with a string will instead access the metadata stored with the object.::
 
     im["Loaded as"]
     >>> 'KermitPNGFile'
@@ -158,22 +158,22 @@ opencv2, but this section will cover some of the commonly used functionality.
 
 Specify a box
 ^^^^^^^^^^^^^
 
-Many of the functions can be used with a *_box* parameter to limit their operation to a sepcified region of the image.
-Additionally the :meth:`ImageFile.crop` method will discard the image outside a region specifed in a similar way. The
+Many of the functions can be used with a *_box* parameter to limit their operation to a specified region of the image.
+Additionally the :meth:`ImageFile.crop` method will discard the image outside a region specified in a similar way. The
 working box can be given as follows:
 
 - A single integer:
-    This is interpreted to exclude a regio of n pixels from all sides of the image.
+    This is interpreted to exclude a region of n pixels from all sides of the image.
 - A single floating point number between 0.0 and 0.5:
     This is interpreted to exclude the corresponding fraction of the width and height from each side of the image
 - A tuple of 4 numbers, or 4 arguments:
-    This is interpreeted as a sequence of pxiel co-ordinates for (left-x, right-x, top-y, bottom-y). If any of the
+    This is interpreted as a sequence of pixel coordinates for (left-x, right-x, top-y, bottom-y). If any of the
     numbers are None, then this is taken as the minimum or maximum extents of the width or height (depending on whether
-    the None value substitutes for the left, right top or bottom co-ordinate).
+    the None value substitutes for the left, right, top or bottom coordinate).
 - A single string:
     The argument is interpreted as the name of a metadata item that will define the box to be used.
 - A single value False:
-    This is equivalent to the whole iamge (i.e. to not specify a box)
+    This is equivalent to the whole image (i.e. to not specify a box)
 - A single None value:
     In this case a copy of the image is shown to the user and they are invited to draw the box with the mouse and
     then press the key to confirm their selection.
@@ -184,10 +184,10 @@ Aligning Two Images
 ^^^^^^^^^^^^^^^^^^^
 
 The :meth:`Stoner.ImageFile.align` method can be used to align an image to a reference image. It offers a variety of different
-algorthims which may be better or worse depending on the nature of the image. The options are:
+algorithms which may be better or worse depending on the nature of the image. The options are:
 
 - chi2_shift:
-    this uses the image-registration module to carry out a chi-squared analysis of shifting the two iamges
+    this uses the image-registration module to carry out a chi-squared analysis of shifting the two images
     relative to each other.
 - imreg_dft:
     this uses the imreg_dft module to carry out the image registration. In essence it takes a Fourier transform
@@ -196,9 +196,9 @@ algorthims which may be better or worse depending on the nature of the image. Th
     this is the default method used. It first of all applies a Scharr edge detection filter and uses the imreg_dft
     method to find the translation vector.
 - cv2:
-    this method uses the opencv2 package's alignment algorthim.
+    this method uses the opencv2 package's alignment algorithm.
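Pulling the box conventions and alignment options together, a minimal sketch (assuming *im* is the working :class:`ImageFile` and *ref* a reference image; no algorithm is chosen explicitly, so the default Scharr-filtered registration described above is relied upon)::

    # Align im to ref, restricting the comparison to a box 50 pixels in from
    # every edge - one of the box conventions listed above.
    im.align(ref, _box=50)

    # crop uses the same conventions; clone first so the original is untouched.
    centre = im.clone.crop(0.25)   # keep the middle half of the image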
-Align also takes a *_box* keyword parameter to confine the section of the image used for the alignment to a sepcific region
+Align also takes a *_box* keyword parameter to confine the section of the image used for the alignment to a specific region
 (this can make the operation more efficient if much of the images are featureless), and a *scale* parameter that will
 upscale the image before attempting to do the alignment. This may improve sub-pixel alignment corrections.
@@ -249,7 +249,7 @@ noise and speckle.::
 
     im.gaussian(1.0)
 
-For band-pass filt4ering the scikit-image.fitlers method *difference_of_gaussians* can be used. This filters the image with two
+For band-pass filtering the scikit-image.filters method *difference_of_gaussians* can be used. This filters the image with two
 different gaussian blurs and then takes the difference between them - the smaller gaussian blur removes high frequency
 noise whilst the large gaussian removes low spatial frequency variations.::
@@ -284,7 +284,7 @@ parameter, whilst the output is controlled by the *phase* parameter - False give
 and None returns the full complex FFT.
 
 To aid with analysing radial distributions in FFTs (or images), the :meth:`ImageFile.radial_profile` method can be used.
-This will compute a prfile from a given centre outwards - either integrating over all angles, or restricting to specific angles.
+This will compute a profile from a given centre outwards - either integrating over all angles, or restricting to specific angles.
 At its simplest one can just do::
 
     profile=fft.radial_profile()
diff --git a/doc/UserGuide/install.rst b/doc/UserGuide/install.rst
index 167d4fdf6..5ebce8e2b 100644
--- a/doc/UserGuide/install.rst
+++ b/doc/UserGuide/install.rst
@@ -5,7 +5,7 @@ Installation of Stoner Package
 Introduction
 ============
 
-This manual provides a user guide and reference for the Stoner python pacakage.
+This manual provides a user guide and reference for the Stoner python package.
 The Stoner python package provides a set of python classes and functions for reading, manipulating and plotting data acquired with
 the lab equipment in the Condensed Matter Physics Group at the University of Leeds.
@@ -114,6 +114,6 @@ representations such as png or html output formats.::
 
     from Stoner import Options
     Options.short_repr=True
 
-The **options** object supports reading and setting options through attribute assignment. Deleting an Option atribute
+The **options** object supports reading and setting options through attribute assignment. Deleting an Option attribute
 resets the Option back to the corresponding default value. **dir(Options)** will give a list of all possible package
 options.
diff --git a/doc/UserGuide/plotfile.rst b/doc/UserGuide/plotfile.rst
index 0bab5e4d8..e9c8c3ff9 100755
--- a/doc/UserGuide/plotfile.rst
+++ b/doc/UserGuide/plotfile.rst
@@ -18,7 +18,7 @@ Once this is done, the plotting methods will use these to try to make a sensible
 
     p.setas="xye"
     p.plot()
 
-:py:meth:`Data.plot` is simply a wrapper that insepects the available columns and calls either :py:meth:`Data.plot_xy` or
+:py:meth:`Data.plot` is simply a wrapper that inspects the available columns and calls either :py:meth:`Data.plot_xy` or
 :py:meth:`Data.plot_xyz` as appropriate. All keyword arguments to :py:meth:`Data.plot` are passed on to the actual plotting method.
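Because every keyword passes through, matplotlib styling keywords can be given directly - a minimal sketch (the filename and the three-column x, y, y-error layout are assumptions)::

    from Stoner import Data

    p = Data("sweep.txt")   # hypothetical three-column data file
    p.setas = "xye"         # assign x, y and y-error columns as above
    p.plot(color="red", label="sweep 1")  # both keywords are handed on to matplotlib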
Each :py:class:`Data` instance maintains its own :py:class:`matplotlib.pyplot.Figure` in the :py:attr:`Data.fig` attribute, if no
@@ -71,7 +71,7 @@ are a number of useful plotter functions that will work like this
 
 * [:py:func:`matplotlib.pyplot.loglog`] Like the semi-log plots, this will produce a log-log plot.
 * [:py:func:`matplotlib.pyplot.errorbar`] this particularly useful plotting function will draw error bars. The values for the
   error bars are passed as keyword arguments, *xerr* or *yerr*. In standard matplotlib, these can be numpy arrays or constants.
-  :py:meth:`Data.plot_xy` extends this by intercepting these arguements and offering some short cuts::
+  :py:meth:`Data.plot_xy` extends this by intercepting these arguments and offering some short cuts::
 
     p.plot_xy(x,y,plotter=errorbar,yerr='dResistance',xerr=[5,'dTemp+'])
@@ -99,7 +99,7 @@ these on a surface plot or 3D plot. The :py:meth:`Data.plot_xyz` method can be u
    :outname: colormap
 
 By default the :py:meth:`Data.plot_xyz` will produce a 3D surface plot with the z-axis coded with a rainbow colour-map
-(specifically, the matplotlib provided *matplotlib.cm.jet* colour-map. This can be overriden with the *cmap* keyword
+(specifically, the matplotlib provided *matplotlib.cm.jet* colour-map). This can be overridden with the *cmap* keyword
 parameter. If a simple 2D surface plot is required, then the *plotter* parameter should be set to a suitable function
 such as **pyplot.pcolor**.
@@ -139,7 +139,7 @@ the y values for each column are found from the column headers interpreted as a
 to the built in 'jet' theme. The x axis label is set to be the column header for the first column, the y axis label is
 set either from the meta data item 'ylabel' or to 'Y Data'. Likewise the z axis label is set from the corresponding
 metadata item or defaults to 'Z Data'. In the second form these parameters are all set explicitly. The *xvals* parameter can be
-either a column index (integer or sring) or a list, tuple or numpy array. The *yvals* parameter can be either a row number
+either a column index (integer or string) or a list, tuple or numpy array. The *yvals* parameter can be either a row number
 (integer) or list, tuple or numpy array. Other parameters (including *plotter*, *figure* etc) work as for the
 :py:meth:`Data.plot_xyz` method. The *rectang* parameter is used to select only part of the data array to use as the
 matrix. It may be a 2-tuple in which case it specifies just the origin as (row,column) or a 4-tuple in which case the third and fourth
@@ -153,10 +153,10 @@ Plotting 3D Scalar Fields
 
 A 3D scalar field is some function :math:`F(x,y,z)` that returns a scalar value. One option is to plot this as a
 volumetric plot where a colour is interpolated over a volume. For this purpose, :py:meth:`Data.voxel_plot` is provided.
-The function will accept x,y and z column indices for the co-ordinates (or take them from the :py:attr:`Data.setas`
+The function will accept x,y and z column indices for the coordinates (or take them from the :py:attr:`Data.setas`
 attribute) and a fourth, *u* column index (or the column defined as holding the *u* data in the :py:attr:`Data.setas`
 attribute). Keyword arguments *visible* or *filled* indicate whether a particular voxel will be plotted or not to
-allow a cross section of the 3D co-ordinate space to be show.
+allow a cross section of the 3D coordinate space to be shown.
 
 ..
 plot:: samples/voxel_plot.py
    :include-source:
@@ -167,7 +167,7 @@ Plotting Vector Fields
 
 For these purposes a vector field is a set of points $(x,y,z)$ at which a 3D vector $(u,v,w)$ is defined. Often
 vector fields are visualised by using quiver plots, where a small arrow points in the direction of the vector and whose length
-is proportional to the magnitude. This can also be suplemented by colour information - in one scheme a hue-saturation-luminence
+is proportional to the magnitude. This can also be supplemented by colour information - in one scheme a hue-saturation-luminance
 space is used, where hue describes a direction in the x-y plane, the luminance describes the vertical component and the
 saturation, the relative magnitude. This is a common scheme in micro-magnetics, so is supported in the Stoner package.
 Following the naming convention above, the :py:meth:`Data.plot_xyzuvw` method handles these plots.::
@@ -184,7 +184,7 @@ Enthought. The import is done when the plot is required to speed loading times f
 2D plotting only is required. If the mayavi package is not available, then matplotlib's 3D quiver plot is used as a fall back.
 
 The first example above will result in a plot using flat arrows coloured according to the vector magnitude. The second
-examnple will instead color them using the specified column from the data. The third example demonstrates passing in
+example will instead color them using the specified column from the data. The third example demonstrates passing in
 a separate list of colour data. In both of these cases the relative magnitude of the colors data is mapped to a colour
 map (which can be given via a colormap keyword parameter).
@@ -246,7 +246,7 @@ instance. When setting the attribute, lists and tuples will be assumed to contai
 keyword arguments. If you want to pass a single tuple, list or dictionary, then you should wrap it in a single element
 tuple.
 
-Particualrly useful attributes include:
+Particularly useful attributes include:
 
 - :py:attr:`Data.xlabel`, :py:attr:`Data.ylabel` will set the x and y axes labels
 - :py:attr:`Data.title` will set the plot title
@@ -332,9 +332,9 @@ or refer to a file on disc. The template will search for the file (named *