diff --git a/.gitignore b/.gitignore index 61df16ff0..76e2a0455 100755 --- a/.gitignore +++ b/.gitignore @@ -51,7 +51,3 @@ Backup_of_*.cdr #Profiling prof - -#LockFiles -*.dirlock -*.lock diff --git a/README.rst b/README.rst index 40c1ceed9..8f58e3b8f 100755 --- a/README.rst +++ b/README.rst @@ -41,7 +41,8 @@ Getting this Code :alt: Introduction and Installation Guide to Stoner Python Package :width: 320 -See the `requirements.txt` file for the current package requirements. +The *Stoner* package requires h5py>=2.7.0, lmfit>=0.9.7, matplotlib>=2.0, numpy>=1.13, Pillow>=4.0, +scikit-image>=0.13.0 and scipy>=1.0.0, and also optionally depends on filemagic, npTDMS, imreg_dft and numba, fabio, hyperspy. Anaconda Python (and probably other scientific Python distributions) includes nearly all of the dependencies, and the remaining dependencies are collected together in the **phygbu** repository on anaconda cloud. The easiest way to install the Stoner package is, @@ -50,10 +51,10 @@ therefore, to install the most recent Anaconda Python distribution. Compatibility -------------- -Versions 0.10.x (stable branch) are compatible with Python 3.6-3.9. The current development branch is targetting v3.8-3.9 -with 3.10 added when anaconda support is available in mainstream realease. +Versions 0.9.x (stable branch) are compatible with Python 2.7, 3.5, 3.6 and 3.7. The latest 0.9.6 version is also compatible with Python 3.8. +The current stable version (0.10, stable branch) is compatible with Python 3.6-3.9. -Conda packages are built on each github release via a github actions for 64bit Python on Windows, MacOS and Linux. +Conda packages are prepared for the stable branch and when the development branch enters beta testing. Pip wheels are prepared for selected stable releases only. Installation ------------ diff --git a/Stoner/Analysis.py b/Stoner/Analysis.py index 2535c23c4..512a04406 100755 --- a/Stoner/Analysis.py +++ b/Stoner/Analysis.py @@ -1,12 +1,17 @@ """Stoner .Analysis provides a subclass of :class:`.Data` that has extra analysis routines builtin.""" -__all__ = ["AnalysisMixin"] +__all__ = ["AnalysisMixin", "GetAffineTransform", "ApplyAffineTransform"] +from inspect import getfullargspec import numpy as np import numpy.ma as ma from scipy.integrate import cumtrapz +from scipy.optimize import curve_fit from .tools import isiterable, isTuple +from .compat import string_types +from .core.exceptions import assertion +from .analysis.utils import threshold as _threshold, ApplyAffineTransform, GetAffineTransform # from matplotlib.pylab import * #Surely not?
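Referring back to the README hunk above: the installation route it recommends reduces to a one-line command. A minimal sketch, assuming the package is published as ``Stoner`` on the **phygbu** anaconda channel named in the README and on PyPI::

    conda install -c phygbu Stoner
    # or, for a non-conda Python installation:
    pip install Stoner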
diff --git a/Stoner/Core.py b/Stoner/Core.py index 67f0b6e7b..99f54fc6c 100755 --- a/Stoner/Core.py +++ b/Stoner/Core.py @@ -30,7 +30,7 @@ from .compat import string_types, int_types, index_types, _pattern_type, path_types from .tools import all_type, isiterable, isLikeList, get_option, make_Data -from .tools.file import get_file_name_type, auto_load_classes, get_file_type +from .tools.file import get_file_name_type, auto_load_classes from .core.exceptions import StonerLoadError, StonerSetasError from .core import _setas, regexpDict, typeHintedDict, metadataObject @@ -1369,8 +1369,6 @@ def load(cls, *args, **kargs): filename, filetype = get_file_name_type(filename, filetype, DataFile) elif not auto_load and not filetype: raise StonerLoadError("Cannot read data from non-path like filenames !") - else: - filetype = get_file_type(filetype, DataFile) if auto_load: # We're going to try every subclass we can ret = auto_load_classes(filename, DataFile, debug=False, args=args, kargs=kargs) if not isinstance(ret, DataFile): # autoload returned something that wasn't a data file! diff --git a/Stoner/HDF5.py b/Stoner/HDF5.py index d7ac37af7..1587d8729 100755 --- a/Stoner/HDF5.py +++ b/Stoner/HDF5.py @@ -3,7 +3,7 @@ Classes include * HDF5File - A :py:class:`Stoner.Core.DataFile` subclass that can save and load data from hdf5 files -* HDF5Folder - A :py:class:`Stoner.folders.DataFolder` subclass that can save and load data from a single hdf5 file +* HDF5Folder - A :py:class:`Stoner.Folders.DataFolder` subclass that can save and load data from a single hdf5 file It is only necessary to import this module for the subclasses of :py:class:`Stoner.Core.DataFile` to become available to :py:class:`Stoner.Core.Data`. @@ -375,7 +375,7 @@ class HDF5FolderMixin: """Provides a method to load and save data from a single HDF5 file with groups. - See :py:class:`Stoner.folders.DataFolder` for documentation on constructor. + See :py:class:`Stoner.Folders.DataFolder` for documentation on constructor. Data layout consists of sub-groups that are either instances of HDF5Files (i.e. have a type attribute that contains 'HDF5File') or are themselves HDF5Folder instances (with a type attribute that reads 'HDF5Folder'). diff --git a/Stoner/Image/core.py b/Stoner/Image/core.py index 3d6358d13..0babdc206 100755 --- a/Stoner/Image/core.py +++ b/Stoner/Image/core.py @@ -8,7 +8,7 @@ from io import BytesIO as StreamIO from warnings import warn -from PIL import Image, PngImagePlugin +from PIL import Image import numpy as np import matplotlib.pyplot as plt from scipy import ndimage as ndi @@ -26,6 +26,7 @@ segmentation, transform, ) + from ..core.base import typeHintedDict, metadataObject from ..core.exceptions import StonerLoadError, StonerUnrecognisedFormat from ..Core import DataFile @@ -37,18 +38,12 @@ get_filedialog, int_types, path_types, - str2bytes, ) # Some things to help with Python2 and Python3 compatibility from .attrs import DrawProxy, MaskProxy from .widgets import RegionSelect from .
import imagefuncs from ..tools.classes import Options -if "READTHEDOCS" not in os.environ: - from skimage import viewer -else: - from ..tools import null as viewer - IMAGE_FILES = [("Tiff File", "*.tif;*.tiff"), ("PNG files", "*.png"), ("Numpy Files", "*.npy")] dtype_range = { diff --git a/Stoner/Image/folders.py b/Stoner/Image/folders.py index 29ae53d10..e23631ed4 100755 --- a/Stoner/Image/folders.py +++ b/Stoner/Image/folders.py @@ -13,7 +13,7 @@ from PIL import Image from .core import ImageArray -from ..folders import DiskBasedFolderMixin, baseFolder +from ..Folders import DiskBasedFolderMixin, baseFolder from ..compat import string_types, int_types from . import ImageFile @@ -366,7 +366,7 @@ def montage(self, *args, **kargs): else: j += 1 fig = figure(fignum) - ax = fig.add_subplot(plt_y, plt_x, j) + ax = subplot(plt_y, plt_x, j) plt_kargs["figure"] = fig plt_kargs["ax"] = ax if "title" in kargs: diff --git a/Stoner/Image/stack.py b/Stoner/Image/stack.py index 7578c7da7..8a54381e8 100755 --- a/Stoner/Image/stack.py +++ b/Stoner/Image/stack.py @@ -9,7 +9,7 @@ from ..core.exceptions import assertion from ..Core import regexpDict, typeHintedDict -from ..folders import DiskBasedFolderMixin, baseFolder +from ..Folders import DiskBasedFolderMixin, baseFolder from .core import ImageArray, ImageFile from .folders import ImageFolder, ImageFolderMixin diff --git a/Stoner/Util.py b/Stoner/Util.py index 819acde00..c9a789bed 100755 --- a/Stoner/Util.py +++ b/Stoner/Util.py @@ -171,8 +171,8 @@ def split_up_down(data, col=None, folder=None): object containing the data to be sorted col (index): is something that :py:meth:`Stoner.Core.DataFile.find_col` can use - folder (:py:class:`Stoner.folders.DataFolder` or None): - if this is an instance of :py:class:`Stoner.folders.DataFolder` then add + folder (:py:class:`Stoner.Folders.DataFolder` or None): - if this is an instance of :py:class:`Stoner.Folders.DataFolder` then add rising and falling files to groups of this DataFolder, otherwise create a new one Returns: diff --git a/Stoner/Zip.py b/Stoner/Zip.py index 2f6a15853..82181e025 100755 --- a/Stoner/Zip.py +++ b/Stoner/Zip.py @@ -4,7 +4,7 @@ Classes Include * ZippedFile - A :py:class:`Stoner.Core.DataFile` subclass that can save and load data from a zip file -* ZipFolder - A :py:class:`Stoner.folders.DataFolder` subclass that can save and load data from a single zip file +* ZipFolder - A :py:class:`Stoner.Folders.DataFolder` subclass that can save and load data from a single zip file """ __all__ = ["test_is_zip", "ZippedFile", "ZipFolderMixin", "ZipFolder"] import zipfile as zf @@ -14,7 +14,7 @@ from .compat import string_types, str2bytes, get_filedialog, _pattern_type, path_types from .Core import DataFile, StonerLoadError -from .folders import DiskBasedFolderMixin +from .Folders import DiskBasedFolderMixin from .folders.core import baseFolder from .folders.utils import pathjoin from .tools import copy_into, make_Data @@ -231,7 +231,7 @@ class ZipFolderMixin: """Provides methods to load and save data from a single Zip file. - See :py:class:`Stoner.folders.DataFolder` for documentation on constructor. + See :py:class:`Stoner.Folders.DataFolder` for documentation on constructor.
Note: As this mixin class provides both read and write storage, it cannot be mixed in with another class that @@ -492,7 +492,7 @@ def _save(self, f, trail): :py:meth:`Stoner.Zip.ZipFile.save` This routine is used by a walk_groups call - hence the prototype matches that required for - :py:meth:`Stoner.folders.DataFolder.walk_groups`. + :py:meth:`Stoner.Folders.DataFolder.walk_groups`. """ if not isinstance(f, DataFile): diff --git a/Stoner/__init__.py b/Stoner/__init__.py index 958f6f698..0d67223b5 100755 --- a/Stoner/__init__.py +++ b/Stoner/__init__.py @@ -27,7 +27,7 @@ from . import core, analysis, formats, plot, tools, Image from .core.data import Data -from .folders import DataFolder +from .Folders import DataFolder from .Image import ImageFile, ImageFolder, ImageStack from .tools import set_option, get_option, Options as _Options diff --git a/Stoner/analysis/features.py b/Stoner/analysis/features.py index b53f2e070..05bef6437 100755 --- a/Stoner/analysis/features.py +++ b/Stoner/analysis/features.py @@ -4,17 +4,13 @@ __all__ = ["FeatureOpsMixin"] -from inspect import getfullargspec - import numpy as np from scipy.signal import find_peaks from scipy.interpolate import interp1d -from scipy.optimize import curve_fit -from ..compat import string_types -from ..tools import isiterable, isTuple -from ..core.exceptions import assertion -from .utils import threshold as _threshold +from Stoner.tools import isiterable, isTuple +from Stoner.core.exceptions import assertion +from .utils import threshold class FeatureOpsMixin: @@ -117,7 +113,7 @@ def peaks(self, **kargs): xdata = self.column(xcol) xdata = interp1d(np.arange(len(self)), xdata, kind="cubic") - possible_peaks = np.array(_threshold(0, d1, rising=troughs, falling=peaks)) + possible_peaks = np.array(threshold(0, d1, rising=troughs, falling=peaks)) curvature = np.abs(d2_interp(possible_peaks)) # Filter just the significant peaks @@ -285,252 +281,3 @@ def find_peaks(self, **kargs): if full_data: return peak_data, data return peak_data[_.xcol], peak_data[_.yxol], data - - def stitch(self, other, xcol=None, ycol=None, overlap=None, min_overlap=0.0, mode="All", func=None, p0=None): - r"""Apply a scaling to this data set to make it stich to another dataset. - - Args: - other (DataFile): - Another data set that is used as the base to stitch this one on to - xcol,ycol (index or None): - The x and y data columns. If left as None then the current setas attribute is used. - - Keyword Arguments: - overlap (tuple of (lower,higher) or None): - The band of x values that are used in both data sets to match, - if left as None, thenthe common overlap of the x data is used. - min_overlap (float): - If you know that overlap must be bigger than a certain amount, the bounds between the two - data sets needs to be adjusted. In this case min_overlap shifts the boundary of the overlap - on this DataFile. - mode (str): - Unless *func* is specified, controls which parameters are actually variable, defaults to all of them. - func (callable): - a stitching function that transforms :math:`(x,y)\rightarrow(x',y')`. 
Default is to use - functions defined by *mode* - p0 (iterable): - if func is not None then p0 should be the starting values for the stitching function parameters - - Returns: - (:py:class:`Stoner.Data`): - A copy of the current :py:class:`AnalysisMixin` with the x and y data columns adjusted to stitch - - To stitch the data together, the x and y data in the current data file is transforms so that - :math:`x'=x+A` and :math:`y'=By+C` where :math:`A,B,C` are constants and :math:`(x',y')` are close matches - to the :math:`(x,y)` data in *other*. The algorithm assumes that the overlap region contains equal - numbers of :math:`(x,y)` points *mode* controls whether A,B, and C are fixed or adjustable - - - "All" - all three parameters adjustable - - "Scale y, shift x" - C is fixed at 0.0 - - "Scale and shift y" A is fixed at 0.0 - - "Scale y" - only B is adjustable - - "Shift y" - Only c is adjsutable - - "Shift x" - Only A is adjustable - - "Shift both" - B is fixed at 1.0 - - See Also: - User Guide section :ref:`stitch_guide` - - Example: - .. plot:: samples/stitch-int-overlap.py - :include-source: - :outname: stitch_int_overlap - """ - _ = self._col_args(xcol=xcol, ycol=ycol, scalar=True) - points = self.column([_.xcol, _.ycol]) - points = points[points[:, 0].argsort(), :] - points[:, 0] += min_overlap - otherpoints = other.column([_.xcol, _.ycol]) - otherpoints = otherpoints[otherpoints[:, 0].argsort(), :] - self_second = np.max(points[:, 0]) > np.max(otherpoints[:, 0]) - if overlap is None: # Calculate the overlap - lower = max(np.min(points[:, 0]), np.min(otherpoints[:, 0])) - upper = min(np.max(points[:, 0]), np.max(otherpoints[:, 0])) - elif isinstance(overlap, int) and overlap > 0: - if self_second: - lower = points[0, 0] - upper = points[overlap, 0] - else: - lower = points[-overlap - 1, 0] - upper = points[-1, 0] - elif ( - isinstance(overlap, tuple) - and len(overlap) == 2 - and isinstance(overlap[0], float and isinstance(overlap[1], float)) - ): - lower = min(overlap) - upper = max(overlap) - inrange = np.logical_and(points[:, 0] >= lower, points[:, 0] <= upper) - points = points[inrange] - num_pts = points.shape[0] - if self_second: - otherpoints = otherpoints[-num_pts - 1 : -1] - else: - otherpoints = otherpoints[0:num_pts] - x = points[:, 0] - y = points[:, 1] - xp = otherpoints[:, 0] - yp = otherpoints[:, 1] - if func is None: - opts = { - "all": (lambda x, y, A, B, C: (x + A, y * B + C)), - "scale y and shift x": (lambda x, y, A, B: (x + A, B * y)), - "scale and shift y": (lambda x, y, B, C: (x, y * B + C)), - "scale y": (lambda x, y, B: (x, y * B)), - "shift y": (lambda x, y, C: (x, y + C)), - "shift both": (lambda x, y, A, C: (x + A, y + C)), - } - defaults = { - "all": [1, 2, 3], - "scale y,shift x": [1, 2], - "scale and shift y": [2, 3], - "scale y": [2], - "shift y": [3], - "shift both": [1, 3], - } - A0 = np.mean(xp) - np.mean(x) - C0 = np.mean(yp) - np.mean(y) - B0 = (np.max(yp) - np.min(yp)) / (np.max(y) - np.min(y)) - p = np.array([0, A0, B0, C0]) - assertion(isinstance(mode, string_types), "mode keyword should be a string if func is not defined") - mode = mode.lower() - assertion(mode in opts, f"mode keyword should be one of {opts.keys}") - func = opts[mode] - p0 = p[defaults[mode]] - else: - assertion(callable(func), "Keyword func should be callable if given") - args = getfullargspec(func)[0] # pylint: disable=W1505 - assertion(isiterable(p0), "Keyword parameter p0 shoiuld be iterable if keyword func is given") - assertion( - len(p0) == len(args) - 2, "Keyword p0 
should be the same length as the optional arguments to func" - ) - # This is a bit of a hack, we turn (x,y) points into a 1D array of x and then y data - set1 = np.append(x, y) - set2 = np.append(xp, yp) - assertion(len(set1) == len(set2), "The number of points in the overlap are different in the two data sets") - - def transform(set1, *p): - """Construct the wrapper function to fit for transform.""" - m = int(len(set1) / 2) - x = set1[:m] - y = set1[m:] - tmp = func(x, y, *p) - out = np.append(tmp[0], tmp[1]) - return out - - popt, pcov = curve_fit(transform, set1, set2, p0=p0) # Curve fit for optimal A,B,C - perr = np.sqrt(np.diagonal(pcov)) - self.data[:, _.xcol], self.data[:, _.ycol] = func(self.data[:, _.xcol], self.data[:, _.ycol], *popt) - self["Stitching Coefficients"] = list(popt) - self["Stitching Coeffient Errors"] = list(perr) - self["Stitching overlap"] = (lower, upper) - self["Stitching Window"] = num_pts - - return self - - def threshold(self, threshold, **kargs): - """Find partial indices where the data in column passes the threshold, rising or falling. - - Args: - threshold (float): - Value to look for in column col - - Keyword Arguments: - col (index): - Column index to look for data in - rising (bool): - look for case where the data is increasing in value (defaukt True) - falling (bool): - look for case where data is fallinh in value (default False) - xcol (index, bool or None): - rather than returning a fractional row index, return the - interpolated value in column xcol. If xcol is False, then return a complete row - all_vals (bool): return all crossing points of the threshold or just the first. (default False) - transpose (bbool): - Swap the x and y columns around - this is most useful when the column assignments - have been done via the setas attribute - all_vals (bool): - Return all values that match the criteria, or just the first in the file. - - Returns: - (float): - Either a sing;le fractional row index, or an in terpolated x value - - Note: - If you don't sepcify a col value or set it to None, then the assigned columns via the - :py:attr:`DataFile.setas` attribute will be used. - - Warning: - There has been an API change. Versions prior to 0.1.9 placed the column before the threshold in the - positional argument list. In order to support the use of assigned columns, this has been swapped to the - present order. 
- """ - DataArray = type(self.data) - col = kargs.pop("col", None) - xcol = kargs.pop("xcol", None) - _ = self._col_args(xcol=xcol, ycol=col) - - col = _.ycol - if xcol is None and _.has_xcol: - xcol = _.xcol - - rising = kargs.pop("rising", True) - falling = kargs.pop("falling", False) - all_vals = kargs.pop("all_vals", False) - - current = self.column(col) - - # Recursively call if we've got an iterable threshold - if isiterable(threshold): - if isinstance(xcol, bool) and not xcol: - ret = np.zeros((len(threshold), self.shape[1])) - else: - ret = np.zeros_like(threshold).view(type=DataArray) - for ix, th in enumerate(threshold): - ret[ix] = self.threshold(th, col=col, xcol=xcol, rising=rising, falling=falling, all_vals=all_vals) - # Now we have to clean up the retujrn list into a DataArray - if isinstance(xcol, bool) and not xcol: # if xcol was False we got a complete row back - ch = self.column_headers - ret.setas = self.setas.clone - ret.column_headers = ch - ret.i = ret[0].i - else: # Either xcol was None so we got indices or we got a specified column back - if xcol is not None: # Specific column - ret = np.atleast_2d(ret) - ret.column_headers = [self.column_headers[self.find_col(xcol)]] - ret.i = [r.i for r in ret] - ret.setas = "x" - ret.isrow = False - else: - ret.column_headers = ["Index"] - ret.isrow = False - return ret - ret = _threshold(threshold, current, rising=rising, falling=falling) - if not all_vals: - ret = [ret[0]] if np.any(ret) else [] - - if isinstance(xcol, bool) and not xcol: - retval = self.interpolate(ret, xcol=False) - retval.setas = self.setas.clone - retval.setas.shape = retval.shape - retval.i = ret - ret = retval - elif xcol is not None: - retval = self.interpolate(ret, xcol=False)[:, self.find_col(xcol)] - # if retval.ndim>0: #not sure what this bit does but it's throwing errors for a simple threshold - # retval.setas=self.setas.clone - # retval.setas.shape=retval.shape - # retval.i=ret - ret = retval - else: - ret = DataArray(ret) - if not all_vals: - if ret.size == 1: - pass - elif ret.size > 1: - ret = ret[0] - else: - ret = [] - if isinstance(ret, DataArray): - ret.isrow = True - return ret diff --git a/Stoner/core/array.py b/Stoner/core/array.py index b9ed12082..6af8f02c6 100755 --- a/Stoner/core/array.py +++ b/Stoner/core/array.py @@ -511,7 +511,7 @@ def _col_args( ret[c] = ret[c][0] else: ret[c] = None - elif force_list or (isinstance(scalar, bool) and not scalar): + elif isinstance(scalar, bool) and not scalar: for c in ret: if c.startswith("x") or c.startswith("has_"): continue diff --git a/Stoner/folders/__init__.py b/Stoner/folders/__init__.py index b2adcd65f..65443185e 100755 --- a/Stoner/folders/__init__.py +++ b/Stoner/folders/__init__.py @@ -1,17 +1,6 @@ """Core support for working with collections of files in the :py:class:`Stoner.DataFolder`.""" -__all__ = [ - "core", - "each", - "groups", - "metadata", - "mixins", - "utils", - "baseFolder", - "DataFolder", - "PlotFolder", - "DiskBasedFolderMixin", -] +__all__ = ["core", "each", "groups", "metadata", "mixins", "utils", "DataFolder", "PlotFolder"] -from .core import baseFolder -from .mixins import DataFolder, PlotFolder, DiskBasedFolderMixin +from . import mixins +from .mixins import DataFolder, PlotFolder diff --git a/Stoner/folders/core.py b/Stoner/folders/core.py index 70f97c46e..93fd35c6f 100755 --- a/Stoner/folders/core.py +++ b/Stoner/folders/core.py @@ -261,7 +261,6 @@ def __new__(cls, *args, **kargs): self._root = "." 
self._default_store = None self.directory = None - self.executor = None return self def __init__(self, *args, **kargs): @@ -699,11 +698,6 @@ def __clone__(self, other=None, attrs_only=False): ######## Methods to implement the MutableMapping abstract methods ######### ######## And to provide a mapping interface that mainly access groups ##### - def __del__(self): - """Clean up the exececutor if it is defined.""" - if self.executor: - self.executor.shutdown() - def __getitem__(self, name): """Try to get either a group or an object. diff --git a/Stoner/folders/each.py b/Stoner/folders/each.py index 9372d91f5..e97dc9fdf 100755 --- a/Stoner/folders/each.py +++ b/Stoner/folders/each.py @@ -1,12 +1,8 @@ """Classes and support functions for the :py:attr:`Stoner.DataFolder.each`.magic attribute.""" -from __future__ import annotations - __all__ = ["Item"] from collections.abc import MutableSequence from functools import wraps, partial from traceback import format_exc -import os -from typing import TYPE_CHECKING import numpy as np @@ -14,15 +10,11 @@ from ..compat import string_types from .utils import get_pool -if TYPE_CHECKING: - from ..core import metadataObject - from .core import baseFolder - from typing import Tuple, List, Any, Union, Optional, Callable def _worker(d, **kwargs): """Support function to run an arbitrary function over a :py:class:`Stoner.Data` object.""" byname = kwargs.get("byname", False) - func = kwargs.get("_func", lambda x: x) + func = kwargs.get("func", lambda x: x) if byname: func = getattr(d, func, lambda x: x) args = kwargs.get("args", tuple()) @@ -46,7 +38,7 @@ class SetasWrapper(MutableSequence): """Manages wrapping each member of the folder's setas attribute.""" - def __init__(self, parent: "Item"): + def __init__(self, parent): """Note a reference to the parent item class instance and folder.""" self._each = parent self._folder = parent._folder @@ -65,11 +57,11 @@ def __len__(self): lengths = np.array([len(data.setas) for data in self._folder]) return abs(lengths.min()) - def __getitem__(self, index: Union[str, int, slice]) -> Union[str, List[str], int]: + def __getitem__(self, index): """Get the corresponding item from all the setas items in the folder.""" return [data.setas[index] for data in self._folder] - def __setitem__(self, index: Union[str, int], value): + def __setitem__(self, index, value): """Set the value of the specified item on the setas elements in the folder. Args: @@ -91,7 +83,7 @@ def __setitem__(self, index: Union[str, int], value): setas[index] = v self._folder._object_attrs["setas"] = setas - def __delitem__(self, index: Union[str, int]): + def __delitem__(self, index): """Cannot delete items from the proxy setas object - so simply clear it instead.""" for data in self._folder: data.setas[index] = "." @@ -99,13 +91,11 @@ def __delitem__(self, index: Union[str, int]): setas[index] = "." self._folder._object_attrs["setas"] = setas - def insert(self, index: Union[str, int], value: Any): + def insert(self, index, value): """Cannot insert items into the proxy setas object.""" - raise NotImplementedError( - "Cannot insert into the objectFolder's setas - insdert into the objectFolder instead!" 
- ) + raise IndexError("Cannot insert into the objectFolder's setas - insert into the objectFolder instead!") - def collapse(self) -> List[str]: + def collapse(self): """Collapse the setas into a single list if possible.""" setas = [] for v in self: @@ -129,26 +119,26 @@ class Item: _folder = None - def __init__(self, folder: baseFolder): + def __init__(self, folder): """Create the each proxy object. Notes the parent folder that created us.""" self._folder = folder @property - def setas(self) -> SetasWrapper: + def setas(self): """Return a proxy object for manipulating all the setas objects in a folder.""" return SetasWrapper(self) @setas.setter - def setas(self, value: Union[str, List[str]]): + def setas(self, value): """Manipulate the setas property of all the objects in the folder.""" setas = self.setas setas(value) self._folder._object_attrs["setas"] = setas.collapse() return setas - def __call__(self, func: Callable, *args, **kargs) -> Any: + def __call__(self, func, *args, **kargs): """Iterate over the baseFolder, calling func on each item. Args: @@ -177,7 +167,7 @@ func = getattr(self, func) return list(self.iter(func, *args, **kargs)) - def __dir__(self) -> List[str]: + def __dir__(self): """Return a list of the common set of attributes of the instances in the folder.""" if self._folder and len(self._folder) != 0: res = set(dir(self._folder[0])) @@ -188,7 +178,7 @@ res &= set(dir(d)) return list(res) - def __delattr__(self, name: str): + def __delattr__(self, name): """Handle removing an attribute from the folder, including proxied attributes.""" if name in dir(self._folder.instance) or ( len(self._folder) and hasattr(self._folder[0], name) @@ -206,7 +196,7 @@ else: raise AttributeError(f"Unrecognised attribute {name}") - def __getattr__(self, name: str) -> Any: + def __getattr__(self, name): """Handle some special case attributes that provide alternative views of the objectFolder. Args: @@ -246,9 +236,18 @@ raise AttributeError except AttributeError as err: # Ok, pass back raise AttributeError(f"{name} is not an Attribute of {type(self)} or {type(instance)}") from err + # except TypeError as err: # Can be triggered if self.instance lacks the attribute + # if len(self._folder) and hasattr(self._folder[0], name): + # ret = [(not hasattr(x, name), getattr(x, name, None)) for x in self._folder] + # mask, values = zip(*ret) + # ret = np.ma.MaskedArray(values) + # ret.mask = mask + # else: + # raise err + return ret - def __setattr__(self, name: str, value: Any): + def __setattr__(self, name, value): """Proxy call to set an attribute. Setting the attribute on .each sets it on all instantiated objects and in _object_attrs. @@ -283,7 +282,7 @@ else: raise AttributeError(f"Unknown attribute {name}") - def __getattr_proxy(self, item: str) -> Callable: + def __getattr_proxy(self, item): """Make a proxy call to access a method of the metadataObject like types. Args: @@ -312,7 +311,7 @@ def _wrapper_(*args, **kargs): # Ok that's the wrapper function, now return it for the user to mess around with. return _wrapper_ - def __rmatmul__(self, other: Callable) -> Callable: + def __rmatmul__(self, other): """Implement callable@DataFolder as a generic way to iterate a function over DataFolder members.
Returns: @@ -338,24 +337,14 @@ def _wrapper_(*args, **kargs): # Ok that's the wrapper function, now return it for the user to mess around with. return _wrapper_ - def iter(self, func: Union[str, Callable], *args, **kargs) -> Any: + def iter(self, func, *args, **kargs): """Iterate over the baseFolder, calling func on each item. Args: - func (str, callable): - A Callable object that must take a metadataObject type instance as it's first argument. + func (callable): A Callable object that must take a metadataObject type instance as it's first argument. Keyword Args: - _return (None, bool or str): - Controls how the return value from *func* is added to the DataFolder - _byname (bool): - Whether to look func up as the name of a function. Defaults to True if func is a string. - _mode (str): - Whether to iterate using a parallel iteration scheme. Possible values are: - "serial","SingleProcess": In the same process as the main script - "ThreadPool": Uses a concurrent.futures ThreadPool - "ProcessPool": Uses a concurrent.futures ProcessPool - "Dask": Uses a dask.distributed.Client to distribute the task over an DASK cluster. + _return (None, bool or str): Controls how the return value from *func* is added to the DataFolder Returns: A list of the results of evaluating *func* for each item in the folder. @@ -368,22 +357,19 @@ def iter(self, func: Union[str, Callable], *args, **kargs) -> Any: string. then return result is stored in the corresponding name. """ _return = kargs.pop("_return", None) - _byname = kargs.pop("_byname", isinstance(func, string_types)) - _model = kargs.pop("_mode", "serial") - if _model.lower() not in ["serial", "singleprocess", "threadpool", "processpool", "dask"]: - raise ValueError(f"Unknown folder iteration model {_model}") - + _byname = kargs.pop("_byname", False) + _serial = kargs.pop("_serial", False) self._folder.fetch() # Prefetch thefolder in case we can do it in parallel - executor = get_pool(folder=self._folder, _model=_model) + p, imap = get_pool(_serial) for ix, (new_d, ret) in enumerate( - executor.map(partial(_worker, _func=func, args=args, kargs=kargs, byname=_byname), self._folder) + imap(partial(_worker, func=func, args=args, kargs=kargs, byname=_byname), self._folder) ): if self._folder.debug: print(ix, type(ret)) if isinstance(ret, self._folder._type) and _return is None: try: # Check if ret has same data type, otherwise will not overwrite well if ret.data.dtype != new_d.data.dtype: - return ret + continue new_d = ret except AttributeError: pass @@ -394,3 +380,6 @@ def iter(self, func: Union[str, Callable], *args, **kargs) -> Any: name = self._folder.__names__()[ix] self._folder.__setter__(name, new_d) yield ret + if p is not None: + p.close() + p.join() diff --git a/Stoner/folders/mixins.py b/Stoner/folders/mixins.py index 20d5be324..b81d170ce 100755 --- a/Stoner/folders/mixins.py +++ b/Stoner/folders/mixins.py @@ -322,13 +322,9 @@ def pattern(self, value): else: raise ValueError(f"pattern should be a string, regular expression or iterable object not a {value}") - def fetch(self, futures=False): + def fetch(self): """Preload the contents of the DiskBasedFolderMixin. - Keyword Arguments: - futures (bool): - If True, then return te concurrent.futures object otherwise return the folder. - With multiprocess enabled this will parallel load the contents of the folder into memory. 
""" p, imap = get_pool() @@ -338,6 +334,9 @@ def fetch(self, futures=False): self.__setter__( name, self.on_load_process(f) ) # This doesn't run on_load_process in parallel, but it's not expensive enough to make it worth it. + if p is not None: + p.close() + p.join() return self def getlist(self, **kargs): diff --git a/Stoner/folders/utils.py b/Stoner/folders/utils.py index 45b24ffbb..b55252d1b 100755 --- a/Stoner/folders/utils.py +++ b/Stoner/folders/utils.py @@ -10,67 +10,19 @@ "removeDisallowedFilenameChars", ] import os.path as path -from os import cpu_count import re import string import fnmatch import pathlib -from concurrent import futures -from dask.distributed import Client +from multiprocessing.pool import ThreadPool from numpy import array +import multiprocess as multiprocessing from Stoner.compat import string_types, _pattern_type from Stoner.tools import get_option -class _fake_future: - - """Minimal class that behaves like a simple future. - - This simply stores the function that should be exectured and its arguments and then delays executing it until - the result() method is called. - """ - - def __init__(self, fn, *args, **kargs): - self.fn = fn - self.args = args - self.kargs = kargs - - def result(self): - """Execute the stored function call and return the result.""" - return self.fn(*self.args, **self.kargs) - - -class _fake_executor: - - """Minimal class to fake the bits of the executor protocol that we need.""" - - def __init__(self, *args, **kargs): - """Fake constructor.""" - - def map(self, fn, *iterables): # pylint: disable=no-self-use - """Map over the results, yields each result in turn.""" - for item in zip(*iterables): - yield fn(*item) - - def shutdown(self): # pylint: disable=no-self-use - """Fake shutdown method.""" - - def submit(self, fn, *args, **kwargs): # pylint: disable no-self-use - """Execute a function.""" - return _fake_future(fn(*args, **kwargs)) - - -executor_map = { - "singleprocess": (_fake_executor, {}), - "serial": (_fake_executor, {}), - "threadpool": (futures.ThreadPoolExecutor, {"max_workers": cpu_count()}), - "processpool": (futures.ProcessPoolExecutor, {"max_workers": cpu_count()}), - "dask": (Client, {}), -} - - def pathsplit(pth): """Split pth into a sequence of individual parts with path.split.""" pth = pathlib.Path(pth) @@ -153,36 +105,27 @@ def filter_files(files, patterns, keep=True): return files -def get_pool(folder=None, _model=None): - """Get a concurrent.futures compatible executor. +def get_pool(_serial=False): + """Get a Pool and map implementation depending on options. Returns: - (futures.Executor): - Executor on which to run the distributed job. + Pool(),map: Pool object if possible and map implementation. 
""" - if isinstance(_model, str): - _model = _model.lower() - if getattr(folder, "executor", False): - if folder.executor.name == _model: - return folder.executor - - if _model is None: - if get_option("multiprocessing"): + if get_option("multiprocessing") and not _serial: + try: if get_option("threading"): - _model = "threadpool" + p = ThreadPool(processes=int(multiprocessing.cpu_count() - 1)) else: - _model = "processpool" - else: - _model = "singleprocess" - executor_class, kwargs = executor_map[_model] - executor = executor_class(**kwargs) - executor.name = _model - - if getattr(folder, "executor", False): - folder.executor.shutdown() - if folder: - setattr(folder, "executor", executor) - return executor + p = multiprocessing.Pool(int(multiprocessing.cpu_count() / 2)) + imap = p.imap + except (ArithmeticError, AttributeError, LookupError, RuntimeError, NameError, OSError, TypeError, ValueError): + # Fallback to non-multiprocessing if necessary + p = None + imap = map + else: + p = None + imap = map + return p, imap def removeDisallowedFilenameChars(filename): diff --git a/Stoner/formats/attocube.py b/Stoner/formats/attocube.py index 0b7ca8850..d2078b560 100755 --- a/Stoner/formats/attocube.py +++ b/Stoner/formats/attocube.py @@ -14,7 +14,6 @@ import h5py from ..compat import string_types, bytes2str -from ..core.exceptions import StonerLoadError from ..core.base import typeHintedDict from ..Image import ImageStack, ImageFile, ImageArray from ..HDF5 import HDFFileManager @@ -135,11 +134,12 @@ def _load(self, filename, *args, **kargs): self.filename = filename with HDFFileManager(self.filename, "r") as f: if "type" not in f.attrs: - raise StonerLoadError("HDF5 Group does not specify the type attribute used to check we can load it.") + _raise_error(f, message="HDF5 Group does not specify the type attribute used to check we can load it.") typ = bytes2str(f.attrs["type"]) if typ != type(self).__name__ and "module" not in f.attrs: - raise StonerLoadError( - f"HDF5 Group is not a {type(self).__name__} and does not specify a module to use to load.", + _raise_error( + f, + message=f"HDF5 Group is not a {type(self).__name__} and does not specify a module to use to load.", ) loader = None if typ == type(self).__name__: @@ -234,7 +234,7 @@ def _load_asc(self, filename): """Load a single scan file from ascii data.""" with FileManager(filename, "r") as data: if not data.readline().startswith("# Daisy frame view snapshot"): - raise StonerLoadError(f"{filename} lacked the correct header line") + raise ValueError(f"{filename} lacked the correct header line") tmp = ImageFile() for line in data: if not line.startswith("# "): @@ -439,7 +439,7 @@ def read_hdf5(cls, filename, *args, **kargs): channels = [] grps = list(f.keys()) if "common_metadata" not in grps or "common_typehints" not in grps: - raise StonerLoadError("Couldn;t find common metadata groups, something is not right here!") + _raise_error(f, message="Couldn;t find common metadata groups, something is not right here!") metadata = f["common_metadata"].attrs typehints = f["common_typehints"].attrs for i in sorted(metadata): diff --git a/Stoner/formats/facilities.py b/Stoner/formats/facilities.py index 40b24adad..363a3c324 100755 --- a/Stoner/formats/facilities.py +++ b/Stoner/formats/facilities.py @@ -257,8 +257,6 @@ def _load(self, filename=None, *args, **kargs): key = parts[0] value = parts[1].strip() self.metadata[key] = string_to_type(value) - if i == 0: - raise StonerLoadError("Empty fiule processed by OpenGDAFile!") column_headers = 
f.readline().strip().split("\t") self.data = np.genfromtxt([str2bytes(l) for l in f], dtype="float", invalid_raise=False) self.column_headers = column_headers @@ -386,10 +384,6 @@ def _load(self, filename=None, *args, **kargs): self.metadata.update(img.header) return self except (OSError, ValueError, TypeError, IndexError) as err: - try: - filename.seek(0) - except AttributeError: - pass raise StonerLoadError("Not a Fabio Image file !") from err else: diff --git a/Stoner/formats/generic.py b/Stoner/formats/generic.py index a8ec67cbf..21a02eb32 100755 --- a/Stoner/formats/generic.py +++ b/Stoner/formats/generic.py @@ -136,19 +136,11 @@ def _load(self, filename, *args, **kargs): column_headers = next(csv.reader(io.StringIO(header), delimiter=header_delim)) data = np.genfromtxt(datafile, delimiter=data_delim, skip_header=data_line - header_line) except (TypeError, ValueError, csv.Error, StopIteration, UnicodeDecodeError) as err: - try: - filename.seek(0) - except AttributeError: - pass raise StonerLoadError("Header and data on the same line") from err else: # Generate try: data = np.genfromtxt(datafile, delimiter=data_delim, skip_header=data_line) except (TypeError, ValueError) as err: - try: - filename.seek(0) - except AttributeError: - pass raise StonerLoadError("Failed to open file as CSV File") from err column_headers = ["Column" + str(x) for x in range(np.shape(data)[1])] @@ -249,10 +241,6 @@ def _load(self, filename=None, *args, **kargs): self.metadata[k] = img.info[k] self.data = np.asarray(img) except IOError as err: - try: - filename.seek(0) - except AttributeError: - pass raise StonerLoadError("Unable to read as a PNG file.") from err return self @@ -399,10 +387,6 @@ def _load(self, filename=None, *args, **kargs): if not isinstance(signal, Signal2D): raise StonerLoadError("Not a 2D signal object - aborting!") except Exception as err: # pylint: disable=W0703 Pretty generic error catcher - try: - filename.seek(0) - except AttributeError: - pass raise StonerLoadError(f"Not readable by HyperSpy error was {err}") from err self.data = signal.data self._unpack_meta("", signal.metadata.as_dictionary()) diff --git a/Stoner/formats/instruments.py b/Stoner/formats/instruments.py index 8e4ee68a4..40f2575fa 100755 --- a/Stoner/formats/instruments.py +++ b/Stoner/formats/instruments.py @@ -17,16 +17,6 @@ from ..tools.file import FileManager, SizedFileManager -def _r_float(f, length=4): - """Read 4 bytes and convert to float.""" - return struct.unpack("= n_blocks: - raise StonerLoadError("Tried reading an out of range block!") - for iBlock in range(blockNum): - headLen = _r_int(f) - nSteps = _r_int(f) - if iBlock < blockNum: - pos += headLen + 4 * nSteps - f.seek(pos) - continue - f.seek(pos + 12) - step = _r_float(f) - start2Th = _r_float(f) - pos += headLen # position at start of data block - f.seek(pos) - x = np.arange(start2Th, start2Th + step * (nSteps + 1), step) - y = np.array([max(1.0, _r_float(f)) for i in range(nSteps)]) - self.data = np.column_stack((x, y)) - self.column_headers = ["Two Theta", "Counts"] - self.metadata["repeats"] = blockNum != n_blocks - - def _read_3(self, f): - """RAW v3 file reader.""" - f.seek(12) - n_blocks = _r_int(f) - self.metadata["Date/Time"] = f.read(20).decode("latin1") - f.seek(326) - self.metadta["Sample="] = self.Read(f, 60).decode("latin1") - f.seek(564) - radius = _r_float(f) - self.metadata["Gonio. radius"] = radius - self.metadata["Gonio. 
radius"] = radius - f.seek(608) - self.metadata["Anode"] = f.read(2).decode("latin1") - f.seek(616) - self.metadata["Ka_mean"] = _r_float(f, 8) - self.metadata["Ka1"] = _r_float(f, 8) - self.metadata["Ka2"] = _r_float(f, 8) - self.metadata["Kb"] = _r_float(f, 8) - self.metadata["Ka2/Ka1"] = _r_float(f, 8) - pos = 712 - f.seek(pos) # position at 1st block header - blockNum = self.block - self.metadata["block"] = blockNum - if blockNum >= n_blocks: - raise StonerLoadError("Tried reading an out of range block!") - for iBlock in range(blockNum): - headLen = _r_int(f) - nSteps = _r_int(f) - if not nSteps: - break - if n_blocks > 1: - f.seek(pos + 256) - headLen += _r_float(f) - else: - headLen += 40 - if iBlock + 1 != blockNum: - pos += headLen + 4 * nSteps - f.seek(pos) - continue - f.seek(pos + 8) - _r_float(f, 8) - start2Th = _r_float(f, 8) - f.seek(pos + 212) - temp = _r_float(f) - if temp > 0.0: - self.Sample["Temperature"] = temp - f.seek(pos + 176) - step = _r_float(f, 8) - pos += headLen # position at start of data block - f.seek(pos) - x = np.arange(start2Th, start2Th + step * (nSteps + 1), step) - try: - y = np.array([max(1.0, _r_float(f)) for i in range(nSteps)]) - except (ValueError, TypeError, IOError): # this is absurd - f.seek(pos - 40) - y = np.array([max(1.0, _r_float(f)) for i in range(nSteps)]) - w = 1.0 / y - self.data = np.column_stack((x, y, w)) - break - - self.metadata["repeats"] = blockNum != n_blocks - - def _read_4(self, f): - """"RAW v1 file reader.""" - raise StonerLoadError("Unable to handle version 4 RAW files") diff --git a/Stoner/formats/maximus.py b/Stoner/formats/maximus.py index 5abdc5d58..64a8383d2 100755 --- a/Stoner/formats/maximus.py +++ b/Stoner/formats/maximus.py @@ -223,7 +223,7 @@ def __clone__(self, other=None, attrs_only=False): def _read_image(self, g): """Read an image array and return a member of the image stack.""" if "image" not in g: - raise StonerLoadError(f"{g.name} does not have a signal dataset !") + _raise_error(g.parent, message=f"{g.name} does not have a signal dataset !") tmp = self.type() # pylint: disable=E1102 data = g["image"] if np.prod(np.array(data.shape)) > 0: @@ -326,7 +326,7 @@ def read_hdf5(cls, filename, *args, **kargs): names = [] grps = list(f.keys()) if "common_metadata" not in grps or "common_typehints" not in grps: - raise StonerLoadError("Couldn;t find common metadata groups, something is not right here!") + _raise_error(f, message="Couldn;t find common metadata groups, something is not right here!") metadata = f["common_metadata"].attrs typehints = f["common_typehints"].attrs for i in sorted(metadata): diff --git a/Stoner/plot/core.py b/Stoner/plot/core.py index 4cb83bf65..8367a6782 100755 --- a/Stoner/plot/core.py +++ b/Stoner/plot/core.py @@ -26,7 +26,6 @@ from .formats import DefaultPlotStyle from .utils import errorfill from .utils import hsl2rgb -from .classes import PlotAttr try: # Check we've got 3D plotting @@ -133,7 +132,6 @@ def __init__(self, *args, **kargs): # Do the import of plt here to speed module self.__figure = None self._showfig = kargs.pop("showfig", True) # Retains previous behaviour self._subplots = [] - self.newplot = PlotAttr(self) self._public_attrs = { "fig": (int, mplfig.Figure), "labels": list, @@ -144,7 +142,6 @@ def __init__(self, *args, **kargs): # Do the import of plt here to speed module "xlabel": string_types, "ylabel": string_types, "_showfig": bool, - "newplot": PlotAttr, } super().__init__(*args, **kargs) self._labels = typedList(string_types, []) @@ -273,16 +270,6 @@ def 
template(self, value): raise ValueError(f"Template is not of the right class:{type(value)}") self._template.apply() - def __getstate__(self): - """Cleanup state before pickle for folder operations.""" - state = self.__dict__.copy() - state.pop("newplot", None) - return state - - def __setstate__(self, state): - """Restore state after pickle.""" - self.__dict__.update(state) - def _Plot(self, ix, iy, fmt, plotter, figure, **kwords): """Private method for plotting a single plot to a figure. diff --git a/Stoner/plot/formats.py b/Stoner/plot/formats.py index 9b38aeaaa..e8fc7cc62 100755 --- a/Stoner/plot/formats.py +++ b/Stoner/plot/formats.py @@ -480,10 +480,7 @@ def apply(self): """Update matplotlib rc parameters from any attributes starting template_.""" plt.style.use(self.stylesheet) for attr in dir(self): - try: - v = getattr(self, attr) - except TypeError: - continue + v = getattr(self, attr) if not attr.startswith("template_"): continue attr = _add_dots(attr[9:]) @@ -516,8 +513,6 @@ def customise_axes(self, ax, plot): ax.set_yticks(ax.get_yticks()) ax.set_xticklabels(ax.get_xticks(), size=self.template_xtick__labelsize) ax.set_yticklabels(ax.get_yticks(), size=self.template_ytick__labelsize) - ax.xaxis.set_major_locator(self.xlocater()) - ax.yaxis.set_major_locator(self.ylocater()) if isinstance(self.xformatter, Formatter): xformatter = self.xformatter else: diff --git a/Stoner/plot/utils.py b/Stoner/plot/utils.py index c3a0707c2..e1357aa51 100755 --- a/Stoner/plot/utils.py +++ b/Stoner/plot/utils.py @@ -330,59 +330,3 @@ def auto_fit_fontsize(text, width, height, scale_down=True, scale_up=False): if not np.isclose(scale, 1.0): text.set_fontsize(text.get_fontsize() * scale) return scale - - -def wrap_prop(attr, func): - """Produce a wrapper function that calls func on self.attr.""" - - def _wrapped(self, *args, **kargs): - return func(getattr(self, attr), *args, **kargs) - - return _wrapped - - -def add_properties(*srcclasses): - """Decorator to add get/set properties from srccls get_* set_* methods to a new destcls.""" - getters = {} - setters = {} - for srccls, attr in srcclasses: - getters.update( - { - x[4:]: (attr, getattr(srccls, x)) - for x in dir(srccls) - if x.startswith("get_") and callable(getattr(srccls, x)) - } - ) - setters.update( - { - x[4:]: (attr, getattr(srccls, x)) - for x in dir(srccls) - if x.startswith("set_") and callable(getattr(srccls, x)) - } - ) - - def real_add_properties(destcls): - """Add properties to the destcls from the getters and setters dictionaries.""" - for k in set(getters.keys()) & set(setters.keys()): # R/W properties - if hasattr(destcls, k): - continue # Don't overwrite existing attributes - attr, fget = getters[k] - attr, fset = setters[k] - fget = wrap_prop(attr, fget) - fset = wrap_prop(attr, fset) - setattr(destcls, k, property(fget=fget, fset=fset, fdel=lambda self: fset())) - for k in set(getters.keys()) - set(setters.keys()): # Read only Proerpties - if hasattr(destcls, k): - continue # Don't overwrite existing attributes - attr, fget = getters[k] - fget = wrap_prop(attr, fget) - setattr(destcls, k, property(fget=fget)) - for k in set(setters.keys()) - set(getters.keys()): # Write only properties - if hasattr(destcls, k): - continue # Don't overwrite existing attributes - attr, fset = setters[k] - fset = wrap_prop(attr, fset) - setattr(destcls, k, property(fset=fset, fdel=lambda self: fset())) - return destcls - - return real_add_properties diff --git a/Stoner/tools/decorators.py b/Stoner/tools/decorators.py index b5a0c16be..d0a412af7 
100755 --- a/Stoner/tools/decorators.py +++ b/Stoner/tools/decorators.py @@ -295,8 +295,8 @@ def class_modifier( RTD_Restrictions (bool): If True (default), do not add members from outside our own package when on ReadTheDocs. no_long_names (bool): - To avoid name collision the default is to create two entries in the class __dict__ - one for the - standard name and one to include the full module path. This disables the latter. + To avoid name collision the default is to create two entries in the class __dict__ - one for the standard name + and one to include the full module path. This disables the latter. Returns: diff --git a/Stoner/tools/file.py b/Stoner/tools/file.py index 040f1bb77..193d7c289 100755 --- a/Stoner/tools/file.py +++ b/Stoner/tools/file.py @@ -13,14 +13,12 @@ from .classes import subclasses from ..core.exceptions import StonerLoadError, StonerUnrecognisedFormat from ..core.base import regexpDict, metadataObject -from .null import null from ..core.Typing import Filename __all__ = [ "file_dialog", "get_file_name_type", - "get_filee_type", "auto_load_classes", "get_mime_type", "FileManager", @@ -96,37 +94,6 @@ def get_file_name_type( """Rationalise a filename and filetype.""" if isinstance(filename, string_types): filename = pathlib.Path(filename) - filetype = get_file_type(filetype, parent) - if filename is None or (isinstance(filename, bool) and not filename): - filename = file_dialog("r", filename, filetype, parent) - elif isinstance(filename, io.IOBase): # Opened file - filename = filename.name - try: - if not filename.exists(): - raise IOError(f"Cannot find {filename} to load") - except AttributeError as err: - raise IOError(f"Unable to tell if file exists - {type(filename)}") from err - return filename, filetype - - -def get_file_type(filetype: Union[Type[metadataObject], str], parent: Type[metadataObject]) -> Type[metadataObject]: - """Try to ensure that the filetype parameter is an appropriate filetype class. - - Args: - filetype (str or ubclass of metadataObject): - The requested type to use for loading. - parent (sublclass of metadataObject): - The type of object we're trying to create for which this file must be a subclass. - - Returns: - (metadataObject subclass): - The requested subclass. - - Raises: - (ValueError): - If the requested filetype is a string and cannot be imported, or the filetype isn't a subclass of the - requested parent class. 
- """ if isinstance(filetype, string_types): # We can specify filetype as part of name try: filetype = regexpDict(subclasses(parent))[filetype] # pylint: disable=E1136 @@ -138,24 +105,20 @@ def get_file_type(filetype: Union[Type[metadataObject], str], parent: Type[metad filetype = getattr(mod, parts[-1]) except (ImportError, AttributeError) as err: raise ValueError(f"Unable to import {filetype}") from err - if filetype is None: - filetype = parent - if not issubclass(filetype, parent): - raise ValueError(f"{filetype} is not a subclass of DataFile.") - return filetype - - -def _handle_urllib_response(resp): - """Decode a response object to either a bytes or str depending on the context type.""" - data = resp.read() - content_type = [x.strip() for x in resp.headers.get("Content-Type", "text/plain; charset=utf-8").split(";")] - typ, substyp = content_type[0].split("/") - if len(content_type) > 1 and "charset" in content_type[1] and typ == "text": - charset = content_type[1][8:] - data = data.decode(charset) - elif typ == "text": - data = bytes2str(data) - return data + if not issubclass(filetype, parent): + raise ValueError(f"{filetype} is not a subclass of DataFile.") + if filename is None or (isinstance(filename, bool) and not filename): + if filetype is None: + filetype = parent + filename = file_dialog("r", filename, filetype, parent) + elif isinstance(filename, io.IOBase): # Opened file + filename = filename.name + try: + if not filename.exists(): + raise IOError(f"Cannot find {filename} to load") + except AttributeError as err: + raise IOError(f"Unable to tell if file exists - {type(filename)}") from err + return filename, filetype def auto_load_classes( @@ -169,24 +132,6 @@ def auto_load_classes( mimetype = get_mime_type(filename, debug=debug) args = args if args is not None else () kargs = kargs if kargs is not None else {} - debug = kargs.get("debug", False) - if isinstance(filename, io.IOBase): # We need to stop the autoloading classes closing the file - if not filename.seekable(): # Replace the filename with a seekable buffer - data = _handle_urllib_response(filename) - if isinstance(data, bytes): - filename = io.BytesIO(data) - if debug: - print("Replacing non seekable buffer with BytesIO") - else: - filename = io.StringIO(data) - if debug: - print("Replacing non seekable buffer with BytesIO") - if debug: - print("Replacing close method to prevent unintended stream closure") - original_close = filename.close - filename.close = null() - else: - original_close = None for cls in subclasses(baseclass).values(): # pylint: disable=E1136, E1101 cls_name = cls.__name__ if debug: @@ -231,8 +176,6 @@ def auto_load_classes( f"Ran out of subclasses to try and load {filename} (mimetype={mimetype}) as." 
+ f" Recognised filetype are:{list(subclasses(baseclass).keys())}" # pylint: disable=E1101 ) - if original_close is not None: - filename.close = original_close # Restore the close method now we're done messing return test @@ -259,7 +202,6 @@ class FileManager: def __init__(self, filename, *args, **kargs): """Store the parameters passed to the context manager.""" self.filename = filename - self.buffer = None self.args = args self.kargs = kargs self.file = None @@ -269,21 +211,25 @@ def __init__(self, filename, *args, **kargs): if parsed.scheme not in URL_SCHEMES: filename = pathlib.Path(filename) else: - resp = urllib.request.urlopen(filename) - data = _handle_urllib_response(resp) - if isinstance(data, bytes): - filename = io.BytesIO(data) - else: - filename = io.StringIO(data) + filename = urllib.request.urlopen(filename) if isinstance(filename, path_types): self.mode = "open" elif isinstance(filename, io.IOBase): - self.mode = "buffer" - pos = filename.tell() - self.buffer = filename.read() - if len(args) == 0: - self.binary = isinstance(self.buffer, bytes) - filename.seek(0) + if not hasattr(filename, "response"): + if self.binary: + self.mode = "bytes" + self.filename = str2bytes(filename.read()) + else: + self.filename = bytes2str(filename.read()) + self.mode = "text" + filename.response = self.filename + else: + if self.binary: + self.mode = "bytes" + self.filename = str2bytes(filename.response) + else: + self.filename = bytes2str(filename.response) + self.mode = "text" elif isinstance(filename, bytes): if (len(args) > 0 and args[0][-1] == "b") or self.kargs.pop("mode", "").endswith("b"): self.filename = filename @@ -300,16 +246,20 @@ def __enter__(self): if len(self.args) > 0 and "b" not in self.args[0]: self.kargs.setdefault("encoding", "utf-8") self.file = open(self.filename, *self.args, **self.kargs) - elif self.buffer is not None and self.binary: - self.file = io.BytesIO(str2bytes(self.buffer)) - elif self.buffer is not None and not self.binary: - self.file = io.StringIO(bytes2str(self.buffer)) + elif self.mode == "text": + self.file = io.StringIO(self.filename) + elif self.mode == "bytes": + self.file = io.BytesIO(self.filename) + elif self.mode in ["bytesio", "textio"]: + self.file = self.filename else: raise TypeError(f"Unrecognised filename type {type(self.filename)}") return self.file def __exit__(self, exc_type, exc_value, exc_traceback): """Close the open file, or reset the buffer position.""" + if not self.file.closed and self.file.seekable(): + self.file.seek(0) if self.mode == "open": self.file.close() @@ -320,14 +270,19 @@ class SizedFileManager(FileManager): def __enter__(self): """Add the file length information to the context variable.""" - ret = super().__enter__() + super().__enter__() if self.mode == "open": length = os.stat(self.filename).st_size - elif self.file.seekable(): - pos = self.file.tell() - self.file.seek(0, 2) - length = self.file.tell() - self.file.seek(pos, 0) + elif self.mode in ["textio", "bytesio"]: + if self.file.seekable(): + pos = self.file.tell() + self.file.seek(0, 2) + length = self.file.tell() + self.file.seek(pos) + else: + length = -1 + elif self.mode in ["text", "bytes"]: + length = len(self.filename) else: - length = -1 - return ret, length + length = len(self.file) + return self.file, length diff --git a/doc/Stoner.rst b/doc/Stoner.rst index 079d85e18..2afe658db 100755 --- a/doc/Stoner.rst +++ b/doc/Stoner.rst @@ -199,26 +199,24 @@ Utility Functions :no-inheritance-diagram: :headings: -~ - ------------------------------------- 
Folders package - Collections Classes ------------------------------------- -.. module:: Stoner.folders - - -Main Class -^^^^^^^^^^ -.. autosummary:: - :toctree: classes - :template: classdocs.rst - - DataFolder +Main Classes +^^^^^^^^^^^^ +.. automodapi:: Stoner.Folders + :headings: -~ Folders Package and Submodules ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. automodapi:: Stoner.folders + :no-inheritance-diagram: + :inherited-members: + :headings: -~ + .. automodapi:: Stoner.folders.core :no-inheritance-diagram: :inherited-members: diff --git a/doc/UserGuide/Users_Guide.tex b/doc/UserGuide/Users_Guide.tex index b73f5da32..632d39834 100755 --- a/doc/UserGuide/Users_Guide.tex +++ b/doc/UserGuide/Users_Guide.tex @@ -11,7 +11,7 @@ \usepackage{gb_custom} -\lstset{frame=tblr, +\lstset{frame=tblr, framerule=0mm, framesep=0.2mm, resetmargins=true, @@ -71,11 +71,11 @@ \subsubsection{Getting the Latest Development Code} \keypoint{These instructions are for members of the University of Leeds Condensed Matter Physics Group. External users are recommended to download the source from GitHub} -The source code for the Stoner python module is kept on github using the git +The source code for the Stoner python module is kept on github using the git revision control tool. A nightly development release of the code is available for copying and -use in \verb#\\stonerlab\data\software\python\PythonCode\#. +use in \verb#\\stonerlab\data\software\python\PythonCode\#. -The Stoner Package currently depends on a number of other modules. These are installed on the lab +The Stoner Package currently depends on a number of other modules. These are installed on the lab machines that have Python installed. Primarily these are Numpy, SciPy and Matplotlib. The easiest way to get a Python installation with all the necessary dependencies for the Stoner Package is to install the \textit{Enthought Python Distribution, Canopy}. Installers for Windows, MacOS and Linux are kept in \\ \verb#\\stonerlab\data\software\Python# @@ -407,7 +407,7 @@ \subsubsection{Selecting Individual rows and columns of data} \begin{lstlisting} for row in d.rows(): print row - + for column in d.columns(): print column ...... @@ -720,13 +720,13 @@ \subsection{Plotting 3D Data} By default the plot\_xyz will produce a 3D surface plot with the z-axis coded with a rainbow colourmap (specifically, the matplotlib provided \textit{matplotlib.cm.jet} colourmap). This can be overridden with the \textit{cmap} keyword parameter. If a simple 2D surface plot is required, then the \textit{plotter} parameter should be set to a suitable function such as \textbf{pyplot.pcolor}. Like \textbf{plot\_xy}, a \textit{figure} parameter can be used to control the figure being used and any additional keywords are passed through to the plotting function. The axes labels are set from the corresponding column labels. - + Another option is a contour plot based on $(x,y,z)$ data points. This can be done with the \textbf{contour\_xyz} method. - + \begin{lstlisting} p.contour_xyz(xcol,ycol,zcol,shape=(50,50)) p.contour_xyz(xcol,ycol,zcol,xlim=(10,10,100),ylim=(-10,10,100)) -\end{lstlisting} +\end{lstlisting} Both \textbf{plot\_xyz} and \textbf{contour\_xyz} make use of a call to \textbf{griddata} which is a utility method of the \textbf{PlotFile} -- essentially this is just a pass through method to the underlying \textit{scipy.interpolate.griddata} function. The shape of the grid is determined through a combination of the \textit{xlim}, \textit{ylim} and \textit{shape} arguments.
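As a sketch of the plotting calls just described, in the same style as the guide's other listings. It assumes a plot-capable instance \verb#p# with column indices \verb#xcol#, \verb#ycol# and \verb#zcol#, and that \verb#griddata# returns the three interpolated grids; the keyword names follow the text above.

\begin{lstlisting}
import matplotlib.cm
import matplotlib.pyplot as pyplot

p.plot_xyz(xcol, ycol, zcol, cmap=matplotlib.cm.jet)    # 3D surface with an explicit colourmap
p.plot_xyz(xcol, ycol, zcol, plotter=pyplot.pcolor)     # flat 2D colour plot instead of a surface
X, Y, Z = p.griddata(xcol, ycol, zcol, shape=(50, 50))  # pass-through to scipy.interpolate.griddata
\end{lstlisting}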
@@ -1023,16 +1023,16 @@ \subsection{Non-linear curve fitting with initialisation file}

 If you wish to fit your data to a non-linear function more complicated than a polynomial you can use \verb#Stoner.nlfit.nlfit(inifile, func, data=None)# or, equivalently, if you have an AnalyseFile instance of your data called d, say, you can call \verb#d.nlfit(inifile, func)#. This performs a non-linear least squares fitting algorithm on your data and returns the AnalyseFile instance used with an additional final column that is the fit; it also plots the fit. There is an example run script, ini file and data file in PythonCode\textbackslash Scripts; have a look at them to see how to use this function.

-The function to fit to can either be created by the user and passed in or one of a library of current existing functions can be used from the FittingFunctions.py file in Stoner\textbackslash src (just pass in the name of the function you wish to use as a string). The function takes it's fitting parameters information from a .ini file created by the user, look at the example .ini file mentioned above for the format, you can see that it allows for the parameters to be fixed or constrained which can be very useful for fitting.
+The function to fit to can either be created by the user and passed in, or one of the library of existing functions in the FittingFunctions.py file in Stoner\textbackslash src can be used (just pass in the name of the function you wish to use as a string). The function takes its fitting parameter information from a .ini file created by the user; look at the example .ini file mentioned above for the format. You can see that it allows for the parameters to be fixed or constrained, which can be very useful for fitting.

 Current functions existing in FittingFunctions.py:
 \begin{itemize}
-\item Various tunnelling I-V models including BDR, Simmons, Field emission and Tersoff Hamman STM.
+\item Various tunnelling I-V models including BDR, Simmons, Field emission and Tersoff-Hamann STM.
 \item 2D weak localisation
 \item Strijkers model for PCAR fitting
 \end{itemize}

-Please see the function documentation in FittingFunctions.py for more information about these models. Please do add functions you think would be of use to everybody, have a look at the current functions for examples, the main thing is that the function must take an x array and a list of parameters, apply a function and then return the resulting array.
-
+Please see the function documentation in FittingFunctions.py for more information about these models. Please do add functions you think would be of use to everybody; have a look at the current functions for examples. The main thing is that the function must take an x array and a list of parameters, apply a function and then return the resulting array.
+
 \section{Working with Lots of Files}\label{DataFolder}

 A common case is that you have measured lots of data curves and now have a large stack of data files sitting in a tree of folders on disc and need to process all of them with some code. The \textbf{DataFolder} class is designed to make it easier to process lots of files.
@@ -1042,7 +1042,7 @@ \subsection{Getting a List of Files}

 The first thing you probably want to do is to get a list of data files in a directory (possibly including its subdirectories) and probably matching some sort of filename pattern.
 \begin{lstlisting}
-from Stoner.folders import DataFolder
+from Stoner.Folders import DataFolder
 f=DataFolder(pattern='*.dat')
 \end{lstlisting}
@@ -1235,7 +1235,7 @@ \subsection{The Utils module}
 from Stoner.Utils import split_up_down
 folder=split_up_down(data,column)
 \end{lstlisting}
-\textit{folder} is a DataFolder instance with two groups, one for rising values of the column and one for falling values of the column. The \textit{split\_up\_down} will take an optional third parameter which is an existing \textbf{DataFolder} instance to which the new groups (if they don't already exist) and files will be added.
+\textit{folder} is a DataFolder instance with two groups, one for rising values of the column and one for falling values of the column. The \textit{split\_up\_down} function will take an optional third parameter, which is an existing \textbf{DataFolder} instance to which the new groups (if they don't already exist) and files will be added.

 \subsection{Extract X-Y(Z) from X-Y-Z data}
diff --git a/doc/UserGuide/analysisfile.rst b/doc/UserGuide/analysisfile.rst
index cdf96f925..d558cd964 100755
--- a/doc/UserGuide/analysisfile.rst
+++ b/doc/UserGuide/analysisfile.rst
@@ -77,8 +77,8 @@
 and a column 'Temperature' that contains numbers above and below 100. The first example would return a
 :py:class:`Stoner.Folders.DataFolder` object containing two separate instances of :py:class:`AnalysisMixin` which
 would each contain the rows from the original data that had each unique value of the polarisation data. The second example would
-produce a :py:class:`Stoner.folders.DataFolder` object containing two :py:class:`AnalysisMixin` objects for the rows with temperature above and below 100.
-The final example will result in a :py:class:`Stoner.folders.DataFolder` object that has two groups each of which contains
+produce a :py:class:`Stoner.Folders.DataFolder` object containing two :py:class:`AnalysisMixin` objects for the rows with temperature above and below 100.
+The final example will result in a :py:class:`Stoner.Folders.DataFolder` object that has two groups each of which contains
 :py:class:`AnalysisMixin` objects for each polarisation value.

 More AnalysisMixin Functions
@@ -378,4 +378,4 @@ The example below shows how to use :py:meth:`AnalysisMixin.peaks` to filter out

 .. plot:: samples/peaks_example.py
     :include-source:
-    :outname: peaks
+    :outname: peaks
\ No newline at end of file
diff --git a/doc/UserGuide/cookbook.rst b/doc/UserGuide/cookbook.rst
index 7f866559e..ddd97f25a 100755
--- a/doc/UserGuide/cookbook.rst
+++ b/doc/UserGuide/cookbook.rst
@@ -34,7 +34,7 @@ rising or falling. This is designed to help deal with analysis problems involvin

     from Stoner.Util import split_up_down
     folder=split_up_down(data,column)

-In this example *folder* is a :py:class:`Stoner.folders.DataFolder` instance with two groups, one for rising values of the column
+In this example *folder* is a :py:class:`Stoner.Folders.DataFolder` instance with two groups, one for rising values of the column
 and one for falling values of the column. The :py:func:`split_up_down` will take an optional third parameter which is an existing
 :py:class:`Stoner.Core.DataFolder` instance to which the new groups (if they don't already exist) and files will be added.
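The split_up_down recipe that closes the cookbook hunk above looks like this in practice. This is a sketch only: the file name and the column label are assumptions, and the group keys are simply iterated rather than assumed::

    # Sketch of split_up_down, assuming a column "H" that sweeps up and down.
    from Stoner import Data
    from Stoner.Util import split_up_down

    data = Data("hysteresis.txt")       # hypothetical measurement file
    folder = split_up_down(data, "H")   # DataFolder with two groups
    # One group holds the rising sections and the other the falling ones;
    # iterate rather than hard-coding the group names.
    for name, group in folder.groups.items():
        print(name, len(group))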
diff --git a/doc/UserGuide/curve_fitting.rst b/doc/UserGuide/curve_fitting.rst
index 88e5d1600..a2aced5c7 100755
--- a/doc/UserGuide/curve_fitting.rst
+++ b/doc/UserGuide/curve_fitting.rst
@@ -9,7 +9,7 @@ Curve Fitting in the Stoner Package
 Introduction
 ============

-Many data analysis tasks make use of curve fitting at some point - the process of fitting a model to a set of data points and
+Many data analysis tasks make use of curve fitting at some point - the process of fitting a model to a set of data points and
 determining the co-efficients of the model that give the best fit. Since this is such a ubiquitous task, it will be no surprise
 that the Stoner package provides a variety of different algorithms.
@@ -37,7 +37,7 @@ In order of increasing complexity, the Stoner package supports the following:

 -   `Orthogonal distance regression`_

-    On the other hand, if your data has uncertainties in both *x* and *y* you may want to use the :py:meth:`Data.odr` method to do an analysis that
+    Finally, if your data has uncertainties in both *x* and *y* you may want to use the :py:meth:`Data.odr` method to do an analysis that
     minimizes the distance of the model function in both *x* and *y*.

 -   `Differential Evolution Algorithm`_
@@ -61,7 +61,7 @@ standalone fitting functions:

     Generally these follow the :py:func:`scipy.optimize.curve_fit` conventions.

     #. They make use of the :py:attr:`Data.setas` attribute to identify data columns containing *x*, *y* and associated uncertainties.
        They also provide a common way to select a subset of data to use for the fitting through the *bounds* keyword argument.
-    #. They provide a consistent way to add the best fit data as a column(s) to the :py:class:`Data` object and to store the best-fit
+    #. They provide a consistent way to add the best fit data as a column(s) to the :py:class:`Data` object and to store the best-fit
        parameters in the metadata for retrieval later.

 Since this is done in a consistent fashion, the package can also provide a :py:meth:`Data.annotate_plot` method to display the fitting parameters on a plot of the data.
diff --git a/doc/UserGuide/datafile.rst b/doc/UserGuide/datafile.rst
index 35439b8cf..2e134805c 100755
--- a/doc/UserGuide/datafile.rst
+++ b/doc/UserGuide/datafile.rst
@@ -758,7 +758,7 @@
 ORed. The result of chaining together two separate calls to select will, however, be ANDed together:
 the first line will assume an implicit equality test and give only those rows with a column *Temp* equal to 250.
 The second line gives an explicit greater than or equal to test for the same column. The third line will select
 first those rows that have column T1 less than 4.2 *or* column T2 less than 5 and then from those select those rows
 which have a column Res between 100 and 200. The full list of operators is given in
-:py:meth:`Stoner.folders.baseFolder.select`.
+:py:meth:`Stoner.Folders.baseFolder.select`.

 Sectioning
 ~~~~~~~~~~
diff --git a/doc/UserGuide/datafolder.rst b/doc/UserGuide/datafolder.rst
index b353de283..a6192cb23 100755
--- a/doc/UserGuide/datafolder.rst
+++ b/doc/UserGuide/datafolder.rst
@@ -1,16 +1,16 @@
 **************************
 Working with Lots of Files
 **************************
-.. currentmodule:: Stoner.folders
+.. currentmodule:: Stoner.Folders

 A common case is that you have measured lots of data and now have a large stack of data files sitting in a tree of
 directories on disc and need to process all of them with some code.
-The :py:mod:`Stoner.folders` contains classes to make this job much easier.
+The :py:mod:`Stoner.Folders` module contains classes to make this job much easier.

 For the end-user, the top level classes are :py:class:`DataFolder` for :py:class:`Stoner.Data` and
 :py:class:`Stoner.Image.ImageFolder` for collections of :py:class:`Stoner.Image.ImageFile` s. These are designed to
 complement the corresponding data classes :py:class:`Stoner.Data` and :py:class:`Stoner.ImageFile`.

-Like :py:class:`Stoner.Core.Data`, :py:class:`Stoner.folders.DataFolder` is exported directly from the :py:mod:`Stoner` package, whilst the
+Like :py:class:`Stoner.Core.Data`, :py:class:`Stoner.Folders.DataFolder` is exported directly from the :py:mod:`Stoner` package, whilst the
 :py:class:`Stoner.Image.ImageFolder` is exported from the :py:mod:`Stoner.Image` sub-package.

 :py:class:`DataFolder` and its friends are essentially containers for :py:class:`Stoner.Data` (or similar classes from the
@@ -347,7 +347,7 @@ of even (!)::

 This last example illustrates a special ability of a :py:class:`DataFolder` to use the methods of the type of
 :py:class:`Stoner.Data` inside the DataFolder. The special :py:attr:`DataFolder.each` attribute (which is actually a
-:py:class:`Stoner.folders.each_item instance) provides special hooks to let you call methods of the underlying :py:attr:`DataFolder.type` class on each
+:py:class:`Stoner.Folders.each_item` instance) provides special hooks to let you call methods of the underlying :py:attr:`DataFolder.type` class on each
 file in the :py:class:`DataFolder` in turn. When you access a method on :py:attr:`DataFolder.each` that is actually a method
 of the DataFile, you get a method that wraps a call to each :py:class:`Stoner.Data` in turn. If the method on :py:class:`Stoner.Data`
 returns the :py:class:`Stoner.Data` back, then this is stored in the :py:class:`DataFolder`; in this case the result replaces the original file in the folder.
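A sketch of the DataFolder.each mechanism described above. The directory, pattern and column arguments are assumptions, not taken from the documentation::

    # Sketch of calling Data methods across a folder via .each; the
    # directory, pattern and column indices here are hypothetical.
    from Stoner import DataFolder

    fldr = DataFolder("measurements", pattern="*.txt")
    # normalise() is an analysis method of the underlying Data class:
    # .each forwards the call to every file in turn and, because the
    # method returns the Data object, the result is stored back in
    # the folder.
    fldr.each.normalise(1, 2)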
diff --git a/doc/UserGuide/upgrade.rst b/doc/UserGuide/upgrade.rst
index bfa11b2ff..17248158e 100755
--- a/doc/UserGuide/upgrade.rst
+++ b/doc/UserGuide/upgrade.rst
@@ -151,3 +151,5 @@ The new :py:attr:`Stoner.DataFolder.metadata` attribute and :py:meth:`Stoner.fol
 allow this to be done directly::

     result=fldr.metadata.slice(["thing_1","thing_2"], output="Data")
+
+
diff --git a/doc/conf.py b/doc/conf.py
index 1863a44ec..5279b6fc4 100755
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -126,7 +126,7 @@
     'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
     'seaborn': ('https://seaborn.pydata.org/', None),
     'astropy': ('http://docs.astropy.org/en/stable/', None),
-    'h5py': (' https://docs.h5py.org/en/latest/', None),
+    'h5py': ('https://docs.h5py.org/en/latest/', None),
     'lmfit': ('https://lmfit.github.io/lmfit-py/', None),
     'skimage': ('https://scikit-image.org/docs/stable', None)
 }
diff --git a/doc/plot_cache/stitch_int_overlap-int-overlap.hires.png b/doc/plot_cache/stitch_int_overlap-int-overlap.hires.png
index 094d8da3c..fd4e3dfe1 100755
Binary files a/doc/plot_cache/stitch_int_overlap-int-overlap.hires.png and b/doc/plot_cache/stitch_int_overlap-int-overlap.hires.png differ
diff --git a/doc/plot_cache/stitch_int_overlap-int-overlap.pdf b/doc/plot_cache/stitch_int_overlap-int-overlap.pdf
index 186c58324..20a6fef0d 100755
Binary files a/doc/plot_cache/stitch_int_overlap-int-overlap.pdf and b/doc/plot_cache/stitch_int_overlap-int-overlap.pdf differ
diff --git a/doc/plot_cache/stitch_int_overlap-int-overlap.png b/doc/plot_cache/stitch_int_overlap-int-overlap.png
index 1c07cc744..9059568a3 100755
Binary files a/doc/plot_cache/stitch_int_overlap-int-overlap.png and b/doc/plot_cache/stitch_int_overlap-int-overlap.png differ
diff --git a/doc/readme.rst b/doc/readme.rst
index 40c1ceed9..8f58e3b8f 100755
--- a/doc/readme.rst
+++ b/doc/readme.rst
@@ -41,7 +41,8 @@ Getting this Code
     :alt: Introduction and Installation Guide to Stoner Python Package
     :width: 320

-See the `requirements.txt` file for the current package requirements.
+The *Stoner* package requires h5py>=2.7.0, lmfit>=0.9.7, matplotlib>=2.0, numpy>=1.13, Pillow>=4.0,
+scikit-image>=0.13.0 & scipy>=1.0.0, and also optionally depends on filemagic, npTDMS, imreg_dft, numba, fabio and hyperspy.

 Anaconda Python (and probably other scientific Python distributions) includes nearly all of the dependencies, and the
 remaining dependencies are collected together in the **phygbu** repository on anaconda cloud. The easiest way to install the Stoner package is,
@@ -50,10 +51,10 @@ therefore, to install the most recent Anaconda Python distribution.

 Compatibility
 --------------

-Versions 0.10.x (stable branch) are compatible with Python 3.6-3.9. The current development branch is targetting v3.8-3.9
-with 3.10 added when anaconda support is available in mainstream realease.
+Versions 0.9.x (stable branch) are compatible with Python 2.7, 3.5, 3.6 and 3.7. The latest 0.9.6 version is also compatible with Python 3.8.
+The current stable version (0.10, stable branch) is compatible with Python 3.6-3.9.

-Conda packages are built on each github release via a github actions for 64bit Python on Windows, MacOS and Linux.
+Conda packages are prepared for the stable branch and when the development branch enters beta testing.
 Pip wheels are prepared for selected stable releases only.
Installation ------------ diff --git a/doc/requirements.txt b/doc/requirements.txt index e7e109cb3..4cafa80a0 100755 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -2,14 +2,13 @@ sphinx>=1.7.5 sphinx-automodapi>=0.3 sphinx-better-theme>=0.1.3 h5py -matplotlib>=3.0 -numpy>=1.20 +matplotlib>=2.0 +numpy>=1.19 Pillow -scikit-image>=0.17 +scikit-image>=0.13.0 scipy statsmodels multiprocess asteval lmfit urllib3>=1.26 -importlib_metadata diff --git a/doc/samples/bins.py b/doc/samples/bins.py index 6381a24d6..8cfa55e38 100755 --- a/doc/samples/bins.py +++ b/doc/samples/bins.py @@ -20,7 +20,7 @@ zip([e, f, g, h], ["0.05 Linear", "0.25 Linear", "0.05 Log", "50 log"]) ): binned.subplot(411 + i) - d.plot(fmt="k.", capsize=2.0, ms=3.0) + d.plot(fmt="k,", capsize=2.0) binned.fig = d.fig binned.plot(plotter=errorfill, label=label, color="red") diff --git a/doc/samples/plot_folder_demo.py b/doc/samples/plot_folder_demo.py index f0150e384..1e6b689c1 100755 --- a/doc/samples/plot_folder_demo.py +++ b/doc/samples/plot_folder_demo.py @@ -11,7 +11,7 @@ from Stoner.analysis.fitting.models.magnetism import FMR_Power, Inverse_Kittel from Stoner.analysis.fitting.models.generic import Linear from Stoner.plot.formats import DefaultPlotStyle, TexEngFormatter -from Stoner.folders import PlotFolder +from Stoner.Folders import PlotFolder # Customise a plot template template = DefaultPlotStyle() @@ -33,13 +33,13 @@ def extra(_, __, d): d.xlabel = r"Field $\mu_0H\,$" d.ylabel = "Abs. (arb)" d.plt_legend(loc=3) - d.annotate_fit(FMR_Power, mode="eng", fontdict={"size": 8}, x=0.05, y=0.25) + d.annotate_fit(FMR_Power, fontdict={"size": 8}, x=0.05, y=0.25) def do_fit(f): """Fit just one set of data.""" f.template = template - f["cut"] = f.threshold(0.75e5, rising=False, falling=True) + f["cut"] = f.threshold(1.75e5, rising=False, falling=True) f["Frequency"] = (f // "Frequency").mean() f.lmfit( FMR_Power, result=True, header="Fit", bounds=lambda x, r: x < f["cut"] @@ -111,7 +111,7 @@ def do_fit(f): result = resfldr[0].clone for c in [0, 2, 4, 6, 8]: result.data[:, c] = (resfldr[1][:, c] + resfldr[0][:, c]) / 2.0 - for c in [2, 4, 6, 8]: + for c in [1, 3, 5, 7]: result.data[:, c] = gmean((resfldr[0][:, c], resfldr[1][:, c]), axis=0) # Doing the Kittel fit with an orthogonal distance regression as we have x errors not y errors @@ -121,8 +121,8 @@ def do_fit(f): ) result.setas[-1] = "y" - # result.template.yformatter = TexEngFormatter - # result.template.xformatter = TexEngFormatter + result.template.yformatter = TexEngFormatter + result.template.xformatter = TexEngFormatter result.labels = None result.figure(figsize=(6, 8), no_axes=True) result.subplot(211) diff --git a/requirements.txt b/requirements.txt index e8139cae4..f71110681 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,9 @@ +scipy>1.4 +numpy>=1.18 +matplotlib>=3.0 +scikit-image>=0.17 +h5py cycler>=0.10.0 -dask>=2020.12.0 -dill>=0.3.4 -fabio>=0.11.0 filemagic>=1.6 image-registration>=0.2.1 lmfit>=0.9.7 diff --git a/tests/Stoner/Image/test_folder.py b/tests/Stoner/Image/test_folder.py index 4c75cfab0..7d6c4b986 100755 --- a/tests/Stoner/Image/test_folder.py +++ b/tests/Stoner/Image/test_folder.py @@ -5,7 +5,6 @@ from Stoner.Image import ImageArray, ImageFolder import pytest import os -import re knownkeys = [ "Averaging", diff --git a/tests/Stoner/Image/test_kerr.py b/tests/Stoner/Image/test_kerr.py index 659adf9d8..c6f3f8c7c 100755 --- a/tests/Stoner/Image/test_kerr.py +++ b/tests/Stoner/Image/test_kerr.py @@ -16,8 +16,6 @@ import Stoner 
Stoner.Options.multiprocessing = False -Stoner.Options.threading = True - # data arrays for testing - some useful small images for tests diff --git a/tests/Stoner/Image/test_stack.py b/tests/Stoner/Image/test_stack.py index a6126c38e..d9fe0aab2 100755 --- a/tests/Stoner/Image/test_stack.py +++ b/tests/Stoner/Image/test_stack.py @@ -61,7 +61,7 @@ def test_ImageStack_align(): assert False, "stack.align with an object failed to raise a TypeError" istack2.align(method="imreg_dft") - assert (istack2[0]["tvec"] == (0.0, 0.0), "stack didn't align to first image") + assert istack2[0]["tvec"] == (0.0, 0.0), "stack didn't align to first image" istack2 = selfistack2.clone diff --git a/tests/Stoner/folders/test_each.py b/tests/Stoner/folders/test_each.py index 7a5bf2148..d5b175ca4 100755 --- a/tests/Stoner/folders/test_each.py +++ b/tests/Stoner/folders/test_each.py @@ -10,7 +10,7 @@ import os import pytest -from Stoner import DataFolder, Options +from Stoner import DataFolder from Stoner.Util import hysteresis_correct pth = path.dirname(__file__) @@ -18,8 +18,6 @@ sys.path.insert(0, pth) datadir = path.join(pth, "sample-data") -Options.multiprocessing = False -Options.threading = True def test_each_call(): os.chdir(datadir) diff --git a/tests/Stoner/tools/test_tests.py b/tests/Stoner/tools/test_tests.py index 8ea9e448f..091b641e4 100755 --- a/tests/Stoner/tools/test_tests.py +++ b/tests/Stoner/tools/test_tests.py @@ -75,5 +75,6 @@ def test_is_tuple(): assert tests.isTuple((4, "Hi"), int, str), "isTuple failed to match types" assert not tests.isTuple((4, "Hi"), str, int), "isTuple failed to match types as bad" + if __name__ == "__main__": - pytest.main(["--pdb",__file__]) + pytest.main() diff --git a/tests/Stoner/tools/test_widgets.py b/tests/Stoner/tools/test_widgets.py index d2b556268..f0a7a09e5 100755 --- a/tests/Stoner/tools/test_widgets.py +++ b/tests/Stoner/tools/test_widgets.py @@ -66,8 +66,6 @@ def dummy(mode="getOpenFileName"): assert widgets.fileDialog.openDialog(mode="SelectDirectory") == ret_pth.parent with pytest.raises(ValueError): widgets.fileDialog.openDialog(mode="Whateve") - d=Data.load(None) - assert Path(d.filename).name=='TDI_Format_RT.txt',"Failed to open correct file when using a file dialog!" def test_loader():
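The test_stack.py change earlier in this patch is more than cosmetic: wrapping an assert's expression and message in parentheses creates a two-element tuple, and any non-empty tuple is truthy, so the old assertion could never fail. A quick demonstration::

    # A parenthesised (expression, message) pair is a tuple, and non-empty
    # tuples are always truthy - so this assertion silently passes even
    # though the comparison inside it is False:
    assert (1 == 2, "never raised")

    # The corrected form separates expression and message and fails properly:
    assert 1 == 2, "comparison failed"  # raises AssertionError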