From 9d8ca29a59563aa2163149d4fdfde0cdcbfe4cdb Mon Sep 17 00:00:00 2001
From: Jerome Kieffer
Date: Tue, 7 Nov 2023 14:16:21 +0100
Subject: [PATCH 01/45] Create pylint.yml

---
 .github/workflows/pylint.yml | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 .github/workflows/pylint.yml

diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
new file mode 100644
index 0000000..383e65c
--- /dev/null
+++ b/.github/workflows/pylint.yml
@@ -0,0 +1,23 @@
+name: Pylint
+
+on: [push]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.8", "3.9", "3.10"]
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install pylint
+    - name: Analysing the code with pylint
+      run: |
+        pylint $(git ls-files '*.py')

From 2f360ea5c98da88fc3e391c037893b97917eeaa8 Mon Sep 17 00:00:00 2001
From: Jerome Kieffer
Date: Mon, 27 Nov 2023 16:11:58 +0100
Subject: [PATCH 02/45] migrate to meson-python

---
 pyproject.toml | 74 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 68 insertions(+), 6 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 482cc8d..91ed3e5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,9 +1,71 @@
+[project]
+name = 'freesas'
+dynamic = ['version',]
+license = {file = 'copyright'}
+requires-python = '>=3.7'
+readme = 'README.md'
+description = 'Small angle scattering tools ... but unlike most others, free and written in Python'
+
+authors = [
+    { name = 'Guillaume Bonamis'},
+    { name = 'Martha Brennich'},
+    { name = 'Jérôme Kieffer', email = 'jerome.kieffer@esrf.fr'},
+]
+classifiers = [
+    'Development Status :: 5 - Production/Stable',
+    'Environment :: Console',
+    'Intended Audience :: Developers',
+    'Intended Audience :: Education',
+    'Intended Audience :: Science/Research',
+    'License :: OSI Approved :: MIT License',
+    'Natural Language :: English',
+    'Operating System :: MacOS :: MacOS X',
+    'Operating System :: Microsoft :: Windows',
+    'Operating System :: Unix',
+    'Operating System :: POSIX',
+    'Programming Language :: Cython',
+    'Programming Language :: Python :: 3',
+    'Programming Language :: Python :: Implementation :: CPython',
+    'Topic :: Scientific/Engineering :: Physics',
+    'Topic :: Software Development :: Libraries :: Python Modules',
+    'Topic :: Scientific/Engineering :: Bio-Informatics',
+]
+
+dependencies = [
+    'numpy',
+    'scipy',
+]
 [build-system]
+build-backend = 'mesonpy'
 requires = [
-    "wheel",
-    "setuptools<60.0.0",
-    "numpy>=1.12",
-    "Cython>=0.21.1",
-    "scipy",
+    'meson-python>=0.11',
+    "meson>=0.64; platform_system=='Windows'",
+    "meson; platform_system!='Windows'",
+    'ninja',
+    'wheel',
+    'Cython>=0.29',
+    "numpy<1.26.0; platform_machine == 'ppc64le'",
+    "numpy; platform_machine != 'ppc64le'",
+    'pyproject-metadata>=0.5.0',
+    'tomli>=1.0.0'
 ]
-build-backend = "setuptools.build_meta"
+
+[project.urls]
+homepage = 'http://silx.org'
+documentation = 'http://www.silx.org/doc/freesas/latest/'
+source = 'https://github.com/silx-kit/freesas'
+download = 'https://github.com/silx-kit/freesas/releases'
+tracker = 'https://github.com/silx-kit/freesas/issues'
+
+[project.scripts]
+free_gpa = 'freesas.app.auto_gpa:main'
+free_guinier = 'freesas.app.auto_guinier:main'
+free_rg = 'freesas.app.autorg:main'
+cormapy = 'freesas.app.cormap:main'
+supycomb = 'freesas.app.supycomb:main'
+free_bift = 'freesas.app.bift:main'
+extract_ascii = 'freesas.app.extract_ascii:main'
+
+[project.gui-scripts]
+freesas = 'freesas.app.plot_sas:main'
+

From 8ce34c3a146041eca54cb7fd94097030b017babe Mon Sep 17 00:00:00 2001
From: Jerome Kieffer
Date: Mon, 27 Nov 2023 16:15:22 +0100
Subject: [PATCH 03/45] new meson.build file

---
 meson.build                           | 26 ++++++++++++++++++++++++++
 pyproject.toml                        |  1 +
 version.py => src/freesas/_version.py |  6 +++---
 3 files changed, 30 insertions(+), 3 deletions(-)
 create mode 100644 meson.build
 rename version.py => src/freesas/_version.py (97%)

diff --git a/meson.build b/meson.build
new file mode 100644
index 0000000..082300f
--- /dev/null
+++ b/meson.build
@@ -0,0 +1,26 @@
+project('FreeSAS',
+  'c', 'cython',
+  license: 'MIT',
+  meson_version: '>= 0.60',
+  version: run_command('src/freesas/_version.py',
+                       check:true).stdout().strip(),
+)
+
+
+# Seek the backend
+if meson.backend() != 'ninja'
+  error('Ninja backend required')
+endif
+
+cc = meson.get_compiler('c')
+m_dep = cc.find_library('m', required : false)
+if m_dep.found()
+  add_project_link_arguments('-lm', language : 'c')
+endif
+
+# https://mesonbuild.com/Python-module.html
+py_mod = import('python')
+py = py_mod.find_installation()
+py_dep = py.dependency()
+
+subdir('src/freesas')
diff --git a/pyproject.toml b/pyproject.toml
index 91ed3e5..53247f3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,6 +34,7 @@ classifiers = [
 dependencies = [
     'numpy',
     'scipy',
+    'matplotlib'
 ]
 [build-system]
 build-backend = 'mesonpy'
diff --git a/version.py b/src/freesas/_version.py
similarity index 97%
rename from version.py
rename to src/freesas/_version.py
index 94e2ff4..355029c 100644
--- a/version.py
+++ b/src/freesas/_version.py
@@ -2,7 +2,7 @@
 # coding: utf-8
 # /*##########################################################################
 #
-# Copyright (c) 2015-2021 European Synchrotron Radiation Facility
+# Copyright (c) 2015-2023 European Synchrotron Radiation Facility
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal
@@ -52,7 +52,7 @@
 __authors__ = ["Jérôme Kieffer"]
 __license__ = "MIT"
 __copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
-__date__ = "25/01/2021"
+__date__ = "27/11/2023"
 __status__ = "production"
 __docformat__ = 'restructuredtext'
 __all__ = ["date", "version_info", "strictversion", "hexversion", "debianversion",
@@ -70,7 +70,7 @@
               "candidate": "rc"}
 MAJOR = 0
 MINOR = 9
-MICRO = 2
+MICRO = 9
 RELEV = "dev"  # <16
 SERIAL = 0  # <16
 

From 226b0202b1e9cd0b1bffd994886507ac893b3ac2 Mon Sep 17 00:00:00 2001
From: Jerome Kieffer
Date: Mon, 27 Nov 2023 16:20:46 +0100
Subject: [PATCH 04/45] include python source

---
 src/freesas/__init__.py        |   64 ++
 src/freesas/align.py           |  447 ++++++++
 src/freesas/autorg.py          |  198 ++++
 src/freesas/average.py         |  271 +++++
 src/freesas/bift.py            |   80 ++
 src/freesas/collections.py     |  107 ++
 src/freesas/cormap.py          |  122 ++
 src/freesas/decorators.py      |   58 +
 src/freesas/fitting.py         |  222 ++++
 src/freesas/invariants.py      |  132 +++
 src/freesas/meson.build        |   25 +
 src/freesas/model.py           |  315 ++++++
 src/freesas/plot.py            |  562 ++++++++++
 src/freesas/sas_argparser.py   |  165 +++
 src/freesas/sasio.py           |  100 ++
 src/freesas/transformations.py | 1918 ++++++++++++++++++++++++++++++++
 16 files changed, 4786 insertions(+)
 create mode 100644 src/freesas/__init__.py
 create mode 100644 src/freesas/align.py
 create mode 100644 src/freesas/average.py
 create mode 100644 src/freesas/bift.py
 create mode 100644 src/freesas/collections.py
 create mode 100644 src/freesas/cormap.py
 create mode 100644 src/freesas/decorators.py
 create mode 100644 src/freesas/fitting.py
 create mode 100644 src/freesas/invariants.py
 create mode 100644 src/freesas/meson.build
 create mode 100644 src/freesas/model.py
 create mode 100644 src/freesas/plot.py
 create mode 100644 src/freesas/sas_argparser.py
 create mode 100644 src/freesas/sasio.py
 create mode 100644 src/freesas/transformations.py

diff --git a/src/freesas/__init__.py b/src/freesas/__init__.py
new file mode 100644
index 0000000..79072af
--- /dev/null
+++ b/src/freesas/__init__.py
@@ -0,0 +1,64 @@
+# coding: utf-8
+# /*##########################################################################
+#
+# Copyright (c) 2015-2018 European Synchrotron Radiation Facility
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# ###########################################################################*/
+"""
+The silx package contains the following main sub-packages:
+
+- silx.gui: Qt widgets for data visualization and data file browsing
+- silx.image: Some processing functions for 2D images
+- silx.io: Reading and writing data files (HDF5/NeXus, SPEC, ...)
+- silx.math: Some processing functions for 1D, 2D, 3D, nD arrays
+- silx.opencl: OpenCL-based data processing
+- silx.sx: High-level silx functions suited for (I)Python console.
+- silx.utils: Miscellaneous convenient functions
+
+See silx documentation: http://www.silx.org/doc/silx/latest/
+"""
+
+__authors__ = ["Jérôme Kieffer"]
+__license__ = "MIT"
+__date__ = "31/08/2018"
+
+import os as _os
+import logging as _logging
+
+_logging.getLogger(__name__).addHandler(_logging.NullHandler())
+
+
+project = _os.path.basename(_os.path.dirname(_os.path.abspath(__file__)))
+
+try:
+    from ._version import __date__ as date  # noqa
+    from ._version import (
+        version,
+        version_info,
+        hexversion,
+        strictversion,
+        dated_version,
+    )  # noqa
+except ImportError:
+    raise RuntimeError(
+        "Do NOT use %s from its sources: build it and use the built version"
+        % project
+    )
diff --git a/src/freesas/align.py b/src/freesas/align.py
new file mode 100644
index 0000000..e3a1fb5
--- /dev/null
+++ b/src/freesas/align.py
@@ -0,0 +1,447 @@
+__author__ = "Guillaume Bonamis"
+__license__ = "MIT"
+__copyright__ = "2015, ESRF"
+
+import os
+import sys
+import numpy
+import matplotlib
+# matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from freesas.model import SASModel
+import itertools
+from scipy.optimize import fmin
+import logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("log_freesas")
+
+
+class InputModels:
+    def __init__(self):
+        self.inputfiles = []
+        self.sasmodels = []
+        self.rfactors = []
+        self.rmax = None
+        self.validmodels = []
+
+    def __repr__(self):
+        return "Preparation of %s models for alignment" % len(self.inputfiles)
+
+    def assign_models(self, molecule=None):
+        """
+        Create SASModels from the pdb files listed in self.inputfiles and save them in self.sasmodels.
+        Center of mass, inertia tensor and canonical parameters are computed for each SASModel.
+
+        :param molecule: optional 2d array, coordinates of the atoms for the model to create
+        :return self.sasmodels: list of SASModel
+        """
+        if not self.inputfiles and len(molecule) == 0:
+            logger.error("No input files")
+
+        if self.inputfiles:
+            for inputpdb in self.inputfiles:
+                model = SASModel()
+                model.read(inputpdb)
+                model.centroid()
+                model.inertiatensor()
+                model.canonical_parameters()
+                self.sasmodels.append(model)
+            if len(self.inputfiles) != len(self.sasmodels):
+                logger.error("Problem of assignment\n%s models for %s files" % (len(self.sasmodels), len(self.inputfiles)))
+
+        elif len(molecule) != 0:
+            model = SASModel()
+            model.atoms = molecule
+            model.centroid()
+            model.inertiatensor()
+            model.canonical_parameters()
+            self.sasmodels.append(model)
+
+        return self.sasmodels
+
+    def rcalculation(self):
+        """
+        Calculate the maximal acceptable value for the R-factor, which is the mean of all the
+        R-factors of the inputs plus 2 times their standard deviation.
+        R-factors are saved in the attribute self.rfactors, a 1d array, in percent.
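+
+        A minimal usage sketch (the pdb file names are hypothetical, for
+        illustration only):
+
+        >>> im = InputModels()
+        >>> im.inputfiles = ["dam1.pdb", "dam2.pdb"]  # hypothetical paths
+        >>> rmax = im.rcalculation()  # mean(R) + 2*std(R), in percent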
+ + :return rmax: maximal value for the R-factor + """ + if len(self.sasmodels) == 0: + self.assign_models() + models = self.sasmodels + + rfactors = numpy.empty(len(models), dtype="float") + for i in range(len(models)): + rfactors[i] = models[i].rfactor + self.rfactors = 100.0 * rfactors + + rmax = self.rfactors.mean() + 2 * self.rfactors.std() + self.rmax = rmax + + return rmax + + def models_selection(self): + """ + Check if each model respect the limit for the R-factor + + :return self.validmodels: 1d array, 0 for a non valid model, else 1 + """ + if self.rmax is None: + self.rcalculation() + rmax = self.rmax + + validmodels = [] + for i in range(len(self.sasmodels)): + rfactor = self.rfactors[i] + if rfactor <= rmax: + validmodels.append(1.0) + else: + validmodels.append(0.0) + + self.validmodels = numpy.array(validmodels, dtype="float") + + return self.validmodels + + def rfactorplot(self, filename=None, save=False): + """ + Create a png file with the table of R factor for each model. + A threshold is computed to discarded models with Rfactor>Rmax. + + :param filename: filename for the figure, default to Rfactor.png + :param save: save automatically the figure if True, else show it + :return fig: the wanted figures + """ + if filename is None: + filename = "Rfactor.png" + if len(self.validmodels) == 0: + self.models_selection() + + dammif_files = len(self.inputfiles) + R = self.rfactors + Rmax = self.rmax + + xticks = 1 + numpy.arange(dammif_files) + fig = plt.figure(figsize=(7.5, 10)) + labels = [os.path.splitext(os.path.basename(self.inputfiles[i]))[0] for i in range(dammif_files)] + + ax2 = fig.add_subplot(1, 1, 1) + ax2.set_title("Selection of dammif models based on R factor") + ax2.bar(xticks - 0.5, R) + ax2.plot([0.5, dammif_files + 0.5], [Rmax, Rmax], "-r", label="R$_{max}$ = %.3f" % Rmax) + ax2.set_ylabel("R factor in percent") + ax2.set_xticks(xticks) + ax2.set_xticklabels(labels, rotation=90) + ax2.legend(loc=8) + + bbox_props = dict(fc="pink", ec="r", lw=1) + for i in range(dammif_files): + if not self.validmodels[i]: + ax2.text(i + 0.95, Rmax / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props) + logger.info("model %s discarded, Rfactor > Rmax" % self.inputfiles[i]) + + if save: + fig.savefig(filename) + else: + fig.show() + + return fig + + +class AlignModels: + """ + Used to align DAM from pdb files + """ + + def __init__(self, files, slow=True, enantiomorphs=True): + """ + :param files: list of pdb files to read to create DAM + :param slow: optimized every symmetry if True, else only optimized the best one + :param enantiomorphs: take into account both enantiomorphs if True (i.e. inversion authorized) + """ + self.slow = slow + self.enantiomorphs = enantiomorphs + self.inputfiles = files + self.outputfiles = [] + self.models = [] + self.arrayNSD = None + self.validmodels = [] + self.reference = None + + def __repr__(self): + return "alignment process for %s models" % len(self.models) + + def assign_models(self): + """ + Create SASModels from pdb files saved in self.inputfiles and saved them in self.models. + Center of mass, inertia tensor and canonical parameters are computed for each SASModel. 
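+
+        A sketch of the full alignment workflow built on this class, assuming
+        hypothetical file names and that every model is kept as valid:
+
+        >>> align = AlignModels(["dam1.pdb", "dam2.pdb"], slow=False)
+        >>> align.outputfiles = ["aligned-01.pdb", "aligned-02.pdb"]
+        >>> align.assign_models()
+        >>> align.validmodels = numpy.ones(len(align.models))
+        >>> align.makeNSDarray()
+        >>> align.alignment_reference()  # selects the reference, writes the aligned pdb files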
+ + :return self.models: list of SASModel + """ + for inputpdb in self.inputfiles: + model = SASModel() + model.read(inputpdb) + model.centroid() + model.inertiatensor() + model.canonical_parameters() + self.models.append(model) + if len(self.inputfiles) != len(self.models): + logger.error("Problem of assignment\n%s models for %s files" % (len(self.models), len(self.inputfiles))) + + return self.models + + def optimize(self, reference, molecule, symmetry): + """ + Use scipy.optimize to optimize transformation parameters to minimize NSD + + :param reference: SASmodel + :param molecule: SASmodel + :param symmetry: 3-list of +/-1 + :return p: transformation parameters optimized + :return dist: NSD after optimization + """ + p, dist, niter, nfuncalls, warmflag = fmin(reference.dist_after_movement, molecule.can_param, args=(molecule, symmetry), ftol=1e-4, maxiter=200, full_output=True, disp=False) + if niter == 200: + logger.debug("convergence not reached") + else: + logger.debug("convergence reach after %s iterations" % niter) + return p, dist + + def alignment_sym(self, reference, molecule): + """ + Apply 8 combinations to the molecule and select the one which minimize the distance between it and the reference. + + :param reference: SASModel, the one which do not move + :param molecule: SASModel, the one wich has to be aligned + :return combinaison: best symmetry to minimize NSD + :return p: transformation parameters optimized if slow is true, unoptimized else + """ + can_paramref = reference.can_param + can_parammol = molecule.can_param + + ref_can = reference.transform(can_paramref, [1, 1, 1]) + mol_can = molecule.transform(can_parammol, [1, 1, 1]) + + if self.slow: + parameters, dist = self.optimize(reference, molecule, [1, 1, 1]) + else: + parameters = can_parammol + dist = reference.dist(molecule, ref_can, mol_can) + combinaison = None + + for comb in itertools.product((-1, 1), repeat=3): + if comb == (1, 1, 1): + continue + if not self.enantiomorphs and comb[0] * comb[1] * comb[2] == -1: + continue + + sym = numpy.diag(comb + (1,)) + mol_sym = numpy.dot(sym, mol_can.T).T + + if self.slow: + symmetry = [sym[0, 0], sym[1, 1], sym[2, 2]] + p, d = self.optimize(reference, molecule, symmetry) + else: + p = can_parammol + d = reference.dist(molecule, ref_can, mol_sym) + + if d < dist: + dist = d + parameters = p + combinaison = comb + if combinaison is not None: + combinaison = list(combinaison) + else: + combinaison = [1, 1, 1] + return combinaison, parameters + + def makeNSDarray(self): + """ + Calculate the NSD correlation table and save it in self.arrayNSD + + :return self.arrayNSD: 2d array, NSD correlation table + """ + models = self.models + size = len(models) + valid = self.validmodels + self.arrayNSD = numpy.zeros((size, size), dtype="float") + + for i in range(size): + if valid[i] == 1.0: + reference = models[i] + else: + self.arrayNSD[i, :] = 0.00 + continue + for j in range(size): + if i == j: + self.arrayNSD[i, j] = 0.00 + elif i < j: + if valid[j] == 1.0: + molecule = models[j] + symmetry, p = self.alignment_sym(reference, molecule) + if self.slow: + dist = reference.dist_after_movement(p, molecule, symmetry) + else: + p, dist = self.optimize(reference, molecule, symmetry) + else: + dist = 0.00 + self.arrayNSD[i, j] = self.arrayNSD[j, i] = dist + return self.arrayNSD + + def plotNSDarray(self, rmax=None, filename=None, save=False): + """ + Create a png file with the table of NSD and the average NSD for each model. 
+ A threshold is computed to segregate good models and the ones to exclude. + + :param rmax: threshold of R factor for the validity of a model + :param filename: filename for the figure, default to nsd.png + :param save: save automatically the figure if True, else show it + :return fig: the wanted figures + """ + if self.arrayNSD is None: + self.makeNSDarray() + if not self.reference: + self.reference = self.find_reference() + if filename is None: + filename = "nsd.png" + + dammif_files = len(self.inputfiles) + valid_models = self.validmodels + labels = [os.path.splitext(os.path.basename(self.outputfiles[i]))[0] for i in range(dammif_files)] + mask2d = (numpy.outer(valid_models, valid_models)) + tableNSD = self.arrayNSD * mask2d + maskedNSD = numpy.ma.masked_array(tableNSD, mask=numpy.logical_not(mask2d)) + data = valid_models * (tableNSD.sum(axis=-1) / (valid_models.sum() - 1)) # mean for the valid models, excluding itself + + fig = plt.figure(figsize=(15, 10)) + xticks = 1 + numpy.arange(dammif_files) + ax1 = fig.add_subplot(1, 2, 1) + ax2 = fig.add_subplot(1, 2, 2) + + # first subplot : the NSD table + lnsd = [] + for i in range(dammif_files): + for j in range(dammif_files): + nsd = maskedNSD[i, j] + if not maskedNSD.mask[i, j]: + ax1.text(i, j, "%.2f" % nsd, ha="center", va="center", size=12 * 8 // dammif_files) + ax1.text(j, i, "%.2f" % nsd, ha="center", va="center", size=12 * 8 // dammif_files) + if i != j: + lnsd.append(nsd) + + lnsd = numpy.array(lnsd) + nsd_max = lnsd.mean() + lnsd.std() # threshold for nsd mean + + ax1.imshow(maskedNSD, interpolation="nearest", origin="upper", cmap="YlOrRd", norm=matplotlib.colors.Normalize(vmin=min(lnsd))) + ax1.set_title(u"NSD correlation table") + ax1.set_xticks(range(dammif_files)) + ax1.set_xticklabels(labels, rotation=90) + ax1.set_xlim(-0.5, dammif_files - 0.5) + ax1.set_ylim(-0.5, dammif_files - 0.5) + ax1.set_yticks(range(dammif_files)) + ax1.set_yticklabels(labels) + + # second subplot : the NSD mean for each model + ax2.bar(xticks - 0.5, data) + ax2.plot([0.5, dammif_files + 0.5], [nsd_max, nsd_max], "-r", label=u"NSD$_{max}$ = %.2f" % nsd_max) + ax2.set_title(u"NSD between any model and all others") + ax2.set_ylabel("Normalized Spatial Discrepancy") + ax2.set_xticks(xticks) + ax2.set_xticklabels(labels, rotation=90) + bbox_props = dict(fc="cyan", ec="b", lw=1) + ax2.text(self.reference + 0.95, data[self.reference] / 2, "Reference", ha="center", va="center", rotation=90, size=10, bbox=bbox_props) + ax2.legend(loc=8) + + bbox_props = dict(fc="pink", ec="r", lw=1) + valid_number = 0 + for i in range(dammif_files): + if data[i] > nsd_max: + ax2.text(i + 0.95, data[self.reference] / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props) + logger.debug("model %s discarded, nsd > nsd_max" % self.inputfiles[i]) + elif not valid_models[i]: + if rmax: + ax2.text(i + 0.95, data[self.reference] / 2, "Discarded, Rfactor = %s > Rmax = %s" % (100.0 * self.models[i].rfactor, rmax), ha="center", va="center", rotation=90, size=10, bbox=bbox_props) + else: + ax2.text(i + 0.95, data[self.reference] / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props) + else: + if valid_models[i] == 1.0: + valid_number += 1 + + logger.debug("%s valid models" % valid_number) + + if save: + fig.savefig(filename) + else: + fig.show() + return fig + + def find_reference(self): + """ + Find the reference model among the models aligned. + The reference model is the one with lower average NSD with other models. 
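+
+        Concretely, with D the NSD table and "valid" the boolean mask of kept
+        models, the reference index is the argmin over valid i of
+        sum_j D[i, j] / (N_valid - 1).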
+ + :return ref_number: position of the reference model in the list self.models + """ + if self.arrayNSD is None: + self.makeNSDarray() + if len(self.validmodels) == 0: + logger.error("Validity of models is not computed") + valid = self.validmodels + valid = valid.astype(bool) + + averNSD = numpy.zeros(len(self.models)) + averNSD += sys.maxsize + averNSD[valid] = ((self.arrayNSD.sum(axis=-1)) / (valid.sum() - 1))[valid] + + self.reference = averNSD.argmin() + + return self.reference + + def alignment_reference(self, ref_number=None): + """ + Align all models in self.models with the reference one. + The aligned models are saved in pdb files (names in list self.outputfiles) + """ + if self.reference is None and ref_number is None: + self.find_reference() + + ref_number = self.reference + models = self.models + reference = models[ref_number] + for i in range(len(models)): + if i == ref_number: + continue + else: + molecule = models[i] + symmetry, p = self.alignment_sym(reference, molecule) + if not self.slow: + p, dist = self.optimize(reference, molecule, symmetry) + molecule.atoms = molecule.transform(p, symmetry) # molecule sent on its canonical position + molecule.atoms = molecule.transform(reference.can_param, [1, 1, 1], reverse=True) # molecule sent on reference position + molecule.save(self.outputfiles[i]) + reference.save(self.outputfiles[ref_number]) + return 0 + + def alignment_2models(self, save=True): + """ + Align two models using the first one as reference. + The aligned models are save in pdb files. + + :return dist: NSD after alignment + """ + models = self.models + reference = models[0] + molecule = models[1] + + symmetry, p = self.alignment_sym(reference, molecule) + if not self.slow: + p, dist = self.optimize(reference, molecule, symmetry) + + molecule.atoms = molecule.transform(p, symmetry) + molecule.atoms = molecule.transform(reference.can_param, [1, 1, 1], reverse=True) + if self.slow: + dist = reference.dist(molecule, reference.atoms, molecule.atoms) + if save: + molecule.save(self.outputfiles) + + return dist diff --git a/src/freesas/autorg.py b/src/freesas/autorg.py new file mode 100644 index 0000000..4284f94 --- /dev/null +++ b/src/freesas/autorg.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +"""Functions for calculating the radius of gyration and forward scattering intensity.""" + +__authors__ = ["Jerome Kieffer"] +__license__ = "MIT" +__copyright__ = "2020, ESRF" +__date__ = "05/06/2020" + +import logging +import numpy +from scipy.optimize import curve_fit +from ._autorg import ( # pylint: disable=E0401 + RG_RESULT, + autoRg, + AutoGuinier, + linear_fit, + FIT_RESULT, + guinier, + NoGuinierRegionError, + DTYPE, + InsufficientDataError, +) + + +logger = logging.getLogger(__name__) + + +def auto_gpa(data, Rg_min=1.0, qRg_max=1.3, qRg_min=0.5): + """ + Uses the GPA theory to guess quickly Rg, the + radius of gyration and I0, the forwards scattering + + The theory is described in `Guinier peak analysis for visual and automated + inspection of small-angle X-ray scattering data` + Christopher D. Putnam + J. Appl. Cryst. (2016). 49, 1412–1419 + + This fits sqrt(q²Rg²)*exp(-q²Rg²/3)*I0/Rg to the curve I*q = f(q²) + + The Guinier region goes arbitrary from 0.5 to 1.3 q·Rg + qRg_min and qRg_max can be provided + + :param data: the raw data read from disc. Only q and I are used. + :param Rg_min: the minimal accpetable value for the radius of gyration + :param qRg_max: the default upper bound for the Guinier region. 
+    :param qRg_min: the default lower bound for the Guinier region.
+    :return: autoRg result with limited information
+    """
+
+    def curate_data(data):
+        q = data.T[0]
+        I = data.T[1]
+        err = data.T[2]
+
+        start0 = numpy.argmax(I)
+        stop0 = numpy.where(q > qRg_max / Rg_min)[0][0]
+
+        range0 = slice(start0, stop0)
+        q = q[range0]
+        I = I[range0]
+        err = err[range0]
+
+        q2 = q ** 2
+        lnI = numpy.log(I)
+        I2_over_sigma2 = err ** 2 / I ** 2
+
+        y = I * q
+        p1 = numpy.argmax(y)
+
+        # Those are guesses from the position of the maximum:
+        Rg = (1.5 / q2[p1]) ** 0.5
+        I0 = I[p1] * numpy.exp(q2[p1] * Rg ** 2 / 3.0)
+
+        # Let's cut down the Guinier region to 0.5-1.3 in q·Rg
+        try:
+            start1 = numpy.where(q > qRg_min / Rg)[0][0]
+        except IndexError:
+            start1 = None
+        try:
+            stop1 = numpy.where(q > qRg_max / Rg)[0][0]
+        except IndexError:
+            stop1 = None
+        range1 = slice(start1, stop1)
+
+        q1 = q[range1]
+        I1 = I[range1]
+
+        return q1, I1, Rg, I0, q2, lnI, I2_over_sigma2, start0
+
+    q1, I1, Rg, I0, q2, lnI, I2_over_sigma2, start0 = curate_data(data)
+    if len(q1) < 3:
+        reduced_data = numpy.delete(data, start0, axis=0)
+        q1, I1, Rg, I0, q2, lnI, I2_over_sigma2, start0 = curate_data(
+            reduced_data
+        )
+
+    x = q1 * q1
+    y = I1 * q1
+
+    f = (
+        lambda x, Rg, I0: I0
+        / Rg
+        * numpy.sqrt(x * Rg * Rg)
+        * numpy.exp(-x * Rg * Rg / 3.0)
+    )
+    res = curve_fit(f, x, y, [Rg, I0])
+    logger.debug(
+        "GPA upgrade Rg %s-> %s and I0 %s -> %s", Rg, res[0][0], I0, res[0][1]
+    )
+    Rg, I0 = res[0]
+    sigma_Rg, sigma_I0 = numpy.sqrt(numpy.diag(res[1]))
+    end = numpy.where(data.T[0] > qRg_max / Rg)[0][0]
+    start = numpy.where(data.T[0] > qRg_min / Rg)[0][0]
+    aggregation = guinier.check_aggregation(
+        q2, lnI, I2_over_sigma2, 0, end - start0, Rg=Rg, threshold=False
+    )
+    quality = guinier.calc_quality(
+        Rg, sigma_Rg, data.T[0, start], data.T[0, end], aggregation, qRg_max
+    )
+    return RG_RESULT(
+        Rg, sigma_Rg, I0, sigma_I0, start, end, quality, aggregation
+    )
+
+
+def auto_guinier(data, Rg_min=1.0, qRg_max=1.3, relax=1.2):
+    """
+    Yet another implementation of the Guinier fit
+
+    The idea:
+    * extract the reasonable range
+    * convert to the Guinier space (ln(I) = f(q²))
+    * scan all possible intervals
+    * keep any interval with q·Rg below qRg_max (1.3, or 1.5 in relaxed mode)
+    * select the beginning and the end of the Guinier region according to the contribution of two parameters:
+      - (q_max·Rg - q_min·Rg)/qRg_max --> in favor of large ranges
+      - 1 / RMSD --> in favor of good quality data
+      For each start and end point, the contributions of all ranges are averaged out (using histograms).
+      The best solution is the start/end position with the maximum average.
+    * all ranges within this region are averaged out to measure Rg, I0 and, more importantly, their deviation
+    * the quality is still to be calculated
+    * aggregation is assessed according to a second-order polynomial fit.
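+
+    A minimal usage sketch (synthetic, ideal Guinier data, for illustration
+    only):
+
+    >>> import numpy
+    >>> q = numpy.linspace(0.05, 3.0, 500)          # nm^-1
+    >>> I = 100.0 * numpy.exp(-(q * 2.0) ** 2 / 3)  # ideal curve with Rg = 2 nm
+    >>> data = numpy.vstack((q, I, 0.01 * I)).T     # columns q, I, err
+    >>> result = auto_guinier(data)                 # RG_RESULT, Rg close to 2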
+ + :param data: 2D array with (q,I,err) + :param Rg_min: minimum value for Rg + :param qRg_max: upper bound of the Guinier region + :param relax: relaxation factor for the upper bound + :param resolution: step size of the slope histogram + :return: autRg result + """ + + raw_size = data.shape[0] + q_ary = numpy.empty(raw_size, dtype=DTYPE) + i_ary = numpy.empty(raw_size, dtype=DTYPE) + sigma_ary = numpy.empty(raw_size, dtype=DTYPE) + q2_ary = numpy.empty(raw_size, dtype=DTYPE) + lnI_ary = numpy.empty(raw_size, dtype=DTYPE) + wg_ary = numpy.empty(raw_size, dtype=DTYPE) + + start0, stop0 = guinier.curate_data( + data, q_ary, i_ary, sigma_ary, Rg_min, qRg_max, relax + ) + if start0 < 0: + raise InsufficientDataError( + "Minimum region size is %s" % guinier.min_size + ) + guinier.guinier_space( + start0, stop0, q_ary, i_ary, sigma_ary, q2_ary, lnI_ary, wg_ary + ) + + fits = guinier.many_fit( + q2_ary, lnI_ary, wg_ary, start0, stop0, Rg_min, qRg_max, relax + ) + + cnt, relaxed, qRg_max, aslope_max = guinier.count_valid( + fits, qRg_max, relax + ) + # valid_fits = fits[fits[:, 9] < qRg_max] + if cnt == 0: + raise NoGuinierRegionError(qRg_max) + + # select the Guinier region based on all fits: + start, stop = guinier.find_region(fits, qRg_max) + + # Now average out the + Rg_avg, Rg_std, I0_avg, I0_std, good = guinier.average_values( + fits, start, stop + ) + + aggregated = guinier.check_aggregation( + q2_ary, lnI_ary, wg_ary, start0, stop, Rg=Rg_avg, threshold=False + ) + quality = guinier.calc_quality( + Rg_avg, Rg_std, q_ary[start], q_ary[stop], aggregated, qRg_max + ) + result = RG_RESULT( + Rg_avg, Rg_std, I0_avg, I0_std, start, stop, quality, aggregated + ) + return result diff --git a/src/freesas/average.py b/src/freesas/average.py new file mode 100644 index 0000000..3c428c4 --- /dev/null +++ b/src/freesas/average.py @@ -0,0 +1,271 @@ +__author__ = "Guillaume" +__license__ = "MIT" +__copyright__ = "2015, ESRF" + +import numpy +from freesas.model import SASModel + + +class Grid: + """ + This class is used to create a grid which include all the input models + """ + def __init__(self, inputfiles): + """ + :param inputfiles: list of pdb files needed for averaging + """ + self.inputs = inputfiles + self.size = [] + self.nbknots = None + self.radius = None + self.coordknots = [] + + def __repr__(self): + return "Grid with %i knots"%self.nbknots + + def spatial_extent(self): + """ + Calculate the maximal extent of input models + + :return self.size: 6-list with x,y,z max and then x,y,z min + """ + atoms = [] + models_fineness = [] + for files in self.inputs: + m = SASModel(files) + if len(atoms)==0: + atoms = m.atoms + else: + atoms = numpy.append(atoms, m.atoms, axis=0) + models_fineness.append(m.fineness) + mean_fineness = sum(models_fineness) / len(models_fineness) + + coordmin = atoms.min(axis=0) - mean_fineness + coordmax = atoms.max(axis=0) + mean_fineness + self.size = [coordmax[0],coordmax[1],coordmax[2],coordmin[0],coordmin[1],coordmin[2]] + + return self.size + + def calc_radius(self, nbknots=None): + """ + Calculate the radius of each point of a hexagonal close-packed grid, + knowing the total volume and the number of knots in this grid. 
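+
+        The formula used below follows from the close-packing fraction
+        eta = pi/(3*sqrt(2)) ~ 0.74: each knot accounts for eta*V/nbknots of
+        the total volume V, hence radius = (3/(4*pi) * eta * V / nbknots)**(1/3).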
+ + :param nbknots: number of knots wanted for the grid + :return radius: the radius of each knot of the grid + """ + if len(self.size)==0: + self.spatial_extent() + nbknots = nbknots if nbknots is not None else 5000 + size = self.size + dx = size[0] - size[3] + dy = size[1] - size[4] + dz = size[2] - size[5] + volume = dx * dy * dz + + density = numpy.pi / (3*2**0.5) + radius = ((3 /( 4 * numpy.pi)) * density * volume / nbknots)**(1.0/3) + self.radius = radius + + return radius + + def make_grid(self): + """ + Create a grid using the maximal size and the radius previously computed. + The geometry used is a face-centered cubic lattice (fcc). + + :return knots: 2d-array, coordinates of each dot of the grid. Saved as self.coordknots. + """ + if len(self.size)==0: + self.spatial_extent() + if self.radius is None: + self.calc_radius() + + radius = self.radius + a = numpy.sqrt(2.0)*radius + + xmax = self.size[0] + xmin = self.size[3] + ymax = self.size[1] + ymin = self.size[4] + zmax = self.size[2] + zmin = self.size[5] + + x = 0.0 + y = 0.0 + z = 0.0 + + xlist = [] + ylist = [] + zlist = [] + knots = numpy.empty((1,4), dtype="float") + while (zmin + z) <= zmax: + zlist.append(z) + z += a + while (ymin + y) <= ymax: + ylist.append(y) + y += a + while (xmin + x) <= xmax: + xlist.append(x) + x += a + + for i in range(len(zlist)): + z = zlist[i] + if i % 2 ==0: + for j in range(len(xlist)): + x = xlist[j] + if j % 2 == 0: + for y in ylist[0:-1:2]: + knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0) + else: + for y in ylist[1:-1:2]: + knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0) + else: + for j in range(len(xlist)): + x = xlist[j] + if j % 2 == 0: + for y in ylist[1:-1:2]: + knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0) + else: + for y in ylist[0:-1:2]: + knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0) + + knots = numpy.delete(knots, 0, axis=0) + self.nbknots = knots.shape[0] + self.coordknots = knots + + return knots + + +class AverModels(): + """ + Provides tools to create an averaged models using several aligned dummy atom models + """ + def __init__(self, inputfiles, grid): + """ + :param inputfiles: list of pdb files of aligned models + :param grid: 2d-array coordinates of each point of a grid, fourth column full of zeros + """ + self.inputfiles = inputfiles + self.models = [] + self.header = [] + self.radius = None + self.atoms = [] + self.grid = grid + + def __repr__(self): + return "Average SAS model with %i atoms"%len(self.atoms) + + def read_files(self, reference=None): + """ + Read all the pdb file in the inputfiles list, creating SASModels. + The SASModels created are save in a list, the reference model is the first model in the list. + + :param reference: position of the reference model file in the inputfiles list + """ + ref = reference if reference is not None else 0 + inputfiles = self.inputfiles + + models = [] + models.append(SASModel(inputfiles[ref])) + for i in range(len(inputfiles)): + if i==ref: + continue + else: + models.append(SASModel(inputfiles[i])) + self.models = models + + return models + + def calc_occupancy(self, griddot): + """ + Assign an occupancy and a contribution factor to the point of the grid. 
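+
+        For every atom of every model, the kernel max(1 - d2/f, 0) is summed
+        into the occupancy, where d2 is the squared atom-knot distance and f
+        the fineness of the model; the contribution counts the atoms for which
+        this kernel is non-zero.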
+ + :param griddot: 1d-array, coordinates of a point of the grid + :return tuple: 2-tuple containing (occupancy, contribution) + """ + occ = 0.0 + contrib = 0 + for model in self.models: + f = model.fineness + for i in range(model.atoms.shape[0]): + dx = model.atoms[i, 0] - griddot[0] + dy = model.atoms[i, 1] - griddot[1] + dz = model.atoms[i, 2] - griddot[2] + dist = dx * dx + dy * dy + dz * dz + add = max(1 - (dist / f), 0) + if add != 0: + contrib += 1 + occ += add + return occ, contrib + + def assign_occupancy(self): + """ + For each point of the grid, total occupancy and contribution factor are computed and saved. + The grid is then ordered with decreasing value of occupancy. + The fourth column of the array correspond to the occupancy of the point and the fifth to + the contribution for this point. + + :return sortedgrid: 2d-array, coordinates of each point of the grid + """ + grid = self.grid + nbknots = grid.shape[0] + grid = numpy.append(grid, numpy.zeros((nbknots, 1), dtype="float"), axis=1) + + for i in range(nbknots): + occ, contrib = self.calc_occupancy(grid[i, 0:3]) + grid[i, 3] = occ + grid[i, 4] = contrib + + order = numpy.argsort(grid, axis=0)[:, -2] + sortedgrid = numpy.empty_like(grid) + for i in range(nbknots): + sortedgrid[nbknots - i - 1, :] = grid[order[i], :] + + return sortedgrid + + def make_header(self): + """ + Create the layout of the pdb file for the averaged model. + """ + header = [] + header.append("Number of files averaged : %s\n"%len(self.inputfiles)) + for i in self.inputfiles: + header.append(i + "\n") + header.append("Total number of dots in the grid : %s\n"%self.grid.shape[0]) + + decade = 1 + for i in range(self.grid.shape[0]): + line = "ATOM CA ASP 1 20.00 2 201\n" + line = line[:7] + "%4.i"%(i + 1) + line[11:] + if not (i + 1) % 10: + decade += 1 + line = line[:21] + "%4.i"%decade + line[25:] + header.append(line) + self.header = header + return header + + def save_aver(self, filename): + """ + Save the position of each occupied dot of the grid, its occupancy and its contribution + in a pdb file. + + :param filename: name of the pdb file to write + """ + if len(self.header) == 0: + self.make_header() + assert self.grid.shape[-1] == 5 + + nr = 0 + with open(filename, "w") as pdbout: + for line in self.header: + if line.startswith("ATOM"): + if nr < self.grid.shape[0] and self.grid[nr, 4] != 0: + coord = "%8.3f%8.3f%8.3f" % tuple(self.grid[nr, 0:3]) + occ = "%6.2f" % self.grid[nr, 3] + contrib = "%2.f" % self.grid[nr, 4] + line = line[:30] + coord + occ + line[60:66] + contrib + line[68:] + else: + line = "" + nr += 1 + pdbout.write(line) diff --git a/src/freesas/bift.py b/src/freesas/bift.py new file mode 100644 index 0000000..e2e9ec3 --- /dev/null +++ b/src/freesas/bift.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +""" +Bayesian Inverse Fourier Transform + +This code is the implementation of +Steen Hansen J. Appl. Cryst. (2000). 
33, 1415-1421 + +Based on the BIFT from Jesse Hopkins, available at: +https://sourceforge.net/p/bioxtasraw/git/ci/master/tree/bioxtasraw/BIFT.py + +Many thanks to Pierre Paleo for the auto-alpha guess +""" + +__authors__ = ["Jerome Kieffer", "Jesse Hopkins"] +__license__ = "MIT" +__copyright__ = "2020, ESRF" +__date__ = "10/06/2020" + +import logging +logger = logging.getLogger(__name__) +# from collections import namedtuple +from math import log, ceil +import numpy +from scipy.optimize import minimize +from ._bift import BIFT +from .autorg import auto_gpa, autoRg, auto_guinier, NoGuinierRegionError + + +def auto_bift(data, Dmax=None, alpha=None, npt=100, + start_point=None, end_point=None, + scan_size=11, Dmax_over_Rg=3): + """Calculates the inverse Fourier tranform of the data using an optimisation of the evidence + + :param data: 2D array with q, I(q), δI(q). q can be in 1/nm or 1/A, it imposes the unit for r & Dmax + :param Dmax: Maximum diameter of the object, this is the starting point to be refined. Can be guessed + :param alpha: Regularisation parameter, let it to None for automatic scan + :param npt: Number of point for the curve p(r) + :param start_point: First useable point in the I(q) curve, this is not the start of the Guinier region + :param end_point: Last useable point in the I(q) curve + :param scan_size: size of the initial geometrical scan for alpha values. + :param Dmax_over_Rg: In average, protein's Dmax is 3x Rg, use this to adjust + :return: BIFT object. Call the get_best to retrieve the optimal solution + """ + assert data.ndim == 2 + assert data.shape[1] == 3 # enforce q, I, err + use_wisdom = False + data = data[slice(start_point, end_point)] + q, I, err = data.T + npt = min(npt, q.size) # no chance for oversampling ! + bo = BIFT(q, I, err) # this is the bift object + if Dmax is None: + # Try to get a reasonable guess from Rg + try: + Guinier = auto_guinier(data) + except: + logger.error("Guinier analysis failed !") + raise +# print(Guinier) + if Guinier.Rg <= 0: + raise NoGuinierRegionError + Dmax = bo.set_Guinier(Guinier, Dmax_over_Rg) + if alpha is None: + alpha_max = bo.guess_alpha_max(npt) + # First scan on alpha: + key = bo.grid_scan(Dmax, Dmax, 1, + 1.0 / alpha_max, alpha_max, scan_size, npt) + Dmax, alpha = key[:2] + # Then scan on Dmax: + key = bo.grid_scan(max(Dmax / 2, Dmax * (Dmax_over_Rg - 1) / Dmax_over_Rg), Dmax * (Dmax_over_Rg + 1) / Dmax_over_Rg, scan_size, + alpha, alpha, 1, npt) + Dmax, alpha = key[:2] + if bo.evidence_cache[key].converged: + bo.update_wisdom() + use_wisdom = True + + # Optimization using Bayesian operator: + logger.info("Start search at Dmax=%.2f alpha=%.2f use wisdom=%s", Dmax, alpha, use_wisdom) + res = minimize(bo.opti_evidence, (Dmax, log(alpha)), args=(npt, use_wisdom), method="powell") + logger.info("Result of optimisation:\n %s", res) + return bo diff --git a/src/freesas/collections.py b/src/freesas/collections.py new file mode 100644 index 0000000..91fdea8 --- /dev/null +++ b/src/freesas/collections.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +# +# Project: freesas +# https://github.com/kif/freesas +# +# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the 
Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +""" +Set of namedtuples defined a bit everywhere +""" +__authors__ = ["Jérôme Kieffer"] +__license__ = "MIT" +__copyright__ = "2020 ESRF" +__date__ = "13/10/2020" + +from collections import namedtuple +from os import linesep +import numpy + +# Used in AutoRg +RG_RESULT = namedtuple( + "RG_RESULT", + "Rg sigma_Rg I0 sigma_I0 start_point end_point quality aggregated", +) + + +def _RG_RESULT_repr(self): + return f"Rg={self.Rg:6.4f}(±{self.sigma_Rg:6.4f}) I0={self.I0:6.4f}(±{self.sigma_I0:6.4f}) [{self.start_point}-{self.end_point}] {100.0*self.quality:5.2f}% {'aggregated' if self.aggregated>0.1 else ''}" + + +RG_RESULT.__repr__ = _RG_RESULT_repr + +FIT_RESULT = namedtuple( + "FIT_RESULT", + "slope sigma_slope intercept sigma_intercept, R, R2, chi2, RMSD", +) +RT_RESULT = namedtuple("RT_RESULT", "Vc sigma_Vc Qr sigma_Qr mass sigma_mass") + + +def _RT_RESULT_repr(self): + return f"Vc={self.Vc:6.4f}(±{self.sigma_Vc:6.4f}) Qr={self.Qr:6.4f}(±{self.sigma_Qr:6.4f}) mass={self.mass:6.4f}(±{self.sigma_mass:6.4f})" + + +RT_RESULT.__repr__ = _RT_RESULT_repr + +# Used in BIFT +RadiusKey = namedtuple("RadiusKey", "Dmax npt") +PriorKey = namedtuple("PriorKey", "type npt") +TransfoValue = namedtuple("TransfoValue", "transfo B sum_dia") +EvidenceKey = namedtuple("EvidenceKey", "Dmax alpha npt") +EvidenceResult = namedtuple( + "EvidenceResult", "evidence chi2r regularization radius density converged" +) + +StatsResult = namedtuple( + "StatsResult", + "radius density_avg density_std evidence_avg evidence_std Dmax_avg Dmax_std alpha_avg, alpha_std chi2r_avg chi2r_std regularization_avg regularization_std Rg_avg Rg_std I0_avg I0_std", +) + + +def save_bift(stats, filename, source=None): + "Save the results of the fit to the file" + res = [ + "Dmax= %.2f±%.2f" % (stats.Dmax_avg, stats.Dmax_std), + "𝛂= %.1f±%.1f" % (stats.alpha_avg, stats.alpha_std), + "S₀= %.4f±%.4f" % (stats.regularization_avg, stats.regularization_std), + "χ²= %.2f±%.2f" % (stats.chi2r_avg, stats.chi2r_std), + "logP= %.2f±%.2f" % (stats.evidence_avg, stats.evidence_std), + "Rg= %.2f±%.2f" % (stats.Rg_avg, stats.Rg_std), + "I₀= %.2f±%.2f" % (stats.I0_avg, stats.I0_std), + ] + with open(filename, "wt", encoding="utf-8") as out: + out.write("# %s %s" % (source or filename, "\n")) + for txt in res: + out.write("# %s %s" % (txt, "\n")) + out.write("%s# r\tp(r)\tsigma_p(r)%s" % ("\n", "\n")) + for r, p, s in zip( + stats.radius.astype(numpy.float32), + stats.density_avg.astype(numpy.float32), + stats.density_std.astype(numpy.float32), + ): + out.write("%s\t%s\t%s%s" % (r, p, s, "\n")) + return filename + ": " + "; ".join(res) + + +StatsResult.save = save_bift + +# Used in Cormap +GOF = namedtuple("GOF", ["n", "c", "P"]) diff --git a/src/freesas/cormap.py b/src/freesas/cormap.py new file mode 100644 index 0000000..7741ed5 
--- /dev/null
+++ b/src/freesas/cormap.py
@@ -0,0 +1,122 @@
+__author__ = "Jerome Kieffer"
+__license__ = "MIT"
+__copyright__ = "2017, ESRF"
+
+import numpy
+from math import log
+from .collections import GOF
+
+from ._cormap import measure_longest
+
+
+class LongestRunOfHeads(object):
+    """Implements the "longest run of heads" by Mark F. Schilling
+    The College Mathematics Journal, Vol. 21, No. 3, (1990), pp. 196-207
+
+    See: http://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020742.02p0021g.pdf
+    """
+
+    def __init__(self):
+        "We store already calculated values for (n,c)"
+        self.knowledge = {}
+
+    def A(self, n, c):
+        """Calculate A(number_of_toss, length_of_longest_run)
+
+        :param n: number of coin tosses in the experiment, an integer
+        :param c: length of the longest run of heads, an integer
+        :return: the A parameter used in the formula
+        """
+        if n <= c:
+            return 2 ** n
+        elif (n, c) in self.knowledge:
+            return self.knowledge[(n, c)]
+        else:
+            s = 0
+            for j in range(c, -1, -1):
+                s += self.A(n - 1 - j, c)
+            self.knowledge[(n, c)] = s
+            return s
+
+    def B(self, n, c):
+        """Calculate B(number_of_toss, length_of_longest_run)
+        to have either a run of heads or a run of tails
+
+        :param n: number of coin tosses in the experiment, an integer
+        :param c: length of the longest run, an integer
+        :return: the B parameter used in the formula
+        """
+        return 2 * self.A(n - 1, c - 1)
+
+    def __call__(self, n, c):
+        """Calculate the probability for the longest run of heads to exceed the observed length
+
+        :param n: number of coin tosses in the experiment, an integer
+        :param c: length of the longest run of heads, an integer
+        :return: the probability of having c subsequent heads in n tosses of a fair coin
+        """
+        if c >= n:
+            return 0
+        delta = 2 ** n - self.A(n, c)
+        if delta <= 0:
+            return 0
+        return 2.0 ** (log(delta, 2) - n)
+
+    def probaHeadOrTail(self, n, c):
+        """Calculate the probability of a longest run of heads or tails to occur
+
+        :param n: number of coin tosses in the experiment, an integer
+        :param c: length of the longest run of heads or tails, an integer
+        :return: the probability of having c subsequent heads or tails in n tosses of a fair coin
+        """
+        if c > n:
+            return 0
+        if c == 0:
+            return 0
+        delta = self.B(n, c) - self.B(n, c - 1)
+        if delta <= 0:
+            return 0
+        return min(2.0 ** (log(delta, 2.0) - n), 1.0)
+
+    def probaLongerRun(self, n, c):
+        """Calculate the probability for the longest run of heads or tails to exceed the observed length
+
+        :param n: number of coin tosses in the experiment, an integer
+        :param c: length of the observed run of heads or tails, an integer
+        :return: the probability of having more than c subsequent heads or tails in n tosses of a fair coin
+        """
+        if c > n:
+            return 0
+        if c == 0:
+            return 0
+        delta = (2 ** n) - self.B(n, c)
+        if delta <= 0:
+            return 0
+        return min(2.0 ** (log(delta, 2.0) - n), 1.0)
+
+
+LROH = LongestRunOfHeads()
+
+
+def gof(data1, data2):
+    """Calculate the probability for a couple of datasets to be equivalent
+
+    Implementation according to:
+    http://www.nature.com/nmeth/journal/v12/n5/full/nmeth.3358.html
+
+    :param data1: numpy array
+    :param data2: numpy array
+    :return: probability for the two datasets to be equivalent
+    """
+
+    if data1.ndim == 2 and data1.shape[1] > 1:
+        data1 = data1[:, 1]
+    if data2.ndim == 2 and data2.shape[1] > 1:
+        data2 = data2[:, 1]
+
+    cdata = numpy.ascontiguousarray(data2 - data1, numpy.float64).ravel()
+    c = measure_longest(cdata)
+    n = cdata.size
+    res = GOF(n, c, LROH.probaLongerRun(n, c - 1))
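+    # GOF fields: n = number of compared points, c = longest observed run of
+    # same-sign differences, P = probability that two equivalent curves would
+    # produce a run of length >= c (hence probaLongerRun(n, c - 1)).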
+    return res
diff --git a/src/freesas/decorators.py b/src/freesas/decorators.py
new file mode 100644
index 0000000..6bdb517
--- /dev/null
+++ b/src/freesas/decorators.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+#
+#    Project: Free SAS tools
+#             https://github.com/kif/freesas
+#
+#    Copyright (C) 2015-2020 European Synchrotron Radiation Facility, Grenoble, France
+#
+#    Principal author:       Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+"""Bunch of useful decorators"""
+
+__authors__ = ["Jerome Kieffer", "H. Payno", "P. Knobel", "V. Valls"]
+__contact__ = "Jerome.Kieffer@ESRF.eu"
+__license__ = "MIT"
+__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
+__date__ = "27/04/2020"
+__status__ = "development"
+__docformat__ = 'restructuredtext'
+
+import sys
+import time
+import logging
+
+timelog = logging.getLogger("freesas.timeit")
+
+
+def timeit(func):
+
+    def wrapper(*arg, **kw):
+        '''This is the docstring of timeit:
+        a decorator that logs the execution time'''
+        t1 = time.perf_counter()
+        res = func(*arg, **kw)
+        t2 = time.perf_counter()
+        name = func.func_name if sys.version_info[0] < 3 else func.__name__
+        timelog.warning("%s took %.3fs", name, t2 - t1)
+        return res
+
+    wrapper.__name__ = func.__name__
+    wrapper.__doc__ = func.__doc__
+    return wrapper
diff --git a/src/freesas/fitting.py b/src/freesas/fitting.py
new file mode 100644
index 0000000..9242c40
--- /dev/null
+++ b/src/freesas/fitting.py
@@ -0,0 +1,222 @@
+"""This module provides a function which reads in the data,
+performs the Guinier fit with a given algorithm and creates the output."""
+
+__authors__ = ["Martha Brennich"]
+__contact__ = "martha.brennich@googlemail.com"
+__license__ = "MIT"
+__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
+__date__ = "21/03/2021"
+__status__ = "development"
+__docformat__ = "restructuredtext"
+
+import sys
+import logging
+import platform
+from os import linesep as os_linesep
+from pathlib import Path
+from contextlib import contextmanager
+from typing import Callable, List, Optional, IO, Generator
+from numpy import ndarray
+from .autorg import (
+    RG_RESULT,
+    InsufficientDataError,
+    NoGuinierRegionError,
+)
+from .sasio import (
+    load_scattering_data,
+    convert_inverse_angstrom_to_nanometer,
+)
+from .sas_argparser import GuinierParser
+
+
+def set_logging_level(verbose_flag: int) -> None:
+    """
+    Set the logging level according to the verbose flag of the argparser
+
+    :param verbose_flag: int flag for logging level
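+
+    >>> set_logging_level(0)  # leaves the root logger at its default level
+    >>> set_logging_level(1)  # INFO
+    >>> set_logging_level(2)  # DEBUG (any value >= 2)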
+ """ + if verbose_flag == 1: + logging.root.setLevel(logging.INFO) + elif verbose_flag >= 2: + logging.root.setLevel(logging.DEBUG) + + +def collect_files(file_list: List[str]) -> List[Path]: + """ + Take file list from argparser and return list of paths + :param file_list: file list as returned by the argparser + :return: A list of Path objects which includes only existing files + """ + files = [Path(i) for i in file_list if Path(i).exists()] + if platform.system() == "Windows" and files == []: + files = list(Path.cwd().glob(file_list[0])) + files.sort() + return files + + +@contextmanager +def get_output_destination( + output_path: Optional[Path] = None, +) -> Generator[IO[str], None, None]: + """ + Return file or stdout object to write output to + :param output_path: None if output to stdout, else Path to outputfile + :return: opened file with write access or sys.stdout + """ + # pylint: disable=R1705 + if output_path is not None: + with open(output_path, "w") as destination: + yield destination + else: + yield sys.stdout + + +def get_linesep(output_destination: IO[str]) -> str: + """ + Get the appropriate linesep depending on the output destination. + :param output_destination: an IO object, e.g. an open file or stdout + :return: string with the correct linesep + """ + # pylint: disable=R1705 + if output_destination == sys.stdout: + return os_linesep + else: + return "\n" + + +def get_guinier_header( + linesep: str, output_format: Optional[str] = None +) -> str: + """Return appropriate header line for selected output format + :param output_format: output format from string parser + :param linesep: correct linesep for chosen destination + :return: a one-line string""" + # pylint: disable=R1705 + if output_format == "csv": + return ( + ",".join( + ( + "File", + "Rg", + "Rg StDev", + "I(0)", + "I(0) StDev", + "First point", + "Last point", + "Quality,Aggregated", + ) + ) + + linesep + ) + else: + return "" + + +def rg_result_to_output_line( + rg_result: RG_RESULT, + afile: Path, + linesep: str, + output_format: Optional[str] = None, +) -> str: + """Return result line formatted according to selected output format + :param rg_result: Result of an rg fit + :param afile: The name of the file that was processed + :param output_format: The chosen output format + :param linesep: correct linesep for chosen destination + :return: a one-line string including linesep""" + # pylint: disable=R1705 + if output_format == "csv": + return ( + ",".join( + [ + f"{afile}", + f"{rg_result.Rg:6.4f}", + f"{rg_result.sigma_Rg:6.4f}", + f"{rg_result.I0:6.4f}", + f"{rg_result.sigma_I0:6.4f}", + f"{rg_result.start_point:3}", + f"{rg_result.end_point:3}", + f"{rg_result.quality:6.4f}", + f"{rg_result.aggregated:6.4f}", + ] + ) + + linesep + ) + elif output_format == "ssv": + return ( + " ".join( + [ + f"{rg_result.Rg:6.4f}", + f"{rg_result.sigma_Rg:6.4f}", + f"{rg_result.I0:6.4f}", + f"{rg_result.sigma_I0:6.4f}", + f"{rg_result.start_point:3}", + f"{rg_result.end_point:3}", + f"{rg_result.quality:6.4f}", + f"{rg_result.aggregated:6.4f}", + f"{afile}", + ] + ) + + linesep + ) + else: + return f"{afile} {rg_result}{linesep}" + + +def run_guinier_fit( + fit_function: Callable[[ndarray], RG_RESULT], + parser: GuinierParser, + logger: logging.Logger, +) -> None: + """ + reads in the data, performs the guinier fit with a given algotithm and + creates the + :param fit_function : A Guinier fit function data -> RG_RESULT + :param parser: a function that returns the output of argparse.parse() + :param logger: a Logger + """ 
+ args = parser.parse_args() + set_logging_level(args.verbose) + files = collect_files(args.file) + logger.debug("%s input files", len(files)) + + with get_output_destination(args.output) as output_destination: + linesep = get_linesep(output_destination) + + output_destination.write( + get_guinier_header( + linesep, + args.format, + ) + ) + + for afile in files: + logger.info("Processing %s", afile) + try: + data = load_scattering_data(afile) + except OSError: + logger.error("Unable to read file %s", afile) + except ValueError: + logger.error("Unable to parse file %s", afile) + else: + if args.unit == "Å": + data = convert_inverse_angstrom_to_nanometer(data) + try: + rg_result = fit_function(data) + except ( + InsufficientDataError, + NoGuinierRegionError, + ValueError, + IndexError, + ) as err: + sys.stderr.write( + f"{afile}, {err.__class__.__name__}: {err} {os_linesep}" + ) + else: + res = rg_result_to_output_line( + rg_result, + afile, + linesep, + args.format, + ) + output_destination.write(res) + output_destination.flush() diff --git a/src/freesas/invariants.py b/src/freesas/invariants.py new file mode 100644 index 0000000..76540ec --- /dev/null +++ b/src/freesas/invariants.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# +# Project: freesas +# https://github.com/kif/freesas +# +# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +""" +This module is mainly about the calculation of the Rambo-Tainer invariant +described in: + +https://dx.doi.org/10.1038%2Fnature12070 + +Some formula taken from Putnam et al, 2007, Table 1 in the review +""" +__authors__ = ["Martha E. Brennich", "J. 
Kieffer"] +__license__ = "MIT" +__date__ = "10/06/2020" + +import logging +logger = logging.getLogger(__name__) +import numpy +from .collections import RT_RESULT + + +def extrapolate(data, guinier): + """Extrapolate SAS data according to the Guinier fit until q=0 + Uncertainties are extrapolated (linearly) from the Guinier region + + :param data: SAS data in q,I,dI format + :param guinier: result of a Guinier fit + :return: extrapolated SAS data + """ + + dq = data[1, 0] - data[0, 0] + qmin = data[guinier.start_point, 0] + + q_low = numpy.arange(0, qmin, dq) + # Extrapolate I from Guinier approximation: + I_low = guinier.I0 * numpy.exp(-(q_low**2 * guinier.Rg**2) / 3.0) + # Extrapolate dI from Guinier region: + range_ = slice(guinier.start_point, guinier.end_point+1) + slope, intercept = numpy.polyfit(data[range_, 0], data[range_, 2], deg=1) + dI_low = abs(q_low*slope + intercept) + # Now wrap-up + data_low = numpy.vstack((q_low, I_low, dI_low)).T + return numpy.concatenate((data_low, data[guinier.start_point:])) + + +def calc_Porod(data, guinier): + """Calculate the particle volume according to Porod's formula: + + V = 2*π²I₀²/(sum_q I(q)q² dq) + + Formula from Putnam's review, 2007, table 1 + Intensities are extrapolated to q=0 using Guinier fit. + + :param data: SAS data in q,I,dI format + :param Guinier: result of a Guinier fit (instance of RT_RESULT) + :return: Volume calculated according to Porrod's formula + """ + q, I, dI = extrapolate(data, guinier).T + + denom = numpy.trapz(I*q**2, q) + volume = 2*numpy.pi**2*guinier.I0 / denom + return volume + + +def calc_Vc(data, Rg, dRg, I0, dI0, imin): + """Calculates the Rambo-Tainer invariant Vc, including extrapolation to q=0 + + :param data: SAS data in q,I,dI format, cropped to maximal q that should be used for calculation (normally 2 nm-1) + :param Rg,dRg,I0,dI0: results from Guinier approximation/autorg + :param imin: minimal index of the Guinier range, below that index data will be extrapolated by the Guinier approximation + :returns: Vc and an error estimate based on non-correlated error propagation + """ + dq = data[1, 0] - data[0, 0] + qmin = data[imin, 0] + qlow = numpy.arange(0, qmin, dq) + + lowqint = numpy.trapz((qlow * I0 * numpy.exp(-(qlow * qlow * Rg * Rg) / 3.0)), qlow) + dlowqint = numpy.trapz(qlow * numpy.sqrt((numpy.exp(-(qlow * qlow * Rg * Rg) / 3.0) * dI0) ** 2 + ((I0 * 2.0 * (qlow * qlow) * Rg / 3.0) * numpy.exp(-(qlow * qlow * Rg * Rg) / 3.0) * dRg) ** 2), qlow) + vabs = numpy.trapz(data[imin:, 0] * data[imin:, 1], data[imin:, 0]) + dvabs = numpy.trapz(data[imin:, 0] * data[imin:, 2], data[imin:, 0]) + vc = I0 / (lowqint + vabs) + dvc = (dI0 / I0 + (dlowqint + dvabs) / (lowqint + vabs)) * vc + return (vc, dvc) + + +def calc_Rambo_Tainer(data, + guinier, qmax=2.0): + """calculates the invariants Vc and Qr from the Rambo & Tainer 2013 Paper, + also the the mass estimate based on Qr for proteins + + :param data: data in q,I,dI format, q in nm^-1 + :param guinier: RG_RESULT instance with result from the Guinier fit + :param qmax: maximum q-value for the calculation in nm^-1 + @return: dict with Vc, Qr and mass plus errors + """ + scale_prot = 1.0 / 0.1231 + power_prot = 1.0 + + imax = abs(data[:, 0] - qmax).argmin() + if (imax <= guinier.start_point) or (guinier.start_point < 0): # unlikely but can happened + logger.error("Guinier region start too late for Rambo_Tainer invariants calculation") + return None + vc = calc_Vc(data[:imax, :], guinier.Rg, guinier.sigma_Rg, guinier.I0, guinier.sigma_I0, guinier.start_point) + 
+    qr = vc[0] ** 2 / (guinier.Rg)
+    mass = scale_prot * qr ** power_prot
+
+    dqr = qr * (guinier.sigma_Rg / guinier.Rg + 2 * ((vc[1]) / (vc[0])))
+    dmass = mass * dqr / qr
+
+    return RT_RESULT(vc[0], vc[1], qr, dqr, mass, dmass)
diff --git a/src/freesas/meson.build b/src/freesas/meson.build
new file mode 100644
index 0000000..9292813
--- /dev/null
+++ b/src/freesas/meson.build
@@ -0,0 +1,25 @@
+subdir('ext')
+#subdir('test')
+#subdir('utils')
+
+py.install_sources([
+    '_version.py',
+    '__init__.py',
+    'align.py',
+    'autorg.py',
+    'average.py',
+    'bift.py',
+    'collections.py',
+    'cormap.py',
+    'decorators.py',
+    'fitting.py',
+    'invariants.py',
+    'model.py',
+    'plot.py',
+    'sas_argparser.py',
+    'sasio.py',
+    'transformations.py',
+],
+  pure: false,       # Will be installed next to binaries
+  subdir: 'freesas'  # Folder relative to site-packages to install to
+)
diff --git a/src/freesas/model.py b/src/freesas/model.py
new file mode 100644
index 0000000..fd367bc
--- /dev/null
+++ b/src/freesas/model.py
@@ -0,0 +1,315 @@
+#!/usr/bin/env python
+# coding: utf-8
+from __future__ import print_function
+
+__author__ = "Guillaume"
+__license__ = "MIT"
+__copyright__ = "2015, ESRF"
+
+import os
+from math import sqrt
+import threading
+import six
+import numpy
+try:
+    from . import _distance
+except ImportError:
+    _distance = None
+from . import transformations
+
+
+def delta_expand(vec1, vec2):
+    """Create a 2d array with the difference vec1[i]-vec2[j]
+
+    :param vec1, vec2: 1d-arrays
+    :return v1 - v2: difference for any element of v1 and v2 (i.e. a 2D array)
+    """
+    v1 = numpy.ascontiguousarray(vec1)
+    v2 = numpy.ascontiguousarray(vec2)
+    v1.shape = -1, 1
+    v2.shape = 1, -1
+    v1.strides = v1.strides[0], 0
+    v2.strides = 0, v2.strides[-1]
+    return v1 - v2
+
+
+class SASModel:
+    """
+    Tools for Dummy Atom Model manipulation
+    """
+
+    def __init__(self, molecule=None):
+        """
+        :param molecule: if str, name of a pdb file, else if 2d-array, coordinates of the atoms of a molecule
+        """
+        self.atoms = []  # initial coordinates of each dummy atom of the molecule; the fourth column is full of ones for the transformation matrix
+        self.header = ""  # header of the PDB file
+        self.rfactor = None
+        self.radius = 1.0  # unused at the moment
+        self.com = []
+        self._fineness = None
+        self._Rg = None
+        self._Dmax = None
+        self.inertensor = []
+        self.can_param = []
+        self.enantiomer = None  # symmetry used on the molecule
+        self._sem = threading.Semaphore()
+        if isinstance(molecule, (six.text_type, six.binary_type)) and os.path.exists(molecule):
+            self.read(molecule)
+        else:
+            self.atoms = molecule if molecule is not None else []
+
+    def __repr__(self):
+        return "SAS model with %i atoms" % len(self.atoms)
+
+    def read(self, filename):
+        """
+        Read a PDB file:
+        extract the coordinates of each dummy atom, the R-factor of the model
+        and the header of the PDB file.
+
+        :param filename: name of the pdb file to read
+        """
+        header = []
+        atoms = []
+        with open(filename) as fd:
+            for line in fd:
+                if line.startswith("ATOM"):
+                    x = float(line[30:38])
+                    y = float(line[38:46])
+                    z = float(line[46:54])
+                    atoms.append([x, y, z])
+                if line.startswith("REMARK 265 Final R-factor"):  # very dependent on the PDB file format!
+                    self.rfactor = float(line[43:56])
+                header.append(line)
+        self.header = header
+        atom3 = numpy.array(atoms)
+        self.atoms = numpy.append(atom3, numpy.ones((atom3.shape[0], 1), dtype="float"), axis=1)
+
+    def save(self, filename):
+        """
+        Save the position of each dummy atom in a PDB file.
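+        (Note that the fourth, homogeneous coordinate column is dropped
+        from self.atoms before writing.)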
+
+        :param filename: name of the pdb file to write
+        """
+        nr = 0
+        self.atoms = numpy.delete(self.atoms, 3, 1)
+        with open(filename, "w") as pdbout:
+            for line in self.header:
+                if line.startswith("ATOM"):
+                    if nr < self.atoms.shape[0]:
+                        line = line[:30] + "%8.3f%8.3f%8.3f" % tuple(self.atoms[nr]) + line[54:]
+                    else:
+                        line = ""
+                    nr += 1
+                pdbout.write(line)
+
+    def centroid(self):
+        """
+        Calculate the position of the center of mass of the molecule.
+
+        :return self.com: 1d array, coordinates of the center of mass of the molecule
+        """
+        mol = self.atoms[:, 0:3]
+        self.com = mol.mean(axis=0)
+        return self.com
+
+    def inertiatensor(self):
+        """
+        Calculate the inertia tensor of the molecule.
+
+        :return self.inertensor: inertia tensor of the molecule
+        """
+        if len(self.com) == 0:
+            self.com = self.centroid()
+
+        mol = self.atoms[:, 0:3] - self.com
+        self.inertensor = numpy.empty((3, 3), dtype="float")
+        delta_kron = lambda i, j: 1 if i == j else 0
+        for i in range(3):
+            for j in range(i, 3):
+                self.inertensor[i, j] = self.inertensor[j, i] = (delta_kron(i, j) * (mol ** 2).sum(axis=1) - (mol[:, i] * mol[:, j])).sum() / mol.shape[0]
+        return self.inertensor
+
+    def canonical_translate(self):
+        """
+        Calculate the translation matrix which brings the center of mass of the molecule to the origin of the frame.
+
+        :return trans: translation matrix
+        """
+        if len(self.com) == 0:
+            self.com = self.centroid()
+
+        trans = numpy.identity(4, dtype="float")
+        trans[0:3, 3] = -self.com
+        return trans
+
+    def canonical_rotate(self):
+        """
+        Calculate the rotation matrix which aligns the principal axes of inertia of the molecule with the coordinate axes.
+
+        :return rot: rotation matrix, det == 1
+        """
+        if len(self.inertensor) == 0:
+            self.inertensor = self.inertiatensor()
+
+        w, v = numpy.linalg.eigh(self.inertensor)
+        mat = v[:, w.argsort()]
+
+        rot = numpy.zeros((4, 4), dtype="float")
+        rot[3, 3] = 1
+        rot[:3, :3] = mat.T
+
+        det = numpy.linalg.det(mat)
+        if det > 0:
+            self.enantiomer = [1, 1, 1]
+        else:
+            self.enantiomer = [-1, -1, -1]
+            mirror = numpy.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype="float")
+            rot = numpy.dot(mirror, rot)
+
+        return rot
+
+    def canonical_parameters(self):
+        """
+        Save the 6 canonical parameters of the initial molecule:
+        x0, y0, z0, the position of the center of mass;
+        phi, theta, psi, the three Euler angles of the canonical rotation (axes: x, y', z'')
+        """
+        rot = self.canonical_rotate()
+        trans = self.canonical_translate()
+
+        angles = transformations.euler_from_matrix(rot)
+        shift = transformations.translation_from_matrix(trans)
+        self.can_param = [shift[0], shift[1], shift[2], angles[0], angles[1], angles[2]]

+    def calc_invariants(self, use_cython=True):
+        """
+        Calculate the invariants of the structure:
+        * fineness, i.e. the average distance between an atom and its nearest neighbor
+        * radius of gyration of the model
+        * diameter of the model
+
+        :return invariants: 3-tuple containing (fineness, Rg, Dmax)
+        """
+        if _distance and use_cython:
+            return _distance.calc_invariants(self.atoms)
+
+        else:
+            size = self.atoms.shape[0]
+            D = delta_expand(self.atoms[:, 0], self.atoms[:, 0]) ** 2 + delta_expand(self.atoms[:, 1], self.atoms[:, 1]) ** 2 + delta_expand(self.atoms[:, 2], self.atoms[:, 2]) ** 2
+            Rg = sqrt(D.sum() / 2.0) / size
+            Dmax = sqrt(D.max())
+            d12 = (D.max() * numpy.eye(size) + D).min(axis=0).mean()
+            fineness = sqrt(d12)
+            return fineness, Rg, Dmax
+
+    @property
+    def fineness(self):
+        if self._fineness is None:
+            with self._sem:
+                if self._fineness is None:
+                    self._fineness, self._Rg, self._Dmax = self.calc_invariants()
+        return self._fineness
+
+    @property
+    def Rg(self):
+        if self._Rg is None:
+            with self._sem:
+                if self._Rg is None:
+                    self._fineness, self._Rg, self._Dmax = self.calc_invariants()
+        return self._Rg
+
+    @property
+    def Dmax(self):
+        if self._Dmax is None:
+            with self._sem:
+                if self._Dmax is None:
+                    self._fineness, self._Rg, self._Dmax = self.calc_invariants()
+        return self._Dmax
+
+    def dist(self, other, molecule1, molecule2, use_cython=True):
+        """
+        Calculate the distance with another model.
+
+        :param other: another SASModel
+        :param molecule1: 2d array of the position of each atom of the first molecule
+        :param molecule2: 2d array of the position of each atom of the second molecule
+        :return D: NSD between the 2 molecules, in their positions molecule1 and molecule2
+        """
+        if _distance and use_cython:
+            return _distance.calc_distance(molecule1, molecule2, self.fineness, other.fineness)
+
+        else:
+            mol1 = molecule1[:, 0:3]
+            mol2 = molecule2[:, 0:3]
+
+            mol1x = mol1[:, 0]
+            mol1y = mol1[:, 1]
+            mol1z = mol1[:, 2]
+            mol1x.shape = mol1.shape[0], 1
+            mol1y.shape = mol1.shape[0], 1
+            mol1z.shape = mol1.shape[0], 1
+
+            mol2x = mol2[:, 0]
+            mol2y = mol2[:, 1]
+            mol2z = mol2[:, 2]
+            mol2x.shape = mol2.shape[0], 1
+            mol2y.shape = mol2.shape[0], 1
+            mol2z.shape = mol2.shape[0], 1
+
+            d2 = delta_expand(mol1x, mol2x) ** 2 + delta_expand(mol1y, mol2y) ** 2 + delta_expand(mol1z, mol2z) ** 2
+
+            D = (0.5 * ((1. / ((mol1.shape[0]) * other.fineness * other.fineness)) * (d2.min(axis=1).sum()) + (1. / ((mol2.shape[0]) * self.fineness * self.fineness)) * (d2.min(axis=0)).sum())) ** 0.5
+            return D
+
+    def transform(self, param, symmetry, reverse=None):
+        """
+        Calculate the new coordinates of each dummy atom of the molecule after a transformation defined by six parameters and a symmetry
+
+        :param param: 6 parameters of transformation (3 coordinates of translation, 3 Euler angles)
+        :param symmetry: list of three constants which define a symmetry to apply
+        :return mol: 2d array, coordinates after transformation
+        """
+        mol = self.atoms
+
+        sym = numpy.array([[symmetry[0], 0, 0, 0], [0, symmetry[1], 0, 0], [0, 0, symmetry[2], 0], [0, 0, 0, 1]], dtype="float")
+        if not reverse:
+            vect = numpy.array([param[0:3]])
+            angles = (param[3:6])
+
+            translat1 = transformations.translation_matrix(vect)
+            rotation = transformations.euler_matrix(*angles)
+            translat2 = numpy.dot(numpy.dot(rotation, translat1), rotation.T)
+            transformation = numpy.dot(translat2, rotation)
+
+        else:
+            vect = -numpy.array([param[0:3]])
+            angles = (-param[5], -param[4], -param[3])
+
+            translat = transformations.translation_matrix(vect)
+            rotation = transformations.euler_matrix(*angles, axes="szyx")
+            transformation = numpy.dot(translat, rotation)
+
+        mol = numpy.dot(transformation, mol.T)
+        mol = numpy.dot(sym, mol).T
+        return mol
+
+    def dist_after_movement(self, param, other, symmetry):
+        """
+        The first molecule, molref, is put in its canonical position.
+        The second one, mol2, is moved according to the selected transformation.
+
+        :param param: list of 6 parameters for the transformation, 3 coordinates of translation and 3 Euler angles
+        :param symmetry: list of three constants which define a symmetry to apply
+        :return distance: the NSD between the first molecule and the second one after its movement
+        """
+        if not self.can_param:
+            self.canonical_parameters()
+
+        can_param1 = self.can_param
+        molref_can = self.transform(can_param1, [1, 1, 1])  # reference molecule put in its canonical position
+
+        mol2_moved = other.transform(param, symmetry)  # selected movement applied to mol2
+        distance = self.dist(other, molref_can, mol2_moved)
+
+        return distance
diff --git a/src/freesas/plot.py b/src/freesas/plot.py
new file mode 100644
index 0000000..3509274
--- /dev/null
+++ b/src/freesas/plot.py
@@ -0,0 +1,562 @@
+# -*- coding: utf-8 -*-
+"""
+Functions for generating graphs related to SAS.
+"""
+
+__authors__ = ["Jerome Kieffer"]
+__license__ = "MIT"
+__copyright__ = "2020, ESRF"
+__date__ = "15/09/2022"
+
+import logging
+
+logger = logging.getLogger(__name__)
+import numpy
+from matplotlib.pyplot import subplots
+
+
+def scatter_plot(
+    data,
+    guinier=None,
+    ift=None,
+    filename=None,
+    img_format="svg",
+    unit="nm",
+    title="Scattering curve",
+    ax=None,
+    labelsize=None,
+    fontsize=None,
+):
+    """
+    Generate a scattering plot I = f(q) on a semi-log y scale.
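+    If a Guinier fit and/or a BIFT result is given, the corresponding
+    fitted curves are overlaid on the experimental data.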
+
+    :param data: data read from an ASCII file, 3 column (q,I,err)
+    :param filename: name of the file where the curve should be saved
+    :param img_format: image format
+    :param unit: unit name for Rg and 1/q
+    :param guinier: output of autoRg
+    :param ift: converged instance of BIFT (output of auto_bift)
+    :param ax: subplot where the plot shall go in
+    :return: the matplotlib figure
+    """
+    label_exp = "Experimental data"
+    label_guinier = "Guinier region"
+    label_ift = "BIFT extrapolated"
+    exp_color = "blue"
+    err_color = "lightblue"
+    guinier_color = "limegreen"
+    ift_color = "crimson"
+    assert data.ndim == 2
+    assert data.shape[1] >= 2
+    q = data.T[0]
+    I = data.T[1]
+    try:
+        err = data.T[2]
+    except IndexError:
+        err = None
+    if ax:
+        fig = ax.figure
+    else:
+        fig, ax = subplots()
+
+    # Extend q to zero
+    delta_q = (q[-1] - q[0]) / (len(q) - 1)
+    extra_q = int(q[0] / delta_q)
+    first = q[0] - extra_q * delta_q
+    q_ext = numpy.linspace(first, q[-1], extra_q + len(q))
+
+    if guinier is None:
+        if ift is not None:
+            # No Guinier fit given: fall back on the parameters of the
+            # BIFT fit (this branch assumes ift is a converged BIFT instance)
+            stats = ift.calc_stats()
+            I0 = stats.I0_avg
+            rg = stats.Rg_avg
+            first_point = ift.high_start
+            last_point = ift.high.stop
+        else:
+            rg = I0 = first_point = last_point = None
+    else:
+        I0 = guinier.I0
+        rg = guinier.Rg
+        first_point = guinier.start_point
+        last_point = guinier.end_point
+
+    if (rg is None) and (ift is None):
+        if err is not None:
+            ax.errorbar(
+                q,
+                I,
+                err,
+                label=label_exp,
+                capsize=0,
+                color=exp_color,
+                ecolor=err_color,
+            )
+        else:
+            ax.plot(q, I, label=label_exp, color="blue")
+    else:
+        q_guinier = q[first_point:last_point]
+        I_guinier = I0 * numpy.exp(-((q_guinier * rg) ** 2) / 3)
+        if err is not None:
+            ax.errorbar(
+                q,
+                I,
+                err,
+                label=label_exp,
+                capsize=0,
+                color=exp_color,
+                ecolor=err_color,
+                alpha=0.5,
+            )
+        else:
+            ax.plot(q, I, label=label_exp, color=exp_color, alpha=0.5)
+        label_guinier += ": $R_g=$%.2f %s, $I_0=$%.2f" % (rg, unit, I0)
+        ax.plot(
+            q_guinier,
+            I_guinier,
+            label=label_guinier,
+            color=guinier_color,
+            linewidth=5,
+        )
+
+    if ift:
+        from ._bift import BIFT, StatsResult
+
+        if isinstance(ift, BIFT):
+            stats = ift.calc_stats()
+        elif isinstance(ift, StatsResult):
+            stats = ift
+        else:
+            raise TypeError("ift is expected to be a BIFT object")
+
+        r = stats.radius
+        T = numpy.outer(q_ext, r / numpy.pi)
+        T = (4 * numpy.pi * (r[-1] - r[0]) / (len(r) - 1)) * numpy.sinc(T)
+        p = stats.density_avg
+        label_ift += ": $D_{max}=$%.2f %s,\n $R_g=$%.2f %s, $I_0=$%.2f" % (
+            stats.Dmax_avg,
+            unit,
+            stats.Rg_avg,
+            unit,
+            stats.I0_avg,
+        )
+        ax.plot(q_ext, T.dot(p), label=label_ift, color=ift_color)
+
+    ax.set_ylabel("$I(q)$ (log scale)", fontsize=fontsize)
+    ax.set_xlabel("$q$ (%s$^{-1}$)" % unit, fontsize=fontsize)
+    ax.set_title(title)
+    ax.set_yscale("log")
+    # ax.set_ylim(ymin=I.min() * 10, top=I.max() * 1.1)
+
+    # Re-order labels ...
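+    # so that the legend always lists the experimental data first, then the
+    # Guinier region, then the BIFT fit, whatever the drawing order was.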
+    crv, lab = ax.get_legend_handles_labels()
+    ordered_lab = []
+    ordered_crv = []
+    for l in [label_exp, label_guinier, label_ift]:
+        try:
+            idx = lab.index(l)
+        except ValueError:
+            continue
+        ordered_lab.append(lab[idx])
+        ordered_crv.append(crv[idx])
+    ax.legend(ordered_crv, ordered_lab, loc=3)
+    ax.tick_params(axis="x", labelsize=labelsize)
+    ax.tick_params(axis="y", labelsize=labelsize)
+    if filename:
+        if img_format:
+            fig.savefig(filename, format=img_format)
+        else:
+            fig.savefig(filename)
+    return fig
+
+
+def kratky_plot(
+    data,
+    guinier,
+    filename=None,
+    img_format="svg",
+    unit="nm",
+    title="Dimensionless Kratky plot",
+    ax=None,
+    labelsize=None,
+    fontsize=None,
+):
+    """
+    Generate a Kratky plot q²Rg²I/I₀ = f(q·Rg)
+
+    :param data: data read from an ASCII file, 3 column (q,I,err)
+    :param guinier: output of autoRg
+    :param filename: name of the file where the curve should be saved
+    :param img_format: image format
+    :param unit: unit name for Rg and 1/q
+    :param ax: subplot where the plot shall go in
+    :return: the matplotlib figure
+    """
+    label = "Experimental data"
+    assert data.ndim == 2
+    assert data.shape[1] >= 2
+    q = data.T[0]
+    I = data.T[1]
+    try:
+        err = data.T[2]
+    except IndexError:
+        err = None
+    if ax:
+        fig = ax.figure
+    else:
+        fig, ax = subplots()
+    Rg = guinier.Rg
+    I0 = guinier.I0
+
+    xdata = q * Rg
+    ydata = xdata * xdata * I / I0
+    if err is not None:
+        dy = xdata * xdata * err / I0
+        dplot = ax.errorbar(
+            xdata,
+            ydata,
+            dy,
+            label=label,
+            capsize=0,
+            color="blue",
+            ecolor="lightblue",
+        )
+    else:
+        dplot = ax.plot(xdata, ydata, label=label, color="blue")
+    ax.set_ylabel("$(qR_{g})^2 I/I_{0}$", fontsize=fontsize)
+    ax.set_xlabel("$qR_{g}$", fontsize=fontsize)
+    ax.legend(loc=1)
+
+    ax.hlines(
+        3.0 * numpy.exp(-1),
+        xmin=-0.05,
+        xmax=max(xdata),
+        color="0.75",
+        linewidth=1.0,
+    )
+    ax.vlines(
+        numpy.sqrt(3.0),
+        ymin=-0.01,
+        ymax=max(ydata),
+        color="0.75",
+        linewidth=1.0,
+    )
+    ax.set_xlim(left=-0.05, right=8.5)
+    ax.set_ylim(bottom=-0.01, top=(min(3.5, max(ydata))))
+    ax.set_title(title)
+    # ax.legend([dplot[0]], [dplot[0].get_label()], loc=0)
+    ax.legend(loc=0)
+    ax.tick_params(axis="x", labelsize=labelsize)
+    ax.tick_params(axis="y", labelsize=labelsize)
+
+    if filename:
+        if img_format:
+            fig.savefig(filename, format=img_format)
+        else:
+            fig.savefig(filename)
+    return fig
+
+
+def guinier_plot(
+    data,
+    guinier,
+    filename=None,
+    img_format="png",
+    unit="nm",
+    ax=None,
+    labelsize=None,
+    fontsize=None,
+):
+    """
+    Generate a Guinier plot: ln(I) = f(q²)
+
+    :param data: data read from an ASCII file, 3 column (q,I,err)
+    :param guinier: a RG_RESULT object from autoRg
+    :param filename: name of the file where the curve should be saved
+    :param img_format: image format
+    :param ax: subplot where to plot in
+    :return: the matplotlib figure
+    """
+    assert data.ndim == 2
+    assert data.shape[1] >= 2
+
+    q, I, err = data.T[:3]
+
+    mask = (I > 0) & numpy.isfinite(I) & (q > 0) & numpy.isfinite(q)
+    if err is not None:
+        mask &= (err > 0.0) & numpy.isfinite(err)
+    mask = mask.astype(bool)
+    Rg = guinier.Rg
+    I0 = guinier.I0
+    first_point = guinier.start_point
+    last_point = guinier.end_point
+    intercept = numpy.log(I0)
+    slope = -Rg * Rg / 3.0
+    end = numpy.where(q > 1.5 / Rg)[0][0]
+    mask[end:] = False
+
+    q2 = q[mask] ** 2
+    logI = numpy.log(I[mask])
+
+    if ax:
+        fig = ax.figure
+    else:
+        fig, ax = subplots(figsize=(12, 10))
+    if err is not None:
+        # error propagation: σ(ln I) = σ(I)/I
+        dlogI = err[mask] / I[mask]
+        ax.errorbar(
+            q2,
+            logI,
+            dlogI,
+            label="Experimental curve",
+            capsize=0,
color="blue", + ecolor="lightblue", + alpha=0.5, + ) + else: + ax.plot( + q2[mask], + logI[mask], + label="Experimental curve", + color="blue", + alpha=0.5, + ) + # ax.plot(q2[first_point:last_point], logI[first_point:last_point], marker='D', markersize=5, label="guinier region") + xmin = q[first_point] ** 2 + xmax = q[last_point] ** 2 + ymax = numpy.log(I[first_point]) + ymin = numpy.log(I[last_point]) + dy = (ymax - ymin) / 2.0 + ax.vlines(xmin, ymin=ymin, ymax=ymax + dy, color="0.75", linewidth=1.0) + ax.vlines( + xmax, ymin=ymin - dy, ymax=ymin + dy, color="0.75", linewidth=1.0 + ) + ax.annotate( + "$(qR_{g})_{min}$=%.1f" % (Rg * q[first_point]), + (xmin, ymax + dy), + xytext=None, + xycoords="data", + textcoords="data", + ) + ax.annotate( + "$(qR_{g})_{max}$=%.1f" % (Rg * q[last_point]), + (xmax, ymin + dy), + xytext=None, + xycoords="data", + textcoords="data", + ) + ax.annotate( + "Guinier region", + (xmin, ymin - dy), + xytext=None, + xycoords="data", + textcoords="data", + ) + ax.plot( + q2[:end], + intercept + slope * q2[:end], + label="ln[$I(q)$] = %.2f %.2f * $q^2$" % (intercept, slope), + color="crimson", + ) + ax.set_ylabel("ln[$I(q)$]", fontsize=fontsize) + ax.set_xlabel("$q^2$ (%s$^{-2}$)" % unit, fontsize=fontsize) + ax.set_title("Guinier plot: $R_{g}=$%.2f %s $I_{0}=$%.2f" % (Rg, unit, I0)) + ax.legend() + ax.tick_params(axis="x", labelsize=labelsize) + ax.tick_params(axis="y", labelsize=labelsize) + + if filename: + if img_format: + fig.savefig(filename, format=img_format) + else: + fig.savefig(filename) + return fig + + +def density_plot( + ift, + filename=None, + img_format="png", + unit="nm", + ax=None, + labelsize=None, + fontsize=None, +): + """ + Generate a density plot p(r) + + :param ift: An IFT result comming out of BIFT + :param filename: name of the file where the cuve should be saved + :param img_format: image image format + :param ax: subplotib where to plot in + :return: the matplotlib figure + """ + if ax: + fig = ax.figure + else: + fig, ax = subplots(figsize=(12, 10)) + + from ._bift import BIFT, StatsResult + + if isinstance(ift, BIFT): + stats = ift.calc_stats() + elif isinstance(ift, StatsResult): + stats = ift + else: + raise TypeError("ift is expected to be a BIFT object") + + ax.errorbar( + ift.radius, + ift.density_avg, + ift.density_std, + label="BIFT: χ$_{r}^{2}=$%.2f\n $D_{max}=$%.2f %s\n $R_{g}=$%.2f %s\n $I_{0}=$%.2f" + % ( + stats.chi2r_avg, + stats.Dmax_avg, + unit, + stats.Rg_avg, + unit, + stats.I0_avg, + ), + capsize=0, + color="blue", + ecolor="lightblue", + ) + ax.set_ylabel("$p(r)$", fontsize=fontsize) + ax.set_xlabel("$r$ (%s)" % unit, fontsize=fontsize) + ax.set_title("Pair distribution function") + ax.legend() + ax.tick_params(axis="x", labelsize=labelsize) + ax.tick_params(axis="y", labelsize=labelsize) + + if filename: + if img_format: + fig.savefig(filename, format=img_format) + else: + fig.savefig(filename) + return fig + + +def plot_all( + data, + filename=None, + img_format=None, + unit="nm", + labelsize=None, + fontsize=None, +): + from . 
+
+    try:
+        guinier = autorg.autoRg(data)
+    except autorg.InsufficientDataError:
+        raise
+    logger.debug(guinier)
+    try:
+        bo = bift.auto_bift(data, npt=100, scan_size=11, Dmax_over_Rg=3)
+    except (
+        autorg.InsufficientDataError,
+        autorg.NoGuinierRegionError,
+        ValueError,
+    ):
+        raise
+    else:
+        ift = bo.calc_stats()
+        logger.debug(ift)
+    fig, ax = subplots(2, 2, figsize=(12, 10))
+    scatter_plot(
+        data,
+        guinier=guinier,
+        ift=ift,
+        ax=ax[0, 0],
+        unit=unit,
+        labelsize=labelsize,
+        fontsize=fontsize,
+    )
+    guinier_plot(
+        data,
+        guinier,
+        filename=None,
+        img_format=None,
+        unit=unit,
+        ax=ax[0, 1],
+        labelsize=labelsize,
+        fontsize=fontsize,
+    )
+    kratky_plot(
+        data,
+        guinier,
+        filename=None,
+        img_format=None,
+        unit=unit,
+        ax=ax[1, 0],
+        labelsize=labelsize,
+        fontsize=fontsize,
+    )
+    density_plot(
+        ift,
+        filename=None,
+        img_format=None,
+        unit=unit,
+        ax=ax[1, 1],
+        labelsize=labelsize,
+        fontsize=fontsize,
+    )
+    if filename is not None:
+        if img_format:
+            fig.savefig(filename, format=img_format)
+        else:
+            fig.savefig(filename)
+    return fig
+
+
+def hplc_plot(hplc,
+              fractions=None,
+              title="Chromatogram",
+              filename=None,
+              img_format="png",
+              ax=None,
+              labelsize=None,
+              fontsize=None,):
+    """
+    Generate an HPLC plot I=f(t)
+
+    :param hplc: stack of diffraction data
+    :param fractions: list of 2-tuples with the first and last index of each fraction
+    :param filename: name of the file where the curve should be saved
+    :param img_format: image format
+    :param ax: subplot where to plot in
+    :return: the matplotlib figure
+    """
+    if ax:
+        fig = ax.figure
+    else:
+        fig, ax = subplots(figsize=(12, 10))
+    data = [sum(i) if hasattr(i, '__iter__') else i for i in hplc]
+    ax.plot(data, label="Chromatogram")
+    ax.set_xlabel("Elution (frame index)", fontsize=fontsize)
+    ax.set_ylabel("Summed intensities", fontsize=fontsize)
+    ax.set_title(title)
+
+    ax.tick_params(axis="x", labelsize=labelsize)
+    ax.tick_params(axis="y", labelsize=labelsize)
+    if fractions is not None and len(fractions):
+        fractions.sort()
+        l = len(data)
+        idx = list(range(l))
+        for start, stop in fractions:
+            start = int(min(l-1, max(0, start)))
+            stop = int(min(l-1, max(0, stop)))
+            ax.plot(idx[start:stop+1],
+                    data[start:stop+1],
+                    label=f"Fraction {start}-{stop}",
+                    linewidth=10,
+                    alpha=0.5)
+    ax.legend()
+
+    if filename:
+        if img_format:
+            fig.savefig(filename, format=img_format)
+        else:
+            fig.savefig(filename)
+    return fig
+
diff --git a/src/freesas/sas_argparser.py b/src/freesas/sas_argparser.py
new file mode 100644
index 0000000..50b2ef8
--- /dev/null
+++ b/src/freesas/sas_argparser.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+"""
+Generalized arg parser for freeSAS apps to ensure a unified command line API.
+"""
+
+__author__ = "Martha Brennich"
+__license__ = "MIT"
+__copyright__ = "2020, ESRF"
+__date__ = "09/08/2020"
+
+import argparse
+from pathlib import Path
+from freesas import dated_version as freesas_version
+
+
+def parse_unit(unit_input: str) -> str:
+    """
+    Parser for sloppy acceptance of unit flags.
+    Current rules:
+    "A" ➔ "Å"
+    :param unit_input: unit flag as provided by the user
+    :return: cast of the user input to a known flag if a sloppy rule is
+             defined, else the user input.
+    """
+    if unit_input == "A":  # pylint: disable=R1705
+        return "Å"
+    else:
+        return unit_input
+
+
+class SASParser:
+
+    """
+    Wrapper class for argparse ArgumentParser that provides predefined arguments.
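+
+    A minimal, illustrative example (program name and texts are made up):
+
+        parser = SASParser("free_app", "An example app", "epilog text")
+        parser.add_file_argument(help_text="dat files to process")
+        parser.add_q_unit_argument()
+        args = parser.parse_args()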
+ """ + + usage = "" + + def __init__(self, prog: str, description: str, epilog: str, **kwargs): + """ + Create parser argparse ArgumentParser + - standardized usage text + - standardized verion text + - verbose and version args added by default + + :param prog: name of the executable + :param description: description param of argparse ArgumentParser + :param epilog: epilog param of argparse ArgumentParser + :param kwargs: additional kwargs for argparse ArgumentParser + """ + + self.usage = "%s [OPTIONS] FILES " % (prog) + version = "%s version %s from %s" % ( + prog, + freesas_version.version, + freesas_version.date, + ) + + self.parser = argparse.ArgumentParser( + usage=self.usage, description=description, epilog=epilog, **kwargs + ) + self.add_argument( + "-v", + "--verbose", + default=0, + help="switch to verbose mode", + action="count", + ) + self.add_argument("-V", "--version", action="version", version=version) + + def parse_args(self, *args, **kwargs): + """ Wrapper for argparse parse_args() """ + return self.parser.parse_args(*args, **kwargs) + + def add_argument(self, *args, **kwargs): + """ Wrapper for argparse add_argument() """ + self.parser.add_argument(*args, **kwargs) + + def add_file_argument(self, help_text: str): + """ + Add positional file argument. + + :param help_text: specific help text to be displayed + """ + self.add_argument("file", metavar="FILE", nargs="+", help=help_text) + + def add_q_unit_argument(self): + """ + Add default argument for selecting length unit of input data + between Å and nm. nm is default. + """ + self.add_argument( + "-u", + "--unit", + action="store", + choices=["nm", "Å", "A"], + help="Unit for q: inverse nm or Ångstrom?", + default="nm", + type=parse_unit, + ) + + def add_output_filename_argument(self): + """ Add default argument for specifying output format. """ + self.add_argument( + "-o", + "--output", + action="store", + help="Output filename", + default=None, + type=Path, + ) + + def add_output_data_format(self, *formats: str, default: str = None): + """ Add default argument for specifying output format. """ + help_string = "Output format: " + ", ".join(formats) + self.add_argument( + "-f", + "--format", + action="store", + help=help_string, + default=default, + type=str, + ) + + +class GuinierParser: + """ + Wrapper class for argparse ArgumentParser that provides predefined + arguments for auto_rg like programs. 
+ """ + + usage = "" + + def __init__(self, prog: str, description: str, epilog: str, **kwargs): + """ + Create parser argparse ArgumentParser with argument + - standardized usage text + - standardized version text + - verbose and version args added by default + + :param prog: name of the executable + :param description: description param of argparse ArgumentParser + :param epilog: epilog param of argparse ArgumentParser + :param kwargs: additional kwargs for argparse ArgumentParser + """ + + file_help_text = "dat files of the scattering curves" + self.parser = SASParser( + prog=prog, description=description, epilog=epilog, **kwargs + ) + self.parser.add_file_argument(help_text=file_help_text) + self.parser.add_output_filename_argument() + self.parser.add_output_data_format( + "native", "csv", "ssf", default="native" + ) + self.parser.add_q_unit_argument() + self.usage = self.parser.usage + + def parse_args(self, *args, **kwargs): + """ Wrapper for SASParser parse_args() """ + return self.parser.parse_args(*args, **kwargs) + + def add_argument(self, *args, **kwargs): + """ Wrapper for SASParser add_argument() """ + self.parser.add_argument(*args, **kwargs) diff --git a/src/freesas/sasio.py b/src/freesas/sasio.py new file mode 100644 index 0000000..cb7084a --- /dev/null +++ b/src/freesas/sasio.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Project: FreeSAS +# https://github.com/kif/freesas +# +# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France +# +# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) +# + +""" +Contains helper functions for loading SAS data from differents sources. +""" +__authors__ = ["Martha Brennich"] +__contact__ = "martha.brennich@googlemail.com" +__license__ = "MIT" +__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" +__date__ = "19/09/2022" +__status__ = "development" +__docformat__ = "restructuredtext" + +import io +from typing import List, Union +from os import PathLike +from numpy import loadtxt, array, ndarray + +PathType = Union[PathLike, str, bytes, io.StringIO, io.BytesIO] + + +def load_scattering_data(filename: PathType) -> ndarray: + """ + Load scattering data q, I, err into a numpy array. + + :param filename: ASCII file, 3 column (q,I,err) + :return: numpy array with 3 column (q,I,err) + """ + try: + data = loadtxt(filename) + except OSError as err: + raise OSError("File could not be read.") + except ValueError as err: + text = None + if isinstance(filename, (io.StringIO, io.BytesIO)): + filename.seek(0) + text = filename.readlines() + else: + try: + with open(filename) as data_file: + text = data_file.readlines() + except OSError: + raise OSError("File could not be read.") + if text is not None: + try: + data = parse_ascii_data(text, number_of_columns=3) + except ValueError: + raise ValueError( + "File does not seem to be " "in the format q, I, err. 
" + ) + return data + + +def parse_ascii_data( + input_file_text: List[str], number_of_columns: int +) -> ndarray: + """ + Parse data from an ascii file into an N column numpy array + + :param input_file_text: List containing one line of input data per element + :param number_of_columns: Expected number of lines in the data file + :return: numpy array with 3 column (q,I,err) + """ + data = [] + for line in input_file_text: + split = line.split() + if len(split) == number_of_columns: + try: + data.append([float(x) for x in split]) + except ValueError as err: + if "could not convert string to float" in err.args[0]: + pass + else: + raise + if data == []: + raise ValueError + return array(data) + + +def convert_inverse_angstrom_to_nanometer( + data_in_inverse_angstrom: ndarray, +) -> ndarray: + """ + Convert data with q in 1/Å to 1/nm. + + :param data_in_inverse_angstrom: numpy array in format + (q_in_inverse_Angstrom,I,err) + :return: numpy array with 3 column (q_in_inverse_nm,I,err) + """ + q_in_angstrom, intensity, err = data_in_inverse_angstrom.T + return array([q_in_angstrom * 10.0, intensity, err]).T diff --git a/src/freesas/transformations.py b/src/freesas/transformations.py new file mode 100644 index 0000000..7eea6a0 --- /dev/null +++ b/src/freesas/transformations.py @@ -0,0 +1,1918 @@ +# -*- coding: utf-8 -*- +# transformations.py + +# Copyright (c) 2006-2015, Christoph Gohlke +# Copyright (c) 2006-2015, The Regents of the University of California +# Produced at the Laboratory for Fluorescence Dynamics +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holders nor the names of any +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Homogeneous Transformation Matrices and Quaternions. + +A library for calculating 4x4 matrices for translating, rotating, reflecting, +scaling, shearing, projecting, orthogonalizing, and superimposing arrays of +3D homogeneous coordinates as well as for converting between rotation matrices, +Euler angles, and quaternions. Also includes an Arcball control object and +functions to decompose transformation matrices. 
+ +:Author: + `Christoph Gohlke `_ + +:Organization: + Laboratory for Fluorescence Dynamics, University of California, Irvine + +:Version: 2015.03.19 + +Requirements +------------ +* `CPython 2.7 or 3.4 `_ +* `Numpy 1.9 `_ +* `Transformations.c 2015.03.19 `_ + (recommended for speedup of some functions) + +Notes +----- +The API is not stable yet and is expected to change between revisions. + +This Python code is not optimized for speed. Refer to the transformations.c +module for a faster implementation of some functions. + +Documentation in HTML format can be generated with epydoc. + +Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using +numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using +numpy.dot(M, v) for shape (4, \*) column vectors, respectively +numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points"). + +This module follows the "column vectors on the right" and "row major storage" +(C contiguous) conventions. The translation components are in the right column +of the transformation matrix, i.e. M[:3, 3]. +The transpose of the transformation matrices may have to be used to interface +with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16]. + +Calculations are carried out with numpy.float64 precision. + +Vector, point, quaternion, and matrix function arguments are expected to be +"array like", i.e. tuple, list, or numpy arrays. + +Return types are numpy arrays unless specified otherwise. + +Angles are in radians unless specified otherwise. + +Quaternions w+ix+jy+kz are represented as [w, x, y, z]. + +A triple of Euler angles can be applied/interpreted in 24 ways, which can +be specified using a 4 character string or encoded 4-tuple: + + *Axes 4-string*: e.g. 'sxyz' or 'ryxy' + + - first character : rotations are applied to 's'tatic or 'r'otating frame + - remaining characters : successive rotation axis 'x', 'y', or 'z' + + *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) + + - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. + - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed + by 'z', or 'z' is followed by 'x'. Otherwise odd (1). + - repetition : first and last axis are same (1) or different (0). + - frame : rotations are applied to static (0) or rotating (1) frame. + +Other Python packages and modules for 3D transformations and quaternions: + +* `Transforms3d `_ + includes most code of this module. +* `Blender.mathutils `_ +* `numpy-dtypes `_ + +References +---------- +(1) Matrices and transformations. Ronald Goldman. + In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. +(2) More matrices and transformations: shear and pseudo-perspective. + Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. +(3) Decomposing a matrix into simple transformations. Spencer Thomas. + In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. +(4) Recovering the data from the transformation matrix. Ronald Goldman. + In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. +(5) Euler angle conversion. Ken Shoemake. + In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. +(6) Arcball rotation control. Ken Shoemake. + In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. +(7) Representing attitude: Euler angles, unit quaternions, and rotation + vectors. James Diebel. 2006. +(8) A discussion of the solution for the best rotation to relate two sets + of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. 
+(9) Closed-form solution of absolute orientation using unit quaternions. + BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642. +(10) Quaternions. Ken Shoemake. + http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf +(11) From quaternion to matrix and back. JMP van Waveren. 2005. + http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm +(12) Uniform random rotations. Ken Shoemake. + In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. +(13) Quaternion in molecular modeling. CFF Karney. + J Mol Graph Mod, 25(5):595-604 +(14) New method for extracting the quaternion from a rotation matrix. + Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087. +(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann. + Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130. +(16) Column Vectors vs. Row Vectors. + http://steve.hollasch.net/cgindex/math/matrix/column-vec.html + +Examples +-------- +>>> alpha, beta, gamma = 0.123, -1.234, 2.345 +>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] +>>> I = identity_matrix() +>>> Rx = rotation_matrix(alpha, xaxis) +>>> Ry = rotation_matrix(beta, yaxis) +>>> Rz = rotation_matrix(gamma, zaxis) +>>> R = concatenate_matrices(Rx, Ry, Rz) +>>> euler = euler_from_matrix(R, 'rxyz') +>>> numpy.allclose([alpha, beta, gamma], euler) +True +>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') +>>> is_same_transform(R, Re) +True +>>> al, be, ga = euler_from_matrix(Re, 'rxyz') +>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) +True +>>> qx = quaternion_about_axis(alpha, xaxis) +>>> qy = quaternion_about_axis(beta, yaxis) +>>> qz = quaternion_about_axis(gamma, zaxis) +>>> q = quaternion_multiply(qx, qy) +>>> q = quaternion_multiply(q, qz) +>>> Rq = quaternion_matrix(q) +>>> is_same_transform(R, Rq) +True +>>> S = scale_matrix(1.23, origin) +>>> T = translation_matrix([1, 2, 3]) +>>> Z = shear_matrix(beta, xaxis, origin, zaxis) +>>> R = random_rotation_matrix(numpy.random.rand(3)) +>>> M = concatenate_matrices(T, R, Z, S) +>>> scale, shear, angles, trans, persp = decompose_matrix(M) +>>> numpy.allclose(scale, 1.23) +True +>>> numpy.allclose(trans, [1, 2, 3]) +True +>>> numpy.allclose(shear, [0, math.tan(beta), 0]) +True +>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) +True +>>> M1 = compose_matrix(scale, shear, angles, trans, persp) +>>> is_same_transform(M, M1) +True +>>> v0, v1 = random_vector(3), random_vector(3) +>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1)) +>>> v2 = numpy.dot(v0, M[:3,:3].T) +>>> numpy.allclose(unit_vector(v1), unit_vector(v2)) +True + +""" + +from __future__ import division, print_function + +import math + +import numpy + +__version__ = '2015.03.19' +__docformat__ = 'restructuredtext en' +__all__ = () + + +def identity_matrix(): + """Return 4x4 identity/unit matrix. + + >>> I = identity_matrix() + >>> numpy.allclose(I, numpy.dot(I, I)) + True + >>> numpy.sum(I), numpy.trace(I) + (4.0, 4.0) + >>> numpy.allclose(I, numpy.identity(4)) + True + + """ + return numpy.identity(4) + + +def translation_matrix(direction): + """Return matrix to translate by direction vector. + + >>> v = numpy.random.random(3) - 0.5 + >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) + True + + """ + M = numpy.identity(4) + M[:3, 3] = direction[:3] + return M + + +def translation_from_matrix(matrix): + """Return translation vector from translation matrix. 
+ + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = translation_from_matrix(translation_matrix(v0)) + >>> numpy.allclose(v0, v1) + True + + """ + return numpy.array(matrix, copy=False)[:3, 3].copy() + + +def reflection_matrix(point, normal): + """Return matrix to mirror at plane defined by point and normal vector. + + >>> v0 = numpy.random.random(4) - 0.5 + >>> v0[3] = 1. + >>> v1 = numpy.random.random(3) - 0.5 + >>> R = reflection_matrix(v0, v1) + >>> numpy.allclose(2, numpy.trace(R)) + True + >>> numpy.allclose(v0, numpy.dot(R, v0)) + True + >>> v2 = v0.copy() + >>> v2[:3] += v1 + >>> v3 = v0.copy() + >>> v2[:3] -= v1 + >>> numpy.allclose(v2, numpy.dot(R, v3)) + True + + """ + normal = unit_vector(normal[:3]) + M = numpy.identity(4) + M[:3, :3] -= 2.0 * numpy.outer(normal, normal) + M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal + return M + + +def reflection_from_matrix(matrix): + """Return mirror plane point and normal vector from reflection matrix. + + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = numpy.random.random(3) - 0.5 + >>> M0 = reflection_matrix(v0, v1) + >>> point, normal = reflection_from_matrix(M0) + >>> M1 = reflection_matrix(point, normal) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + # normal: unit eigenvector corresponding to eigenvalue -1 + w, V = numpy.linalg.eig(M[:3, :3]) + i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue -1") + normal = numpy.real(V[:, i[0]]).squeeze() + # point: any unit eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return point, normal + + +def rotation_matrix(angle, direction, point=None): + """Return matrix to rotate about axis defined by point and direction. + + >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0]) + >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) + True + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) + >>> is_same_transform(R0, R1) + True + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(-angle, -direc, point) + >>> is_same_transform(R0, R1) + True + >>> I = numpy.identity(4, numpy.float64) + >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) + True + >>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2, + ... direc, point))) + True + + """ + sina = math.sin(angle) + cosa = math.cos(angle) + direction = unit_vector(direction[:3]) + # rotation matrix around unit vector + R = numpy.diag([cosa, cosa, cosa]) + R += numpy.outer(direction, direction) * (1.0 - cosa) + direction *= sina + R += numpy.array([[ 0.0, -direction[2], direction[1]], + [ direction[2], 0.0, -direction[0]], + [-direction[1], direction[0], 0.0]]) + M = numpy.identity(4) + M[:3, :3] = R + if point is not None: + # rotation not around origin + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + M[:3, 3] = point - numpy.dot(R, point) + return M + + +def rotation_from_matrix(matrix): + """Return rotation angle and axis from rotation matrix. 
+ + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> angle, direc, point = rotation_from_matrix(R0) + >>> R1 = rotation_matrix(angle, direc, point) + >>> is_same_transform(R0, R1) + True + + """ + R = numpy.array(matrix, dtype=numpy.float64, copy=False) + R33 = R[:3, :3] + # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 + w, W = numpy.linalg.eig(R33.T) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + direction = numpy.real(W[:, i[-1]]).squeeze() + # point: unit eigenvector of R33 corresponding to eigenvalue of 1 + w, Q = numpy.linalg.eig(R) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + point = numpy.real(Q[:, i[-1]]).squeeze() + point /= point[3] + # rotation angle depending on direction + cosa = (numpy.trace(R33) - 1.0) / 2.0 + if abs(direction[2]) > 1e-8: + sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] + elif abs(direction[1]) > 1e-8: + sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] + else: + sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] + angle = math.atan2(sina, cosa) + return angle, direction, point + + +def scale_matrix(factor, origin=None, direction=None): + """Return matrix to scale by factor around origin in direction. + + Use factor -1 for point symmetry. + + >>> v = (numpy.random.rand(4, 5) - 0.5) * 20 + >>> v[3] = 1 + >>> S = scale_matrix(-1.234) + >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) + True + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S = scale_matrix(factor, origin) + >>> S = scale_matrix(factor, origin, direct) + + """ + if direction is None: + # uniform scaling + M = numpy.diag([factor, factor, factor, 1.0]) + if origin is not None: + M[:3, 3] = origin[:3] + M[:3, 3] *= 1.0 - factor + else: + # nonuniform scaling + direction = unit_vector(direction[:3]) + factor = 1.0 - factor + M = numpy.identity(4) + M[:3, :3] -= factor * numpy.outer(direction, direction) + if origin is not None: + M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction + return M + + +def scale_from_matrix(matrix): + """Return scaling factor, origin and direction from scaling matrix. 
+ + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S0 = scale_matrix(factor, origin) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + >>> S0 = scale_matrix(factor, origin, direct) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + factor = numpy.trace(M33) - 2.0 + try: + # direction: unit eigenvector corresponding to eigenvalue factor + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0] + direction = numpy.real(V[:, i]).squeeze() + direction /= vector_norm(direction) + except IndexError: + # uniform scaling + factor = (factor + 2.0) / 3.0 + direction = None + # origin: any eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 1") + origin = numpy.real(V[:, i[-1]]).squeeze() + origin /= origin[3] + return factor, origin, direction + + +def projection_matrix(point, normal, direction=None, + perspective=None, pseudo=False): + """Return matrix to project onto plane defined by point and normal. + + Using either perspective point, projection direction, or none of both. + + If pseudo is True, perspective projections will preserve relative depth + such that Perspective = dot(Orthogonal, PseudoPerspective). + + >>> P = projection_matrix([0, 0, 0], [1, 0, 0]) + >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) + True + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> P1 = projection_matrix(point, normal, direction=direct) + >>> P2 = projection_matrix(point, normal, perspective=persp) + >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> is_same_transform(P2, numpy.dot(P0, P3)) + True + >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0]) + >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(P, v0) + >>> numpy.allclose(v1[1], v0[1]) + True + >>> numpy.allclose(v1[0], 3-v1[1]) + True + + """ + M = numpy.identity(4) + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + normal = unit_vector(normal[:3]) + if perspective is not None: + # perspective projection + perspective = numpy.array(perspective[:3], dtype=numpy.float64, + copy=False) + M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) + M[:3, :3] -= numpy.outer(perspective, normal) + if pseudo: + # preserve relative depth + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) + else: + M[:3, 3] = numpy.dot(point, normal) * perspective + M[3, :3] = -normal + M[3, 3] = numpy.dot(perspective, normal) + elif direction is not None: + # parallel projection + direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) + scale = numpy.dot(direction, normal) + M[:3, :3] -= numpy.outer(direction, normal) / scale + M[:3, 3] = direction * (numpy.dot(point, normal) / scale) + else: + # orthogonal projection + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, normal) * 
normal + return M + + +def projection_from_matrix(matrix, pseudo=False): + """Return projection plane and perspective point from projection matrix. + + Return values are same as arguments for projection_matrix function: + point, normal, direction, perspective, and pseudo. + + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, direct) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) + >>> result = projection_from_matrix(P0, pseudo=False) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> result = projection_from_matrix(P0, pseudo=True) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not pseudo and len(i): + # point: any eigenvector corresponding to eigenvalue 1 + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + # direction: unit eigenvector corresponding to eigenvalue 0 + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 0") + direction = numpy.real(V[:, i[0]]).squeeze() + direction /= vector_norm(direction) + # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 + w, V = numpy.linalg.eig(M33.T) + i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] + if len(i): + # parallel projection + normal = numpy.real(V[:, i[0]]).squeeze() + normal /= vector_norm(normal) + return point, normal, direction, None, False + else: + # orthogonal projection, where normal equals direction vector + return point, direction, None, None, False + else: + # perspective projection + i = numpy.where(abs(numpy.real(w)) > 1e-8)[0] + if not len(i): + raise ValueError( + "no eigenvector not corresponding to eigenvalue 0") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + normal = - M[3, :3] + perspective = M[:3, 3] / numpy.dot(point[:3], normal) + if pseudo: + perspective -= normal + return point, normal, None, perspective, pseudo + + +def clip_matrix(left, right, bottom, top, near, far, perspective=False): + """Return matrix to obtain normalized device coordinates from frustum. + + The frustum bounds are axis-aligned along x (left, right), + y (bottom, top) and z (near, far). + + Normalized device coordinates are in range [-1, 1] if coordinates are + inside the frustum. + + If perspective is True the frustum is a truncated pyramid with the + perspective point at origin and direction along z axis, otherwise an + orthographic canonical view volume (a box). + + Homogeneous coordinates transformed by the perspective clip matrix + need to be dehomogenized (divided by w coordinate). 
+ + >>> frustum = numpy.random.rand(6) + >>> frustum[1] += frustum[0] + >>> frustum[3] += frustum[2] + >>> frustum[5] += frustum[4] + >>> M = clip_matrix(perspective=False, *frustum) + >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) + array([-1., -1., -1., 1.]) + >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1]) + array([ 1., 1., 1., 1.]) + >>> M = clip_matrix(perspective=True, *frustum) + >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) + >>> v / v[3] + array([-1., -1., -1., 1.]) + >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1]) + >>> v / v[3] + array([ 1., 1., -1., 1.]) + + """ + if left >= right or bottom >= top or near >= far: + raise ValueError("invalid frustum") + if perspective: + if near <= _EPS: + raise ValueError("invalid frustum: near <= 0") + t = 2.0 * near + M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0], + [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0], + [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)], + [0.0, 0.0, -1.0, 0.0]] + else: + M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)], + [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)], + [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)], + [0.0, 0.0, 0.0, 1.0]] + return numpy.array(M) + + +def shear_matrix(angle, direction, point, normal): + """Return matrix to shear by angle along direction vector on shear plane. + + The shear plane is defined by a point and normal vector. The direction + vector must be orthogonal to the plane's normal vector. + + A point P is transformed by the shear matrix into P" such that + the vector P-P" is parallel to the direction vector and its extent is + given by the angle of P-P'-P", where P' is the orthogonal projection + of P onto the shear plane. + + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S = shear_matrix(angle, direct, point, normal) + >>> numpy.allclose(1, numpy.linalg.det(S)) + True + + """ + normal = unit_vector(normal[:3]) + direction = unit_vector(direction[:3]) + if abs(numpy.dot(normal, direction)) > 1e-6: + raise ValueError("direction and normal vectors are not orthogonal") + angle = math.tan(angle) + M = numpy.identity(4) + M[:3, :3] += angle * numpy.outer(direction, normal) + M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction + return M + + +def shear_from_matrix(matrix): + """Return shear angle, direction and plane from shear matrix. 
+ + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S0 = shear_matrix(angle, direct, point, normal) + >>> angle, direct, point, normal = shear_from_matrix(S0) + >>> S1 = shear_matrix(angle, direct, point, normal) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + # normal: cross independent eigenvectors corresponding to the eigenvalue 1 + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0] + if len(i) < 2: + raise ValueError("no two linear independent eigenvectors found %s" % w) + V = numpy.real(V[:, i]).squeeze().T + lenorm = -1.0 + for i0, i1 in ((0, 1), (0, 2), (1, 2)): + n = numpy.cross(V[i0], V[i1]) + w = vector_norm(n) + if w > lenorm: + lenorm = w + normal = n + normal /= lenorm + # direction and angle + direction = numpy.dot(M33 - numpy.identity(3), normal) + angle = vector_norm(direction) + direction /= angle + angle = math.atan(angle) + # point: eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 1") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return angle, direction, point, normal + + +def decompose_matrix(matrix): + """Return sequence of transformations from transformation matrix. + + matrix : array_like + Non-degenerative homogeneous transformation matrix + + Return tuple of: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + Raise ValueError if matrix is of wrong type or degenerative. 
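+
+    A minimal additional check (illustrative): a pure axis-aligned scaling
+    decomposes into its diagonal factors.
+
+    >>> scale, shear, angles, trans, persp = decompose_matrix(numpy.diag([2, 3, 4, 1]))
+    >>> numpy.allclose(scale, [2, 3, 4])
+    True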
+ + >>> T0 = translation_matrix([1, 2, 3]) + >>> scale, shear, angles, trans, persp = decompose_matrix(T0) + >>> T1 = translation_matrix(trans) + >>> numpy.allclose(T0, T1) + True + >>> S = scale_matrix(0.123) + >>> scale, shear, angles, trans, persp = decompose_matrix(S) + >>> scale[0] + 0.123 + >>> R0 = euler_matrix(1, 2, 3) + >>> scale, shear, angles, trans, persp = decompose_matrix(R0) + >>> R1 = euler_matrix(*angles) + >>> numpy.allclose(R0, R1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=True).T + if abs(M[3, 3]) < _EPS: + raise ValueError("M[3, 3] is zero") + M /= M[3, 3] + P = M.copy() + P[:, 3] = 0.0, 0.0, 0.0, 1.0 + if not numpy.linalg.det(P): + raise ValueError("matrix is singular") + + scale = numpy.zeros((3, )) + shear = [0.0, 0.0, 0.0] + angles = [0.0, 0.0, 0.0] + + if any(abs(M[:3, 3]) > _EPS): + perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) + M[:, 3] = 0.0, 0.0, 0.0, 1.0 + else: + perspective = numpy.array([0.0, 0.0, 0.0, 1.0]) + + translate = M[3, :3].copy() + M[3, :3] = 0.0 + + row = M[:3, :3].copy() + scale[0] = vector_norm(row[0]) + row[0] /= scale[0] + shear[0] = numpy.dot(row[0], row[1]) + row[1] -= row[0] * shear[0] + scale[1] = vector_norm(row[1]) + row[1] /= scale[1] + shear[0] /= scale[1] + shear[1] = numpy.dot(row[0], row[2]) + row[2] -= row[0] * shear[1] + shear[2] = numpy.dot(row[1], row[2]) + row[2] -= row[1] * shear[2] + scale[2] = vector_norm(row[2]) + row[2] /= scale[2] + shear[1:] /= scale[2] + + if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: + numpy.negative(scale, scale) + numpy.negative(row, row) + + angles[1] = math.asin(-row[0, 2]) + if math.cos(angles[1]): + angles[0] = math.atan2(row[1, 2], row[2, 2]) + angles[2] = math.atan2(row[0, 1], row[0, 0]) + else: + #angles[0] = math.atan2(row[1, 0], row[1, 1]) + angles[0] = math.atan2(-row[2, 1], row[1, 1]) + angles[2] = 0.0 + + return scale, shear, angles, translate, perspective + + +def compose_matrix(scale=None, shear=None, angles=None, translate=None, + perspective=None): + """Return transformation matrix from sequence of transformations. + + This is the inverse of the decompose_matrix function. 
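+
+    Any argument left as None is skipped, so a single partial transformation
+    can be composed on its own (illustrative addition):
+
+    >>> M = compose_matrix(translate=[1, 2, 3])
+    >>> numpy.allclose(M[:3, 3], [1, 2, 3])
+    True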
+ + Sequence of transformations: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + >>> scale = numpy.random.random(3) - 0.5 + >>> shear = numpy.random.random(3) - 0.5 + >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) + >>> trans = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(4) - 0.5 + >>> M0 = compose_matrix(scale, shear, angles, trans, persp) + >>> result = decompose_matrix(M0) + >>> M1 = compose_matrix(*result) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.identity(4) + if perspective is not None: + P = numpy.identity(4) + P[3, :] = perspective[:4] + M = numpy.dot(M, P) + if translate is not None: + T = numpy.identity(4) + T[:3, 3] = translate[:3] + M = numpy.dot(M, T) + if angles is not None: + R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') + M = numpy.dot(M, R) + if shear is not None: + Z = numpy.identity(4) + Z[1, 2] = shear[2] + Z[0, 2] = shear[1] + Z[0, 1] = shear[0] + M = numpy.dot(M, Z) + if scale is not None: + S = numpy.identity(4) + S[0, 0] = scale[0] + S[1, 1] = scale[1] + S[2, 2] = scale[2] + M = numpy.dot(M, S) + M /= M[3, 3] + return M + + +def orthogonalization_matrix(lengths, angles): + """Return orthogonalization matrix for crystallographic cell coordinates. + + Angles are expected in degrees. + + The de-orthogonalization matrix is the inverse. + + >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90]) + >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) + True + >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) + >>> numpy.allclose(numpy.sum(O), 43.063229) + True + + """ + a, b, c = lengths + angles = numpy.radians(angles) + sina, sinb, _ = numpy.sin(angles) + cosa, cosb, cosg = numpy.cos(angles) + co = (cosa * cosb - cosg) / (sina * sinb) + return numpy.array([ + [ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0], + [-a*sinb*co, b*sina, 0.0, 0.0], + [ a*cosb, b*cosa, c, 0.0], + [ 0.0, 0.0, 0.0, 1.0]]) + + +def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): + """Return affine transform matrix to register two point sets. + + v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous + coordinates, where ndims is the dimensionality of the coordinate space. + + If shear is False, a similarity transformation matrix is returned. + If also scale is False, a rigid/Euclidean transformation matrix + is returned. + + By default the algorithm by Hartley and Zissermann [15] is used. + If usesvd is True, similarity and Euclidean transformation matrices + are calculated by minimizing the weighted sum of squared deviations + (RMSD) according to the algorithm by Kabsch [8]. + Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9] + is used, which is slower when using this Python implementation. + + The returned matrix performs rotation, translation and uniform scaling + (if specified). + + >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]] + >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]] + >>> affine_matrix_from_points(v0, v1) + array([[ 0.14549, 0.00062, 675.50008], + [ 0.00048, 0.14094, 53.24971], + [ 0. , 0. , 1. 
]]) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> S = scale_matrix(random.random()) + >>> M = concatenate_matrices(T, R, S) + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1) + >>> M = affine_matrix_from_points(v0[:3], v1[:3]) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + + More examples in superimposition_matrix() + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=True) + v1 = numpy.array(v1, dtype=numpy.float64, copy=True) + + ndims = v0.shape[0] + if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape: + raise ValueError("input arrays are of wrong shape or type") + + # move centroids to origin + t0 = -numpy.mean(v0, axis=1) + M0 = numpy.identity(ndims+1) + M0[:ndims, ndims] = t0 + v0 += t0.reshape(ndims, 1) + t1 = -numpy.mean(v1, axis=1) + M1 = numpy.identity(ndims+1) + M1[:ndims, ndims] = t1 + v1 += t1.reshape(ndims, 1) + + if shear: + # Affine transformation + A = numpy.concatenate((v0, v1), axis=0) + u, s, vh = numpy.linalg.svd(A.T) + vh = vh[:ndims].T + B = vh[:ndims] + C = vh[ndims:2*ndims] + t = numpy.dot(C, numpy.linalg.pinv(B)) + t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1) + M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,))) + elif usesvd or ndims != 3: + # Rigid transformation via SVD of covariance matrix + u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) + # rotation matrix from SVD orthonormal bases + R = numpy.dot(u, vh) + if numpy.linalg.det(R) < 0.0: + # R does not constitute right handed system + R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0) + s[-1] *= -1.0 + # homogeneous transformation matrix + M = numpy.identity(ndims+1) + M[:ndims, :ndims] = R + else: + # Rigid transformation matrix via quaternion + # compute symmetric matrix N + xx, yy, zz = numpy.sum(v0 * v1, axis=1) + xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) + xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) + N = [[xx+yy+zz, 0.0, 0.0, 0.0], + [yz-zy, xx-yy-zz, 0.0, 0.0], + [zx-xz, xy+yx, yy-xx-zz, 0.0], + [xy-yx, zx+xz, yz+zy, zz-xx-yy]] + # quaternion: eigenvector corresponding to most positive eigenvalue + w, V = numpy.linalg.eigh(N) + q = V[:, numpy.argmax(w)] + q /= vector_norm(q) # unit quaternion + # homogeneous transformation matrix + M = quaternion_matrix(q) + + if scale and not shear: + # Affine transformation; scale is ratio of RMS deviations from centroid + v0 *= v0 + v1 *= v1 + M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) + + # move centroids back + M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0)) + M /= M[ndims, ndims] + return M + + +def superimposition_matrix(v0, v1, scale=False, usesvd=True): + """Return matrix to transform given 3D point set into second point set. + + v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points. + + The parameters scale and usesvd are explained in the more general + affine_matrix_from_points function. + + The returned matrix is a similarity or Euclidean transformation matrix. + This function has a fast C implementation in transformations.c. 
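+
+    The simplest non-trivial case (illustrative addition): registering a
+    point cloud against a translated copy of itself recovers the translation.
+
+    >>> v0 = numpy.random.rand(3, 10)
+    >>> v1 = v0 + numpy.array([[1.0], [2.0], [3.0]])
+    >>> M = superimposition_matrix(v0, v1)
+    >>> numpy.allclose(M[:3, 3], [1, 2, 3])
+    True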
+ + >>> v0 = numpy.random.rand(3, 10) + >>> M = superimposition_matrix(v0, v0) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]] + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> S = scale_matrix(random.random()) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> M = concatenate_matrices(T, R, S) + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1) + >>> M = superimposition_matrix(v0, v1, scale=True) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v = numpy.empty((4, 100, 3)) + >>> v[:, :, 0] = v0 + >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) + True + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] + v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] + return affine_matrix_from_points(v0, v1, shear=False, + scale=scale, usesvd=usesvd) + + +def euler_matrix(ai, aj, ak, axes='sxyz'): + """Return homogeneous rotation matrix from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> R = euler_matrix(1, 2, 3, 'syxz') + >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) + True + >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) + >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) + True + >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + >>> for axes in _TUPLE2AXES.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + if frame: + ai, ak = ak, ai + if parity: + ai, aj, ak = -ai, -aj, -ak + + si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) + ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) + cc, cs = ci*ck, ci*sk + sc, ss = si*ck, si*sk + + M = numpy.identity(4) + if repetition: + M[i, i] = cj + M[i, j] = sj*si + M[i, k] = sj*ci + M[j, i] = sj*sk + M[j, j] = -cj*ss+cc + M[j, k] = -cj*cs-sc + M[k, i] = -sj*ck + M[k, j] = cj*sc+cs + M[k, k] = cj*cc-ss + else: + M[i, i] = cj*ck + M[i, j] = sj*sc-cs + M[i, k] = sj*cc+ss + M[j, i] = cj*sk + M[j, j] = sj*ss+cc + M[j, k] = sj*cs-sc + M[k, i] = -sj + M[k, j] = cj*si + M[k, k] = cj*ci + return M + + +def euler_from_matrix(matrix, axes='sxyz'): + """Return Euler angles from rotation matrix for specified axis sequence. + + axes : One of 24 axis sequences as string or encoded tuple + + Note that many Euler angle triplets can describe one matrix. + + >>> R0 = euler_matrix(1, 2, 3, 'syxz') + >>> al, be, ga = euler_from_matrix(R0, 'syxz') + >>> R1 = euler_matrix(al, be, ga, 'syxz') + >>> numpy.allclose(R0, R1) + True + >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R0 = euler_matrix(axes=axes, *angles) + ... 
R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) + ... if not numpy.allclose(R0, R1): print(axes, "failed") + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i+parity] + k = _NEXT_AXIS[i-parity+1] + + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] + if repetition: + sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) + if sy > _EPS: + ax = math.atan2( M[i, j], M[i, k]) + ay = math.atan2( sy, M[i, i]) + az = math.atan2( M[j, i], -M[k, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2( sy, M[i, i]) + az = 0.0 + else: + cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) + if cy > _EPS: + ax = math.atan2( M[k, j], M[k, k]) + ay = math.atan2(-M[k, i], cy) + az = math.atan2( M[j, i], M[i, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2(-M[k, i], cy) + az = 0.0 + + if parity: + ax, ay, az = -ax, -ay, -az + if frame: + ax, az = az, ax + return ax, ay, az + + +def euler_from_quaternion(quaternion, axes='sxyz'): + """Return Euler angles from quaternion for specified axis sequence. + + >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0]) + >>> numpy.allclose(angles, [0.123, 0, 0]) + True + + """ + return euler_from_matrix(quaternion_matrix(quaternion), axes) + + +def quaternion_from_euler(ai, aj, ak, axes='sxyz'): + """Return quaternion from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') + >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435]) + True + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + 1 + j = _NEXT_AXIS[i+parity-1] + 1 + k = _NEXT_AXIS[i-parity] + 1 + + if frame: + ai, ak = ak, ai + if parity: + aj = -aj + + ai /= 2.0 + aj /= 2.0 + ak /= 2.0 + ci = math.cos(ai) + si = math.sin(ai) + cj = math.cos(aj) + sj = math.sin(aj) + ck = math.cos(ak) + sk = math.sin(ak) + cc = ci*ck + cs = ci*sk + sc = si*ck + ss = si*sk + + q = numpy.empty((4, )) + if repetition: + q[0] = cj*(cc - ss) + q[i] = cj*(cs + sc) + q[j] = sj*(cc + ss) + q[k] = sj*(cs - sc) + else: + q[0] = cj*cc + sj*ss + q[i] = cj*sc - sj*cs + q[j] = cj*ss + sj*cc + q[k] = cj*cs - sj*sc + if parity: + q[j] *= -1.0 + + return q + + +def quaternion_about_axis(angle, axis): + """Return quaternion for rotation about axis. + + >>> q = quaternion_about_axis(0.123, [1, 0, 0]) + >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0]) + True + + """ + q = numpy.array([0.0, axis[0], axis[1], axis[2]]) + qlen = vector_norm(q) + if qlen > _EPS: + q *= math.sin(angle/2.0) / qlen + q[0] = math.cos(angle/2.0) + return q + + +def quaternion_matrix(quaternion): + """Return homogeneous rotation matrix from quaternion. 
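+
+    Quaternions are given in [w, x, y, z] order. As an illustrative addition:
+    the unit quaternion [0.5, 0.5, 0.5, 0.5] rotates by 120 degrees about the
+    [1, 1, 1] axis and therefore permutes the coordinate axes cyclically.
+
+    >>> M = quaternion_matrix([0.5, 0.5, 0.5, 0.5])
+    >>> numpy.allclose(M[:3, :3], [[0, 0, 1], [1, 0, 0], [0, 1, 0]])
+    True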
+ + >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0]) + >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0])) + True + >>> M = quaternion_matrix([1, 0, 0, 0]) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> M = quaternion_matrix([0, 1, 0, 0]) + >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1])) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + n = numpy.dot(q, q) + if n < _EPS: + return numpy.identity(4) + q *= math.sqrt(2.0 / n) + q = numpy.outer(q, q) + return numpy.array([ + [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0], + [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0], + [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0], + [ 0.0, 0.0, 0.0, 1.0]]) + + +def quaternion_from_matrix(matrix, isprecise=False): + """Return quaternion from rotation matrix. + + If isprecise is True, the input matrix is assumed to be a precise rotation + matrix and a faster algorithm is used. + + >>> q = quaternion_from_matrix(numpy.identity(4), True) + >>> numpy.allclose(q, [1, 0, 0, 0]) + True + >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1])) + >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0]) + True + >>> R = rotation_matrix(0.123, (1, 2, 3)) + >>> q = quaternion_from_matrix(R, True) + >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786]) + True + >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0], + ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]] + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611]) + True + >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0], + ... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]] + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603]) + True + >>> R = random_rotation_matrix() + >>> q = quaternion_from_matrix(R) + >>> is_same_transform(R, quaternion_matrix(q)) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] + if isprecise: + q = numpy.empty((4, )) + t = numpy.trace(M) + if t > M[3, 3]: + q[0] = t + q[3] = M[1, 0] - M[0, 1] + q[2] = M[0, 2] - M[2, 0] + q[1] = M[2, 1] - M[1, 2] + else: + i, j, k = 1, 2, 3 + if M[1, 1] > M[0, 0]: + i, j, k = 2, 3, 1 + if M[2, 2] > M[i, i]: + i, j, k = 3, 1, 2 + t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] + q[i] = t + q[j] = M[i, j] + M[j, i] + q[k] = M[k, i] + M[i, k] + q[3] = M[k, j] - M[j, k] + q *= 0.5 / math.sqrt(t * M[3, 3]) + else: + m00 = M[0, 0] + m01 = M[0, 1] + m02 = M[0, 2] + m10 = M[1, 0] + m11 = M[1, 1] + m12 = M[1, 2] + m20 = M[2, 0] + m21 = M[2, 1] + m22 = M[2, 2] + # symmetric matrix K + K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0], + [m01+m10, m11-m00-m22, 0.0, 0.0], + [m02+m20, m12+m21, m22-m00-m11, 0.0], + [m21-m12, m02-m20, m10-m01, m00+m11+m22]]) + K /= 3.0 + # quaternion is eigenvector of K that corresponds to largest eigenvalue + w, V = numpy.linalg.eigh(K) + q = V[[3, 0, 1, 2], numpy.argmax(w)] + if q[0] < 0.0: + numpy.negative(q, q) + return q + + +def quaternion_multiply(quaternion1, quaternion0): + """Return multiplication of two quaternions. 
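+
+    The product follows Hamilton's convention and is not commutative; the
+    identity quaternion [1, 0, 0, 0] leaves the other operand unchanged
+    (illustrative addition):
+
+    >>> q = quaternion_multiply([1, 0, 0, 0], [4, 1, -2, 3])
+    >>> numpy.allclose(q, [4, 1, -2, 3])
+    True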
+ + >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7]) + >>> numpy.allclose(q, [28, -44, -14, 48]) + True + + """ + w0, x0, y0, z0 = quaternion0 + w1, x1, y1, z1 = quaternion1 + return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0, + x1*w0 + y1*z0 - z1*y0 + w1*x0, + -x1*z0 + y1*w0 + z1*x0 + w1*y0, + x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64) + + +def quaternion_conjugate(quaternion): + """Return conjugate of quaternion. + + >>> q0 = random_quaternion() + >>> q1 = quaternion_conjugate(q0) + >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:]) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + numpy.negative(q[1:], q[1:]) + return q + + +def quaternion_inverse(quaternion): + """Return inverse of quaternion. + + >>> q0 = random_quaternion() + >>> q1 = quaternion_inverse(q0) + >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + numpy.negative(q[1:], q[1:]) + return q / numpy.dot(q, q) + + +def quaternion_real(quaternion): + """Return real part of quaternion. + + >>> quaternion_real([3, 0, 1, 2]) + 3.0 + + """ + return float(quaternion[0]) + + +def quaternion_imag(quaternion): + """Return imaginary part of quaternion. + + >>> quaternion_imag([3, 0, 1, 2]) + array([ 0., 1., 2.]) + + """ + return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True) + + +def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): + """Return spherical linear interpolation between two quaternions. + + >>> q0 = random_quaternion() + >>> q1 = random_quaternion() + >>> q = quaternion_slerp(q0, q1, 0) + >>> numpy.allclose(q, q0) + True + >>> q = quaternion_slerp(q0, q1, 1, 1) + >>> numpy.allclose(q, q1) + True + >>> q = quaternion_slerp(q0, q1, 0.5) + >>> angle = math.acos(numpy.dot(q0, q)) + >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \ + numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle) + True + + """ + q0 = unit_vector(quat0[:4]) + q1 = unit_vector(quat1[:4]) + if fraction == 0.0: + return q0 + elif fraction == 1.0: + return q1 + d = numpy.dot(q0, q1) + if abs(abs(d) - 1.0) < _EPS: + return q0 + if shortestpath and d < 0.0: + # invert rotation + d = -d + numpy.negative(q1, q1) + angle = math.acos(d) + spin * math.pi + if abs(angle) < _EPS: + return q0 + isin = 1.0 / math.sin(angle) + q0 *= math.sin((1.0 - fraction) * angle) * isin + q1 *= math.sin(fraction * angle) * isin + q0 += q1 + return q0 + + +def random_quaternion(rand=None): + """Return uniform random unit quaternion. + + rand: array like or None + Three independent random variables that are uniformly distributed + between 0 and 1. + + >>> q = random_quaternion() + >>> numpy.allclose(1, vector_norm(q)) + True + >>> q = random_quaternion(numpy.random.random(3)) + >>> len(q.shape), q.shape[0]==4 + (1, True) + + """ + if rand is None: + rand = numpy.random.rand(3) + else: + assert len(rand) == 3 + r1 = numpy.sqrt(1.0 - rand[0]) + r2 = numpy.sqrt(rand[0]) + pi2 = math.pi * 2.0 + t1 = pi2 * rand[1] + t2 = pi2 * rand[2] + return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1, + numpy.cos(t1)*r1, numpy.sin(t2)*r2]) + + +def random_rotation_matrix(rand=None): + """Return uniform random rotation matrix. + + rand: array like + Three independent random variables that are uniformly distributed + between 0 and 1 for each returned quaternion. 
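+
+    A proper rotation matrix also has determinant +1 (illustrative addition):
+
+    >>> numpy.allclose(numpy.linalg.det(random_rotation_matrix()), 1.0)
+    True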
+ + >>> R = random_rotation_matrix() + >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) + True + + """ + return quaternion_matrix(random_quaternion(rand)) + + +class Arcball(object): + """Virtual Trackball Control. + + >>> ball = Arcball() + >>> ball = Arcball(initial=numpy.identity(4)) + >>> ball.place([320, 320], 320) + >>> ball.down([500, 250]) + >>> ball.drag([475, 275]) + >>> R = ball.matrix() + >>> numpy.allclose(numpy.sum(R), 3.90583455) + True + >>> ball = Arcball(initial=[1, 0, 0, 0]) + >>> ball.place([320, 320], 320) + >>> ball.setaxes([1, 1, 0], [-1, 1, 0]) + >>> ball.constrain = True + >>> ball.down([400, 200]) + >>> ball.drag([200, 400]) + >>> R = ball.matrix() + >>> numpy.allclose(numpy.sum(R), 0.2055924) + True + >>> ball.next() + + """ + def __init__(self, initial=None): + """Initialize virtual trackball control. + + initial : quaternion or rotation matrix + + """ + self._axis = None + self._axes = None + self._radius = 1.0 + self._center = [0.0, 0.0] + self._vdown = numpy.array([0.0, 0.0, 1.0]) + self._constrain = False + if initial is None: + self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0]) + else: + initial = numpy.array(initial, dtype=numpy.float64) + if initial.shape == (4, 4): + self._qdown = quaternion_from_matrix(initial) + elif initial.shape == (4, ): + initial /= vector_norm(initial) + self._qdown = initial + else: + raise ValueError("initial not a quaternion or matrix") + self._qnow = self._qpre = self._qdown + + def place(self, center, radius): + """Place Arcball, e.g. when window size changes. + + center : sequence[2] + Window coordinates of trackball center. + radius : float + Radius of trackball in window coordinates. + + """ + self._radius = float(radius) + self._center[0] = center[0] + self._center[1] = center[1] + + def setaxes(self, *axes): + """Set axes to constrain rotations.""" + if axes is None: + self._axes = None + else: + self._axes = [unit_vector(axis) for axis in axes] + + @property + def constrain(self): + """Return state of constrain to axis mode.""" + return self._constrain + + @constrain.setter + def constrain(self, value): + """Set state of constrain to axis mode.""" + self._constrain = bool(value) + + def down(self, point): + """Set initial cursor window coordinates and pick constrain-axis.""" + self._vdown = arcball_map_to_sphere(point, self._center, self._radius) + self._qdown = self._qpre = self._qnow + if self._constrain and self._axes is not None: + self._axis = arcball_nearest_axis(self._vdown, self._axes) + self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) + else: + self._axis = None + + def drag(self, point): + """Update current cursor window coordinates.""" + vnow = arcball_map_to_sphere(point, self._center, self._radius) + if self._axis is not None: + vnow = arcball_constrain_to_axis(vnow, self._axis) + self._qpre = self._qnow + t = numpy.cross(self._vdown, vnow) + if numpy.dot(t, t) < _EPS: + self._qnow = self._qdown + else: + q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]] + self._qnow = quaternion_multiply(q, self._qdown) + + def next(self, acceleration=0.0): + """Continue rotation in direction of last drag.""" + q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False) + self._qpre, self._qnow = self._qnow, q + + def matrix(self): + """Return homogeneous rotation matrix.""" + return quaternion_matrix(self._qnow) + + +def arcball_map_to_sphere(point, center, radius): + """Return unit sphere coordinates from window coordinates.""" + v0 = (point[0] - center[0]) / radius + v1 = (center[1] - 
point[1]) / radius + n = v0*v0 + v1*v1 + if n > 1.0: + # position outside of sphere + n = math.sqrt(n) + return numpy.array([v0/n, v1/n, 0.0]) + else: + return numpy.array([v0, v1, math.sqrt(1.0 - n)]) + + +def arcball_constrain_to_axis(point, axis): + """Return sphere point perpendicular to axis.""" + v = numpy.array(point, dtype=numpy.float64, copy=True) + a = numpy.array(axis, dtype=numpy.float64, copy=True) + v -= a * numpy.dot(a, v) # on plane + n = vector_norm(v) + if n > _EPS: + if v[2] < 0.0: + numpy.negative(v, v) + v /= n + return v + if a[2] == 1.0: + return numpy.array([1.0, 0.0, 0.0]) + return unit_vector([-a[1], a[0], 0.0]) + + +def arcball_nearest_axis(point, axes): + """Return axis, which arc is nearest to point.""" + point = numpy.array(point, dtype=numpy.float64, copy=False) + nearest = None + mx = -1.0 + for axis in axes: + t = numpy.dot(arcball_constrain_to_axis(point, axis), point) + if t > mx: + nearest = axis + mx = t + return nearest + + +# epsilon for testing whether a number is close to zero +_EPS = numpy.finfo(float).eps * 4.0 + +# axis sequences for Euler angles +_NEXT_AXIS = [1, 2, 0, 1] + +# map axes strings to/from tuples of inner axis, parity, repetition, frame +_AXES2TUPLE = { + 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), + 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), + 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), + 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), + 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), + 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), + 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), + 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} + +_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) + + +def vector_norm(data, axis=None, out=None): + """Return length, i.e. Euclidean norm, of ndarray along axis. + + >>> v = numpy.random.random(3) + >>> n = vector_norm(v) + >>> numpy.allclose(n, numpy.linalg.norm(v)) + True + >>> v = numpy.random.rand(6, 5, 3) + >>> n = vector_norm(v, axis=-1) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2))) + True + >>> n = vector_norm(v, axis=1) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) + True + >>> v = numpy.random.rand(5, 4, 3) + >>> n = numpy.empty((5, 3)) + >>> vector_norm(v, axis=1, out=n) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) + True + >>> vector_norm([]) + 0.0 + >>> vector_norm([1]) + 1.0 + + """ + data = numpy.array(data, dtype=numpy.float64, copy=True) + if out is None: + if data.ndim == 1: + return math.sqrt(numpy.dot(data, data)) + data *= data + out = numpy.atleast_1d(numpy.sum(data, axis=axis)) + numpy.sqrt(out, out) + return out + else: + data *= data + numpy.sum(data, axis=axis, out=out) + numpy.sqrt(out, out) + + +def unit_vector(data, axis=None, out=None): + """Return ndarray normalized by length, i.e. Euclidean norm, along axis. 
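+
+    A hand-checkable case (illustrative addition), the 3-4-5 triangle:
+
+    >>> numpy.allclose(unit_vector([3.0, 4.0]), [0.6, 0.8])
+    True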
+ + >>> v0 = numpy.random.random(3) + >>> v1 = unit_vector(v0) + >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) + True + >>> v0 = numpy.random.rand(5, 4, 3) + >>> v1 = unit_vector(v0, axis=-1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) + >>> numpy.allclose(v1, v2) + True + >>> v1 = unit_vector(v0, axis=1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) + >>> numpy.allclose(v1, v2) + True + >>> v1 = numpy.empty((5, 4, 3)) + >>> unit_vector(v0, axis=1, out=v1) + >>> numpy.allclose(v1, v2) + True + >>> list(unit_vector([])) + [] + >>> list(unit_vector([1])) + [1.0] + + """ + if out is None: + data = numpy.array(data, dtype=numpy.float64, copy=True) + if data.ndim == 1: + data /= math.sqrt(numpy.dot(data, data)) + return data + else: + if out is not data: + out[:] = numpy.array(data, copy=False) + data = out + length = numpy.atleast_1d(numpy.sum(data*data, axis)) + numpy.sqrt(length, length) + if axis is not None: + length = numpy.expand_dims(length, axis) + data /= length + if out is None: + return data + + +def random_vector(size): + """Return array of random doubles in the half-open interval [0.0, 1.0). + + >>> v = random_vector(10000) + >>> numpy.all(v >= 0) and numpy.all(v < 1) + True + >>> v0 = random_vector(10) + >>> v1 = random_vector(10) + >>> numpy.any(v0 == v1) + False + + """ + return numpy.random.random(size) + + +def vector_product(v0, v1, axis=0): + """Return vector perpendicular to vectors. + + >>> v = vector_product([2, 0, 0], [0, 3, 0]) + >>> numpy.allclose(v, [0, 0, 6]) + True + >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] + >>> v1 = [[3], [0], [0]] + >>> v = vector_product(v0, v1) + >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]]) + True + >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] + >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] + >>> v = vector_product(v0, v1, axis=1) + >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]]) + True + + """ + return numpy.cross(v0, v1, axis=axis) + + +def angle_between_vectors(v0, v1, directed=True, axis=0): + """Return angle between vectors. + + If directed is False, the input vectors are interpreted as undirected axes, + i.e. the maximum angle is pi/2. + + >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3]) + >>> numpy.allclose(a, math.pi) + True + >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False) + >>> numpy.allclose(a, 0) + True + >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] + >>> v1 = [[3], [0], [0]] + >>> a = angle_between_vectors(v0, v1) + >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532]) + True + >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] + >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] + >>> a = angle_between_vectors(v0, v1, axis=1) + >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532]) + True + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=False) + v1 = numpy.array(v1, dtype=numpy.float64, copy=False) + dot = numpy.sum(v0 * v1, axis=axis) + dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis) + return numpy.arccos(dot if directed else numpy.fabs(dot)) + + +def inverse_matrix(matrix): + """Return inverse of square transformation matrix. + + >>> M0 = random_rotation_matrix() + >>> M1 = inverse_matrix(M0.T) + >>> numpy.allclose(M1, numpy.linalg.inv(M0.T)) + True + >>> for size in range(1, 7): + ... M0 = numpy.random.rand(size, size) + ... M1 = inverse_matrix(M0) + ... 
if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size) + + """ + return numpy.linalg.inv(matrix) + + +def concatenate_matrices(*matrices): + """Return concatenation of series of transformation matrices. + + >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 + >>> numpy.allclose(M, concatenate_matrices(M)) + True + >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) + True + + """ + M = numpy.identity(4) + for i in matrices: + M = numpy.dot(M, i) + return M + + +def is_same_transform(matrix0, matrix1): + """Return True if two matrices perform same transformation. + + >>> is_same_transform(numpy.identity(4), numpy.identity(4)) + True + >>> is_same_transform(numpy.identity(4), random_rotation_matrix()) + False + + """ + matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True) + matrix0 /= matrix0[3, 3] + matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True) + matrix1 /= matrix1[3, 3] + return numpy.allclose(matrix0, matrix1) + + +def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'): + """Try import all public attributes from module into global namespace. + + Existing attributes with name clashes are renamed with prefix. + Attributes starting with underscore are ignored by default. + + Return True on successful import. + + """ + import warnings + from importlib import import_module + try: + if not package: + module = import_module(name) + else: + module = import_module('.' + name, package=package) + except ImportError: + if warn: + warnings.warn("failed to import module %s" % name) + else: + for attr in dir(module): + if ignore and attr.startswith(ignore): + continue + if prefix: + if attr in globals(): + globals()[prefix + attr] = globals()[attr] + elif warn: + warnings.warn("no Python implementation of " + attr) + globals()[attr] = getattr(module, attr) + return True + + + +#_import_module('_transformations') + +if __name__ == "__main__": + import doctest + import random # used in doctests + numpy.set_printoptions(suppress=True, precision=5) + doctest.testmod() + + From f89d43110e609e5759c5b700dac8b8d9b2bac357 Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 16:26:54 +0100 Subject: [PATCH 05/45] move files --- freesas/__init__.py | 64 -- freesas/align.py | 447 --------- freesas/autorg.py | 198 ---- freesas/average.py | 271 ----- freesas/bift.py | 80 -- freesas/collections.py | 107 -- freesas/cormap.py | 122 --- freesas/decorators.py | 58 -- freesas/fitting.py | 222 ----- freesas/invariants.py | 132 --- freesas/model.py | 315 ------ freesas/plot.py | 562 ----------- freesas/sas_argparser.py | 165 ---- freesas/sasio.py | 100 -- freesas/transformations.py | 1918 ------------------------------------ pyproject.toml | 4 +- src/freesas/_version.py | 0 17 files changed, 2 insertions(+), 4763 deletions(-) delete mode 100644 freesas/__init__.py delete mode 100644 freesas/align.py delete mode 100644 freesas/autorg.py delete mode 100644 freesas/average.py delete mode 100644 freesas/bift.py delete mode 100644 freesas/collections.py delete mode 100644 freesas/cormap.py delete mode 100644 freesas/decorators.py delete mode 100644 freesas/fitting.py delete mode 100644 freesas/invariants.py delete mode 100644 freesas/model.py delete mode 100644 freesas/plot.py delete mode 100644 freesas/sas_argparser.py delete mode 100644 freesas/sasio.py delete mode 100644 freesas/transformations.py mode change 100644 => 100755 src/freesas/_version.py diff --git a/freesas/__init__.py b/freesas/__init__.py deleted file mode 100644 index 
79072af..0000000 --- a/freesas/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding: utf-8 -# /*########################################################################## -# -# Copyright (c) 2015-2018 European Synchrotron Radiation Facility -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -# -# ###########################################################################*/ -""" -The silx package contains the following main sub-packages: - -- silx.gui: Qt widgets for data visualization and data file browsing -- silx.image: Some processing functions for 2D images -- silx.io: Reading and writing data files (HDF5/NeXus, SPEC, ...) -- silx.math: Some processing functions for 1D, 2D, 3D, nD arrays -- silx.opencl: OpenCL-based data processing -- silx.sx: High-level silx functions suited for (I)Python console. -- silx.utils: Miscellaneous convenient functions - -See silx documentation: http://www.silx.org/doc/silx/latest/ -""" - -__authors__ = ["Jérôme Kieffer"] -__license__ = "MIT" -__date__ = "31/08/2018" - -import os as _os -import logging as _logging - -_logging.getLogger(__name__).addHandler(_logging.NullHandler()) - - -project = _os.path.basename(_os.path.dirname(_os.path.abspath(__file__))) - -try: - from ._version import __date__ as date # noqa - from ._version import ( - version, - version_info, - hexversion, - strictversion, - dated_version, - ) # noqa -except ImportError: - raise RuntimeError( - "Do NOT use %s from its sources: build it and use the built version" - % project - ) diff --git a/freesas/align.py b/freesas/align.py deleted file mode 100644 index e3a1fb5..0000000 --- a/freesas/align.py +++ /dev/null @@ -1,447 +0,0 @@ -__author__ = "Guillaume Bonamis" -__license__ = "MIT" -__copyright__ = "2015, ESRF" - -import os -import sys -import numpy -import matplotlib -# matplotlib.use('Agg') -import matplotlib.pyplot as plt -from freesas.model import SASModel -import itertools -from scipy.optimize import fmin -import logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("log_freesas") - - -class InputModels: - def __init__(self): - self.inputfiles = [] - self.sasmodels = [] - self.rfactors = [] - self.rmax = None - self.validmodels = [] - - def __repr_(self): - return "Preparation of %s models for alignment" % len(self.inputfiles) - - def assign_models(self, molecule=None): - """ - Create SASModels from pdb files saved in self.inputfiles and saved them in self.models. - Center of mass, inertia tensor and canonical parameters are computed for each SASModel. 
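-
-        Typical use (illustrative sketch only; the pdb file names below are
-        hypothetical):
-
-        >>> im = InputModels()
-        >>> im.inputfiles = ["dammif-01.pdb", "dammif-02.pdb"]
-        >>> models = im.assign_models()  # doctest: +SKIP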
- - :param molecule: optional 2d array, coordinates of the atoms for the model to create - :return self.models: list of SASModel - """ - if not self.inputfiles and len(molecule) == 0: - logger.error("No input files") - - if self.inputfiles: - for inputpdb in self.inputfiles: - model = SASModel() - model.read(inputpdb) - model.centroid() - model.inertiatensor() - model.canonical_parameters() - self.sasmodels.append(model) - if len(self.inputfiles) != len(self.sasmodels): - logger.error("Problem of assignment\n%s models for %s files" % (len(self.sasmodels), len(self.inputfiles))) - - elif len(molecule) != 0: - model = SASModel() - model.atoms = molecule - model.centroid() - model.inertiatensor() - model.canonical_parameters() - self.sasmodels.append(model) - - return self.sasmodels - - def rcalculation(self): - """ - Calculation the maximal value for the R-factors, which is the mean of all the R-factors of - inputs plus 2 times the standard deviation. - R-factors are saved in the attribute self.rfactors, 1d array, and in percentage. - - :return rmax: maximal value for the R-factor - """ - if len(self.sasmodels) == 0: - self.assign_models() - models = self.sasmodels - - rfactors = numpy.empty(len(models), dtype="float") - for i in range(len(models)): - rfactors[i] = models[i].rfactor - self.rfactors = 100.0 * rfactors - - rmax = self.rfactors.mean() + 2 * self.rfactors.std() - self.rmax = rmax - - return rmax - - def models_selection(self): - """ - Check if each model respect the limit for the R-factor - - :return self.validmodels: 1d array, 0 for a non valid model, else 1 - """ - if self.rmax is None: - self.rcalculation() - rmax = self.rmax - - validmodels = [] - for i in range(len(self.sasmodels)): - rfactor = self.rfactors[i] - if rfactor <= rmax: - validmodels.append(1.0) - else: - validmodels.append(0.0) - - self.validmodels = numpy.array(validmodels, dtype="float") - - return self.validmodels - - def rfactorplot(self, filename=None, save=False): - """ - Create a png file with the table of R factor for each model. - A threshold is computed to discarded models with Rfactor>Rmax. 
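-
-        The threshold Rmax is the mean of the R-factors plus two standard
-        deviations, as computed by rcalculation().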
- - :param filename: filename for the figure, default to Rfactor.png - :param save: save automatically the figure if True, else show it - :return fig: the wanted figures - """ - if filename is None: - filename = "Rfactor.png" - if len(self.validmodels) == 0: - self.models_selection() - - dammif_files = len(self.inputfiles) - R = self.rfactors - Rmax = self.rmax - - xticks = 1 + numpy.arange(dammif_files) - fig = plt.figure(figsize=(7.5, 10)) - labels = [os.path.splitext(os.path.basename(self.inputfiles[i]))[0] for i in range(dammif_files)] - - ax2 = fig.add_subplot(1, 1, 1) - ax2.set_title("Selection of dammif models based on R factor") - ax2.bar(xticks - 0.5, R) - ax2.plot([0.5, dammif_files + 0.5], [Rmax, Rmax], "-r", label="R$_{max}$ = %.3f" % Rmax) - ax2.set_ylabel("R factor in percent") - ax2.set_xticks(xticks) - ax2.set_xticklabels(labels, rotation=90) - ax2.legend(loc=8) - - bbox_props = dict(fc="pink", ec="r", lw=1) - for i in range(dammif_files): - if not self.validmodels[i]: - ax2.text(i + 0.95, Rmax / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props) - logger.info("model %s discarded, Rfactor > Rmax" % self.inputfiles[i]) - - if save: - fig.savefig(filename) - else: - fig.show() - - return fig - - -class AlignModels: - """ - Used to align DAM from pdb files - """ - - def __init__(self, files, slow=True, enantiomorphs=True): - """ - :param files: list of pdb files to read to create DAM - :param slow: optimized every symmetry if True, else only optimized the best one - :param enantiomorphs: take into account both enantiomorphs if True (i.e. inversion authorized) - """ - self.slow = slow - self.enantiomorphs = enantiomorphs - self.inputfiles = files - self.outputfiles = [] - self.models = [] - self.arrayNSD = None - self.validmodels = [] - self.reference = None - - def __repr__(self): - return "alignment process for %s models" % len(self.models) - - def assign_models(self): - """ - Create SASModels from pdb files saved in self.inputfiles and saved them in self.models. - Center of mass, inertia tensor and canonical parameters are computed for each SASModel. - - :return self.models: list of SASModel - """ - for inputpdb in self.inputfiles: - model = SASModel() - model.read(inputpdb) - model.centroid() - model.inertiatensor() - model.canonical_parameters() - self.models.append(model) - if len(self.inputfiles) != len(self.models): - logger.error("Problem of assignment\n%s models for %s files" % (len(self.models), len(self.inputfiles))) - - return self.models - - def optimize(self, reference, molecule, symmetry): - """ - Use scipy.optimize to optimize transformation parameters to minimize NSD - - :param reference: SASmodel - :param molecule: SASmodel - :param symmetry: 3-list of +/-1 - :return p: transformation parameters optimized - :return dist: NSD after optimization - """ - p, dist, niter, nfuncalls, warmflag = fmin(reference.dist_after_movement, molecule.can_param, args=(molecule, symmetry), ftol=1e-4, maxiter=200, full_output=True, disp=False) - if niter == 200: - logger.debug("convergence not reached") - else: - logger.debug("convergence reach after %s iterations" % niter) - return p, dist - - def alignment_sym(self, reference, molecule): - """ - Apply 8 combinations to the molecule and select the one which minimize the distance between it and the reference. 
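-
-        The 8 combinations are the sign flips (+/-1, +/-1, +/-1) of the
-        canonical axes; when enantiomorphs is False, flips whose product is
-        -1 (mirror images) are skipped.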
- - :param reference: SASModel, the one which do not move - :param molecule: SASModel, the one wich has to be aligned - :return combinaison: best symmetry to minimize NSD - :return p: transformation parameters optimized if slow is true, unoptimized else - """ - can_paramref = reference.can_param - can_parammol = molecule.can_param - - ref_can = reference.transform(can_paramref, [1, 1, 1]) - mol_can = molecule.transform(can_parammol, [1, 1, 1]) - - if self.slow: - parameters, dist = self.optimize(reference, molecule, [1, 1, 1]) - else: - parameters = can_parammol - dist = reference.dist(molecule, ref_can, mol_can) - combinaison = None - - for comb in itertools.product((-1, 1), repeat=3): - if comb == (1, 1, 1): - continue - if not self.enantiomorphs and comb[0] * comb[1] * comb[2] == -1: - continue - - sym = numpy.diag(comb + (1,)) - mol_sym = numpy.dot(sym, mol_can.T).T - - if self.slow: - symmetry = [sym[0, 0], sym[1, 1], sym[2, 2]] - p, d = self.optimize(reference, molecule, symmetry) - else: - p = can_parammol - d = reference.dist(molecule, ref_can, mol_sym) - - if d < dist: - dist = d - parameters = p - combinaison = comb - if combinaison is not None: - combinaison = list(combinaison) - else: - combinaison = [1, 1, 1] - return combinaison, parameters - - def makeNSDarray(self): - """ - Calculate the NSD correlation table and save it in self.arrayNSD - - :return self.arrayNSD: 2d array, NSD correlation table - """ - models = self.models - size = len(models) - valid = self.validmodels - self.arrayNSD = numpy.zeros((size, size), dtype="float") - - for i in range(size): - if valid[i] == 1.0: - reference = models[i] - else: - self.arrayNSD[i, :] = 0.00 - continue - for j in range(size): - if i == j: - self.arrayNSD[i, j] = 0.00 - elif i < j: - if valid[j] == 1.0: - molecule = models[j] - symmetry, p = self.alignment_sym(reference, molecule) - if self.slow: - dist = reference.dist_after_movement(p, molecule, symmetry) - else: - p, dist = self.optimize(reference, molecule, symmetry) - else: - dist = 0.00 - self.arrayNSD[i, j] = self.arrayNSD[j, i] = dist - return self.arrayNSD - - def plotNSDarray(self, rmax=None, filename=None, save=False): - """ - Create a png file with the table of NSD and the average NSD for each model. - A threshold is computed to segregate good models and the ones to exclude. 
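-
-        The threshold is the mean of the pairwise NSD values plus one
-        standard deviation; models whose average NSD exceeds it are marked
-        as discarded.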
- - :param rmax: threshold of R factor for the validity of a model - :param filename: filename for the figure, default to nsd.png - :param save: save automatically the figure if True, else show it - :return fig: the wanted figures - """ - if self.arrayNSD is None: - self.makeNSDarray() - if not self.reference: - self.reference = self.find_reference() - if filename is None: - filename = "nsd.png" - - dammif_files = len(self.inputfiles) - valid_models = self.validmodels - labels = [os.path.splitext(os.path.basename(self.outputfiles[i]))[0] for i in range(dammif_files)] - mask2d = (numpy.outer(valid_models, valid_models)) - tableNSD = self.arrayNSD * mask2d - maskedNSD = numpy.ma.masked_array(tableNSD, mask=numpy.logical_not(mask2d)) - data = valid_models * (tableNSD.sum(axis=-1) / (valid_models.sum() - 1)) # mean for the valid models, excluding itself - - fig = plt.figure(figsize=(15, 10)) - xticks = 1 + numpy.arange(dammif_files) - ax1 = fig.add_subplot(1, 2, 1) - ax2 = fig.add_subplot(1, 2, 2) - - # first subplot : the NSD table - lnsd = [] - for i in range(dammif_files): - for j in range(dammif_files): - nsd = maskedNSD[i, j] - if not maskedNSD.mask[i, j]: - ax1.text(i, j, "%.2f" % nsd, ha="center", va="center", size=12 * 8 // dammif_files) - ax1.text(j, i, "%.2f" % nsd, ha="center", va="center", size=12 * 8 // dammif_files) - if i != j: - lnsd.append(nsd) - - lnsd = numpy.array(lnsd) - nsd_max = lnsd.mean() + lnsd.std() # threshold for nsd mean - - ax1.imshow(maskedNSD, interpolation="nearest", origin="upper", cmap="YlOrRd", norm=matplotlib.colors.Normalize(vmin=min(lnsd))) - ax1.set_title(u"NSD correlation table") - ax1.set_xticks(range(dammif_files)) - ax1.set_xticklabels(labels, rotation=90) - ax1.set_xlim(-0.5, dammif_files - 0.5) - ax1.set_ylim(-0.5, dammif_files - 0.5) - ax1.set_yticks(range(dammif_files)) - ax1.set_yticklabels(labels) - - # second subplot : the NSD mean for each model - ax2.bar(xticks - 0.5, data) - ax2.plot([0.5, dammif_files + 0.5], [nsd_max, nsd_max], "-r", label=u"NSD$_{max}$ = %.2f" % nsd_max) - ax2.set_title(u"NSD between any model and all others") - ax2.set_ylabel("Normalized Spatial Discrepancy") - ax2.set_xticks(xticks) - ax2.set_xticklabels(labels, rotation=90) - bbox_props = dict(fc="cyan", ec="b", lw=1) - ax2.text(self.reference + 0.95, data[self.reference] / 2, "Reference", ha="center", va="center", rotation=90, size=10, bbox=bbox_props) - ax2.legend(loc=8) - - bbox_props = dict(fc="pink", ec="r", lw=1) - valid_number = 0 - for i in range(dammif_files): - if data[i] > nsd_max: - ax2.text(i + 0.95, data[self.reference] / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props) - logger.debug("model %s discarded, nsd > nsd_max" % self.inputfiles[i]) - elif not valid_models[i]: - if rmax: - ax2.text(i + 0.95, data[self.reference] / 2, "Discarded, Rfactor = %s > Rmax = %s" % (100.0 * self.models[i].rfactor, rmax), ha="center", va="center", rotation=90, size=10, bbox=bbox_props) - else: - ax2.text(i + 0.95, data[self.reference] / 2, "Discarded", ha="center", va="center", rotation=90, size=10, bbox=bbox_props) - else: - if valid_models[i] == 1.0: - valid_number += 1 - - logger.debug("%s valid models" % valid_number) - - if save: - fig.savefig(filename) - else: - fig.show() - return fig - - def find_reference(self): - """ - Find the reference model among the models aligned. - The reference model is the one with lower average NSD with other models. 
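-
-        Only models flagged as valid are considered: invalid ones are
-        assigned an effectively infinite average NSD (sys.maxsize) so they
-        can never be selected.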
- - :return ref_number: position of the reference model in the list self.models - """ - if self.arrayNSD is None: - self.makeNSDarray() - if len(self.validmodels) == 0: - logger.error("Validity of models is not computed") - valid = self.validmodels - valid = valid.astype(bool) - - averNSD = numpy.zeros(len(self.models)) - averNSD += sys.maxsize - averNSD[valid] = ((self.arrayNSD.sum(axis=-1)) / (valid.sum() - 1))[valid] - - self.reference = averNSD.argmin() - - return self.reference - - def alignment_reference(self, ref_number=None): - """ - Align all models in self.models with the reference one. - The aligned models are saved in pdb files (names in list self.outputfiles) - """ - if self.reference is None and ref_number is None: - self.find_reference() - - ref_number = self.reference - models = self.models - reference = models[ref_number] - for i in range(len(models)): - if i == ref_number: - continue - else: - molecule = models[i] - symmetry, p = self.alignment_sym(reference, molecule) - if not self.slow: - p, dist = self.optimize(reference, molecule, symmetry) - molecule.atoms = molecule.transform(p, symmetry) # molecule sent on its canonical position - molecule.atoms = molecule.transform(reference.can_param, [1, 1, 1], reverse=True) # molecule sent on reference position - molecule.save(self.outputfiles[i]) - reference.save(self.outputfiles[ref_number]) - return 0 - - def alignment_2models(self, save=True): - """ - Align two models using the first one as reference. - The aligned models are save in pdb files. - - :return dist: NSD after alignment - """ - models = self.models - reference = models[0] - molecule = models[1] - - symmetry, p = self.alignment_sym(reference, molecule) - if not self.slow: - p, dist = self.optimize(reference, molecule, symmetry) - - molecule.atoms = molecule.transform(p, symmetry) - molecule.atoms = molecule.transform(reference.can_param, [1, 1, 1], reverse=True) - if self.slow: - dist = reference.dist(molecule, reference.atoms, molecule.atoms) - if save: - molecule.save(self.outputfiles) - - return dist diff --git a/freesas/autorg.py b/freesas/autorg.py deleted file mode 100644 index 4284f94..0000000 --- a/freesas/autorg.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- coding: utf-8 -*- -"""Functions for calculating the radius of gyration and forward scattering intensity.""" - -__authors__ = ["Jerome Kieffer"] -__license__ = "MIT" -__copyright__ = "2020, ESRF" -__date__ = "05/06/2020" - -import logging -import numpy -from scipy.optimize import curve_fit -from ._autorg import ( # pylint: disable=E0401 - RG_RESULT, - autoRg, - AutoGuinier, - linear_fit, - FIT_RESULT, - guinier, - NoGuinierRegionError, - DTYPE, - InsufficientDataError, -) - - -logger = logging.getLogger(__name__) - - -def auto_gpa(data, Rg_min=1.0, qRg_max=1.3, qRg_min=0.5): - """ - Uses the GPA theory to guess quickly Rg, the - radius of gyration and I0, the forwards scattering - - The theory is described in `Guinier peak analysis for visual and automated - inspection of small-angle X-ray scattering data` - Christopher D. Putnam - J. Appl. Cryst. (2016). 49, 1412–1419 - - This fits sqrt(q²Rg²)*exp(-q²Rg²/3)*I0/Rg to the curve I*q = f(q²) - - The Guinier region goes arbitrary from 0.5 to 1.3 q·Rg - qRg_min and qRg_max can be provided - - :param data: the raw data read from disc. Only q and I are used. - :param Rg_min: the minimal accpetable value for the radius of gyration - :param qRg_max: the default upper bound for the Guinier region. - :param qRg_min: the default lower bound for the Guinier region. 
- :return: autRg result with limited information - """ - - def curate_data(data): - q = data.T[0] - I = data.T[1] - err = data.T[2] - - start0 = numpy.argmax(I) - stop0 = numpy.where(q > qRg_max / Rg_min)[0][0] - - range0 = slice(start0, stop0) - q = q[range0] - I = I[range0] - err = err[range0] - - q2 = q ** 2 - lnI = numpy.log(I) - I2_over_sigma2 = err ** 2 / I ** 2 - - y = I * q - p1 = numpy.argmax(y) - - # Those are guess from the max position: - Rg = (1.5 / q2[p1]) ** 0.5 - I0 = I[p1] * numpy.exp(q2[p1] * Rg ** 2 / 3.0) - - # Let's cut-down the guinier region from 0.5-1.3 in qRg - try: - start1 = numpy.where(q > qRg_min / Rg)[0][0] - except IndexError: - start1 = None - try: - stop1 = numpy.where(q > qRg_max / Rg)[0][0] - except IndexError: - stop1 = None - range1 = slice(start1, stop1) - - q1 = q[range1] - I1 = I[range1] - - return q1, I1, Rg, I0, q2, lnI, I2_over_sigma2, start0 - - q1, I1, Rg, I0, q2, lnI, I2_over_sigma2, start0 = curate_data(data) - if len(q1) < 3: - reduced_data = numpy.delete(data, start0, axis=0) - q1, I1, Rg, I0, q2, lnI, I2_over_sigma2, start0 = curate_data( - reduced_data - ) - - x = q1 * q1 - y = I1 * q1 - - f = ( - lambda x, Rg, I0: I0 - / Rg - * numpy.sqrt(x * Rg * Rg) - * numpy.exp(-x * Rg * Rg / 3.0) - ) - res = curve_fit(f, x, y, [Rg, I0]) - logger.debug( - "GPA upgrade Rg %s-> %s and I0 %s -> %s", Rg, res[0][0], I0, res[0][1] - ) - Rg, I0 = res[0] - sigma_Rg, sigma_I0 = numpy.sqrt(numpy.diag(res[1])) - end = numpy.where(data.T[0] > qRg_max / Rg)[0][0] - start = numpy.where(data.T[0] > qRg_min / Rg)[0][0] - aggregation = guinier.check_aggregation( - q2, lnI, I2_over_sigma2, 0, end - start0, Rg=Rg, threshold=False - ) - quality = guinier.calc_quality( - Rg, sigma_Rg, data.T[0, start], data.T[0, end], aggregation, qRg_max - ) - return RG_RESULT( - Rg, sigma_Rg, I0, sigma_I0, start, end, quality, aggregation - ) - - -def auto_guinier(data, Rg_min=1.0, qRg_max=1.3, relax=1.2): - """ - Yet another implementation of the Guinier fit - - The idea: - * extract the reasonable range - * convert to the Guinier space (ln(I) = f(q²) - * scan all possible intervall - * keep any with qRg_max<1.3 (or 1.5 in relaxed mode) - * select the begining and the end of the guinier region according to the contribution of two parameters: - - (q_max·Rg - q_min·Rg)/qRg_max --> in favor of large ranges - - 1 / RMSD --> in favor of good quality data - For each start and end point, the contribution of all ranges are averaged out (using histograms) - The best solution is the start/end position with the maximum average. - * All ranges within this region are averaged out to measure Rg, I0 and more importantly their deviation. - * The quality is still to be calculated - * Aggergation is assessed according a second order polynom fit. 
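-
-    Typical use (illustrative sketch; data stands for any (N, 3) array of
-    q, I, sigma). The eight result fields are unpacked in construction order:
-
-    >>> Rg, sigma_Rg, I0, sigma_I0, start, end, quality, aggregated = auto_guinier(data)  # doctest: +SKIP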
- - :param data: 2D array with (q,I,err) - :param Rg_min: minimum value for Rg - :param qRg_max: upper bound of the Guinier region - :param relax: relaxation factor for the upper bound - :param resolution: step size of the slope histogram - :return: autRg result - """ - - raw_size = data.shape[0] - q_ary = numpy.empty(raw_size, dtype=DTYPE) - i_ary = numpy.empty(raw_size, dtype=DTYPE) - sigma_ary = numpy.empty(raw_size, dtype=DTYPE) - q2_ary = numpy.empty(raw_size, dtype=DTYPE) - lnI_ary = numpy.empty(raw_size, dtype=DTYPE) - wg_ary = numpy.empty(raw_size, dtype=DTYPE) - - start0, stop0 = guinier.curate_data( - data, q_ary, i_ary, sigma_ary, Rg_min, qRg_max, relax - ) - if start0 < 0: - raise InsufficientDataError( - "Minimum region size is %s" % guinier.min_size - ) - guinier.guinier_space( - start0, stop0, q_ary, i_ary, sigma_ary, q2_ary, lnI_ary, wg_ary - ) - - fits = guinier.many_fit( - q2_ary, lnI_ary, wg_ary, start0, stop0, Rg_min, qRg_max, relax - ) - - cnt, relaxed, qRg_max, aslope_max = guinier.count_valid( - fits, qRg_max, relax - ) - # valid_fits = fits[fits[:, 9] < qRg_max] - if cnt == 0: - raise NoGuinierRegionError(qRg_max) - - # select the Guinier region based on all fits: - start, stop = guinier.find_region(fits, qRg_max) - - # Now average out the - Rg_avg, Rg_std, I0_avg, I0_std, good = guinier.average_values( - fits, start, stop - ) - - aggregated = guinier.check_aggregation( - q2_ary, lnI_ary, wg_ary, start0, stop, Rg=Rg_avg, threshold=False - ) - quality = guinier.calc_quality( - Rg_avg, Rg_std, q_ary[start], q_ary[stop], aggregated, qRg_max - ) - result = RG_RESULT( - Rg_avg, Rg_std, I0_avg, I0_std, start, stop, quality, aggregated - ) - return result diff --git a/freesas/average.py b/freesas/average.py deleted file mode 100644 index 3c428c4..0000000 --- a/freesas/average.py +++ /dev/null @@ -1,271 +0,0 @@ -__author__ = "Guillaume" -__license__ = "MIT" -__copyright__ = "2015, ESRF" - -import numpy -from freesas.model import SASModel - - -class Grid: - """ - This class is used to create a grid which include all the input models - """ - def __init__(self, inputfiles): - """ - :param inputfiles: list of pdb files needed for averaging - """ - self.inputs = inputfiles - self.size = [] - self.nbknots = None - self.radius = None - self.coordknots = [] - - def __repr__(self): - return "Grid with %i knots"%self.nbknots - - def spatial_extent(self): - """ - Calculate the maximal extent of input models - - :return self.size: 6-list with x,y,z max and then x,y,z min - """ - atoms = [] - models_fineness = [] - for files in self.inputs: - m = SASModel(files) - if len(atoms)==0: - atoms = m.atoms - else: - atoms = numpy.append(atoms, m.atoms, axis=0) - models_fineness.append(m.fineness) - mean_fineness = sum(models_fineness) / len(models_fineness) - - coordmin = atoms.min(axis=0) - mean_fineness - coordmax = atoms.max(axis=0) + mean_fineness - self.size = [coordmax[0],coordmax[1],coordmax[2],coordmin[0],coordmin[1],coordmin[2]] - - return self.size - - def calc_radius(self, nbknots=None): - """ - Calculate the radius of each point of a hexagonal close-packed grid, - knowing the total volume and the number of knots in this grid. 
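        A worked example of the formula applied below (numbers are illustrative)::

            import numpy

            volume = 10.0 ** 3                    # 10 x 10 x 10 bounding box
            nbknots = 5000                        # default number of knots
            density = numpy.pi / (3 * 2 ** 0.5)   # packing fraction of a close-packed lattice, ~0.74
            radius = ((3.0 / (4 * numpy.pi)) * density * volume / nbknots) ** (1.0 / 3)
            # radius ~ 0.33: each knot accounts for a sphere of that radius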
- - :param nbknots: number of knots wanted for the grid - :return radius: the radius of each knot of the grid - """ - if len(self.size)==0: - self.spatial_extent() - nbknots = nbknots if nbknots is not None else 5000 - size = self.size - dx = size[0] - size[3] - dy = size[1] - size[4] - dz = size[2] - size[5] - volume = dx * dy * dz - - density = numpy.pi / (3*2**0.5) - radius = ((3 /( 4 * numpy.pi)) * density * volume / nbknots)**(1.0/3) - self.radius = radius - - return radius - - def make_grid(self): - """ - Create a grid using the maximal size and the radius previously computed. - The geometry used is a face-centered cubic lattice (fcc). - - :return knots: 2d-array, coordinates of each dot of the grid. Saved as self.coordknots. - """ - if len(self.size)==0: - self.spatial_extent() - if self.radius is None: - self.calc_radius() - - radius = self.radius - a = numpy.sqrt(2.0)*radius - - xmax = self.size[0] - xmin = self.size[3] - ymax = self.size[1] - ymin = self.size[4] - zmax = self.size[2] - zmin = self.size[5] - - x = 0.0 - y = 0.0 - z = 0.0 - - xlist = [] - ylist = [] - zlist = [] - knots = numpy.empty((1,4), dtype="float") - while (zmin + z) <= zmax: - zlist.append(z) - z += a - while (ymin + y) <= ymax: - ylist.append(y) - y += a - while (xmin + x) <= xmax: - xlist.append(x) - x += a - - for i in range(len(zlist)): - z = zlist[i] - if i % 2 ==0: - for j in range(len(xlist)): - x = xlist[j] - if j % 2 == 0: - for y in ylist[0:-1:2]: - knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0) - else: - for y in ylist[1:-1:2]: - knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0) - else: - for j in range(len(xlist)): - x = xlist[j] - if j % 2 == 0: - for y in ylist[1:-1:2]: - knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0) - else: - for y in ylist[0:-1:2]: - knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0) - - knots = numpy.delete(knots, 0, axis=0) - self.nbknots = knots.shape[0] - self.coordknots = knots - - return knots - - -class AverModels(): - """ - Provides tools to create an averaged models using several aligned dummy atom models - """ - def __init__(self, inputfiles, grid): - """ - :param inputfiles: list of pdb files of aligned models - :param grid: 2d-array coordinates of each point of a grid, fourth column full of zeros - """ - self.inputfiles = inputfiles - self.models = [] - self.header = [] - self.radius = None - self.atoms = [] - self.grid = grid - - def __repr__(self): - return "Average SAS model with %i atoms"%len(self.atoms) - - def read_files(self, reference=None): - """ - Read all the pdb file in the inputfiles list, creating SASModels. - The SASModels created are save in a list, the reference model is the first model in the list. - - :param reference: position of the reference model file in the inputfiles list - """ - ref = reference if reference is not None else 0 - inputfiles = self.inputfiles - - models = [] - models.append(SASModel(inputfiles[ref])) - for i in range(len(inputfiles)): - if i==ref: - continue - else: - models.append(SASModel(inputfiles[i])) - self.models = models - - return models - - def calc_occupancy(self, griddot): - """ - Assign an occupancy and a contribution factor to the point of the grid. 
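        Each atom at squared distance d2 from the grid point adds
        max(1 - d2/fineness, 0) to the occupancy, so only atoms with d2 below
        the model fineness contribute. For example (values are made up)::

            fineness = 0.33                  # average nearest-neighbour distance of a model
            d2 = 0.1                         # squared atom-to-knot distance
            add = max(1 - d2 / fineness, 0)  # ~0.70, a partial occupancy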
- - :param griddot: 1d-array, coordinates of a point of the grid - :return tuple: 2-tuple containing (occupancy, contribution) - """ - occ = 0.0 - contrib = 0 - for model in self.models: - f = model.fineness - for i in range(model.atoms.shape[0]): - dx = model.atoms[i, 0] - griddot[0] - dy = model.atoms[i, 1] - griddot[1] - dz = model.atoms[i, 2] - griddot[2] - dist = dx * dx + dy * dy + dz * dz - add = max(1 - (dist / f), 0) - if add != 0: - contrib += 1 - occ += add - return occ, contrib - - def assign_occupancy(self): - """ - For each point of the grid, total occupancy and contribution factor are computed and saved. - The grid is then ordered with decreasing value of occupancy. - The fourth column of the array correspond to the occupancy of the point and the fifth to - the contribution for this point. - - :return sortedgrid: 2d-array, coordinates of each point of the grid - """ - grid = self.grid - nbknots = grid.shape[0] - grid = numpy.append(grid, numpy.zeros((nbknots, 1), dtype="float"), axis=1) - - for i in range(nbknots): - occ, contrib = self.calc_occupancy(grid[i, 0:3]) - grid[i, 3] = occ - grid[i, 4] = contrib - - order = numpy.argsort(grid, axis=0)[:, -2] - sortedgrid = numpy.empty_like(grid) - for i in range(nbknots): - sortedgrid[nbknots - i - 1, :] = grid[order[i], :] - - return sortedgrid - - def make_header(self): - """ - Create the layout of the pdb file for the averaged model. - """ - header = [] - header.append("Number of files averaged : %s\n"%len(self.inputfiles)) - for i in self.inputfiles: - header.append(i + "\n") - header.append("Total number of dots in the grid : %s\n"%self.grid.shape[0]) - - decade = 1 - for i in range(self.grid.shape[0]): - line = "ATOM CA ASP 1 20.00 2 201\n" - line = line[:7] + "%4.i"%(i + 1) + line[11:] - if not (i + 1) % 10: - decade += 1 - line = line[:21] + "%4.i"%decade + line[25:] - header.append(line) - self.header = header - return header - - def save_aver(self, filename): - """ - Save the position of each occupied dot of the grid, its occupancy and its contribution - in a pdb file. - - :param filename: name of the pdb file to write - """ - if len(self.header) == 0: - self.make_header() - assert self.grid.shape[-1] == 5 - - nr = 0 - with open(filename, "w") as pdbout: - for line in self.header: - if line.startswith("ATOM"): - if nr < self.grid.shape[0] and self.grid[nr, 4] != 0: - coord = "%8.3f%8.3f%8.3f" % tuple(self.grid[nr, 0:3]) - occ = "%6.2f" % self.grid[nr, 3] - contrib = "%2.f" % self.grid[nr, 4] - line = line[:30] + coord + occ + line[60:66] + contrib + line[68:] - else: - line = "" - nr += 1 - pdbout.write(line) diff --git a/freesas/bift.py b/freesas/bift.py deleted file mode 100644 index e2e9ec3..0000000 --- a/freesas/bift.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Bayesian Inverse Fourier Transform - -This code is the implementation of -Steen Hansen J. Appl. Cryst. (2000). 
33, 1415-1421 - -Based on the BIFT from Jesse Hopkins, available at: -https://sourceforge.net/p/bioxtasraw/git/ci/master/tree/bioxtasraw/BIFT.py - -Many thanks to Pierre Paleo for the auto-alpha guess -""" - -__authors__ = ["Jerome Kieffer", "Jesse Hopkins"] -__license__ = "MIT" -__copyright__ = "2020, ESRF" -__date__ = "10/06/2020" - -import logging -logger = logging.getLogger(__name__) -# from collections import namedtuple -from math import log, ceil -import numpy -from scipy.optimize import minimize -from ._bift import BIFT -from .autorg import auto_gpa, autoRg, auto_guinier, NoGuinierRegionError - - -def auto_bift(data, Dmax=None, alpha=None, npt=100, - start_point=None, end_point=None, - scan_size=11, Dmax_over_Rg=3): - """Calculates the inverse Fourier tranform of the data using an optimisation of the evidence - - :param data: 2D array with q, I(q), δI(q). q can be in 1/nm or 1/A, it imposes the unit for r & Dmax - :param Dmax: Maximum diameter of the object, this is the starting point to be refined. Can be guessed - :param alpha: Regularisation parameter, let it to None for automatic scan - :param npt: Number of point for the curve p(r) - :param start_point: First useable point in the I(q) curve, this is not the start of the Guinier region - :param end_point: Last useable point in the I(q) curve - :param scan_size: size of the initial geometrical scan for alpha values. - :param Dmax_over_Rg: In average, protein's Dmax is 3x Rg, use this to adjust - :return: BIFT object. Call the get_best to retrieve the optimal solution - """ - assert data.ndim == 2 - assert data.shape[1] == 3 # enforce q, I, err - use_wisdom = False - data = data[slice(start_point, end_point)] - q, I, err = data.T - npt = min(npt, q.size) # no chance for oversampling ! - bo = BIFT(q, I, err) # this is the bift object - if Dmax is None: - # Try to get a reasonable guess from Rg - try: - Guinier = auto_guinier(data) - except: - logger.error("Guinier analysis failed !") - raise -# print(Guinier) - if Guinier.Rg <= 0: - raise NoGuinierRegionError - Dmax = bo.set_Guinier(Guinier, Dmax_over_Rg) - if alpha is None: - alpha_max = bo.guess_alpha_max(npt) - # First scan on alpha: - key = bo.grid_scan(Dmax, Dmax, 1, - 1.0 / alpha_max, alpha_max, scan_size, npt) - Dmax, alpha = key[:2] - # Then scan on Dmax: - key = bo.grid_scan(max(Dmax / 2, Dmax * (Dmax_over_Rg - 1) / Dmax_over_Rg), Dmax * (Dmax_over_Rg + 1) / Dmax_over_Rg, scan_size, - alpha, alpha, 1, npt) - Dmax, alpha = key[:2] - if bo.evidence_cache[key].converged: - bo.update_wisdom() - use_wisdom = True - - # Optimization using Bayesian operator: - logger.info("Start search at Dmax=%.2f alpha=%.2f use wisdom=%s", Dmax, alpha, use_wisdom) - res = minimize(bo.opti_evidence, (Dmax, log(alpha)), args=(npt, use_wisdom), method="powell") - logger.info("Result of optimisation:\n %s", res) - return bo diff --git a/freesas/collections.py b/freesas/collections.py deleted file mode 100644 index 91fdea8..0000000 --- a/freesas/collections.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and 
to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -""" -Set of namedtuples defined a bit everywhere -""" -__authors__ = ["Jérôme Kieffer"] -__license__ = "MIT" -__copyright__ = "2020 ESRF" -__date__ = "13/10/2020" - -from collections import namedtuple -from os import linesep -import numpy - -# Used in AutoRg -RG_RESULT = namedtuple( - "RG_RESULT", - "Rg sigma_Rg I0 sigma_I0 start_point end_point quality aggregated", -) - - -def _RG_RESULT_repr(self): - return f"Rg={self.Rg:6.4f}(±{self.sigma_Rg:6.4f}) I0={self.I0:6.4f}(±{self.sigma_I0:6.4f}) [{self.start_point}-{self.end_point}] {100.0*self.quality:5.2f}% {'aggregated' if self.aggregated>0.1 else ''}" - - -RG_RESULT.__repr__ = _RG_RESULT_repr - -FIT_RESULT = namedtuple( - "FIT_RESULT", - "slope sigma_slope intercept sigma_intercept, R, R2, chi2, RMSD", -) -RT_RESULT = namedtuple("RT_RESULT", "Vc sigma_Vc Qr sigma_Qr mass sigma_mass") - - -def _RT_RESULT_repr(self): - return f"Vc={self.Vc:6.4f}(±{self.sigma_Vc:6.4f}) Qr={self.Qr:6.4f}(±{self.sigma_Qr:6.4f}) mass={self.mass:6.4f}(±{self.sigma_mass:6.4f})" - - -RT_RESULT.__repr__ = _RT_RESULT_repr - -# Used in BIFT -RadiusKey = namedtuple("RadiusKey", "Dmax npt") -PriorKey = namedtuple("PriorKey", "type npt") -TransfoValue = namedtuple("TransfoValue", "transfo B sum_dia") -EvidenceKey = namedtuple("EvidenceKey", "Dmax alpha npt") -EvidenceResult = namedtuple( - "EvidenceResult", "evidence chi2r regularization radius density converged" -) - -StatsResult = namedtuple( - "StatsResult", - "radius density_avg density_std evidence_avg evidence_std Dmax_avg Dmax_std alpha_avg, alpha_std chi2r_avg chi2r_std regularization_avg regularization_std Rg_avg Rg_std I0_avg I0_std", -) - - -def save_bift(stats, filename, source=None): - "Save the results of the fit to the file" - res = [ - "Dmax= %.2f±%.2f" % (stats.Dmax_avg, stats.Dmax_std), - "𝛂= %.1f±%.1f" % (stats.alpha_avg, stats.alpha_std), - "S₀= %.4f±%.4f" % (stats.regularization_avg, stats.regularization_std), - "χ²= %.2f±%.2f" % (stats.chi2r_avg, stats.chi2r_std), - "logP= %.2f±%.2f" % (stats.evidence_avg, stats.evidence_std), - "Rg= %.2f±%.2f" % (stats.Rg_avg, stats.Rg_std), - "I₀= %.2f±%.2f" % (stats.I0_avg, stats.I0_std), - ] - with open(filename, "wt", encoding="utf-8") as out: - out.write("# %s %s" % (source or filename, "\n")) - for txt in res: - out.write("# %s %s" % (txt, "\n")) - out.write("%s# r\tp(r)\tsigma_p(r)%s" % ("\n", "\n")) - for r, p, s in zip( - stats.radius.astype(numpy.float32), - stats.density_avg.astype(numpy.float32), - stats.density_std.astype(numpy.float32), - ): - out.write("%s\t%s\t%s%s" % (r, p, s, "\n")) - return filename + ": " + "; ".join(res) - - -StatsResult.save = save_bift - -# Used in Cormap -GOF = namedtuple("GOF", ["n", "c", "P"]) diff --git a/freesas/cormap.py b/freesas/cormap.py deleted file mode 100644 index 7741ed5..0000000 --- 
a/freesas/cormap.py
+++ /dev/null
@@ -1,122 +0,0 @@
-__author__ = "Jerome Kieffer"
-__license__ = "MIT"
-__copyright__ = "2017, ESRF"
-
-import numpy
-from math import log
-from .collections import GOF
-
-from ._cormap import measure_longest
-
-
-class LongestRunOfHeads(object):
-    """Implements the "longest run of heads" by Mark F. Schilling,
-    The College Mathematics Journal, Vol. 21, No. 3, (1990), pp. 196-207
-
-    See: http://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020742.02p0021g.pdf
-    """
-
-    def __init__(self):
-        "We store already calculated values for (n, c)"
-        self.knowledge = {}
-
-    def A(self, n, c):
-        """Calculate A(number_of_tosses, length_of_longest_run)
-
-        :param n: number of coin tosses in the experiment, an integer
-        :param c: length of the longest run of heads, an integer
-        :return: the A parameter used in the formula
-        """
-        if n <= c:
-            return 2 ** n
-        elif (n, c) in self.knowledge:
-            return self.knowledge[(n, c)]
-        else:
-            s = 0
-            for j in range(c, -1, -1):
-                s += self.A(n - 1 - j, c)
-            self.knowledge[(n, c)] = s
-            return s
-
-    def B(self, n, c):
-        """Calculate B(number_of_tosses, length_of_longest_run)
-        for having either a run of heads or a run of tails
-
-        :param n: number of coin tosses in the experiment, an integer
-        :param c: length of the longest run, an integer
-        :return: the B parameter used in the formula
-        """
-        return 2 * self.A(n - 1, c - 1)
-
-    def __call__(self, n, c):
-        """Calculate the probability for the longest run of heads to exceed the observed length
-
-        :param n: number of coin tosses in the experiment, an integer
-        :param c: length of the longest run of heads, an integer
-        :return: the probability of having more than c subsequent heads in n tosses of a fair coin
-        """
-        if c >= n:
-            return 0
-        delta = 2 ** n - self.A(n, c)
-        if delta <= 0:
-            return 0
-        return 2.0 ** (log(delta, 2) - n)
-
-    def probaHeadOrTail(self, n, c):
-        """Calculate the probability of a longest run of heads or tails of exactly the given length
-
-        :param n: number of coin tosses in the experiment, an integer
-        :param c: length of the longest run of heads or tails, an integer
-        :return: the probability of having a longest run of exactly c heads or tails in n tosses of a fair coin
-        """
-        if c > n:
-            return 0
-        if c == 0:
-            return 0
-        delta = self.B(n, c) - self.B(n, c - 1)
-        if delta <= 0:
-            return 0
-        return min(2.0 ** (log(delta, 2.0) - n), 1.0)
-
-    def probaLongerRun(self, n, c):
-        """Calculate the probability for the longest run of heads or tails to exceed the observed length
-
-        :param n: number of coin tosses in the experiment, an integer
-        :param c: length of the observed run of heads or tails, an integer
-        :return: the probability of having more than c subsequent heads or tails in n tosses of a fair coin
-        """
-        if c > n:
-            return 0
-        if c == 0:
-            return 0
-        delta = (2 ** n) - self.B(n, c)
-        if delta <= 0:
-            return 0
-        return min(2.0 ** (log(delta, 2.0) - n), 1.0)
-
-
-LROH = LongestRunOfHeads()
-
-
-def gof(data1, data2):
-    """Calculate the probability for a pair of datasets to be equivalent
-
-    Implementation according to:
-    http://www.nature.com/nmeth/journal/v12/n5/full/nmeth.3358.html
-
-    :param data1: numpy array
-    :param data2: numpy array
-    :return: probability for the two datasets to be equivalent
-    """
-    if data1.ndim == 2 and data1.shape[1] > 1:
-        data1 = data1[:, 1]
-    if data2.ndim == 2 and data2.shape[1] > 1:
-        data2 = data2[:, 1]
-
-    cdata = numpy.ascontiguousarray(data2 - data1, numpy.float64).ravel()
-    c = measure_longest(cdata)
-    n = cdata.size
-    res = GOF(n, c, LROH.probaLongerRun(n, c - 1))
-    return res
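A short usage sketch of this test (the two curves are synthetic)::

    import numpy
    from freesas.cormap import gof

    rng = numpy.random.default_rng(0)
    curve1 = rng.normal(size=100)
    curve2 = rng.normal(size=100)
    result = gof(curve1, curve2)
    # result.n: number of points; result.c: longest run of one sign in the difference;
    # result.P: probability that two equivalent curves show a run at least this long
    print(result.n, result.c, result.P)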
diff --git a/freesas/decorators.py b/freesas/decorators.py deleted file mode 100644 index 6bdb517..0000000 --- a/freesas/decorators.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding: utf-8 -# -# Project: Free SAS tools -# https://github.com/kif/freesas -# -# Copyright (C) 2015-2020 European Synchrotron Radiation Facility, Grenoble, France -# -# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -"""Bunch of useful decorators""" - -__authors__ = ["Jerome Kieffer", "H. Payno", "P. Knobel", "V. Valls"] -__contact__ = "Jerome.Kieffer@ESRF.eu" -__license__ = "MIT" -__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" -__date__ = "27/04/2020" -__status__ = "development" -__docformat__ = 'restructuredtext' - -import sys -import time -import logging - -timelog = logging.getLogger("freesas.timeit") - - -def timeit(func): - - def wrapper(*arg, **kw): - '''This is the docstring of timeit: - a decorator that logs the execution time''' - t1 = time.perf_counter() - res = func(*arg, **kw) - t2 = time.perf_counter() - name = func.func_name if sys.version_info[0] < 3 else func.__name__ - timelog.warning("%s took %.3fs", name, t2 - t1) - return res - - wrapper.__name__ = func.__name__ - wrapper.__doc__ = func.__doc__ - return wrapper diff --git a/freesas/fitting.py b/freesas/fitting.py deleted file mode 100644 index 9242c40..0000000 --- a/freesas/fitting.py +++ /dev/null @@ -1,222 +0,0 @@ -"""This module provides a function which reads in the data, -performs the guinier fit with a given algotithm and reates the input.""" - -__authors__ = ["Martha Brennich"] -__contact__ = "martha.brennich@googlemail.com" -__license__ = "MIT" -__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France" -__date__ = "21/03/2021" -__status__ = "development" -__docformat__ = "restructuredtext" - -import sys -import logging -import platform -from os import linesep as os_linesep -from pathlib import Path -from contextlib import contextmanager -from typing import Callable, List, Optional, IO, Generator -from numpy import ndarray -from .autorg import ( - RG_RESULT, - InsufficientDataError, - NoGuinierRegionError, -) -from .sasio import ( - load_scattering_data, - convert_inverse_angstrom_to_nanometer, -) -from .sas_argparser import GuinierParser - - -def set_logging_level(verbose_flag: int) -> None: - """ - Set logging level according to verbose flag of argparser - :param verbose_flag: int flag for logging level - """ - if verbose_flag == 
1: - logging.root.setLevel(logging.INFO) - elif verbose_flag >= 2: - logging.root.setLevel(logging.DEBUG) - - -def collect_files(file_list: List[str]) -> List[Path]: - """ - Take file list from argparser and return list of paths - :param file_list: file list as returned by the argparser - :return: A list of Path objects which includes only existing files - """ - files = [Path(i) for i in file_list if Path(i).exists()] - if platform.system() == "Windows" and files == []: - files = list(Path.cwd().glob(file_list[0])) - files.sort() - return files - - -@contextmanager -def get_output_destination( - output_path: Optional[Path] = None, -) -> Generator[IO[str], None, None]: - """ - Return file or stdout object to write output to - :param output_path: None if output to stdout, else Path to outputfile - :return: opened file with write access or sys.stdout - """ - # pylint: disable=R1705 - if output_path is not None: - with open(output_path, "w") as destination: - yield destination - else: - yield sys.stdout - - -def get_linesep(output_destination: IO[str]) -> str: - """ - Get the appropriate linesep depending on the output destination. - :param output_destination: an IO object, e.g. an open file or stdout - :return: string with the correct linesep - """ - # pylint: disable=R1705 - if output_destination == sys.stdout: - return os_linesep - else: - return "\n" - - -def get_guinier_header( - linesep: str, output_format: Optional[str] = None -) -> str: - """Return appropriate header line for selected output format - :param output_format: output format from string parser - :param linesep: correct linesep for chosen destination - :return: a one-line string""" - # pylint: disable=R1705 - if output_format == "csv": - return ( - ",".join( - ( - "File", - "Rg", - "Rg StDev", - "I(0)", - "I(0) StDev", - "First point", - "Last point", - "Quality,Aggregated", - ) - ) - + linesep - ) - else: - return "" - - -def rg_result_to_output_line( - rg_result: RG_RESULT, - afile: Path, - linesep: str, - output_format: Optional[str] = None, -) -> str: - """Return result line formatted according to selected output format - :param rg_result: Result of an rg fit - :param afile: The name of the file that was processed - :param output_format: The chosen output format - :param linesep: correct linesep for chosen destination - :return: a one-line string including linesep""" - # pylint: disable=R1705 - if output_format == "csv": - return ( - ",".join( - [ - f"{afile}", - f"{rg_result.Rg:6.4f}", - f"{rg_result.sigma_Rg:6.4f}", - f"{rg_result.I0:6.4f}", - f"{rg_result.sigma_I0:6.4f}", - f"{rg_result.start_point:3}", - f"{rg_result.end_point:3}", - f"{rg_result.quality:6.4f}", - f"{rg_result.aggregated:6.4f}", - ] - ) - + linesep - ) - elif output_format == "ssv": - return ( - " ".join( - [ - f"{rg_result.Rg:6.4f}", - f"{rg_result.sigma_Rg:6.4f}", - f"{rg_result.I0:6.4f}", - f"{rg_result.sigma_I0:6.4f}", - f"{rg_result.start_point:3}", - f"{rg_result.end_point:3}", - f"{rg_result.quality:6.4f}", - f"{rg_result.aggregated:6.4f}", - f"{afile}", - ] - ) - + linesep - ) - else: - return f"{afile} {rg_result}{linesep}" - - -def run_guinier_fit( - fit_function: Callable[[ndarray], RG_RESULT], - parser: GuinierParser, - logger: logging.Logger, -) -> None: - """ - reads in the data, performs the guinier fit with a given algotithm and - creates the - :param fit_function : A Guinier fit function data -> RG_RESULT - :param parser: a function that returns the output of argparse.parse() - :param logger: a Logger - """ - args = 
parser.parse_args() - set_logging_level(args.verbose) - files = collect_files(args.file) - logger.debug("%s input files", len(files)) - - with get_output_destination(args.output) as output_destination: - linesep = get_linesep(output_destination) - - output_destination.write( - get_guinier_header( - linesep, - args.format, - ) - ) - - for afile in files: - logger.info("Processing %s", afile) - try: - data = load_scattering_data(afile) - except OSError: - logger.error("Unable to read file %s", afile) - except ValueError: - logger.error("Unable to parse file %s", afile) - else: - if args.unit == "Å": - data = convert_inverse_angstrom_to_nanometer(data) - try: - rg_result = fit_function(data) - except ( - InsufficientDataError, - NoGuinierRegionError, - ValueError, - IndexError, - ) as err: - sys.stderr.write( - f"{afile}, {err.__class__.__name__}: {err} {os_linesep}" - ) - else: - res = rg_result_to_output_line( - rg_result, - afile, - linesep, - args.format, - ) - output_destination.write(res) - output_destination.flush() diff --git a/freesas/invariants.py b/freesas/invariants.py deleted file mode 100644 index 76540ec..0000000 --- a/freesas/invariants.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -""" -This module is mainly about the calculation of the Rambo-Tainer invariant -described in: - -https://dx.doi.org/10.1038%2Fnature12070 - -Some formula taken from Putnam et al, 2007, Table 1 in the review -""" -__authors__ = ["Martha E. Brennich", "J. 
Kieffer"] -__license__ = "MIT" -__date__ = "10/06/2020" - -import logging -logger = logging.getLogger(__name__) -import numpy -from .collections import RT_RESULT - - -def extrapolate(data, guinier): - """Extrapolate SAS data according to the Guinier fit until q=0 - Uncertainties are extrapolated (linearly) from the Guinier region - - :param data: SAS data in q,I,dI format - :param guinier: result of a Guinier fit - :return: extrapolated SAS data - """ - - dq = data[1, 0] - data[0, 0] - qmin = data[guinier.start_point, 0] - - q_low = numpy.arange(0, qmin, dq) - # Extrapolate I from Guinier approximation: - I_low = guinier.I0 * numpy.exp(-(q_low**2 * guinier.Rg**2) / 3.0) - # Extrapolate dI from Guinier region: - range_ = slice(guinier.start_point, guinier.end_point+1) - slope, intercept = numpy.polyfit(data[range_, 0], data[range_, 2], deg=1) - dI_low = abs(q_low*slope + intercept) - # Now wrap-up - data_low = numpy.vstack((q_low, I_low, dI_low)).T - return numpy.concatenate((data_low, data[guinier.start_point:])) - - -def calc_Porod(data, guinier): - """Calculate the particle volume according to Porod's formula: - - V = 2*π²I₀²/(sum_q I(q)q² dq) - - Formula from Putnam's review, 2007, table 1 - Intensities are extrapolated to q=0 using Guinier fit. - - :param data: SAS data in q,I,dI format - :param Guinier: result of a Guinier fit (instance of RT_RESULT) - :return: Volume calculated according to Porrod's formula - """ - q, I, dI = extrapolate(data, guinier).T - - denom = numpy.trapz(I*q**2, q) - volume = 2*numpy.pi**2*guinier.I0 / denom - return volume - - -def calc_Vc(data, Rg, dRg, I0, dI0, imin): - """Calculates the Rambo-Tainer invariant Vc, including extrapolation to q=0 - - :param data: SAS data in q,I,dI format, cropped to maximal q that should be used for calculation (normally 2 nm-1) - :param Rg,dRg,I0,dI0: results from Guinier approximation/autorg - :param imin: minimal index of the Guinier range, below that index data will be extrapolated by the Guinier approximation - :returns: Vc and an error estimate based on non-correlated error propagation - """ - dq = data[1, 0] - data[0, 0] - qmin = data[imin, 0] - qlow = numpy.arange(0, qmin, dq) - - lowqint = numpy.trapz((qlow * I0 * numpy.exp(-(qlow * qlow * Rg * Rg) / 3.0)), qlow) - dlowqint = numpy.trapz(qlow * numpy.sqrt((numpy.exp(-(qlow * qlow * Rg * Rg) / 3.0) * dI0) ** 2 + ((I0 * 2.0 * (qlow * qlow) * Rg / 3.0) * numpy.exp(-(qlow * qlow * Rg * Rg) / 3.0) * dRg) ** 2), qlow) - vabs = numpy.trapz(data[imin:, 0] * data[imin:, 1], data[imin:, 0]) - dvabs = numpy.trapz(data[imin:, 0] * data[imin:, 2], data[imin:, 0]) - vc = I0 / (lowqint + vabs) - dvc = (dI0 / I0 + (dlowqint + dvabs) / (lowqint + vabs)) * vc - return (vc, dvc) - - -def calc_Rambo_Tainer(data, - guinier, qmax=2.0): - """calculates the invariants Vc and Qr from the Rambo & Tainer 2013 Paper, - also the the mass estimate based on Qr for proteins - - :param data: data in q,I,dI format, q in nm^-1 - :param guinier: RG_RESULT instance with result from the Guinier fit - :param qmax: maximum q-value for the calculation in nm^-1 - @return: dict with Vc, Qr and mass plus errors - """ - scale_prot = 1.0 / 0.1231 - power_prot = 1.0 - - imax = abs(data[:, 0] - qmax).argmin() - if (imax <= guinier.start_point) or (guinier.start_point < 0): # unlikely but can happened - logger.error("Guinier region start too late for Rambo_Tainer invariants calculation") - return None - vc = calc_Vc(data[:imax, :], guinier.Rg, guinier.sigma_Rg, guinier.I0, guinier.sigma_I0, guinier.start_point) - 
- qr = vc[0] ** 2 / (guinier.Rg) - mass = scale_prot * qr ** power_prot - - dqr = qr * (guinier.sigma_Rg / guinier.Rg + 2 * ((vc[1]) / (vc[0]))) - dmass = mass * dqr / qr - - return RT_RESULT(vc[0], vc[1], qr, dqr, mass, dmass) diff --git a/freesas/model.py b/freesas/model.py deleted file mode 100644 index fd367bc..0000000 --- a/freesas/model.py +++ /dev/null @@ -1,315 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 -from __future__ import print_function - -__author__ = "Guillaume" -__license__ = "MIT" -__copyright__ = "2015, ESRF" - -import os -from math import sqrt -import threading -import six -import numpy -try: - from . import _distance -except ImportError: - _distance = None -from . import transformations - - -def delta_expand(vec1, vec2): - """Create a 2d array with the difference vec1[i]-vec2[j] - - :param vec1, vec2: 1d-array - :return v1 - v2: difference for any element of v1 and v2 (i.e a 2D array) - """ - v1 = numpy.ascontiguousarray(vec1) - v2 = numpy.ascontiguousarray(vec2) - v1.shape = -1, 1 - v2.shape = 1, -1 - v1.strides = v1.strides[0], 0 - v2.strides = 0, v2.strides[-1] - return v1 - v2 - - -class SASModel: - """ - Tools for Dummy Atoms Model manipulation - """ - - def __init__(self, molecule=None): - """ - :param molecule: if str, name of a pdb file, else if 2d-array, coordinates of atoms of a molecule - """ - if isinstance(molecule, (six.text_type, six.binary_type)) and os.path.exists(molecule): - self.read(molecule) - else: - self.atoms = molecule if molecule is not None else [] # initial coordinates of each dummy atoms of the molecule, fourth column full of one for the transformation matrix - self.header = "" # header of the PDB file - self.rfactor = None - self.radius = 1.0 # unused at the moment - self.com = [] - self._fineness = None - self._Rg = None - self._Dmax = None - self.inertensor = [] - self.can_param = [] - self.enantiomer = None # symmetry used on the molecule - self._sem = threading.Semaphore() - - def __repr__(self): - return "SAS model with %i atoms" % len(self.atoms) - - def read(self, filename): - """ - Read the PDB file, - extract coordinates of each dummy atom, - extract the R-factor of the model, coordinates of each dummy atom and pdb file header. - - :param filename: name of the pdb file to read - """ - header = [] - atoms = [] - with open(filename) as fd: - for line in fd: - if line.startswith("ATOM"): - x = float(line[30:38]) - y = float(line[38:46]) - z = float(line[46:54]) - atoms.append([x, y, z]) - if line.startswith("REMARK 265 Final R-factor"): # very dependent of the pdb file format ! - self.rfactor = float(line[43:56]) - header.append(line) - self.header = header - atom3 = numpy.array(atoms) - self.atoms = numpy.append(atom3, numpy.ones((atom3.shape[0], 1), dtype="float"), axis=1) - - def save(self, filename): - """ - Save the position of each dummy atom in a PDB file. - - :param filename: name of the pdb file to write - """ - nr = 0 - self.atoms = numpy.delete(self.atoms, 3, 1) - with open(filename, "w") as pdbout: - for line in self.header: - if line.startswith("ATOM"): - if nr < self.atoms.shape[0]: - line = line[:30] + "%8.3f%8.3f%8.3f" % tuple(self.atoms[nr]) + line[54:] - else: - line = "" - nr += 1 - pdbout.write(line) - - def centroid(self): - """ - Calculate the position of the center of mass of the molecule. 
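        For example (synthetic coordinates), this is simply the column-wise mean::

            import numpy

            atoms = numpy.array([[0.0, 0.0, 0.0, 1.0],
                                 [2.0, 0.0, 0.0, 1.0],
                                 [1.0, 3.0, 0.0, 1.0]])  # x, y, z, 1
            com = atoms[:, 0:3].mean(axis=0)             # -> array([1., 1., 0.])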
- - :return self.com: 1d array, coordinates of the center of mass of the molecule - """ - mol = self.atoms[:, 0:3] - self.com = mol.mean(axis=0) - return self.com - - def inertiatensor(self): - """ - calculate the inertia tensor of the protein - - :return self.inertensor: inertia tensor of the molecule - """ - if len(self.com) == 0: - self.com = self.centroid() - - mol = self.atoms[:, 0:3] - self.com - self.inertensor = numpy.empty((3, 3), dtype="float") - delta_kron = lambda i, j: 1 if i == j else 0 - for i in range(3): - for j in range(i, 3): - self.inertensor[i, j] = self.inertensor[j, i] = (delta_kron(i, j) * (mol ** 2).sum(axis=1) - (mol[:, i] * mol[:, j])).sum() / mol.shape[0] - return self.inertensor - - def canonical_translate(self): - """ - Calculate the translation matrix to translate the center of mass of the molecule on the origin of the base. - - :return trans: translation matrix - """ - if len(self.com) == 0: - self.com = self.centroid() - - trans = numpy.identity(4, dtype="float") - trans[0:3, 3] = -self.com - return trans - - def canonical_rotate(self): - """ - Calculate the rotation matrix to align inertia momentum of the molecule on principal axis. - - :return rot: rotation matrix det==1 - """ - if len(self.inertensor) == 0: - self.inertensor = self.inertiatensor() - - w, v = numpy.linalg.eigh(self.inertensor) - mat = v[:, w.argsort()] - - rot = numpy.zeros((4, 4), dtype="float") - rot[3, 3] = 1 - rot[:3, :3] = mat.T - - det = numpy.linalg.det(mat) - if det > 0: - self.enantiomer = [1, 1, 1] - else: - self.enantiomer = [-1, -1, -1] - mirror = numpy.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype="float") - rot = numpy.dot(mirror, rot) - - return rot - - def canonical_parameters(self): - """ - Save the 6 canonical parameters of the initial molecule: - x0, y0, z0, the position of the center of mass - phi, theta, psi, the three Euler angles of the canonical rotation (axis:x,y',z'') - """ - rot = self.canonical_rotate() - trans = self.canonical_translate() - - angles = transformations.euler_from_matrix(rot) - shift = transformations.translation_from_matrix(trans) - self.can_param = [shift[0], shift[1], shift[2], angles[0], angles[1], angles[2]] - - def calc_invariants(self, use_cython=True): - """ - Calculate the invariants of the structure: - * fineness, ie. 
average distance between an atoms and its nearest neighbor - * radius of gyration of the model - * diameter of the model - - :return invariants: 3-tuple containing (fineness, Rg, Dmax) - """ - if _distance and use_cython: - return _distance.calc_invariants(self.atoms) - - else: - size = self.atoms.shape[0] - D = delta_expand(self.atoms[:, 0], self.atoms[:, 0]) ** 2 + delta_expand(self.atoms[:, 1], self.atoms[:, 1]) ** 2 + delta_expand(self.atoms[:, 2], self.atoms[:, 2]) ** 2 - Rg = sqrt(D.sum() / 2.0) / size - Dmax = sqrt(D.max()) - d12 = (D.max() * numpy.eye(size) + D).min(axis=0).mean() - fineness = sqrt(d12) - return fineness, Rg, Dmax - - @property - def fineness(self): - if self._fineness is None: - with self._sem: - if self._fineness is None: - self._fineness, self._Rg, self._Dmax = self.calc_invariants() - return self._fineness - - @property - def Rg(self): - if self._Rg is None: - with self._sem: - if self._Rg is None: - self._fineness, self._Rg, self._Dmax = self.calc_invariants() - return self._Rg - - @property - def Dmax(self): - if self._Dmax is None: - with self._sem: - if self._Dmax is None: - self._fineness, self._Rg, self._Dmax = self.calc_invariants() - return self._Dmax - - def dist(self, other, molecule1, molecule2, use_cython=True): - """ - Calculate the distance with another model. - - :param self,other: two SASModel - :param molecule1: 2d array of the position of each atom of the first molecule - :param molecule2: 2d array of the position of each atom of the second molecule - :return D: NSD between the 2 molecules, in their position molecule1 and molecule2 - """ - if _distance and use_cython: - return _distance.calc_distance(molecule1, molecule2, self.fineness, other.fineness) - - else: - mol1 = molecule1[:, 0:3] - mol2 = molecule2[:, 0:3] - - mol1x = mol1[:, 0] - mol1y = mol1[:, 1] - mol1z = mol1[:, 2] - mol1x.shape = mol1.shape[0], 1 - mol1y.shape = mol1.shape[0], 1 - mol1z.shape = mol1.shape[0], 1 - - mol2x = mol2[:, 0] - mol2y = mol2[:, 1] - mol2z = mol2[:, 2] - mol2x.shape = mol2.shape[0], 1 - mol2y.shape = mol2.shape[0], 1 - mol2z.shape = mol2.shape[0], 1 - - d2 = delta_expand(mol1x, mol2x) ** 2 + delta_expand(mol1y, mol2y) ** 2 + delta_expand(mol1z, mol2z) ** 2 - - D = (0.5 * ((1. / ((mol1.shape[0]) * other.fineness * other.fineness)) * (d2.min(axis=1).sum()) + (1. 
/ ((mol2.shape[0]) * self.fineness * self.fineness)) * (d2.min(axis=0)).sum())) ** 0.5 - return D - - def transform(self, param, symmetry, reverse=None): - """ - Calculate the new coordinates of each dummy atoms of the molecule after a transformation defined by six parameters and a symmetry - - :param param: 6 parameters of transformation (3 coordinates of translation, 3 Euler angles) - :param symmetry: list of three constants which define a symmetry to apply - :return mol: 2d array, coordinates after transformation - """ - mol = self.atoms - - sym = numpy.array([[symmetry[0], 0, 0, 0], [0, symmetry[1], 0, 0], [0, 0, symmetry[2], 0], [0, 0, 0, 1]], dtype="float") - if not reverse: - vect = numpy.array([param[0:3]]) - angles = (param[3:6]) - - translat1 = transformations.translation_matrix(vect) - rotation = transformations.euler_matrix(*angles) - translat2 = numpy.dot(numpy.dot(rotation, translat1), rotation.T) - transformation = numpy.dot(translat2, rotation) - - else: - vect = -numpy.array([param[0:3]]) - angles = (-param[5], -param[4], -param[3]) - - translat = transformations.translation_matrix(vect) - rotation = transformations.euler_matrix(*angles, axes="szyx") - transformation = numpy.dot(translat, rotation) - - mol = numpy.dot(transformation, mol.T) - mol = numpy.dot(sym, mol).T - return mol - - def dist_after_movement(self, param, other, symmetry): - """ - The first molecule, molref, is put on its canonical position. - The second one, mol2, is moved following the transformation selected - - :param param: list of 6 parameters for the transformation, 3 coordinates of translation and 3 Euler angles - :param symmetry: list of three constants which define a symmetry to apply - :return distance: the NSD between the first molecule and the second one after its movement - """ - if not self.can_param: - self.canonical_parameters() - - can_param1 = self.can_param - molref_can = self.transform(can_param1, [1, 1, 1]) # molecule reference put on its canonical position - - mol2_moved = other.transform(param, symmetry) # movement selected applied to mol2 - distance = self.dist(other, molref_can, mol2_moved) - - return distance diff --git a/freesas/plot.py b/freesas/plot.py deleted file mode 100644 index 3509274..0000000 --- a/freesas/plot.py +++ /dev/null @@ -1,562 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Functions to generating graphs related to SAS. -""" - -__authors__ = ["Jerome Kieffer"] -__license__ = "MIT" -__copyright__ = "2020, ESRF" -__date__ = "15/09/2022" - -import logging - -logger = logging.getLogger(__name__) -import numpy -from matplotlib.pyplot import subplots - - -def scatter_plot( - data, - guinier=None, - ift=None, - filename=None, - img_format="svg", - unit="nm", - title="Scattering curve", - ax=None, - labelsize=None, - fontsize=None, -): - """ - Generate a scattering plot I = f(q) in semi_log_y. 
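    A typical call (file names are placeholders)::

        import numpy
        from freesas.autorg import auto_guinier
        from freesas.plot import scatter_plot

        data = numpy.loadtxt("curve.dat")    # columns: q, I(q), err
        guinier = auto_guinier(data)
        fig = scatter_plot(data, guinier=guinier, filename="curve.svg")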
- - :param data: data read from an ASCII file, 3 column (q,I,err) - :param filename: name of the file where the cuve should be saved - :param img_format: image format - :param unit: Unit name for Rg and 1/q - :param guinier: output of autoRg - :param ift: converged instance of BIFT (output of auto_bift) - :param ax: subplot where the plot shall go in - :return: the matplotlib figure - """ - label_exp = "Experimental data" - label_guinier = "Guinier region" - label_ift = "BIFT extraplolated" - exp_color = "blue" - err_color = "lightblue" - guinier_color = "limegreen" - ift_color = "crimson" - assert data.ndim == 2 - assert data.shape[1] >= 2 - q = data.T[0] - I = data.T[1] - try: - err = data.T[2] - except: - err = None - if ax: - fig = ax.figure - else: - fig, ax = subplots() - - # Extend q to zero - delta_q = (q[-1] - q[0]) / (len(q) - 1) - extra_q = int(q[0] / delta_q) - first = q[0] - extra_q * delta_q - q_ext = numpy.linspace(first, q[-1], extra_q + len(q)) - - if guinier is None: - if ift is not None: - # best = ift.calc_stats()[0] - I0 = guinier.I0 - rg = guinier.rg - first_point = ift.high_start - last_point = ift.high.stop - else: - rg = I0 = first_point = last_point = None - else: - I0 = guinier.I0 - rg = guinier.Rg - first_point = guinier.start_point - last_point = guinier.end_point - - if (rg is None) and (ift is None): - if err is not None: - ax.errorbar( - q, - I, - err, - label=label_exp, - capsize=0, - color=exp_color, - ecolor=err_color, - ) - else: - ax.plot(q, I, label=label_exp, color="blue") - else: - q_guinier = q[first_point:last_point] - I_guinier = I0 * numpy.exp(-((q_guinier * rg) ** 2) / 3) - if err is not None: - ax.errorbar( - q, - I, - err, - label=label_exp, - capsize=0, - color=exp_color, - ecolor=err_color, - alpha=0.5, - ) - else: - ax.plot(q, I, label=label_exp, color=exp_color, alpha=0.5) - label_guinier += ": $R_g=$%.2f %s, $I_0=$%.2f" % (rg, unit, I0) - ax.plot( - q_guinier, - I_guinier, - label=label_guinier, - color=guinier_color, - linewidth=5, - ) - - if ift: - from ._bift import BIFT, StatsResult - - if isinstance(ift, BIFT): - stats = ift.calc_stats() - elif isinstance(ift, StatsResult): - stats = ift - else: - raise TypeError("ift is expected to be a BIFT object") - - r = stats.radius - T = numpy.outer(q_ext, r / numpy.pi) - T = (4 * numpy.pi * (r[-1] - r[0]) / (len(r) - 1)) * numpy.sinc(T) - p = stats.density_avg - label_ift += ": $D_{max}=$%.2f %s,\n $R_g=$%.2f %s, $I_0=$%.2f" % ( - stats.Dmax_avg, - unit, - stats.Rg_avg, - unit, - stats.I0_avg, - ) - ax.plot(q_ext, T.dot(p), label=label_ift, color=ift_color) - - ax.set_ylabel("$I(q)$ (log scale)", fontsize=fontsize) - ax.set_xlabel("$q$ (%s$^{-1}$)" % unit, fontsize=fontsize) - ax.set_title(title) - ax.set_yscale("log") - # ax.set_ylim(ymin=I.min() * 10, top=I.max() * 1.1) - - # Re-order labels ... 
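    # The block below rebuilds the legend in a fixed order (experimental data,
    # Guinier region, BIFT fit), keeping only the labels that were actually plotted.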
- crv, lab = ax.get_legend_handles_labels() - ordered_lab = [] - ordered_crv = [] - for l in [label_exp, label_guinier, label_ift]: - try: - idx = lab.index(l) - except: - continue - ordered_lab.append(lab[idx]) - ordered_crv.append(crv[idx]) - ax.legend(ordered_crv, ordered_lab, loc=3) - ax.tick_params(axis="x", labelsize=labelsize) - ax.tick_params(axis="y", labelsize=labelsize) - if filename: - if img_format: - fig.savefig(filename, format=img_format) - else: - fig.savefig(filename) - return fig - - -def kratky_plot( - data, - guinier, - filename=None, - img_format="svg", - unit="nm", - title="Dimensionless Kratky plot", - ax=None, - labelsize=None, - fontsize=None, -): - """ - Generate a Kratky plot q²Rg²I/I₀ = f(q·Rg) - - :param data: data read from an ASCII file, 3 column (q,I,err) - :param guinier: output of autoRg - :param filename: name of the file where the cuve should be saved - :param img_format: image format - :param unit: Unit name for Rg and 1/q - :param ax: subplot where the plot shall go in - :return: the matplotlib figure - """ - label = "Experimental data" - assert data.ndim == 2 - assert data.shape[1] >= 2 - q = data.T[0] - I = data.T[1] - try: - err = data.T[2] - except: - err = None - if ax: - fig = ax.figure - else: - fig, ax = subplots() - Rg = guinier.Rg - I0 = guinier.I0 - - xdata = q * Rg - ydata = xdata * xdata * I / I0 - if err is not None: - dy = xdata * xdata * err / I0 - dplot = ax.errorbar( - xdata, - ydata, - dy, - label=label, - capsize=0, - color="blue", - ecolor="lightblue", - ) - else: - dplot = ax.plot(xdata, ydata, label=label, color="blue") - ax.set_ylabel("$(qR_{g})^2 I/I_{0}$", fontsize=fontsize) - ax.set_xlabel("$qR_{g}$", fontsize=fontsize) - ax.legend(loc=1) - - ax.hlines( - 3.0 * numpy.exp(-1), - xmin=-0.05, - xmax=max(xdata), - color="0.75", - linewidth=1.0, - ) - ax.vlines( - numpy.sqrt(3.0), - ymin=-0.01, - ymax=max(ydata), - color="0.75", - linewidth=1.0, - ) - ax.set_xlim(left=-0.05, right=8.5) - ax.set_ylim(bottom=-0.01, top=(min(3.5, max(ydata)))) - ax.set_title(title) - # ax.legend([dplot[0]], [dplot[0].get_label()], loc=0) - ax.legend(loc=0) - ax.tick_params(axis="x", labelsize=labelsize) - ax.tick_params(axis="y", labelsize=labelsize) - - if filename: - if img_format: - fig.savefig(filename, format=img_format) - else: - fig.savefig(filename) - return fig - - -def guinier_plot( - data, - guinier, - filename=None, - img_format="png", - unit="nm", - ax=None, - labelsize=None, - fontsize=None, -): - """ - Generate a guinier plot: ln(I) = f(q²) - - :param data: data read from an ASCII file, 3 column (q,I,err) - :param guinier: A RG_RESULT object from AutoRg - :param filename: name of the file where the cuve should be saved - :param img_format: image format - :param: ax: subplot where to plot in - :return: the matplotlib figure - """ - assert data.ndim == 2 - assert data.shape[1] >= 2 - - q, I, err = data.T[:3] - - mask = (I > 0) & numpy.isfinite(I) & (q > 0) & numpy.isfinite(q) - if err is not None: - mask &= (err > 0.0) & numpy.isfinite(err) - mask = mask.astype(bool) - Rg = guinier.Rg - I0 = guinier.I0 - first_point = guinier.start_point - last_point = guinier.end_point - intercept = numpy.log(I0) - slope = -Rg * Rg / 3.0 - end = numpy.where(q > 1.5 / Rg)[0][0] - mask[end:] = False - - q2 = q[mask] ** 2 - logI = numpy.log(I[mask]) - - if ax: - fig = ax.figure - else: - fig, ax = subplots(figsize=(12, 10)) - if err is not None: - dlogI = err[mask] / logI - ax.errorbar( - q2, - logI, - dlogI, - label="Experimental curve", - capsize=0, - 
color="blue", - ecolor="lightblue", - alpha=0.5, - ) - else: - ax.plot( - q2[mask], - logI[mask], - label="Experimental curve", - color="blue", - alpha=0.5, - ) - # ax.plot(q2[first_point:last_point], logI[first_point:last_point], marker='D', markersize=5, label="guinier region") - xmin = q[first_point] ** 2 - xmax = q[last_point] ** 2 - ymax = numpy.log(I[first_point]) - ymin = numpy.log(I[last_point]) - dy = (ymax - ymin) / 2.0 - ax.vlines(xmin, ymin=ymin, ymax=ymax + dy, color="0.75", linewidth=1.0) - ax.vlines( - xmax, ymin=ymin - dy, ymax=ymin + dy, color="0.75", linewidth=1.0 - ) - ax.annotate( - "$(qR_{g})_{min}$=%.1f" % (Rg * q[first_point]), - (xmin, ymax + dy), - xytext=None, - xycoords="data", - textcoords="data", - ) - ax.annotate( - "$(qR_{g})_{max}$=%.1f" % (Rg * q[last_point]), - (xmax, ymin + dy), - xytext=None, - xycoords="data", - textcoords="data", - ) - ax.annotate( - "Guinier region", - (xmin, ymin - dy), - xytext=None, - xycoords="data", - textcoords="data", - ) - ax.plot( - q2[:end], - intercept + slope * q2[:end], - label="ln[$I(q)$] = %.2f %.2f * $q^2$" % (intercept, slope), - color="crimson", - ) - ax.set_ylabel("ln[$I(q)$]", fontsize=fontsize) - ax.set_xlabel("$q^2$ (%s$^{-2}$)" % unit, fontsize=fontsize) - ax.set_title("Guinier plot: $R_{g}=$%.2f %s $I_{0}=$%.2f" % (Rg, unit, I0)) - ax.legend() - ax.tick_params(axis="x", labelsize=labelsize) - ax.tick_params(axis="y", labelsize=labelsize) - - if filename: - if img_format: - fig.savefig(filename, format=img_format) - else: - fig.savefig(filename) - return fig - - -def density_plot( - ift, - filename=None, - img_format="png", - unit="nm", - ax=None, - labelsize=None, - fontsize=None, -): - """ - Generate a density plot p(r) - - :param ift: An IFT result comming out of BIFT - :param filename: name of the file where the cuve should be saved - :param img_format: image image format - :param ax: subplotib where to plot in - :return: the matplotlib figure - """ - if ax: - fig = ax.figure - else: - fig, ax = subplots(figsize=(12, 10)) - - from ._bift import BIFT, StatsResult - - if isinstance(ift, BIFT): - stats = ift.calc_stats() - elif isinstance(ift, StatsResult): - stats = ift - else: - raise TypeError("ift is expected to be a BIFT object") - - ax.errorbar( - ift.radius, - ift.density_avg, - ift.density_std, - label="BIFT: χ$_{r}^{2}=$%.2f\n $D_{max}=$%.2f %s\n $R_{g}=$%.2f %s\n $I_{0}=$%.2f" - % ( - stats.chi2r_avg, - stats.Dmax_avg, - unit, - stats.Rg_avg, - unit, - stats.I0_avg, - ), - capsize=0, - color="blue", - ecolor="lightblue", - ) - ax.set_ylabel("$p(r)$", fontsize=fontsize) - ax.set_xlabel("$r$ (%s)" % unit, fontsize=fontsize) - ax.set_title("Pair distribution function") - ax.legend() - ax.tick_params(axis="x", labelsize=labelsize) - ax.tick_params(axis="y", labelsize=labelsize) - - if filename: - if img_format: - fig.savefig(filename, format=img_format) - else: - fig.savefig(filename) - return fig - - -def plot_all( - data, - filename=None, - img_format=None, - unit="nm", - labelsize=None, - fontsize=None, -): - from . 
import bift, autorg - - try: - guinier = autorg.autoRg(data) - except autorg.InsufficientDataError: - raise - logger.debug(guinier) - try: - bo = bift.auto_bift(data, npt=100, scan_size=11, Dmax_over_Rg=3) - except ( - autorg.InsufficientDataError, - autorg.NoGuinierRegionError, - ValueError, - ): - raise - else: - ift = bo.calc_stats() - logger.debug(ift) - fig, ax = subplots(2, 2, figsize=(12, 10)) - scatter_plot( - data, - guinier=guinier, - ift=ift, - ax=ax[0, 0], - unit=unit, - labelsize=labelsize, - fontsize=fontsize, - ) - guinier_plot( - data, - guinier, - filename=None, - img_format=None, - unit=unit, - ax=ax[0, 1], - labelsize=labelsize, - fontsize=fontsize, - ) - kratky_plot( - data, - guinier, - filename=None, - img_format=None, - unit=unit, - ax=ax[1, 0], - labelsize=labelsize, - fontsize=fontsize, - ) - density_plot( - ift, - filename=None, - img_format=None, - unit=unit, - ax=ax[1, 1], - labelsize=labelsize, - fontsize=fontsize, - ) - if filename is not None: - if img_format: - fig.savefig(filename, format=img_format) - else: - fig.savefig(filename) - return fig - -def hplc_plot(hplc, - fractions = None, - title="Chromatogram", - filename=None, - img_format="png", - ax=None, - labelsize=None, - fontsize=None,): - """ - Generate an HPLC plot I=f(t) - - :param hplc: stack of diffraction data - :param fractions: list of 2tuple with first and last ndex if each fraction - :param filename: name of the file where the cuve should be saved - :param img_format: image image format - :param ax: subplotib where to plot in - :return: the matplotlib figure - """ - if ax: - fig = ax.figure - else: - fig, ax = subplots(figsize=(12, 10)) - data = [sum(i) if hasattr(i, '__iter__') else i for i in hplc] - ax.plot(data, label = "Chromatogram") - ax.set_xlabel("Elution (frame index)", fontsize=fontsize) - ax.set_ylabel("Summed intensities", fontsize=fontsize) - ax.set_title(title) - - ax.tick_params(axis="x", labelsize=labelsize) - ax.tick_params(axis="y", labelsize=labelsize) - if fractions is not None and len(fractions): - fractions.sort() - l = len(data) - idx = list(range(l)) - for start,stop in fractions: - start = int(min(l-1, max(0, start))) - stop = int(min(l-1, max(0, stop))) - ax.plot(idx[start:stop+1], - data[start:stop+1], - label=f"Fraction {start}-{stop}", - linewidth=10, - alpha=0.5) - ax.legend() - - if filename: - if img_format: - fig.savefig(filename, format=img_format) - else: - fig.savefig(filename) - return fig - diff --git a/freesas/sas_argparser.py b/freesas/sas_argparser.py deleted file mode 100644 index 50b2ef8..0000000 --- a/freesas/sas_argparser.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Generalized arg parser for freeSAS apps to ensure unified command line API. -""" - -__author__ = "Martha Brennich" -__license__ = "MIT" -__copyright__ = "2020, ESRF" -__date__ = "09/08/2020" - -import argparse -from pathlib import Path -from freesas import dated_version as freesas_version - - -def parse_unit(unit_input: str) -> str: - """ - Parser for sloppy acceptance of unit flags. - Current rules: - "A" ➔ "Å" - :param unit_input: unit flag as provided by the user - :return: cast of user input to known flag if sloppy rule defined, - else user input. - """ - if unit_input == "A": # pylint: disable=R1705 - return "Å" - else: - return unit_input - - -class SASParser: - - """ - Wrapper class for argparse ArgumentParser that provides predefined argument. 
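    A sketch of how an application could use it (program name and texts are
    illustrative)::

        parser = SASParser("free_tool", "description", "epilog")
        parser.add_file_argument(help_text="scattering files to process")
        parser.add_q_unit_argument()
        args = parser.parse_args()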
diff --git a/freesas/sas_argparser.py b/freesas/sas_argparser.py
deleted file mode 100644
index 50b2ef8..0000000
--- a/freesas/sas_argparser.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Generalized arg parser for freeSAS apps to ensure a unified command line API.
-"""
-
-__author__ = "Martha Brennich"
-__license__ = "MIT"
-__copyright__ = "2020, ESRF"
-__date__ = "09/08/2020"
-
-import argparse
-from pathlib import Path
-from freesas import dated_version as freesas_version
-
-
-def parse_unit(unit_input: str) -> str:
-    """
-    Parser for sloppy acceptance of unit flags.
-    Current rules:
-    "A" ➔ "Å"
-
-    :param unit_input: unit flag as provided by the user
-    :return: cast of user input to known flag if a sloppy rule is defined,
-             else the user input itself.
-    """
-    if unit_input == "A":  # pylint: disable=R1705
-        return "Å"
-    else:
-        return unit_input
-
-
-class SASParser:
-
-    """
-    Wrapper class for argparse ArgumentParser that provides predefined arguments.
-    """
-
-    usage = ""
-
-    def __init__(self, prog: str, description: str, epilog: str, **kwargs):
-        """
-        Create an argparse ArgumentParser with
-        - standardized usage text
-        - standardized version text
-        - verbose and version args added by default
-
-        :param prog: name of the executable
-        :param description: description param of argparse ArgumentParser
-        :param epilog: epilog param of argparse ArgumentParser
-        :param kwargs: additional kwargs for argparse ArgumentParser
-        """
-
-        self.usage = "%s [OPTIONS] FILES " % (prog)
-        version = "%s version %s from %s" % (
-            prog,
-            freesas_version.version,
-            freesas_version.date,
-        )
-
-        self.parser = argparse.ArgumentParser(
-            usage=self.usage, description=description, epilog=epilog, **kwargs
-        )
-        self.add_argument(
-            "-v",
-            "--verbose",
-            default=0,
-            help="switch to verbose mode",
-            action="count",
-        )
-        self.add_argument("-V", "--version", action="version", version=version)
-
-    def parse_args(self, *args, **kwargs):
-        """Wrapper for argparse parse_args()"""
-        return self.parser.parse_args(*args, **kwargs)
-
-    def add_argument(self, *args, **kwargs):
-        """Wrapper for argparse add_argument()"""
-        self.parser.add_argument(*args, **kwargs)
-
-    def add_file_argument(self, help_text: str):
-        """
-        Add positional file argument.
-
-        :param help_text: specific help text to be displayed
-        """
-        self.add_argument("file", metavar="FILE", nargs="+", help=help_text)
-
-    def add_q_unit_argument(self):
-        """
-        Add default argument for selecting the length unit of the input data,
-        between Å and nm. nm is the default.
-        """
-        self.add_argument(
-            "-u",
-            "--unit",
-            action="store",
-            choices=["nm", "Å", "A"],
-            help="Unit for q: inverse nm or Ångstrom?",
-            default="nm",
-            type=parse_unit,
-        )
-
-    def add_output_filename_argument(self):
-        """Add default argument for specifying the output filename."""
-        self.add_argument(
-            "-o",
-            "--output",
-            action="store",
-            help="Output filename",
-            default=None,
-            type=Path,
-        )
-
-    def add_output_data_format(self, *formats: str, default: str = None):
-        """Add default argument for specifying the output format."""
-        help_string = "Output format: " + ", ".join(formats)
-        self.add_argument(
-            "-f",
-            "--format",
-            action="store",
-            help=help_string,
-            default=default,
-            type=str,
-        )
-
-
-class GuinierParser:
-    """
-    Wrapper class for argparse ArgumentParser that provides predefined
-    arguments for auto_rg-like programs.
-    """
-
-    usage = ""
-
-    def __init__(self, prog: str, description: str, epilog: str, **kwargs):
-        """
-        Create an argparse ArgumentParser with
-        - standardized usage text
-        - standardized version text
-        - verbose and version args added by default
-
-        :param prog: name of the executable
-        :param description: description param of argparse ArgumentParser
-        :param epilog: epilog param of argparse ArgumentParser
-        :param kwargs: additional kwargs for argparse ArgumentParser
-        """
-
-        file_help_text = "dat files of the scattering curves"
-        self.parser = SASParser(
-            prog=prog, description=description, epilog=epilog, **kwargs
-        )
-        self.parser.add_file_argument(help_text=file_help_text)
-        self.parser.add_output_filename_argument()
-        self.parser.add_output_data_format(
-            "native", "csv", "ssf", default="native"
-        )
-        self.parser.add_q_unit_argument()
-        self.usage = self.parser.usage
-
-    def parse_args(self, *args, **kwargs):
-        """Wrapper for SASParser parse_args()"""
-        return self.parser.parse_args(*args, **kwargs)
-
-    def add_argument(self, *args, **kwargs):
-        """Wrapper for SASParser add_argument()"""
-        self.parser.add_argument(*args, **kwargs)
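For context, a minimal sketch of how these wrappers are driven (GuinierParser wraps SASParser, so this exercises both); the program name and file names are illustrative only:

    from freesas.sas_argparser import GuinierParser

    parser = GuinierParser(prog="free_rg",
                           description="compute the radius of gyration",
                           epilog="report bugs to the tracker")
    # file, output, format and unit arguments come predefined;
    # parse_unit maps the sloppy "A" flag to "Å"
    args = parser.parse_args(["curve1.dat", "curve2.dat", "-u", "A"])
    print(args.file, args.unit)   # ['curve1.dat', 'curve2.dat'] Å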
diff --git a/freesas/sasio.py b/freesas/sasio.py
deleted file mode 100644
index cb7084a..0000000
--- a/freesas/sasio.py
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Project: FreeSAS
-# https://github.com/kif/freesas
-#
-# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
-#
-# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
-#
-
-"""
-Contains helper functions for loading SAS data from different sources.
-"""
-__authors__ = ["Martha Brennich"]
-__contact__ = "martha.brennich@googlemail.com"
-__license__ = "MIT"
-__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
-__date__ = "19/09/2022"
-__status__ = "development"
-__docformat__ = "restructuredtext"
-
-import io
-from typing import List, Union
-from os import PathLike
-from numpy import loadtxt, array, ndarray
-
-PathType = Union[PathLike, str, bytes, io.StringIO, io.BytesIO]
-
-
-def load_scattering_data(filename: PathType) -> ndarray:
-    """
-    Load scattering data q, I, err into a numpy array.
-
-    :param filename: ASCII file, 3 columns (q, I, err)
-    :return: numpy array with 3 columns (q, I, err)
-    """
-    try:
-        data = loadtxt(filename)
-    except OSError as err:
-        raise OSError("File could not be read.") from err
-    except ValueError:
-        text = None
-        if isinstance(filename, (io.StringIO, io.BytesIO)):
-            filename.seek(0)
-            text = filename.readlines()
-        else:
-            try:
-                with open(filename) as data_file:
-                    text = data_file.readlines()
-            except OSError as err:
-                raise OSError("File could not be read.") from err
-        if text is not None:
-            try:
-                data = parse_ascii_data(text, number_of_columns=3)
-            except ValueError:
-                raise ValueError(
-                    "File does not seem to be in the format q, I, err."
-                )
-    return data
-
-
-def parse_ascii_data(
-    input_file_text: List[str], number_of_columns: int
-) -> ndarray:
-    """
-    Parse data from an ASCII file into an N column numpy array
-
-    :param input_file_text: List containing one line of input data per element
-    :param number_of_columns: Expected number of columns in the data file
-    :return: numpy array with number_of_columns columns
-    """
-    data = []
-    for line in input_file_text:
-        split = line.split()
-        if len(split) == number_of_columns:
-            try:
-                data.append([float(x) for x in split])
-            except ValueError as err:
-                if "could not convert string to float" in err.args[0]:
-                    pass
-                else:
-                    raise
-    if not data:
-        raise ValueError
-    return array(data)
-
-
-def convert_inverse_angstrom_to_nanometer(
-    data_in_inverse_angstrom: ndarray,
-) -> ndarray:
-    """
-    Convert data with q in 1/Å to 1/nm.
-
-    :param data_in_inverse_angstrom: numpy array in the format
-                                     (q_in_inverse_Angstrom, I, err)
-    :return: numpy array with 3 columns (q_in_inverse_nm, I, err)
-    """
-    q_in_angstrom, intensity, err = data_in_inverse_angstrom.T
-    return array([q_in_angstrom * 10.0, intensity, err]).T
diff --git a/freesas/transformations.py b/freesas/transformations.py
deleted file mode 100644
index 7eea6a0..0000000
--- a/freesas/transformations.py
+++ /dev/null
@@ -1,1918 +0,0 @@
-# -*- coding: utf-8 -*-
-# transformations.py
-
-# Copyright (c) 2006-2015, Christoph Gohlke
-# Copyright (c) 2006-2015, The Regents of the University of California
-# Produced at the Laboratory for Fluorescence Dynamics
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-#   notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-#   notice, this list of conditions and the following disclaimer in the
-#   documentation and/or other materials provided with the distribution.
-# * Neither the name of the copyright holders nor the names of any
-#   contributors may be used to endorse or promote products derived
-#   from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-"""Homogeneous Transformation Matrices and Quaternions.
-
-A library for calculating 4x4 matrices for translating, rotating, reflecting,
-scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
-3D homogeneous coordinates as well as for converting between rotation matrices,
-Euler angles, and quaternions. Also includes an Arcball control object and
-functions to decompose transformation matrices.
- -:Author: - `Christoph Gohlke `_ - -:Organization: - Laboratory for Fluorescence Dynamics, University of California, Irvine - -:Version: 2015.03.19 - -Requirements ------------- -* `CPython 2.7 or 3.4 `_ -* `Numpy 1.9 `_ -* `Transformations.c 2015.03.19 `_ - (recommended for speedup of some functions) - -Notes ------ -The API is not stable yet and is expected to change between revisions. - -This Python code is not optimized for speed. Refer to the transformations.c -module for a faster implementation of some functions. - -Documentation in HTML format can be generated with epydoc. - -Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using -numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using -numpy.dot(M, v) for shape (4, \*) column vectors, respectively -numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points"). - -This module follows the "column vectors on the right" and "row major storage" -(C contiguous) conventions. The translation components are in the right column -of the transformation matrix, i.e. M[:3, 3]. -The transpose of the transformation matrices may have to be used to interface -with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16]. - -Calculations are carried out with numpy.float64 precision. - -Vector, point, quaternion, and matrix function arguments are expected to be -"array like", i.e. tuple, list, or numpy arrays. - -Return types are numpy arrays unless specified otherwise. - -Angles are in radians unless specified otherwise. - -Quaternions w+ix+jy+kz are represented as [w, x, y, z]. - -A triple of Euler angles can be applied/interpreted in 24 ways, which can -be specified using a 4 character string or encoded 4-tuple: - - *Axes 4-string*: e.g. 'sxyz' or 'ryxy' - - - first character : rotations are applied to 's'tatic or 'r'otating frame - - remaining characters : successive rotation axis 'x', 'y', or 'z' - - *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) - - - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. - - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed - by 'z', or 'z' is followed by 'x'. Otherwise odd (1). - - repetition : first and last axis are same (1) or different (0). - - frame : rotations are applied to static (0) or rotating (1) frame. - -Other Python packages and modules for 3D transformations and quaternions: - -* `Transforms3d `_ - includes most code of this module. -* `Blender.mathutils `_ -* `numpy-dtypes `_ - -References ----------- -(1) Matrices and transformations. Ronald Goldman. - In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. -(2) More matrices and transformations: shear and pseudo-perspective. - Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. -(3) Decomposing a matrix into simple transformations. Spencer Thomas. - In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. -(4) Recovering the data from the transformation matrix. Ronald Goldman. - In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. -(5) Euler angle conversion. Ken Shoemake. - In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. -(6) Arcball rotation control. Ken Shoemake. - In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. -(7) Representing attitude: Euler angles, unit quaternions, and rotation - vectors. James Diebel. 2006. -(8) A discussion of the solution for the best rotation to relate two sets - of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. 
-(9) Closed-form solution of absolute orientation using unit quaternions. - BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642. -(10) Quaternions. Ken Shoemake. - http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf -(11) From quaternion to matrix and back. JMP van Waveren. 2005. - http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm -(12) Uniform random rotations. Ken Shoemake. - In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. -(13) Quaternion in molecular modeling. CFF Karney. - J Mol Graph Mod, 25(5):595-604 -(14) New method for extracting the quaternion from a rotation matrix. - Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087. -(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann. - Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130. -(16) Column Vectors vs. Row Vectors. - http://steve.hollasch.net/cgindex/math/matrix/column-vec.html - -Examples --------- ->>> alpha, beta, gamma = 0.123, -1.234, 2.345 ->>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] ->>> I = identity_matrix() ->>> Rx = rotation_matrix(alpha, xaxis) ->>> Ry = rotation_matrix(beta, yaxis) ->>> Rz = rotation_matrix(gamma, zaxis) ->>> R = concatenate_matrices(Rx, Ry, Rz) ->>> euler = euler_from_matrix(R, 'rxyz') ->>> numpy.allclose([alpha, beta, gamma], euler) -True ->>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') ->>> is_same_transform(R, Re) -True ->>> al, be, ga = euler_from_matrix(Re, 'rxyz') ->>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) -True ->>> qx = quaternion_about_axis(alpha, xaxis) ->>> qy = quaternion_about_axis(beta, yaxis) ->>> qz = quaternion_about_axis(gamma, zaxis) ->>> q = quaternion_multiply(qx, qy) ->>> q = quaternion_multiply(q, qz) ->>> Rq = quaternion_matrix(q) ->>> is_same_transform(R, Rq) -True ->>> S = scale_matrix(1.23, origin) ->>> T = translation_matrix([1, 2, 3]) ->>> Z = shear_matrix(beta, xaxis, origin, zaxis) ->>> R = random_rotation_matrix(numpy.random.rand(3)) ->>> M = concatenate_matrices(T, R, Z, S) ->>> scale, shear, angles, trans, persp = decompose_matrix(M) ->>> numpy.allclose(scale, 1.23) -True ->>> numpy.allclose(trans, [1, 2, 3]) -True ->>> numpy.allclose(shear, [0, math.tan(beta), 0]) -True ->>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) -True ->>> M1 = compose_matrix(scale, shear, angles, trans, persp) ->>> is_same_transform(M, M1) -True ->>> v0, v1 = random_vector(3), random_vector(3) ->>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1)) ->>> v2 = numpy.dot(v0, M[:3,:3].T) ->>> numpy.allclose(unit_vector(v1), unit_vector(v2)) -True - -""" - -from __future__ import division, print_function - -import math - -import numpy - -__version__ = '2015.03.19' -__docformat__ = 'restructuredtext en' -__all__ = () - - -def identity_matrix(): - """Return 4x4 identity/unit matrix. - - >>> I = identity_matrix() - >>> numpy.allclose(I, numpy.dot(I, I)) - True - >>> numpy.sum(I), numpy.trace(I) - (4.0, 4.0) - >>> numpy.allclose(I, numpy.identity(4)) - True - - """ - return numpy.identity(4) - - -def translation_matrix(direction): - """Return matrix to translate by direction vector. - - >>> v = numpy.random.random(3) - 0.5 - >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) - True - - """ - M = numpy.identity(4) - M[:3, 3] = direction[:3] - return M - - -def translation_from_matrix(matrix): - """Return translation vector from translation matrix. 
-
-    >>> v0 = numpy.random.random(3) - 0.5
-    >>> v1 = translation_from_matrix(translation_matrix(v0))
-    >>> numpy.allclose(v0, v1)
-    True
-
-    """
-    return numpy.array(matrix, copy=False)[:3, 3].copy()
-
-
-def reflection_matrix(point, normal):
-    """Return matrix to mirror at plane defined by point and normal vector.
-
-    >>> v0 = numpy.random.random(4) - 0.5
-    >>> v0[3] = 1.
-    >>> v1 = numpy.random.random(3) - 0.5
-    >>> R = reflection_matrix(v0, v1)
-    >>> numpy.allclose(2, numpy.trace(R))
-    True
-    >>> numpy.allclose(v0, numpy.dot(R, v0))
-    True
-    >>> v2 = v0.copy()
-    >>> v2[:3] += v1
-    >>> v3 = v0.copy()
-    >>> v3[:3] -= v1
-    >>> numpy.allclose(v2, numpy.dot(R, v3))
-    True
-
-    """
-    normal = unit_vector(normal[:3])
-    M = numpy.identity(4)
-    M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
-    M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
-    return M
-
-
-def reflection_from_matrix(matrix):
-    """Return mirror plane point and normal vector from reflection matrix.
-
-    >>> v0 = numpy.random.random(3) - 0.5
-    >>> v1 = numpy.random.random(3) - 0.5
-    >>> M0 = reflection_matrix(v0, v1)
-    >>> point, normal = reflection_from_matrix(M0)
-    >>> M1 = reflection_matrix(point, normal)
-    >>> is_same_transform(M0, M1)
-    True
-
-    """
-    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
-    # normal: unit eigenvector corresponding to eigenvalue -1
-    w, V = numpy.linalg.eig(M[:3, :3])
-    i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
-    if not len(i):
-        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
-    normal = numpy.real(V[:, i[0]]).squeeze()
-    # point: any unit eigenvector corresponding to eigenvalue 1
-    w, V = numpy.linalg.eig(M)
-    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
-    if not len(i):
-        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
-    point = numpy.real(V[:, i[-1]]).squeeze()
-    point /= point[3]
-    return point, normal
-
-
-def rotation_matrix(angle, direction, point=None):
-    """Return matrix to rotate about axis defined by point and direction.
-
-    >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
-    >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
-    True
-    >>> angle = (random.random() - 0.5) * (2*math.pi)
-    >>> direc = numpy.random.random(3) - 0.5
-    >>> point = numpy.random.random(3) - 0.5
-    >>> R0 = rotation_matrix(angle, direc, point)
-    >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
-    >>> is_same_transform(R0, R1)
-    True
-    >>> R0 = rotation_matrix(angle, direc, point)
-    >>> R1 = rotation_matrix(-angle, -direc, point)
-    >>> is_same_transform(R0, R1)
-    True
-    >>> I = numpy.identity(4, numpy.float64)
-    >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
-    True
-    >>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
-    ...                                               direc, point)))
-    True
-
-    """
-    sina = math.sin(angle)
-    cosa = math.cos(angle)
-    direction = unit_vector(direction[:3])
-    # rotation matrix around unit vector
-    R = numpy.diag([cosa, cosa, cosa])
-    R += numpy.outer(direction, direction) * (1.0 - cosa)
-    direction *= sina
-    R += numpy.array([[ 0.0,         -direction[2],  direction[1]],
-                      [ direction[2], 0.0,          -direction[0]],
-                      [-direction[1], direction[0],  0.0]])
-    M = numpy.identity(4)
-    M[:3, :3] = R
-    if point is not None:
-        # rotation not around origin
-        point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
-        M[:3, 3] = point - numpy.dot(R, point)
-    return M
-
-
-def rotation_from_matrix(matrix):
-    """Return rotation angle and axis from rotation matrix.
- - >>> angle = (random.random() - 0.5) * (2*math.pi) - >>> direc = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> R0 = rotation_matrix(angle, direc, point) - >>> angle, direc, point = rotation_from_matrix(R0) - >>> R1 = rotation_matrix(angle, direc, point) - >>> is_same_transform(R0, R1) - True - - """ - R = numpy.array(matrix, dtype=numpy.float64, copy=False) - R33 = R[:3, :3] - # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 - w, W = numpy.linalg.eig(R33.T) - i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - direction = numpy.real(W[:, i[-1]]).squeeze() - # point: unit eigenvector of R33 corresponding to eigenvalue of 1 - w, Q = numpy.linalg.eig(R) - i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no unit eigenvector corresponding to eigenvalue 1") - point = numpy.real(Q[:, i[-1]]).squeeze() - point /= point[3] - # rotation angle depending on direction - cosa = (numpy.trace(R33) - 1.0) / 2.0 - if abs(direction[2]) > 1e-8: - sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2] - elif abs(direction[1]) > 1e-8: - sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1] - else: - sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0] - angle = math.atan2(sina, cosa) - return angle, direction, point - - -def scale_matrix(factor, origin=None, direction=None): - """Return matrix to scale by factor around origin in direction. - - Use factor -1 for point symmetry. - - >>> v = (numpy.random.rand(4, 5) - 0.5) * 20 - >>> v[3] = 1 - >>> S = scale_matrix(-1.234) - >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) - True - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S = scale_matrix(factor, origin) - >>> S = scale_matrix(factor, origin, direct) - - """ - if direction is None: - # uniform scaling - M = numpy.diag([factor, factor, factor, 1.0]) - if origin is not None: - M[:3, 3] = origin[:3] - M[:3, 3] *= 1.0 - factor - else: - # nonuniform scaling - direction = unit_vector(direction[:3]) - factor = 1.0 - factor - M = numpy.identity(4) - M[:3, :3] -= factor * numpy.outer(direction, direction) - if origin is not None: - M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction - return M - - -def scale_from_matrix(matrix): - """Return scaling factor, origin and direction from scaling matrix. 
- - >>> factor = random.random() * 10 - 5 - >>> origin = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> S0 = scale_matrix(factor, origin) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - >>> S0 = scale_matrix(factor, origin, direct) - >>> factor, origin, direction = scale_from_matrix(S0) - >>> S1 = scale_matrix(factor, origin, direction) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - factor = numpy.trace(M33) - 2.0 - try: - # direction: unit eigenvector corresponding to eigenvalue factor - w, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0] - direction = numpy.real(V[:, i]).squeeze() - direction /= vector_norm(direction) - except IndexError: - # uniform scaling - factor = (factor + 2.0) / 3.0 - direction = None - # origin: any eigenvector corresponding to eigenvalue 1 - w, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - origin = numpy.real(V[:, i[-1]]).squeeze() - origin /= origin[3] - return factor, origin, direction - - -def projection_matrix(point, normal, direction=None, - perspective=None, pseudo=False): - """Return matrix to project onto plane defined by point and normal. - - Using either perspective point, projection direction, or none of both. - - If pseudo is True, perspective projections will preserve relative depth - such that Perspective = dot(Orthogonal, PseudoPerspective). - - >>> P = projection_matrix([0, 0, 0], [1, 0, 0]) - >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) - True - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> P1 = projection_matrix(point, normal, direction=direct) - >>> P2 = projection_matrix(point, normal, perspective=persp) - >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> is_same_transform(P2, numpy.dot(P0, P3)) - True - >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0]) - >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20 - >>> v0[3] = 1 - >>> v1 = numpy.dot(P, v0) - >>> numpy.allclose(v1[1], v0[1]) - True - >>> numpy.allclose(v1[0], 3-v1[1]) - True - - """ - M = numpy.identity(4) - point = numpy.array(point[:3], dtype=numpy.float64, copy=False) - normal = unit_vector(normal[:3]) - if perspective is not None: - # perspective projection - perspective = numpy.array(perspective[:3], dtype=numpy.float64, - copy=False) - M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) - M[:3, :3] -= numpy.outer(perspective, normal) - if pseudo: - # preserve relative depth - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) - else: - M[:3, 3] = numpy.dot(point, normal) * perspective - M[3, :3] = -normal - M[3, 3] = numpy.dot(perspective, normal) - elif direction is not None: - # parallel projection - direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) - scale = numpy.dot(direction, normal) - M[:3, :3] -= numpy.outer(direction, normal) / scale - M[:3, 3] = direction * (numpy.dot(point, normal) / scale) - else: - # orthogonal projection - M[:3, :3] -= numpy.outer(normal, normal) - M[:3, 3] = numpy.dot(point, normal) * 
normal - return M - - -def projection_from_matrix(matrix, pseudo=False): - """Return projection plane and perspective point from projection matrix. - - Return values are same as arguments for projection_matrix function: - point, normal, direction, perspective, and pseudo. - - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.random.random(3) - 0.5 - >>> direct = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(3) - 0.5 - >>> P0 = projection_matrix(point, normal) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, direct) - >>> result = projection_from_matrix(P0) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) - >>> result = projection_from_matrix(P0, pseudo=False) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) - >>> result = projection_from_matrix(P0, pseudo=True) - >>> P1 = projection_matrix(*result) - >>> is_same_transform(P0, P1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - w, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] - if not pseudo and len(i): - # point: any eigenvector corresponding to eigenvalue 1 - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - # direction: unit eigenvector corresponding to eigenvalue 0 - w, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 0") - direction = numpy.real(V[:, i[0]]).squeeze() - direction /= vector_norm(direction) - # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 - w, V = numpy.linalg.eig(M33.T) - i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] - if len(i): - # parallel projection - normal = numpy.real(V[:, i[0]]).squeeze() - normal /= vector_norm(normal) - return point, normal, direction, None, False - else: - # orthogonal projection, where normal equals direction vector - return point, direction, None, None, False - else: - # perspective projection - i = numpy.where(abs(numpy.real(w)) > 1e-8)[0] - if not len(i): - raise ValueError( - "no eigenvector not corresponding to eigenvalue 0") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - normal = - M[3, :3] - perspective = M[:3, 3] / numpy.dot(point[:3], normal) - if pseudo: - perspective -= normal - return point, normal, None, perspective, pseudo - - -def clip_matrix(left, right, bottom, top, near, far, perspective=False): - """Return matrix to obtain normalized device coordinates from frustum. - - The frustum bounds are axis-aligned along x (left, right), - y (bottom, top) and z (near, far). - - Normalized device coordinates are in range [-1, 1] if coordinates are - inside the frustum. - - If perspective is True the frustum is a truncated pyramid with the - perspective point at origin and direction along z axis, otherwise an - orthographic canonical view volume (a box). - - Homogeneous coordinates transformed by the perspective clip matrix - need to be dehomogenized (divided by w coordinate). 
- - >>> frustum = numpy.random.rand(6) - >>> frustum[1] += frustum[0] - >>> frustum[3] += frustum[2] - >>> frustum[5] += frustum[4] - >>> M = clip_matrix(perspective=False, *frustum) - >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) - array([-1., -1., -1., 1.]) - >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1]) - array([ 1., 1., 1., 1.]) - >>> M = clip_matrix(perspective=True, *frustum) - >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) - >>> v / v[3] - array([-1., -1., -1., 1.]) - >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1]) - >>> v / v[3] - array([ 1., 1., -1., 1.]) - - """ - if left >= right or bottom >= top or near >= far: - raise ValueError("invalid frustum") - if perspective: - if near <= _EPS: - raise ValueError("invalid frustum: near <= 0") - t = 2.0 * near - M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0], - [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0], - [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)], - [0.0, 0.0, -1.0, 0.0]] - else: - M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)], - [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)], - [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)], - [0.0, 0.0, 0.0, 1.0]] - return numpy.array(M) - - -def shear_matrix(angle, direction, point, normal): - """Return matrix to shear by angle along direction vector on shear plane. - - The shear plane is defined by a point and normal vector. The direction - vector must be orthogonal to the plane's normal vector. - - A point P is transformed by the shear matrix into P" such that - the vector P-P" is parallel to the direction vector and its extent is - given by the angle of P-P'-P", where P' is the orthogonal projection - of P onto the shear plane. - - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S = shear_matrix(angle, direct, point, normal) - >>> numpy.allclose(1, numpy.linalg.det(S)) - True - - """ - normal = unit_vector(normal[:3]) - direction = unit_vector(direction[:3]) - if abs(numpy.dot(normal, direction)) > 1e-6: - raise ValueError("direction and normal vectors are not orthogonal") - angle = math.tan(angle) - M = numpy.identity(4) - M[:3, :3] += angle * numpy.outer(direction, normal) - M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction - return M - - -def shear_from_matrix(matrix): - """Return shear angle, direction and plane from shear matrix. 
- - >>> angle = (random.random() - 0.5) * 4*math.pi - >>> direct = numpy.random.random(3) - 0.5 - >>> point = numpy.random.random(3) - 0.5 - >>> normal = numpy.cross(direct, numpy.random.random(3)) - >>> S0 = shear_matrix(angle, direct, point, normal) - >>> angle, direct, point, normal = shear_from_matrix(S0) - >>> S1 = shear_matrix(angle, direct, point, normal) - >>> is_same_transform(S0, S1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False) - M33 = M[:3, :3] - # normal: cross independent eigenvectors corresponding to the eigenvalue 1 - w, V = numpy.linalg.eig(M33) - i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0] - if len(i) < 2: - raise ValueError("no two linear independent eigenvectors found %s" % w) - V = numpy.real(V[:, i]).squeeze().T - lenorm = -1.0 - for i0, i1 in ((0, 1), (0, 2), (1, 2)): - n = numpy.cross(V[i0], V[i1]) - w = vector_norm(n) - if w > lenorm: - lenorm = w - normal = n - normal /= lenorm - # direction and angle - direction = numpy.dot(M33 - numpy.identity(3), normal) - angle = vector_norm(direction) - direction /= angle - angle = math.atan(angle) - # point: eigenvector corresponding to eigenvalue 1 - w, V = numpy.linalg.eig(M) - i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] - if not len(i): - raise ValueError("no eigenvector corresponding to eigenvalue 1") - point = numpy.real(V[:, i[-1]]).squeeze() - point /= point[3] - return angle, direction, point, normal - - -def decompose_matrix(matrix): - """Return sequence of transformations from transformation matrix. - - matrix : array_like - Non-degenerative homogeneous transformation matrix - - Return tuple of: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - Raise ValueError if matrix is of wrong type or degenerative. 
- - >>> T0 = translation_matrix([1, 2, 3]) - >>> scale, shear, angles, trans, persp = decompose_matrix(T0) - >>> T1 = translation_matrix(trans) - >>> numpy.allclose(T0, T1) - True - >>> S = scale_matrix(0.123) - >>> scale, shear, angles, trans, persp = decompose_matrix(S) - >>> scale[0] - 0.123 - >>> R0 = euler_matrix(1, 2, 3) - >>> scale, shear, angles, trans, persp = decompose_matrix(R0) - >>> R1 = euler_matrix(*angles) - >>> numpy.allclose(R0, R1) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=True).T - if abs(M[3, 3]) < _EPS: - raise ValueError("M[3, 3] is zero") - M /= M[3, 3] - P = M.copy() - P[:, 3] = 0.0, 0.0, 0.0, 1.0 - if not numpy.linalg.det(P): - raise ValueError("matrix is singular") - - scale = numpy.zeros((3, )) - shear = [0.0, 0.0, 0.0] - angles = [0.0, 0.0, 0.0] - - if any(abs(M[:3, 3]) > _EPS): - perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) - M[:, 3] = 0.0, 0.0, 0.0, 1.0 - else: - perspective = numpy.array([0.0, 0.0, 0.0, 1.0]) - - translate = M[3, :3].copy() - M[3, :3] = 0.0 - - row = M[:3, :3].copy() - scale[0] = vector_norm(row[0]) - row[0] /= scale[0] - shear[0] = numpy.dot(row[0], row[1]) - row[1] -= row[0] * shear[0] - scale[1] = vector_norm(row[1]) - row[1] /= scale[1] - shear[0] /= scale[1] - shear[1] = numpy.dot(row[0], row[2]) - row[2] -= row[0] * shear[1] - shear[2] = numpy.dot(row[1], row[2]) - row[2] -= row[1] * shear[2] - scale[2] = vector_norm(row[2]) - row[2] /= scale[2] - shear[1:] /= scale[2] - - if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: - numpy.negative(scale, scale) - numpy.negative(row, row) - - angles[1] = math.asin(-row[0, 2]) - if math.cos(angles[1]): - angles[0] = math.atan2(row[1, 2], row[2, 2]) - angles[2] = math.atan2(row[0, 1], row[0, 0]) - else: - #angles[0] = math.atan2(row[1, 0], row[1, 1]) - angles[0] = math.atan2(-row[2, 1], row[1, 1]) - angles[2] = 0.0 - - return scale, shear, angles, translate, perspective - - -def compose_matrix(scale=None, shear=None, angles=None, translate=None, - perspective=None): - """Return transformation matrix from sequence of transformations. - - This is the inverse of the decompose_matrix function. 
- - Sequence of transformations: - scale : vector of 3 scaling factors - shear : list of shear factors for x-y, x-z, y-z axes - angles : list of Euler angles about static x, y, z axes - translate : translation vector along x, y, z axes - perspective : perspective partition of matrix - - >>> scale = numpy.random.random(3) - 0.5 - >>> shear = numpy.random.random(3) - 0.5 - >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) - >>> trans = numpy.random.random(3) - 0.5 - >>> persp = numpy.random.random(4) - 0.5 - >>> M0 = compose_matrix(scale, shear, angles, trans, persp) - >>> result = decompose_matrix(M0) - >>> M1 = compose_matrix(*result) - >>> is_same_transform(M0, M1) - True - - """ - M = numpy.identity(4) - if perspective is not None: - P = numpy.identity(4) - P[3, :] = perspective[:4] - M = numpy.dot(M, P) - if translate is not None: - T = numpy.identity(4) - T[:3, 3] = translate[:3] - M = numpy.dot(M, T) - if angles is not None: - R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') - M = numpy.dot(M, R) - if shear is not None: - Z = numpy.identity(4) - Z[1, 2] = shear[2] - Z[0, 2] = shear[1] - Z[0, 1] = shear[0] - M = numpy.dot(M, Z) - if scale is not None: - S = numpy.identity(4) - S[0, 0] = scale[0] - S[1, 1] = scale[1] - S[2, 2] = scale[2] - M = numpy.dot(M, S) - M /= M[3, 3] - return M - - -def orthogonalization_matrix(lengths, angles): - """Return orthogonalization matrix for crystallographic cell coordinates. - - Angles are expected in degrees. - - The de-orthogonalization matrix is the inverse. - - >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90]) - >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) - True - >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) - >>> numpy.allclose(numpy.sum(O), 43.063229) - True - - """ - a, b, c = lengths - angles = numpy.radians(angles) - sina, sinb, _ = numpy.sin(angles) - cosa, cosb, cosg = numpy.cos(angles) - co = (cosa * cosb - cosg) / (sina * sinb) - return numpy.array([ - [ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0], - [-a*sinb*co, b*sina, 0.0, 0.0], - [ a*cosb, b*cosa, c, 0.0], - [ 0.0, 0.0, 0.0, 1.0]]) - - -def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): - """Return affine transform matrix to register two point sets. - - v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous - coordinates, where ndims is the dimensionality of the coordinate space. - - If shear is False, a similarity transformation matrix is returned. - If also scale is False, a rigid/Euclidean transformation matrix - is returned. - - By default the algorithm by Hartley and Zissermann [15] is used. - If usesvd is True, similarity and Euclidean transformation matrices - are calculated by minimizing the weighted sum of squared deviations - (RMSD) according to the algorithm by Kabsch [8]. - Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9] - is used, which is slower when using this Python implementation. - - The returned matrix performs rotation, translation and uniform scaling - (if specified). - - >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]] - >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]] - >>> affine_matrix_from_points(v0, v1) - array([[ 0.14549, 0.00062, 675.50008], - [ 0.00048, 0.14094, 53.24971], - [ 0. , 0. , 1. 
]]) - >>> T = translation_matrix(numpy.random.random(3)-0.5) - >>> R = random_rotation_matrix(numpy.random.random(3)) - >>> S = scale_matrix(random.random()) - >>> M = concatenate_matrices(T, R, S) - >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 - >>> v0[3] = 1 - >>> v1 = numpy.dot(M, v0) - >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1) - >>> M = affine_matrix_from_points(v0[:3], v1[:3]) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - - More examples in superimposition_matrix() - - """ - v0 = numpy.array(v0, dtype=numpy.float64, copy=True) - v1 = numpy.array(v1, dtype=numpy.float64, copy=True) - - ndims = v0.shape[0] - if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape: - raise ValueError("input arrays are of wrong shape or type") - - # move centroids to origin - t0 = -numpy.mean(v0, axis=1) - M0 = numpy.identity(ndims+1) - M0[:ndims, ndims] = t0 - v0 += t0.reshape(ndims, 1) - t1 = -numpy.mean(v1, axis=1) - M1 = numpy.identity(ndims+1) - M1[:ndims, ndims] = t1 - v1 += t1.reshape(ndims, 1) - - if shear: - # Affine transformation - A = numpy.concatenate((v0, v1), axis=0) - u, s, vh = numpy.linalg.svd(A.T) - vh = vh[:ndims].T - B = vh[:ndims] - C = vh[ndims:2*ndims] - t = numpy.dot(C, numpy.linalg.pinv(B)) - t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1) - M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,))) - elif usesvd or ndims != 3: - # Rigid transformation via SVD of covariance matrix - u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) - # rotation matrix from SVD orthonormal bases - R = numpy.dot(u, vh) - if numpy.linalg.det(R) < 0.0: - # R does not constitute right handed system - R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0) - s[-1] *= -1.0 - # homogeneous transformation matrix - M = numpy.identity(ndims+1) - M[:ndims, :ndims] = R - else: - # Rigid transformation matrix via quaternion - # compute symmetric matrix N - xx, yy, zz = numpy.sum(v0 * v1, axis=1) - xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) - xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) - N = [[xx+yy+zz, 0.0, 0.0, 0.0], - [yz-zy, xx-yy-zz, 0.0, 0.0], - [zx-xz, xy+yx, yy-xx-zz, 0.0], - [xy-yx, zx+xz, yz+zy, zz-xx-yy]] - # quaternion: eigenvector corresponding to most positive eigenvalue - w, V = numpy.linalg.eigh(N) - q = V[:, numpy.argmax(w)] - q /= vector_norm(q) # unit quaternion - # homogeneous transformation matrix - M = quaternion_matrix(q) - - if scale and not shear: - # Affine transformation; scale is ratio of RMS deviations from centroid - v0 *= v0 - v1 *= v1 - M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) - - # move centroids back - M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0)) - M /= M[ndims, ndims] - return M - - -def superimposition_matrix(v0, v1, scale=False, usesvd=True): - """Return matrix to transform given 3D point set into second point set. - - v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points. - - The parameters scale and usesvd are explained in the more general - affine_matrix_from_points function. - - The returned matrix is a similarity or Euclidean transformation matrix. - This function has a fast C implementation in transformations.c. 
- - >>> v0 = numpy.random.rand(3, 10) - >>> M = superimposition_matrix(v0, v0) - >>> numpy.allclose(M, numpy.identity(4)) - True - >>> R = random_rotation_matrix(numpy.random.random(3)) - >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]] - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 - >>> v0[3] = 1 - >>> v1 = numpy.dot(R, v0) - >>> M = superimposition_matrix(v0, v1) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> S = scale_matrix(random.random()) - >>> T = translation_matrix(numpy.random.random(3)-0.5) - >>> M = concatenate_matrices(T, R, S) - >>> v1 = numpy.dot(M, v0) - >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1) - >>> M = superimposition_matrix(v0, v1, scale=True) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v0)) - True - >>> v = numpy.empty((4, 100, 3)) - >>> v[:, :, 0] = v0 - >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) - >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) - True - - """ - v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] - v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] - return affine_matrix_from_points(v0, v1, shear=False, - scale=scale, usesvd=usesvd) - - -def euler_matrix(ai, aj, ak, axes='sxyz'): - """Return homogeneous rotation matrix from Euler angles and axis sequence. - - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> R = euler_matrix(1, 2, 3, 'syxz') - >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) - True - >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) - >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) - True - >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R = euler_matrix(ai, aj, ak, axes) - >>> for axes in _TUPLE2AXES.keys(): - ... R = euler_matrix(ai, aj, ak, axes) - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] - except (AttributeError, KeyError): - _TUPLE2AXES[axes] # validation - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - if frame: - ai, ak = ak, ai - if parity: - ai, aj, ak = -ai, -aj, -ak - - si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) - ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) - cc, cs = ci*ck, ci*sk - sc, ss = si*ck, si*sk - - M = numpy.identity(4) - if repetition: - M[i, i] = cj - M[i, j] = sj*si - M[i, k] = sj*ci - M[j, i] = sj*sk - M[j, j] = -cj*ss+cc - M[j, k] = -cj*cs-sc - M[k, i] = -sj*ck - M[k, j] = cj*sc+cs - M[k, k] = cj*cc-ss - else: - M[i, i] = cj*ck - M[i, j] = sj*sc-cs - M[i, k] = sj*cc+ss - M[j, i] = cj*sk - M[j, j] = sj*ss+cc - M[j, k] = sj*cs-sc - M[k, i] = -sj - M[k, j] = cj*si - M[k, k] = cj*ci - return M - - -def euler_from_matrix(matrix, axes='sxyz'): - """Return Euler angles from rotation matrix for specified axis sequence. - - axes : One of 24 axis sequences as string or encoded tuple - - Note that many Euler angle triplets can describe one matrix. - - >>> R0 = euler_matrix(1, 2, 3, 'syxz') - >>> al, be, ga = euler_from_matrix(R0, 'syxz') - >>> R1 = euler_matrix(al, be, ga, 'syxz') - >>> numpy.allclose(R0, R1) - True - >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5) - >>> for axes in _AXES2TUPLE.keys(): - ... R0 = euler_matrix(axes=axes, *angles) - ... 
R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) - ... if not numpy.allclose(R0, R1): print(axes, "failed") - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _TUPLE2AXES[axes] # validation - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] - if repetition: - sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) - if sy > _EPS: - ax = math.atan2( M[i, j], M[i, k]) - ay = math.atan2( sy, M[i, i]) - az = math.atan2( M[j, i], -M[k, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2( sy, M[i, i]) - az = 0.0 - else: - cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) - if cy > _EPS: - ax = math.atan2( M[k, j], M[k, k]) - ay = math.atan2(-M[k, i], cy) - az = math.atan2( M[j, i], M[i, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(-M[k, i], cy) - az = 0.0 - - if parity: - ax, ay, az = -ax, -ay, -az - if frame: - ax, az = az, ax - return ax, ay, az - - -def euler_from_quaternion(quaternion, axes='sxyz'): - """Return Euler angles from quaternion for specified axis sequence. - - >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0]) - >>> numpy.allclose(angles, [0.123, 0, 0]) - True - - """ - return euler_from_matrix(quaternion_matrix(quaternion), axes) - - -def quaternion_from_euler(ai, aj, ak, axes='sxyz'): - """Return quaternion from Euler angles and axis sequence. - - ai, aj, ak : Euler's roll, pitch and yaw angles - axes : One of 24 axis sequences as string or encoded tuple - - >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') - >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435]) - True - - """ - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _TUPLE2AXES[axes] # validation - firstaxis, parity, repetition, frame = axes - - i = firstaxis + 1 - j = _NEXT_AXIS[i+parity-1] + 1 - k = _NEXT_AXIS[i-parity] + 1 - - if frame: - ai, ak = ak, ai - if parity: - aj = -aj - - ai /= 2.0 - aj /= 2.0 - ak /= 2.0 - ci = math.cos(ai) - si = math.sin(ai) - cj = math.cos(aj) - sj = math.sin(aj) - ck = math.cos(ak) - sk = math.sin(ak) - cc = ci*ck - cs = ci*sk - sc = si*ck - ss = si*sk - - q = numpy.empty((4, )) - if repetition: - q[0] = cj*(cc - ss) - q[i] = cj*(cs + sc) - q[j] = sj*(cc + ss) - q[k] = sj*(cs - sc) - else: - q[0] = cj*cc + sj*ss - q[i] = cj*sc - sj*cs - q[j] = cj*ss + sj*cc - q[k] = cj*cs - sj*sc - if parity: - q[j] *= -1.0 - - return q - - -def quaternion_about_axis(angle, axis): - """Return quaternion for rotation about axis. - - >>> q = quaternion_about_axis(0.123, [1, 0, 0]) - >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0]) - True - - """ - q = numpy.array([0.0, axis[0], axis[1], axis[2]]) - qlen = vector_norm(q) - if qlen > _EPS: - q *= math.sin(angle/2.0) / qlen - q[0] = math.cos(angle/2.0) - return q - - -def quaternion_matrix(quaternion): - """Return homogeneous rotation matrix from quaternion. 
- - >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0]) - >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0])) - True - >>> M = quaternion_matrix([1, 0, 0, 0]) - >>> numpy.allclose(M, numpy.identity(4)) - True - >>> M = quaternion_matrix([0, 1, 0, 0]) - >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1])) - True - - """ - q = numpy.array(quaternion, dtype=numpy.float64, copy=True) - n = numpy.dot(q, q) - if n < _EPS: - return numpy.identity(4) - q *= math.sqrt(2.0 / n) - q = numpy.outer(q, q) - return numpy.array([ - [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0], - [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0], - [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0], - [ 0.0, 0.0, 0.0, 1.0]]) - - -def quaternion_from_matrix(matrix, isprecise=False): - """Return quaternion from rotation matrix. - - If isprecise is True, the input matrix is assumed to be a precise rotation - matrix and a faster algorithm is used. - - >>> q = quaternion_from_matrix(numpy.identity(4), True) - >>> numpy.allclose(q, [1, 0, 0, 0]) - True - >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1])) - >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0]) - True - >>> R = rotation_matrix(0.123, (1, 2, 3)) - >>> q = quaternion_from_matrix(R, True) - >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786]) - True - >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0], - ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]] - >>> q = quaternion_from_matrix(R) - >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611]) - True - >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0], - ... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]] - >>> q = quaternion_from_matrix(R) - >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603]) - True - >>> R = random_rotation_matrix() - >>> q = quaternion_from_matrix(R) - >>> is_same_transform(R, quaternion_matrix(q)) - True - - """ - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] - if isprecise: - q = numpy.empty((4, )) - t = numpy.trace(M) - if t > M[3, 3]: - q[0] = t - q[3] = M[1, 0] - M[0, 1] - q[2] = M[0, 2] - M[2, 0] - q[1] = M[2, 1] - M[1, 2] - else: - i, j, k = 1, 2, 3 - if M[1, 1] > M[0, 0]: - i, j, k = 2, 3, 1 - if M[2, 2] > M[i, i]: - i, j, k = 3, 1, 2 - t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] - q[i] = t - q[j] = M[i, j] + M[j, i] - q[k] = M[k, i] + M[i, k] - q[3] = M[k, j] - M[j, k] - q *= 0.5 / math.sqrt(t * M[3, 3]) - else: - m00 = M[0, 0] - m01 = M[0, 1] - m02 = M[0, 2] - m10 = M[1, 0] - m11 = M[1, 1] - m12 = M[1, 2] - m20 = M[2, 0] - m21 = M[2, 1] - m22 = M[2, 2] - # symmetric matrix K - K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0], - [m01+m10, m11-m00-m22, 0.0, 0.0], - [m02+m20, m12+m21, m22-m00-m11, 0.0], - [m21-m12, m02-m20, m10-m01, m00+m11+m22]]) - K /= 3.0 - # quaternion is eigenvector of K that corresponds to largest eigenvalue - w, V = numpy.linalg.eigh(K) - q = V[[3, 0, 1, 2], numpy.argmax(w)] - if q[0] < 0.0: - numpy.negative(q, q) - return q - - -def quaternion_multiply(quaternion1, quaternion0): - """Return multiplication of two quaternions. 
- - >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7]) - >>> numpy.allclose(q, [28, -44, -14, 48]) - True - - """ - w0, x0, y0, z0 = quaternion0 - w1, x1, y1, z1 = quaternion1 - return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0, - x1*w0 + y1*z0 - z1*y0 + w1*x0, - -x1*z0 + y1*w0 + z1*x0 + w1*y0, - x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64) - - -def quaternion_conjugate(quaternion): - """Return conjugate of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_conjugate(q0) - >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:]) - True - - """ - q = numpy.array(quaternion, dtype=numpy.float64, copy=True) - numpy.negative(q[1:], q[1:]) - return q - - -def quaternion_inverse(quaternion): - """Return inverse of quaternion. - - >>> q0 = random_quaternion() - >>> q1 = quaternion_inverse(q0) - >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) - True - - """ - q = numpy.array(quaternion, dtype=numpy.float64, copy=True) - numpy.negative(q[1:], q[1:]) - return q / numpy.dot(q, q) - - -def quaternion_real(quaternion): - """Return real part of quaternion. - - >>> quaternion_real([3, 0, 1, 2]) - 3.0 - - """ - return float(quaternion[0]) - - -def quaternion_imag(quaternion): - """Return imaginary part of quaternion. - - >>> quaternion_imag([3, 0, 1, 2]) - array([ 0., 1., 2.]) - - """ - return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True) - - -def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): - """Return spherical linear interpolation between two quaternions. - - >>> q0 = random_quaternion() - >>> q1 = random_quaternion() - >>> q = quaternion_slerp(q0, q1, 0) - >>> numpy.allclose(q, q0) - True - >>> q = quaternion_slerp(q0, q1, 1, 1) - >>> numpy.allclose(q, q1) - True - >>> q = quaternion_slerp(q0, q1, 0.5) - >>> angle = math.acos(numpy.dot(q0, q)) - >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \ - numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle) - True - - """ - q0 = unit_vector(quat0[:4]) - q1 = unit_vector(quat1[:4]) - if fraction == 0.0: - return q0 - elif fraction == 1.0: - return q1 - d = numpy.dot(q0, q1) - if abs(abs(d) - 1.0) < _EPS: - return q0 - if shortestpath and d < 0.0: - # invert rotation - d = -d - numpy.negative(q1, q1) - angle = math.acos(d) + spin * math.pi - if abs(angle) < _EPS: - return q0 - isin = 1.0 / math.sin(angle) - q0 *= math.sin((1.0 - fraction) * angle) * isin - q1 *= math.sin(fraction * angle) * isin - q0 += q1 - return q0 - - -def random_quaternion(rand=None): - """Return uniform random unit quaternion. - - rand: array like or None - Three independent random variables that are uniformly distributed - between 0 and 1. - - >>> q = random_quaternion() - >>> numpy.allclose(1, vector_norm(q)) - True - >>> q = random_quaternion(numpy.random.random(3)) - >>> len(q.shape), q.shape[0]==4 - (1, True) - - """ - if rand is None: - rand = numpy.random.rand(3) - else: - assert len(rand) == 3 - r1 = numpy.sqrt(1.0 - rand[0]) - r2 = numpy.sqrt(rand[0]) - pi2 = math.pi * 2.0 - t1 = pi2 * rand[1] - t2 = pi2 * rand[2] - return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1, - numpy.cos(t1)*r1, numpy.sin(t2)*r2]) - - -def random_rotation_matrix(rand=None): - """Return uniform random rotation matrix. - - rand: array like - Three independent random variables that are uniformly distributed - between 0 and 1 for each returned quaternion. 
- - >>> R = random_rotation_matrix() - >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) - True - - """ - return quaternion_matrix(random_quaternion(rand)) - - -class Arcball(object): - """Virtual Trackball Control. - - >>> ball = Arcball() - >>> ball = Arcball(initial=numpy.identity(4)) - >>> ball.place([320, 320], 320) - >>> ball.down([500, 250]) - >>> ball.drag([475, 275]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 3.90583455) - True - >>> ball = Arcball(initial=[1, 0, 0, 0]) - >>> ball.place([320, 320], 320) - >>> ball.setaxes([1, 1, 0], [-1, 1, 0]) - >>> ball.constrain = True - >>> ball.down([400, 200]) - >>> ball.drag([200, 400]) - >>> R = ball.matrix() - >>> numpy.allclose(numpy.sum(R), 0.2055924) - True - >>> ball.next() - - """ - def __init__(self, initial=None): - """Initialize virtual trackball control. - - initial : quaternion or rotation matrix - - """ - self._axis = None - self._axes = None - self._radius = 1.0 - self._center = [0.0, 0.0] - self._vdown = numpy.array([0.0, 0.0, 1.0]) - self._constrain = False - if initial is None: - self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0]) - else: - initial = numpy.array(initial, dtype=numpy.float64) - if initial.shape == (4, 4): - self._qdown = quaternion_from_matrix(initial) - elif initial.shape == (4, ): - initial /= vector_norm(initial) - self._qdown = initial - else: - raise ValueError("initial not a quaternion or matrix") - self._qnow = self._qpre = self._qdown - - def place(self, center, radius): - """Place Arcball, e.g. when window size changes. - - center : sequence[2] - Window coordinates of trackball center. - radius : float - Radius of trackball in window coordinates. - - """ - self._radius = float(radius) - self._center[0] = center[0] - self._center[1] = center[1] - - def setaxes(self, *axes): - """Set axes to constrain rotations.""" - if axes is None: - self._axes = None - else: - self._axes = [unit_vector(axis) for axis in axes] - - @property - def constrain(self): - """Return state of constrain to axis mode.""" - return self._constrain - - @constrain.setter - def constrain(self, value): - """Set state of constrain to axis mode.""" - self._constrain = bool(value) - - def down(self, point): - """Set initial cursor window coordinates and pick constrain-axis.""" - self._vdown = arcball_map_to_sphere(point, self._center, self._radius) - self._qdown = self._qpre = self._qnow - if self._constrain and self._axes is not None: - self._axis = arcball_nearest_axis(self._vdown, self._axes) - self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) - else: - self._axis = None - - def drag(self, point): - """Update current cursor window coordinates.""" - vnow = arcball_map_to_sphere(point, self._center, self._radius) - if self._axis is not None: - vnow = arcball_constrain_to_axis(vnow, self._axis) - self._qpre = self._qnow - t = numpy.cross(self._vdown, vnow) - if numpy.dot(t, t) < _EPS: - self._qnow = self._qdown - else: - q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]] - self._qnow = quaternion_multiply(q, self._qdown) - - def next(self, acceleration=0.0): - """Continue rotation in direction of last drag.""" - q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False) - self._qpre, self._qnow = self._qnow, q - - def matrix(self): - """Return homogeneous rotation matrix.""" - return quaternion_matrix(self._qnow) - - -def arcball_map_to_sphere(point, center, radius): - """Return unit sphere coordinates from window coordinates.""" - v0 = (point[0] - center[0]) / radius - v1 = (center[1] - 
point[1]) / radius - n = v0*v0 + v1*v1 - if n > 1.0: - # position outside of sphere - n = math.sqrt(n) - return numpy.array([v0/n, v1/n, 0.0]) - else: - return numpy.array([v0, v1, math.sqrt(1.0 - n)]) - - -def arcball_constrain_to_axis(point, axis): - """Return sphere point perpendicular to axis.""" - v = numpy.array(point, dtype=numpy.float64, copy=True) - a = numpy.array(axis, dtype=numpy.float64, copy=True) - v -= a * numpy.dot(a, v) # on plane - n = vector_norm(v) - if n > _EPS: - if v[2] < 0.0: - numpy.negative(v, v) - v /= n - return v - if a[2] == 1.0: - return numpy.array([1.0, 0.0, 0.0]) - return unit_vector([-a[1], a[0], 0.0]) - - -def arcball_nearest_axis(point, axes): - """Return axis, which arc is nearest to point.""" - point = numpy.array(point, dtype=numpy.float64, copy=False) - nearest = None - mx = -1.0 - for axis in axes: - t = numpy.dot(arcball_constrain_to_axis(point, axis), point) - if t > mx: - nearest = axis - mx = t - return nearest - - -# epsilon for testing whether a number is close to zero -_EPS = numpy.finfo(float).eps * 4.0 - -# axis sequences for Euler angles -_NEXT_AXIS = [1, 2, 0, 1] - -# map axes strings to/from tuples of inner axis, parity, repetition, frame -_AXES2TUPLE = { - 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), - 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), - 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), - 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), - 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), - 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), - 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), - 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} - -_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) - - -def vector_norm(data, axis=None, out=None): - """Return length, i.e. Euclidean norm, of ndarray along axis. - - >>> v = numpy.random.random(3) - >>> n = vector_norm(v) - >>> numpy.allclose(n, numpy.linalg.norm(v)) - True - >>> v = numpy.random.rand(6, 5, 3) - >>> n = vector_norm(v, axis=-1) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2))) - True - >>> n = vector_norm(v, axis=1) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) - True - >>> v = numpy.random.rand(5, 4, 3) - >>> n = numpy.empty((5, 3)) - >>> vector_norm(v, axis=1, out=n) - >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) - True - >>> vector_norm([]) - 0.0 - >>> vector_norm([1]) - 1.0 - - """ - data = numpy.array(data, dtype=numpy.float64, copy=True) - if out is None: - if data.ndim == 1: - return math.sqrt(numpy.dot(data, data)) - data *= data - out = numpy.atleast_1d(numpy.sum(data, axis=axis)) - numpy.sqrt(out, out) - return out - else: - data *= data - numpy.sum(data, axis=axis, out=out) - numpy.sqrt(out, out) - - -def unit_vector(data, axis=None, out=None): - """Return ndarray normalized by length, i.e. Euclidean norm, along axis. 
- - >>> v0 = numpy.random.random(3) - >>> v1 = unit_vector(v0) - >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) - True - >>> v0 = numpy.random.rand(5, 4, 3) - >>> v1 = unit_vector(v0, axis=-1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) - >>> numpy.allclose(v1, v2) - True - >>> v1 = unit_vector(v0, axis=1) - >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) - >>> numpy.allclose(v1, v2) - True - >>> v1 = numpy.empty((5, 4, 3)) - >>> unit_vector(v0, axis=1, out=v1) - >>> numpy.allclose(v1, v2) - True - >>> list(unit_vector([])) - [] - >>> list(unit_vector([1])) - [1.0] - - """ - if out is None: - data = numpy.array(data, dtype=numpy.float64, copy=True) - if data.ndim == 1: - data /= math.sqrt(numpy.dot(data, data)) - return data - else: - if out is not data: - out[:] = numpy.array(data, copy=False) - data = out - length = numpy.atleast_1d(numpy.sum(data*data, axis)) - numpy.sqrt(length, length) - if axis is not None: - length = numpy.expand_dims(length, axis) - data /= length - if out is None: - return data - - -def random_vector(size): - """Return array of random doubles in the half-open interval [0.0, 1.0). - - >>> v = random_vector(10000) - >>> numpy.all(v >= 0) and numpy.all(v < 1) - True - >>> v0 = random_vector(10) - >>> v1 = random_vector(10) - >>> numpy.any(v0 == v1) - False - - """ - return numpy.random.random(size) - - -def vector_product(v0, v1, axis=0): - """Return vector perpendicular to vectors. - - >>> v = vector_product([2, 0, 0], [0, 3, 0]) - >>> numpy.allclose(v, [0, 0, 6]) - True - >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] - >>> v1 = [[3], [0], [0]] - >>> v = vector_product(v0, v1) - >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]]) - True - >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] - >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] - >>> v = vector_product(v0, v1, axis=1) - >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]]) - True - - """ - return numpy.cross(v0, v1, axis=axis) - - -def angle_between_vectors(v0, v1, directed=True, axis=0): - """Return angle between vectors. - - If directed is False, the input vectors are interpreted as undirected axes, - i.e. the maximum angle is pi/2. - - >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3]) - >>> numpy.allclose(a, math.pi) - True - >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False) - >>> numpy.allclose(a, 0) - True - >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] - >>> v1 = [[3], [0], [0]] - >>> a = angle_between_vectors(v0, v1) - >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532]) - True - >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] - >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] - >>> a = angle_between_vectors(v0, v1, axis=1) - >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532]) - True - - """ - v0 = numpy.array(v0, dtype=numpy.float64, copy=False) - v1 = numpy.array(v1, dtype=numpy.float64, copy=False) - dot = numpy.sum(v0 * v1, axis=axis) - dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis) - return numpy.arccos(dot if directed else numpy.fabs(dot)) - - -def inverse_matrix(matrix): - """Return inverse of square transformation matrix. - - >>> M0 = random_rotation_matrix() - >>> M1 = inverse_matrix(M0.T) - >>> numpy.allclose(M1, numpy.linalg.inv(M0.T)) - True - >>> for size in range(1, 7): - ... M0 = numpy.random.rand(size, size) - ... M1 = inverse_matrix(M0) - ... 
if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size) - - """ - return numpy.linalg.inv(matrix) - - -def concatenate_matrices(*matrices): - """Return concatenation of series of transformation matrices. - - >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 - >>> numpy.allclose(M, concatenate_matrices(M)) - True - >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) - True - - """ - M = numpy.identity(4) - for i in matrices: - M = numpy.dot(M, i) - return M - - -def is_same_transform(matrix0, matrix1): - """Return True if two matrices perform same transformation. - - >>> is_same_transform(numpy.identity(4), numpy.identity(4)) - True - >>> is_same_transform(numpy.identity(4), random_rotation_matrix()) - False - - """ - matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True) - matrix0 /= matrix0[3, 3] - matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True) - matrix1 /= matrix1[3, 3] - return numpy.allclose(matrix0, matrix1) - - -def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'): - """Try import all public attributes from module into global namespace. - - Existing attributes with name clashes are renamed with prefix. - Attributes starting with underscore are ignored by default. - - Return True on successful import. - - """ - import warnings - from importlib import import_module - try: - if not package: - module = import_module(name) - else: - module = import_module('.' + name, package=package) - except ImportError: - if warn: - warnings.warn("failed to import module %s" % name) - else: - for attr in dir(module): - if ignore and attr.startswith(ignore): - continue - if prefix: - if attr in globals(): - globals()[prefix + attr] = globals()[attr] - elif warn: - warnings.warn("no Python implementation of " + attr) - globals()[attr] = getattr(module, attr) - return True - - - -#_import_module('_transformations') - -if __name__ == "__main__": - import doctest - import random # used in doctests - numpy.set_printoptions(suppress=True, precision=5) - doctest.testmod() - - diff --git a/pyproject.toml b/pyproject.toml index 53247f3..62892bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ classifiers = [ 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', - 'Natural Language :: English", + 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: Unix', @@ -64,7 +64,7 @@ free_guinier = 'freesas.app.auto_guinier:main' free_rg = 'freesas.app.autorg:main' cormapy = 'freesas.app.cormap:main' supycomb = 'freesas.app.supycomb:main' -free_bift = 'freesas.app.bift:main", +free_bift = 'freesas.app.bift:main' extract_ascii = 'freesas.app.extract_ascii:main' [project.gui-scripts] diff --git a/src/freesas/_version.py b/src/freesas/_version.py old mode 100644 new mode 100755 From 8120ebb6cdc5a40723f6645bf6b9f4a23028d231 Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 16:43:47 +0100 Subject: [PATCH 06/45] update build --- freesas/_distance.pyx | 79 ------------- freesas/_distance_omp.pxi | 108 ------------------ src/freesas/__init__.py | 14 +-- src/freesas/ext/__init__.py | 0 {freesas => src/freesas/ext}/_autorg.pyx | 0 {freesas => src/freesas/ext}/_bift.pyx | 0 {freesas => src/freesas/ext}/_cormap.pyx | 0 .../freesas/ext/_distance.pyx | 76 +++++++++++- {freesas => src/freesas/ext}/include/isnan.h | 0 {freesas => src/freesas/ext}/isnan.pxd | 0 src/freesas/ext/meson.build | 
31 +++++ src/freesas/meson.build | 2 +- 12 files changed, 109 insertions(+), 201 deletions(-) delete mode 100644 freesas/_distance.pyx delete mode 100644 freesas/_distance_omp.pxi create mode 100644 src/freesas/ext/__init__.py rename {freesas => src/freesas/ext}/_autorg.pyx (100%) rename {freesas => src/freesas/ext}/_bift.pyx (100%) rename {freesas => src/freesas/ext}/_cormap.pyx (100%) rename freesas/_distance_nomp.pxi => src/freesas/ext/_distance.pyx (56%) rename {freesas => src/freesas/ext}/include/isnan.h (100%) rename {freesas => src/freesas/ext}/isnan.pxd (100%) create mode 100644 src/freesas/ext/meson.build diff --git a/freesas/_distance.pyx b/freesas/_distance.pyx deleted file mode 100644 index d93fc3a..0000000 --- a/freesas/_distance.pyx +++ /dev/null @@ -1,79 +0,0 @@ -#Cython module to calculate distances of set of atoms - -__author__ = "Jerome Kieffer" -__license__ = "MIT" -__copyright__ = "2015, ESRF" - -import sys -import cython -cimport numpy -import numpy -from cython cimport floating -from libc.math cimport sqrt, fabs, exp - - -@cython.wraparound(False) -@cython.boundscheck(False) -def calc_invariants(floating[:, :] atoms): - """ - Calculate the invariants of the structure, i.e fineness, radius of gyration and diameter of the model. - - Nota: to economize size*numpy.sqrt, the sqrt is taken at the end of the calculation. - We should have done s += sqrt(d) and then s/size, but we do s+= d and then sqrt(s/size). - You can check that the result is the same. - - @param atoms: 2d-array with atom coordinates:[[x,y,z],...] - @return: 3-tuple containing (fineness, Rg, Dmax) - * average distance between an atoms and its nearest neighbor - * radius of gyration of the model - * diameter of the model - """ - cdef: - int i, j, size - floating d, x1, y1, z1, dx, dy, dz, big, d2, sum_d2, d2max - size = atoms.shape[0] - assert atoms.shape[1] >= 3 - big = sys.maxsize - s = 0.0 - sum_d2 = 0.0 - d2max = 0.0 - for i in range(size): - x1 = atoms[i, 0] - y1 = atoms[i, 1] - z1 = atoms[i, 2] - d = big - for j in range(size): - if i == j: - continue - dx = atoms[j, 0] - x1 - dy = atoms[j, 1] - y1 - dz = atoms[j, 2] - z1 - d2 = dx * dx + dy * dy + dz * dz - sum_d2 += d2 - d2max = max(d2max, d2) - d = min(d, d2) - s += d - return sqrt(s / size), sqrt(sum_d2 / 2.0) / size, sqrt(d2max) - - -cdef inline floating hard_sphere(floating pos, floating radius)nogil: - """Density using hard spheres - @param pos: fabs(d1-d) - """ - if pos > 2.0 * radius: - return 0.0 - return (4 * radius + pos) * (2 * radius - pos) ** 2 / (16.0 * radius ** 3) - -cdef inline floating soft_sphere(floating pos, floating radius)nogil: - """Density using soft spheres (gaussian density) - @param pos: fabs(d1-d) - @param radius: radius of the equivalent hard sphere - """ - cdef floating sigma = 0.40567 * radius - return exp(- pos * pos / (2.0 * sigma * sigma)) * 0.3989422804014327 / sigma - - -IF HAVE_OPENMP: - include "_distance_omp.pxi" -ELSE: - include "_distance_nomp.pxi" diff --git a/freesas/_distance_omp.pxi b/freesas/_distance_omp.pxi deleted file mode 100644 index 32139d2..0000000 --- a/freesas/_distance_omp.pxi +++ /dev/null @@ -1,108 +0,0 @@ -#OpenMP version of distance calculation - -from cython import parallel -cimport openmp - -@cython.wraparound(False) -@cython.boundscheck(False) -def calc_distance(floating[:, :] atoms1, floating[:, :] atoms2, floating fineness1, floating fineness2): - """ - Calculate the Normalized Spatial Discrepancy (NSD) between two molecules - - @param atoms1,atoms2: 2d-array with atom 
coordinates[[x,y,z],...] - @param fineness1, fineness2: fineness of each molecule - @return: NSD atoms1-atoms2 - """ - - cdef: - int i, j, size1 = atoms1.shape[0], size2 = atoms2.shape[0] - int threadid, numthreads = openmp.omp_get_max_threads() - double d, d2, dx, dy, dz, x1, y1, z1 - double s1 = 0.0, s2 = 0.0, big = sys.maxsize - double[:, ::1] min_col = numpy.zeros((numthreads, size2), numpy.float64) + big - assert atoms1.shape[1] >= 3 - assert atoms2.shape[1] >= 3 - assert size1 > 0 - assert size2 > 0 - - for i in parallel.prange(size1, nogil=True): - threadid = parallel.threadid() - x1 = atoms1[i, 0] - y1 = atoms1[i, 1] - z1 = atoms1[i, 2] - d = big - for j in range(size2): - dx = atoms2[j, 0] - x1 - dy = atoms2[j, 1] - y1 - dz = atoms2[j, 2] - z1 - d2 = dx * dx + dy * dy + dz * dz - d = min(d, d2) - min_col[threadid, j] = min(min_col[threadid, j], d2) - s1 += d - for j in parallel.prange(size2, nogil=True): - d = big - for i in range(numthreads): - d = min(d, min_col[i, j]) - s2 += d - - return sqrt(0.5 * ((1.0 / (size1 * fineness2 * fineness2)) * s1 + (1.0 / (size2 * fineness1 * fineness1)) * s2)) - - -@cython.wraparound(False) -@cython.boundscheck(False) -@cython.cdivision(True) -def calc_density(floating[:, :] atoms, floating dmax, int npt, floating r=0.0, bint hard=True): - """ - Calculate the density rho(r) - - #TODO: formula for rigid sphere: - A = (4*R+d)*(2*R-d)**2/16.0/R**3 - - @param atoms: 2d-array with atom coordinates[[x,y,z],...] - @param dmax: Diameter of the model - @param npt: number of point in the density - @param r: radius of an atom - @param hard: use hard spheres model - @return: 1d-array of - """ - - cdef: - int i, j, k, size = atoms.shape[0] - int threadid, numthreads = openmp.omp_get_max_threads() - int width = 1 if hard else 2 - floating d, dmax_plus, dx, dy, dz, x1, y1, z1 - floating delta, d_min, d_max, d1, den - double[:, ::1] tmp = numpy.zeros((numthreads, npt), numpy.float64) - double[::1] out = numpy.zeros(npt, numpy.float64) - double s - - assert atoms.shape[1] >= 3 - assert size > 0 - assert dmax > 0 - dmax_plus = dmax * (1.0 + numpy.finfo(numpy.float32).eps) - delta = dmax_plus / npt - - for i in parallel.prange(size, nogil=True): - threadid = parallel.threadid() - x1 = atoms[i, 0] - y1 = atoms[i, 1] - z1 = atoms[i, 2] - for j in range(size): - dx = atoms[j, 0] - x1 - dy = atoms[j, 1] - y1 - dz = atoms[j, 2] - z1 - d = sqrt(dx * dx + dy * dy + dz * dz) - d_min = max(0.0, d - width * r) - d_max = min(dmax, d + width * r) - for k in range((d_min / delta), (d_max / delta)+1): - if hard: - tmp[threadid, k] += hard_sphere(fabs(k * delta - d), r) - else: - tmp[threadid, k] += soft_sphere(fabs(k * delta - d), r) - for j in parallel.prange(npt, nogil=True): - s = 0 - for i in range(numthreads): - s = s + tmp[i, j] - out[j] += s - - return numpy.asarray(out) diff --git a/src/freesas/__init__.py b/src/freesas/__init__.py index 79072af..e3f1517 100644 --- a/src/freesas/__init__.py +++ b/src/freesas/__init__.py @@ -1,7 +1,7 @@ # coding: utf-8 # /*########################################################################## # -# Copyright (c) 2015-2018 European Synchrotron Radiation Facility +# Copyright (c) 2015-2023 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -23,22 +23,12 @@ # # ###########################################################################*/ """ -The silx package contains the following main 
sub-packages:
-- silx.gui: Qt widgets for data visualization and data file browsing
-- silx.image: Some processing functions for 2D images
-- silx.io: Reading and writing data files (HDF5/NeXus, SPEC, ...)
-- silx.math: Some processing functions for 1D, 2D, 3D, nD arrays
-- silx.opencl: OpenCL-based data processing
-- silx.sx: High-level silx functions suited for (I)Python console.
-- silx.utils: Miscellaneous convenient functions
-
-See silx documentation: http://www.silx.org/doc/silx/latest/
 """

 __authors__ = ["Jérôme Kieffer"]
 __license__ = "MIT"
-__date__ = "31/08/2018"
+__date__ = "27/11/2023"

 import os as _os
 import logging as _logging
diff --git a/src/freesas/ext/__init__.py b/src/freesas/ext/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/freesas/_autorg.pyx b/src/freesas/ext/_autorg.pyx
similarity index 100%
rename from freesas/_autorg.pyx
rename to src/freesas/ext/_autorg.pyx
diff --git a/freesas/_bift.pyx b/src/freesas/ext/_bift.pyx
similarity index 100%
rename from freesas/_bift.pyx
rename to src/freesas/ext/_bift.pyx
diff --git a/freesas/_cormap.pyx b/src/freesas/ext/_cormap.pyx
similarity index 100%
rename from freesas/_cormap.pyx
rename to src/freesas/ext/_cormap.pyx
diff --git a/freesas/_distance_nomp.pxi b/src/freesas/ext/_distance.pyx
similarity index 56%
rename from freesas/_distance_nomp.pxi
rename to src/freesas/ext/_distance.pyx
index 28a608f..fa373e8 100644
--- a/freesas/_distance_nomp.pxi
+++ b/src/freesas/ext/_distance.pyx
@@ -1,4 +1,77 @@
-#Serial version of distance calculation
+#Cython module to calculate distances of set of atoms
+
+__author__ = "Jerome Kieffer"
+__license__ = "MIT"
+__copyright__ = "2015, ESRF"
+
+import sys
+import cython
+cimport numpy
+import numpy
+from cython cimport floating
+from libc.math cimport sqrt, fabs, exp
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def calc_invariants(floating[:, :] atoms):
+    """
+    Calculate the invariants of the structure, i.e. fineness, radius of gyration and diameter of the model.
+
+    Note: to save `size` calls to numpy.sqrt, the sqrt is taken at the end of the calculation.
+    We should have done s += sqrt(d) and then s/size, but we do s += d and then sqrt(s/size),
+    i.e. a quadratic rather than an arithmetic mean of the nearest-neighbour distances.
+
+    @param atoms: 2d-array with atom coordinates: [[x,y,z],...]
+    @return: 3-tuple containing (fineness, Rg, Dmax)
+        * average distance between an atom and its nearest neighbor
+        * radius of gyration of the model
+        * diameter of the model
+    """
+    cdef:
+        int i, j, size
+        floating d, x1, y1, z1, dx, dy, dz, big, d2, sum_d2, d2max
+    size = atoms.shape[0]
+    assert atoms.shape[1] >= 3
+    big = sys.maxsize
+    s = 0.0
+    sum_d2 = 0.0
+    d2max = 0.0
+    for i in range(size):
+        x1 = atoms[i, 0]
+        y1 = atoms[i, 1]
+        z1 = atoms[i, 2]
+        d = big
+        for j in range(size):
+            if i == j:
+                continue
+            dx = atoms[j, 0] - x1
+            dy = atoms[j, 1] - y1
+            dz = atoms[j, 2] - z1
+            d2 = dx * dx + dy * dy + dz * dz
+            sum_d2 += d2
+            d2max = max(d2max, d2)
+            d = min(d, d2)
+        s += d
+    return sqrt(s / size), sqrt(sum_d2 / 2.0) / size, sqrt(d2max)
+
+
+cdef inline floating hard_sphere(floating pos, floating radius)nogil:
+    """Density using hard spheres
+    @param pos: fabs(d1-d)
+    """
+    if pos > 2.0 * radius:
+        return 0.0
+    return (4 * radius + pos) * (2 * radius - pos) ** 2 / (16.0 * radius ** 3)
+
+cdef inline floating soft_sphere(floating pos, floating radius)nogil:
+    """Density using soft spheres (gaussian density)
+    @param pos: fabs(d1-d)
+    @param radius: radius of the equivalent hard sphere
+    """
+    cdef floating sigma = 0.40567 * radius
+    return exp(- pos * pos / (2.0 * sigma * sigma)) * 0.3989422804014327 / sigma
+

 @cython.wraparound(False)
 @cython.boundscheck(False)
@@ -86,3 +159,4 @@ def calc_density(floating[:, :] atoms, floating dmax, int npt, floating r=0.0, bint hard=True):
             else:
                 out[k] += soft_sphere(fabs(k * delta - d), r)
     return numpy.asarray(out)
+
diff --git a/freesas/include/isnan.h b/src/freesas/ext/include/isnan.h
similarity index 100%
rename from freesas/include/isnan.h
rename to src/freesas/ext/include/isnan.h
diff --git a/freesas/isnan.pxd b/src/freesas/ext/isnan.pxd
similarity index 100%
rename from freesas/isnan.pxd
rename to src/freesas/ext/isnan.pxd
diff --git a/src/freesas/ext/meson.build b/src/freesas/ext/meson.build
new file mode 100644
index 0000000..4f3862c
--- /dev/null
+++ b/src/freesas/ext/meson.build
@@ -0,0 +1,31 @@
+py.extension_module('_distance',
+        '_distance.pyx',
+        dependencies : py_dep,
+        include_directories : ['include'],
+        install: true,
+        subdir: 'freesas',
+        )
+
+py.extension_module('_cormap',
+        '_cormap.pyx',
+        dependencies : py_dep,
+        include_directories : ['include'],
+        install: true,
+        subdir: 'freesas',
+        )
+
+py.extension_module('_autorg',
+        '_autorg.pyx',
+        dependencies : py_dep,
+        include_directories : ['include'],
+        install: true,
+        subdir: 'freesas',
+        )
+
+py.extension_module('_bift',
+        '_bift.pyx',
+        dependencies : py_dep,
+        include_directories : ['include'],
+        install: true,
+        subdir: 'freesas',
+        )
diff --git a/src/freesas/meson.build b/src/freesas/meson.build
index 9292813..091c17b 100644
--- a/src/freesas/meson.build
+++ b/src/freesas/meson.build
@@ -9,7 +9,7 @@ py.install_sources([
     'autorg.py',
     'average.py',
     'bift.py',
-    'collections.py'
+    'collections.py',
     'cormap.py',
     'decorators.py',
     'fitting.py',

From 3780c378a7f4facc7b5aa30cd82bc6a819ea1515 Mon Sep 17 00:00:00 2001
From: Jerome Kieffer
Date: Mon, 27 Nov 2023 16:56:53 +0100
Subject: [PATCH 07/45] make the code compile with meson

---
 src/freesas/ext/_autorg.pyx      | 12 ++++++------
 src/freesas/ext/_bift.pyx        | 10 +++++-----
 src/freesas/ext/_distance.pyx    |  5 ++---
 src/freesas/ext/shared_types.pxd | 33 ++++++++++++++++++++++++++++++++
 src/freesas/ext/shared_types.pyx |  1 +
 5 files changed, 47 insertions(+), 14 deletions(-)
 create mode 100644 src/freesas/ext/shared_types.pxd
 create mode 100644
src/freesas/ext/shared_types.pyx diff --git a/src/freesas/ext/_autorg.pyx b/src/freesas/ext/_autorg.pyx index 2e2d009..2fb80b0 100644 --- a/src/freesas/ext/_autorg.pyx +++ b/src/freesas/ext/_autorg.pyx @@ -23,7 +23,6 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. -from builtins import None, NotImplementedError """ Loosely based on the autoRg implementation in BioXTAS RAW by J. Hopkins @@ -57,7 +56,9 @@ class NoGuinierRegionError(Error): import cython -cimport numpy as cnumpy +from .shared_types cimport int8_t, uint8_t, int16_t, uint16_t, \ + int32_t, uint32_t, int64_t, uint64_t,\ + float32_t, float64_t, floating, any_int_t, any_t import numpy as numpy from libc.math cimport sqrt, log, fabs, exp, atanh, ceil, NAN from .isnan cimport isfinite @@ -67,7 +68,7 @@ logger = logging.getLogger(__name__) from .collections import RG_RESULT, FIT_RESULT DTYPE = numpy.float64 -ctypedef double DTYPE_t +ctypedef float64_t DTYPE_t # Definition of a few constants cdef: @@ -636,7 +637,7 @@ cdef class AutoGuinier: """ cdef: int start, stop, end, size, lower, upper, i - cnumpy.int32_t[::1] unweigted_start, unweigted_stop + int32_t[::1] unweigted_start, unweigted_stop DTYPE_t[::1] weigted_start,weigted_stop DTYPE_t max_weight, weight, qmin_Rg, qmax_Rg, RMSD end = numpy.max(fits[:,5]) @@ -1011,7 +1012,6 @@ def autoRg(sasm): cdef: DTYPE_t quality, intercept, slope, sigma_slope, lower, upper, r_sqr bint aggregated = 0 - cnumpy.ndarray qualities DTYPE_t[::1] q_ary, i_ary, sigma_ary, lgi_ary, q2_ary, wg_ary, DTYPE_t[::1] fit_data int[::1] offsets, data_range @@ -1020,7 +1020,7 @@ def autoRg(sasm): int start, end, nb_fit, array_size, block_size=39 #page of 4k int idx_min, idx_max, idx, err DTYPE_t[:, ::1] fit_mv, tmp_mv - cnumpy.ndarray[DTYPE_t, ndim=2] fit_array + # DTYPE_t[:, :] fit_array raw_size = len(sasm) q_ary = numpy.empty(raw_size, dtype=DTYPE) diff --git a/src/freesas/ext/_bift.pyx b/src/freesas/ext/_bift.pyx index 586a877..7536691 100644 --- a/src/freesas/ext/_bift.pyx +++ b/src/freesas/ext/_bift.pyx @@ -18,17 +18,16 @@ cdef: list authors str __license__, __copyright__, __date__ -__authors__ = ["Jerome Kieffer", "Jesse Hopkins"] +__authors__ = ["Jérôme Kieffer", "Jesse Hopkins"] __license__ = "MIT" -__copyright__ = "2020, ESRF" -__date__ = "10/06/2020" +__copyright__ = "2020-2023, ESRF" +__date__ = "27/11/2023" import time import cython from cython.parallel import prange from cython.view cimport array as cvarray import numpy -cimport numpy as cnumpy from libc.math cimport sqrt, fabs, pi, sin, log, exp, isfinite from scipy.linalg import lapack @@ -959,7 +958,8 @@ cdef class BIFT: double area, ev_max, evidence_avg, evidence_std, double Dmax_avg, Dmax_std, alpha_avg, alpha_std, chi2_avg, chi2_std, double regularization_avg, regularization_std, Rg_std, Rg_avg, I0_avg, I0_std - cnumpy.ndarray radius, densities, evidences, Dmaxs, alphas, chi2s, regularizations, proba, density_avg, density_std, areas, area2s, Rgs + #2d densities, + # 1d radius, evidences, Dmaxs, alphas, chi2s, regularizations, proba, density_avg, density_std, areas, area2s, Rgs best_key, best, nvalid = self.get_best() if nvalid < 2: diff --git a/src/freesas/ext/_distance.pyx b/src/freesas/ext/_distance.pyx index fa373e8..5d70f71 100644 --- a/src/freesas/ext/_distance.pyx +++ b/src/freesas/ext/_distance.pyx @@ -1,12 +1,11 @@ #Cython module to calculate distances of set of atoms -__author__ = "Jerome Kieffer" +__author__ = 
"Jérôme Kieffer" __license__ = "MIT" -__copyright__ = "2015, ESRF" +__copyright__ = "2023, ESRF" import sys import cython -cimport numpy import numpy from cython cimport floating from libc.math cimport sqrt, fabs, exp diff --git a/src/freesas/ext/shared_types.pxd b/src/freesas/ext/shared_types.pxd new file mode 100644 index 0000000..46524cb --- /dev/null +++ b/src/freesas/ext/shared_types.pxd @@ -0,0 +1,33 @@ +"""This replaces the `cimport numpy` for C-level datatypes""" + +from libc.stdint cimport int8_t, uint8_t, int16_t, uint16_t, \ + int32_t, uint32_t, int64_t, uint64_t + +ctypedef double float64_t +ctypedef float float32_t + +from cython cimport floating + +ctypedef fused any_int_t: + uint8_t + uint16_t + uint32_t + uint64_t + int8_t + int16_t + int32_t + int64_t + +ctypedef fused any_t: + int + long + uint8_t + uint16_t + uint32_t + uint64_t + int8_t + int16_t + int32_t + int64_t + float32_t + float64_t diff --git a/src/freesas/ext/shared_types.pyx b/src/freesas/ext/shared_types.pyx new file mode 100644 index 0000000..e9a2dfa --- /dev/null +++ b/src/freesas/ext/shared_types.pyx @@ -0,0 +1 @@ +"Place-holder" From 8d4da22dbab2dc342612887f6f18a4a68ea5f5a9 Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 17:07:06 +0100 Subject: [PATCH 08/45] app folder moved to meson build system --- src/freesas/app/__init__.py | 29 +++ src/freesas/app/auto_gpa.py | 69 +++++++ src/freesas/app/auto_guinier.py | 69 +++++++ src/freesas/app/autorg.py | 69 +++++++ src/freesas/app/bift.py | 147 +++++++++++++ src/freesas/app/cormap.py | 94 +++++++++ src/freesas/app/extract_ascii.py | 341 +++++++++++++++++++++++++++++++ src/freesas/app/meson.build | 14 ++ src/freesas/app/plot_sas.py | 144 +++++++++++++ src/freesas/app/supycomb.py | 137 +++++++++++++ 10 files changed, 1113 insertions(+) create mode 100644 src/freesas/app/__init__.py create mode 100644 src/freesas/app/auto_gpa.py create mode 100644 src/freesas/app/auto_guinier.py create mode 100644 src/freesas/app/autorg.py create mode 100644 src/freesas/app/bift.py create mode 100644 src/freesas/app/cormap.py create mode 100644 src/freesas/app/extract_ascii.py create mode 100644 src/freesas/app/meson.build create mode 100644 src/freesas/app/plot_sas.py create mode 100644 src/freesas/app/supycomb.py diff --git a/src/freesas/app/__init__.py b/src/freesas/app/__init__.py new file mode 100644 index 0000000..4776656 --- /dev/null +++ b/src/freesas/app/__init__.py @@ -0,0 +1,29 @@ +# coding: utf-8 +# /*########################################################################## +# +# Copyright (c) 2016-2018 European Synchrotron Radiation Facility +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# ###########################################################################*/
+"""Command line applications provided by the freesas launcher."""
+
+__authors__ = ["Jérôme Kieffer", "Martha Brennich"]
+__license__ = "MIT"
+__date__ = "2021/03/24"
diff --git a/src/freesas/app/auto_gpa.py b/src/freesas/app/auto_gpa.py
new file mode 100644
index 0000000..fe02a90
--- /dev/null
+++ b/src/freesas/app/auto_gpa.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python3
+# coding: utf-8
+#
+# Project: freesas
+#          https://github.com/kif/freesas
+#
+# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+__author__ = ["Jérôme Kieffer", "Martha Brennich"]
+__license__ = "MIT"
+__copyright__ = "2021, ESRF"
+__date__ = "19/03/2021"
+
+import sys
+import logging
+from freesas.autorg import auto_gpa
+from freesas.sas_argparser import GuinierParser
+from freesas.fitting import run_guinier_fit
+
+logging.basicConfig(level=logging.WARNING)
+logger = logging.getLogger("auto_gpa")
+
+if sys.version_info < (3, 6):
+    logger.error("This code uses F-strings and requires Python 3.6+")
+
+
+def build_parser() -> GuinierParser:
+    """Build parser for input and return list of files.
+    :return: parser
+    """
+    description = (
+        "Calculate the radius of gyration using Guinier"
+        " Peak Analysis (Putnam 2016) for a set of scattering curves"
+    )
+    epilog = """free_gpa is an open-source implementation of
+    the autorg algorithm originally part of the ATSAS suite.
+    As this tool uses a different theory, some results may differ
+    """
+    return GuinierParser(
+        prog="free_gpa", description=description, epilog=epilog
+    )
+
+
+def main() -> None:
+    """Entry point for free_gpa app"""
+    parser = build_parser()
+    run_guinier_fit(fit_function=auto_gpa, parser=parser, logger=logger)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/freesas/app/auto_guinier.py b/src/freesas/app/auto_guinier.py
new file mode 100644
index 0000000..8f918cc
--- /dev/null
+++ b/src/freesas/app/auto_guinier.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python3
+# coding: utf-8
+#
+# Project: freesas
+#          https://github.com/kif/freesas
+#
+# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+__author__ = ["Jérôme Kieffer", "Martha Brennich"]
+__license__ = "MIT"
+__copyright__ = "2021, ESRF"
+__date__ = "19/03/2021"
+
+import sys
+import logging
+from freesas.autorg import auto_guinier
+from freesas.sas_argparser import GuinierParser
+from freesas.fitting import run_guinier_fit
+
+logging.basicConfig(level=logging.WARNING)
+logger = logging.getLogger("auto_guinier")
+
+if sys.version_info < (3, 6):
+    logger.error("This code uses F-strings and requires Python 3.6+")
+
+
+def build_parser() -> GuinierParser:
+    """Build parser for input and return list of files.
+    :return: parser
+    """
+    description = (
+        "Calculate the radius of gyration using linear fitting of "
+        "logarithmic intensities for a set of scattering curves"
+    )
+    epilog = """free_guinier is an open-source implementation of
+    the autorg algorithm originally part of the ATSAS suite.
+    As this tool uses a different theory, some results may differ
+    """
+    return GuinierParser(
+        prog="free_guinier", description=description, epilog=epilog
+    )
+
+
+def main() -> None:
+    """Entry point for free_guinier app"""
+    parser = build_parser()
+    run_guinier_fit(fit_function=auto_guinier, parser=parser, logger=logger)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/freesas/app/autorg.py b/src/freesas/app/autorg.py
new file mode 100644
index 0000000..2e9ef0c
--- /dev/null
+++ b/src/freesas/app/autorg.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python3
+# coding: utf-8
+#
+# Project: freesas
+#          https://github.com/kif/freesas
+#
+# Copyright (C) 2017-2020 European Synchrotron Radiation Facility, Grenoble, France
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+__author__ = ["Jérôme Kieffer", "Martha Brennich"]
+__license__ = "MIT"
+__copyright__ = "2021, ESRF"
+__date__ = "19/03/2021"
+
+import sys
+import logging
+from freesas.autorg import autoRg
+from freesas.sas_argparser import GuinierParser
+from freesas.fitting import run_guinier_fit
+
+logging.basicConfig(level=logging.WARNING)
+logger = logging.getLogger("autorg")
+
+if sys.version_info < (3, 6):
+    logger.error("This code uses F-strings and requires Python 3.6+")
+
+
+def build_parser() -> GuinierParser:
+    """Build parser for input and return list of files.
+    :return: parser
+    """
+    description = (
+        "Calculate the radius of gyration using the Guinier law"
+        " for a set of scattering curves"
+    )
+    epilog = """free_rg is an open-source implementation of
+    the autorg algorithm originally part of the ATSAS suite.
+ As this is reverse engineered, some constants and results may differ + """ + return GuinierParser( + prog="free_rg", description=description, epilog=epilog + ) + + +def main() -> None: + """Entry point for free_rg app""" + parser = build_parser() + run_guinier_fit(fit_function=autoRg, parser=parser, logger=logger) + + +if __name__ == "__main__": + main() diff --git a/src/freesas/app/bift.py b/src/freesas/app/bift.py new file mode 100644 index 0000000..2a540b0 --- /dev/null +++ b/src/freesas/app/bift.py @@ -0,0 +1,147 @@ +#!/usr/bin/python3 +# coding: utf-8 +# +# Project: freesas +# https://github.com/kif/freesas +# +# Copyright (C) 2017 European Synchrotron Radiation Facility, Grenoble, France +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +__author__ = "Jérôme Kieffer" +__license__ = "MIT" +__copyright__ = "2017, ESRF" +__date__ = "13/10/2020" + +import sys +import logging +import platform +import traceback +from freesas import bift +from freesas.sasio import ( + load_scattering_data, + convert_inverse_angstrom_to_nanometer, +) +from freesas.sas_argparser import SASParser +from freesas.fitting import ( + set_logging_level, + collect_files, +) + +logging.basicConfig(level=logging.WARNING) +logger = logging.getLogger("bift") + + +def build_parser() -> SASParser: + """Build parser for input and return list of files. + :return: parser + """ + + description = ( + "Calculate the density as function of distance p(r)" + " curve from an I(q) scattering curve" + ) + epilog = """free_bift is a Python implementation of the Bayesian Inverse Fourier Transform + + This code is the implementation of + Steen Hansen J. Appl. Cryst. (2000). 33, 1415-1421 + + Based on the BIFT from Jesse Hopkins, available at: + https://sourceforge.net/p/bioxtasraw/git/ci/master/tree/bioxtasraw/BIFT.py + + It aims at being a drop in replacement for datgnom of the ATSAS suite. 
+ + """ + parser = SASParser( + prog="free_bift", description=description, epilog=epilog + ) + parser.add_file_argument(help_text="I(q) files to convert into p(r)") + parser.add_output_filename_argument() + parser.add_q_unit_argument() + parser.add_argument( + "-n", + "--npt", + default=100, + type=int, + help="number of points in p(r) curve", + ) + parser.add_argument( + "-s", + "--scan", + default=27, + type=int, + help="Initial alpha-scan size to guess the start parameter", + ) + parser.add_argument( + "-m", + "--mc", + default=100, + type=int, + help="Number of Monte-Carlo samples in post-refinement", + ) + parser.add_argument( + "-t", + "--threshold", + default=2.0, + type=float, + help="Sample at average ± threshold*sigma in MC", + ) + return parser + + +def main(): + """Entry point for bift app.""" + if platform.system() == "Windows": + sys.stdout = open(1, "w", encoding="utf-16", closefd=False) + + parser = build_parser() + args = parser.parse_args() + set_logging_level(args.verbose) + files = collect_files(args.file) + + for afile in files: + try: + data = load_scattering_data(afile) + except: + logger.error("Unable to parse file %s", afile) + else: + if args.unit == "Å": + data = convert_inverse_angstrom_to_nanometer(data) + try: + bo = bift.auto_bift(data, npt=args.npt, scan_size=args.scan) + except Exception as err: + print("%s: %s %s" % (afile, err.__class__.__name__, err)) + if logging.root.level < logging.WARNING: + traceback.print_exc(file=sys.stdout) + else: + try: + stats = bo.monte_carlo_sampling( + args.mc, args.threshold, npt=args.npt + ) + except RuntimeError as err: + print("%s: %s %s" % (afile, err.__class__.__name__, err)) + if logging.root.level < logging.WARNING: + traceback.print_exc(file=sys.stdout) + else: + dest = afile.stem + ".out" + print(stats.save(dest, source=afile)) + + +if __name__ == "__main__": + main() diff --git a/src/freesas/app/cormap.py b/src/freesas/app/cormap.py new file mode 100644 index 0000000..042ee70 --- /dev/null +++ b/src/freesas/app/cormap.py @@ -0,0 +1,94 @@ +#!/usr/bin/python3 +# coding: utf-8 + +__author__ = "Jérôme Kieffer" +__license__ = "MIT" +__copyright__ = "2015, ESRF" +__date__ = "20/04/2020" + +import os +import logging +import freesas +from freesas.cormap import gof + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("cormap") +import numpy +from itertools import combinations +from collections import namedtuple +from freesas.sasio import load_scattering_data +from freesas.sas_argparser import SASParser + +datum = namedtuple("datum", ["index", "filename", "data"]) + +import platform + +operatingSystem = platform.system() +if operatingSystem == "Windows": + import glob + + +def parse(): + """Parse input and return list of files. + :return: list of input files + """ + description = "Measure pair-wise similarity of spectra " + epilog = """cormapy is an open-source implementation of + the cormap algorithm in datcmp (from ATSAS). 
+    It does not scale the data and assumes they are already scaled
+    """
+    parser = SASParser(prog="cormapy", description=description, epilog=epilog)
+    parser.add_file_argument(help_text="dat files to compare")
+
+    args = parser.parse_args()
+
+    if args.verbose:
+        logging.root.setLevel(logging.DEBUG)
+    files = [i for i in args.file if os.path.exists(i)]
+    if operatingSystem == "Windows" and files == []:
+        files = glob.glob(args.file[0])
+        files.sort()
+    input_len = len(files)
+    logger.debug("%s input files" % input_len)
+    return files
+
+
+def compare(lstfiles):
+    res = [
+        "Pair-wise Correlation Map",
+        " C Pr(>C)",
+    ]
+    data = []
+    for i, f in enumerate(lstfiles):
+        try:
+            ary = load_scattering_data(f)
+        except ValueError as e:
+            print(e)
+            continue  # skip unreadable files instead of reusing the previous array
+        if ary.ndim > 1 and ary.shape[1] > 1:
+            ary = ary[:, 1]
+        d = datum(i + 1, f, ary)
+        data.append(d)
+    for a, b in combinations(data, 2):
+        r = gof(a.data, b.data)
+        res.append(
+            "%6i vs. %6i %6i %8.6f" % (a.index, b.index, r.c, r.P)
+        )
+    res.append("")
+    for a in data:
+        res.append(
+            "%6i %8f + %8f * %s" % (a.index, 0.0, 1.0, a.filename)
+        )
+    res.append("")
+    print(os.linesep.join(res))
+    return res
+
+
+def main():
+    """main entry point"""
+    f = parse()
+    if f:
+        compare(f)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/freesas/app/extract_ascii.py b/src/freesas/app/extract_ascii.py
new file mode 100644
index 0000000..30e8fc0
--- /dev/null
+++ b/src/freesas/app/extract_ascii.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python3
+# coding: utf-8
+#
+# Project: freesas
+#          https://github.com/kif/freesas
+#
+# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+__author__ = "Jérôme Kieffer"
+__license__ = "MIT"
+__copyright__ = "2020, ESRF"
+__date__ = "15/01/2021"
+
+import io
+import os
+import sys
+import logging
+import glob
+import platform
+import posixpath
+from collections import namedtuple, OrderedDict
+import json
+import copy
+import pyFAI
+from pyFAI.io import Nexus
+from freesas.sas_argparser import SASParser
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("extract_ascii")
+
+if sys.version_info[0] < 3:
+    logger.error("This code requires Python 3.4+")
+
+NexusJuice = namedtuple(
+    "NexusJuice",
+    "filename h5path npt unit q I poni mask energy polarization signal2d error2d buffer concentration",
+)
+
+
+def parse():
+    """Parse input and return list of files.
+    :return: list of input files
+    """
+    description = "Extract the SAXS data from a Nexus file as a 3-column ascii (q, I, err). Metadata are exported in the headers as needed."
+    epilog = """extract_ascii.py allows you to export the data in inverse nm or inverse A with possible intensity scaling.
+    """
+    parser = SASParser(
+        prog="extract-ascii.py", description=description, epilog=epilog
+    )
+    # Commented options still need to be implemented
+    # parser.add_argument("-o", "--output", action='store', help="Output filename, by default the same with .dat extension", default=None, type=str)
+    # parser.add_argument("-u", "--unit", action='store', help="Unit for q: inverse nm or Angstrom?", default="nm", type=str)
+    # parser.add_argument("-n", "--normalize", action='store', help="Re-normalize all intensities with this factor ", default=1.0, type=float)
+    parser.add_file_argument("HDF5 input data")
+    parser.add_argument(
+        "-a",
+        "--all",
+        action="store_true",
+        help="extract every individual frame",
+        default=False,
+    )
+    return parser.parse_args()
+
+
+def extract_averaged(filename):
+    "Return some information extracted from a HDF5 file"
+    results = OrderedDict()
+    results["filename"] = filename
+    # Missing: comment normalization
+    with Nexus(filename, "r") as nxsr:
+        entry_grp = nxsr.get_entries()[0]
+        results["h5path"] = entry_grp.name
+        nxdata_grp = nxsr.h5[entry_grp.attrs["default"]]
+        signal = nxdata_grp.attrs["signal"]
+        axis = nxdata_grp.attrs["axes"]
+        results["I"] = nxdata_grp[signal][()]
+        results["q"] = nxdata_grp[axis][()]
+        results["std"] = nxdata_grp["errors"][()]
+        results["unit"] = pyFAI.units.to_unit(
+            axis + "_" + nxdata_grp[axis].attrs["units"]
+        )
+        integration_grp = nxdata_grp.parent
+        results["geometry"] = json.loads(
+            integration_grp["configuration/data"][()]
+        )
+        results["polarization"] = integration_grp[
+            "configuration/polarization_factor"
+        ][()]
+
+        instrument_grps = nxsr.get_class(entry_grp, class_type="NXinstrument")
+        if instrument_grps:
+            detector_grp = nxsr.get_class(
+                instrument_grps[0], class_type="NXdetector"
+            )[0]
+            results["mask"] = detector_grp["pixel_mask"].attrs["filename"]
+        sample_grp = nxsr.get_class(entry_grp, class_type="NXsample")[0]
+        results["sample"] = posixpath.split(sample_grp.name)[-1]
+        results["buffer"] = sample_grp["buffer"][()]
+        results["storage temperature"] = sample_grp["temperature_env"][()]
+        results["exposure temperature"] = sample_grp["temperature"][()]
+        results["concentration"] = sample_grp["concentration"][()]
+        if "2_correlation_mapping" in entry_grp:
+            results["to_merge"] = entry_grp[
+                "2_correlation_mapping/results/to_merge"
+            ][()]
+    return results
+
+
+def extract_all(filename):
+    "Return some information extracted from a HDF5 file, for all individual frames"
+    res = []
+    results = OrderedDict()
+    results["filename"] = filename
+    with Nexus(filename, "r") as nxsr:
+        entry_grp = nxsr.get_entries()[0]
+        results["h5path"] = entry_grp.name
+        nxdata_grp = nxsr.h5[entry_grp.name + "/1_integration/results"]
+        signal = nxdata_grp.attrs["signal"]
+        axis = nxdata_grp.attrs["axes"][1]
+        I = nxdata_grp[signal][()]
+        results["q"] = nxdata_grp[axis][()]
+        std = nxdata_grp["errors"][()]
+        results["unit"] = pyFAI.units.to_unit(
+            axis + "_" + nxdata_grp[axis].attrs["units"]
+        )
+        integration_grp = nxdata_grp.parent
+        results["geometry"] = json.loads(
+            integration_grp["configuration/data"][()]
+        )
+        results["polarization"] = integration_grp[
+            "configuration/polarization_factor"
+        ][()]
+        instrument_grp = nxsr.get_class(entry_grp,
class_type="NXinstrument")[
+            0
+        ]
+        detector_grp = nxsr.get_class(instrument_grp, class_type="NXdetector")[
+            0
+        ]
+        results["mask"] = detector_grp["pixel_mask"].attrs["filename"]
+        sample_grp = nxsr.get_class(entry_grp, class_type="NXsample")[0]
+        results["sample"] = posixpath.split(sample_grp.name)[-1]
+        results["buffer"] = sample_grp["buffer"][()]
+        if "temperature_env" in sample_grp:
+            results["storage temperature"] = sample_grp["temperature_env"][()]
+        if "temperature" in sample_grp:
+            results["exposure temperature"] = sample_grp["temperature"][()]
+        if "concentration" in sample_grp:
+            results["concentration"] = sample_grp["concentration"][()]
+        # if "2_correlation_mapping" in entry_grp:
+        #     results["to_merge"] = entry_grp["2_correlation_mapping/results/to_merge"][()]
+        for i, s in zip(I, std):
+            r = copy.copy(results)
+            r["I"] = i
+            r["std"] = s
+            res.append(r)
+    return res
+
+
+def write_ascii(results, output=None, hdr="#", linesep=os.linesep):
+    """
+    :param results: dict containing some NexusJuice
+    :param output: name of the 3-column ascii file to be written
+    :param hdr: header mark, usually '#'
+    :param linesep: to be able to adapt the end of lines
+
+    Adam Round explicitly asked for (email from Date: Tue, 04 Oct 2011 15:22:29 +0200):
+    Modification from:
+    # BSA buffer
+    # Sample c= 0.0 mg/ml (these two lines are required for current DOS pipeline and can be cleaned up once we use EDNA to get to ab-initio models)
+    #
+    # Sample environment:
+    # Detector = Pilatus 1M
+    # PixelSize_1 = 0.000172
+    # PixelSize_2 = 6.283185 (I think it could avoid confusion if we give the actual pixel size as 0.000172 for X and Y and not to give the integrated sizes. Also could there also be a modification for PixelSize_1 as on the diagonal won't it be the hypotenuse (0.000243)?
and thus will be on average a bit bigger than 0.000172) + # + # title = BSA buffer + # Frame 7 of 10 + # Time per frame (s) = 10 + # SampleDistance = 2.43 + # WaveLength = 9.31e-11 + # Normalization = 0.0004885 + # History-1 = saxs_angle +pass -omod n -rsys normal -da 360_deg -odim = 1 /data/id14eh3/inhouse/saxs_pilatus/Adam/EDNAtests/2d/dumdum_008_07.edf/data/id14eh3/inhouse/saxs_pilatus/Adam/EDNAtests/misc/dumdum_008_07.ang + # DiodeCurr = 0.0001592934 + # MachCurr = 163.3938 + # Mask = /data/id14eh3/archive/CALIBRATION/MASK/Pcon_01Jun_msk.edf + # SaxsDataVersion = 2.40 + # + # N 3 + # L q*nm I_BSA buffer stddev + # + # Sample Information: + # Storage Temperature (degrees C): 4 + # Measurement Temperature (degrees C): 10 + # Concentration: 0.0 + # Code: BSA + s-vector Intensity Error + s-vector Intensity Error + s-vector Intensity Error + s-vector Intensity Error + """ + hdr = str(hdr) + headers = [] + if "comments" in results: + headers.append(hdr + " " + results["comments"]) + else: + headers.append(hdr) + headers.append( + hdr + " Sample c= %s mg/ml" % results.get("concentration", -1) + ) + headers += [hdr, hdr + " Sample environment:"] + if "geometry" in results: + headers.append( + hdr + " Detector = %s" % results["geometry"]["detector"] + ) + headers.append( + hdr + " SampleDistance = %s" % results["geometry"]["dist"] + ) + headers.append( + hdr + " WaveLength = %s" % results["geometry"]["wavelength"] + ) + headers.append(hdr) + if "comments" in results: + headers.append(hdr + " title = %s" % results["comment"]) + if "to_merge" in results: + headers.append( + hdr + + " Frames merged: " + + " ".join([str(i) for i in results["to_merge"]]) + ) + if "normalization" in results: + headers.append(hdr + " Normalization = %s" % results["normalization"]) + if "mask" in results: + headers.append(hdr + " Mask = %s" % results["mask"]) + headers.append(hdr) + headers.append(hdr + (" N 3" if "std" in results else " N 2")) + line = hdr + " L " + if "unit" in results: + a, b = str(results["unit"]).split("_") + line += a + "*" + b.strip("^-1") + " I_" + else: + line += "q I_" + if "comment" in results: + line += results["comments"] + if "std" in results: + line += " stddev" + headers.append(line) + headers.append(hdr) + headers.append(hdr + " Sample Information:") + if "storage temperature" in results: + headers.append( + hdr + + " Storage Temperature (degrees C): %s" + % results["storage temperature"] + ) + if "exposure temperature" in results: + headers.append( + hdr + + " Measurement Temperature (degrees C): %s" + % results["exposure temperature"] + ) + + headers.append( + hdr + " Concentration: %s" % results.get("concentration", -1) + ) + if "buffer" in results: + headers.append(hdr + " Buffer: %s" % results["buffer"]) + headers.append(hdr + " Code: %s" % results.get("sample", "")) + + def write(headers, file_): + + file_.writelines(linesep.join(headers)) + file_.write(linesep) + + if "std" in results: + data = [ + "%14.6e\t%14.6e\t%14.6e" % (q, I, std) + for q, I, std in zip( + results["q"], results["I"], results["std"] + ) + ] + else: + data = [ + "%14.6e\t%14.6e\t" % (q, I) + for q, I in zip(results["q"], results["I"]) + ] + data.append("") + file_.writelines(linesep.join(data)) + + if output: + with open(output, "w") as f: + write(headers, f) + else: + f = io.StringIO() + write(headers, f) + f.seek(0) + return f.read() + + +def main(): + args = parse() + if args.verbose: + logging.root.setLevel(logging.DEBUG) + files = [i for i in args.file if os.path.exists(i)] + if platform.system() == 
"Windows" and files == []: + files = glob.glob(args.file[0]) + files.sort() + input_len = len(files) + logger.debug("%s input files", input_len) + for src in files: + if args.all: + dest = os.path.splitext(src)[0] + "%04i.dat" + for idx, frame in enumerate(extract_all(src)): + print(src, " --> ", dest % idx) + write_ascii(frame, dest % idx) + else: + dest = os.path.splitext(src)[0] + ".dat" + write_ascii(extract_averaged(src), dest) + print(src, " --> ", dest) + + +if __name__ == "__main__": + main() diff --git a/src/freesas/app/meson.build b/src/freesas/app/meson.build new file mode 100644 index 0000000..c114d40 --- /dev/null +++ b/src/freesas/app/meson.build @@ -0,0 +1,14 @@ +py.install_sources([ + '__init__.py', + 'auto_gpa.py', + 'auto_guinier.py', + 'autorg.py', + 'bift.py', + 'cormap.py', + 'extract_ascii.py', + 'plot_sas.py', + 'supycomb.py' +], + pure: false, # Will be installed next to binaries + subdir: 'freesas.app' # Folder relative to site-packages to install to +) diff --git a/src/freesas/app/plot_sas.py b/src/freesas/app/plot_sas.py new file mode 100644 index 0000000..6378b30 --- /dev/null +++ b/src/freesas/app/plot_sas.py @@ -0,0 +1,144 @@ +#!/usr/bin/python3 +# coding: utf-8 +# +# Project: freesas +# https://github.com/kif/freesas +# +# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+
+"Tool to perform a simple plotting of a set of SAS curves"
+
+__author__ = "Jérôme Kieffer"
+__license__ = "MIT"
+__copyright__ = "2020, ESRF"
+__date__ = "14/05/2020"
+
+import platform
+import logging
+from pathlib import Path
+from matplotlib.pyplot import switch_backend
+from matplotlib.backends.backend_pdf import PdfPages
+from freesas import plot
+from freesas.sasio import (
+    load_scattering_data,
+    convert_inverse_angstrom_to_nanometer,
+)
+from freesas.autorg import InsufficientDataError, NoGuinierRegionError
+from freesas.sas_argparser import SASParser
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("plot_sas")
+
+
+def set_backend(output: Path = None, output_format: str = None):
+    """Explicitly set a silent backend based on the output format or filename.
+    Needed on MacOS.
+    @param output: Name of the specified output file
+    @param output_format: User specified format
+    """
+    if output_format:
+        output_format = output_format.lower()
+    elif output and len(output.suffix) > 0:
+        output_format = output.suffix.lower()[1:]
+    if output_format:
+        if output_format == "svg":
+            switch_backend("svg")
+        elif output_format in ["ps", "eps"]:
+            switch_backend("ps")
+        elif output_format == "pdf":
+            switch_backend("pdf")
+        elif output_format == "png":
+            switch_backend("agg")
+
+
+def parse():
+    """Parse input and return list of files.
+    :return: list of input files
+    """
+    description = "Generate typical sas plots with matplotlib"
+    epilog = """freesas is an open-source implementation of a bunch of
+    small angle scattering algorithms. """
+    parser = SASParser(
+        prog="freesas.py", description=description, epilog=epilog
+    )
+    parser.add_file_argument(help_text="dat files to plot")
+    parser.add_output_filename_argument()
+    parser.add_output_data_format("jpeg", "svg", "png", "pdf")
+    parser.add_q_unit_argument()
+    return parser.parse_args()
+
+
+def create_figure(file: Path, unit: str = "nm"):
+    """Create multi-plot SAS figure for data from a file
+    @param file: filename of SAS file in q I Ierr format
+    @param unit: length unit of input data, supported options are Å and nm.
+ :return: figure with SAS plots for this file + """ + data = load_scattering_data(file) + if unit == "Å": + data = convert_inverse_angstrom_to_nanometer(data) + fig = plot.plot_all(data) + fig.suptitle(file) + return fig + + +def main(): + args = parse() + if args.verbose: + logging.root.setLevel(logging.DEBUG) + files = [Path(i) for i in args.file if Path(i).exists()] + if platform.system() == "Windows" and files == []: + files = list(Path.cwd().glob(args.file[0])) + files.sort() + input_len = len(files) + logger.debug("%s input files", input_len) + figures = [] + + if args.output and len(files) > 1: + logger.warning("Only PDF export is possible in multi-frame mode") + if args.output and platform.system() == "Darwin": + if len(files) == 1: + set_backend(args.output, args.format) + elif len(files) > 1: + set_backend(output_format="pdf") + for afile in files: + try: + fig = create_figure(afile, args.unit) + except OSError: + logger.error("Unable to load file %s", afile) + except (InsufficientDataError, NoGuinierRegionError, ValueError): + logger.error("Unable to process file %s", afile) + else: + figures.append(fig) + if args.output is None: + fig.show() + elif len(files) == 1: + fig.savefig(args.output, format=args.format) + if len(figures) > 1 and args.output: + with PdfPages(args.output) as pdf_output_file: + for fig in figures: + pdf_output_file.savefig(fig) + if not args.output: + input("Press enter to quit") + + +if __name__ == "__main__": + main() diff --git a/src/freesas/app/supycomb.py b/src/freesas/app/supycomb.py new file mode 100644 index 0000000..2c4f688 --- /dev/null +++ b/src/freesas/app/supycomb.py @@ -0,0 +1,137 @@ +#!/usr/bin/python3 +__author__ = "Guillaume Bonamis" +__license__ = "MIT" +__copyright__ = "2015, ESRF" +__date__ = "09/07/2020" + +import logging +from os.path import dirname, abspath +from freesas.align import InputModels, AlignModels +from freesas.sas_argparser import SASParser + +base = dirname(dirname(abspath(__file__))) + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("supycomb") + + +def parse(): + + """Parse input and return list of files. + :return: list of args + """ + description = "Align several models and calculate NSD" + epilog = """supycomb is an open-source implementation of + [J. Appl. Cryst. (2001). 34, 33-41](doi:10.1107/S0021889800014126). + + The main difference with supcomb: the fast mode does not re-bin beads. It only refines the best matching orientation which provides a speed-up of a factor 8. 
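+
+    Example invocation (illustrative, with placeholder file names):
+
+        supycomb model-01.pdb model-02.pdb -m FAST -o aligned.pdb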
+
+    """
+    parser = SASParser(prog="supycomb", description=description, epilog=epilog)
+    parser.add_file_argument(help_text="pdb files to align")
+    parser.add_argument(
+        "-m",
+        "--mode",
+        dest="mode",
+        type=str,
+        choices=["SLOW", "FAST"],
+        default="SLOW",
+        help="Either SLOW or FAST, default: %(default)s",
+    )
+    parser.add_argument(
+        "-e",
+        "--enantiomorphs",
+        type=str,
+        choices=["YES", "NO"],
+        default="YES",
+        help="Search enantiomorphs, YES or NO, default: %(default)s",
+    )
+    parser.add_argument(
+        "-q",
+        "--quiet",
+        type=str,
+        choices=["ON", "OFF"],
+        default="ON",
+        help="Hide log or not, default: %(default)s",
+    )
+    parser.add_argument(
+        "-g",
+        "--gui",
+        type=str,
+        choices=["YES", "NO"],
+        default="YES",
+        help="Use GUI for figures or not, default: %(default)s",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        type=str,
+        default="aligned.pdb",
+        help="output filename, default: %(default)s",
+    )
+    return parser.parse_args()
+
+
+def main():
+    """main application"""
+    args = parse()
+    input_len = len(args.file)
+    logger.info("%s input files" % input_len)
+    selection = InputModels()
+
+    if args.mode == "SLOW":
+        slow = True
+        logger.info("SLOW mode")
+    else:
+        slow = False
+        logger.info("FAST mode")
+
+    if args.enantiomorphs == "YES":
+        enantiomorphs = True
+    else:
+        enantiomorphs = False
+        logger.info("NO enantiomorphs")
+
+    if args.quiet == "OFF":
+        logger.setLevel(logging.DEBUG)
+        logger.info("setLevel: Debug")
+
+    if args.gui == "NO":
+        save = True
+        logger.info(
+            "Figures saved automatically:\n R factor values and selection => Rfactor.png\n NSD table and selection => nsd.png"
+        )
+    else:
+        save = False
+
+    align = AlignModels(args.file, slow=slow, enantiomorphs=enantiomorphs)
+    if input_len == 2:
+        align.outputfiles = args.output
+        align.assign_models()
+        dist = align.alignment_2models()
+        logger.info("%s and %s aligned" % (args.file[0], args.file[1]))
+        logger.info("NSD after optimized alignment = %.2f" % dist)
+    else:
+        align.outputfiles = [
+            "model-%02i.pdb" % (i + 1) for i in range(input_len)
+        ]
+        selection.inputfiles = args.file
+        selection.models_selection()
+        selection.rfactorplot(save=save)
+        align.models = selection.sasmodels
+        align.validmodels = selection.validmodels
+
+        align.makeNSDarray()
+        align.alignment_reference()
+        logger.info(
+            "valid models aligned on model %s" % (align.reference + 1)
+        )
+        align.plotNSDarray(rmax=round(selection.rmax, 4), save=save)
+
+    if not save and input_len > 2:
+        input("Press enter to exit")
+
+
+if __name__ == "__main__":
+    main()

From 10920fbd04a85accc79f271a54f3cffdc56fa342 Mon Sep 17 00:00:00 2001
From: Jerome Kieffer
Date: Mon, 27 Nov 2023 17:08:43 +0100
Subject: [PATCH 09/45] test moved to meson

---
 src/freesas/test/__init__.py           |  29 +
 src/freesas/test/meson.build           |  19 +
 src/freesas/test/mock_open_38.py       |  98 +++
 src/freesas/test/test_align.py         | 152 ++++
 src/freesas/test/test_all.py           |  38 +
 src/freesas/test/test_autorg.py        | 448 ++++++++++++
 src/freesas/test/test_average.py       |  89 +++
 src/freesas/test/test_bift.py          | 132 ++++
 src/freesas/test/test_cormap.py        |  88 +++
 src/freesas/test/test_distance.py      |  62 ++
 src/freesas/test/test_fitting.py       | 938 +++++++++++++++++++++++++
 src/freesas/test/test_model.py         | 182 +++++
 src/freesas/test/test_sas_argparser.py | 603 ++++++++++++++++
 src/freesas/test/test_sasio.py         | 192 +++++
 src/freesas/test/utilstests.py         |  24 +
 15 files changed, 3094 insertions(+)
 create mode 100644 src/freesas/test/__init__.py
 create mode 100644 src/freesas/test/meson.build
src/freesas/test/mock_open_38.py create mode 100644 src/freesas/test/test_align.py create mode 100644 src/freesas/test/test_all.py create mode 100644 src/freesas/test/test_autorg.py create mode 100644 src/freesas/test/test_average.py create mode 100644 src/freesas/test/test_bift.py create mode 100644 src/freesas/test/test_cormap.py create mode 100644 src/freesas/test/test_distance.py create mode 100644 src/freesas/test/test_fitting.py create mode 100644 src/freesas/test/test_model.py create mode 100644 src/freesas/test/test_sas_argparser.py create mode 100644 src/freesas/test/test_sasio.py create mode 100644 src/freesas/test/utilstests.py diff --git a/src/freesas/test/__init__.py b/src/freesas/test/__init__.py new file mode 100644 index 0000000..dbcadf1 --- /dev/null +++ b/src/freesas/test/__init__.py @@ -0,0 +1,29 @@ +#!usr/bin/env python +# coding: utf-8 + +__author__ = "Jérôme Kieffer" +__license__ = "MIT" +__date__ = "15/01/2021" +__copyright__ = "2015-2021, ESRF" + +import sys +import unittest +from .test_all import suite + + +def run_tests(): + """Run test complete test_suite""" + mysuite = suite() + runner = unittest.TextTestRunner() + if not runner.run(mysuite).wasSuccessful(): + print("Test suite failed") + return 1 + else: + print("Test suite succeeded") + return 0 + + +run = run_tests + +if __name__ == '__main__': + sys.exit(run_tests()) diff --git a/src/freesas/test/meson.build b/src/freesas/test/meson.build new file mode 100644 index 0000000..8358bc1 --- /dev/null +++ b/src/freesas/test/meson.build @@ -0,0 +1,19 @@ +py.install_sources([ +'__init__.py', +'mock_open_38.py', +'test_align.py', +'test_all.py', +'test_autorg.py', +'test_average.py', +'test_bift.py', +'test_cormap.py', +'test_distance.py', +'test_fitting.py', +'test_model.py', +'test_sas_argparser.py', +'test_sasio.py', +'utilstests.py', +], + pure: false, # Will be installed next to binaries + subdir: 'freesas.test' # Folder relative to site-packages to install to +) diff --git a/src/freesas/test/mock_open_38.py b/src/freesas/test/mock_open_38.py new file mode 100644 index 0000000..cf85a66 --- /dev/null +++ b/src/freesas/test/mock_open_38.py @@ -0,0 +1,98 @@ +""" +This is the Python 3.8 implementation of mock_open taken from +https://github.com/python/cpython/blob/3.8/Lib/unittest/mock.py +Hence: +"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation; +All Rights Reserved" +""" + +import io +from unittest.mock import MagicMock, DEFAULT + + +file_spec = None +#sentinel = _Sentinel() +#DEFAULT = sentinel.DEFAULT + +def _to_stream(read_data): + if isinstance(read_data, bytes): + return io.BytesIO(read_data) + else: + return io.StringIO(read_data) + + +def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + `read_data` is a string for the `read`, `readline` and `readlines` of the + file handle to return. This is an empty string by default. 
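+
+    Typical use (illustrative sketch only; any path works, since `open` is
+    replaced by the mock):
+
+        m = mock_open(read_data="line1\nline2\n")
+        with patch("builtins.open", m):
+            with open("some/file.txt") as handle:
+                first = handle.readline()   # -> "line1\n"
+        m.assert_called_once_with("some/file.txt")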
+ """ + _read_data = _to_stream(read_data) + _state = [_read_data, None] + + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return _state[0].readlines(*args, **kwargs) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return _state[0].read(*args, **kwargs) + + def _readline_side_effect(*args, **kwargs): + yield from _iter_side_effect() + while True: + yield _state[0].readline(*args, **kwargs) + + def _iter_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _state[0]: + yield line + + def _next_side_effect(): + if handle.readline.return_value is not None: + return handle.readline.return_value + return next(_state[0]) + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + _state[1] = _readline_side_effect() + handle.readline.side_effect = _state[1] + handle.readlines.side_effect = _readlines_side_effect + handle.__iter__.side_effect = _iter_side_effect + handle.__next__.side_effect = _next_side_effect + + def reset_data(*args, **kwargs): + _state[0] = _to_stream(read_data) + if handle.readline.side_effect == _state[1]: + # Only reset the side effect if the user hasn't overridden it. + _state[1] = _readline_side_effect() + handle.readline.side_effect = _state[1] + return DEFAULT + + mock.side_effect = reset_data + mock.return_value = handle + return mock diff --git a/src/freesas/test/test_align.py b/src/freesas/test/test_align.py new file mode 100644 index 0000000..3137e63 --- /dev/null +++ b/src/freesas/test/test_align.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +from __future__ import print_function + +__author__ = "Guillaume" +__license__ = "MIT" +__copyright__ = "2015, ESRF" + +import numpy +import unittest +from .utilstests import get_datafile +from ..model import SASModel +from ..align import AlignModels +from ..transformations import translation_matrix, euler_matrix +import logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("AlignModels_test") + + +def move(mol): + """ + Random movement of the molecule. 
+
+    @param mol: 2d array, coordinates of the molecule
+    @return mol: 2D array, coordinates of the molecule after a translation and a rotation
+    """
+    vect = numpy.random.random(3)
+    translation = translation_matrix(vect)
+
+    euler = numpy.random.random(3)
+    rotation = euler_matrix(euler[0], euler[1], euler[2])
+
+    mol = numpy.dot(rotation, mol.T)
+    mol = numpy.dot(translation, mol).T
+
+    return mol
+
+
+def assign_random_mol(inf=None, sup=None):
+    """
+    Create a random 2d array to create a molecule
+
+    @param inf: lower limit of coordinate values
+    @param sup: upper limit of coordinate values
+    @return molecule: 2d array, random coordinates
+    """
+    if not inf:
+        inf = 0
+    if not sup:
+        sup = 100
+    molecule = numpy.random.randint(inf, sup, size=400).reshape(100, 4).astype(float)
+    molecule[:, -1] = 1.0
+    return molecule
+
+
+class TestAlign(unittest.TestCase):
+    testfile1 = get_datafile("dammif-01.pdb")
+    testfile2 = get_datafile("dammif-02.pdb")
+
+    def test_alignment(self):
+        inputfiles = [self.testfile1, self.testfile1]
+        align = AlignModels(inputfiles, slow=False)
+        align.assign_models()
+        m = align.models[0]
+        n = align.models[1]
+        n.atoms = move(n.atoms)
+        n.centroid()
+        n.inertiatensor()
+        n.canonical_parameters()
+        if m.dist(n, m.atoms, n.atoms) == 0:
+            logger.error(m.dist(n, m.atoms, n.atoms))
+            logger.error("problem with the movement")
+        dist = align.alignment_2models(save=False)
+        self.assertAlmostEqual(dist, 0, 12, msg="NSD unequal 0, %s!=0" % dist)
+
+    def test_usefull_alignment(self):
+        inputfiles = [self.testfile1, self.testfile2]
+        align = AlignModels(inputfiles, slow=False)
+        align.assign_models()
+        mol1 = align.models[0]
+        mol2 = align.models[1]
+        dist_before = mol1.dist(mol2, mol1.atoms, mol2.atoms)
+        symmetry, par = align.alignment_sym(mol1, mol2)
+        dist_after = mol1.dist_after_movement(par, mol2, symmetry)
+        self.assertGreaterEqual(dist_before, dist_after, "increase of distance after alignment %s<%s" % (dist_before, dist_after))
+
+    def test_optimisation_align(self):
+        inputfiles = [self.testfile1, self.testfile2]
+        align = AlignModels(inputfiles, slow=False)
+        align.assign_models()
+        mol1 = align.models[0]
+        mol2 = align.models[1]
+        align.slow = False
+        sym0, p0 = align.alignment_sym(mol1, mol2)
+        dist_before = mol1.dist_after_movement(p0, mol2, sym0)
+        align.slow = True
+        sym, p = align.alignment_sym(mol1, mol2)
+        dist_after = mol1.dist_after_movement(p, mol2, sym)
+        self.assertGreaterEqual(dist_before, dist_after, "increase of distance after optimized alignment %s<%s" % (dist_before, dist_after))
+
+    def test_alignment_intruder(self):
+        intruder = numpy.random.randint(0, 8)
+        inputfiles = []
+        for i in range(8):
+            if i == intruder:
+                inputfiles.append(self.testfile2)
+            else:
+                inputfiles.append(self.testfile1)
+
+        align = AlignModels(inputfiles, slow=False, enantiomorphs=False)
+        align.assign_models()
+        align.validmodels = numpy.ones(8)
+        table = align.makeNSDarray()
+        if table.sum() == 0:
+            logger.error("there are no intruders")
+
+        averNSD = ((table.sum(axis=-1)) / (align.validmodels.sum() - 1))
+        num_intr = averNSD.argmax()
+
+        if num_intr != intruder:
+            logger.error("cannot find the intruder")
+        self.assertEqual(num_intr, intruder, msg="did not find the right intruder, %s!=%s" % (num_intr, intruder))
+
+    def test_reference(self):
+        inputfiles = [self.testfile1] * 8
+        align = AlignModels(inputfiles, slow=False, enantiomorphs=False)
+        align.assign_models()
+        for i in range(8):
+            mol = assign_random_mol()
+            align.models[i].atoms = mol
+        align.validmodels = numpy.ones(8)
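+        # Editorial note: makeNSDarray() fills the pairwise matrix of
+        # normalised spatial discrepancies (NSD) between all valid models,
+        # and find_reference() picks the model with the smallest mean NSD
+        # to all others; the loop below checks that no other row of the
+        # table has a smaller mean than the reference row.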
table = align.makeNSDarray() + ref = align.find_reference() + neg_dif = 0 + for i in range(8): + dif = (table[i, :] - table[ref, :]).mean() + if dif < 0: + neg_dif += 1 + self.assertEqual(neg_dif, 0, msg="pb with reference choice") + + +def suite(): + testSuite = unittest.TestSuite() + testSuite.addTest(TestAlign("test_alignment")) + testSuite.addTest(TestAlign("test_usefull_alignment")) + testSuite.addTest(TestAlign("test_optimisation_align")) + testSuite.addTest(TestAlign("test_alignment_intruder")) + testSuite.addTest(TestAlign("test_reference")) + return testSuite + +if __name__ == '__main__': + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/test_all.py b/src/freesas/test/test_all.py new file mode 100644 index 0000000..1377672 --- /dev/null +++ b/src/freesas/test/test_all.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +# coding: utf-8 +from __future__ import print_function + +__author__ = "Guillaume" +__license__ = "MIT" +__copyright__ = "2015, ESRF" +__date__ = "25/04/2020" + +import unittest +from . import test_model +from . import test_align +from . import test_distance +from . import test_cormap +from . import test_autorg +from . import test_bift +from . import test_sasio +from . import test_sas_argparser +from . import test_fitting + + +def suite(): + testSuite = unittest.TestSuite() + testSuite.addTest(test_bift.suite()) + testSuite.addTest(test_model.suite()) + testSuite.addTest(test_align.suite()) + testSuite.addTest(test_distance.suite()) + testSuite.addTest(test_cormap.suite()) + testSuite.addTest(test_autorg.suite()) + testSuite.addTest(test_sasio.suite()) + testSuite.addTest(test_sas_argparser.suite()) + testSuite.addTest(test_fitting.suite()) + return testSuite + + +if __name__ == "__main__": + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/test_autorg.py b/src/freesas/test/test_autorg.py new file mode 100644 index 0000000..e27560f --- /dev/null +++ b/src/freesas/test/test_autorg.py @@ -0,0 +1,448 @@ +# -*- coding: utf-8 -*- +# +# Project: freesas +# https://github.com/kif/freesas +# +# Copyright (C) 2017 European Synchrotron Radiation Facility, Grenoble, France +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +__authors__ = ["J. 
Kieffer"] +__license__ = "MIT" +__date__ = "10/06/2020" + +import logging +import unittest +from math import sqrt, pi + +import numpy +from scipy.stats import linregress + +from .utilstests import get_datafile +from ..autorg import ( + autoRg, + RG_RESULT, + linear_fit, + auto_gpa, + auto_guinier, +) +from .._autorg import curate_data # pylint: disable=E0401 +from ..invariants import calc_Rambo_Tainer +from .._bift import distribution_sphere # pylint: disable=E0401 + +logger = logging.getLogger(__name__) + + +def create_synthetic_data(R0=4, I0=100): + """Create idealized data for a sphere of radius R0=4 whose Rg should be 4*sqrt(3/5)""" + npt = 1000 + Dmax = 2 * R0 + size = 5000 + r = numpy.linspace(0, Dmax, npt + 1) + p = distribution_sphere(I0, Dmax, npt) + q = numpy.linspace(0, 10, size) + qr = numpy.outer(q, r / pi) + T = (4 * pi * (r[-1] - r[0]) / npt) * numpy.sinc(qr) + I = T.dot(p) + err = numpy.sqrt(I) + return numpy.vstack((q, I, err)).T[1:] + + +class TestAutoRg(unittest.TestCase): + testfile = get_datafile("bsa_005_sub.dat") + + def __init__(self, testName, **extra_kwargs): + super().__init__(testName) + self.extra_arg = extra_kwargs + + # Reference implementation + atsas_autorg = { + "Version": "Atsas 2.6.1", + "Rg": 2.98016, + "sigma_Rg": 0.156859, + "I0": 61.3093, + "sigma_I0": 0.0606315, + "start_point": 46, + "end_point": 95, + "quality": 0.752564, + "aggregated": 0, + } + + def test_atsas(self): + logger.info("test file: %s", self.testfile) + data = numpy.loadtxt(self.testfile) + atsas_result = self.atsas_autorg.copy() + logger.debug("Reference version: %s" % atsas_result.pop("Version")) + atsas_result = RG_RESULT(**atsas_result) + free_result = autoRg(data) + logger.debug("Ref: %s" % (atsas_result,)) + logger.debug("Obt: %s" % (free_result,)) + self.assertAlmostEqual( + atsas_result.Rg, free_result.Rg, 1, "RG fits within 2 digits" + ) + self.assertAlmostEqual( + atsas_result.I0, + free_result.I0, + msg="I0 fits within +/- 1 ", + delta=1, + ) + self.assertAlmostEqual( + atsas_result.quality, + free_result.quality, + 0, + msg="quality fits within 0 digits", + ) + + def test_synthetic(self): + """Test based on sythetic data: a sphere of radius R0=4 which Rg should be 4*sqrt(3/5)""" + R0 = 4 + I0 = 100 + data = create_synthetic_data(R0=R0, I0=I0) + Rg = autoRg(data) + logger.info("auto_rg %s", Rg) + self.assertAlmostEqual( + R0 * sqrt(3 / 5), Rg.Rg, 0, "Rg matches for a sphere" + ) + self.assertGreater( + R0 * sqrt(3 / 5), + Rg.Rg - Rg.sigma_Rg, + "Rg in range matches for a sphere", + ) + self.assertLess( + R0 * sqrt(3 / 5), + Rg.Rg + Rg.sigma_Rg, + "Rg in range matches for a sphere", + ) + self.assertAlmostEqual(I0, Rg.I0, 0, "I0 matches for a sphere") + self.assertGreater(I0, Rg.I0 - Rg.sigma_I0, "I0 matches for a sphere") + self.assertLess(I0, Rg.I0 + Rg.sigma_I0, "I0 matches for a sphere") + + gpa = auto_gpa(data) + logger.info("auto_gpa %s", gpa) + self.assertAlmostEqual( + gpa.Rg / (R0 * sqrt(3.0 / 5)), 1.00, 0, "Rg matches for a sphere" + ) + self.assertAlmostEqual(gpa.I0 / I0, 1.0, 1, "I0 matches for a sphere") + + guinier = auto_guinier(data) + logger.info("auto_guinier %s", guinier) + self.assertAlmostEqual( + R0 * sqrt(3.0 / 5), guinier.Rg, 0, "Rg matches for a sphere" + ) + sigma_Rg = max(guinier.sigma_Rg, 1e-4) + sigma_I0 = max(guinier.sigma_I0, 1e-4) + self.assertGreater( + R0 * sqrt(3.0 / 5), + guinier.Rg - sigma_Rg, + "Rg in range matches for a sphere", + ) + self.assertLess( + R0 * sqrt(3.0 / 5), + guinier.Rg + sigma_Rg, + "Rg in range matches for a sphere", 
+        )
+        self.assertAlmostEqual(I0, guinier.I0, 0, "I0 matches for a sphere")
+        self.assertGreater(
+            I0, guinier.I0 - sigma_I0, "I0 matches for a sphere"
+        )
+        self.assertLess(I0, guinier.I0 + sigma_I0, "I0 matches for a sphere")
+
+        # Check the Rambo-Tainer invariants...
+        rt = calc_Rambo_Tainer(data, guinier)
+        self.assertIsNotNone(
+            rt, "Rambo-Tainer invariants are actually calculated"
+        )
+
+    def test_auto_gpa_with_outlier(self):
+
+        """
+        Test that auto_gpa gives reasonable results
+        even if one data point is excessively large (e.g. hot pixel)
+        """
+        outlier_position = self.extra_arg["outlier_position"]
+        R0 = 4
+        I0 = 100
+        data = create_synthetic_data(R0=R0, I0=I0)
+        data[outlier_position, 1] *= 1000
+        gpa = auto_gpa(data)
+        logger.info("auto_gpa %s", gpa)
+        self.assertAlmostEqual(
+            gpa.Rg / (R0 * sqrt(3.0 / 5)),
+            1.00,
+            0,
+            f"In case of outlier at {outlier_position} Rg matches for a sphere",
+        )
+        self.assertAlmostEqual(
+            gpa.I0 / I0,
+            1.0,
+            1,
+            f"In case of outlier at {outlier_position} I0 matches for a sphere",
+        )
+
+
+class TestFit(unittest.TestCase):
+    # Test case originally from the Wikipedia article on linear regression;
+    # expected results from scipy.stats.linregress
+
+    def test_linear_fit_static(self):
+        testx = [
+            1.47,
+            1.5,
+            1.52,
+            1.55,
+            1.57,
+            1.6,
+            1.63,
+            1.65,
+            1.68,
+            1.7,
+            1.73,
+            1.75,
+            1.78,
+            1.80,
+            1.83,
+        ]
+        testy = [
+            52.21,
+            53.12,
+            54.48,
+            55.84,
+            57.20,
+            58.57,
+            59.93,
+            61.29,
+            63.11,
+            64.47,
+            66.28,
+            68.1,
+            69.92,
+            72.19,
+            74.46,
+        ]
+        testw = [1.0] * 15
+        testintercept = -39.061956
+        testslope = +61.2721865
+        fit_result = linear_fit(testx, testy, testw)
+        # print(fit_result)
+        self.assertAlmostEqual(
+            fit_result.intercept,
+            testintercept,
+            5,
+            "Intercept fits within 5 digits",
+        )
+        self.assertAlmostEqual(
+            fit_result.slope, testslope, 5, "Slope fits within 5 digits"
+        )
+
+    def test_linspace(self):
+        size = 100
+        x = numpy.linspace(-10, 10, size)
+        y = numpy.linspace(10, 0, size)
+        w = numpy.random.random(size)
+        fit_result = linear_fit(x, y, w)
+        # print(fit_result)
+        self.assertAlmostEqual(
+            fit_result.intercept, 5, 5, "Intercept fits within 5 digits"
+        )
+        self.assertAlmostEqual(
+            fit_result.slope, -0.5, 5, "Slope fits within 5 digits"
+        )
+
+    def test_random(self):
+
+        """
+        Tests that our linear regression implementation
+        gives the same results as scipy.stats for random data
+        """
+        size = 100
+        x = numpy.random.random(size)
+        y = 1.6 * x + 5 + numpy.random.random(size)
+        w = numpy.ones(size)
+        fit_result = linear_fit(x, y, w)
+        ref = linregress(x, y)
+        self.assertAlmostEqual(
+            fit_result.intercept,
+            ref[1],
+            5,
+            "Intercept fits within 5 digits",
+        )
+        self.assertAlmostEqual(
+            fit_result.slope, ref[0], 5, "Slope fits within 5 digits"
+        )
+        self.assertAlmostEqual(
+            fit_result.R2,
+            ref.rvalue ** 2,
+            5,
+            "R² value matches within 5 digits",
+        )
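(Editorial aside: the tests above pin `linear_fit(x, y, w)` against `scipy.stats.linregress`. For reference, a sketch of the closed-form weighted least squares such a routine is expected to reproduce — illustration only, not freesas code.)

```python
import numpy

def weighted_linear_fit(x, y, w):
    """Closed-form weighted least squares for y = slope * x + intercept."""
    x, y, w = (numpy.asarray(a, dtype=float) for a in (x, y, w))
    sw = w.sum()                                   # total weight
    swx, swy = (w * x).sum(), (w * y).sum()        # weighted first moments
    swxx, swxy = (w * x * x).sum(), (w * x * y).sum()
    slope = (sw * swxy - swx * swy) / (sw * swxx - swx ** 2)
    intercept = (swy - slope * swx) / sw
    return slope, intercept

# With unit weights this matches ordinary least squares (scipy.stats.linregress):
# weighted_linear_fit([1, 2, 3], [2, 4, 6], [1, 1, 1]) -> (2.0, 0.0)
```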
+
+
+class TestDataCuration(unittest.TestCase):
+    """Tests for the curate_data function."""
+
+    testfile = get_datafile("bsa_005_sub.dat")
+
+    def __init__(self, testName, **extra_kwargs):
+        super().__init__(testName)
+        self.extra_arg = extra_kwargs
+
+    def test_curate_data_BM29_bsa(self):
+        """Test data curation of "nice" BM29 data."""
+        logger.info("test file: %s", self.testfile)
+        data = numpy.loadtxt(self.testfile)
+        DTYPE = numpy.float64
+        raw_size = len(data)
+        q_ary = numpy.empty(raw_size, dtype=DTYPE)
+        i_ary = numpy.empty(raw_size, dtype=DTYPE)
+        sigma_ary = numpy.empty(raw_size, dtype=DTYPE)
+        q2_ary = numpy.empty(raw_size, dtype=DTYPE)
+        lgi_ary = numpy.empty(raw_size, dtype=DTYPE)
+        wg_ary = numpy.empty(raw_size, dtype=DTYPE)
+        offsets = numpy.empty(raw_size, dtype=numpy.int32)
+        data_range = numpy.zeros(3, dtype=numpy.int32)
+
+        curate_data(
+            data,
+            q_ary,
+            i_ary,
+            sigma_ary,
+            q2_ary,
+            lgi_ary,
+            wg_ary,
+            offsets,
+            data_range,
+        )
+
+        self.assertListEqual(
+            list(data_range),
+            [2, 203, 1033],
+            msg="reproduced expected BM29 data range",
+        )
+
+    def test_curate_synthetic_data(self):
+        """Test that for idealized data the cut-off is at I0/10."""
+        data = create_synthetic_data()
+        I_one = data[0, 1]
+        DTYPE = numpy.float64
+        raw_size = len(data)
+        q_ary = numpy.empty(raw_size, dtype=DTYPE)
+        i_ary = numpy.empty(raw_size, dtype=DTYPE)
+        sigma_ary = numpy.empty(raw_size, dtype=DTYPE)
+        q2_ary = numpy.empty(raw_size, dtype=DTYPE)
+        lgi_ary = numpy.empty(raw_size, dtype=DTYPE)
+        wg_ary = numpy.empty(raw_size, dtype=DTYPE)
+        offsets = numpy.empty(raw_size, dtype=numpy.int32)
+        data_range = numpy.zeros(3, dtype=numpy.int32)
+
+        curate_data(
+            data,
+            q_ary,
+            i_ary,
+            sigma_ary,
+            q2_ary,
+            lgi_ary,
+            wg_ary,
+            offsets,
+            data_range,
+        )
+
+        self.assertEqual(
+            offsets[0],
+            0,
+            msg="curated data for artificial data starts at 0",
+        )
+
+        self.assertTrue(
+            data[data_range[1] - 1, 1] > I_one / 10
+            and data[data_range[1] + 1, 1] < I_one / 10,
+            msg="curated data for artificial data ends at approx. I0/10",
+        )
+
+    def test_curate_synthetic_data_with_negative_points(self):
+        """Test that if one of the first three points is negative, all data before it gets ignored."""
+        negative_point_index = self.extra_arg["negative_point_index"]
+
+        data = create_synthetic_data()
+        DTYPE = numpy.float64
+        raw_size = len(data)
+        data[negative_point_index, 1] = -1
+
+        q_ary = numpy.empty(raw_size, dtype=DTYPE)
+        i_ary = numpy.empty(raw_size, dtype=DTYPE)
+        sigma_ary = numpy.empty(raw_size, dtype=DTYPE)
+        q2_ary = numpy.empty(raw_size, dtype=DTYPE)
+        lgi_ary = numpy.empty(raw_size, dtype=DTYPE)
+        wg_ary = numpy.empty(raw_size, dtype=DTYPE)
+        offsets = numpy.empty(raw_size, dtype=numpy.int32)
+        data_range = numpy.zeros(3, dtype=numpy.int32)
+
+        curate_data(
+            data,
+            q_ary,
+            i_ary,
+            sigma_ary,
+            q2_ary,
+            lgi_ary,
+            wg_ary,
+            offsets,
+            data_range,
+        )
+
+        self.assertEqual(
+            offsets[0],
+            negative_point_index + 1,
+            msg=f"curated data for artificial data starts after negative data point for negative point at {negative_point_index + 1}",
+        )
+
+        self.assertTrue(
+            data[offsets[data_range[1]] - 1, 1]
+            > data[negative_point_index + 1, 1] / 10
+            and data[offsets[data_range[1]] + 1, 1]
+            < data[negative_point_index + 1, 1] / 10,
+            msg=f"curated data for artificial data ends at approx.
I(point after negative point)/10 if negative point at {negative_point_index + 1}",
+        )
+
+
+def suite():
+    """Generic builder for the test suite."""
+    testSuite = unittest.TestSuite()
+    testSuite.addTest(TestAutoRg("test_atsas"))
+    testSuite.addTest(TestAutoRg("test_synthetic"))
+    for outlier_position in range(3):
+        testSuite.addTest(
+            TestAutoRg(
+                "test_auto_gpa_with_outlier", outlier_position=outlier_position
+            )
+        )
+    testSuite.addTest(TestFit("test_linear_fit_static"))
+    testSuite.addTest(TestFit("test_linspace"))
+    testSuite.addTest(TestDataCuration("test_curate_data_BM29_bsa"))
+    testSuite.addTest(TestDataCuration("test_curate_synthetic_data"))
+    for negative_point_index in range(3):
+        testSuite.addTest(
+            TestDataCuration(
+                "test_curate_synthetic_data_with_negative_points",
+                negative_point_index=negative_point_index,
+            )
+        )
+    return testSuite
+
+
+if __name__ == "__main__":
+    runner = unittest.TextTestRunner()
+    runner.run(suite())
diff --git a/src/freesas/test/test_average.py b/src/freesas/test/test_average.py
new file mode 100644
index 0000000..317bd11
--- /dev/null
+++ b/src/freesas/test/test_average.py
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+# coding: utf-8
+from __future__ import print_function
+
+__author__ = "Guillaume"
+__license__ = "MIT"
+__copyright__ = "2015, ESRF"
+
+import numpy
+import unittest
+from .utilstests import get_datafile
+from ..model import SASModel
+from ..average import Grid, AverModels
+
+import logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("AverModels_test")
+
+
+class TestAverage(unittest.TestCase):
+    testfile1 = get_datafile("model-01.pdb")
+    testfile2 = get_datafile("model-02.pdb")
+    inputfiles = [testfile1, testfile2]
+    grid = Grid(inputfiles)
+
+    def test_gridsize(self):
+        inputfiles = self.inputfiles
+        grid = self.grid
+        size = grid.spatial_extent()
+        coordmax = numpy.array([size[0:3]], dtype="float")
+        coordmin = numpy.array([size[3:6]], dtype="float")
+
+        pb = 0
+        for i in inputfiles:
+            m = SASModel(i)
+            a = coordmin + m.atoms[:, 0:3]
+            b = m.atoms[:, 0:3] - coordmax
+            if (a >= 0.0).any() or (b >= 0.0).any():
+                pb += 1
+        self.assertEqual(pb, 0, msg="computed size is not the expected one")
+
+    def test_knots(self):
+        grid = self.grid
+        nbknots = numpy.random.randint(4000, 6000)
+        threshold = 10.0  # acceptable difference between nbknots and the effective number of knots, in percent
+        grid.calc_radius(nbknots)
+        grid.make_grid()
+        gap = (1.0 * (grid.nbknots - nbknots) / nbknots) * 100
+        self.assertGreater(threshold, gap, msg="final number of knots too different from the wanted number: %s != %s" % (nbknots, grid.nbknots))
+
+    def test_makegrid(self):
+        grid = self.grid
+        lattice = grid.make_grid()
+        m = SASModel(lattice)
+        self.assertAlmostEqual(m.fineness, 2 * grid.radius, 10, msg="grid does not have the computed radius")
+
+    def test_read(self):
+        inputfiles = self.inputfiles
+        average = AverModels(inputfiles, self.grid.coordknots)
+        models = [SASModel(inputfiles[1]), SASModel(inputfiles[0])]
+        average.read_files(reference=1)
+        diff = 0.0
+        for i in range(len(inputfiles)):
+            diff += (models[i].atoms - average.models[i].atoms).max()
+        self.assertAlmostEqual(diff, 0.0, 10, msg="Files not read properly")
+
+    def test_occupancy(self):
+        average = AverModels(self.inputfiles, self.grid.coordknots)
+        average.read_files()
+        occ_grid = average.assign_occupancy()
+        average.grid = occ_grid
+        assert occ_grid.shape[-1] == 5, "problem in grid shape"
+        diff = occ_grid[:-1, 3] - occ_grid[1:, 3]
+        self.assertTrue(diff.max() >= 0.0, msg="grid is
not properly sorted with occupancy") + + +def suite(): + testSuite = unittest.TestSuite() + testSuite.addTest(TestAverage("test_gridsize")) + testSuite.addTest(TestAverage("test_knots")) + testSuite.addTest(TestAverage("test_makegrid")) + testSuite.addTest(TestAverage("test_read")) + testSuite.addTest(TestAverage("test_occupancy")) + return testSuite + + +if __name__ == '__main__': + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/test_bift.py b/src/freesas/test/test_bift.py new file mode 100644 index 0000000..c1b27bb --- /dev/null +++ b/src/freesas/test/test_bift.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# +# Project: freesas +# https://github.com/kif/freesas +# +# Copyright (C) 2017 European Synchrotron Radiation Facility, Grenoble, France +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +__authors__ = ["J. 
Kieffer"] +__license__ = "MIT" +__date__ = "10/06/2020" + +import numpy +import unittest +from .utilstests import get_datafile +from ..bift import auto_bift +from .._bift import BIFT, distribution_parabola, distribution_sphere, \ + ensure_edges_zero, smooth_density +import logging +logger = logging.getLogger(__name__) +import time + + +class TestBIFT(unittest.TestCase): + + DMAX = 10 + NPT = 100 + SIZE = 1000 + + @classmethod + def setUpClass(cls): + super(TestBIFT, cls).setUpClass() + cls.r = numpy.linspace(0, cls.DMAX, cls.NPT + 1) + dr = cls.DMAX / cls.NPT + cls.p = -cls.r * (cls.r - cls.DMAX) # Nice parabola + q = numpy.linspace(0, 8 * cls.DMAX / 3, cls.SIZE + 1) + sincqr = numpy.sinc(numpy.outer(q, cls.r / numpy.pi)) + I = 4 * numpy.pi * (cls.p * sincqr).sum(axis=-1) * dr + err = numpy.sqrt(I) + cls.I0 = I[0] + cls.q = q[1:] + cls.I = I[1:] + cls.err = err[1:] + cls.Rg = numpy.sqrt(0.5 * numpy.trapz(cls.p * cls.r ** 2, cls.r) / numpy.trapz(cls.p, cls.r)) + print(cls.Rg) + + @classmethod + def tearDownClass(cls): + super(TestBIFT, cls).tearDownClass() + cls.r = cls.p = cls.I = cls.q = cls.err = None + + def test_autobift(self): + data = numpy.vstack((self.q, self.I, self.err)).T + t0 = time.perf_counter() + bo = auto_bift(data) + key, value, valid = bo.get_best() +# print("key is ", key) + stats = bo.calc_stats() +# print("stat is ", stats) + logger.info("Auto_bift time: %s", time.perf_counter() - t0) + self.assertAlmostEqual(self.DMAX / key.Dmax, 1, 1, "DMax is correct") + self.assertAlmostEqual(self.I0 / stats.I0_avg, 1, 1, "I0 is correct") + self.assertAlmostEqual(self.Rg / stats.Rg_avg, 1, 2, "Rg is correct") + + def test_BIFT(self): + bift = BIFT(self.q, self.I, self.err) + # test two scan functions + key = bift.grid_scan(9, 11, 5, 10, 100, 5, 100) + # print("key is ", key) + self.assertAlmostEqual(self.DMAX / key.Dmax, 1, 2, "DMax is correct") + res = bift.monte_carlo_sampling(10, 3, 100) + # print("res is ", res) + self.assertAlmostEqual(self.DMAX / res.Dmax_avg, 1, 4, "DMax is correct") + + def test_disributions(self): + pp = numpy.asarray(distribution_parabola(self.I0, self.DMAX, self.NPT)) + ps = numpy.asarray(distribution_sphere(self.I0, self.DMAX, self.NPT)) + self.assertAlmostEqual(numpy.trapz(ps, self.r) * 4 * numpy.pi / self.I0, 1, 3, "Distribution for a sphere looks OK") + self.assertAlmostEqual(numpy.trapz(pp, self.r) * 4 * numpy.pi / self.I0, 1, 3, "Distribution for a parabola looks OK") + self.assertTrue(numpy.allclose(pp, self.p, 1e-4), "distribution matches") + + def test_fixEdges(self): + ones = numpy.ones(self.NPT) + ensure_edges_zero(ones) + self.assertAlmostEqual(ones[0], 0, msg="1st point set to 0") + self.assertAlmostEqual(ones[-1], 0, msg="last point set to 0") + self.assertTrue(numpy.allclose(ones[1:-1], numpy.ones(self.NPT-2), 1e-7), msg="non-edge points unchanged") + + def test_smoothing(self): + ones = numpy.ones(self.NPT) + empty = numpy.empty(self.NPT) + smooth_density(ones,empty) + self.assertTrue(numpy.allclose(ones, empty, 1e-7), msg="flat array smoothed into flat array") + random = numpy.random.rand(self.NPT) + smooth = numpy.empty(self.NPT) + smooth_density(random,smooth) + self.assertAlmostEqual(random[0], smooth[0], msg="first points of random array and smoothed random array match") + self.assertAlmostEqual(random[-1], smooth[-1], msg="last points of random array and smoothed random array match") + self.assertTrue(smooth[1]>=min(smooth[0], smooth[2]) and smooth[1]<=max(smooth[0], smooth[2]), msg="second point of random smoothed array between 1st 
and 3rd") + self.assertTrue(smooth[-2]>=min(smooth[-1], smooth[-3]) and smooth[-2]<= max(smooth[-1], smooth[-3]), msg="second to last point of random smoothed array between 3rd to last and last") + sign = numpy.sign(random[1:-3] - smooth[2:-2]) * numpy.sign(smooth[2:-2] - random[3:-1]) + self.assertTrue(numpy.allclose(sign, numpy.ones(self.NPT-4), 1e-7), msg="central points of random array and smoothed random array alternate") + +def suite(): + testSuite = unittest.TestSuite() + testSuite.addTest(TestBIFT("test_disributions")) + testSuite.addTest(TestBIFT("test_autobift")) + testSuite.addTest(TestBIFT("test_fixEdges")) + testSuite.addTest(TestBIFT("test_smoothing")) + return testSuite + + +if __name__ == '__main__': + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/test_cormap.py b/src/freesas/test/test_cormap.py new file mode 100644 index 0000000..7f8233f --- /dev/null +++ b/src/freesas/test/test_cormap.py @@ -0,0 +1,88 @@ +#!/usr/bin/python +# coding: utf-8 +from __future__ import print_function + +__author__ = "Jerome" +__license__ = "MIT" +__copyright__ = "2017, ESRF" + +import numpy +import unittest + +import logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("test_cormap") +from .. import cormap + + +class TestCormap(unittest.TestCase): + + def test_longest(self): + size = 1000 + target = 50 + start = 100 + + data = numpy.ones(size, dtype="float32") + res = cormap.measure_longest(data) + self.assertEqual(res, size, msg="computed size is correct: positive") + + data -= 2 + res = cormap.measure_longest(data) + self.assertEqual(res, size, msg="computed size is correct: negative") + + data[:] = 0 + data[start: start + target] = 1.0 + res = cormap.measure_longest(data) + self.assertEqual(res, target, msg="computed size is correct: positive/zero") + data = numpy.zeros(size, dtype="float32") + data[start: start + target] = -1.0 + res = cormap.measure_longest(data) + self.assertEqual(res, target, msg="computed size is correct: negative/zero") + data = numpy.fromfunction(lambda n:(-1) ** n, (size,)) + data[start: start + target] = 1.0 + res = cormap.measure_longest(data) + self.assertEqual(res, target + 1, msg="computed size is correct: positive/alternating") + data = numpy.fromfunction(lambda n:(-1) ** n, (size,)) + data[start: start + target] = -1.0 + res = cormap.measure_longest(data) + self.assertEqual(res, target + 1, msg="computed size is correct: negative/alternating") + + def test_stats(self): + self.assertEqual(cormap.LROH.A(10, 0), 1) + self.assertEqual(cormap.LROH.A(10, 1), 144) + self.assertEqual(cormap.LROH.A(10, 2), 504) + self.assertEqual(cormap.LROH.A(10, 10), 1024) + self.assertEqual(cormap.LROH.A(10, 11), 1024) + + self.assertEqual(cormap.LROH.A(0, 3), 1) + self.assertEqual(cormap.LROH.A(1, 3), 2) + self.assertEqual(cormap.LROH.A(2, 3), 4) + self.assertEqual(cormap.LROH.A(3, 3), 8) + self.assertEqual(cormap.LROH.A(4, 3), 15) + self.assertEqual(cormap.LROH.A(5, 3), 29) + self.assertEqual(cormap.LROH.A(6, 3), 56) + self.assertEqual(cormap.LROH.A(7, 3), 108) + self.assertEqual(cormap.LROH.A(8, 3), 208) + + self.assertAlmostEqual(cormap.LROH(200, 0), 1) + self.assertAlmostEqual(cormap.LROH(200, 4), 0.97, 2) + self.assertAlmostEqual(cormap.LROH(200, 5), 0.80, 2) + self.assertAlmostEqual(cormap.LROH(200, 6), 0.54, 2) + self.assertAlmostEqual(cormap.LROH(200, 7), 0.32, 2) + self.assertAlmostEqual(cormap.LROH(200, 8), 0.17, 2) + self.assertAlmostEqual(cormap.LROH(200, 9), 0.09, 2) + 
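+        # Editorial gloss, inferred from the values asserted here: A(n, c)
+        # counts the binary sequences of length n whose longest run of heads
+        # is at most c (so A(10, 10) == 2**10 == 1024), and
+        # LROH(n, c) = 1 - A(n, c) / 2**n is the probability that the longest
+        # run in n fair coin tosses exceeds c -- the p-value used by the
+        # correlation-map (cormap) test.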
self.assertAlmostEqual(cormap.LROH(200, 10), 0.05, 2) + self.assertAlmostEqual(cormap.LROH(200, 11), 0.02, 2) + + +def suite(): + testSuite = unittest.TestSuite() + testSuite.addTest(TestCormap("test_longest")) + testSuite.addTest(TestCormap("test_longest")) + testSuite.addTest(TestCormap("test_stats")) + return testSuite + + +if __name__ == '__main__': + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/test_distance.py b/src/freesas/test/test_distance.py new file mode 100644 index 0000000..490e6a6 --- /dev/null +++ b/src/freesas/test/test_distance.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import print_function + +__author__ = "Jérôme Kieffer" +__license__ = "MIT" +__copyright__ = "2015, ESRF" +__date__ = "16/12/2015" + +import numpy +import unittest +from .utilstests import get_datafile +from ..model import SASModel +import logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("cdistance_test") + + +class TestDistance(unittest.TestCase): + testfile1 = get_datafile("model-01.pdb") + testfile2 = get_datafile("dammif-01.pdb") + + def test_invariants(self): + m = SASModel() + m.read(self.testfile1) + f_np, r_np, d_np = m.calc_invariants(False) + f_cy, r_cy, d_cy = m.calc_invariants(True) + self.assertAlmostEqual(f_np, f_cy, 10, "fineness is the same %s!=%s" % (f_np, f_cy)) + self.assertAlmostEqual(r_np, r_cy, 10, "Rg is the same %s!=%s" % (r_np, r_cy)) + self.assertAlmostEqual(d_np, d_cy, 10, "Dmax is the same %s!=%s" % (d_np, d_cy)) + + def test_distance(self): + m = SASModel() + n = SASModel() + m.read(self.testfile1) + n.read(self.testfile2) + f_np = m.dist(n, m.atoms, n.atoms, False) + f_cy = m.dist(n, m.atoms, n.atoms, True) + self.assertAlmostEqual(f_np, f_cy, 10, "distance is the same %s!=%s" % (f_np, f_cy)) + + def test_same(self): + m = SASModel() + n = SASModel() + m.read(self.testfile1) + n.read(self.testfile1) + numpy.random.shuffle(n.atoms) + f_np = m.dist(n, m.atoms, n.atoms, False) + f_cy = m.dist(n, m.atoms, n.atoms, True) + self.assertAlmostEqual(f_np, 0, 10, "NSD not nul with np") + self.assertAlmostEqual(f_cy, 0, 10, "NSD not nul with cy") + + +def suite(): + testSuite = unittest.TestSuite() + testSuite.addTest(TestDistance("test_invariants")) + testSuite.addTest(TestDistance("test_distance")) + testSuite.addTest(TestDistance("test_same")) + return testSuite + +if __name__ == '__main__': + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/test_fitting.py b/src/freesas/test/test_fitting.py new file mode 100644 index 0000000..1e55f33 --- /dev/null +++ b/src/freesas/test/test_fitting.py @@ -0,0 +1,938 @@ +#!/usr/bin/python +# coding: utf-8 + +"""Test the functionality of fitting module.""" + +__authors__ = ["Martha Brennich"] +__license__ = "MIT" +__date__ = "16/07/2021" + + +import unittest +from unittest.mock import patch, MagicMock +import logging +import sys +import importlib +import platform +from io import StringIO +import pathlib +import contextlib +from types import SimpleNamespace +from typing import Callable +from errno import ENOENT +import numpy +from ..fitting import ( + set_logging_level, + get_output_destination, + get_guinier_header, + rg_result_to_output_line, + get_linesep, + run_guinier_fit, +) +from ..autorg import RG_RESULT, InsufficientDataError, NoGuinierRegionError +from ..sas_argparser import GuinierParser + +if sys.version_info.minor > 6: + from unittest.mock import mock_open +else: + from .mock_open_38 import 
mock_open
+
+
+logger = logging.getLogger(__name__)
+
+
+def reload_os_and_fitting():
+    """Some tests patch os and need to reload the modules."""
+    os = importlib.import_module("os")
+    os = importlib.reload(os)
+    fit = importlib.import_module("..fitting", "freesas.subpkg")
+    fit = importlib.reload(fit)
+    return fit
+
+
+def get_dummy_guinier_parser(**parse_output):
+    """Provide a fake GuinierParser with a predefined parse result."""
+
+    def get_mock_parse(**kwargs):
+        def mock_parse():
+            return SimpleNamespace(**kwargs)
+
+        return mock_parse
+
+    parser = GuinierParser(prog="test", description="test", epilog="test")
+    parser.parse_args = get_mock_parse(**parse_output)
+    return parser
+
+
+def patch_linesep(test_function):
+    """Patch fitting.get_linesep to return "linesep"."""
+
+    linesep_patch = patch(
+        "freesas.fitting.get_linesep",
+        MagicMock(return_value="linesep"),
+    )
+    return linesep_patch(test_function)
+
+
+def patch_collect_files(test_function):
+    """Patch fitting.collect_files to return Paths "test" and "test2"."""
+
+    collect_files_patch = patch(
+        "freesas.fitting.collect_files",
+        MagicMock(return_value=[pathlib.Path("test"), pathlib.Path("test2")]),
+    )
+    return collect_files_patch(test_function)
+
+
+def counted(function: Callable) -> Callable:
+    """Wrap a function to keep track of how often it has been called."""
+
+    def wrapped(*args, **kwargs):
+        wrapped.calls += 1
+        return function(*args, **kwargs)
+
+    wrapped.calls = 0
+    return wrapped
+
+
+def build_mock_for_load_scattering_with_Errors(erroneous_file: dict):
+
+    """Create a mock for loading data from a file.
+    The resulting function will raise an error
+    for files for which an error is provided in erroneous_file,
+    and return an ndarray for all other files."""
+
+    def mock_for_load_scattering(file: pathlib.Path):
+        if file.name in erroneous_file:
+            raise erroneous_file[file.name]
+        else:
+            return numpy.array(
+                [[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]]
+            )
+
+    return mock_for_load_scattering
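(Editorial aside: a minimal illustration of the `counted` helper defined above; the function name is arbitrary and the snippet assumes `counted` is in scope.)

```python
@counted
def dummy_fit(data):
    """Stand-in for a fit function; returns its input unchanged."""
    return data

dummy_fit(1)
dummy_fit(2)
assert dummy_fit.calls == 2  # the wrapper counts invocations via a function attribute
```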
+
+
+class TestFitting(unittest.TestCase):
+    def test_set_logging_level_does_not_change_logging_level_if_input_lower_1(
+        self,
+    ):
+
+        """
+        Test that the logging level only gets changed if the requested level is > 0.
+        """
+
+        initial_logging_level = logging.root.level
+        set_logging_level(0)
+        self.assertEqual(
+            logging.root.level,
+            initial_logging_level,
+            msg="setting verbosity to 0 does not affect logging level",
+        )
+        set_logging_level(-2)
+        self.assertEqual(
+            logging.root.level,
+            initial_logging_level,
+            msg="setting verbosity to -2 does not affect logging level",
+        )
+        # Ensure that initial logging level is restored
+        logging.root.setLevel(initial_logging_level)
+
+    def test_set_logging_level_sets_logging_to_INFO_if_input_is_1(
+        self,
+    ):
+
+        """
+        Test that the logging level gets changed to INFO if verbosity is 1.
+        """
+        initial_logging_level = logging.root.level
+        # Ensure that the function actually changes the level
+        logging.root.setLevel(logging.WARNING)
+
+        set_logging_level(1)
+        self.assertEqual(
+            logging.root.level,
+            logging.INFO,
+            msg="setting verbosity to 1 sets logging level to INFO",
+        )
+
+        # Ensure that initial logging level is restored
+        logging.root.setLevel(initial_logging_level)
+
+    def test_set_logging_level_sets_logging_to_DEBUG_if_input_is_2_or_more(
+        self,
+    ):
+
+        """
+        Test that the logging level gets changed to DEBUG if verbosity is 2 or larger.
+        """
+        initial_logging_level = logging.root.level
+        # Ensure that the function actually changes the level
+        logging.root.setLevel(logging.WARNING)
+
+        set_logging_level(2)
+        self.assertEqual(
+            logging.root.level,
+            logging.DEBUG,
+            msg="setting verbosity to 2 sets logging level to DEBUG",
+        )
+
+        set_logging_level(3)
+        self.assertEqual(
+            logging.root.level,
+            logging.DEBUG,
+            msg="setting verbosity to 3 sets logging level to DEBUG",
+        )
+
+        # Ensure that initial logging level is restored
+        logging.root.setLevel(initial_logging_level)
+
+    @patch.dict("sys.modules", {"nt": MagicMock()})
+    def test_get_linesep_returns_rn_if_output_is_stdout_on_windows(self):
+
+        """
+        Test that get_linesep() returns \r\n if output destination is sys.stdout on Windows.
+        """
+        # Reload to apply patches
+        with patch("sys.builtin_module_names", ["nt"]):
+            fit = reload_os_and_fitting()
+
+        self.assertEqual(fit.get_linesep(sys.stdout), "\r\n")
+
+        # Cleanup
+        reload_os_and_fitting()
+
+    @unittest.skipIf(platform.system() == "Windows", "Only POSIX")
+    def test_get_linesep_returns_n_if_output_is_stdout_on_posix(
+        self,
+    ):
+
+        """
+        Test that get_linesep() returns \n if output destination is sys.stdout on Posix.
+        Should only run on POSIX.
+        """
+        self.assertEqual(get_linesep(sys.stdout), "\n")
+
+    @patch.dict("sys.modules", {"nt": MagicMock()})
+    def test_get_linesep_returns_n_if_output_is_filestream_on_windows(self):
+
+        """
+        Test that get_linesep() returns \n if output destination is a filestream on Windows.
+        """
+        # Reload to apply patches
+        with patch("sys.builtin_module_names", ["nt"]):
+            fit = reload_os_and_fitting()
+        output_dest = StringIO()
+        self.assertEqual(fit.get_linesep(output_dest), "\n")
+
+        # Cleanup
+        _ = reload_os_and_fitting()
+
+    @unittest.skipIf(platform.system() == "Windows", "Only POSIX")
+    def test_get_linesep_returns_n_if_output_is_filestream_on_posix(
+        self,
+    ):
+
+        """
+        Test that get_linesep() returns \n if output destination is filestream on Posix.
+        Should only run on POSIX.
+ """ + output_dest = StringIO() + self.assertEqual(get_linesep(output_dest), "\n") + + def test_get_output_destination_with_path_input_returns_writable_io( + self, + ): + + """Test that by calling get_output_destination with a Path as input + we obtain write access to the file of Path.""" + mocked_open = mock_open() + with patch("builtins.open", mocked_open): + with get_output_destination(pathlib.Path("test")) as destination: + self.assertTrue( + destination.writable(), + msg="file destination is writable", + ) + mocked_open.assert_called_once_with(pathlib.Path("test"), "w") + + def test_get_output_destination_without_input_returns_stdout( + self, + ): + + """Test that by calling get_output_destination without input + we obtain sys.stdout.""" + with get_output_destination() as destination: + self.assertEqual( + destination, + sys.stdout, + msg="default destination is sys.stdout", + ) + + def test_closing_get_output_destination_does_not_close_stdout( + self, + ): + + """Test that get_output_destination() can be safely used without closing sys.stdout.""" + + with get_output_destination() as _: + pass + output_catcher = StringIO() + with contextlib.redirect_stdout(output_catcher): + sys.stdout.write("test after context closed") + self.assertEqual( + output_catcher.getvalue(), + "test after context closed", + msg="Can write to sys.stdout after closing desitnation context", + ) + + def test_get_guinier_header_for_csv( + self, + ): + + """Test that by calling get_guinier_header with input csv we get the correct line.""" + + header = get_guinier_header("linesep", "csv") + self.assertEqual( + header, + "File,Rg,Rg StDev,I(0),I(0) StDev,First point,Last point,Quality,Aggregatedlinesep", + msg="csv header is correct", + ) + + def test_get_guinier_header_for_ssv( + self, + ): + + """Test that by calling get_guinier_header with input ssv we get an empty string.""" + + header = get_guinier_header("linesep", "ssv") + self.assertEqual( + header, + "", + msg="ssv header is correct", + ) + + def test_get_guinier_header_for_native( + self, + ): + + """Test that by calling get_guinier_header with input native we get an empty string.""" + + header = get_guinier_header("linesep", "native") + self.assertEqual( + header, + "", + msg="native header is correct", + ) + + def test_get_guinier_header_without_input_format( + self, + ): + + """Test that by calling get_guinier_header without input format we get an empty string.""" + + header = get_guinier_header("linesep", None) + self.assertEqual( + header, + "", + msg="header for undefined format is correct", + ) + + def test_collect_files_only_returns_existing_files(self): + + """Test that collect_files discards strings that do not match an existing file.""" + + def os_stat_mock(path, **_): + if "good" in pathlib.Path(path).name: + pass + else: + if sys.version_info.minor > 7: + raise ValueError + else: + raise OSError(ENOENT, "dummy") + + mocked_stat = MagicMock(side_effect=os_stat_mock) + with patch("os.stat", mocked_stat): + local_pathlib = importlib.import_module("pathlib") + local_pathlib = importlib.reload(local_pathlib) + fit = importlib.import_module("..fitting", "freesas.subpkg") + fit = importlib.reload(fit) + self.assertEqual( + fit.collect_files(["testgood", "testbad"]), + [local_pathlib.Path("testgood")], + ) + # Reload without the patch + local_pathlib = importlib.reload(local_pathlib) + reload_os_and_fitting() + + @patch("platform.system", MagicMock(return_value="Windows")) + def test_collect_files_globs_on_windows(self): + + """Test that 
collect_files globs on Windows if no existent files provided.""" + + def os_stat_mock(path): + if sys.version_info.minor > 7: + raise ValueError + else: + raise OSError(ENOENT, "dummy") + + mocked_stat = MagicMock(side_effect=os_stat_mock) + mocked_glob = MagicMock( + side_effect=[ + (p for p in [pathlib.Path("pathA"), pathlib.Path("pathB")]) + ] + ) + with patch("os.stat", mocked_stat): + with patch.object(pathlib.Path, "glob", mocked_glob): + fit = importlib.import_module("..fitting", "freesas.subpkg") + fit = importlib.reload(fit) + self.assertEqual( + fit.collect_files(["testgood"]), + [pathlib.Path("pathA"), pathlib.Path("pathB")], + msg="collect_files on windows returns list if fiel argument does not exist", + ) + mocked_glob.assert_called_once() + + # Reload without the patch + reload_os_and_fitting() + + def test_rg_result_line_csv(self): + + """Test the formatting of a csv result line for a Guinier fit.""" + + test_result = RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + expected_line = "test.file,3.1000,0.1000,103.0000,2.5000, 13,207,50.1000,0.0500lineend" + obtained_line = rg_result_to_output_line( + rg_result=test_result, + afile=pathlib.Path("test.file"), + output_format="csv", + linesep="lineend", + ) + self.assertEqual( + obtained_line, expected_line, msg="csv line for RG_Result correct" + ) + + def test_rg_result_line_ssv(self): + + """Test the formatting of a ssv result line for a Guinier fit.""" + + test_result = RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + expected_line = "3.1000 0.1000 103.0000 2.5000 13 207 50.1000 0.0500 test.filelineend" + obtained_line = rg_result_to_output_line( + rg_result=test_result, + afile=pathlib.Path("test.file"), + output_format="ssv", + linesep="lineend", + ) + self.assertEqual( + obtained_line, expected_line, msg="ssv line for RG_Result correct" + ) + + def test_rg_result_line_native(self): + """Test the formatting of a native result line for a Guinier fit.""" + test_result = RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + expected_line = "test.file Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% lineend" + obtained_line = rg_result_to_output_line( + rg_result=test_result, + afile=pathlib.Path("test.file"), + output_format="native", + linesep="lineend", + ) + self.assertEqual( + obtained_line, + expected_line, + msg="native line for RG_Result correct", + ) + + def test_rg_result_line_no_format(self): + + """Test the formatting of a native result line for a Guinier fit.""" + + test_result = RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + expected_line = "test.file Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% lineend" + obtained_line = rg_result_to_output_line( + rg_result=test_result, + afile=pathlib.Path("test.file"), + linesep="lineend", + ) + self.assertEqual( + obtained_line, + expected_line, + msg="line for RG_Result without format specification correct", + ) + + @patch( + "freesas.fitting.collect_files", + MagicMock(return_value=[pathlib.Path("test")]), + ) + @patch( + "freesas.fitting.load_scattering_data", + MagicMock( + return_value=numpy.array( + [[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ) + ), + ) + @patch( + "freesas.fitting.get_linesep", + MagicMock(return_value="linesep"), + ) + def test_run_guinier_fit_uses_provided_fit_function(self): + """Test that run_guinier_fit uses fit function provided in the arguments + and outputs its result in a line.""" + + @counted + def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: + return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 
207, 50.1, 0.05) + + dummy_parser = get_dummy_guinier_parser( + verbose=0, + file=[pathlib.Path("test")], + output=None, + format=None, + unit="nm", + ) + output_catcher = StringIO() + with contextlib.redirect_stdout(output_catcher): + run_guinier_fit( + fit_function=dummy_fit_function, + parser=dummy_parser, + logger=logger, + ) + expected_output = "test Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" + self.assertEqual( + output_catcher.getvalue(), + expected_output, + msg="run_guinier_fit provides expected output", + ) + self.assertEqual( + dummy_fit_function.calls, + 1, + msg="Provided fit function was called once", + ) + + @patch( + "freesas.fitting.collect_files", + MagicMock(return_value=[pathlib.Path("test"), pathlib.Path("test")]), + ) + @patch( + "freesas.fitting.load_scattering_data", + MagicMock( + return_value=numpy.array( + [[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ) + ), + ) + @patch( + "freesas.fitting.get_linesep", + MagicMock(return_value="linesep"), + ) + def test_run_guinier_fit_iterates_over_files(self): + + """Test that run_guinier_fit calls the provided fit function for each provided file.""" + + @counted + def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: + return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + + dummy_parser = get_dummy_guinier_parser( + verbose=0, + file=[pathlib.Path("test"), pathlib.Path("test")], + output=None, + format=None, + unit="nm", + ) + output_catcher = StringIO() + with contextlib.redirect_stdout(output_catcher): + run_guinier_fit( + fit_function=dummy_fit_function, + parser=dummy_parser, + logger=logger, + ) + expected_output = ( + "test Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" + * 2 + ) + self.assertEqual( + output_catcher.getvalue(), + expected_output, + msg="run_guinier_fit provides expected output", + ) + self.assertEqual( + dummy_fit_function.calls, + 2, + msg="Provided fit function was called twice", + ) + + @unittest.skip("Unreliable") + @patch( + "freesas.fitting.collect_files", + MagicMock(return_value=[pathlib.Path("test"), pathlib.Path("test2")]), + ) + @patch( + "freesas.fitting.load_scattering_data", + MagicMock( + side_effect=build_mock_for_load_scattering_with_Errors( + {pathlib.Path("test").name: OSError} + ) + ), + ) + @patch( + "freesas.fitting.get_linesep", + MagicMock(return_value="linesep"), + ) + def test_run_guinier_outputs_error_if_file_not_found(self): + """Test that run_guinier_fit outputs an error if data loading raises OSError + and continues to the next file.""" + + @counted + def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: + return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + + dummy_parser = get_dummy_guinier_parser( + verbose=0, + file=[pathlib.Path("test"), pathlib.Path("test2")], + output=None, + format=None, + unit="nm", + ) + output_catcher_stdout = StringIO() + output_catcher_stderr = StringIO() + with contextlib.redirect_stdout(output_catcher_stdout): + with contextlib.redirect_stderr(output_catcher_stderr): + run_guinier_fit( + fit_function=dummy_fit_function, + parser=dummy_parser, + logger=logger, + ) + expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" + self.assertEqual( + output_catcher_stdout.getvalue(), + expected_stdout_output, + msg="run_guinier_fit provides expected stdout output", + ) + expected_stderr_output = ( + "ERROR:freesas.test.test_fitting:Unable to read file test" + ) + self.assertTrue( + expected_stderr_output in 
output_catcher_stderr.getvalue(), + msg="run_guinier_fit provides expected stderr output", + ) + self.assertEqual( + dummy_fit_function.calls, + 1, + msg="Provided fit function was called once", + ) + + @unittest.skip("Unreliable") + @patch( + "freesas.fitting.load_scattering_data", + MagicMock( + side_effect=build_mock_for_load_scattering_with_Errors( + {pathlib.Path("test").name: ValueError} + ) + ), + ) + @patch_collect_files + @patch_linesep + def test_run_guinier_outputs_error_if_file_not_parsable(self): + + """Test that run_guinier_fit outputs an error if data loading raises ValueError + and continues to the next file.""" + + @counted + def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: + return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + + dummy_parser = get_dummy_guinier_parser( + verbose=0, + file=[pathlib.Path("test"), pathlib.Path("test2")], + output=None, + format=None, + unit="nm", + ) + output_catcher_stdout = StringIO() + output_catcher_stderr = StringIO() + with contextlib.redirect_stdout(output_catcher_stdout): + with contextlib.redirect_stderr(output_catcher_stderr): + run_guinier_fit( + fit_function=dummy_fit_function, + parser=dummy_parser, + logger=logger, + ) + expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" + self.assertEqual( + output_catcher_stdout.getvalue(), + expected_stdout_output, + msg="run_guinier_fit provides expected stdout output", + ) + expected_stderr_output = ( + "ERROR:freesas.test.test_fitting:Unable to parse file test" + ) + self.assertTrue( + expected_stderr_output in output_catcher_stderr.getvalue(), + msg="run_guinier_fit provides expected stderr output", + ) + self.assertEqual( + dummy_fit_function.calls, + 1, + msg="Provided fit function was called once", + ) + + @patch( + "freesas.fitting.load_scattering_data", + MagicMock( + side_effect=[ + numpy.array( + [[0.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ), + numpy.array( + [[2.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ), + ] + ), + ) + @patch_collect_files + @patch_linesep + def test_run_guinier_outputs_error_if_fitting_raises_insufficient_data_error( + self, + ): + + """Test that run_guinier_fit outputs an error if fitting raises InsufficientDataError + and continues to the next file.""" + + @counted + def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: + if input_data[0, 0] <= 0.1: + raise InsufficientDataError + return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + + dummy_parser = get_dummy_guinier_parser( + verbose=0, + file=[pathlib.Path("test"), pathlib.Path("test2")], + output=None, + format=None, + unit="nm", + ) + output_catcher_stdout = StringIO() + output_catcher_stderr = StringIO() + with contextlib.redirect_stdout(output_catcher_stdout): + with contextlib.redirect_stderr(output_catcher_stderr): + run_guinier_fit( + fit_function=dummy_fit_function, + parser=dummy_parser, + logger=logger, + ) + expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" + self.assertEqual( + output_catcher_stdout.getvalue(), + expected_stdout_output, + msg="run_guinier_fit provides expected stdout output", + ) + expected_stderr_output = "test, InsufficientDataError: " + self.assertTrue( + expected_stderr_output in output_catcher_stderr.getvalue(), + msg="run_guinier_fit provides expected stderr output for fitting InsufficientError", + ) + self.assertEqual( + dummy_fit_function.calls, + 2, + msg="Provided fit function was called twice", + ) + + @patch( + 
"freesas.fitting.load_scattering_data", + MagicMock( + side_effect=[ + numpy.array( + [[0.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ), + numpy.array( + [[2.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ), + ] + ), + ) + @patch_collect_files + @patch_linesep + def test_run_guinier_outputs_error_if_fitting_raises_no_guinier_region_error( + self, + ): + + """Test that run_guinier_fit outputs an error if fitting raises NoGuinierRegionError + and continues to the next file.""" + + @counted + def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: + if input_data[0, 0] <= 0.1: + raise NoGuinierRegionError + return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + + dummy_parser = get_dummy_guinier_parser( + verbose=0, + file=[pathlib.Path("test"), pathlib.Path("test2")], + output=None, + format=None, + unit="nm", + ) + output_catcher_stdout = StringIO() + output_catcher_stderr = StringIO() + with contextlib.redirect_stdout(output_catcher_stdout): + with contextlib.redirect_stderr(output_catcher_stderr): + run_guinier_fit( + fit_function=dummy_fit_function, + parser=dummy_parser, + logger=logger, + ) + expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" + self.assertEqual( + output_catcher_stdout.getvalue(), + expected_stdout_output, + msg="run_guinier_fit provides expected stdout output", + ) + expected_stderr_output = "test, NoGuinierRegionError: " + self.assertTrue( + expected_stderr_output in output_catcher_stderr.getvalue(), + msg="run_guinier_fit provides expected stderr output fitting NoGuinierRegionError", + ) + self.assertEqual( + dummy_fit_function.calls, + 2, + msg="Provided fit function was called twice", + ) + + @patch( + "freesas.fitting.load_scattering_data", + MagicMock( + side_effect=[ + numpy.array( + [[0.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ), + numpy.array( + [[2.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ), + ] + ), + ) + @patch_collect_files + @patch_linesep + def test_run_guinier_outputs_error_if_fitting_raises_value_error( + self, + ): + + """Test that run_guinier_fit outputs an error if fitting raises ValueError + and continues to the next file.""" + + @counted + def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: + if input_data[0, 0] <= 0.1: + raise ValueError + return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + + dummy_parser = get_dummy_guinier_parser( + verbose=0, + file=[pathlib.Path("test"), pathlib.Path("test2")], + output=None, + format=None, + unit="nm", + ) + output_catcher_stdout = StringIO() + output_catcher_stderr = StringIO() + with contextlib.redirect_stdout(output_catcher_stdout): + with contextlib.redirect_stderr(output_catcher_stderr): + run_guinier_fit( + fit_function=dummy_fit_function, + parser=dummy_parser, + logger=logger, + ) + expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" + self.assertEqual( + output_catcher_stdout.getvalue(), + expected_stdout_output, + msg="run_guinier_fit provides expected stdout output", + ) + expected_stderr_output = "test, ValueError: " + self.assertTrue( + expected_stderr_output in output_catcher_stderr.getvalue(), + msg="run_guinier_fit provides expected stderr output fitting ValueError", + ) + self.assertEqual( + dummy_fit_function.calls, + 2, + msg="Provided fit function was called twice", + ) + + @patch( + "freesas.fitting.load_scattering_data", + MagicMock( + side_effect=[ + numpy.array( + [[0.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ), + 
numpy.array( + [[2.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] + ), + ] + ), + ) + @patch_collect_files + @patch_linesep + def test_run_guinier_outputs_error_if_fitting_raises_index_error( + self, + ): + + """Test that run_guinier_fit outputs an error if fitting raises IndexError + and continues to the next file.""" + + @counted + def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: + if input_data[0, 0] <= 0.1: + raise IndexError + return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) + + dummy_parser = get_dummy_guinier_parser( + verbose=0, + file=[pathlib.Path("test"), pathlib.Path("test2")], + output=None, + format=None, + unit="nm", + ) + output_catcher_stdout = StringIO() + output_catcher_stderr = StringIO() + with contextlib.redirect_stdout(output_catcher_stdout): + with contextlib.redirect_stderr(output_catcher_stderr): + run_guinier_fit( + fit_function=dummy_fit_function, + parser=dummy_parser, + logger=logger, + ) + expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" + self.assertEqual( + output_catcher_stdout.getvalue(), + expected_stdout_output, + msg="run_guinier_fit provides expected stdout output", + ) + expected_stderr_output = "test, IndexError: " + self.assertTrue( + expected_stderr_output in output_catcher_stderr.getvalue(), + msg="run_guinier_fit provides expected stderr output for fitting IndexError", + ) + self.assertEqual( + dummy_fit_function.calls, + 2, + msg="Provided fit function was called twice", + ) + + +def suite(): + """Build a test suite from the TestFitting class.""" + + test_suite = unittest.TestSuite() + for class_element in dir(TestFitting): + if class_element.startswith("test"): + test_suite.addTest(TestFitting(class_element)) + return test_suite + + +if __name__ == "__main__": + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/test_model.py b/src/freesas/test/test_model.py new file mode 100644 index 0000000..bdbeb9c --- /dev/null +++ b/src/freesas/test/test_model.py @@ -0,0 +1,182 @@ +#!/usr/bin/env python +# coding: utf-8 +from __future__ import print_function + +__author__ = "Guillaume" +__license__ = "MIT" +__copyright__ = "2015, ESRF" + +import numpy +import unittest +import os +import tempfile +from .utilstests import get_datafile +from ..model import SASModel +from ..transformations import translation_from_matrix, euler_from_matrix +import logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("SASModel_test") + + +def assign_random_mol(inf=None, sup=None): + if not inf: + inf = 0 + if not sup: + sup = 100 + molecule = numpy.random.randint(inf, sup, size=400).reshape(100, 4).astype(float) + molecule[:, -1] = 1.0 + m = SASModel(molecule) + return m + + +class TesttParser(unittest.TestCase): + testfile = get_datafile("model-01.pdb") + + def setUp(self): + unittest.TestCase.setUp(self) + self.tmpdir = tempfile.mkdtemp() + self.outfile = os.path.join(self.tmpdir, "out.pdb") + + def tearDown(self): + unittest.TestCase.tearDown(self) + for fn in (self.outfile, self.tmpdir): + if os.path.exists(fn): + if os.path.isdir(fn): + os.rmdir(fn) + else: + os.unlink(fn) + + def test_same(self): + m = SASModel() + m.read(self.testfile) + m.save(self.outfile) + infile = open(self.testfile).read() + outfile = open(self.outfile).read() + self.assertEqual(infile, outfile, msg="file content is the same") + + def test_rfactor(self): + m = SASModel() + m.read(self.testfile) + n = SASModel() + n.read(self.testfile) + self.assertEqual(m.rfactor, 
n.rfactor, msg="R-factor is not the same %s != %s" % (m.rfactor, n.rfactor))
+
+    def test_init(self):
+        m = SASModel()
+        m.read(self.testfile)
+        n = SASModel(self.testfile)
+        param1 = m.rfactor
+        param2 = n.rfactor
+        self.assertEqual(param1, param2, msg="pdb file not read correctly")
+
+    def test_centroid(self):
+        m = assign_random_mol()
+        m.centroid()
+        if len(m.com) != 3:
+            logger.error("center of mass has not been saved correctly: length of COM position vector = %s != 3" % (len(m.com)))
+        mol_centered = m.atoms[:, 0:3] - m.com
+        center = mol_centered.mean(axis=0)
+        norm = (center * center).sum()
+        self.assertAlmostEqual(norm, 0, 12, msg="molecule is not centered: norm of the COM position vector %s != 0" % (norm))
+
+    def test_inertia_tensor(self):
+        m = assign_random_mol()
+        m.inertiatensor()
+        tensor = m.inertensor
+        assert tensor.shape == (3, 3), "inertia tensor has not been saved correctly: shape of inertia matrix = %s" % (tensor.shape,)
+
+    def test_canonical_translate(self):
+        m = assign_random_mol()
+        trans = m.canonical_translate()
+        if trans.shape != (4, 4):
+            logger.error("problem with translation matrix shape: shape=%s" % (trans.shape,))
+        com = m.com
+        com_components = [com[0], com[1], com[2]]
+        trans_vect = [-trans[0, -1], -trans[1, -1], -trans[2, -1]]
+        self.assertEqual(com_components, trans_vect, msg="molecule does not translate to the canonical position")
+
+    def test_canonical_rotate(self):
+        m = assign_random_mol()
+        rot = m.canonical_rotate()
+        if rot.shape != (4, 4):
+            logger.error("problem with rotation matrix shape")
+        if not m.enantiomer:
+            logger.error("enantiomer has not been selected")
+        det = numpy.linalg.det(rot)
+        self.assertAlmostEqual(det, 1, 10, msg="rotation matrix determinant is not 1: %s" % (det))
+
+    def test_canonical_parameters(self):
+        m = assign_random_mol()
+        m.canonical_parameters()
+        can_param = m.can_param
+        if len(can_param) != 6:
+            logger.error("canonical parameters have not been saved properly")
+        com_trans = translation_from_matrix(m.canonical_translate())
+        euler_rot = euler_from_matrix(m.canonical_rotate())
+        out_param = [com_trans[0], com_trans[1], com_trans[2], euler_rot[0], euler_rot[1], euler_rot[2]]
+        self.assertEqual(can_param, out_param, msg="canonical parameters are not the expected ones")
+
+    def test_dist(self):
+        m = assign_random_mol()
+        n = SASModel(m.atoms)
+        distance = m.dist(n, m.atoms, n.atoms)
+        self.assertEqual(distance, 0, msg="NSD differs from 0: %s != 0" % (distance))
+
+    def test_can_transform(self):
+        m = assign_random_mol()
+        m.canonical_parameters()
+        p0 = m.can_param
+        mol1 = m.transform(p0, [1, 1, 1])
+        if abs(mol1 - m.atoms).max() == 0:
+            logger.error("molecule did not move")
+        m.atoms = mol1
+        m.centroid()
+        m.inertiatensor()
+        com = m.com
+        tensor = m.inertensor
+        diag = numpy.eye(3)
+        matrix = tensor - tensor * diag
+        self.assertAlmostEqual(abs(com).sum(), 0, 10, msg="molecule is not on its center of mass")
+        self.assertAlmostEqual(abs(matrix).sum(), 0, 10, "inertia moments are not aligned")
+
+    def test_dist_move(self):
+        m = assign_random_mol()
+        n = SASModel(m.atoms)
+        m.canonical_parameters()
+        n.canonical_parameters()
+        if abs(n.atoms - m.atoms).max() != 0:
+            logger.error("molecules are different")
+        p0 = m.can_param
+        dist_after_mvt = m.dist_after_movement(p0, n, [1, 1, 1])
+        self.assertEqual(dist_after_mvt, 0, msg="NSD differs from 0: %s != 0" % (dist_after_mvt))
+
+    def test_reverse_transform(self):
+        m = assign_random_mol()
+        n = SASModel(m.atoms)
+        m.canonical_parameters()
+        m.atoms = m.transform(m.can_param, [1, 1, 1], reverse=None)
+        m.atoms = m.transform(m.can_param, [1, 1, 1], reverse=True)
+        dist = m.dist(n, m.atoms, n.atoms)
+        self.assertAlmostEqual(dist, 0.0, 10, msg="problem with reverse transformation: %s != 0.0" % dist)
+
+
+def suite():
+    testSuite = unittest.TestSuite()
+    testSuite.addTest(TesttParser("test_same"))
+    testSuite.addTest(TesttParser("test_rfactor"))
+    testSuite.addTest(TesttParser("test_init"))
+    testSuite.addTest(TesttParser("test_centroid"))
+    testSuite.addTest(TesttParser("test_inertia_tensor"))
+    testSuite.addTest(TesttParser("test_canonical_translate"))
+    testSuite.addTest(TesttParser("test_canonical_rotate"))
+    testSuite.addTest(TesttParser("test_canonical_parameters"))
+    testSuite.addTest(TesttParser("test_dist"))
+    testSuite.addTest(TesttParser("test_can_transform"))
+    testSuite.addTest(TesttParser("test_dist_move"))
+    testSuite.addTest(TesttParser("test_reverse_transform"))
+    return testSuite
+
+
+if __name__ == '__main__':
+    runner = unittest.TextTestRunner()
+    runner.run(suite())
diff --git a/src/freesas/test/test_sas_argparser.py b/src/freesas/test/test_sas_argparser.py
new file mode 100644
index 0000000..3575bea
--- /dev/null
+++ b/src/freesas/test/test_sas_argparser.py
@@ -0,0 +1,603 @@
+#!/usr/bin/python
+# coding: utf-8
+
+"""Test the functionality of SASParser and GuinierParser"""
+
+__authors__ = ["Martha Brennich"]
+__license__ = "MIT"
+__date__ = "25/03/2021"
+
+
+import unittest
+import logging
+import io
+import contextlib
+from pathlib import Path
+from .. import dated_version as freesas_version
+from ..sas_argparser import SASParser, GuinierParser
+
+
+logger = logging.getLogger(__name__)
+
+
+class TestSasArgParser(unittest.TestCase):
+    def test_minimal_guinier_parser_requires_file_argument(self):
+        """
+        Test that the Guinier parser provides an error if no file argument is provided.
+        """
+        basic_parser = GuinierParser("program", "description", "epilog")
+        output_catcher = io.StringIO()
+        try:
+            with contextlib.redirect_stderr(output_catcher):
+                _ = basic_parser.parse_args()
+        except SystemExit:
+            pass
+
+        self.assertTrue(
+            basic_parser.usage in output_catcher.getvalue(),
+            msg="GuinierParser provides usage if no file provided",
+        )
+        self.assertTrue(
+            "the following arguments are required: FILE"
+            in output_catcher.getvalue(),
+            msg="GuinierParser states that the FILE argument is missing if no file provided",
+        )
+
+    def test_minimal_guinier_parser_parses_list_of_files(self):
+        """
+        Test that the Guinier parser parses a list of files.
+        """
+        basic_parser = GuinierParser("program", "description", "epilog")
+
+        parsed_arguments = basic_parser.parse_args(["afile", "bfile", "cfile"])
+
+        self.assertEqual(
+            set(parsed_arguments.file),
+            {"afile", "bfile", "cfile"},
+            msg="GuinierParser parses list of files",
+        )
+
+    def test_add_file_argument_enables_SASParser_to_recognize_file_lists(
+        self,
+    ):
+        """
+        Test that add_file_argument adds the ability to parse a file list to SASParser.
+ """ + basic_parser = SASParser("program", "description", "epilog") + + # Before running add_file_argument a file argument is not recognized + output_catcher = io.StringIO() + try: + with contextlib.redirect_stderr(output_catcher): + _ = basic_parser.parse_args(["afile"]) + except SystemExit: + pass + self.assertTrue( + "unrecognized arguments: afile" in output_catcher.getvalue(), + msg="Minimal SASParser does not recognize file argument", + ) + + basic_parser.add_file_argument(help_text="file help") + parsed_arguments = basic_parser.parse_args(["afile", "bfile", "cfile"]) + + self.assertEqual( + set(parsed_arguments.file), + {"afile", "bfile", "cfile"}, + msg="GuinierParser parses list of files", + ) + + def test_minimal_parser_usage_includes_program_name(self): + """ + Test that minimal parser includes the provided program in the usage string. + """ + basic_parser = SASParser("test❤️", "description", "epilog") + + self.assertTrue( + "test❤️" in basic_parser.usage, + msg="SASParser usage includes program name", + ) + + def test_minimal_guinier_parser_usage_includes_program_name(self): + """ + Test that minimal parser includes the provided program in the usage string. + """ + basic_parser = GuinierParser("test❤️", "description", "epilog") + + self.assertTrue( + "test❤️" in basic_parser.usage, + msg="GuinierParser usage includes program name", + ) + + def test_minimal_guinier_parser_help_includes_program_description_epilog( + self, + ): + """ + Test that minimal guinier parser includes help includes + the provided program name, description and epilog. + """ + basic_parser = GuinierParser("test❤️", "description📚", "epilog🎦") + output_catcher = io.StringIO() + + try: + with contextlib.redirect_stdout(output_catcher): + _ = basic_parser.parse_args(["--help"]) + except SystemExit: + pass + + self.assertTrue( + "test❤️" in output_catcher.getvalue(), + msg="GuinierParser outputs program name in help", + ) + + self.assertTrue( + "description📚" in output_catcher.getvalue(), + msg="GuinierParser outputs description in help", + ) + + self.assertTrue( + "epilog🎦" in output_catcher.getvalue(), + msg="GuinierParser outputs eplilog name in help", + ) + + def test_minimal_parser_help_includes_program_description_epilog(self): + """ + Test that minimal parser includes help includes + the provided program name, description and epilog. 
+ """ + basic_parser = SASParser("test❤️", "description📚", "epilog🎦") + output_catcher = io.StringIO() + + try: + with contextlib.redirect_stdout(output_catcher): + _ = basic_parser.parse_args(["--help"]) + except SystemExit: + pass + + self.assertTrue( + "test❤️" in output_catcher.getvalue(), + msg="SASParser outputs program name in help", + ) + + self.assertTrue( + "description📚" in output_catcher.getvalue(), + msg="SASParser outputs description in help", + ) + + self.assertTrue( + "epilog🎦" in output_catcher.getvalue(), + msg="SASParser outputs eplilog name in help", + ) + + def test_minimal_parser_default_verbosity_level_is_0(self): + """ + Test that the parser sets the verbosity to 0 if no args are provided + """ + basic_parser = SASParser("program", "description", "epilog") + parsed_arguments = basic_parser.parse_args() + self.assertEqual( + parsed_arguments.verbose, + 0, + msg="SASParser default verbosity is 0", + ) + + def test_minimal_guinier_parser_default_verbosity_level_is_0(self): + """ + Test that the Guinier parser sets the verbosity to 0 if no args are provided + """ + basic_parser = GuinierParser("program", "description", "epilog") + parsed_arguments = basic_parser.parse_args(["afile"]) + self.assertEqual( + parsed_arguments.verbose, + 0, + msg="GuinierParser default verbosity is 0", + ) + + def test_minimal_parser_accumulates_verbosity_level(self): + """ + Test that the parser parser increases the verbosity level to two + if -vv argument is provided. + """ + basic_parser = SASParser("program", "description", "epilog") + parsed_arguments = basic_parser.parse_args(["-vv"]) + self.assertEqual( + parsed_arguments.verbose, + 2, + msg="SASParser verbosity increased to 2 by -vv", + ) + + def test_minimal_guinier_parser_accumulates_verbosity_level(self): + """ + Test that the parser parser increases the verbosity level to two + if -vv argument is provided. + """ + basic_parser = GuinierParser("program", "description", "epilog") + parsed_arguments = basic_parser.parse_args(["afile", "-vv"]) + self.assertEqual( + parsed_arguments.verbose, + 2, + msg="GuinierParser verbosity increased to 2 by -vv", + ) + + def test_minimal_parser_provides_correct_version(self): + """ + Test that parser provides the correct app version. + """ + basic_parser = SASParser("program", "description", "epilog") + output_catcher = io.StringIO() + try: + with contextlib.redirect_stdout(output_catcher): + _ = basic_parser.parse_args(["--version"]) + except SystemExit: + pass + + self.assertTrue( + freesas_version.version in output_catcher.getvalue(), + msg="SASParser outputs consistent version", + ) + self.assertTrue( + freesas_version.date in output_catcher.getvalue(), + msg="SASParser outputs consistent date", + ) + + def test_minimal_guinier_parser_provides_correct_version(self): + """ + Test that parser provides the correct app version. + """ + basic_parser = GuinierParser("program", "description", "epilog") + output_catcher = io.StringIO() + try: + with contextlib.redirect_stdout(output_catcher): + _ = basic_parser.parse_args(["--version"]) + except SystemExit: + pass + + self.assertTrue( + freesas_version.version in output_catcher.getvalue(), + msg="GuinierParser outputs consistent version", + ) + self.assertTrue( + freesas_version.date in output_catcher.getvalue(), + msg="GuinierParser outputs consistent date", + ) + + def test_minimal_guinier_parser_accepts_output_file_argument(self): + """ + Test that minimal Guinier parser accepts one output file argument. 
+ """ + basic_parser = GuinierParser("program", "description", "epilog") + parsed_arguments = basic_parser.parse_args(["afile", "-o", "out.file"]) + + self.assertEqual( + parsed_arguments.output, + Path("out.file"), + msg="Minimal GuinierParser accepts output file argument", + ) + + def test_add_output_filename_argument_adds_output_file_argument_to_SASParser( + self, + ): + """ + Test that add_output_filename_argument adds one output file argument to as SASParser. + """ + basic_parser = SASParser("program", "description", "epilog") + + # Before running add_output_filename_argument -o file is not regognized + output_catcher = io.StringIO() + try: + with contextlib.redirect_stderr(output_catcher): + _ = basic_parser.parse_args(["-o", "out.file"]) + except SystemExit: + pass + self.assertTrue( + "unrecognized arguments: -o out.file" in output_catcher.getvalue(), + msg="Minimal SASParser does not recognize -o argument", + ) + + basic_parser.add_output_filename_argument() + parsed_arguments = basic_parser.parse_args(["-o", "out.file"]) + + self.assertEqual( + parsed_arguments.output, + Path("out.file"), + msg="SASParser accepts output file argument" + "after running add_output_filename_argument()", + ) + + def test_minimal_guinier_parser_accepts_output_format_argument(self): + """ + Test that minimal Guinier parser accepts one output data format argument. + """ + basic_parser = GuinierParser("program", "description", "epilog") + parsed_arguments = basic_parser.parse_args(["afile", "-f", "aformat"]) + + self.assertEqual( + parsed_arguments.format, + "aformat", + msg="Minimal GuinierParser accepts output data format argument", + ) + + def test_add_output_data_format_adds_output_format_argument_to_SASParser( + self, + ): + """ + Test that add_output_data_format adds one output data format argument to as SASParser. + """ + basic_parser = SASParser("program", "description", "epilog") + + # Before running add_output_filename_argument -o file is not regognized + output_catcher = io.StringIO() + try: + with contextlib.redirect_stderr(output_catcher): + _ = basic_parser.parse_args(["-f", "aformat"]) + except SystemExit: + pass + self.assertTrue( + "unrecognized arguments: -f aformat" in output_catcher.getvalue(), + msg="Minimal SASParser does not recognize -f argument", + ) + + basic_parser.add_output_data_format() + parsed_arguments = basic_parser.parse_args(["-f", "aformat"]) + + self.assertEqual( + parsed_arguments.format, + "aformat", + msg="SASParser accepts output data format argument" + "after running add_output_data_format()", + ) + + def test_minimal_guinier_parser_accepts_q_unit_argument(self): + """ + Test that minimal Guinier parser accepts a q unit argument. + """ + basic_parser = GuinierParser("program", "description", "epilog") + parsed_arguments = basic_parser.parse_args(["afile", "-u", "nm"]) + + self.assertEqual( + parsed_arguments.unit, + "nm", + msg="Minimal GuinierParser accepts q unit argument", + ) + + def test_add_q_unit_argument_adds_add_q_unit_argument_to_SASParser( + self, + ): + """ + Test that add_q_unit_argument adds a q unit argument to as SASParser. 
+ """ + basic_parser = SASParser("program", "description", "epilog") + + # Before running add_output_filename_argument -o file is not regognized + output_catcher = io.StringIO() + try: + with contextlib.redirect_stderr(output_catcher): + _ = basic_parser.parse_args(["-u", "nm"]) + except SystemExit: + pass + self.assertTrue( + "unrecognized arguments: -u nm" in output_catcher.getvalue(), + msg="Minimal SASParser does not recognize -u argument", + ) + + basic_parser.add_q_unit_argument() + parsed_arguments = basic_parser.parse_args(["-u", "nm"]) + + self.assertEqual( + parsed_arguments.unit, + "nm", + msg="SASParser accepts q unit argument after running add_q_unit_argument()", + ) + + def test_SASParser_q_unit_argument_allows_predefined_units( + self, + ): + """ + Test that the q unit argument of a SASparser accepts "nm", "Å", "A". + """ + basic_parser = SASParser("program", "description", "epilog") + basic_parser.add_q_unit_argument() + + parsed_arguments = basic_parser.parse_args(["-u", "nm"]) + self.assertEqual( + parsed_arguments.unit, + "nm", + msg="SASParser accepts unit format nm", + ) + + parsed_arguments = basic_parser.parse_args(["-u", "A"]) + self.assertEqual( + parsed_arguments.unit, + "Å", + msg="SASParser accepts unit format A", + ) + + parsed_arguments = basic_parser.parse_args(["-u", "Å"]) + self.assertEqual( + parsed_arguments.unit, + "Å", + msg="SASParser accepts unit format A", + ) + + def test_SASParser_q_unit_argument_does_not_allow_not_predefined_units( + self, + ): + """ + Test that the q unit argument of a SASparser does not accept a + unit that is not "nm", "Å", "A". + """ + basic_parser = SASParser("program", "description", "epilog") + basic_parser.add_q_unit_argument() + + output_catcher = io.StringIO() + try: + with contextlib.redirect_stderr(output_catcher): + _ = basic_parser.parse_args(["-u", "m"]) + except SystemExit: + pass + self.assertTrue( + "argument -u/--unit: invalid choice: 'm' (choose from 'nm', 'Å', 'A')" + in output_catcher.getvalue(), + msg="SASParser does not accept '-u m' argument", + ) + + def test_SASParser_q_unit_A_gets_converted_to_Å( + self, + ): + """ + Test that for a SASParder q unit input "A" gets converted to "Å". + """ + basic_parser = SASParser("program", "description", "epilog") + basic_parser.add_q_unit_argument() + + parsed_arguments = basic_parser.parse_args(["-u", "A"]) + self.assertEqual( + parsed_arguments.unit, + "Å", + msg="SASParser converts unit input 'A' to 'Å'", + ) + + def test_GuinierParser_q_unit_argument_allows_predefined_units( + self, + ): + """ + Test that the q unit argument of a Guinierparser accepts "nm", "Å", "A". + """ + basic_parser = GuinierParser("program", "description", "epilog") + + parsed_arguments = basic_parser.parse_args(["afile", "-u", "nm"]) + self.assertEqual( + parsed_arguments.unit, + "nm", + msg="SASParser accepts unit format nm", + ) + + parsed_arguments = basic_parser.parse_args(["afile", "-u", "A"]) + self.assertEqual( + parsed_arguments.unit, + "Å", + msg="SASParser accepts unit format A", + ) + + parsed_arguments = basic_parser.parse_args(["afile", "-u", "Å"]) + self.assertEqual( + parsed_arguments.unit, + "Å", + msg="SASParser accepts unit format A", + ) + + def test_GuinierParser_q_unit_argument_does_not_allow_not_predefined_units( + self, + ): + """ + Test that the q unit argument of a Guinierparser does not accept a + unit that is not "nm", "Å", "A". 
+ """ + basic_parser = GuinierParser("program", "description", "epilog") + + output_catcher = io.StringIO() + try: + with contextlib.redirect_stderr(output_catcher): + _ = basic_parser.parse_args(["afile", "-u", "m"]) + except SystemExit: + pass + self.assertTrue( + "argument -u/--unit: invalid choice: 'm' (choose from 'nm', 'Å', 'A')" + in output_catcher.getvalue(), + msg="SASParser does not accept '-u m' argument", + ) + + def test_GuinierParser_q_unit_A_gets_converted_to_Å( + self, + ): + """ + Test that for a GuinierParser q unit input "A" gets converted to "Å". + """ + basic_parser = GuinierParser("program", "description", "epilog") + + parsed_arguments = basic_parser.parse_args(["afile", "-u", "A"]) + self.assertEqual( + parsed_arguments.unit, + "Å", + msg="SASParser converts unit input 'A' to 'Å'", + ) + + def test_add_argument_adds_an_argument_to_a_SASParser( + self, + ): + """ + Test that new arguments can be added to SASParser. + """ + basic_parser = SASParser("program", "description", "epilog") + + # Before running add_argument -c + output_catcher = io.StringIO() + try: + with contextlib.redirect_stderr(output_catcher): + _ = basic_parser.parse_args(["-c"]) + except SystemExit: + pass + self.assertTrue( + "unrecognized arguments: -c" in output_catcher.getvalue(), + msg="Minimal SASParser does not recognize -c argument", + ) + + basic_parser.add_argument( + "-c", + "--check", + action="store_true", + ) + + parsed_arguments = basic_parser.parse_args(["-c"]) + self.assertEqual( + parsed_arguments.check, + True, + msg="-c argument added to SASParser", + ) + + def test_add_argument_adds_an_argument_to_a_GuinierParser( + self, + ): + """ + Test that new arguments can be added to GuinierParser. + """ + basic_parser = GuinierParser("program", "description", "epilog") + + # Before running add_argument -c + output_catcher = io.StringIO() + try: + with contextlib.redirect_stderr(output_catcher): + _ = basic_parser.parse_args(["afile", "-c"]) + except SystemExit: + pass + print(output_catcher.getvalue()) + self.assertTrue( + "unrecognized arguments: -c" in output_catcher.getvalue(), + msg="Minimal GuinierParser does not recognize -c argument", + ) + + basic_parser.add_argument( + "-c", + "--check", + action="store_true", + ) + + parsed_arguments = basic_parser.parse_args(["afile", "-c"]) + self.assertEqual( + parsed_arguments.check, + True, + msg="-c argument added to GuinierParser", + ) + + +def suite(): + """Build a test suite from the TestSasArgParser class""" + test_suite = unittest.TestSuite() + for class_element in dir(TestSasArgParser): + if class_element.startswith("test"): + test_suite.addTest(TestSasArgParser(class_element)) + return test_suite + + +if __name__ == "__main__": + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/test_sasio.py b/src/freesas/test/test_sasio.py new file mode 100644 index 0000000..c897b21 --- /dev/null +++ b/src/freesas/test/test_sasio.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +# +# Project: freesas +# https://github.com/kif/freesas +# +# Copyright (C) 2017-2022 European Synchrotron Radiation Facility, Grenoble, France +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# 
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+__authors__ = ["Martha Brennich", "Jérôme Kieffer"]
+__license__ = "MIT"
+__date__ = "16/09/2022"
+
+import unittest
+import logging
+import io
+from numpy import array, allclose
+from ..sasio import parse_ascii_data, load_scattering_data, \
+    convert_inverse_angstrom_to_nanometer
+logger = logging.getLogger(__name__)
+
+
+class TestSasIO(unittest.TestCase):
+
+    def test_parse_3_ok(self):
+        """
+        Test for successful parsing of a file with some invalid lines
+        """
+        file_content = ["Test data for",
+                        "file parsing",
+                        "1 1 1",
+                        "2 a 2",
+                        "3 3 3",
+                        "some stuff at the end",
+                        ]
+        expected_result = array([[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]])
+        data = parse_ascii_data(file_content, number_of_columns=3)
+        self.assertTrue(allclose(data, expected_result, 1e-7),
+                        msg="3 column parse returns expected result")
+
+    def test_parse_no_data(self):
+        """
+        Test that an empty input list raises a ValueError
+        """
+        file_content = []
+        with self.assertRaises(ValueError, msg="Empty list cannot be parsed"):
+            parse_ascii_data(file_content, number_of_columns=3)
+
+    def test_parse_no_valid_data(self):
+        """
+        Test that an input list with no valid data raises a ValueError
+        """
+        file_content = ["a a a", "2 4", "3 4 5 6", "# 3 4 6"]
+        with self.assertRaises(ValueError,
+                               msg="File with no float float float data"
+                               " cannot be parsed"):
+            parse_ascii_data(file_content, number_of_columns=3)
+
+    def test_load_clean_data(self):
+        """
+        Test that clean float float float data is loaded correctly.
+        """
+        file_content = ["# Test data for",
+                        "# file parsing",
+                        "1 1 1",
+                        "2.0 2.0 1.0",
+                        "3 3 3",
+                        "#REMARK some stuff at the end",
+                        ]
+        expected_result = array([[1.0, 1.0, 1.0],
+                                 [2.0, 2.0, 1.0],
+                                 [3.0, 3.0, 3.0]])
+        file_data = "\n".join(file_content)
+        mocked_file = io.StringIO(file_data)
+        data = load_scattering_data(mocked_file)
+        self.assertTrue(allclose(data, expected_result, 1e-7),
+                        msg="Clean data loaded correctly")
+
+    def test_load_data_with_unescaped_header(self):
+        """
+        Test that an unescaped header does not hinder loading.
+        """
+        file_content = ["Test data for",
+                        "file parsing",
+                        "1 1 1",
+                        "2.0 2.0 1.0",
+                        "3 3 3",
+                        ]
+        expected_result = array([[1.0, 1.0, 1.0],
+                                 [2.0, 2.0, 1.0],
+                                 [3.0, 3.0, 3.0]])
+        file_data = "\n".join(file_content)
+        mocked_file = io.StringIO(file_data)
+        data = load_scattering_data(mocked_file)
+
+        self.assertTrue(allclose(data, expected_result, 1e-7),
+                        msg="Data loaded correctly despite unescaped header")
+
+    def test_load_data_with_unescaped_footer(self):
+        """
+        Test that an unescaped footer does not hinder loading.
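+        """
+        # load_scattering_data keeps only lines that parse as the expected
+        # float columns, so a trailing remark line is silently dropped.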
+ """ + file_content = [ + "1 1 1", + "2.0 2.0 1.0", + "3 3 3", + "REMARK some stuff at the end" + ] + expected_result = array([[1.0, 1.0, 1.0], + [2.0, 2.0, 1.0], + [3.0, 3.0, 3.0]]) + file_data = "\n".join(file_content) + mocked_file = io.StringIO(file_data) + data = load_scattering_data(mocked_file) + + self.assertTrue(allclose(data, expected_result, 1e-7), + msg="Sunny data loaded correctly") + + + def test_load_invalid_data(self): + """ + Test that invalid data raises a ValueError. + """ + file_content = ["a a a", "2 4", "3 4 5 6", "# 3 4 6"] + file_data = "\n".join(file_content) + mocked_file = io.StringIO(file_data) + with self.assertRaises(ValueError, + msg="File with no float float float " + "data cannot be loaded"): + load_scattering_data(mocked_file) + + def test_convert_inverse_angstrom_to_nanometer(self): + """ + Test conversion of data with q in 1/Å to 1/nm + """ + input_data = array([[1.0, 1.0, 1.0], + [2.0, 2.0, 1.0], + [3.0, 3.0, 3.0]]) + expected_result = array([[10, 1.0, 1.0], + [20, 2.0, 1.0], + [30, 3.0, 3.0]]) + result = convert_inverse_angstrom_to_nanometer(input_data) + self.assertTrue(allclose(result, expected_result, 1e-7), + msg="Converted to 1/nm from 1/Å") + + def test_unit_conversion_creates_new_array(self): + """ + Test conversion of data does not change original data + """ + input_data = array([[1.0, 1.0, 1.0], + [2.0, 2.0, 1.0], + [3.0, 3.0, 3.0]]) + expected_data = array([[1.0, 1.0, 1.0], + [2.0, 2.0, 1.0], + [3.0, 3.0, 3.0]]) + _ = convert_inverse_angstrom_to_nanometer(input_data) + self.assertTrue(allclose(input_data, expected_data, 1e-7), + msg="Conversion function does not change its input") + + +def suite(): + test_suite = unittest.TestSuite() + test_suite.addTest(TestSasIO("test_parse_3_ok")) + test_suite.addTest(TestSasIO("test_parse_no_data")) + test_suite.addTest(TestSasIO("test_parse_no_valid_data")) + test_suite.addTest(TestSasIO("test_load_clean_data")) + test_suite.addTest(TestSasIO("test_load_data_with_unescaped_header")) + test_suite.addTest(TestSasIO("test_load_data_with_unescaped_footer")) + test_suite.addTest(TestSasIO("test_load_invalid_data")) + test_suite.addTest(TestSasIO("test_convert_inverse_angstrom_to_nanometer")) + test_suite.addTest(TestSasIO("test_unit_conversion_creates_new_array")) + return test_suite + + +if __name__ == '__main__': + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/src/freesas/test/utilstests.py b/src/freesas/test/utilstests.py new file mode 100644 index 0000000..d899f57 --- /dev/null +++ b/src/freesas/test/utilstests.py @@ -0,0 +1,24 @@ +#!usr/bin/env python +# coding: utf-8 + +__author__ = "Jérôme Kieffer" +__license__ = "MIT" +__date__ = "19/07/2021" +__copyright__ = "2015-2021, ESRF" + +import logging +logger = logging.getLogger("utilstest") +from silx.resources import ExternalResources +downloader = ExternalResources("freesas", "http://www.silx.org/pub/freesas/testdata", "FREESAS_TESTDATA") + + +def get_datafile(name): + """Provides the full path of a test file, + downloading it from the internet if needed + + :param name: name of the file to get + :return: full path of the datafile + """ + logger.info(f"Download file {name}") + fullpath = downloader.getfile(name) + return fullpath From 593d9530f0227c2ce0d0822c24b37464cb9a4e4c Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 17:09:08 +0100 Subject: [PATCH 10/45] include subdir --- src/freesas/meson.build | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/freesas/meson.build 
b/src/freesas/meson.build index 091c17b..4a0b86b 100644 --- a/src/freesas/meson.build +++ b/src/freesas/meson.build @@ -1,6 +1,7 @@ +subdir('app') subdir('ext') -#subdir('test') -#subdir('utils') +subdir('test') + py.install_sources([ '_version.py', From ac41f7be5f86b59a052b4d968664a62eb5913b95 Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 17:10:32 +0100 Subject: [PATCH 11/45] typo --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 62892bd..c783131 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = 'freesas' dynamic = ['version',] license = {file = 'copyright'} requires-python = '>=3.7' -readme = 'README.rd' +readme = 'README.md' description = 'Small angle scattering tools ... but unlike most others, free and written in Python' authors = [ From 8c79220cb131f4b86fa80604c17fb253e507e13b Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 17:52:57 +0100 Subject: [PATCH 12/45] scipy is a build dependency --- pyproject.toml | 3 ++- src/freesas/ext/_bift.pyx | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c783131..4ea6c2c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,8 @@ requires = [ "numpy<1.26.0; platform_machine == 'ppc64le'", "numpy; platform_machine != 'ppc64le'", 'pyproject-metadata>=0.5.0', - 'tomli>=1.0.0' + 'tomli>=1.0.0', + 'scipy' ] [project.urls] diff --git a/src/freesas/ext/_bift.pyx b/src/freesas/ext/_bift.pyx index 7536691..030fcfa 100644 --- a/src/freesas/ext/_bift.pyx +++ b/src/freesas/ext/_bift.pyx @@ -58,7 +58,7 @@ cpdef inline double blas_ddot(double[::1] a, double[::1] b) nogil: return ddot(&n, a0, &one, b0, &one) -cpdef int blas_dgemm(double[:,::1] a, double[:,::1] b, double[:,::1] c, double alpha=1.0, double beta=0.0) nogil except -1: +cpdef int blas_dgemm(double[:,::1] a, double[:,::1] b, double[:,::1] c, double alpha=1.0, double beta=0.0) noexcept nogil: "Wrapper for double matrix-matrix multiplication C = AxB " cdef: char *transa = 'n' @@ -86,7 +86,7 @@ cpdef int blas_dgemm(double[:,::1] a, double[:,::1] b, double[:,::1] c, double a return 0 -cpdef int lapack_svd(double[:, ::1] A, double[::1] eigen, double[::1] work) nogil except -1: +cpdef int lapack_svd(double[:, ::1] A, double[::1] eigen, double[::1] work) noexcept nogil: cdef: char *jobN = 'n' int n, lda, lwork, info, one=1 @@ -504,7 +504,7 @@ cdef class BIFT: double[:, ::1] transp_matrix, double[:, ::1] B, double[::1] sum_dia - ) nogil except -1: + ) noexcept nogil: cdef: double tmp, ql, prefactor, delta_r, il, varl From 7e4829e645c12787a5ca46cce46ee9ed6598d130 Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 17:55:51 +0100 Subject: [PATCH 13/45] fix warning in cython3 --- src/freesas/ext/_distance.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/freesas/ext/_distance.pyx b/src/freesas/ext/_distance.pyx index 5d70f71..b0aeffd 100644 --- a/src/freesas/ext/_distance.pyx +++ b/src/freesas/ext/_distance.pyx @@ -55,7 +55,7 @@ def calc_invariants(floating[:, :] atoms): return sqrt(s / size), sqrt(sum_d2 / 2.0) / size, sqrt(d2max) -cdef inline floating hard_sphere(floating pos, floating radius)nogil: +cdef inline floating hard_sphere(floating pos, floating radius) noexcept nogil: """Density using hard spheres @param pos: fabs(d1-d) """ @@ -63,7 +63,7 @@ cdef inline floating hard_sphere(floating pos, floating radius)nogil: return 0.0 return (4 * radius + pos) * (2 * 
radius - pos) ** 2 / (16.0 * radius ** 3) -cdef inline floating soft_sphere(floating pos, floating radius)nogil: +cdef inline floating soft_sphere(floating pos, floating radius) noexcept nogil: """Density using soft spheres (gaussian density) @param pos: fabs(d1-d) @param radius: radius of the equivalent hard sphere From 26a8592c923d306b0d1550493a49534655dcd4d9 Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 17:57:45 +0100 Subject: [PATCH 14/45] fix warning --- src/freesas/ext/_autorg.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/freesas/ext/_autorg.pyx b/src/freesas/ext/_autorg.pyx index 2fb80b0..74751c3 100644 --- a/src/freesas/ext/_autorg.pyx +++ b/src/freesas/ext/_autorg.pyx @@ -233,7 +233,7 @@ cdef inline void guinier_space(int start, DTYPE_t[::1] sigma, DTYPE_t[::1] q2, DTYPE_t[::1] lnI, - DTYPE_t[::1] I2_over_sigma2) nogil: + DTYPE_t[::1] I2_over_sigma2) noexcept nogil: "Initialize q², ln(I) and I/sigma array" cdef: int idx From 52bb4aeb5265b30563731d0a8f61d93f90ed2720 Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Mon, 27 Nov 2023 18:03:03 +0100 Subject: [PATCH 15/45] remove old files --- freesas/app/__init__.py | 29 - freesas/app/auto_gpa.py | 69 --- freesas/app/auto_guinier.py | 69 --- freesas/app/autorg.py | 69 --- freesas/app/bift.py | 147 ----- freesas/app/cormap.py | 94 --- freesas/app/extract_ascii.py | 341 ----------- freesas/app/plot_sas.py | 144 ----- freesas/app/supycomb.py | 137 ----- freesas/test/__init__.py | 29 - freesas/test/mock_open_38.py | 98 --- freesas/test/test_align.py | 152 ----- freesas/test/test_all.py | 38 -- freesas/test/test_autorg.py | 448 -------------- freesas/test/test_average.py | 89 --- freesas/test/test_bift.py | 132 ---- freesas/test/test_cormap.py | 88 --- freesas/test/test_distance.py | 62 -- freesas/test/test_fitting.py | 938 ----------------------------- freesas/test/test_model.py | 182 ------ freesas/test/test_sas_argparser.py | 603 ------------------- freesas/test/test_sasio.py | 192 ------ freesas/test/utilstests.py | 24 - 23 files changed, 4174 deletions(-) delete mode 100644 freesas/app/__init__.py delete mode 100644 freesas/app/auto_gpa.py delete mode 100644 freesas/app/auto_guinier.py delete mode 100644 freesas/app/autorg.py delete mode 100644 freesas/app/bift.py delete mode 100644 freesas/app/cormap.py delete mode 100644 freesas/app/extract_ascii.py delete mode 100644 freesas/app/plot_sas.py delete mode 100644 freesas/app/supycomb.py delete mode 100644 freesas/test/__init__.py delete mode 100644 freesas/test/mock_open_38.py delete mode 100644 freesas/test/test_align.py delete mode 100644 freesas/test/test_all.py delete mode 100644 freesas/test/test_autorg.py delete mode 100644 freesas/test/test_average.py delete mode 100644 freesas/test/test_bift.py delete mode 100644 freesas/test/test_cormap.py delete mode 100644 freesas/test/test_distance.py delete mode 100644 freesas/test/test_fitting.py delete mode 100644 freesas/test/test_model.py delete mode 100644 freesas/test/test_sas_argparser.py delete mode 100644 freesas/test/test_sasio.py delete mode 100644 freesas/test/utilstests.py diff --git a/freesas/app/__init__.py b/freesas/app/__init__.py deleted file mode 100644 index 4776656..0000000 --- a/freesas/app/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# coding: utf-8 -# /*########################################################################## -# -# Copyright (c) 2016-2018 European Synchrotron Radiation Facility -# -# Permission is hereby granted, free of charge, to any person 
obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -# -# ###########################################################################*/ -"""Command line applications provided by the freesas launcher.""" - -__authors__ = ["Jérôme Kieffer", "Martha Brennich"] -__license__ = "MIT" -__date__ = "2021/03/24" diff --git a/freesas/app/auto_gpa.py b/freesas/app/auto_gpa.py deleted file mode 100644 index fe02a90..0000000 --- a/freesas/app/auto_gpa.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/python3 -# coding: utf-8 -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -__author__ = ["Jérôme Kieffer", "Martha Brennich"] -__license__ = "MIT" -__copyright__ = "2021, ESRF" -__date__ = "19/03/2021" - -import sys -import logging -from freesas.autorg import auto_gpa -from freesas.sas_argparser import GuinierParser -from freesas.fitting import run_guinier_fit - -logging.basicConfig(level=logging.WARNING) -logger = logging.getLogger("auto_gpa") - -if sys.version_info < (3, 6): - logger.error("This code uses F-strings and requires Python 3.6+") - - -def build_parser() -> GuinierParser: - """Build parser for input and return list of files. - :return: parser - """ - description = ( - "Calculate the radius of gyration using Guinier" - " Peak Analysis (Putnam 2016) for a set of scattering curves" - ) - epilog = """free_gpa is an open-source implementation of - the autorg algorithm originately part of the ATSAS suite. 
- As this tool used a different theory, some results may differ - """ - return GuinierParser( - prog="free_gpa", description=description, epilog=epilog - ) - - -def main() -> None: - """Entry point for free_gpa app""" - parser = build_parser() - run_guinier_fit(fit_function=auto_gpa, parser=parser, logger=logger) - - -if __name__ == "__main__": - main() diff --git a/freesas/app/auto_guinier.py b/freesas/app/auto_guinier.py deleted file mode 100644 index 8f918cc..0000000 --- a/freesas/app/auto_guinier.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/python3 -# coding: utf-8 -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -__author__ = ["Jérôme Kieffer", "Martha Brennich"] -__license__ = "MIT" -__copyright__ = "2021, ESRF" -__date__ = "19/03/2021" - -import sys -import logging -from freesas.autorg import auto_guinier -from freesas.sas_argparser import GuinierParser -from freesas.fitting import run_guinier_fit - -logging.basicConfig(level=logging.WARNING) -logger = logging.getLogger("auto_guinier") - -if sys.version_info < (3, 6): - logger.error("This code uses F-strings and requires Python 3.6+") - - -def build_parser() -> GuinierParser: - """Build parser for input and return list of files. - :return: parser - """ - description = ( - "Calculate the radius of gyration using linear fitting of" - "logarithmic intensities for a set of scattering curves" - ) - epilog = """free_guinier is an open-source implementation of - the autorg algorithm originately part of the ATSAS suite. 
- As this tool used a different theory, some results may differ - """ - return GuinierParser( - prog="free_guinier", description=description, epilog=epilog - ) - - -def main() -> None: - """Entry point for free_guinier app""" - parser = build_parser() - run_guinier_fit(fit_function=auto_guinier, parser=parser, logger=logger) - - -if __name__ == "__main__": - main() diff --git a/freesas/app/autorg.py b/freesas/app/autorg.py deleted file mode 100644 index 2e9ef0c..0000000 --- a/freesas/app/autorg.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/python3 -# coding: utf-8 -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2017-2020 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -__author__ = ["Jérôme Kieffer", "Martha Brennich"] -__license__ = "MIT" -__copyright__ = "2021, ESRF" -__date__ = "19/03/2021" - -import sys -import logging -from freesas.autorg import autoRg -from freesas.sas_argparser import GuinierParser -from freesas.fitting import run_guinier_fit - -logging.basicConfig(level=logging.WARNING) -logger = logging.getLogger("auto_gpa") - -if sys.version_info < (3, 6): - logger.error("This code uses F-strings and requires Python 3.6+") - - -def build_parser() -> GuinierParser: - """Build parser for input and return list of files. - :return: parser - """ - description = ( - "Calculate the radius of gyration using Guinier law" - " for a set of scattering curves" - ) - epilog = """free_rg is an open-source implementation of - the autorg algorithm originately part of the ATSAS suite. 
- As this is reverse engineered, some constants and results may differ - """ - return GuinierParser( - prog="free_rg", description=description, epilog=epilog - ) - - -def main() -> None: - """Entry point for free_rg app""" - parser = build_parser() - run_guinier_fit(fit_function=autoRg, parser=parser, logger=logger) - - -if __name__ == "__main__": - main() diff --git a/freesas/app/bift.py b/freesas/app/bift.py deleted file mode 100644 index 2a540b0..0000000 --- a/freesas/app/bift.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/python3 -# coding: utf-8 -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2017 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -__author__ = "Jérôme Kieffer" -__license__ = "MIT" -__copyright__ = "2017, ESRF" -__date__ = "13/10/2020" - -import sys -import logging -import platform -import traceback -from freesas import bift -from freesas.sasio import ( - load_scattering_data, - convert_inverse_angstrom_to_nanometer, -) -from freesas.sas_argparser import SASParser -from freesas.fitting import ( - set_logging_level, - collect_files, -) - -logging.basicConfig(level=logging.WARNING) -logger = logging.getLogger("bift") - - -def build_parser() -> SASParser: - """Build parser for input and return list of files. - :return: parser - """ - - description = ( - "Calculate the density as function of distance p(r)" - " curve from an I(q) scattering curve" - ) - epilog = """free_bift is a Python implementation of the Bayesian Inverse Fourier Transform - - This code is the implementation of - Steen Hansen J. Appl. Cryst. (2000). 33, 1415-1421 - - Based on the BIFT from Jesse Hopkins, available at: - https://sourceforge.net/p/bioxtasraw/git/ci/master/tree/bioxtasraw/BIFT.py - - It aims at being a drop in replacement for datgnom of the ATSAS suite. 
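For context, BIFT inverts the forward relation I(q) = 4*pi * integral of p(r) * sin(qr)/(qr) dr between the distance distribution p(r) and the scattering curve; the test suite later in this patch synthesises its reference data the same way. A sketch of the forward direction, with the function name chosen here for illustration:

    import numpy

    def pr_to_Iq(p, r, q):
        """Forward transform of a distance distribution p(r), sampled on a
        regular grid r, into a scattering curve I(q).

        numpy.sinc(x) is sin(pi*x)/(pi*x), hence the division by pi."""
        dr = r[1] - r[0]
        sincqr = numpy.sinc(numpy.outer(q, r / numpy.pi))
        return 4.0 * numpy.pi * (p * sincqr).sum(axis=-1) * dr

    r = numpy.linspace(0, 10, 101)
    p = -r * (r - 10)  # parabolic p(r) with Dmax = 10
    q = numpy.linspace(0.01, 5, 200)
    I = pr_to_Iq(p, r, q)

BIFT then searches for the Dmax and regularisation weight alpha whose regularised inversion best explains the measurement; the --scan and --mc options above size that initial search and the Monte-Carlo post-refinement.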
- - """ - parser = SASParser( - prog="free_bift", description=description, epilog=epilog - ) - parser.add_file_argument(help_text="I(q) files to convert into p(r)") - parser.add_output_filename_argument() - parser.add_q_unit_argument() - parser.add_argument( - "-n", - "--npt", - default=100, - type=int, - help="number of points in p(r) curve", - ) - parser.add_argument( - "-s", - "--scan", - default=27, - type=int, - help="Initial alpha-scan size to guess the start parameter", - ) - parser.add_argument( - "-m", - "--mc", - default=100, - type=int, - help="Number of Monte-Carlo samples in post-refinement", - ) - parser.add_argument( - "-t", - "--threshold", - default=2.0, - type=float, - help="Sample at average ± threshold*sigma in MC", - ) - return parser - - -def main(): - """Entry point for bift app.""" - if platform.system() == "Windows": - sys.stdout = open(1, "w", encoding="utf-16", closefd=False) - - parser = build_parser() - args = parser.parse_args() - set_logging_level(args.verbose) - files = collect_files(args.file) - - for afile in files: - try: - data = load_scattering_data(afile) - except: - logger.error("Unable to parse file %s", afile) - else: - if args.unit == "Å": - data = convert_inverse_angstrom_to_nanometer(data) - try: - bo = bift.auto_bift(data, npt=args.npt, scan_size=args.scan) - except Exception as err: - print("%s: %s %s" % (afile, err.__class__.__name__, err)) - if logging.root.level < logging.WARNING: - traceback.print_exc(file=sys.stdout) - else: - try: - stats = bo.monte_carlo_sampling( - args.mc, args.threshold, npt=args.npt - ) - except RuntimeError as err: - print("%s: %s %s" % (afile, err.__class__.__name__, err)) - if logging.root.level < logging.WARNING: - traceback.print_exc(file=sys.stdout) - else: - dest = afile.stem + ".out" - print(stats.save(dest, source=afile)) - - -if __name__ == "__main__": - main() diff --git a/freesas/app/cormap.py b/freesas/app/cormap.py deleted file mode 100644 index 042ee70..0000000 --- a/freesas/app/cormap.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/python3 -# coding: utf-8 - -__author__ = "Jérôme Kieffer" -__license__ = "MIT" -__copyright__ = "2015, ESRF" -__date__ = "20/04/2020" - -import os -import logging -import freesas -from freesas.cormap import gof - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("cormap") -import numpy -from itertools import combinations -from collections import namedtuple -from freesas.sasio import load_scattering_data -from freesas.sas_argparser import SASParser - -datum = namedtuple("datum", ["index", "filename", "data"]) - -import platform - -operatingSystem = platform.system() -if operatingSystem == "Windows": - import glob - - -def parse(): - """Parse input and return list of files. - :return: list of input files - """ - description = "Measure pair-wise similarity of spectra " - epilog = """cormapy is an open-source implementation of - the cormap algorithm in datcmp (from ATSAS). 
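The pair-wise comparison is delegated to freesas.cormap.gof, which, as used in compare() below, returns C, the length of the longest run where one curve stays on one side of the other, and Pr(>C), the probability of such a run arising by chance; a large Pr(>C) indicates consistent curves. A minimal sketch on synthetic data:

    import numpy
    from freesas.cormap import gof

    # two noisy measurements of the same signal on a common q-grid
    I1 = numpy.random.poisson(1000, size=500).astype(float)
    I2 = numpy.random.poisson(1000, size=500).astype(float)
    result = gof(I1, I2)
    print(result.c, result.P)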
- It does not scale the data and assumes they are already scaled
-    """
-    parser = SASParser(prog="cormapy", description=description, epilog=epilog)
-    parser.add_file_argument(help_text="dat files to compare")
-
-    args = parser.parse_args()
-
-    if args.verbose:
-        logging.root.setLevel(logging.DEBUG)
-    files = [i for i in args.file if os.path.exists(i)]
-    if operatingSystem == "Windows" and files == []:
-        files = glob.glob(args.file[0])
-    files.sort()
-    input_len = len(files)
-    logger.debug("%s input files" % input_len)
-    return files
-
-
-def compare(lstfiles):
-    res = [
-        "Pair-wise Correlation Map",
-        "" " C Pr(>C)",
-    ]
-    data = []
-    for i, f in enumerate(lstfiles):
-        try:
-            ary = load_scattering_data(f)
-        except ValueError as e:
-            print(e)
-            continue
-        if ary.ndim > 1 and ary.shape[1] > 1:
-            ary = ary[:, 1]
-        d = datum(i + 1, f, ary)
-        data.append(d)
-    for a, b in combinations(data, 2):
-        r = gof(a.data, b.data)
-        res.append(
-            "%6i vs. %6i %6i %8.6f" % (a.index, b.index, r.c, r.P)
-        )
-    res.append("")
-    for a in data:
-        res.append(
-            "%6i %8f + %8f * %s" % (a.index, 0.0, 1.0, a.filename)
-        )
-    res.append("")
-    print(os.linesep.join(res))
-    return res
-
-
-def main():
-    """main entry point"""
-    f = parse()
-    if f:
-        compare(f)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/freesas/app/extract_ascii.py b/freesas/app/extract_ascii.py
deleted file mode 100644
index 30e8fc0..0000000
--- a/freesas/app/extract_ascii.py
+++ /dev/null
@@ -1,341 +0,0 @@
-#!/usr/bin/python3
-# coding: utf-8
-#
-# Project: freesas
-# https://github.com/kif/freesas
-#
-# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-
-__author__ = "Jérôme Kieffer"
-__license__ = "MIT"
-__copyright__ = "2020, ESRF"
-__date__ = "15/01/2021"
-
-import io
-import os
-import sys
-import logging
-import glob
-import platform
-import posixpath
-from collections import namedtuple, OrderedDict
-import json
-import copy
-import pyFAI
-from pyFAI.io import Nexus
-from freesas.sas_argparser import SASParser
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger("extract_ascii")
-
-if sys.version_info[0] < 3:
-    logger.error("This code requires Python 3.4+")
-
-NexusJuice = namedtuple(
-    "NexusJuice",
-    "filename h5path npt unit q I poni mask energy polarization signal2d error2d buffer concentration",
-)
-
-
-def parse():
-    """Parse input and return list of files. 
- :return: list of input files - """ - description = "Extract the SAXS data from a Nexus files as a 3 column ascii (q, I, err). Metadata are exported in the headers as needed." - epilog = """extract_ascii.py allows you to export the data in inverse nm or inverse A with possible intensity scaling. - """ - parser = SASParser( - prog="extract-ascii.py", description=description, epilog=epilog - ) - # Commented option need to be implemented - # parser.add_argument("-o", "--output", action='store', help="Output filename, by default the same with .dat extension", default=None, type=str) - # parser.add_argument("-u", "--unit", action='store', help="Unit for q: inverse nm or Angstrom?", default="nm", type=str) - # parser.add_argument("-n", "--normalize", action='store', help="Re-normalize all intensities with this factor ", default=1.0, type=float) - parser.add_file_argument("HDF5 input data") - parser.add_argument( - "-a", - "--all", - action="store_true", - help="extract every individual frame", - default=False, - ) - return parser.parse_args() - - -def extract_averaged(filename): - "return some infomations extracted from a HDF5 file " - results = OrderedDict() - results["filename"] = filename - # Missing: comment normalization - with Nexus(filename, "r") as nxsr: - entry_grp = nxsr.get_entries()[0] - results["h5path"] = entry_grp.name - nxdata_grp = nxsr.h5[entry_grp.attrs["default"]] - signal = nxdata_grp.attrs["signal"] - axis = nxdata_grp.attrs["axes"] - results["I"] = nxdata_grp[signal][()] - results["q"] = nxdata_grp[axis][()] - results["std"] = nxdata_grp["errors"][()] - results["unit"] = pyFAI.units.to_unit( - axis + "_" + nxdata_grp[axis].attrs["units"] - ) - integration_grp = nxdata_grp.parent - results["geometry"] = json.loads( - integration_grp["configuration/data"][()] - ) - results["polarization"] = integration_grp[ - "configuration/polarization_factor" - ][()] - - instrument_grps = nxsr.get_class(entry_grp, class_type="NXinstrument") - if instrument_grps: - detector_grp = nxsr.get_class( - instrument_grps[0], class_type="NXdetector" - )[0] - results["mask"] = detector_grp["pixel_mask"].attrs["filename"] - sample_grp = nxsr.get_class(entry_grp, class_type="NXsample")[0] - results["sample"] = posixpath.split(sample_grp.name)[-1] - results["buffer"] = sample_grp["buffer"][()] - results["storage temperature"] = sample_grp["temperature_env"][()] - results["exposure temperature"] = sample_grp["temperature"][()] - results["concentration"] = sample_grp["concentration"][()] - if "2_correlation_mapping" in entry_grp: - results["to_merge"] = entry_grp[ - "2_correlation_mapping/results/to_merge" - ][()] - return results - - -def extract_all(filename): - "return some infomations extracted from a HDF5 file for all individual frames" - res = [] - results = OrderedDict() - results["filename"] = filename - with Nexus(filename, "r") as nxsr: - entry_grp = nxsr.get_entries()[0] - results["h5path"] = entry_grp.name - nxdata_grp = nxsr.h5[entry_grp.name + "/1_integration/results"] - signal = nxdata_grp.attrs["signal"] - axis = nxdata_grp.attrs["axes"][1] - I = nxdata_grp[signal][()] - results["q"] = nxdata_grp[axis][()] - std = nxdata_grp["errors"][()] - results["unit"] = pyFAI.units.to_unit( - axis + "_" + nxdata_grp[axis].attrs["units"] - ) - integration_grp = nxdata_grp.parent - results["geometry"] = json.loads( - integration_grp["configuration/data"][()] - ) - results["polarization"] = integration_grp[ - "configuration/polarization_factor" - ][()] - instrument_grp = nxsr.get_class(entry_grp, 
class_type="NXinstrument")[
-        0
-    ]
-    detector_grp = nxsr.get_class(instrument_grp, class_type="NXdetector")[
-        0
-    ]
-    results["mask"] = detector_grp["pixel_mask"].attrs["filename"]
-    sample_grp = nxsr.get_class(entry_grp, class_type="NXsample")[0]
-    results["sample"] = posixpath.split(sample_grp.name)[-1]
-    results["buffer"] = sample_grp["buffer"][()]
-    if "temperature_env" in sample_grp:
-        results["storage temperature"] = sample_grp["temperature_env"][()]
-    if "temperature" in sample_grp:
-        results["exposure temperature"] = sample_grp["temperature"][()]
-    if "concentration" in sample_grp:
-        results["concentration"] = sample_grp["concentration"][()]
-    # if "2_correlation_mapping" in entry_grp:
-    #     results["to_merge"] = entry_grp["2_correlation_mapping/results/to_merge"][()]
-    for i, s in zip(I, std):
-        r = copy.copy(results)
-        r["I"] = i
-        r["std"] = s
-        res.append(r)
-    return res
-
-
-def write_ascii(results, output=None, hdr="#", linesep=os.linesep):
-    """
-    :param results: dict containing some NexusJuice
-    :param output: name of the 3-column ascii file to be written
-    :param hdr: header mark, usually '#'
-    :param linesep: to be able to adapt the end of lines
-
-    Adam Round explicitly asked for (email from Date: Tue, 04 Oct 2011 15:22:29 +0200) :
-    Modification from:
-    # BSA buffer
-    # Sample c= 0.0 mg/ml (these two lines are required for current DOS pipeline and can be cleaned up once we use EDNA to get to ab-initio models)
-    #
-    # Sample environment:
-    # Detector = Pilatus 1M
-    # PixelSize_1 = 0.000172
-    # PixelSize_2 = 6.283185 (I think it could avoid confusion if we give teh actual pixel size as 0.000172 for X and Y and not to give the integrated sizes. Also could there also be a modification for PixelSize_1 as on the diagonal wont it be the hypotenuse (0.000243)? 
and thus will be on average a bit bigger than 0.000172) - # - # title = BSA buffer - # Frame 7 of 10 - # Time per frame (s) = 10 - # SampleDistance = 2.43 - # WaveLength = 9.31e-11 - # Normalization = 0.0004885 - # History-1 = saxs_angle +pass -omod n -rsys normal -da 360_deg -odim = 1 /data/id14eh3/inhouse/saxs_pilatus/Adam/EDNAtests/2d/dumdum_008_07.edf/data/id14eh3/inhouse/saxs_pilatus/Adam/EDNAtests/misc/dumdum_008_07.ang - # DiodeCurr = 0.0001592934 - # MachCurr = 163.3938 - # Mask = /data/id14eh3/archive/CALIBRATION/MASK/Pcon_01Jun_msk.edf - # SaxsDataVersion = 2.40 - # - # N 3 - # L q*nm I_BSA buffer stddev - # - # Sample Information: - # Storage Temperature (degrees C): 4 - # Measurement Temperature (degrees C): 10 - # Concentration: 0.0 - # Code: BSA - s-vector Intensity Error - s-vector Intensity Error - s-vector Intensity Error - s-vector Intensity Error - """ - hdr = str(hdr) - headers = [] - if "comments" in results: - headers.append(hdr + " " + results["comments"]) - else: - headers.append(hdr) - headers.append( - hdr + " Sample c= %s mg/ml" % results.get("concentration", -1) - ) - headers += [hdr, hdr + " Sample environment:"] - if "geometry" in results: - headers.append( - hdr + " Detector = %s" % results["geometry"]["detector"] - ) - headers.append( - hdr + " SampleDistance = %s" % results["geometry"]["dist"] - ) - headers.append( - hdr + " WaveLength = %s" % results["geometry"]["wavelength"] - ) - headers.append(hdr) - if "comments" in results: - headers.append(hdr + " title = %s" % results["comment"]) - if "to_merge" in results: - headers.append( - hdr - + " Frames merged: " - + " ".join([str(i) for i in results["to_merge"]]) - ) - if "normalization" in results: - headers.append(hdr + " Normalization = %s" % results["normalization"]) - if "mask" in results: - headers.append(hdr + " Mask = %s" % results["mask"]) - headers.append(hdr) - headers.append(hdr + (" N 3" if "std" in results else " N 2")) - line = hdr + " L " - if "unit" in results: - a, b = str(results["unit"]).split("_") - line += a + "*" + b.strip("^-1") + " I_" - else: - line += "q I_" - if "comment" in results: - line += results["comments"] - if "std" in results: - line += " stddev" - headers.append(line) - headers.append(hdr) - headers.append(hdr + " Sample Information:") - if "storage temperature" in results: - headers.append( - hdr - + " Storage Temperature (degrees C): %s" - % results["storage temperature"] - ) - if "exposure temperature" in results: - headers.append( - hdr - + " Measurement Temperature (degrees C): %s" - % results["exposure temperature"] - ) - - headers.append( - hdr + " Concentration: %s" % results.get("concentration", -1) - ) - if "buffer" in results: - headers.append(hdr + " Buffer: %s" % results["buffer"]) - headers.append(hdr + " Code: %s" % results.get("sample", "")) - - def write(headers, file_): - - file_.writelines(linesep.join(headers)) - file_.write(linesep) - - if "std" in results: - data = [ - "%14.6e\t%14.6e\t%14.6e" % (q, I, std) - for q, I, std in zip( - results["q"], results["I"], results["std"] - ) - ] - else: - data = [ - "%14.6e\t%14.6e\t" % (q, I) - for q, I in zip(results["q"], results["I"]) - ] - data.append("") - file_.writelines(linesep.join(data)) - - if output: - with open(output, "w") as f: - write(headers, f) - else: - f = io.StringIO() - write(headers, f) - f.seek(0) - return f.read() - - -def main(): - args = parse() - if args.verbose: - logging.root.setLevel(logging.DEBUG) - files = [i for i in args.file if os.path.exists(i)] - if platform.system() == 
"Windows" and files == []: - files = glob.glob(args.file[0]) - files.sort() - input_len = len(files) - logger.debug("%s input files", input_len) - for src in files: - if args.all: - dest = os.path.splitext(src)[0] + "%04i.dat" - for idx, frame in enumerate(extract_all(src)): - print(src, " --> ", dest % idx) - write_ascii(frame, dest % idx) - else: - dest = os.path.splitext(src)[0] + ".dat" - write_ascii(extract_averaged(src), dest) - print(src, " --> ", dest) - - -if __name__ == "__main__": - main() diff --git a/freesas/app/plot_sas.py b/freesas/app/plot_sas.py deleted file mode 100644 index 6378b30..0000000 --- a/freesas/app/plot_sas.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python3 -# coding: utf-8 -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2020 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -"Tool to perform a simple plotting of a set of SAS curve" - -__author__ = "Jérôme Kieffer" -__license__ = "MIT" -__copyright__ = "2020, ESRF" -__date__ = "14/05/2020" - -import platform -import logging -from pathlib import Path -from matplotlib.pyplot import switch_backend -from matplotlib.backends.backend_pdf import PdfPages -from freesas import plot -from freesas.sasio import ( - load_scattering_data, - convert_inverse_angstrom_to_nanometer, -) -from freesas.autorg import InsufficientDataError, NoGuinierRegionError -from freesas.sas_argparser import SASParser - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("plot_sas") - - -def set_backend(output: Path = None, output_format: str = None): - """Explicitely set silent backend based on format or filename - Needed on MacOS - @param output: Name of the specified output file - @param output_format: User specified format - """ - if output_format: - output_format = output_format.lower() - elif output and len(output.suffix) > 0: - output_format = output.suffix.lower()[1:] - if output_format: - if output_format == "svg": - switch_backend("svg") - elif output_format in ["ps", "eps"]: - switch_backend("ps") - elif output_format == "pdf": - switch_backend("pdf") - elif output_format == "png": - switch_backend("agg") - - -def parse(): - """Parse input and return list of files. - :return: list of input files - """ - description = "Generate typical sas plots with matplotlib" - epilog = """freesas is an open-source implementation of a bunch of - small angle scattering algorithms. 
""" - parser = SASParser( - prog="freesas.py", description=description, epilog=epilog - ) - parser.add_file_argument(help_text="dat files to plot") - parser.add_output_filename_argument() - parser.add_output_data_format("jpeg", "svg", "png", "pdf") - parser.add_q_unit_argument() - return parser.parse_args() - - -def create_figure(file: Path, unit: str = "nm"): - """Create multi-plot SAS figure for data from a file - @param file: filename of SAS file in q I Ierr format - @param unit: length unit of input data, supported options are Å and nm. - :return: figure with SAS plots for this file - """ - data = load_scattering_data(file) - if unit == "Å": - data = convert_inverse_angstrom_to_nanometer(data) - fig = plot.plot_all(data) - fig.suptitle(file) - return fig - - -def main(): - args = parse() - if args.verbose: - logging.root.setLevel(logging.DEBUG) - files = [Path(i) for i in args.file if Path(i).exists()] - if platform.system() == "Windows" and files == []: - files = list(Path.cwd().glob(args.file[0])) - files.sort() - input_len = len(files) - logger.debug("%s input files", input_len) - figures = [] - - if args.output and len(files) > 1: - logger.warning("Only PDF export is possible in multi-frame mode") - if args.output and platform.system() == "Darwin": - if len(files) == 1: - set_backend(args.output, args.format) - elif len(files) > 1: - set_backend(output_format="pdf") - for afile in files: - try: - fig = create_figure(afile, args.unit) - except OSError: - logger.error("Unable to load file %s", afile) - except (InsufficientDataError, NoGuinierRegionError, ValueError): - logger.error("Unable to process file %s", afile) - else: - figures.append(fig) - if args.output is None: - fig.show() - elif len(files) == 1: - fig.savefig(args.output, format=args.format) - if len(figures) > 1 and args.output: - with PdfPages(args.output) as pdf_output_file: - for fig in figures: - pdf_output_file.savefig(fig) - if not args.output: - input("Press enter to quit") - - -if __name__ == "__main__": - main() diff --git a/freesas/app/supycomb.py b/freesas/app/supycomb.py deleted file mode 100644 index 2c4f688..0000000 --- a/freesas/app/supycomb.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/python3 -__author__ = "Guillaume Bonamis" -__license__ = "MIT" -__copyright__ = "2015, ESRF" -__date__ = "09/07/2020" - -import logging -from os.path import dirname, abspath -from freesas.align import InputModels, AlignModels -from freesas.sas_argparser import SASParser - -base = dirname(dirname(abspath(__file__))) - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("supycomb") - - -def parse(): - - """Parse input and return list of files. - :return: list of args - """ - description = "Align several models and calculate NSD" - epilog = """supycomb is an open-source implementation of - [J. Appl. Cryst. (2001). 34, 33-41](doi:10.1107/S0021889800014126). - - The main difference with supcomb: the fast mode does not re-bin beads. It only refines the best matching orientation which provides a speed-up of a factor 8. 
- - """ - parser = SASParser(prog="supycomp", description=description, epilog=epilog) - parser.add_file_argument(help_text="pdb files to align") - parser.add_argument( - "-m", - "--mode", - dest="mode", - type=str, - choices=["SLOW", "FAST"], - default="SLOW", - help="Either SLOW or FAST, default: %(default)s)", - ) - parser.add_argument( - "-e", - "--enantiomorphs", - type=str, - choices=["YES", "NO"], - default="YES", - help="Search enantiomorphs, YES or NO, default: %(default)s)", - ) - parser.add_argument( - "-q", - "--quiet", - type=str, - choices=["ON", "OFF"], - default="ON", - help="Hide log or not, default: %(default)s", - ) - parser.add_argument( - "-g", - "--gui", - type=str, - choices=["YES", "NO"], - default="YES", - help="Use GUI for figures or not, default: %(default)s", - ) - parser.add_argument( - "-o", - "--output", - type=str, - default="aligned.pdb", - help="output filename, default: %(default)s", - ) - return parser.parse_args() - - -def main(): - """main application""" - - args = parse() - input_len = len(args.file) - logger.info("%s input files" % input_len) - selection = InputModels() - - if args.mode == "SLOW": - slow = True - logger.info("SLOW mode") - else: - slow = False - logger.info("FAST mode") - - if args.enantiomorphs == "YES": - enantiomorphs = True - else: - enantiomorphs = False - logger.info("NO enantiomorphs") - - if args.quiet == "OFF": - logger.setLevel(logging.DEBUG) - logger.info("setLevel: Debug") - - if args.gui == "NO": - save = True - logger.info( - "Figures saved automatically : \n R factor values and selection => Rfactor.png \n NSD table and selection => nsd.png" - ) - else: - save = False - - align = AlignModels(args.file, slow=slow, enantiomorphs=enantiomorphs) - if input_len == 2: - align.outputfiles = args.output - align.assign_models() - dist = align.alignment_2models() - logger.info("%s and %s aligned" % (args.file[0], args.file[1])) - logger.info("NSD after optimized alignment = %.2f" % dist) - else: - align.outputfiles = [ - "model-%02i.pdb" % (i + 1) for i in range(input_len) - ] - selection.inputfiles = args.file - selection.models_selection() - selection.rfactorplot(save=save) - align.models = selection.sasmodels - align.validmodels = selection.validmodels - - align.makeNSDarray() - align.alignment_reference() - logger.info( - "valid models aligned on the model %s" % (align.reference + 1) - ) - align.plotNSDarray(rmax=round(selection.rmax, 4), save=save) - - if not save and input_len > 2: - input("Press any key to exit") - - -if __name__ == "__main__": - main() diff --git a/freesas/test/__init__.py b/freesas/test/__init__.py deleted file mode 100644 index dbcadf1..0000000 --- a/freesas/test/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -#!usr/bin/env python -# coding: utf-8 - -__author__ = "Jérôme Kieffer" -__license__ = "MIT" -__date__ = "15/01/2021" -__copyright__ = "2015-2021, ESRF" - -import sys -import unittest -from .test_all import suite - - -def run_tests(): - """Run test complete test_suite""" - mysuite = suite() - runner = unittest.TextTestRunner() - if not runner.run(mysuite).wasSuccessful(): - print("Test suite failed") - return 1 - else: - print("Test suite succeeded") - return 0 - - -run = run_tests - -if __name__ == '__main__': - sys.exit(run_tests()) diff --git a/freesas/test/mock_open_38.py b/freesas/test/mock_open_38.py deleted file mode 100644 index cf85a66..0000000 --- a/freesas/test/mock_open_38.py +++ /dev/null @@ -1,98 +0,0 @@ -""" -This is the Python 3.8 implementation of mock_open taken from 
-https://github.com/python/cpython/blob/3.8/Lib/unittest/mock.py -Hence: -"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation; -All Rights Reserved" -""" - -import io -from unittest.mock import MagicMock, DEFAULT - - -file_spec = None -#sentinel = _Sentinel() -#DEFAULT = sentinel.DEFAULT - -def _to_stream(read_data): - if isinstance(read_data, bytes): - return io.BytesIO(read_data) - else: - return io.StringIO(read_data) - - -def mock_open(mock=None, read_data=''): - """ - A helper function to create a mock to replace the use of `open`. It works - for `open` called directly or used as a context manager. - The `mock` argument is the mock object to configure. If `None` (the - default) then a `MagicMock` will be created for you, with the API limited - to methods or attributes available on standard file handles. - `read_data` is a string for the `read`, `readline` and `readlines` of the - file handle to return. This is an empty string by default. - """ - _read_data = _to_stream(read_data) - _state = [_read_data, None] - - def _readlines_side_effect(*args, **kwargs): - if handle.readlines.return_value is not None: - return handle.readlines.return_value - return _state[0].readlines(*args, **kwargs) - - def _read_side_effect(*args, **kwargs): - if handle.read.return_value is not None: - return handle.read.return_value - return _state[0].read(*args, **kwargs) - - def _readline_side_effect(*args, **kwargs): - yield from _iter_side_effect() - while True: - yield _state[0].readline(*args, **kwargs) - - def _iter_side_effect(): - if handle.readline.return_value is not None: - while True: - yield handle.readline.return_value - for line in _state[0]: - yield line - - def _next_side_effect(): - if handle.readline.return_value is not None: - return handle.readline.return_value - return next(_state[0]) - - global file_spec - if file_spec is None: - import _io - file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) - - if mock is None: - mock = MagicMock(name='open', spec=open) - - handle = MagicMock(spec=file_spec) - handle.__enter__.return_value = handle - - handle.write.return_value = None - handle.read.return_value = None - handle.readline.return_value = None - handle.readlines.return_value = None - - handle.read.side_effect = _read_side_effect - _state[1] = _readline_side_effect() - handle.readline.side_effect = _state[1] - handle.readlines.side_effect = _readlines_side_effect - handle.__iter__.side_effect = _iter_side_effect - handle.__next__.side_effect = _next_side_effect - - def reset_data(*args, **kwargs): - _state[0] = _to_stream(read_data) - if handle.readline.side_effect == _state[1]: - # Only reset the side effect if the user hasn't overridden it. 
- _state[1] = _readline_side_effect() - handle.readline.side_effect = _state[1] - return DEFAULT - - mock.side_effect = reset_data - mock.return_value = handle - return mock diff --git a/freesas/test/test_align.py b/freesas/test/test_align.py deleted file mode 100644 index 3137e63..0000000 --- a/freesas/test/test_align.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/python -from __future__ import print_function - -__author__ = "Guillaume" -__license__ = "MIT" -__copyright__ = "2015, ESRF" - -import numpy -import unittest -from .utilstests import get_datafile -from ..model import SASModel -from ..align import AlignModels -from ..transformations import translation_matrix, euler_matrix -import logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("AlignModels_test") - - -def move(mol): - """ - Random movement of the molecule. - - @param mol: 2d array, coordinates of the molecule - @return mol:2D array, coordinates of the molecule after a translation and a rotation - """ - vect = numpy.random.random(3) - translation = translation_matrix(vect) - - euler = numpy.random.random(3) - rotation = euler_matrix(euler[0], euler[1], euler[2]) - - mol = numpy.dot(rotation, mol.T) - mol = numpy.dot(translation, mol).T - - return mol - - -def assign_random_mol(inf=None, sup=None): - """ - Create a random 2d array to create a molecule - - @param inf: inf limit of coordinates values - @param sup: sup limit of coordinates values - @return molecule: 2d array, random coordinates - """ - if not inf: - inf = 0 - if not sup: - sup = 100 - molecule = numpy.random.randint(inf, sup, size=400).reshape(100, 4).astype(float) - molecule[:, -1] = 1.0 - return molecule - - -class TestAlign(unittest.TestCase): - testfile1 = get_datafile("dammif-01.pdb") - testfile2 = get_datafile("dammif-02.pdb") - - def test_alignment(self): - inputfiles = [self.testfile1, self.testfile1] - align = AlignModels(inputfiles, slow=False) - align.assign_models() - m = align.models[0] - n = align.models[1] - n.atoms = move(n.atoms) - n.centroid() - n.inertiatensor() - n.canonical_parameters() - if m.dist(n, m.atoms, n.atoms) == 0: - logger.error(m.dist(n, m.atoms, n.atoms)) - logger.error("pb of movement") - dist = align.alignment_2models(save=False) - self.assertAlmostEqual(dist, 0, 12, msg="NSD unequal 0, %s!=0" % dist) - - def test_usefull_alignment(self): - inputfiles = [self.testfile1, self.testfile2] - align = AlignModels(inputfiles, slow=False) - align.assign_models() - mol1 = align.models[0] - mol2 = align.models[1] - dist_before = mol1.dist(mol2, mol1.atoms, mol2.atoms) - symmetry, par = align.alignment_sym(mol1, mol2) - dist_after = mol1.dist_after_movement(par, mol2, symmetry) - self.assertGreaterEqual(dist_before, dist_after, "increase of distance after alignment %s<%s" % (dist_before, dist_after)) - - def test_optimisation_align(self): - inputfiles = [self.testfile1, self.testfile2] - align = AlignModels(inputfiles, slow=False) - align.assign_models() - mol1 = align.models[0] - mol2 = align.models[1] - align.slow = False - sym0, p0 = align.alignment_sym(mol1, mol2) - dist_before = mol1.dist_after_movement(p0, mol2, sym0) - align.slow = True - sym, p = align.alignment_sym(mol1, mol2) - dist_after = mol1.dist_after_movement(p, mol2, sym) - self.assertGreaterEqual(dist_before, dist_after, "increase of distance after optimized alignment %s<%s" % (dist_before, dist_after)) - - def test_alignment_intruder(self): - intruder = numpy.random.randint(0, 8) - inputfiles = [] - for i in range(8): - if i == intruder: - 
inputfiles.append(self.testfile2) - else: - inputfiles.append(self.testfile1) - - align = AlignModels(inputfiles, slow=False, enantiomorphs=False) - align.assign_models() - align.validmodels = numpy.ones(8) - table = align.makeNSDarray() - if table.sum() == 0: - logger.error("there is no intruders") - - averNSD = ((table.sum(axis=-1)) / (align.validmodels.sum() - 1)) - num_intr = averNSD.argmax() - - if not num_intr and num_intr != 0: - logger.error("cannot find the intruder") - self.assertEqual(num_intr, intruder, msg="not find the good intruder, %s!=%s" % (num_intr, intruder)) - - def test_reference(self): - inputfiles = [self.testfile1] * 8 - align = AlignModels(inputfiles, slow=False, enantiomorphs=False) - align.assign_models() - for i in range(8): - mol = assign_random_mol() - align.models[i].atoms = mol - align.validmodels = numpy.ones(8) - table = align.makeNSDarray() - ref = align.find_reference() - neg_dif = 0 - for i in range(8): - dif = (table[i, :] - table[ref, :]).mean() - if dif < 0: - neg_dif += 1 - self.assertEqual(neg_dif, 0, msg="pb with reference choice") - - -def suite(): - testSuite = unittest.TestSuite() - testSuite.addTest(TestAlign("test_alignment")) - testSuite.addTest(TestAlign("test_usefull_alignment")) - testSuite.addTest(TestAlign("test_optimisation_align")) - testSuite.addTest(TestAlign("test_alignment_intruder")) - testSuite.addTest(TestAlign("test_reference")) - return testSuite - -if __name__ == '__main__': - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_all.py b/freesas/test/test_all.py deleted file mode 100644 index 1377672..0000000 --- a/freesas/test/test_all.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 -from __future__ import print_function - -__author__ = "Guillaume" -__license__ = "MIT" -__copyright__ = "2015, ESRF" -__date__ = "25/04/2020" - -import unittest -from . import test_model -from . import test_align -from . import test_distance -from . import test_cormap -from . import test_autorg -from . import test_bift -from . import test_sasio -from . import test_sas_argparser -from . 
import test_fitting - - -def suite(): - testSuite = unittest.TestSuite() - testSuite.addTest(test_bift.suite()) - testSuite.addTest(test_model.suite()) - testSuite.addTest(test_align.suite()) - testSuite.addTest(test_distance.suite()) - testSuite.addTest(test_cormap.suite()) - testSuite.addTest(test_autorg.suite()) - testSuite.addTest(test_sasio.suite()) - testSuite.addTest(test_sas_argparser.suite()) - testSuite.addTest(test_fitting.suite()) - return testSuite - - -if __name__ == "__main__": - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_autorg.py b/freesas/test/test_autorg.py deleted file mode 100644 index e27560f..0000000 --- a/freesas/test/test_autorg.py +++ /dev/null @@ -1,448 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2017 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -__authors__ = ["J. 
Kieffer"] -__license__ = "MIT" -__date__ = "10/06/2020" - -import logging -import unittest -from math import sqrt, pi - -import numpy -from scipy.stats import linregress - -from .utilstests import get_datafile -from ..autorg import ( - autoRg, - RG_RESULT, - linear_fit, - auto_gpa, - auto_guinier, -) -from .._autorg import curate_data # pylint: disable=E0401 -from ..invariants import calc_Rambo_Tainer -from .._bift import distribution_sphere # pylint: disable=E0401 - -logger = logging.getLogger(__name__) - - -def create_synthetic_data(R0=4, I0=100): - """Create idealized data for a sphere of radius R0=4 whose Rg should be 4*sqrt(3/5)""" - npt = 1000 - Dmax = 2 * R0 - size = 5000 - r = numpy.linspace(0, Dmax, npt + 1) - p = distribution_sphere(I0, Dmax, npt) - q = numpy.linspace(0, 10, size) - qr = numpy.outer(q, r / pi) - T = (4 * pi * (r[-1] - r[0]) / npt) * numpy.sinc(qr) - I = T.dot(p) - err = numpy.sqrt(I) - return numpy.vstack((q, I, err)).T[1:] - - -class TestAutoRg(unittest.TestCase): - testfile = get_datafile("bsa_005_sub.dat") - - def __init__(self, testName, **extra_kwargs): - super().__init__(testName) - self.extra_arg = extra_kwargs - - # Reference implementation - atsas_autorg = { - "Version": "Atsas 2.6.1", - "Rg": 2.98016, - "sigma_Rg": 0.156859, - "I0": 61.3093, - "sigma_I0": 0.0606315, - "start_point": 46, - "end_point": 95, - "quality": 0.752564, - "aggregated": 0, - } - - def test_atsas(self): - logger.info("test file: %s", self.testfile) - data = numpy.loadtxt(self.testfile) - atsas_result = self.atsas_autorg.copy() - logger.debug("Reference version: %s" % atsas_result.pop("Version")) - atsas_result = RG_RESULT(**atsas_result) - free_result = autoRg(data) - logger.debug("Ref: %s" % (atsas_result,)) - logger.debug("Obt: %s" % (free_result,)) - self.assertAlmostEqual( - atsas_result.Rg, free_result.Rg, 1, "RG fits within 2 digits" - ) - self.assertAlmostEqual( - atsas_result.I0, - free_result.I0, - msg="I0 fits within +/- 1 ", - delta=1, - ) - self.assertAlmostEqual( - atsas_result.quality, - free_result.quality, - 0, - msg="quality fits within 0 digits", - ) - - def test_synthetic(self): - """Test based on sythetic data: a sphere of radius R0=4 which Rg should be 4*sqrt(3/5)""" - R0 = 4 - I0 = 100 - data = create_synthetic_data(R0=R0, I0=I0) - Rg = autoRg(data) - logger.info("auto_rg %s", Rg) - self.assertAlmostEqual( - R0 * sqrt(3 / 5), Rg.Rg, 0, "Rg matches for a sphere" - ) - self.assertGreater( - R0 * sqrt(3 / 5), - Rg.Rg - Rg.sigma_Rg, - "Rg in range matches for a sphere", - ) - self.assertLess( - R0 * sqrt(3 / 5), - Rg.Rg + Rg.sigma_Rg, - "Rg in range matches for a sphere", - ) - self.assertAlmostEqual(I0, Rg.I0, 0, "I0 matches for a sphere") - self.assertGreater(I0, Rg.I0 - Rg.sigma_I0, "I0 matches for a sphere") - self.assertLess(I0, Rg.I0 + Rg.sigma_I0, "I0 matches for a sphere") - - gpa = auto_gpa(data) - logger.info("auto_gpa %s", gpa) - self.assertAlmostEqual( - gpa.Rg / (R0 * sqrt(3.0 / 5)), 1.00, 0, "Rg matches for a sphere" - ) - self.assertAlmostEqual(gpa.I0 / I0, 1.0, 1, "I0 matches for a sphere") - - guinier = auto_guinier(data) - logger.info("auto_guinier %s", guinier) - self.assertAlmostEqual( - R0 * sqrt(3.0 / 5), guinier.Rg, 0, "Rg matches for a sphere" - ) - sigma_Rg = max(guinier.sigma_Rg, 1e-4) - sigma_I0 = max(guinier.sigma_I0, 1e-4) - self.assertGreater( - R0 * sqrt(3.0 / 5), - guinier.Rg - sigma_Rg, - "Rg in range matches for a sphere", - ) - self.assertLess( - R0 * sqrt(3.0 / 5), - guinier.Rg + sigma_Rg, - "Rg in range matches for a sphere", 
- ) - self.assertAlmostEqual(I0, guinier.I0, 0, "I0 matches for a sphere") - self.assertGreater( - I0, guinier.I0 - sigma_I0, "I0 matches for a sphere" - ) - self.assertLess(I0, guinier.I0 + sigma_I0, "I0 matches for a sphere") - - # Check RT invarients... - rt = calc_Rambo_Tainer(data, guinier) - self.assertIsNotNone( - rt, "Rambo-Tainer invariants are actually calculated" - ) - - def test_auto_gpa_with_outlier(self): - - """ - Test that auto_gpa gives reasonalbe results - even if one data point is excessively large (e.g. hot pixel) - """ - outlier_position = self.extra_arg["outlier_position"] - R0 = 4 - I0 = 100 - data = create_synthetic_data(R0=R0, I0=I0) - data[outlier_position, 1] *= 1000 - gpa = auto_gpa(data) - logger.info("auto_gpa %s", gpa) - self.assertAlmostEqual( - gpa.Rg / (R0 * sqrt(3.0 / 5)), - 1.00, - 0, - f"In case of outlier at {outlier_position} Rg matches for a sphere", - ) - self.assertAlmostEqual( - gpa.I0 / I0, - 1.0, - 1, - f"In case of outlier at {outlier_position} I0 matches for a sphere", - ) - - -class TestFit(unittest.TestCase): - # Testcase originally comes from wikipedia article on linear regression, expected results from scipy.stats.linregress - - def test_linear_fit_static(self): - testx = [ - 1.47, - 1.5, - 1.52, - 1.55, - 1.57, - 1.6, - 1.63, - 1.65, - 1.68, - 1.7, - 1.73, - 1.75, - 1.78, - 1.80, - 1.83, - ] - testy = [ - 52.21, - 53.12, - 54.48, - 55.84, - 57.20, - 58.57, - 59.93, - 61.29, - 63.11, - 64.47, - 66.28, - 68.1, - 69.92, - 72.19, - 74.46, - ] - testw = [1.0] * 15 - testintercept = -39.061956 - testslope = +61.2721865 - fit_result = linear_fit(testx, testy, testw) - # print(fit_result) - self.assertAlmostEqual( - fit_result.intercept, - testintercept, - 5, - "Intercept fits wihtin 4(?) digits", - ) - self.assertAlmostEqual( - fit_result.slope, testslope, 5, "Intercept fits wihtin 4(?) digits" - ) - - def test_linspace(self): - size = 100 - x = numpy.linspace(-10, 10, size) - y = numpy.linspace(10, 0, size) - w = numpy.random.random(size) - fit_result = linear_fit(x, y, w) - # print(fit_result) - self.assertAlmostEqual( - fit_result.intercept, 5, 5, "Intercept fits wihtin 4(?) digits" - ) - self.assertAlmostEqual( - fit_result.slope, -0.5, 5, "Intercept fits wihtin 4(?) digits" - ) - - def test_random(self): - - """ - Tests that our linear regression implementation - gives the same results as scipy.stats for random data - """ - size = 100 - x = numpy.random.random(size) - y = 1.6 * x + 5 + numpy.random.random(size) - w = numpy.ones(size) - fit_result = linear_fit(x, y, w) - ref = linregress(x, y) - self.assertAlmostEqual( - fit_result.intercept, - ref[1], - 5, - "Intercept fits wihtin 4(?) digits", - ) - self.assertAlmostEqual( - fit_result.slope, ref[0], 5, "Intercept fits wihtin 4(?) digits" - ) - self.assertAlmostEqual( - fit_result.R2, - ref.rvalue ** 2, - 5, - "R² value matcheswihtin 4(?) 
digits", - ) - - -class TestDataCuration(unittest.TestCase): - """Tests for the curate_data function.""" - - testfile = get_datafile("bsa_005_sub.dat") - - def __init__(self, testName, **extra_kwargs): - super().__init__(testName) - self.extra_arg = extra_kwargs - - def test_curate_data_BM29_bsa(self): - """Test data curration of "nice" BM29 data.""" - logger.info("test file: %s", self.testfile) - data = numpy.loadtxt(self.testfile) - DTYPE = numpy.float64 - raw_size = len(data) - q_ary = numpy.empty(raw_size, dtype=DTYPE) - i_ary = numpy.empty(raw_size, dtype=DTYPE) - sigma_ary = numpy.empty(raw_size, dtype=DTYPE) - q2_ary = numpy.empty(raw_size, dtype=DTYPE) - lgi_ary = numpy.empty(raw_size, dtype=DTYPE) - wg_ary = numpy.empty(raw_size, dtype=DTYPE) - offsets = numpy.empty(raw_size, dtype=numpy.int32) - data_range = numpy.zeros(3, dtype=numpy.int32) - - curate_data( - data, - q_ary, - i_ary, - sigma_ary, - q2_ary, - lgi_ary, - wg_ary, - offsets, - data_range, - ) - - self.assertListEqual( - list(data_range), - [2, 203, 1033], - msg="reproduced expected BM29 data range", - ) - - def test_curate_synthetic_data(self): - """Test that for idealized data the cut-off is at i0/10.""" - data = create_synthetic_data() - I_one = data[0, 1] - DTYPE = numpy.float64 - raw_size = len(data) - q_ary = numpy.empty(raw_size, dtype=DTYPE) - i_ary = numpy.empty(raw_size, dtype=DTYPE) - sigma_ary = numpy.empty(raw_size, dtype=DTYPE) - q2_ary = numpy.empty(raw_size, dtype=DTYPE) - lgi_ary = numpy.empty(raw_size, dtype=DTYPE) - wg_ary = numpy.empty(raw_size, dtype=DTYPE) - offsets = numpy.empty(raw_size, dtype=numpy.int32) - data_range = numpy.zeros(3, dtype=numpy.int32) - - curate_data( - data, - q_ary, - i_ary, - sigma_ary, - q2_ary, - lgi_ary, - wg_ary, - offsets, - data_range, - ) - - self.assertEqual( - offsets[0], - 0, - msg="curated data for artificial data starts at 0", - ) - - self.assertTrue( - data[data_range[1] - 1, 1] > I_one / 10 - and data[data_range[1] + 1, 1] < I_one / 10, - msg="curated data for artificial data ends at approx. I0/10", - ) - - def test_curate_synthetic_data_with_negative_points(self): - """Test that if one of the first three points is negative, all date before it gets ignored.""" - negative_point_index = self.extra_arg["negative_point_index"] - - data = create_synthetic_data() - DTYPE = numpy.float64 - raw_size = len(data) - data[negative_point_index, 1] = -1 - - q_ary = numpy.empty(raw_size, dtype=DTYPE) - i_ary = numpy.empty(raw_size, dtype=DTYPE) - sigma_ary = numpy.empty(raw_size, dtype=DTYPE) - q2_ary = numpy.empty(raw_size, dtype=DTYPE) - lgi_ary = numpy.empty(raw_size, dtype=DTYPE) - wg_ary = numpy.empty(raw_size, dtype=DTYPE) - offsets = numpy.empty(raw_size, dtype=numpy.int32) - data_range = numpy.zeros(3, dtype=numpy.int32) - - curate_data( - data, - q_ary, - i_ary, - sigma_ary, - q2_ary, - lgi_ary, - wg_ary, - offsets, - data_range, - ) - - self.assertEqual( - offsets[0], - negative_point_index + 1, - msg=f"curated data for artificial data starts after negative data point for negative point at {negative_point_index + 1}", - ) - - self.assertTrue( - data[offsets[data_range[1]] - 1, 1] - > data[negative_point_index + 1, 1] / 10 - and data[offsets[data_range[1]] + 1, 1] - < data[negative_point_index + 1, 1] / 10, - msg=f"curated data for artificial data ends at approx. 
I(point after negaitve point)/10 if negative point at {negative_point_index + 1}", - ) - - -def suite(): - """Generic builder for the test suite.""" - testSuite = unittest.TestSuite() - testSuite.addTest(TestAutoRg("test_atsas")) - testSuite.addTest(TestAutoRg("test_synthetic")) - for outlier_position in range(3): - testSuite.addTest( - TestAutoRg( - "test_auto_gpa_with_outlier", outlier_position=outlier_position - ) - ) - testSuite.addTest(TestFit("test_linear_fit_static")) - testSuite.addTest(TestFit("test_linspace")) - testSuite.addTest(TestDataCuration("test_curate_data_BM29_bsa")) - testSuite.addTest(TestDataCuration("test_curate_synthetic_data")) - for negative_point_index in range(3): - testSuite.addTest( - TestDataCuration( - "test_curate_synthetic_data_with_negative_points", - negative_point_index=negative_point_index, - ) - ) - return testSuite - - -if __name__ == "__main__": - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_average.py b/freesas/test/test_average.py deleted file mode 100644 index 317bd11..0000000 --- a/freesas/test/test_average.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 -from __future__ import print_function - -__author__ = "Guillaume" -__license__ = "MIT" -__copyright__ = "2015, ESRF" - -import numpy -import unittest -from .utilstests import get_datafile -from ..model import SASModel -from ..average import Grid, AverModels - -import logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("AlignModels_test") - - -class TestAverage(unittest.TestCase): - testfile1 = get_datafile("model-01.pdb") - testfile2 = get_datafile("model-02.pdb") - inputfiles = [testfile1, testfile2] - grid = Grid(inputfiles) - - def test_gridsize(self): - inputfiles = self.inputfiles - grid = self.grid - size = grid.spatial_extent() - coordmax = numpy.array([size[0:3]], dtype="float") - coordmin = numpy.array([size[3:6]], dtype="float") - - pb = 0 - for i in inputfiles: - m = SASModel(i) - a = coordmin + m.atoms[:, 0:3] - b = m.atoms[:, 0:3] - coordmax - if (a >= 0.0).any() or (b >= 0.0).any(): - pb += 1 - self.assertEqual(pb, 0, msg="computed size is not the good one") - - def test_knots(self): - grid = self.grid - nbknots = numpy.random.randint(4000, 6000) - threshold = 10.0 # acceptable difference between nbknots and the effective nb of knots in percent - grid.calc_radius(nbknots) - grid.make_grid() - gap = (1.0 * (grid.nbknots - nbknots) / nbknots) * 100 - self.assertGreater(threshold, gap, msg="final number of knots too different of wanted number: %s != %s" % (nbknots, grid.nbknots)) - - def test_makegrid(self): - grid = self.grid - lattice = grid.make_grid() - m = SASModel(lattice) - self.assertAlmostEqual(m.fineness, 2 * grid.radius, 10, msg="grid do not have the computed radius") - - def test_read(self): - inputfiles = self.inputfiles - average = AverModels(inputfiles, self.grid.coordknots) - models = [SASModel(inputfiles[1]), SASModel(inputfiles[0])] - average.read_files(reference=1) - diff = 0.0 - for i in range(len(inputfiles)): - diff += (models[i].atoms - average.models[i].atoms).max() - self.assertAlmostEqual(diff, 0.0, 10, msg="Files not read properly") - - def test_occupancy(self): - average = AverModels(self.inputfiles, self.grid.coordknots) - average.read_files() - occ_grid = average.assign_occupancy() - average.grid = occ_grid - assert occ_grid.shape[-1] == 5, "problem in grid shape" - diff = occ_grid[:-1, 3] - occ_grid[1:, 3] - self.assertTrue(diff.max() >= 0.0, msg="grid is not 
properly sorted with occupancy") - - -def suite(): - testSuite = unittest.TestSuite() - testSuite.addTest(TestAverage("test_gridsize")) - testSuite.addTest(TestAverage("test_knots")) - testSuite.addTest(TestAverage("test_makegrid")) - testSuite.addTest(TestAverage("test_read")) - testSuite.addTest(TestAverage("test_occupancy")) - return testSuite - - -if __name__ == '__main__': - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_bift.py b/freesas/test/test_bift.py deleted file mode 100644 index c1b27bb..0000000 --- a/freesas/test/test_bift.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2017 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -__authors__ = ["J. 
Kieffer"] -__license__ = "MIT" -__date__ = "10/06/2020" - -import numpy -import unittest -from .utilstests import get_datafile -from ..bift import auto_bift -from .._bift import BIFT, distribution_parabola, distribution_sphere, \ - ensure_edges_zero, smooth_density -import logging -logger = logging.getLogger(__name__) -import time - - -class TestBIFT(unittest.TestCase): - - DMAX = 10 - NPT = 100 - SIZE = 1000 - - @classmethod - def setUpClass(cls): - super(TestBIFT, cls).setUpClass() - cls.r = numpy.linspace(0, cls.DMAX, cls.NPT + 1) - dr = cls.DMAX / cls.NPT - cls.p = -cls.r * (cls.r - cls.DMAX) # Nice parabola - q = numpy.linspace(0, 8 * cls.DMAX / 3, cls.SIZE + 1) - sincqr = numpy.sinc(numpy.outer(q, cls.r / numpy.pi)) - I = 4 * numpy.pi * (cls.p * sincqr).sum(axis=-1) * dr - err = numpy.sqrt(I) - cls.I0 = I[0] - cls.q = q[1:] - cls.I = I[1:] - cls.err = err[1:] - cls.Rg = numpy.sqrt(0.5 * numpy.trapz(cls.p * cls.r ** 2, cls.r) / numpy.trapz(cls.p, cls.r)) - print(cls.Rg) - - @classmethod - def tearDownClass(cls): - super(TestBIFT, cls).tearDownClass() - cls.r = cls.p = cls.I = cls.q = cls.err = None - - def test_autobift(self): - data = numpy.vstack((self.q, self.I, self.err)).T - t0 = time.perf_counter() - bo = auto_bift(data) - key, value, valid = bo.get_best() -# print("key is ", key) - stats = bo.calc_stats() -# print("stat is ", stats) - logger.info("Auto_bift time: %s", time.perf_counter() - t0) - self.assertAlmostEqual(self.DMAX / key.Dmax, 1, 1, "DMax is correct") - self.assertAlmostEqual(self.I0 / stats.I0_avg, 1, 1, "I0 is correct") - self.assertAlmostEqual(self.Rg / stats.Rg_avg, 1, 2, "Rg is correct") - - def test_BIFT(self): - bift = BIFT(self.q, self.I, self.err) - # test two scan functions - key = bift.grid_scan(9, 11, 5, 10, 100, 5, 100) - # print("key is ", key) - self.assertAlmostEqual(self.DMAX / key.Dmax, 1, 2, "DMax is correct") - res = bift.monte_carlo_sampling(10, 3, 100) - # print("res is ", res) - self.assertAlmostEqual(self.DMAX / res.Dmax_avg, 1, 4, "DMax is correct") - - def test_disributions(self): - pp = numpy.asarray(distribution_parabola(self.I0, self.DMAX, self.NPT)) - ps = numpy.asarray(distribution_sphere(self.I0, self.DMAX, self.NPT)) - self.assertAlmostEqual(numpy.trapz(ps, self.r) * 4 * numpy.pi / self.I0, 1, 3, "Distribution for a sphere looks OK") - self.assertAlmostEqual(numpy.trapz(pp, self.r) * 4 * numpy.pi / self.I0, 1, 3, "Distribution for a parabola looks OK") - self.assertTrue(numpy.allclose(pp, self.p, 1e-4), "distribution matches") - - def test_fixEdges(self): - ones = numpy.ones(self.NPT) - ensure_edges_zero(ones) - self.assertAlmostEqual(ones[0], 0, msg="1st point set to 0") - self.assertAlmostEqual(ones[-1], 0, msg="last point set to 0") - self.assertTrue(numpy.allclose(ones[1:-1], numpy.ones(self.NPT-2), 1e-7), msg="non-edge points unchanged") - - def test_smoothing(self): - ones = numpy.ones(self.NPT) - empty = numpy.empty(self.NPT) - smooth_density(ones,empty) - self.assertTrue(numpy.allclose(ones, empty, 1e-7), msg="flat array smoothed into flat array") - random = numpy.random.rand(self.NPT) - smooth = numpy.empty(self.NPT) - smooth_density(random,smooth) - self.assertAlmostEqual(random[0], smooth[0], msg="first points of random array and smoothed random array match") - self.assertAlmostEqual(random[-1], smooth[-1], msg="last points of random array and smoothed random array match") - self.assertTrue(smooth[1]>=min(smooth[0], smooth[2]) and smooth[1]<=max(smooth[0], smooth[2]), msg="second point of random smoothed array between 1st 
and 3rd") - self.assertTrue(smooth[-2]>=min(smooth[-1], smooth[-3]) and smooth[-2]<= max(smooth[-1], smooth[-3]), msg="second to last point of random smoothed array between 3rd to last and last") - sign = numpy.sign(random[1:-3] - smooth[2:-2]) * numpy.sign(smooth[2:-2] - random[3:-1]) - self.assertTrue(numpy.allclose(sign, numpy.ones(self.NPT-4), 1e-7), msg="central points of random array and smoothed random array alternate") - -def suite(): - testSuite = unittest.TestSuite() - testSuite.addTest(TestBIFT("test_disributions")) - testSuite.addTest(TestBIFT("test_autobift")) - testSuite.addTest(TestBIFT("test_fixEdges")) - testSuite.addTest(TestBIFT("test_smoothing")) - return testSuite - - -if __name__ == '__main__': - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_cormap.py b/freesas/test/test_cormap.py deleted file mode 100644 index 7f8233f..0000000 --- a/freesas/test/test_cormap.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 -from __future__ import print_function - -__author__ = "Jerome" -__license__ = "MIT" -__copyright__ = "2017, ESRF" - -import numpy -import unittest - -import logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("test_cormap") -from .. import cormap - - -class TestCormap(unittest.TestCase): - - def test_longest(self): - size = 1000 - target = 50 - start = 100 - - data = numpy.ones(size, dtype="float32") - res = cormap.measure_longest(data) - self.assertEqual(res, size, msg="computed size is correct: positive") - - data -= 2 - res = cormap.measure_longest(data) - self.assertEqual(res, size, msg="computed size is correct: negative") - - data[:] = 0 - data[start: start + target] = 1.0 - res = cormap.measure_longest(data) - self.assertEqual(res, target, msg="computed size is correct: positive/zero") - data = numpy.zeros(size, dtype="float32") - data[start: start + target] = -1.0 - res = cormap.measure_longest(data) - self.assertEqual(res, target, msg="computed size is correct: negative/zero") - data = numpy.fromfunction(lambda n:(-1) ** n, (size,)) - data[start: start + target] = 1.0 - res = cormap.measure_longest(data) - self.assertEqual(res, target + 1, msg="computed size is correct: positive/alternating") - data = numpy.fromfunction(lambda n:(-1) ** n, (size,)) - data[start: start + target] = -1.0 - res = cormap.measure_longest(data) - self.assertEqual(res, target + 1, msg="computed size is correct: negative/alternating") - - def test_stats(self): - self.assertEqual(cormap.LROH.A(10, 0), 1) - self.assertEqual(cormap.LROH.A(10, 1), 144) - self.assertEqual(cormap.LROH.A(10, 2), 504) - self.assertEqual(cormap.LROH.A(10, 10), 1024) - self.assertEqual(cormap.LROH.A(10, 11), 1024) - - self.assertEqual(cormap.LROH.A(0, 3), 1) - self.assertEqual(cormap.LROH.A(1, 3), 2) - self.assertEqual(cormap.LROH.A(2, 3), 4) - self.assertEqual(cormap.LROH.A(3, 3), 8) - self.assertEqual(cormap.LROH.A(4, 3), 15) - self.assertEqual(cormap.LROH.A(5, 3), 29) - self.assertEqual(cormap.LROH.A(6, 3), 56) - self.assertEqual(cormap.LROH.A(7, 3), 108) - self.assertEqual(cormap.LROH.A(8, 3), 208) - - self.assertAlmostEqual(cormap.LROH(200, 0), 1) - self.assertAlmostEqual(cormap.LROH(200, 4), 0.97, 2) - self.assertAlmostEqual(cormap.LROH(200, 5), 0.80, 2) - self.assertAlmostEqual(cormap.LROH(200, 6), 0.54, 2) - self.assertAlmostEqual(cormap.LROH(200, 7), 0.32, 2) - self.assertAlmostEqual(cormap.LROH(200, 8), 0.17, 2) - self.assertAlmostEqual(cormap.LROH(200, 9), 0.09, 2) - self.assertAlmostEqual(cormap.LROH(200, 10), 
0.05, 2) - self.assertAlmostEqual(cormap.LROH(200, 11), 0.02, 2) - - -def suite(): - testSuite = unittest.TestSuite() - testSuite.addTest(TestCormap("test_longest")) - testSuite.addTest(TestCormap("test_longest")) - testSuite.addTest(TestCormap("test_stats")) - return testSuite - - -if __name__ == '__main__': - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_distance.py b/freesas/test/test_distance.py deleted file mode 100644 index 490e6a6..0000000 --- a/freesas/test/test_distance.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -from __future__ import print_function - -__author__ = "Jérôme Kieffer" -__license__ = "MIT" -__copyright__ = "2015, ESRF" -__date__ = "16/12/2015" - -import numpy -import unittest -from .utilstests import get_datafile -from ..model import SASModel -import logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("cdistance_test") - - -class TestDistance(unittest.TestCase): - testfile1 = get_datafile("model-01.pdb") - testfile2 = get_datafile("dammif-01.pdb") - - def test_invariants(self): - m = SASModel() - m.read(self.testfile1) - f_np, r_np, d_np = m.calc_invariants(False) - f_cy, r_cy, d_cy = m.calc_invariants(True) - self.assertAlmostEqual(f_np, f_cy, 10, "fineness is the same %s!=%s" % (f_np, f_cy)) - self.assertAlmostEqual(r_np, r_cy, 10, "Rg is the same %s!=%s" % (r_np, r_cy)) - self.assertAlmostEqual(d_np, d_cy, 10, "Dmax is the same %s!=%s" % (d_np, d_cy)) - - def test_distance(self): - m = SASModel() - n = SASModel() - m.read(self.testfile1) - n.read(self.testfile2) - f_np = m.dist(n, m.atoms, n.atoms, False) - f_cy = m.dist(n, m.atoms, n.atoms, True) - self.assertAlmostEqual(f_np, f_cy, 10, "distance is the same %s!=%s" % (f_np, f_cy)) - - def test_same(self): - m = SASModel() - n = SASModel() - m.read(self.testfile1) - n.read(self.testfile1) - numpy.random.shuffle(n.atoms) - f_np = m.dist(n, m.atoms, n.atoms, False) - f_cy = m.dist(n, m.atoms, n.atoms, True) - self.assertAlmostEqual(f_np, 0, 10, "NSD not nul with np") - self.assertAlmostEqual(f_cy, 0, 10, "NSD not nul with cy") - - -def suite(): - testSuite = unittest.TestSuite() - testSuite.addTest(TestDistance("test_invariants")) - testSuite.addTest(TestDistance("test_distance")) - testSuite.addTest(TestDistance("test_same")) - return testSuite - -if __name__ == '__main__': - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_fitting.py b/freesas/test/test_fitting.py deleted file mode 100644 index 1e55f33..0000000 --- a/freesas/test/test_fitting.py +++ /dev/null @@ -1,938 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -"""Test the functionality of fitting module.""" - -__authors__ = ["Martha Brennich"] -__license__ = "MIT" -__date__ = "16/07/2021" - - -import unittest -from unittest.mock import patch, MagicMock -import logging -import sys -import importlib -import platform -from io import StringIO -import pathlib -import contextlib -from types import SimpleNamespace -from typing import Callable -from errno import ENOENT -import numpy -from ..fitting import ( - set_logging_level, - get_output_destination, - get_guinier_header, - rg_result_to_output_line, - get_linesep, - run_guinier_fit, -) -from ..autorg import RG_RESULT, InsufficientDataError, NoGuinierRegionError -from ..sas_argparser import GuinierParser - -if sys.version_info.minor > 6: - from unittest.mock import mock_open -else: - from .mock_open_38 import mock_open - - -logger = logging.getLogger(__name__) - - -def 
reload_os_and_fitting(): - """Some tests patch os and need to reload the modules.""" - os = importlib.import_module("os") - os = importlib.reload(os) - fit = importlib.import_module("..fitting", "freesas.subpkg") - fit = importlib.reload(fit) - return fit - - -def get_dummy_guinier_parser(**parse_output): - """Function which provides a fake GuinierParser with a predefined parse result.""" - - def get_mock_parse(**kwargs): - def mock_parse(): - return SimpleNamespace(**kwargs) - - return mock_parse - - parser = GuinierParser(prog="test", description="test", epilog="test") - parser.parse_args = get_mock_parse(**parse_output) - return parser - - -def patch_linesep(test_function): - """Patch fitting.linesep to "linesep".""" - - linesep_patch = patch( - "freesas.fitting.get_linesep", - MagicMock(return_value="linesep"), - ) - return linesep_patch(test_function) - - -def patch_collect_files(test_function): - """Patch fitting.collect_files to return Paths "test" and "test2".""" - - collect_files_patch = patch( - "freesas.fitting.collect_files", - MagicMock(return_value=[pathlib.Path("test"), pathlib.Path("test2")]), - ) - return collect_files_patch(test_function) - - -def counted(function: Callable) -> Callable: - """Wrapper for functions to keep track on how often it has been called.""" - - def wrapped(*args, **kwargs): - wrapped.calls += 1 - return function(*args, **kwargs) - - wrapped.calls = 0 - return wrapped - - -def build_mock_for_load_scattering_with_Errors(erronous_file: dict): - - """Create mock for loading of data from a file. - The resulting function will raise an error, - for files for which an error is provided in errenous_file, - and an ndarry for all other files.""" - - def mock_for_load_scattering(file: pathlib.Path): - if file.name in erronous_file: - raise erronous_file[file.name] - else: - return numpy.array( - [[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ) - - return mock_for_load_scattering - - -class TestFitting(unittest.TestCase): - def test_set_logging_level_does_not_change_logging_level_if_input_lower_1( - self, - ): - - """ - Test that the logging level only gets changed if the requested level is > 0. - """ - - initial_logging_level = logging.root.level - set_logging_level(0) - self.assertEqual( - logging.root.level, - initial_logging_level, - msg="setting verbosity to 0 dos not affect logging level", - ) - set_logging_level(-2) - self.assertEqual( - logging.root.level, - initial_logging_level, - msg="settting verbosity to -2 does not affect logging level", - ) - # Ensure that initial logging level is restored - logging.root.setLevel(initial_logging_level) - - def test_set_logging_level_sets_logging_to_INFO_if_input_is_1( - self, - ): - - """ - Test that the logging level gets changed to INFO if verbosity is 1. - """ - initial_logging_level = logging.root.level - # Ensure that the function actually changes the level - logging.root.setLevel(logging.WARNING) - - set_logging_level(1) - self.assertEqual( - logging.root.level, - logging.INFO, - msg="settting verbosity to 1 sets logging level to INFO", - ) - - # Ensure that initial logging level is restored - logging.root.setLevel(initial_logging_level) - - def test_set_logging_level_sets_logging_to_DEBUG_if_input_is_2_or_more( - self, - ): - - """ - Test that the logging level gets changed to DEBUG if verbosity is 2 or larger. 
- """ - initial_logging_level = logging.root.level - # Ensure that the function actually changes the level - logging.root.setLevel(logging.WARNING) - - set_logging_level(2) - self.assertEqual( - logging.root.level, - logging.DEBUG, - msg="settting verbosity to 2 sets logging level to DEBUG", - ) - - set_logging_level(3) - self.assertEqual( - logging.root.level, - logging.DEBUG, - msg="settting verbosity to 3 sets logging level to DEBUG", - ) - - # Ensure that initial logging level is restored - logging.root.setLevel(initial_logging_level) - - @patch.dict("sys.modules", {"nt": MagicMock()}) - def test_get_linesep_returns_rn_if_output_is_stdout_on_windows(self): - - """ - Test that get_linesep() returns \r\n if output destination is sys.stdout on Windows. - """ - # Reload to apply patches - with patch("sys.builtin_module_names", ["nt"]): - fit = reload_os_and_fitting() - - self.assertEqual(fit.get_linesep(sys.stdout), "\r\n") - - # Cleanup - reload_os_and_fitting() - - @unittest.skipIf(platform.system() == "Windows", "Only POSIX") - def test_get_linesep_returns_n_if_output_is_stdout_on_posix( - self, - ): - - """ - Test that get_linesep() returns \n if output destination is sys.stdout on Posix. - Only should run on posix. - """ - self.assertEqual(get_linesep(sys.stdout), "\n") - - @patch.dict("sys.modules", {"nt": MagicMock()}) - def test_get_linesep_returns_n_if_output_is_filestream_on_windows(self): - - """ - Test that get_linesep() returns \n if output destination is a filestream on Windows. - """ - # Reload to apply patches - with patch("sys.builtin_module_names", ["nt"]): - fit = reload_os_and_fitting() - output_dest = StringIO() - self.assertEqual(fit.get_linesep(output_dest), "\n") - - # Cleanup - _ = reload_os_and_fitting() - - @unittest.skipIf(platform.system() == "Windows", "Only POSIX") - def test_get_linesep_returns_n_if_output_is_filestream_on_posix( - self, - ): - - """ - Test that get_linesep() returns \n if output destination is filestream on Posix. - Only should run on posix. 
- """ - output_dest = StringIO() - self.assertEqual(get_linesep(output_dest), "\n") - - def test_get_output_destination_with_path_input_returns_writable_io( - self, - ): - - """Test that by calling get_output_destination with a Path as input - we obtain write access to the file of Path.""" - mocked_open = mock_open() - with patch("builtins.open", mocked_open): - with get_output_destination(pathlib.Path("test")) as destination: - self.assertTrue( - destination.writable(), - msg="file destination is writable", - ) - mocked_open.assert_called_once_with(pathlib.Path("test"), "w") - - def test_get_output_destination_without_input_returns_stdout( - self, - ): - - """Test that by calling get_output_destination without input - we obtain sys.stdout.""" - with get_output_destination() as destination: - self.assertEqual( - destination, - sys.stdout, - msg="default destination is sys.stdout", - ) - - def test_closing_get_output_destination_does_not_close_stdout( - self, - ): - - """Test that get_output_destination() can be safely used without closing sys.stdout.""" - - with get_output_destination() as _: - pass - output_catcher = StringIO() - with contextlib.redirect_stdout(output_catcher): - sys.stdout.write("test after context closed") - self.assertEqual( - output_catcher.getvalue(), - "test after context closed", - msg="Can write to sys.stdout after closing desitnation context", - ) - - def test_get_guinier_header_for_csv( - self, - ): - - """Test that by calling get_guinier_header with input csv we get the correct line.""" - - header = get_guinier_header("linesep", "csv") - self.assertEqual( - header, - "File,Rg,Rg StDev,I(0),I(0) StDev,First point,Last point,Quality,Aggregatedlinesep", - msg="csv header is correct", - ) - - def test_get_guinier_header_for_ssv( - self, - ): - - """Test that by calling get_guinier_header with input ssv we get an empty string.""" - - header = get_guinier_header("linesep", "ssv") - self.assertEqual( - header, - "", - msg="ssv header is correct", - ) - - def test_get_guinier_header_for_native( - self, - ): - - """Test that by calling get_guinier_header with input native we get an empty string.""" - - header = get_guinier_header("linesep", "native") - self.assertEqual( - header, - "", - msg="native header is correct", - ) - - def test_get_guinier_header_without_input_format( - self, - ): - - """Test that by calling get_guinier_header without input format we get an empty string.""" - - header = get_guinier_header("linesep", None) - self.assertEqual( - header, - "", - msg="header for undefined format is correct", - ) - - def test_collect_files_only_returns_existing_files(self): - - """Test that collect_files discards strings that do not match an existing file.""" - - def os_stat_mock(path, **_): - if "good" in pathlib.Path(path).name: - pass - else: - if sys.version_info.minor > 7: - raise ValueError - else: - raise OSError(ENOENT, "dummy") - - mocked_stat = MagicMock(side_effect=os_stat_mock) - with patch("os.stat", mocked_stat): - local_pathlib = importlib.import_module("pathlib") - local_pathlib = importlib.reload(local_pathlib) - fit = importlib.import_module("..fitting", "freesas.subpkg") - fit = importlib.reload(fit) - self.assertEqual( - fit.collect_files(["testgood", "testbad"]), - [local_pathlib.Path("testgood")], - ) - # Reload without the patch - local_pathlib = importlib.reload(local_pathlib) - reload_os_and_fitting() - - @patch("platform.system", MagicMock(return_value="Windows")) - def test_collect_files_globs_on_windows(self): - - """Test that 
collect_files globs on Windows if no existent files provided.""" - - def os_stat_mock(path): - if sys.version_info.minor > 7: - raise ValueError - else: - raise OSError(ENOENT, "dummy") - - mocked_stat = MagicMock(side_effect=os_stat_mock) - mocked_glob = MagicMock( - side_effect=[ - (p for p in [pathlib.Path("pathA"), pathlib.Path("pathB")]) - ] - ) - with patch("os.stat", mocked_stat): - with patch.object(pathlib.Path, "glob", mocked_glob): - fit = importlib.import_module("..fitting", "freesas.subpkg") - fit = importlib.reload(fit) - self.assertEqual( - fit.collect_files(["testgood"]), - [pathlib.Path("pathA"), pathlib.Path("pathB")], - msg="collect_files on windows returns list if fiel argument does not exist", - ) - mocked_glob.assert_called_once() - - # Reload without the patch - reload_os_and_fitting() - - def test_rg_result_line_csv(self): - - """Test the formatting of a csv result line for a Guinier fit.""" - - test_result = RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - expected_line = "test.file,3.1000,0.1000,103.0000,2.5000, 13,207,50.1000,0.0500lineend" - obtained_line = rg_result_to_output_line( - rg_result=test_result, - afile=pathlib.Path("test.file"), - output_format="csv", - linesep="lineend", - ) - self.assertEqual( - obtained_line, expected_line, msg="csv line for RG_Result correct" - ) - - def test_rg_result_line_ssv(self): - - """Test the formatting of a ssv result line for a Guinier fit.""" - - test_result = RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - expected_line = "3.1000 0.1000 103.0000 2.5000 13 207 50.1000 0.0500 test.filelineend" - obtained_line = rg_result_to_output_line( - rg_result=test_result, - afile=pathlib.Path("test.file"), - output_format="ssv", - linesep="lineend", - ) - self.assertEqual( - obtained_line, expected_line, msg="ssv line for RG_Result correct" - ) - - def test_rg_result_line_native(self): - """Test the formatting of a native result line for a Guinier fit.""" - test_result = RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - expected_line = "test.file Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% lineend" - obtained_line = rg_result_to_output_line( - rg_result=test_result, - afile=pathlib.Path("test.file"), - output_format="native", - linesep="lineend", - ) - self.assertEqual( - obtained_line, - expected_line, - msg="native line for RG_Result correct", - ) - - def test_rg_result_line_no_format(self): - - """Test the formatting of a native result line for a Guinier fit.""" - - test_result = RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - expected_line = "test.file Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% lineend" - obtained_line = rg_result_to_output_line( - rg_result=test_result, - afile=pathlib.Path("test.file"), - linesep="lineend", - ) - self.assertEqual( - obtained_line, - expected_line, - msg="line for RG_Result without format specification correct", - ) - - @patch( - "freesas.fitting.collect_files", - MagicMock(return_value=[pathlib.Path("test")]), - ) - @patch( - "freesas.fitting.load_scattering_data", - MagicMock( - return_value=numpy.array( - [[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ) - ), - ) - @patch( - "freesas.fitting.get_linesep", - MagicMock(return_value="linesep"), - ) - def test_run_guinier_fit_uses_provided_fit_function(self): - """Test that run_guinier_fit uses fit function provided in the arguments - and outputs its result in a line.""" - - @counted - def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: - return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 
207, 50.1, 0.05) - - dummy_parser = get_dummy_guinier_parser( - verbose=0, - file=[pathlib.Path("test")], - output=None, - format=None, - unit="nm", - ) - output_catcher = StringIO() - with contextlib.redirect_stdout(output_catcher): - run_guinier_fit( - fit_function=dummy_fit_function, - parser=dummy_parser, - logger=logger, - ) - expected_output = "test Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" - self.assertEqual( - output_catcher.getvalue(), - expected_output, - msg="run_guinier_fit provides expected output", - ) - self.assertEqual( - dummy_fit_function.calls, - 1, - msg="Provided fit function was called once", - ) - - @patch( - "freesas.fitting.collect_files", - MagicMock(return_value=[pathlib.Path("test"), pathlib.Path("test")]), - ) - @patch( - "freesas.fitting.load_scattering_data", - MagicMock( - return_value=numpy.array( - [[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ) - ), - ) - @patch( - "freesas.fitting.get_linesep", - MagicMock(return_value="linesep"), - ) - def test_run_guinier_fit_iterates_over_files(self): - - """Test that run_guinier_fit calls the provided fit function for each provided file.""" - - @counted - def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: - return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - - dummy_parser = get_dummy_guinier_parser( - verbose=0, - file=[pathlib.Path("test"), pathlib.Path("test")], - output=None, - format=None, - unit="nm", - ) - output_catcher = StringIO() - with contextlib.redirect_stdout(output_catcher): - run_guinier_fit( - fit_function=dummy_fit_function, - parser=dummy_parser, - logger=logger, - ) - expected_output = ( - "test Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" - * 2 - ) - self.assertEqual( - output_catcher.getvalue(), - expected_output, - msg="run_guinier_fit provides expected output", - ) - self.assertEqual( - dummy_fit_function.calls, - 2, - msg="Provided fit function was called twice", - ) - - @unittest.skip("Unreliable") - @patch( - "freesas.fitting.collect_files", - MagicMock(return_value=[pathlib.Path("test"), pathlib.Path("test2")]), - ) - @patch( - "freesas.fitting.load_scattering_data", - MagicMock( - side_effect=build_mock_for_load_scattering_with_Errors( - {pathlib.Path("test").name: OSError} - ) - ), - ) - @patch( - "freesas.fitting.get_linesep", - MagicMock(return_value="linesep"), - ) - def test_run_guinier_outputs_error_if_file_not_found(self): - """Test that run_guinier_fit outputs an error if data loading raises OSError - and continues to the next file.""" - - @counted - def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: - return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - - dummy_parser = get_dummy_guinier_parser( - verbose=0, - file=[pathlib.Path("test"), pathlib.Path("test2")], - output=None, - format=None, - unit="nm", - ) - output_catcher_stdout = StringIO() - output_catcher_stderr = StringIO() - with contextlib.redirect_stdout(output_catcher_stdout): - with contextlib.redirect_stderr(output_catcher_stderr): - run_guinier_fit( - fit_function=dummy_fit_function, - parser=dummy_parser, - logger=logger, - ) - expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" - self.assertEqual( - output_catcher_stdout.getvalue(), - expected_stdout_output, - msg="run_guinier_fit provides expected stdout output", - ) - expected_stderr_output = ( - "ERROR:freesas.test.test_fitting:Unable to read file test" - ) - self.assertTrue( - expected_stderr_output in 
output_catcher_stderr.getvalue(), - msg="run_guinier_fit provides expected stderr output", - ) - self.assertEqual( - dummy_fit_function.calls, - 1, - msg="Provided fit function was called once", - ) - - @unittest.skip("Unreliable") - @patch( - "freesas.fitting.load_scattering_data", - MagicMock( - side_effect=build_mock_for_load_scattering_with_Errors( - {pathlib.Path("test").name: ValueError} - ) - ), - ) - @patch_collect_files - @patch_linesep - def test_run_guinier_outputs_error_if_file_not_parsable(self): - - """Test that run_guinier_fit outputs an error if data loading raises ValueError - and continues to the next file.""" - - @counted - def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: - return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - - dummy_parser = get_dummy_guinier_parser( - verbose=0, - file=[pathlib.Path("test"), pathlib.Path("test2")], - output=None, - format=None, - unit="nm", - ) - output_catcher_stdout = StringIO() - output_catcher_stderr = StringIO() - with contextlib.redirect_stdout(output_catcher_stdout): - with contextlib.redirect_stderr(output_catcher_stderr): - run_guinier_fit( - fit_function=dummy_fit_function, - parser=dummy_parser, - logger=logger, - ) - expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" - self.assertEqual( - output_catcher_stdout.getvalue(), - expected_stdout_output, - msg="run_guinier_fit provides expected stdout output", - ) - expected_stderr_output = ( - "ERROR:freesas.test.test_fitting:Unable to parse file test" - ) - self.assertTrue( - expected_stderr_output in output_catcher_stderr.getvalue(), - msg="run_guinier_fit provides expected stderr output", - ) - self.assertEqual( - dummy_fit_function.calls, - 1, - msg="Provided fit function was called once", - ) - - @patch( - "freesas.fitting.load_scattering_data", - MagicMock( - side_effect=[ - numpy.array( - [[0.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ), - numpy.array( - [[2.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ), - ] - ), - ) - @patch_collect_files - @patch_linesep - def test_run_guinier_outputs_error_if_fitting_raises_insufficient_data_error( - self, - ): - - """Test that run_guinier_fit outputs an error if fitting raises InsufficientDataError - and continues to the next file.""" - - @counted - def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: - if input_data[0, 0] <= 0.1: - raise InsufficientDataError - return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - - dummy_parser = get_dummy_guinier_parser( - verbose=0, - file=[pathlib.Path("test"), pathlib.Path("test2")], - output=None, - format=None, - unit="nm", - ) - output_catcher_stdout = StringIO() - output_catcher_stderr = StringIO() - with contextlib.redirect_stdout(output_catcher_stdout): - with contextlib.redirect_stderr(output_catcher_stderr): - run_guinier_fit( - fit_function=dummy_fit_function, - parser=dummy_parser, - logger=logger, - ) - expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" - self.assertEqual( - output_catcher_stdout.getvalue(), - expected_stdout_output, - msg="run_guinier_fit provides expected stdout output", - ) - expected_stderr_output = "test, InsufficientDataError: " - self.assertTrue( - expected_stderr_output in output_catcher_stderr.getvalue(), - msg="run_guinier_fit provides expected stderr output for fitting InsufficientError", - ) - self.assertEqual( - dummy_fit_function.calls, - 2, - msg="Provided fit function was called twice", - ) - - @patch( - 
"freesas.fitting.load_scattering_data", - MagicMock( - side_effect=[ - numpy.array( - [[0.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ), - numpy.array( - [[2.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ), - ] - ), - ) - @patch_collect_files - @patch_linesep - def test_run_guinier_outputs_error_if_fitting_raises_no_guinier_region_error( - self, - ): - - """Test that run_guinier_fit outputs an error if fitting raises NoGuinierRegionError - and continues to the next file.""" - - @counted - def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: - if input_data[0, 0] <= 0.1: - raise NoGuinierRegionError - return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - - dummy_parser = get_dummy_guinier_parser( - verbose=0, - file=[pathlib.Path("test"), pathlib.Path("test2")], - output=None, - format=None, - unit="nm", - ) - output_catcher_stdout = StringIO() - output_catcher_stderr = StringIO() - with contextlib.redirect_stdout(output_catcher_stdout): - with contextlib.redirect_stderr(output_catcher_stderr): - run_guinier_fit( - fit_function=dummy_fit_function, - parser=dummy_parser, - logger=logger, - ) - expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" - self.assertEqual( - output_catcher_stdout.getvalue(), - expected_stdout_output, - msg="run_guinier_fit provides expected stdout output", - ) - expected_stderr_output = "test, NoGuinierRegionError: " - self.assertTrue( - expected_stderr_output in output_catcher_stderr.getvalue(), - msg="run_guinier_fit provides expected stderr output fitting NoGuinierRegionError", - ) - self.assertEqual( - dummy_fit_function.calls, - 2, - msg="Provided fit function was called twice", - ) - - @patch( - "freesas.fitting.load_scattering_data", - MagicMock( - side_effect=[ - numpy.array( - [[0.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ), - numpy.array( - [[2.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ), - ] - ), - ) - @patch_collect_files - @patch_linesep - def test_run_guinier_outputs_error_if_fitting_raises_value_error( - self, - ): - - """Test that run_guinier_fit outputs an error if fitting raises ValueError - and continues to the next file.""" - - @counted - def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: - if input_data[0, 0] <= 0.1: - raise ValueError - return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - - dummy_parser = get_dummy_guinier_parser( - verbose=0, - file=[pathlib.Path("test"), pathlib.Path("test2")], - output=None, - format=None, - unit="nm", - ) - output_catcher_stdout = StringIO() - output_catcher_stderr = StringIO() - with contextlib.redirect_stdout(output_catcher_stdout): - with contextlib.redirect_stderr(output_catcher_stderr): - run_guinier_fit( - fit_function=dummy_fit_function, - parser=dummy_parser, - logger=logger, - ) - expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" - self.assertEqual( - output_catcher_stdout.getvalue(), - expected_stdout_output, - msg="run_guinier_fit provides expected stdout output", - ) - expected_stderr_output = "test, ValueError: " - self.assertTrue( - expected_stderr_output in output_catcher_stderr.getvalue(), - msg="run_guinier_fit provides expected stderr output fitting ValueError", - ) - self.assertEqual( - dummy_fit_function.calls, - 2, - msg="Provided fit function was called twice", - ) - - @patch( - "freesas.fitting.load_scattering_data", - MagicMock( - side_effect=[ - numpy.array( - [[0.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ), - 
numpy.array( - [[2.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 3.0]] - ), - ] - ), - ) - @patch_collect_files - @patch_linesep - def test_run_guinier_outputs_error_if_fitting_raises_index_error( - self, - ): - - """Test that run_guinier_fit outputs an error if fitting raises IndexError - and continues to the next file.""" - - @counted - def dummy_fit_function(input_data: numpy.ndarray) -> RG_RESULT: - if input_data[0, 0] <= 0.1: - raise IndexError - return RG_RESULT(3.1, 0.1, 103, 2.5, 13, 207, 50.1, 0.05) - - dummy_parser = get_dummy_guinier_parser( - verbose=0, - file=[pathlib.Path("test"), pathlib.Path("test2")], - output=None, - format=None, - unit="nm", - ) - output_catcher_stdout = StringIO() - output_catcher_stderr = StringIO() - with contextlib.redirect_stdout(output_catcher_stdout): - with contextlib.redirect_stderr(output_catcher_stderr): - run_guinier_fit( - fit_function=dummy_fit_function, - parser=dummy_parser, - logger=logger, - ) - expected_stdout_output = "test2 Rg=3.1000(±0.1000) I0=103.0000(±2.5000) [13-207] 5010.00% linesep" - self.assertEqual( - output_catcher_stdout.getvalue(), - expected_stdout_output, - msg="run_guinier_fit provides expected stdout output", - ) - expected_stderr_output = "test, IndexError: " - self.assertTrue( - expected_stderr_output in output_catcher_stderr.getvalue(), - msg="run_guinier_fit provides expected stderr output for fitting IndexError", - ) - self.assertEqual( - dummy_fit_function.calls, - 2, - msg="Provided fit function was called twice", - ) - - -def suite(): - """Build a test suite from the TestFitting class.""" - - test_suite = unittest.TestSuite() - for class_element in dir(TestFitting): - if class_element.startswith("test"): - test_suite.addTest(TestFitting(class_element)) - return test_suite - - -if __name__ == "__main__": - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_model.py b/freesas/test/test_model.py deleted file mode 100644 index bdbeb9c..0000000 --- a/freesas/test/test_model.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 -from __future__ import print_function - -__author__ = "Guillaume" -__license__ = "MIT" -__copyright__ = "2015, ESRF" - -import numpy -import unittest -import os -import tempfile -from .utilstests import get_datafile -from ..model import SASModel -from ..transformations import translation_from_matrix, euler_from_matrix -import logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger("SASModel_test") - - -def assign_random_mol(inf=None, sup=None): - if not inf: - inf = 0 - if not sup: - sup = 100 - molecule = numpy.random.randint(inf, sup, size=400).reshape(100, 4).astype(float) - molecule[:, -1] = 1.0 - m = SASModel(molecule) - return m - - -class TesttParser(unittest.TestCase): - testfile = get_datafile("model-01.pdb") - - def setUp(self): - unittest.TestCase.setUp(self) - self.tmpdir = tempfile.mkdtemp() - self.outfile = os.path.join(self.tmpdir, "out.pdb") - - def tearDown(self): - unittest.TestCase.tearDown(self) - for fn in (self.outfile, self.tmpdir): - if os.path.exists(fn): - if os.path.isdir(fn): - os.rmdir(fn) - else: - os.unlink(fn) - - def test_same(self): - m = SASModel() - m.read(self.testfile) - m.save(self.outfile) - infile = open(self.testfile).read() - outfile = open(self.outfile).read() - self.assertEqual(infile, outfile, msg="file content is the same") - - def test_rfactor(self): - m = SASModel() - m.read(self.testfile) - n = SASModel() - n.read(self.testfile) - self.assertEqual(m.rfactor, 
n.rfactor, msg="R-factor is not the same %s != %s" % (m.rfactor, n.rfactor)) - - def test_init(self): - m = SASModel() - m.read(self.testfile) - n = SASModel(self.testfile) - param1 = m.rfactor - param2 = n.rfactor - self.assertEqual(param1, param2, msg="pdb file not read correctly") - - def test_centroid(self): - m = assign_random_mol() - m.centroid() - if len(m.com) != 3: - logger.error("center of mass has not been saved correctly : length of COM position vector = %s!=3" % (len(m.com))) - mol_centered = m.atoms[:, 0:3] - m.com - center = mol_centered.mean(axis=0) - norm = (center * center).sum() - self.assertAlmostEqual(norm, 0, 12, msg="molecule is not centered : norm of the COM position vector %s!=0" % (norm)) - - def test_inertia_tensor(self): - m = assign_random_mol() - m.inertiatensor() - tensor = m.inertensor - assert tensor.shape == (3, 3), "inertia tensor has not been saved correctly : shape of inertia matrix = %s" % (tensor.shape) - - def test_canonical_translate(self): - m = assign_random_mol() - trans = m.canonical_translate() - if trans.shape != (4, 4): - logger.error("pb with translation matrix shape: shape=%s" % (trans.shape)) - com = m.com - com_componants = [com[0], com[1], com[2]] - trans_vect = [-trans[0, -1], -trans[1, -1], -trans[2, -1]] - self.assertEqual(com_componants, trans_vect, msg="do not translate on canonical position") - - def test_canonical_rotate(self): - m = assign_random_mol() - rot = m.canonical_rotate() - if rot.shape != (4, 4): - logger.error("pb with rotation matrix shape") - if not m.enantiomer: - logger.error("enantiomer has not been selected") - det = numpy.linalg.det(rot) - self.assertAlmostEqual(det, 1, 10, msg="rotation matrix determinant is not 1: %s" % (det)) - - def test_canonical_parameters(self): - m = assign_random_mol() - m.canonical_parameters() - can_param = m.can_param - if len(can_param) != 6: - logger.error("canonical parameters has not been saved properly") - com_trans = translation_from_matrix(m.canonical_translate()) - euler_rot = euler_from_matrix(m.canonical_rotate()) - out_param = [com_trans[0], com_trans[1], com_trans[2], euler_rot[0], euler_rot[1], euler_rot[2]] - self.assertEqual(can_param, out_param, msg="canonical parameters are not the good ones") - - def test_dist(self): - m = assign_random_mol() - n = SASModel(m.atoms) - distance = m.dist(n, m.atoms, n.atoms) - self.assertEqual(distance, 0, msg="NSD different of 0: %s!=0" % (distance)) - - def test_can_transform(self): - m = assign_random_mol() - m.canonical_parameters() - p0 = m.can_param - mol1 = m.transform(p0, [1, 1, 1]) - if abs(mol1 - m.atoms).max() == 0: - logger.error("molecule did not move") - m.atoms = mol1 - m.centroid() - m.inertiatensor() - com = m.com - tensor = m.inertensor - diag = numpy.eye(3) - matrix = tensor - tensor * diag - self.assertAlmostEqual(abs(com).sum(), 0, 10, msg="molecule not on its center of mass") - self.assertAlmostEqual(abs(matrix).sum(), 0, 10, "inertia moments unaligned ") - - def test_dist_move(self): - m = assign_random_mol() - n = SASModel(m.atoms) - m.canonical_parameters() - n.canonical_parameters() - if abs(n.atoms - m.atoms).max() != 0: - logger.error("molecules are different") - p0 = m.can_param - dist_after_mvt = m.dist_after_movement(p0, n, [1, 1, 1]) - self.assertEqual(dist_after_mvt, 0, msg="NSD different of 0: %s!=0" % (dist_after_mvt)) - - def test_reverse_transform(self): - m = assign_random_mol() - n = SASModel(m.atoms) - m.canonical_parameters() - m.atoms = m.transform(m.can_param, [1, 1, 1], reverse=None) - 
m.atoms = m.transform(m.can_param, [1, 1, 1], reverse=True) - dist = m.dist(n, m.atoms, n.atoms) - self.assertAlmostEqual(dist, 0.0, 10, msg="pb with reverse transformation : %s != 0.0" % dist) - - -def suite(): - testSuite = unittest.TestSuite() - testSuite.addTest(TesttParser("test_same")) - testSuite.addTest(TesttParser("test_rfactor")) - testSuite.addTest(TesttParser("test_init")) - testSuite.addTest(TesttParser("test_centroid")) - testSuite.addTest(TesttParser("test_inertia_tensor")) - testSuite.addTest(TesttParser("test_canonical_translate")) - testSuite.addTest(TesttParser("test_canonical_rotate")) - testSuite.addTest(TesttParser("test_canonical_parameters")) - testSuite.addTest(TesttParser("test_dist")) - testSuite.addTest(TesttParser("test_can_transform")) - testSuite.addTest(TesttParser("test_dist_move")) - testSuite.addTest(TesttParser("test_reverse_transform")) - return testSuite - - -if __name__ == '__main__': - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_sas_argparser.py b/freesas/test/test_sas_argparser.py deleted file mode 100644 index 3575bea..0000000 --- a/freesas/test/test_sas_argparser.py +++ /dev/null @@ -1,603 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -"""Test the functionality of SASParser and GuinierParser""" - -__authors__ = ["Martha Brennich"] -__license__ = "MIT" -__date__ = "25/03/2021" - - -import unittest -import logging -import io -import contextlib -from pathlib import Path -from .. import dated_version as freesas_version -from ..sas_argparser import SASParser, GuinierParser - - -logger = logging.getLogger(__name__) - - -class TestSasArgParser(unittest.TestCase): - def test_minimal_guinier_parser_requires_file_argument(self): - """ - Test that Guinier parser provides error if no file argument is provided. - """ - basic_parser = GuinierParser("program", "description", "epilog") - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args() - except SystemExit: - pass - - self.assertTrue( - basic_parser.usage in output_catcher.getvalue(), - msg="GuinierParser provides usage if no file provided", - ) - self.assertTrue( - "the following arguments are required: FILE" - in output_catcher.getvalue(), - msg="GuinierParser states that the FILE argument is missing if no file provided", - ) - - def test_minimal_guinier_parser_parses_list_of_files(self): - """ - Test that the Guinier parsers parses a list of files. - """ - basic_parser = GuinierParser("program", "description", "epilog") - - parsed_arguments = basic_parser.parse_args(["afile", "bfile", "cfile"]) - - self.assertEqual( - set(parsed_arguments.file), - {"afile", "bfile", "cfile"}, - msg="GuinierParser parses list of files", - ) - - def test_add_file_argument_enables_SASParser_to_recognize_file_lists( - self, - ): - """ - Test that add_file_argument adds the ability to parse a file list to SASParser. 
- """ - basic_parser = SASParser("program", "description", "epilog") - - # Before running add_file_argument a file argument is not recognized - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args(["afile"]) - except SystemExit: - pass - self.assertTrue( - "unrecognized arguments: afile" in output_catcher.getvalue(), - msg="Minimal SASParser does not recognize file argument", - ) - - basic_parser.add_file_argument(help_text="file help") - parsed_arguments = basic_parser.parse_args(["afile", "bfile", "cfile"]) - - self.assertEqual( - set(parsed_arguments.file), - {"afile", "bfile", "cfile"}, - msg="GuinierParser parses list of files", - ) - - def test_minimal_parser_usage_includes_program_name(self): - """ - Test that minimal parser includes the provided program in the usage string. - """ - basic_parser = SASParser("test❤️", "description", "epilog") - - self.assertTrue( - "test❤️" in basic_parser.usage, - msg="SASParser usage includes program name", - ) - - def test_minimal_guinier_parser_usage_includes_program_name(self): - """ - Test that minimal parser includes the provided program in the usage string. - """ - basic_parser = GuinierParser("test❤️", "description", "epilog") - - self.assertTrue( - "test❤️" in basic_parser.usage, - msg="GuinierParser usage includes program name", - ) - - def test_minimal_guinier_parser_help_includes_program_description_epilog( - self, - ): - """ - Test that minimal guinier parser includes help includes - the provided program name, description and epilog. - """ - basic_parser = GuinierParser("test❤️", "description📚", "epilog🎦") - output_catcher = io.StringIO() - - try: - with contextlib.redirect_stdout(output_catcher): - _ = basic_parser.parse_args(["--help"]) - except SystemExit: - pass - - self.assertTrue( - "test❤️" in output_catcher.getvalue(), - msg="GuinierParser outputs program name in help", - ) - - self.assertTrue( - "description📚" in output_catcher.getvalue(), - msg="GuinierParser outputs description in help", - ) - - self.assertTrue( - "epilog🎦" in output_catcher.getvalue(), - msg="GuinierParser outputs eplilog name in help", - ) - - def test_minimal_parser_help_includes_program_description_epilog(self): - """ - Test that minimal parser includes help includes - the provided program name, description and epilog. 
- """ - basic_parser = SASParser("test❤️", "description📚", "epilog🎦") - output_catcher = io.StringIO() - - try: - with contextlib.redirect_stdout(output_catcher): - _ = basic_parser.parse_args(["--help"]) - except SystemExit: - pass - - self.assertTrue( - "test❤️" in output_catcher.getvalue(), - msg="SASParser outputs program name in help", - ) - - self.assertTrue( - "description📚" in output_catcher.getvalue(), - msg="SASParser outputs description in help", - ) - - self.assertTrue( - "epilog🎦" in output_catcher.getvalue(), - msg="SASParser outputs eplilog name in help", - ) - - def test_minimal_parser_default_verbosity_level_is_0(self): - """ - Test that the parser sets the verbosity to 0 if no args are provided - """ - basic_parser = SASParser("program", "description", "epilog") - parsed_arguments = basic_parser.parse_args() - self.assertEqual( - parsed_arguments.verbose, - 0, - msg="SASParser default verbosity is 0", - ) - - def test_minimal_guinier_parser_default_verbosity_level_is_0(self): - """ - Test that the Guinier parser sets the verbosity to 0 if no args are provided - """ - basic_parser = GuinierParser("program", "description", "epilog") - parsed_arguments = basic_parser.parse_args(["afile"]) - self.assertEqual( - parsed_arguments.verbose, - 0, - msg="GuinierParser default verbosity is 0", - ) - - def test_minimal_parser_accumulates_verbosity_level(self): - """ - Test that the parser parser increases the verbosity level to two - if -vv argument is provided. - """ - basic_parser = SASParser("program", "description", "epilog") - parsed_arguments = basic_parser.parse_args(["-vv"]) - self.assertEqual( - parsed_arguments.verbose, - 2, - msg="SASParser verbosity increased to 2 by -vv", - ) - - def test_minimal_guinier_parser_accumulates_verbosity_level(self): - """ - Test that the parser parser increases the verbosity level to two - if -vv argument is provided. - """ - basic_parser = GuinierParser("program", "description", "epilog") - parsed_arguments = basic_parser.parse_args(["afile", "-vv"]) - self.assertEqual( - parsed_arguments.verbose, - 2, - msg="GuinierParser verbosity increased to 2 by -vv", - ) - - def test_minimal_parser_provides_correct_version(self): - """ - Test that parser provides the correct app version. - """ - basic_parser = SASParser("program", "description", "epilog") - output_catcher = io.StringIO() - try: - with contextlib.redirect_stdout(output_catcher): - _ = basic_parser.parse_args(["--version"]) - except SystemExit: - pass - - self.assertTrue( - freesas_version.version in output_catcher.getvalue(), - msg="SASParser outputs consistent version", - ) - self.assertTrue( - freesas_version.date in output_catcher.getvalue(), - msg="SASParser outputs consistent date", - ) - - def test_minimal_guinier_parser_provides_correct_version(self): - """ - Test that parser provides the correct app version. - """ - basic_parser = GuinierParser("program", "description", "epilog") - output_catcher = io.StringIO() - try: - with contextlib.redirect_stdout(output_catcher): - _ = basic_parser.parse_args(["--version"]) - except SystemExit: - pass - - self.assertTrue( - freesas_version.version in output_catcher.getvalue(), - msg="GuinierParser outputs consistent version", - ) - self.assertTrue( - freesas_version.date in output_catcher.getvalue(), - msg="GuinierParser outputs consistent date", - ) - - def test_minimal_guinier_parser_accepts_output_file_argument(self): - """ - Test that minimal Guinier parser accepts one output file argument. 
- """ - basic_parser = GuinierParser("program", "description", "epilog") - parsed_arguments = basic_parser.parse_args(["afile", "-o", "out.file"]) - - self.assertEqual( - parsed_arguments.output, - Path("out.file"), - msg="Minimal GuinierParser accepts output file argument", - ) - - def test_add_output_filename_argument_adds_output_file_argument_to_SASParser( - self, - ): - """ - Test that add_output_filename_argument adds one output file argument to as SASParser. - """ - basic_parser = SASParser("program", "description", "epilog") - - # Before running add_output_filename_argument -o file is not regognized - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args(["-o", "out.file"]) - except SystemExit: - pass - self.assertTrue( - "unrecognized arguments: -o out.file" in output_catcher.getvalue(), - msg="Minimal SASParser does not recognize -o argument", - ) - - basic_parser.add_output_filename_argument() - parsed_arguments = basic_parser.parse_args(["-o", "out.file"]) - - self.assertEqual( - parsed_arguments.output, - Path("out.file"), - msg="SASParser accepts output file argument" - "after running add_output_filename_argument()", - ) - - def test_minimal_guinier_parser_accepts_output_format_argument(self): - """ - Test that minimal Guinier parser accepts one output data format argument. - """ - basic_parser = GuinierParser("program", "description", "epilog") - parsed_arguments = basic_parser.parse_args(["afile", "-f", "aformat"]) - - self.assertEqual( - parsed_arguments.format, - "aformat", - msg="Minimal GuinierParser accepts output data format argument", - ) - - def test_add_output_data_format_adds_output_format_argument_to_SASParser( - self, - ): - """ - Test that add_output_data_format adds one output data format argument to as SASParser. - """ - basic_parser = SASParser("program", "description", "epilog") - - # Before running add_output_filename_argument -o file is not regognized - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args(["-f", "aformat"]) - except SystemExit: - pass - self.assertTrue( - "unrecognized arguments: -f aformat" in output_catcher.getvalue(), - msg="Minimal SASParser does not recognize -f argument", - ) - - basic_parser.add_output_data_format() - parsed_arguments = basic_parser.parse_args(["-f", "aformat"]) - - self.assertEqual( - parsed_arguments.format, - "aformat", - msg="SASParser accepts output data format argument" - "after running add_output_data_format()", - ) - - def test_minimal_guinier_parser_accepts_q_unit_argument(self): - """ - Test that minimal Guinier parser accepts a q unit argument. - """ - basic_parser = GuinierParser("program", "description", "epilog") - parsed_arguments = basic_parser.parse_args(["afile", "-u", "nm"]) - - self.assertEqual( - parsed_arguments.unit, - "nm", - msg="Minimal GuinierParser accepts q unit argument", - ) - - def test_add_q_unit_argument_adds_add_q_unit_argument_to_SASParser( - self, - ): - """ - Test that add_q_unit_argument adds a q unit argument to as SASParser. 
- """ - basic_parser = SASParser("program", "description", "epilog") - - # Before running add_output_filename_argument -o file is not regognized - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args(["-u", "nm"]) - except SystemExit: - pass - self.assertTrue( - "unrecognized arguments: -u nm" in output_catcher.getvalue(), - msg="Minimal SASParser does not recognize -u argument", - ) - - basic_parser.add_q_unit_argument() - parsed_arguments = basic_parser.parse_args(["-u", "nm"]) - - self.assertEqual( - parsed_arguments.unit, - "nm", - msg="SASParser accepts q unit argument after running add_q_unit_argument()", - ) - - def test_SASParser_q_unit_argument_allows_predefined_units( - self, - ): - """ - Test that the q unit argument of a SASparser accepts "nm", "Å", "A". - """ - basic_parser = SASParser("program", "description", "epilog") - basic_parser.add_q_unit_argument() - - parsed_arguments = basic_parser.parse_args(["-u", "nm"]) - self.assertEqual( - parsed_arguments.unit, - "nm", - msg="SASParser accepts unit format nm", - ) - - parsed_arguments = basic_parser.parse_args(["-u", "A"]) - self.assertEqual( - parsed_arguments.unit, - "Å", - msg="SASParser accepts unit format A", - ) - - parsed_arguments = basic_parser.parse_args(["-u", "Å"]) - self.assertEqual( - parsed_arguments.unit, - "Å", - msg="SASParser accepts unit format A", - ) - - def test_SASParser_q_unit_argument_does_not_allow_not_predefined_units( - self, - ): - """ - Test that the q unit argument of a SASparser does not accept a - unit that is not "nm", "Å", "A". - """ - basic_parser = SASParser("program", "description", "epilog") - basic_parser.add_q_unit_argument() - - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args(["-u", "m"]) - except SystemExit: - pass - self.assertTrue( - "argument -u/--unit: invalid choice: 'm' (choose from 'nm', 'Å', 'A')" - in output_catcher.getvalue(), - msg="SASParser does not accept '-u m' argument", - ) - - def test_SASParser_q_unit_A_gets_converted_to_Å( - self, - ): - """ - Test that for a SASParder q unit input "A" gets converted to "Å". - """ - basic_parser = SASParser("program", "description", "epilog") - basic_parser.add_q_unit_argument() - - parsed_arguments = basic_parser.parse_args(["-u", "A"]) - self.assertEqual( - parsed_arguments.unit, - "Å", - msg="SASParser converts unit input 'A' to 'Å'", - ) - - def test_GuinierParser_q_unit_argument_allows_predefined_units( - self, - ): - """ - Test that the q unit argument of a Guinierparser accepts "nm", "Å", "A". - """ - basic_parser = GuinierParser("program", "description", "epilog") - - parsed_arguments = basic_parser.parse_args(["afile", "-u", "nm"]) - self.assertEqual( - parsed_arguments.unit, - "nm", - msg="SASParser accepts unit format nm", - ) - - parsed_arguments = basic_parser.parse_args(["afile", "-u", "A"]) - self.assertEqual( - parsed_arguments.unit, - "Å", - msg="SASParser accepts unit format A", - ) - - parsed_arguments = basic_parser.parse_args(["afile", "-u", "Å"]) - self.assertEqual( - parsed_arguments.unit, - "Å", - msg="SASParser accepts unit format A", - ) - - def test_GuinierParser_q_unit_argument_does_not_allow_not_predefined_units( - self, - ): - """ - Test that the q unit argument of a Guinierparser does not accept a - unit that is not "nm", "Å", "A". 
- """ - basic_parser = GuinierParser("program", "description", "epilog") - - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args(["afile", "-u", "m"]) - except SystemExit: - pass - self.assertTrue( - "argument -u/--unit: invalid choice: 'm' (choose from 'nm', 'Å', 'A')" - in output_catcher.getvalue(), - msg="SASParser does not accept '-u m' argument", - ) - - def test_GuinierParser_q_unit_A_gets_converted_to_Å( - self, - ): - """ - Test that for a GuinierParser q unit input "A" gets converted to "Å". - """ - basic_parser = GuinierParser("program", "description", "epilog") - - parsed_arguments = basic_parser.parse_args(["afile", "-u", "A"]) - self.assertEqual( - parsed_arguments.unit, - "Å", - msg="SASParser converts unit input 'A' to 'Å'", - ) - - def test_add_argument_adds_an_argument_to_a_SASParser( - self, - ): - """ - Test that new arguments can be added to SASParser. - """ - basic_parser = SASParser("program", "description", "epilog") - - # Before running add_argument -c - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args(["-c"]) - except SystemExit: - pass - self.assertTrue( - "unrecognized arguments: -c" in output_catcher.getvalue(), - msg="Minimal SASParser does not recognize -c argument", - ) - - basic_parser.add_argument( - "-c", - "--check", - action="store_true", - ) - - parsed_arguments = basic_parser.parse_args(["-c"]) - self.assertEqual( - parsed_arguments.check, - True, - msg="-c argument added to SASParser", - ) - - def test_add_argument_adds_an_argument_to_a_GuinierParser( - self, - ): - """ - Test that new arguments can be added to GuinierParser. - """ - basic_parser = GuinierParser("program", "description", "epilog") - - # Before running add_argument -c - output_catcher = io.StringIO() - try: - with contextlib.redirect_stderr(output_catcher): - _ = basic_parser.parse_args(["afile", "-c"]) - except SystemExit: - pass - print(output_catcher.getvalue()) - self.assertTrue( - "unrecognized arguments: -c" in output_catcher.getvalue(), - msg="Minimal GuinierParser does not recognize -c argument", - ) - - basic_parser.add_argument( - "-c", - "--check", - action="store_true", - ) - - parsed_arguments = basic_parser.parse_args(["afile", "-c"]) - self.assertEqual( - parsed_arguments.check, - True, - msg="-c argument added to GuinierParser", - ) - - -def suite(): - """Build a test suite from the TestSasArgParser class""" - test_suite = unittest.TestSuite() - for class_element in dir(TestSasArgParser): - if class_element.startswith("test"): - test_suite.addTest(TestSasArgParser(class_element)) - return test_suite - - -if __name__ == "__main__": - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/test_sasio.py b/freesas/test/test_sasio.py deleted file mode 100644 index c897b21..0000000 --- a/freesas/test/test_sasio.py +++ /dev/null @@ -1,192 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Project: freesas -# https://github.com/kif/freesas -# -# Copyright (C) 2017-2022 European Synchrotron Radiation Facility, Grenoble, France -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to 
do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -__authors__ = ["Martha Brennich", "Jérôme Kieffer"] -__license__ = "MIT" -__date__ = "16/09/2022" - -import unittest -import logging -import io -from numpy import array, allclose -from ..sasio import parse_ascii_data, load_scattering_data, \ - convert_inverse_angstrom_to_nanometer -logger = logging.getLogger(__name__) - -class TestSasIO(unittest.TestCase): - - def test_parse_3_ok(self): - """ - Test for successful parsing of file with some invalid lines - """ - file_content = ["Test data for", - "file parsing", - "1 1 1", - "2 a 2", - "3 3 3", - "some stuff at the end", - ] - expected_result = array([[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]]) - data = parse_ascii_data(file_content, number_of_columns=3) - self.assertTrue(allclose(data, expected_result, 1e-7), - msg="3 column parse returns expected result") - - def test_parse_no_data(self): - """ - Test that an empty input list raises a ValueError - """ - file_content = [] - with self.assertRaises(ValueError, msg="Empty list cannot be parsed"): - parse_ascii_data(file_content, number_of_columns=3) - - def test_parse_no_valid_data(self): - """ - Test that an input list with no valid data raises a ValueError - """ - file_content = ["a a a", "2 4", "3 4 5 6", "# 3 4 6"] - with self.assertRaises(ValueError, - msg="File with no float float float data" - " cannot be parsed"): - parse_ascii_data(file_content, number_of_columns=3) - - def test_load_clean_data(self): - """ - Test that clean float float float data is loaded correctly. - """ - file_content = ["# Test data for" - "# file parsing", - "1 1 1", - "2.0 2.0 1.0", - "3 3 3", - "#REMARK some stuff at the end", - ] - expected_result = array([[1.0, 1.0, 1.0], - [2.0, 2.0, 1.0], - [3.0, 3.0, 3.0]]) - file_data = "\n".join(file_content) - mocked_file = io.StringIO(file_data) - data = load_scattering_data(mocked_file) - self.assertTrue(allclose(data, expected_result, 1e-7), - msg="Sunny data loaded correctly") - - def test_load_data_with_unescaped_header(self): - """ - Test that an unescaped header does not hinder loading. - """ - file_content = ["Test data for", - "file parsing", - "1 1 1", - "2.0 2.0 1.0", - "3 3 3", - ] - expected_result = array([[1.0, 1.0, 1.0], - [2.0, 2.0, 1.0], - [3.0, 3.0, 3.0]]) - file_data = "\n".join(file_content) - mocked_file = io.StringIO(file_data) - data = load_scattering_data(mocked_file) - - self.assertTrue(allclose(data, expected_result, 1e-7), - msg="Sunny data loaded correctly") - - def test_load_data_with_unescaped_footer(self): - """ - Test that an unescaped footer does not hinder loading. 
- """ - file_content = [ - "1 1 1", - "2.0 2.0 1.0", - "3 3 3", - "REMARK some stuff at the end" - ] - expected_result = array([[1.0, 1.0, 1.0], - [2.0, 2.0, 1.0], - [3.0, 3.0, 3.0]]) - file_data = "\n".join(file_content) - mocked_file = io.StringIO(file_data) - data = load_scattering_data(mocked_file) - - self.assertTrue(allclose(data, expected_result, 1e-7), - msg="Sunny data loaded correctly") - - - def test_load_invalid_data(self): - """ - Test that invalid data raises a ValueError. - """ - file_content = ["a a a", "2 4", "3 4 5 6", "# 3 4 6"] - file_data = "\n".join(file_content) - mocked_file = io.StringIO(file_data) - with self.assertRaises(ValueError, - msg="File with no float float float " - "data cannot be loaded"): - load_scattering_data(mocked_file) - - def test_convert_inverse_angstrom_to_nanometer(self): - """ - Test conversion of data with q in 1/Å to 1/nm - """ - input_data = array([[1.0, 1.0, 1.0], - [2.0, 2.0, 1.0], - [3.0, 3.0, 3.0]]) - expected_result = array([[10, 1.0, 1.0], - [20, 2.0, 1.0], - [30, 3.0, 3.0]]) - result = convert_inverse_angstrom_to_nanometer(input_data) - self.assertTrue(allclose(result, expected_result, 1e-7), - msg="Converted to 1/nm from 1/Å") - - def test_unit_conversion_creates_new_array(self): - """ - Test conversion of data does not change original data - """ - input_data = array([[1.0, 1.0, 1.0], - [2.0, 2.0, 1.0], - [3.0, 3.0, 3.0]]) - expected_data = array([[1.0, 1.0, 1.0], - [2.0, 2.0, 1.0], - [3.0, 3.0, 3.0]]) - _ = convert_inverse_angstrom_to_nanometer(input_data) - self.assertTrue(allclose(input_data, expected_data, 1e-7), - msg="Conversion function does not change its input") - - -def suite(): - test_suite = unittest.TestSuite() - test_suite.addTest(TestSasIO("test_parse_3_ok")) - test_suite.addTest(TestSasIO("test_parse_no_data")) - test_suite.addTest(TestSasIO("test_parse_no_valid_data")) - test_suite.addTest(TestSasIO("test_load_clean_data")) - test_suite.addTest(TestSasIO("test_load_data_with_unescaped_header")) - test_suite.addTest(TestSasIO("test_load_data_with_unescaped_footer")) - test_suite.addTest(TestSasIO("test_load_invalid_data")) - test_suite.addTest(TestSasIO("test_convert_inverse_angstrom_to_nanometer")) - test_suite.addTest(TestSasIO("test_unit_conversion_creates_new_array")) - return test_suite - - -if __name__ == '__main__': - runner = unittest.TextTestRunner() - runner.run(suite()) diff --git a/freesas/test/utilstests.py b/freesas/test/utilstests.py deleted file mode 100644 index d899f57..0000000 --- a/freesas/test/utilstests.py +++ /dev/null @@ -1,24 +0,0 @@ -#!usr/bin/env python -# coding: utf-8 - -__author__ = "Jérôme Kieffer" -__license__ = "MIT" -__date__ = "19/07/2021" -__copyright__ = "2015-2021, ESRF" - -import logging -logger = logging.getLogger("utilstest") -from silx.resources import ExternalResources -downloader = ExternalResources("freesas", "http://www.silx.org/pub/freesas/testdata", "FREESAS_TESTDATA") - - -def get_datafile(name): - """Provides the full path of a test file, - downloading it from the internet if needed - - :param name: name of the file to get - :return: full path of the datafile - """ - logger.info(f"Download file {name}") - fullpath = downloader.getfile(name) - return fullpath From f19151a1345398c3564eb30b8b3bb8a5dc9fa2ba Mon Sep 17 00:00:00 2001 From: Jerome Kieffer Date: Tue, 28 Nov 2023 10:16:44 +0100 Subject: [PATCH 16/45] fix path --- src/freesas/_version.py | 2 +- src/freesas/app/meson.build | 2 +- src/freesas/test/meson.build | 2 +- 3 files changed, 3 insertions(+), 3 
deletions(-)

diff --git a/src/freesas/_version.py b/src/freesas/_version.py
index 355029c..6691418 100755
--- a/src/freesas/_version.py
+++ b/src/freesas/_version.py
@@ -72,7 +72,7 @@
 MINOR = 9
 MICRO = 9
 RELEV = "dev" # <16
-SERIAL = 0 # <16
+SERIAL = 1 # <16
 
 date = __date__
 
diff --git a/src/freesas/app/meson.build b/src/freesas/app/meson.build
index c114d40..aac2c24 100644
--- a/src/freesas/app/meson.build
+++ b/src/freesas/app/meson.build
@@ -10,5 +10,5 @@ py.install_sources([
     'supycomb.py'
 ],
     pure: false,  # Will be installed next to binaries
-    subdir: 'freesas.app'  # Folder relative to site-packages to install to
+    subdir: 'freesas/app'  # Folder relative to site-packages to install to
 )
diff --git a/src/freesas/test/meson.build b/src/freesas/test/meson.build
index 8358bc1..bb84baf 100644
--- a/src/freesas/test/meson.build
+++ b/src/freesas/test/meson.build
@@ -15,5 +15,5 @@ py.install_sources([
     'utilstests.py',
 ],
     pure: false,  # Will be installed next to binaries
-    subdir: 'freesas.test'  # Folder relative to site-packages to install to
+    subdir: 'freesas/test'  # Folder relative to site-packages to install to
 )

From 656e35e060b2ec1b7337b75378470f993ff679d0 Mon Sep 17 00:00:00 2001
From: Jerome Kieffer
Date: Tue, 28 Nov 2023 10:45:03 +0100
Subject: [PATCH 17/45] auxiliary functions

---
 bootstrap.py | 252 ++++++++++-----------
 run_tests.py | 601 +++++++++++++++++++++++++--------------------------
 2 files changed, 410 insertions(+), 443 deletions(-)
 mode change 100644 => 100755 run_tests.py

diff --git a/bootstrap.py b/bootstrap.py
index f73ff1a..c147415 100755
--- a/bootstrap.py
+++ b/bootstrap.py
@@ -10,73 +10,94 @@
 __authors__ = ["Frédéric-Emmanuel Picca", "Jérôme Kieffer"]
 __contact__ = "jerome.kieffer@esrf.eu"
 __license__ = "MIT"
-__date__ = "09/07/2020"
+__date__ = "03/03/2023"
 
 import sys
 import os
-import distutils.util
 import subprocess
 import logging
-import collections
-from argparse import ArgumentParser
-
+if sys.version_info[:2] < (3, 11):
+    import tomli
+else:
+    import tomllib as tomli
 
 logging.basicConfig()
 logger = logging.getLogger("bootstrap")
 
 
-def is_debug_python():
-    """Returns true if the Python interpreter is in debug mode."""
-    try:
-        import sysconfig
-    except ImportError:  # pragma nocover
-        # Python < 2.7
-        import distutils.sysconfig as sysconfig
+def get_project_name(root_dir):
+    """Retrieve the project name from the pyproject.toml file in root_dir.
 
-    if sysconfig.get_config_var("Py_DEBUG"):
-        return True
+    :param str root_dir: Root directory of the project.
+    :return: The name of the project stored in root_dir
+    """
+    logger.debug("Getting project name in %s", root_dir)
+    with open("pyproject.toml") as f:
+        pyproject = tomli.loads(f.read())
+    return pyproject.get("project", {}).get("name")
 
-    return hasattr(sys, "gettotalrefcount")
 
+def build_project(name, root_dir):
+    """Build the project locally using meson
 
-def _distutils_dir_name(dname="lib"):
-    """
-    Returns the name of a distutils build directory
+    :param str name: Name of the project.
+    :param str root_dir: Root directory of the project
+    :return: The path to the directory where the build was performed
     """
-    platform = distutils.util.get_platform()
-    architecture = "%s.%s-%i.%i" % (dname, platform,
-                                    sys.version_info[0], sys.version_info[1])
-    if is_debug_python():
-        architecture += "-pydebug"
-    return architecture
-
-
-def _distutils_scripts_name():
-    """Return the name of the distrutils scripts sirectory"""
-    f = "scripts-{version[0]}.{version[1]}"
-    return f.format(version=sys.version_info)
+    extra = []
+    libdir = "lib"
+    if sys.platform == "win32":
+        libdir = "Lib"
+    # extra = ["--buildtype", "plain"]
+
+    build = os.path.join(root_dir, "build")
+    if not(os.path.isdir(build) and os.path.isdir(os.path.join(build, name))):
+        p = subprocess.Popen(["meson", "setup", "build"],
+                             shell=False, cwd=root_dir, env=os.environ)
+        p.wait()
+    p = subprocess.Popen(["meson", "configure", "--prefix", "/"] + extra,
+                         shell=False, cwd=build, env=os.environ)
+    p.wait()
+    p = subprocess.Popen(["meson", "install", "--destdir", "."],
+                         shell=False, cwd=build, env=os.environ)
+    logger.debug("meson install ended with rc= %s", p.wait())
+
+    home = None
+    if os.environ.get("PYBUILD_NAME") == name:
+        # we are in the debian packaging way
+        home = os.environ.get("PYTHONPATH", "").split(os.pathsep)[-1]
+    if not home:
+        if os.environ.get("BUILDPYTHONPATH"):
+            home = os.path.abspath(os.environ.get("BUILDPYTHONPATH", ""))
+        else:
+            if sys.platform == "win32":
+                home = os.path.join(build, libdir, "site-packages")
+            else:
+                python_version = f"python{sys.version_info.major}.{sys.version_info.minor}"
+                home = os.path.join(build, libdir, python_version, "site-packages")
+    home = os.path.abspath(home)
+    cnt = 0
+    while not os.path.isdir(home):
+        cnt += 1
+        home = os.path.split(home)[0]
+    for _ in range(cnt):
+        n = os.listdir(home)[0]
+        home = os.path.join(home, n)
 
-def _get_available_scripts(path):
-    res = []
-    try:
-        res = " ".join([s.rstrip('.py') for s in os.listdir(path)])
-    except OSError:
-        res = ["no script available, did you ran "
-               "'python setup.py build' before bootstrapping ?"]
-    return res
+    logger.warning("Building %s to %s", name, home)
+    return home
 
-if sys.version_info[0] >= 3:  # Python3
-    def execfile(fullpath, globals=None, locals=None):
-        "Python3 implementation for execfile"
-        with open(fullpath) as f:
-            try:
-                data = f.read()
-            except UnicodeDecodeError:
-                raise SyntaxError("Not a Python script")
-            code = compile(data, fullpath, 'exec')
-            exec(code, globals, locals)
+def execfile(fullpath, globals=None, locals=None):
+    "Python3 implementation for execfile"
+    with open(fullpath) as f:
+        try:
+            data = f.read()
+        except UnicodeDecodeError:
+            raise SyntaxError("Not a Python script")
+        code = compile(data, fullpath, 'exec')
+        exec(code, globals, locals)
 
 
 def run_file(filename, argv):
@@ -117,21 +138,18 @@ def run_file(filename, argv):
     run.wait()
 
 
-def run_entry_point(entry_point, argv):
+def run_entry_point(target_name, entry_point, argv):
     """
     Execute an entry_point using the current python context
-    (http://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation)
 
+    :param str target_name: Name of the command to execute
     :param str entry_point: A string identifying a function from a module
-        (NAME = PACKAGE.MODULE:FUNCTION [EXTRA])
+        (PACKAGE.MODULE:FUNCTION)
+    :param argv: list of arguments
     """
     import importlib
-    elements = entry_point.split("=")
-    target_name = elements[0].strip()
-    elements = elements[1].split(":")
+    elements = entry_point.split(":")
     module_name = elements[0].strip()
-    # Take care of entry_point optional "extra" requirements declaration
"extra" requirements declaration - function_name = elements[1].split()[0].strip() + function_name = elements[1].strip() logger.info("Execute target %s (function %s from module %s) using importlib", target_name, function_name, module_name) full_args = [target_name] @@ -163,74 +181,49 @@ def find_executable(target): if os.path.isfile(target): return ("path", os.path.abspath(target)) - # search the file from setup.py - import setup - config = setup.get_project_configuration(dry_run=True) - # scripts from project configuration - if "scripts" in config: - for script_name in config["scripts"]: - if os.path.basename(script_name) == target: - return ("path", os.path.abspath(script_name)) - # entry-points from project configuration - if "entry_points" in config: - for kind in config["entry_points"]: - for entry_point in config["entry_points"][kind]: - elements = entry_point.split("=") - name = elements[0].strip() - if name == target: - return ("entry_point", entry_point) - - # search the file from env PATH - for dirname in os.environ.get("PATH", "").split(os.pathsep): - path = os.path.join(dirname, target) - if os.path.isfile(path): - return ("path", path) + # search the executable in pyproject.toml + with open(os.path.join(PROJECT_DIR, "pyproject.toml")) as f: + pyproject = tomli.loads(f.read()) + scripts = {} + scripts.update(pyproject.get("project", {}).get("scripts", {})) + scripts.update(pyproject.get("project", {}).get("gui-scripts", {})) + + for script, entry_point in scripts.items(): + if script == target: + print(script, entry_point) + return ("entry_point", target, entry_point) return None, None -def main(argv): - parser = ArgumentParser(prog="bootstrap", usage="./bootstrap.py