diff --git a/.coveragerc b/.coveragerc index 96024441ef..bcb36fe5de 100644 --- a/.coveragerc +++ b/.coveragerc @@ -3,8 +3,12 @@ source = qutip omit = # QuTiP test files */qutip/tests/* + # Tool for tests + */qutip/solver/sode/_noise.py [report] exclude_lines = # Skip Python wrappers which help load in C extension modules. __bootstrap__() + # Skip empty method that are never meant to be used. + raise NotImplementedError diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 45554412b8..d68410d7cb 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -5,8 +5,8 @@ Thank you for contributing to QuTiP! Please make sure you have finished the foll - [ ] Contributions to qutip should follow the [pep8 style](https://www.python.org/dev/peps/pep-0008/). You can use [pycodestyle](http://pycodestyle.pycqa.org/en/latest/index.html) to check your code automatically - [ ] Please add tests to cover your changes if applicable. -- [ ] If the behavior of the code has changed or new feature has been added, please also update the documentation in the `doc` folder, and the [notebook](https://github.com/qutip/qutip-notebooks). Feel free to ask if you are not sure. -- [ ] Include the changelog in a file named: `doc/changes/.` 'type' can be one of the following: feature, bugfix, doc, removal, misc, or deprecation (see [here](http://qutip.org/docs/latest/development/contributing.html#Changelog%20Generation) for more information). +- [ ] If the behavior of the code has changed or new feature has been added, please also update the documentation in the `doc` folder, and the [notebook](https://github.com/qutip/qutip-tutorials). Feel free to ask if you are not sure. +- [ ] Include the changelog in a file named: `doc/changes/.` 'type' can be one of the following: feature, bugfix, doc, removal, misc, or deprecation (see [here](http://qutip.org/docs/latest/development/contributing.html#changelog-generation) for more information). 
Delete this checklist after you have completed all the tasks. If you have not finished them all, you can also open a [Draft Pull Request](https://github.blog/2019-02-14-introducing-draft-pull-requests/) to let the others know this on-going work and keep this checklist in the PR description. diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b6c5261e85..096e5cb593 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -51,6 +51,7 @@ jobs: numpy-requirement: ">=1.20,<1.21" scipy-requirement: ">=1.5,<1.6" condaforge: 1 + oldcython: 1 # No MKL runs. MKL is now the default for conda installations, but # not necessarily for pip. @@ -69,38 +70,24 @@ jobs: # Python 3.10 and numpy 1.22 # Use conda-forge to provide numpy 1.22 - # Ignore ImportWarning because pyximport registered an importer - # PyxImporter that does not have a find_spec method and this raises - # a warning on Python 3.10 - # Ignore DeprecationWarnings raised by cvxpy importing scipy.sparse.X - # under SciPy 1.8.0+. - # Ignore DeprecationWarnings raised by versions of - # setuptools >= 65.0.0 during pyximport imports This can be removed - # once https://github.com/cython/cython/issues/4985 - # is fixed and released. - case-name: Python 3.10 os: ubuntu-latest python-version: "3.10" condaforge: 1 - pytest-extra-options: "-W ignore::ImportWarning -W ignore::DeprecationWarning:cvxpy.interface.scipy_wrapper -W ignore:Absolute:DeprecationWarning" + oldcython: 1 # Python 3.11 and latest numpy # Use conda-forge to provide Python 3.11 and latest numpy - # Ignore ImportWarning because pyximport registered an importer - # PyxImporter that does not have a find_spec method and this raises - # a warning on Python 3.10 - # Ignore DeprecationWarnings raised by cvxpy importing scipy.sparse.X - # under SciPy 1.8.0+. 
- # Ignore DeprecationWarnings raised by versions of - # setuptools >= 65.0.0 during pyximport imports This can be removed - # once https://github.com/cython/cython/issues/4985 - # is fixed and released. + # Ignore deprecation of the cgi module in Python 3.11 that is + # still imported by Cython.Tempita. This was addressed in + # https://github.com/cython/cython/pull/5128 but not backported + # to any currently released version. - case-name: Python 3.11 os: ubuntu-latest python-version: "3.11" condaforge: 1 conda-extra-pkgs: "suitesparse" # for compiling cvxopt - pytest-extra-options: "-W ignore::ImportWarning -W ignore::DeprecationWarning:cvxpy.interface.scipy_wrapper -W ignore:Absolute:DeprecationWarning -W ignore::DeprecationWarning:Cython.Tempita" + pytest-extra-options: "-W ignore::DeprecationWarning:Cython.Tempita" # Windows. Once all tests pass without special options needed, this # can be moved to the main os list in the test matrix. All the tests @@ -111,7 +98,6 @@ jobs: - case-name: Windows Latest os: windows-latest python-version: "3.10" - pytest-extra-options: "-W ignore::ImportWarning -k 'not (test_correlation or test_interpolate or test_mcsolve)'" steps: - uses: actions/checkout@v3 @@ -131,6 +117,9 @@ jobs: if [[ -z "${{ matrix.nocython }}" ]]; then QUTIP_TARGET="$QUTIP_TARGET,runtime_compilation" fi + if [[ "${{ matrix.oldcython }}" ]]; then + pip install cython==0.29.36 + fi export CI_QUTIP_WITH_OPENMP=${{ matrix.openmp }} if [[ -z "${{ matrix.nomkl }}" ]]; then conda install blas=*=mkl "numpy${{ matrix.numpy-requirement }}" "scipy${{ matrix.scipy-requirement }}" @@ -220,7 +209,7 @@ jobs: python -m pip install towncrier - name: Verify Towncrier entry added - if: False # github.event_name == 'pull_request' + if: github.event_name == 'pull_request' env: BASE_BRANCH: ${{ github.base_ref }} run: | diff --git a/.gitignore b/.gitignore index e9cd9d2670..619d39250c 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ rhs*.pyx qutip/cy/*.c *.cpp 
+!qutip/core/data/src/*.cpp *.dat qutip/core/*.h diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000000..c5e1295754 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,20 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 + +formats: + - pdf + +build: + os: ubuntu-22.04 + tools: + python: "mambaforge-4.10" + +conda: + environment: doc/rtd-environment.yml + +sphinx: + configuration: doc/conf.py + fail_on_warning: true \ No newline at end of file diff --git a/README.md b/README.md index eb08599852..376e25b3a8 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ and [J. R. Johansson](https://github.com/jrjohansson) [![Build Status](https://github.com/qutip/qutip/actions/workflows/tests.yml/badge.svg?branch=master)](https://github.com/qutip/qutip/actions/workflows/tests.yml) [![Coverage Status](https://img.shields.io/coveralls/qutip/qutip.svg?logo=Coveralls)](https://coveralls.io/r/qutip/qutip) [![Maintainability](https://api.codeclimate.com/v1/badges/df502674f1dfa1f1b67a/maintainability)](https://codeclimate.com/github/qutip/qutip/maintainability) -[![license](https://img.shields.io/badge/license-New%20BSD-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![license](https://img.shields.io/badge/license-New%20BSD-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) [![PyPi Downloads](https://img.shields.io/pypi/dm/qutip?label=downloads%20%7C%20pip&logo=PyPI)](https://pypi.org/project/qutip) [![Conda-Forge Downloads](https://img.shields.io/conda/dn/conda-forge/qutip?label=downloads%20%7C%20conda&logo=Conda-Forge)](https://anaconda.org/conda-forge/qutip) @@ -72,15 +72,31 @@ All back releases are also available for download in the [releases section of th For the most complete set of release notes and changelogs for historic versions, see the [changelog](https://qutip.org/docs/latest/changelog.html) section in the 
documentation. +The pre-release of QuTiP 5.0 is available on PyPI and can be installed using pip: + +```bash +pip install --pre qutip +``` + +This version breaks compatibility with QuTiP 4.7 in many small ways. +Please see the [changelog](https://github.com/qutip/qutip/blob/master/doc/changelog.rst) for a list of changes, new features and deprecations. +This version should be fully working. If you find any bugs, confusing documentation or missing features, please create a GitHub issue. + + Documentation ------------- +[![Documentation Status - Latest](https://readthedocs.org/projects/qutip/badge/?version=latest)](https://qutip.readthedocs.io/en/latest/?badge=latest) + +The documentation for the latest [stable release](https://qutip.readthedocs.io/en/latest/) and the [master](https://qutip.readthedocs.io/en/master/) branch is available for reading on Read The Docs. + The documentation for official releases, in HTML and PDF formats, can be found in the [documentation section of the QuTiP website](https://qutip.org/documentation.html). + The latest development documentation is available in this repository in the `doc` folder. A [selection of demonstration notebooks is available](https://qutip.org/tutorials.html), which demonstrate some of the many features of QuTiP. -These are stored in the [qutip/qutip-notebooks repository](https://github.com/qutip/qutip-notebooks) here on GitHub. -You can run the notebooks online using myBinder: [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/qutip/qutip-notebooks/master?filepath=index.ipynb) +These are stored in the [qutip/qutip-tutorials repository](https://github.com/qutip/qutip-tutorials) here on GitHub. 
+ Contribute ---------- diff --git a/VERSION b/VERSION index f1cb422421..d51bd13201 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -5.0.0a1 +5.0.0a2 diff --git a/doc/QuTiP_tree_plot/qutip-structure.py b/doc/QuTiP_tree_plot/qutip-structure.py index cf18a9f171..7234598fba 100755 --- a/doc/QuTiP_tree_plot/qutip-structure.py +++ b/doc/QuTiP_tree_plot/qutip-structure.py @@ -47,7 +47,6 @@ # States ("#007143", { "states", "continuous_variables", "qstate", "random_objects", - "three_level_atom", }), # QIP ("#36d695", {"measurement"}), diff --git a/doc/apidoc/classes.rst b/doc/apidoc/classes.rst index aea4e75e18..270e6c4acc 100644 --- a/doc/apidoc/classes.rst +++ b/doc/apidoc/classes.rst @@ -41,17 +41,51 @@ Distributions .. _classes-solver: -Solver ------- +Solvers +------- .. autoclass:: qutip.solver.sesolve.SESolver :members: + :inherited-members: + :show-inheritance: .. autoclass:: qutip.solver.mesolve.MESolver :members: + :inherited-members: + :show-inheritance: .. autoclass:: qutip.solver.brmesolve.BRSolver :members: + :inherited-members: + :show-inheritance: + + +.. autoclass:: qutip.solver.stochastic.SMESolver + :members: + :inherited-members: + :show-inheritance: + +.. autoclass:: qutip.solver.stochastic.SSESolver + :members: + :inherited-members: + :show-inheritance: + + + +.. _classes-monte-carlo-solver: + +Monte Carlo Solvers +------------------- + +.. autoclass:: qutip.solver.mcsolve.MCSolver + :members: + :inherited-members: + :show-inheritance: + +.. autoclass:: qutip.solver.nm_mcsolve.NonMarkovianMCSolver + :members: + :inherited-members: + :show-inheritance: .. _classes-non_markov_heom: @@ -132,16 +166,37 @@ Integrator :members: options -.. _classes-non_markov_mc_and_tt: +.. _classes-sode: -Non-Markovian Memory Cascade and Transfer Tensor Solvers --------------------------------------------------------- +Stochastic Integrator +--------------------- -.. autoclass:: qutip.solve.nonmarkov.memorycascade.MemoryCascade - :members: +.. 
autoclass:: qutip.solver.sode.rouchon.RouchonSODE + :members: options -.. autoclass:: qutip.solve.nonmarkov.transfertensor.TTMSolverOptions - :members: +.. autoclass:: qutip.solver.sode.itotaylor.EulerSODE + :members: options + +.. autoclass:: qutip.solver.sode.itotaylor.Milstein_SODE + :members: options + +.. autoclass:: qutip.solver.sode.itotaylor.Taylor1_5_SODE + :members: options + +.. autoclass:: qutip.solver.sode.itotaylor.Implicit_Milstein_SODE + :members: options + +.. autoclass:: qutip.solver.sode.itotaylor.Implicit_Taylor1_5_SODE + :members: options + +.. autoclass:: qutip.solver.sode.sode.PlatenSODE + :members: options + +.. autoclass:: qutip.solver.sode.itotaylor.Explicit1_5_SODE + :members: options + +.. autoclass:: qutip.solver.sode.sode.PredCorr_SODE + :members: options .. _classes-odeoptions: @@ -157,10 +212,10 @@ Solver Options and Results Permutational Invariance ------------------------ -.. autoclass:: qutip.solve.piqs.Dicke +.. autoclass:: qutip.piqs.piqs.Dicke :members: -.. autoclass:: qutip.solve.piqs.Pim +.. autoclass:: qutip.piqs.piqs.Pim :members: .. _classes-distributions: @@ -185,135 +240,3 @@ Distribution functions .. autoclass:: qutip.distributions.HarmonicOscillatorProbabilityFunction :members: - - -.. _classes-control: - -Optimal control ---------------- - -.. autoclass:: qutip.control.optimizer.Optimizer - :members: - -.. autoclass:: qutip.control.optimizer.OptimizerBFGS - :members: - -.. autoclass:: qutip.control.optimizer.OptimizerLBFGSB - :members: - -.. autoclass:: qutip.control.optimizer.OptimizerCrab - :members: - -.. autoclass:: qutip.control.optimizer.OptimizerCrabFmin - :members: - -.. autoclass:: qutip.control.optimizer.OptimIterSummary - :members: - -.. autoclass:: qutip.control.termcond.TerminationConditions - :members: - -.. autoclass:: qutip.control.optimresult.OptimResult - :members: - -.. autoclass:: qutip.control.dynamics.Dynamics - :members: - -.. autoclass:: qutip.control.dynamics.DynamicsGenMat - :members: - -.. 
autoclass:: qutip.control.dynamics.DynamicsUnitary - :members: - -.. autoclass:: qutip.control.dynamics.DynamicsSymplectic - :members: - -.. autoclass:: qutip.control.propcomp.PropagatorComputer - :members: - -.. autoclass:: qutip.control.propcomp.PropCompApproxGrad - :members: - -.. autoclass:: qutip.control.propcomp.PropCompDiag - :members: - -.. autoclass:: qutip.control.propcomp.PropCompFrechet - :members: - -.. autoclass:: qutip.control.fidcomp.FidelityComputer - :members: - -.. autoclass:: qutip.control.fidcomp.FidCompUnitary - :members: - -.. autoclass:: qutip.control.fidcomp.FidCompTraceDiff - :members: - -.. autoclass:: qutip.control.fidcomp.FidCompTraceDiffApprox - :members: - -.. autoclass:: qutip.control.tslotcomp.TimeslotComputer - :members: - -.. autoclass:: qutip.control.tslotcomp.TSlotCompUpdateAll - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGen - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenRandom - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenZero - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenLinear - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenPeriodic - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenSine - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenSquare - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenSaw - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenTriangle - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenGaussian - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenGaussianEdge - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenCrab - :members: - -.. autoclass:: qutip.control.pulsegen.PulseGenCrabFourier - :members: - -.. autoclass:: qutip.control.stats.Stats - :members: - -.. autoclass:: qutip.control.dump.Dump - :members: - -.. autoclass:: qutip.control.dump.OptimDump - :members: - -.. autoclass:: qutip.control.dump.DynamicsDump - :members: - -.. 
autoclass:: qutip.control.dump.DumpItem - :members: - -.. autoclass:: qutip.control.dump.EvoCompDumpItem - :members: - -.. autoclass:: qutip.control.dump.DumpSummaryItem - :members: diff --git a/doc/apidoc/functions.rst b/doc/apidoc/functions.rst index 78a06941b5..3c781b4200 100644 --- a/doc/apidoc/functions.rst +++ b/doc/apidoc/functions.rst @@ -18,7 +18,7 @@ Quantum Operators ----------------- .. automodule:: qutip.core.operators - :members: charge, commutator, create, destroy, displace, enr_destroy, enr_identity, jmat, num, qeye, identity, momentum, phase, position, qdiags, qutrit_ops, qzero, sigmam, sigmap, sigmax, sigmay, sigmaz, spin_Jx, spin_Jy, spin_Jz, spin_Jm, spin_Jp, squeeze, squeezing, tunneling + :members: charge, commutator, create, destroy, displace, enr_destroy, enr_identity, fcreate, fdestroy, jmat, num, qeye, identity, momentum, phase, position, qdiags, qutrit_ops, qzero, sigmam, sigmap, sigmax, sigmay, sigmaz, spin_Jx, spin_Jy, spin_Jz, spin_Jm, spin_Jp, squeeze, squeezing, tunneling .. _functions-rand: @@ -37,14 +37,6 @@ Random Operators and States :members: rand_dm, rand_herm, rand_ket, rand_stochastic, rand_unitary, rand_super, rand_super_bcsz -Three-Level Atoms ------------------ - -.. automodule:: qutip.three_level_atom - :members: three_level_basis, three_level_ops - :undoc-members: - - Superoperators and Liouvillians ------------------------------- @@ -144,6 +136,9 @@ Monte Carlo Evolution .. automodule:: qutip.solver.mcsolve :members: mcsolve +.. automodule:: qutip.solver.nm_mcsolve + :members: nm_mcsolve + Krylov Subspace Solver ---------------------- @@ -169,8 +164,8 @@ Floquet States and Floquet-Markov Master Equation Stochastic Schrödinger Equation and Master Equation --------------------------------------------------- -.. automodule:: qutip.solve.stochastic - :members: ssesolve, photocurrent_sesolve, smepdpsolve, smesolve, photocurrent_mesolve, ssepdpsolve, stochastic_solvers, general_stochastic +.. 
automodule:: qutip.solver.stochastic + :members: ssesolve, smesolve Hierarchical Equations of Motion @@ -214,7 +209,7 @@ Scattering in Quantum Optical Systems Permutational Invariance ------------------------ -.. automodule:: qutip.solve.piqs +.. automodule:: qutip.piqs.piqs :members: num_dicke_states, num_dicke_ladders, num_tls, isdiagonal, dicke_blocks, dicke_blocks_full, dicke_function_trace, purity_dicke, entropy_vn_dicke, state_degeneracy, m_degeneracy, energy_degeneracy, ap, am, spin_algebra, jspin, collapse_uncoupled, dicke_basis, dicke, excited, superradiant, css, ghz, ground, identity_uncoupled, block_matrix, tau_column, @@ -232,11 +227,12 @@ Graphs and Visualization ------------------------ .. automodule:: qutip.visualization - :members: hinton, matrix_histogram, matrix_histogram_complex, plot_energy_levels, plot_fock_distribution, plot_wigner_fock_distribution, plot_wigner, sphereplot, plot_schmidt, plot_qubism, plot_expectation_values, plot_spin_distribution_2d, plot_spin_distribution_3d, plot_wigner_sphere + :members: hinton, matrix_histogram, plot_energy_levels, plot_fock_distribution, plot_wigner, sphereplot, plot_schmidt, plot_qubism, plot_expectation_values, plot_wigner_sphere, plot_spin_distribution :undoc-members: -.. automodule:: qutip.orbital - :members: orbital +.. automodule:: qutip.animation + :members: anim_hinton, anim_matrix_histogram, anim_fock_distribution, anim_wigner, anim_sphereplot, anim_schmidt, anim_qubism, anim_wigner_sphere, anim_spin_distribution + .. automodule:: qutip.matplotlib_utilities :members: wigner_cmap, complex_phase_cmap @@ -255,19 +251,9 @@ Quantum Process Tomography Non-Markovian Solvers ===================== -.. automodule:: qutip.solve.nonmarkov.transfertensor +.. automodule:: qutip.solver.nonmarkov.transfertensor :members: ttmsolve -.. _functions-control: - -Optimal control -=============== - -.. 
automodule:: qutip.control.pulseoptim - :members: optimize_pulse, optimize_pulse_unitary, create_pulse_optimizer, opt_pulse_crab, opt_pulse_crab_unitary - -.. automodule:: qutip.control.pulsegen - :members: create_pulse_gen Utility Functions ================= diff --git a/doc/biblio.rst b/doc/biblio.rst index 7d3de972c4..248e492fa4 100644 --- a/doc/biblio.rst +++ b/doc/biblio.rst @@ -71,3 +71,9 @@ Bibliography .. [NKanej] N Khaneja et. al. *Optimal control of coupled spin dynamics: Design of NMR pulse sequences by gradient ascent algorithms.* J. Magn. Reson. **172**, 296–305 (2005). :doi:`10.1016/j.jmr.2004.11.004` + +.. [Donvil22] + B. Donvil, P. Muratore-Ginanneschi, *Quantum trajectory framework for general time-local master equations*, Nat Commun **13**, 4140 (2022). :doi:`10.1038/s41467-022-31533-8`. + +.. [Abd19] + M. Abdelhafez, D. I. Schuster, J. Koch, *Gradient-based optimal control of open quantum systems using quantumtrajectories and automatic differentiation*, Phys. Rev. A **99**, 052327 (2019). :doi:`10.1103/PhysRevA.99.052327`. \ No newline at end of file diff --git a/doc/changelog.rst b/doc/changelog.rst index 071f1e5de9..ad63be5726 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,84 @@ Change Log .. towncrier release notes start +QuTiP 5.0.0a2 (2023-09-06) +========================== + +Features +-------- + +- Add support for different spectra types for bloch_redfield_tensor (#1951) +- Improve qutip import times by setting logger names explicitly. (#1981, by Pieter Eendebak) +- Change the order of parameters in expand_operator (#1991) +- Add `svn` and `solve` to dispatched (#2002) +- Added nm_mcsolve to provide support for Monte-Carlo simulations of master equations with possibly negative rates. The method implemented here is described in arXiv:2209.08958 [quant-ph]. 
(#2070 by pmenczel) +- Add support for combining bosinic and fermionic HEOM baths (#2089) +- Added __repr__ to QobjEvo (#2111 by lklivingstone) +- Improve print(qutip.settings) by make it shorter (#2113 by tamakoshi2001) +- Create the `trace_oper_ket` operation (#2126) +- Speed up the construction of the RHS of the HEOM solver by a factor of 4x by converting the final step to Cython. (#2128) +- Rewrite the stochastic solver to use the v5 solver interface. (#2131) +- Add `Qobj.get` to extract underlying data in original format. (#2141) +- Add qeye_like and qzero_like (#2153) +- Add capacity to dispatch on ``Data`` (#2157) +- Added fermionic annihilation and creation operators. (#2166 by khnikhil) +- Changed arguments and applied colorblind_safe to functions in visualization.py (#2170 by Yuji Tamakoshi) +- Changed arguments and applied colorblind_safe to plot_wigner_sphere and matrix_histogram in visualization.py (#2193 by Yuji Tamakoshi) +- Added Dia data layer which represents operators as multi-diagonal matrices. (#2196) +- Added support for animated plots. (#2203 by Yuji Tamakoshi) +- Improved sampling algorithm for mcsolve (#2218 by Daniel Weiss) +- Added support for early termination of map functions. (#2222) + + + +Bug Fixes +--------- + +- Add missing state transformation to floquet_markov_mesolve (#1952 by christian512) +- Added default _isherm value (True) for momentum and position operators. (#2032 by Asier Galicia) +- Changed qutip-notebooks to qutip-tutorials and fixed the typo in the link redirecting to the changelog section in the PR template. (#2107 by Valan Baptist Mathuranayagam) +- Increase missing colorbar padding for matrix_histogram_complex() from 0 to 0.05. (#2181 by SJUW) +- Raise error on insufficient memory. (#2224) +- Fixed fallback to fsesolve call in fmmesolve (#2225) + + +Removals +-------- + +- Remove qutip.control and replace with qutip_qtrl. (#2116) +- Deleted _solve in countstat.py and used _data.solve. 
(#2120 by Yuji Tamakoshi) +- Deprecate three_level_atom (#2221) +- Deprecate orbital (#2223) + + +Documentation +------------- + +- Add a guide on Superoperators, Pauli Basis and Channel Contraction. (#1984 by christian512) +- Added information on sec_cutoff to the documentation (#2136 by Gerardo Jose Suarez) +- Added inherited members to API doc of MESolver, SMESolver, SSESolver, NonMarkovianMCSolver (#2167 by Cristian Emiliano Godinez Ramirez) +- Corrected grammar in Bloch-Redfield master equation documentation (#2174 by Andrey Rakhubovsky) + + +Miscellaneous +------------- + +- Update scipy version requirement to 1.5+ (#1982 by Pieter Eendebak) +- Added __all__ to qutip/measurements.py and qutip/core/semidefinite.py (#2103 by Rushiraj Gadhvi) +- Restore towncrier check (#2105) +- qutip.ipynbtools.version_table() can now be called without Cython installed (#2110 by Rushiraj Gadhvi) +- Moved HTMLProgressBar from qutip/ipynbtools.py to qutip/ui/progressbar.py (#2112 by Harsh Khilawala) +- Added new argument bc_type to take boundary conditions when creating QobjEvo (#2114 by Avatar Srinidhi P V ) +- Remove Windows build warning suppression. (#2119) +- Optimize dispatcher by dispatching on positional only args. 
(#2135) +- Clean semidefinite (#2138) +- Migrate `transfertensor.py` to solver (#2142) +- Add a test for progress_bar (#2150) +- Enable cython 3 (#2151) +- Added tests for visualization.py (#2192 by Yuji Tamakoshi) +- Sorted arguments of sphereplot so that the order is similar to those of plot_spin_distribution (#2219 by Yuji Tamakoshi) + + Version 5.0.0a1 (February 7, 2023) ++++++++++++++++++++++++++++++++++ @@ -63,7 +141,7 @@ significantly to test the data layer API: - ``qutip-tensorflow``: a TensorFlow backend by Asier Galicia (``) - ``qutip-cupy``: a CuPy GPU backend by Felipe Bivort Haiek (``)` - ``qutip-tensornetwork``: a TensorNetwork backend by Asier Galicia (``) -- ``qutip-jax```: a JAX backend by Eric Giguère (``) +- ``qutip-jax``: a JAX backend by Eric Giguère (``) We have also had many other contributors, whose specific contributions are detailed below: diff --git a/doc/changes/1951.feature b/doc/changes/1951.feature deleted file mode 100644 index a0315722eb..0000000000 --- a/doc/changes/1951.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for different spectra types for bloch_redfield_tensor \ No newline at end of file diff --git a/doc/changes/1952.bugfix b/doc/changes/1952.bugfix deleted file mode 100644 index 01b1506516..0000000000 --- a/doc/changes/1952.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add missing state transformation to floquet_markov_mesolve \ No newline at end of file diff --git a/doc/changes/1981.feature b/doc/changes/1981.feature deleted file mode 100644 index f875ad9774..0000000000 --- a/doc/changes/1981.feature +++ /dev/null @@ -1 +0,0 @@ -Improve qutip import times by setting logger names explicitly. 
diff --git a/doc/changes/1982.misc b/doc/changes/1982.misc deleted file mode 100644 index d1a5b4f07b..0000000000 --- a/doc/changes/1982.misc +++ /dev/null @@ -1 +0,0 @@ -Update scipy version requirement to 1.5+ diff --git a/doc/changes/1991.feature b/doc/changes/1991.feature deleted file mode 100644 index fca40f0011..0000000000 --- a/doc/changes/1991.feature +++ /dev/null @@ -1 +0,0 @@ -Change the order of parameters in expand_operator diff --git a/doc/changes/2032.bugfix b/doc/changes/2032.bugfix deleted file mode 100644 index 8cf71d67e7..0000000000 --- a/doc/changes/2032.bugfix +++ /dev/null @@ -1 +0,0 @@ -Added default _isherm value (True) for momentum and position operators. diff --git a/doc/conf.py b/doc/conf.py index 7113e450fc..5f0c8d3779 100755 --- a/doc/conf.py +++ b/doc/conf.py @@ -117,7 +117,7 @@ def qutip_version(): # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
@@ -252,7 +252,7 @@ def qutip_version(): # # See: # - https://docs.mathjax.org/en/v3.0-latest/input/tex/extensions/physics.html -mathjax_config = { +mathjax3_config = { 'TeX': { 'Macros': { 'bra': [r'\left\langle{#1}\right\rvert', 1], @@ -362,8 +362,8 @@ def qutip_version(): ## EXTLINKS CONFIGURATION ###################################################### extlinks = { - 'arxiv': ('https://arxiv.org/abs/%s', 'arXiv:'), - 'doi': ('https://dx.doi.org/%s', 'doi:'), + 'arxiv': ('https://arxiv.org/abs/%s', 'arXiv:%s'), + 'doi': ('https://dx.doi.org/%s', 'doi:%s'), } # configuration declares the location of the examples directory for diff --git a/doc/contributors.rst b/doc/contributors.rst index 3ae1aebb3a..e112abeeee 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -14,11 +14,12 @@ Developers import numpy as np import matplotlib.pyplot as plt + from matplotlib.path import Path from matplotlib.patches import PathPatch from matplotlib.textpath import TextPath from matplotlib.collections import PolyCollection from matplotlib.font_manager import FontProperties - import matplotlib.image as mpimg + import PIL LINK_CONTRIBUTORS = "https://api.github.com/repos/qutip/qutip/contributors" @@ -39,8 +40,12 @@ Developers url_object = urllib.request.urlopen(LINK_CONTRIBUTORS) list_contributors = json.loads(url_object.read()) qutip_contributors = [element["login"] for element in list_contributors] + qutip_contributors = [s.lower() for s in qutip_contributors] text = " ".join(qutip_contributors) + # load the QuTiP logo + img = PIL.Image.open(urllib.request.urlopen(LINK_LOGO)) + # code below was inspired in the following link: # https://github.com/dynamicwebpaige/nanowrimo-2021/blob/main/15_VS_Code_contributors.ipynb @@ -56,12 +61,21 @@ Developers L = np.zeros(len(T)) np.cumsum(np.sqrt(((T[1:] - T[:-1]) ** 2).sum(axis=1)), out=L[1:]) - path = TextPath((0, 0), text, size=FONT_SIZE, - prop=FontProperties(family=FONT_FAMILY)) - Vx, Vy = path.vertices[:, 0], path.vertices[:, 1] 
+ path = TextPath( + (0, 0), text, + size=FONT_SIZE, + prop=FontProperties(family=FONT_FAMILY), + ) + + vertices = path.vertices + codes = path.codes + + Vx, Vy = vertices[:, 0], vertices[:, 1] X = np.interp(Vx, L, T[:, 0]) + Vy * np.interp(Vx, L, O[:, 0]) Y = np.interp(Vx, L, T[:, 1]) + Vy * np.interp(Vx, L, O[:, 1]) - Vx[...], Vy[...] = X, Y + vertices = np.stack([X, Y], axis=-1) + + path = Path(vertices, codes, closed=False) # creating figure fig, ax = plt.subplots(figsize=(FIGURE_SIZE, FIGURE_SIZE)) @@ -70,8 +84,7 @@ Developers ax.set_xlim(-AXIS_SIZE, AXIS_SIZE), ax.set_xticks([]) ax.set_ylim(-AXIS_SIZE, AXIS_SIZE), ax.set_yticks([]) - # uncomment lines below to add qutip logo - img = mpimg.imread(LINK_LOGO) + # add qutip logo ax.imshow(img, alpha=LOGO_TRANSPARENCY, extent=[-LOGO_SIZE,LOGO_SIZE, -LOGO_SIZE, LOGO_SIZE]) @@ -88,6 +101,7 @@ Lead Developers - `Boxi Li `_ - `Jake Lishman `_ - `Simon Cross `_ +- `Asier Galicia `_ Past Lead Developers ==================== diff --git a/doc/guide/dynamics/dynamics-bloch-redfield.rst b/doc/guide/dynamics/dynamics-bloch-redfield.rst index 5ec31e0e09..ceaf9a997c 100644 --- a/doc/guide/dynamics/dynamics-bloch-redfield.rst +++ b/doc/guide/dynamics/dynamics-bloch-redfield.rst @@ -6,6 +6,7 @@ Bloch-Redfield master equation .. plot:: + :context: reset :include-source: False import pylab as plt @@ -18,7 +19,7 @@ Bloch-Redfield master equation Introduction ============ -The Lindblad master equation introduced earlier is constructed so that it describes a physical evolution of the density matrix (i.e., trace and positivity preserving), but it does not provide a connection to any underlaying microscopic physical model. +The Lindblad master equation introduced earlier is constructed so that it describes a physical evolution of the density matrix (i.e., trace and positivity preserving), but it does not provide a connection to any underlying microscopic physical model. 
The Lindblad operators (collapse operators) describe phenomenological processes, such as for example dephasing and spin flips, and the rates of these processes are arbitrary parameters in the model. In many situations the collapse operators and their corresponding rates have clear physical interpretation, such as dephasing and relaxation rates, and in those cases the Lindblad master equation is usually the method of choice. @@ -282,6 +283,15 @@ The two steps of calculating the Bloch-Redfield tensor and evolving according to where the resulting `output` is an instance of the class :class:`qutip.Result`. +.. note:: + While the code example simulates the Bloch-Redfield equation in the secular approximation, QuTiP's implementation allows the user to simulate the non-secular version of the Bloch-Redfield equation by setting ``sec_cutoff=-1``, as well as do a partial secular approximation by setting it to a ``float`` , this float will become the cutoff for the sum in :eq:`br-final` meaning terms with :math:`|\omega_{ab}-\omega_{cd}|` greater than the cutoff will be neglected. + Its default value is 0.1 which corresponds to the secular approximation. + For example the command + :: + + output = brmesolve(H, psi0, tlist, a_ops=[[sigmax(), ohmic_spectrum]], e_ops=e_ops, sec_cutoff=-1) + + will simulate the same example as above without the secular approximation. Note that using the non-secular version may lead to negativity issues. .. _td-bloch-redfield: @@ -300,7 +310,7 @@ Fortunately, this eigen decomposition occurs at the Hamiltonian level, as oppose For time-dependent Hamiltonians, the Hamiltonian itself can be passed into the solver like any other time dependent Hamiltonian, as thus we will not discuss this topic further. Instead, here the focus is on time-dependent bath coupling terms. To this end, suppose that we have a dissipative harmonic oscillator, where the white-noise dissipation rate decreases exponentially with time :math:`\kappa(t) = \kappa(0)\exp(-t)`. 
-In the Lindblad or monte-carlo solvers, this could be implemented as a time-dependent collapse operator list ``c_ops = [[a, 'sqrt(kappa*exp(-t))']]``. +In the Lindblad or Monte Carlo solvers, this could be implemented as a time-dependent collapse operator list ``c_ops = [[a, 'sqrt(kappa*exp(-t))']]``. In the Bloch-Redfield solver, the bath coupling terms must be Hermitian. As such, in this example, our coupling operator is the position operator ``a+a.dag()``. The complete example, and comparison to the analytic expression is: @@ -392,7 +402,9 @@ A full example is: plt.show() -.. plot:: - :context: close-figs - Further examples on time-dependent Bloch-Redfield simulations can be found in the online tutorials. + +.. plot:: + :context: reset + :include-source: false + :nofigs: diff --git a/doc/guide/dynamics/dynamics-floquet.rst b/doc/guide/dynamics/dynamics-floquet.rst index e3d0a3020c..7d3da8c38e 100644 --- a/doc/guide/dynamics/dynamics-floquet.rst +++ b/doc/guide/dynamics/dynamics-floquet.rst @@ -93,7 +93,7 @@ Consider for example the case of a strongly driven two-level atom, described by In QuTiP we can define this Hamiltonian as follows: .. plot:: - :context: close-figs + :context: reset >>> delta = 0.2 * 2*np.pi >>> eps0 = 1.0 * 2*np.pi @@ -107,7 +107,7 @@ In QuTiP we can define this Hamiltonian as follows: The :math:`t=0` Floquet modes corresponding to the Hamiltonian :eq:`eq_driven_qubit` can then be calculated using the :class:`qutip.FloquetBasis` class, which encapsulates the Floquet modes and the quasienergies: .. plot:: - :context: + :context: close-figs >>> T = 2*np.pi / omega >>> floquet_basis = FloquetBasis(H, T, args) @@ -131,7 +131,7 @@ For certain driving amplitudes the quasienergy levels cross. Since the quasienergies can be associated with the time-scale of the long-term dynamics due that the driving, degenerate quasienergies indicates a "freezing" of the dynamics (sometimes known as coherent destruction of tunneling). .. 
plot:: - :context: + :context: close-figs >>> delta = 0.2 * 2 * np.pi >>> eps0 = 0.0 * 2 * np.pi @@ -175,7 +175,7 @@ The purpose of calculating the Floquet modes is to find the wavefunction solutio To do that, we first need to decompose the initial state in the Floquet states, using the function :meth:`FloquetBasis.to_floquet_basis` .. plot:: - :context: + :context: close-figs >>> psi0 = rand_ket(2) >>> f_coeff = floquet_basis.to_floquet_basis(psi0) @@ -186,7 +186,7 @@ To do that, we first need to decompose the initial state in the Floquet states, and given this decomposition of the initial state in the Floquet states we can easily evaluate the wavefunction that is the solution to :eq:`eq_driven_qubit` at an arbitrary time :math:`t` using the function :meth:`FloquetBasis.from_floquet_basis`: .. plot:: - :context: + :context: close-figs >>> t = 10 * np.random.rand() >>> psi_t = floquet_basis.from_floquet_basis(f_coeff, t) @@ -283,3 +283,8 @@ Finally, :func:`qutip.solver.floquet.fmmesolve` always expects the ``e_ops`` to output = fmmesolve(H, psi0, tlist, [sigmax()], e_ops=[num(2)], spectra_cb=[noise_spectrum], T=T, args=args) p_ex = output.expect[0] + +.. plot:: + :context: reset + :include-source: false + :nofigs: \ No newline at end of file diff --git a/doc/guide/dynamics/dynamics-master.rst b/doc/guide/dynamics/dynamics-master.rst index 2fa7f87d9b..832b971e3f 100644 --- a/doc/guide/dynamics/dynamics-master.rst +++ b/doc/guide/dynamics/dynamics-master.rst @@ -30,7 +30,7 @@ It evolves the state vector and evaluates the expectation values for a set of op For example, the time evolution of a quantum spin-1/2 system with tunneling rate 0.1 that initially is in the up state is calculated, and the expectation values of the :math:`\sigma_z` operator evaluated, with the following code .. 
plot:: - :context: + :context: reset >>> H = 2*np.pi * 0.1 * sigmax() >>> psi0 = basis(2, 0) @@ -47,7 +47,7 @@ Adding operators to this list results in a larger output list returned by the fu .. plot:: - :context: + :context: close-figs >>> result = sesolve(H, psi0, times, e_ops=[sigmaz(), sigmay()]) >>> result.expect # doctest: +NORMALIZE_WHITESPACE @@ -64,7 +64,7 @@ Adding operators to this list results in a larger output list returned by the fu The resulting list of expectation values can easily be visualized using matplotlib's plotting functions: .. plot:: - :context: + :context: close-figs >>> H = 2*np.pi * 0.1 * sigmax() >>> psi0 = basis(2, 0) @@ -173,7 +173,7 @@ operators ``[sigmaz(), sigmay()]`` to the fifth argument. .. plot:: - :context: + :context: close-figs >>> times = np.linspace(0.0, 10.0, 100) >>> result = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], e_ops=[sigmaz(), sigmay()]) @@ -207,3 +207,8 @@ Now a slightly more complex example: Consider a two-level atom coupled to a leak >>> plt.ylabel('Expectation values') # doctest: +SKIP >>> plt.legend(("cavity photon number", "atom excitation probability")) # doctest: +SKIP >>> plt.show() # doctest: +SKIP + +.. plot:: + :context: reset + :include-source: false + :nofigs: \ No newline at end of file diff --git a/doc/guide/dynamics/dynamics-monte.rst b/doc/guide/dynamics/dynamics-monte.rst index ce05c5e912..5dda6b35eb 100644 --- a/doc/guide/dynamics/dynamics-monte.rst +++ b/doc/guide/dynamics/dynamics-monte.rst @@ -13,30 +13,30 @@ Introduction Where as the density matrix formalism describes the ensemble average over many identical realizations of a quantum system, the Monte Carlo (MC), or quantum-jump approach to wave function evolution, allows for simulating an individual realization of the system dynamics. 
Here, the environment is continuously monitored, resulting in a series of quantum jumps in the system wave function, conditioned on the increase in information gained about the state of the system via the environmental measurements. In general, this evolution is governed by the Schrödinger equation with a **non-Hermitian** effective Hamiltonian .. math:: - :label: heff + :label: heff - H_{\rm eff}=H_{\rm sys}-\frac{i\hbar}{2}\sum_{i}C^{+}_{n}C_{n}, + H_{\rm eff}=H_{\rm sys}-\frac{i\hbar}{2}\sum_{i}C^{+}_{n}C_{n}, where again, the :math:`C_{n}` are collapse operators, each corresponding to a separate irreversible process with rate :math:`\gamma_{n}`. Here, the strictly negative non-Hermitian portion of Eq. :eq:`heff` gives rise to a reduction in the norm of the wave function, that to first-order in a small time :math:`\delta t`, is given by :math:`\left<\psi(t+\delta t)|\psi(t+\delta t)\right>=1-\delta p` where .. math:: - :label: jump + :label: jump - \delta p =\delta t \sum_{n}\left<\psi(t)|C^{+}_{n}C_{n}|\psi(t)\right>, + \delta p =\delta t \sum_{n}\left<\psi(t)|C^{+}_{n}C_{n}|\psi(t)\right>, and :math:`\delta t` is such that :math:`\delta p \ll 1`. With a probability of remaining in the state :math:`\left|\psi(t+\delta t)\right>` given by :math:`1-\delta p`, the corresponding quantum jump probability is thus Eq. :eq:`jump`. If the environmental measurements register a quantum jump, say via the emission of a photon into the environment, or a change in the spin of a quantum dot, the wave function undergoes a jump into a state defined by projecting :math:`\left|\psi(t)\right>` using the collapse operator :math:`C_{n}` corresponding to the measurement .. math:: - :label: project + :label: project - \left|\psi(t+\delta t)\right>=C_{n}\left|\psi(t)\right>/\left<\psi(t)|C_{n}^{+}C_{n}|\psi(t)\right>^{1/2}. + \left|\psi(t+\delta t)\right>=C_{n}\left|\psi(t)\right>/\left<\psi(t)|C_{n}^{+}C_{n}|\psi(t)\right>^{1/2}. 
If more than a single collapse operator is present in Eq. :eq:`heff`, the probability of collapse due to the :math:`i\mathrm{th}`-operator :math:`C_{i}` is given by .. math:: - :label: pcn + :label: pcn - P_{i}(t)=\left<\psi(t)|C_{i}^{+}C_{i}|\psi(t)\right>/\delta p. + P_{i}(t)=\left<\psi(t)|C_{i}^{+}C_{i}|\psi(t)\right>/\delta p. Evaluating the MC evolution to first-order in time is quite tedious. Instead, QuTiP uses the following algorithm to simulate a single realization of a quantum system. Starting from a pure state :math:`\left|\psi(0)\right>`: @@ -69,7 +69,7 @@ function for master-equation evolution, except that the initial state must be a To illustrate the use of the Monte Carlo evolution of quantum systems in QuTiP, let's again consider the case of a two-level atom coupled to a leaky cavity. The only differences to the master-equation treatment is that in this case we invoke the :func:`qutip.mcsolve` function instead of :func:`qutip.mesolve` .. plot:: - :context: + :context: reset from qutip.solver.mcsolve import MCSolver, mcsolve @@ -102,7 +102,12 @@ When trajectories are stored, ``result.runs_expect`` is a list over the expectat The averages are stored in ``result.average_expect`` and the standard derivation of the expectation values in ``result.std_expect``. When the states are returned, ``result.runs_states`` will be an array of length ``ntraj``. Each element contains an array of "Qobj" type ket with the same number of elements as ``times``. ``result.average_states`` is a list of density matrices computed as the average of the states at each time step. Furthermore, the output will also contain a list of times at which the collapse occurred, and which collapse operators did the collapse. These can be obtained in ``result.col_times`` and ``result.col_which`` respectively. -Lastly ``result.photocurrent`` contain the measurement of the evolution. 
+ + +Photocurrent +------------ + +The photocurrent, previously computed using the ``photocurrent_sesolve`` and ``photocurrent_mesolve`` functions, is now included in the output of :func:`qutip.solver.mcsolve` as ``result.photocurrent``. .. plot:: @@ -143,6 +148,60 @@ Now, the Monte Carlo solver will calculate expectation values for both operators + + +Using the Improved Sampling Algorithm +------------------------------------- + +Oftentimes, quantum jumps are rare. This is especially true in the context of simulating gates +for quantum information purposes, where typical gate times are orders of magnitude smaller than +typical timescales for decoherence. In this case, using the standard monte-carlo sampling algorithm, +we often repeatedly sample the no-jump trajectory. We can thus reduce the number of required runs +by only sampling the no-jump trajectory once. We then extract the no-jump probability :math:`p`, +and for all future runs we only sample random numbers :math:`r_1` where :math:`r_1>p`, thus ensuring +that a jump will occur. When it comes time to compute expectation values, we weight the no-jump +trajectory by :math:`p` and the jump trajectories by :math:`1-p`. This algorithm is described +in [Abd19]_ and can be utilized by setting the option ``"improved_sampling"`` in the call to +``mcsolve``: + +.. plot:: + :context: close-figs + + data = mcsolve(H, psi0, times, [np.sqrt(0.1) * a], e_ops=[a.dag() * a, sm.dag() * sm], options={"improved_sampling": True}) + +where in this case the first run samples the no-jump trajectory, and the remaining 499 trajectories are all +guaranteed to include (at least) one jump. + +The power of this algorithm is most obvious when considering systems that rarely undergo jumps. +For instance, consider the following T1 simulation of a qubit with a lifetime of 10 microseconds +(assuming time is in units of nanoseconds) + + +..
plot:: + :context: close-figs + + times = np.linspace(0.0, 300.0, 100) + psi0 = fock(2, 1) + sm = fock(2, 0) * fock(2, 1).dag() + omega = 2.0 * np.pi * 1.0 + H0 = -0.5 * omega * sigmaz() + gamma = 1/10000 + data = mcsolve([H0], psi0, times, [np.sqrt(gamma) * sm], [sm.dag() * sm], ntraj=100) + data_imp = mcsolve([H0], psi0, times, [np.sqrt(gamma) * sm], [sm.dag() * sm],ntraj=100, options={"improved_sampling": True}) + + plt.figure() + plt.plot(times, data.expect[0], label="original") + plt.plot(times, data_imp.expect[0], label="improved sampling") + plt.plot(times, np.exp(-gamma * times), label=r"$\exp(-\gamma t)$") + plt.title('Monte Carlo: improved sampling algorithm') + plt.xlabel("time [ns]") + plt.ylabel(r"$p_{1}$") + plt.legend() + plt.show() + + +The original sampling algorithm samples the no-jump trajectory on average 96.7% of the time, while the improved +sampling algorithm only does so once. + + .. _monte-reuse: Reusing Hamiltonian Data @@ -235,3 +294,149 @@ For example, the following code block plots expectation values for 1, 10 and 100 plt.ylabel('Expectation values') plt.legend() plt.show() + +.. _openmcsolve: + +Open Systems +------------ + +``mcsolve`` can be used to study systems which have both measured and dissipative interactions with the bath. +This is done by using a liouvillian including the dissipative interaction instead of a Hamiltonian. + +.. plot:: + :context: close-figs + + times = np.linspace(0.0, 10.0, 200) + psi0 = tensor(fock(2, 0), fock(10, 8)) + a = tensor(qeye(2), destroy(10)) + sm = tensor(destroy(2), qeye(10)) + H = 2*np.pi*a.dag()*a + 2*np.pi*sm.dag()*sm + 2*np.pi*0.25*(sm*a.dag() + sm.dag()*a) + L = liouvillian(H, [0.01 * sm, np.sqrt(0.1) * a]) + data = mcsolve(L, psi0, times, [np.sqrt(0.1) * a], e_ops=[a.dag() * a, sm.dag() * sm]) + + plt.figure() + plt.plot((times[:-1] + times[1:])/2, data.photocurrent[0]) + plt.title('Monte Carlo Photocurrent') + plt.xlabel('Time') + plt.ylabel('Photon detections') + plt.show() + + + +..
_monte-nonmarkov: + +Monte Carlo for Non-Markovian Dynamics +-------------------------------------- + +The Monte Carlo solver of QuTiP can also be used to solve the dynamics of time-local non-Markovian master equations, i.e., master equations of the Lindblad form + +.. math:: + :label: lindblad_master_equation_with_rates + + \dot\rho(t) = -\frac{i}{\hbar} [H, \rho(t)] + \sum_n \frac{\gamma_n(t)}{2} \left[2 A_n \rho(t) A_n^\dagger - \rho(t) A_n^\dagger A_n - A_n^\dagger A_n \rho(t)\right] + +with "rates" :math:`\gamma_n(t)` that can take negative values. +This can be done with the :func:`qutip.nm_mcsolve` function. +The function is based on the influence martingale formalism [Donvil22]_ and formally requires that the collapse operators :math:`A_n` satisfy a completeness relation of the form + +.. math:: + :label: nmmcsolve_completeness + + \sum_n A_n^\dagger A_n = \alpha \mathbb{I} , + +where :math:`\mathbb{I}` is the identity operator on the system Hilbert space and :math:`\alpha>0`. +Note that when the collapse operators of a model don't satisfy such a relation, ``qutip.nm_mcsolve`` automatically adds an extra collapse operator such that :eq:`nmmcsolve_completeness` is satisfied. +The rate corresponding to this extra collapse operator is set to zero. + +Technically, the influence martingale formalism works as follows. +We introduce an influence martingale :math:`\mu(t)`, which follows the evolution of the system state. +When no jump happens, it evolves as + +.. math:: + :label: influence_cont + + \mu(t) = \exp\left( \alpha\int_0^t K(\tau) d\tau \right) + +where :math:`K(t)` is for now an arbitrary function. +When a jump corresponding to the collapse operator :math:`A_n` happens, the influence martingale becomes + +.. math:: + :label: influence_disc + + \mu(t+\delta t) = \mu(t)\left(\frac{K(t)-\gamma_n(t)}{\gamma_n(t)}\right) + +Assuming that the state :math:`\bar\rho(t)` computed by the Monte Carlo average + +.. 
math:: + :label: mc_paired_state + + \bar\rho(t) = \frac{1}{N}\sum_{l=1}^N |\psi_l(t)\rangle\langle \psi_l(t)| + +solves a Lindblad master equation with collapse operators :math:`A_n` and rates :math:`\Gamma_n(t)`, the state :math:`\rho(t)` defined by + +.. math:: + :label: mc_martingale_state + + \rho(t) = \frac{1}{N}\sum_{l=1}^N \mu_l(t) |\psi_l(t)\rangle\langle \psi_l(t)| + +solves a Lindblad master equation with collapse operators :math:`A_n` and shifted rates :math:`\gamma_n(t)-K(t)`. +Thus, while :math:`\Gamma_n(t) \geq 0`, the new "rates" :math:`\gamma_n(t) = \Gamma_n(t) - K(t)` satisfy no positivity requirement. + +The input of :func:`qutip.nm_mcsolve` is almost the same as for :func:`qutip.mcsolve`. +The only difference is how the collapse operators and rate functions should be defined. +``nm_mcsolve`` requires collapse operators :math:`A_n` and target "rates" :math:`\gamma_n` (which are allowed to take negative values) to be given in list form ``[[C_1, gamma_1], [C_2, gamma_2], ...]``. +Note that we give the actual rate and not its square root, and that ``nm_mcsolve`` automatically computes associated jump rates :math:`\Gamma_n(t)\geq0` appropriate for simulation. + +We conclude with a simple example demonstrating the usage of the ``nm_mcsolve`` function. +For more elaborate, physically motivated examples, we refer to the `accompanying tutorial notebook `_. + + +.. 
plot:: + :context: reset + + import qutip as qt + + times = np.linspace(0, 1, 201) + psi0 = qt.basis(2, 1) + a0 = qt.destroy(2) + H = a0.dag() * a0 + + # Rate functions + gamma1 = "kappa * nth" + gamma2 = "kappa * (nth+1) + 12 * np.exp(-2*t**3) * (-np.sin(15*t)**2)" + # gamma2 becomes negative during some time intervals + + # nm_mcsolve integration + ops_and_rates = [] + ops_and_rates.append([a0.dag(), gamma1]) + ops_and_rates.append([a0, gamma2]) + MCSol = qt.nm_mcsolve(H, psi0, times, ops_and_rates, + args={'kappa': 1.0 / 0.129, 'nth': 0.063}, + e_ops=[a0.dag() * a0, a0 * a0.dag()], + options={'map': 'parallel'}, ntraj=2500) + + # mesolve integration for comparison + d_ops = [[qt.lindblad_dissipator(a0.dag(), a0.dag()), gamma1], + [qt.lindblad_dissipator(a0, a0), gamma2]] + MESol = qt.mesolve(H, psi0, times, d_ops, e_ops=[a0.dag() * a0, a0 * a0.dag()], + args={'kappa': 1.0 / 0.129, 'nth': 0.063}) + + plt.figure() + plt.plot(times, MCSol.expect[0], 'g', + times, MCSol.expect[1], 'b', + times, MCSol.trace, 'r') + plt.plot(times, MESol.expect[0], 'g--', + times, MESol.expect[1], 'b--') + plt.title('Monte Carlo time evolution') + plt.xlabel('Time') + plt.ylabel('Expectation values') + plt.legend((r'$\langle 1 | \rho | 1 \rangle$', + r'$\langle 0 | \rho | 0 \rangle$', + r'$\operatorname{tr} \rho$')) + plt.show() + + +.. plot:: + :context: reset + :include-source: false + :nofigs: diff --git a/doc/guide/dynamics/dynamics-photocurrent.rst b/doc/guide/dynamics/dynamics-photocurrent.rst deleted file mode 100644 index 5b69a338d4..0000000000 --- a/doc/guide/dynamics/dynamics-photocurrent.rst +++ /dev/null @@ -1,104 +0,0 @@ -.. _stochastic_photo: - -******************************** -Stochastic Solver - Photocurrent -******************************** - -.. _photocurrent-intro: - -Photocurrent method, like monte-carlo method, allows for simulating an -individual realization of the system evolution under continuous measurement. - -Closed system -------------- - -.. 
photocurent_Schrodinger_equation - -Photocurrent evolution have the state evolve deterministically between quantum jumps. -During the deterministic part, the system evolve by schrodinger equation with a -non-hermitian, norm conserving effective Hamiltonian. - -.. math:: - :label: pssesolve_heff - - H_{\rm eff}=H_{\rm sys}+ - \frac{i\hbar}{2}\left( -\sum_{n}C^{+}_{n}C_{n}+ |C_{n} \psi |^2\right). - -With :math:`C_{n}`, the collapse operators. -This effective Hamiltonian is equivalent to the monte-carlo effective -Hamiltonian with an extra term to keep the state normalized. -At each time step of :math:`\delta t`, the wave function has a probability - -.. math:: - :label: pssesolve_jump_prob - - \delta p_{n} = \left<\psi(t)|C_{n}^{+}C_{n}|\psi(t)\right> \delta t - -of making a quantum jump. :math:`\delta t` must be chosen small enough to keep -that probability small :math:`\delta p << 1`. *If multiple jumps happen at the -same time step, the state become unphysical.* -Each jump result in a sharp variation of the state by, - -.. math:: - :label: pssesolve_jump - - \delta \psi = \left( \frac{C_n \psi} {\left| C_n \psi \right|} - \psi \right) - -The basic photocurrent method directly integrates these equations to the first-order. -Starting from a state :math:`\left|\psi(0)\right>`, it evolves the state according to - -.. math:: - :label: pssesolve_sde - - \delta \psi(t) = - i H_{\rm sys} \psi(t) \delta t + \sum_n \left( - -\frac{C_n^{+} C_n}{2} \psi(t) \delta t - + \frac{ \left| C_n \psi \right| ^2}{2} \delta t - + \delta N_n \left( \frac{C_n \psi} - {\left| C_n \psi \right|} - \psi \right)\right), - -for each time-step. -Here :math:`\delta N = 1` with a probability of :math:`\delta \omega` and -:math:`\delta N_n = 0` with a probability of :math:`1-\delta \omega`. - -Trajectories obtained with this algorithm are equivalent to those obtained with -monte-carlo evolution (up to :math:`O(\delta t^2)`). 
-In most cases, :func:`qutip.mcsolve` is more efficient than -:func:`qutip.stochastic.photocurrent_sesolve`. - -Open system ------------ -.. photocurent_Master_equation - -Photocurrent approach allows to obtain trajectories for a system with -both measured and dissipative interaction with the bath. -The system evolves according to the master equation between jumps with a modified -liouvillian - -.. math:: - :label: master_equation - - L_{\rm eff}(\rho(t)) = L_{\rm sys}(\rho(t)) + - \sum_{n}\left( - \rm{tr} \left(C_{n}^{+}C_{n} \rho C_{n}^{+}C_{n} \right) - - C_{n}^{+}C_{n} \rho C_{n}^{+}C_{n} \right), - -with the probability of jumps in a time step :math:`\delta t` given by - -.. math:: - :label: psmesolve_rate - - \delta p = \rm{tr} \left( C \rho C^{+} \right) \delta t. - -After a jump, the density matrix become - -.. math:: - - \rho' = \frac{C \rho C^{+}}{\rm{tr} \left( C \rho C^{+} \right)}. - -The evolution of the system at each time step if thus given by - -.. math:: - :label: psmesolve_sde - - \rho(t + \delta t) = \rho(t) + L_{\rm eff}(\rho) \delta t + \delta N - \left(\frac{C \rho C^{+}}{\rm{tr} \left( C \rho C^{+} \right)} - \rho \right). diff --git a/doc/guide/dynamics/dynamics-stochastic.rst b/doc/guide/dynamics/dynamics-stochastic.rst index 890a27457b..faa2baf9a4 100644 --- a/doc/guide/dynamics/dynamics-stochastic.rst +++ b/doc/guide/dynamics/dynamics-stochastic.rst @@ -6,7 +6,8 @@ Stochastic Solver .. _stochastic-intro: -When a quantum system is subjected to continuous measurement, through homodyne detection for example, it is possible to simulate the conditional quantum state using stochastic Schrodinger and master equations. The solution of these stochastic equations are quantum trajectories, which represent the conditioned evolution of the system given a specific measurement record. 
+When a quantum system is subjected to continuous measurement, through homodyne detection for example, it is possible to simulate the conditional quantum state using stochastic Schrodinger and master equations. +The solution of these stochastic equations are quantum trajectories, which represent the conditioned evolution of the system given a specific measurement record. In general, the stochastic evolution of a quantum state is calculated in QuTiP by solving the general equation @@ -16,7 +17,7 @@ QuTiP by solving the general equation d \rho (t) = d_1 \rho dt + \sum_n d_{2,n} \rho dW_n, -where :math:`dW_n` is a Wiener increment, which has the expectation values :math:`E[dW] = 0` and :math:`E[dW^2] = dt`. Stochastic evolution is implemented with the :func:`qutip.stochastic.general_stochastic` function. +where :math:`dW_n` is a Wiener increment, which has the expectation values :math:`E[dW] = 0` and :math:`E[dW^2] = dt`. Stochastic Schrodinger Equation =============================== @@ -29,9 +30,9 @@ The stochastic Schrodinger equation is given by (see section 4.4, [Wis09]_) :label: jump_ssesolve d \psi(t) = - i H \psi(t) dt - - \sum_n \left( \frac{S_n^\dagger S_n}{2} -\frac{e_n}{2} S_n - + \frac{e_n^2}{8} \right) \psi(t) dt - + \sum_n \left( S_n - \frac{e_n}{2} \right) \psi(t) dW_n, + - \sum_n \left( \frac{S_n^\dagger S_n}{2} -\frac{e_n}{2} S_n + + \frac{e_n^2}{8} \right) \psi(t) dt + + \sum_n \left( S_n - \frac{e_n}{2} \right) \psi(t) dW_n, where :math:`H` is the Hamiltonian, :math:`S_n` are the stochastic collapse operators, and :math:`e_n` is @@ -40,12 +41,12 @@ where :math:`H` is the Hamiltonian, :math:`S_n` are the stochastic collapse oper e_n = \left<\psi(t)|S_n + S_n^\dagger|\psi(t)\right> -In QuTiP, this equation can be solved using the function :func:`qutip.stochastic.ssesolve`, which is implemented by defining :math:`d_1` and :math:`d_{2,n}` from Equation :eq:`general_form` as +In QuTiP, this equation can be solved using the function 
:func:`qutip.solver.stochastic.ssesolve`, which is implemented by defining :math:`d_1` and :math:`d_{2,n}` from Equation :eq:`general_form` as .. math:: :label: d1_def - d_1 = -iH - \frac{1}{2} \sum_n \left(S_n^\dagger S_n - e_n S_n + \frac{e_i^2}{4} \right), + d_1 = -iH - \frac{1}{2} \sum_n \left(S_n^\dagger S_n - e_n S_n + \frac{e_i^2}{4} \right), and @@ -54,9 +55,11 @@ and d_{2, n} = S_n - \frac{e_n}{2}. -The solver :func:`qutip.stochastic.ssesolve` will construct the operators :math:`d_1` and :math:`d_{2,n}` once the user passes the Hamiltonian (``H``) and the stochastic operator list (``sc_ops``). As with the :func:`qutip.mcsolve`, the number of trajectories and the seed for the noise realisation can be fixed using the arguments: ``ntraj`` and ``noise``, respectively. If the user also requires the measurement output, the argument ``store_measurement=True`` should be included. +The solver :func:`qutip.solver.stochastic.ssesolve` will construct the operators :math:`d_1` and :math:`d_{2,n}` once the user passes the Hamiltonian (``H``) and the stochastic operator list (``sc_ops``). +As with the :func:`qutip.solver.mcsolve.mcsolve`, the number of trajectories and the seed for the noise realisation can be fixed using the arguments: ``ntraj`` and ``seeds``, respectively. +If the user also requires the measurement output, the options entry ``{"store_measurement": True}`` should be included. -Additionally, homodyne and heterodyne detections can be easily simulated by passing the arguments ``method='homodyne'`` or ``method='heterodyne'`` to :func:`qutip.stochastic.ssesolve`. +By default, homodyne detection is used. Heterodyne detection can be easily simulated by passing the argument ``heterodyne=True`` to :func:`qutip.solver.stochastic.ssesolve`. Examples of how to solve the stochastic Schrodinger equation using QuTiP can be found in this `development notebook `_. @@ -65,7 +68,8 @@ Stochastic Master Equation ..
Stochastic Master equation -When the initial state of the system is a density matrix :math:`\rho`, the stochastic master equation solver :func:`qutip.stochastic.smesolve` must be used. The stochastic master equation is given by (see section 4.4, [Wis09]_) +When the initial state of the system is a density matrix :math:`\rho`, the stochastic master equation solver :func:`qutip.solver.stochastic.smesolve` must be used. +The stochastic master equation is given by (see section 4.4, [Wis09]_) .. math:: :label: stochastic_master @@ -88,7 +92,9 @@ and \mathcal{H}[A]\rho = A\rho(t) + \rho(t) A^\dagger - \tr[A\rho(t) + \rho(t) A^\dagger]. -In QuTiP, solutions for the stochastic master equation are obtained using the solver :func:`qutip.stochastic.smesolve`. The implementation takes into account 2 types of collapse operators. :math:`C_i` (``c_ops``) represent the dissipation in the environment, while :math:`S_n` (``sc_ops``) are monitored operators. The deterministic part of the evolution, described by the :math:`d_1` in Equation :eq:`general_form`, takes into account all operators :math:`C_i` and :math:`S_n`: +In QuTiP, solutions for the stochastic master equation are obtained using the solver :func:`qutip.solver.stochastic.smesolve`. +The implementation takes into account 2 types of collapse operators. :math:`C_i` (``c_ops``) represent the dissipation in the environment, while :math:`S_n` (``sc_ops``) are monitored operators. +The deterministic part of the evolution, described by the :math:`d_1` in Equation :eq:`general_form`, takes into account all operators :math:`C_i` and :math:`S_n`: .. math:: :label: liouvillian @@ -98,7 +104,6 @@ In QuTiP, solutions for the stochastic master equation are obtained using the so + \sum_n D[S_n]\rho, - The stochastic part, :math:`d_{2,n}`, is given solely by the operators :math:`S_n` .. 
math:: @@ -107,55 +112,56 @@ The stochastic part, :math:`d_{2,n}`, is given solely by the operators :math:`S_ d_{2,n} = S_n \rho(t) + \rho(t) S_n^\dagger - \tr \left(S_n \rho (t) + \rho(t) S_n^\dagger \right)\rho(t). -As in the stochastic Schrodinger equation, the detection method can be specified using the ``method`` argument. +As in the stochastic Schrodinger equation, heterodyne detection can be chosen by passing ``heterodyne=True``. Example ------- -Below, we solve the dynamics for an optical cavity at 0K whose output is monitored using homodyne detection. The cavity decay rate is given by :math:`\kappa` and the :math:`\Delta` is the cavity detuning with respect to the driving field. The measurement operators can be passed using the option ``m_ops``. The homodyne current :math:`J_x` is calculated using +Below, we solve the dynamics for an optical cavity at 0K whose output is monitored using homodyne detection. +The cavity decay rate is given by :math:`\kappa` and the :math:`\Delta` is the cavity detuning with respect to the driving field. +The measurement operators can be passed using the option ``m_ops``. The homodyne current :math:`J_x` is calculated using .. math:: :label: measurement_result - J_x = \langle x \rangle + dW, + J_x = \langle x \rangle + dW / dt, where :math:`x` is the operator passed using ``m_ops``. The results are available in ``result.measurements``. .. 
plot:: - :context: close-figs + :context: reset import numpy as np import matplotlib.pyplot as plt - import qutip as qt + import qutip # parameters - DIM = 20 # Hilbert space dimension - DELTA = 5*2*np.pi # cavity detuning - KAPPA = 2 # cavity decay rate - INTENSITY = 4 # intensity of initial state + DIM = 20 # Hilbert space dimension + DELTA = 5 * 2 * np.pi # cavity detuning + KAPPA = 2 # cavity decay rate + INTENSITY = 4 # intensity of initial state NUMBER_OF_TRAJECTORIES = 500 # operators - a = qt.destroy(DIM) + a = qutip.destroy(DIM) x = a + a.dag() - H = DELTA*a.dag()* a + H = DELTA * a.dag() * a - rho_0 = qt.coherent(DIM, np.sqrt(INTENSITY)) + rho_0 = qutip.coherent(DIM, np.sqrt(INTENSITY)) times = np.arange(0, 1, 0.0025) - stoc_solution = qt.smesolve(H, rho_0, times, - c_ops=[], - sc_ops=[np.sqrt(KAPPA) * a], - e_ops=[x], - ntraj=NUMBER_OF_TRAJECTORIES, - nsubsteps=2, - store_measurement=True, - dW_factors=[1], - method='homodyne') + stoc_solution = qutip.smesolve( + H, rho_0, times, + c_ops=[], + sc_ops=[np.sqrt(KAPPA) * a], + e_ops=[x], + ntraj=NUMBER_OF_TRAJECTORIES, + options={"dt": 0.00125, "store_measurement":True,} + ) fig, ax = plt.subplots() ax.set_title('Stochastic Master Equation - Homodyne Detection') - ax.plot(times, np.array(stoc_solution.measurement).mean(axis=0)[:].real, + ax.plot(times[1:], np.array(stoc_solution.measurement).mean(axis=0)[0, :].real, 'r', lw=2, label=r'$J_x$') ax.plot(times, stoc_solution.expect[0], 'k', lw=2, label=r'$\langle x \rangle$') @@ -163,4 +169,9 @@ where :math:`x` is the operator passed using ``m_ops``. The results are availabl ax.legend() -For other examples on :func:`qutip.stochastic.smesolve`, see the `following notebook `_, as well as these notebooks available at `QuTiP Tutorials page `_: `heterodyne detection `_, `inneficient detection `_, and `feedback control `_. 
+For other examples on :func:`qutip.solver.stochastic.smesolve`, see the `following notebook `_, as well as these notebooks available at `QuTiP Tutorials page `_: `heterodyne detection `_, `inefficient detection `_, and `feedback control `_. + +.. plot:: + :context: reset + :include-source: false + :nofigs: diff --git a/doc/guide/dynamics/dynamics-time.rst b/doc/guide/dynamics/dynamics-time.rst index 8902d2b94d..4885803a65 100644 --- a/doc/guide/dynamics/dynamics-time.rst +++ b/doc/guide/dynamics/dynamics-time.rst @@ -73,7 +73,7 @@ As an example, we will look at a case with a time-dependent Hamiltonian of the f The following code sets up the problem .. plot:: - :context: close-figs + :context: reset ustate = basis(3, 0) excited = basis(3, 1) @@ -107,7 +107,7 @@ The following code sets up the problem Given that we have a single time-dependent Hamiltonian term, and constant collapse terms, we need to specify a single Python function for the coefficient :math:`f(t)`. In this case, one can simply do .. plot:: - :context: + :context: close-figs :nofigs: def H1_coeff(t): @@ -342,10 +342,10 @@ Any function or method that can be called by ``f(t, args)``, ``f(t, **args)`` is **String coefficients** : Use a string containing a simple Python expression. The variable ``t``, common mathematical functions such as ``sin`` or ``exp`` an variable in args will be available. -If available, the string will be compiled using cython, fixing variable type when possible, allowing slightly faster excution than function. +If available, the string will be compiled using cython, fixing variable type when possible, allowing slightly faster execution than function. While the speed up is usually very small, in long evolution, numerous calls to the functions are made and it's can accumulate. From version 5, compilation of the coefficient is done only once and saved between sessions. -When Cython is not available, the code will be executed in python with the same environment. 
+When either the cython or filelock modules are not available, the code will be executed in python using ``exec`` with the same environment. This, however, as no advantage over using python function. @@ -435,3 +435,8 @@ Accessing the state from solver =============================== In QuTiP 4.4 to 4.7, it was possible to request that the solver pass the state, expectation values or collapse operators via arguments to :class:`QobjEvo`. Support for this is not yet available in QuTiP 5. + +.. plot:: + :context: reset + :include-source: false + :nofigs: diff --git a/doc/guide/figures/sprep-wood-diagram.png b/doc/guide/figures/sprep-wood-diagram.png new file mode 100644 index 0000000000..f7c2b2c3d0 Binary files /dev/null and b/doc/guide/figures/sprep-wood-diagram.png differ diff --git a/doc/guide/guide-dynamics.rst b/doc/guide/guide-dynamics.rst index 5f214bb8b3..8f6d85538c 100644 --- a/doc/guide/guide-dynamics.rst +++ b/doc/guide/guide-dynamics.rst @@ -12,7 +12,6 @@ Time Evolution and Quantum System Dynamics dynamics/dynamics-master.rst dynamics/dynamics-monte.rst dynamics/dynamics-krylov.rst - dynamics/dynamics-photocurrent.rst dynamics/dynamics-stochastic.rst dynamics/dynamics-time.rst dynamics/dynamics-bloch-redfield.rst diff --git a/doc/guide/guide-super.rst b/doc/guide/guide-super.rst new file mode 100644 index 0000000000..76ec70cb19 --- /dev/null +++ b/doc/guide/guide-super.rst @@ -0,0 +1,106 @@ +.. _super: + +***************************************************** +Superoperators, Pauli Basis and Channel Contraction +***************************************************** +written by `Christopher Granade `, Institute for Quantum Computing + + +In this guide, we will demonstrate the :func:`tensor_contract` function, which contracts one or more pairs of indices of a Qobj. This functionality can be used to find rectangular superoperators that implement the partial trace channel :math:`S(\rho) = \Tr_2(\rho)`, for instance. 
Using this functionality, we can quickly turn a system-environment representation of an open quantum process into a superoperator representation. + +.. _super-representation-plotting: + +Superoperator Representations and Plotting +========================================== + + +We start off by first demonstrating plotting of superoperators, as this will be useful to us in visualizing the results of a contracted channel. + + +In particular, we will use Hinton diagrams as implemented by :func:`qutip.visualization.hinton`, which +show the real parts of matrix elements as squares whose size and color both correspond to the magnitude of each element. To illustrate, we first plot a few density operators. + +.. plot:: + :context: reset + + from qutip import hinton, identity, Qobj, to_super, sigmaz, tensor, tensor_contract + from qutip.core.gates import cnot, hadamard_transform + + hinton(identity([2, 3]).unit()) + hinton(Qobj([[1, 0.5], [0.5, 1]]).unit()) + + +We show superoperators as matrices in the *Pauli basis*, such that any Hermiticity-preserving map is represented by a real-valued matrix. This is especially convenient for use with Hinton diagrams, as the plot thus carries complete information about the channel. + +As an example, conjugation by :math:`\sigma_z` leaves :math:`\mathbb{1}` and :math:`\sigma_z` invariant, but flips the sign of :math:`\sigma_x` and :math:`\sigma_y`. This is indicated in Hinton diagrams by a negative-valued square for the sign change and a positive-valued square for a +1 sign. + +.. plot:: + :context: + + hinton(to_super(sigmaz())) + + +As a couple more examples, we also consider the supermatrix for a Hadamard transform and for :math:`\sigma_z \otimes H`. + +.. plot:: + :context: + + hinton(to_super(hadamard_transform())) + hinton(to_super(tensor(sigmaz(), hadamard_transform()))) + +.. _super-reduced-channels: + +Reduced Channels +================ + +As an example of tensor contraction, we now consider the map + +.. 
math:: + + S(\rho)=\Tr_2 (\scriptstyle \rm CNOT (\rho \otimes \ket{0}\bra{0}) \scriptstyle \rm CNOT^\dagger) + +We can think of the :math:`\scriptstyle \rm CNOT` here as a system-environment representation of an open quantum process, in which an environment register is prepared in a state :math:`\rho_{\text{anc}}`, then a unitary acts jointly on the system of interest and environment. Finally, the environment is traced out, leaving a *channel* on the system alone. In terms of `Wood diagrams `, this can be represented as the composition of a preparation map, evolution under the system-environment unitary, and then a measurement map. + +.. figure:: figures/sprep-wood-diagram.png + :align: center + :width: 2.5in + + +The two tensor wires on the left indicate where we must take a tensor contraction to obtain the measurement map. Numbering the tensor wires from 0 to 3, this corresponds to a :func:`tensor_contract` argument of ``(1, 3)``. + +.. plot:: + :context: + :nofigs: + + tensor_contract(to_super(identity([2, 2])), (1, 3)) + +Meanwhile, the :func:`super_tensor` function implements the swap on the right, such that we can quickly find the preparation map. + +.. plot:: + :context: + :nofigs: + + q = tensor(identity(2), basis(2)) + s_prep = sprepost(q, q.dag()) + +For a :math:`\scriptstyle \rm CNOT` system-environment model, the composition of these maps should give us a completely dephasing channel. The channel on both qubits is just the superunitary :math:`\scriptstyle \rm CNOT` channel: + +.. plot:: + :context: + + hinton(to_super(cnot())) + +We now complete by multiplying the superunitary :math:`\scriptstyle \rm CNOT` by the preparation channel above, then applying the partial trace channel by contracting the second and fourth indices. As expected, this gives us a dephasing map. + +.. plot:: + :context: + + hinton(tensor_contract(to_super(cnot()), (1, 3)) * s_prep) + + +.. 
plot:: + :context: reset + :include-source: false + :nofigs: + + # reset the context at the end diff --git a/doc/guide/guide-visualization.rst b/doc/guide/guide-visualization.rst index 0b99bf4f57..2b91b30ebb 100644 --- a/doc/guide/guide-visualization.rst +++ b/doc/guide/guide-visualization.rst @@ -105,11 +105,14 @@ above, so QuTiP provides a convenience function for doing this, see fig, axes = plt.subplots(1, 3, figsize=(12,3)) - plot_fock_distribution(rho_coherent, fig=fig, ax=axes[0], title="Coherent state"); + fig, axes[0] = plot_fock_distribution(rho_coherent, fig=fig, ax=axes[0]); + axes[0].set_title('Coherent state') - plot_fock_distribution(rho_thermal, fig=fig, ax=axes[1], title="Thermal state"); + fig, axes[1] = plot_fock_distribution(rho_thermal, fig=fig, ax=axes[1]); + axes[1].set_title('Thermal state') - plot_fock_distribution(rho_fock, fig=fig, ax=axes[2], title="Fock state"); + fig, axes[2] = plot_fock_distribution(rho_fock, fig=fig, ax=axes[2]); + axes[2].set_title('Fock state') fig.tight_layout() @@ -278,10 +281,9 @@ structure and relative importance of various elements. QuTiP offers a few functions for quickly visualizing matrix data in the form of histograms, :func:`qutip.visualization.matrix_histogram` and -:func:`qutip.visualization.matrix_histogram_complex`, and as Hinton diagram of weighted -squares, :func:`qutip.visualization.hinton`. These functions takes a -:class:`qutip.Qobj` as first argument, and optional arguments to, for -example, set the axis labels and figure title (see the function's documentation +as Hinton diagram of weighted squares, :func:`qutip.visualization.hinton`. +These functions takes a :class:`qutip.Qobj` as first argument, and optional arguments to, +for example, set the axis labels and figure title (see the function's documentation for details). 
For example, to illustrate the use of :func:`qutip.visualization.matrix_histogram`, diff --git a/doc/guide/guide.rst b/doc/guide/guide.rst index b014a23c00..39fdc1ccd3 100644 --- a/doc/guide/guide.rst +++ b/doc/guide/guide.rst @@ -11,6 +11,7 @@ Users Guide guide-basics.rst guide-states.rst guide-tensor.rst + guide-super.rst guide-dynamics.rst guide-heom.rst guide-steady.rst diff --git a/doc/guide/heom/bosonic.rst b/doc/guide/heom/bosonic.rst index 3ca3b6e1df..48856a1187 100644 --- a/doc/guide/heom/bosonic.rst +++ b/doc/guide/heom/bosonic.rst @@ -1,4 +1,4 @@ -solver.heom#################### +#################### Bosonic Environments #################### diff --git a/doc/guide/heom/history.rst b/doc/guide/heom/history.rst index 1328a1e949..dedcdf4922 100644 --- a/doc/guide/heom/history.rst +++ b/doc/guide/heom/history.rst @@ -46,12 +46,12 @@ at https://github.com/tehruhn/bofin/tree/main/examples. Current implementation ---------------------- -The current implementation is a rewrite of BoFiN in pure Python. It's -right-hand side construction has similar speed to BoFiN-fast, but is written -in pure Python. Built-in implementations of a variety of different baths -are provided, and a single solver is used for both fermionic and bosonic baths. -Multiple baths of the same kind (either fermionic or bosonic) may be -specified in a single problem, and there is good support for working with -the auxiliary density operator (ADO) state and extracting information from it. +The current implementation is a rewrite of BoFiN in pure Python. It's right-hand +side construction has similar speed to BoFiN-fast, but is written in pure +Python. Built-in implementations of a variety of different baths are provided, +and a single solver is used for both fermionic and bosonic baths. 
Multiple baths +of either the same kind, or a mixture of fermionic and bosonic baths, may be +specified in a single problem, and there is good support for working with the +auxiliary density operator (ADO) state and extracting information from it. The code was written by Neill Lambert and Simon Cross. diff --git a/doc/installation.rst b/doc/installation.rst index df3308a1fd..efce6af430 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -64,7 +64,7 @@ In addition, there are several optional packages that provide additional functio | ``pytest``, | 5.3+ | For running the test suite. | | ``pytest-rerunfailures`` | | | +--------------------------+--------------+-----------------------------------------------------+ -| LaTeX | TeXLive 2009+| Needed if using LaTeX in matplotlib figures, or for | +| LaTeX | TeXLive 2009+| Needed if using LaTeX in matplotlib figures, or for | | | | nice circuit drawings in IPython. | +--------------------------+--------------+-----------------------------------------------------+ @@ -128,6 +128,24 @@ You activate the new environment by running You can also install any more optional packages you want with ``conda install``, for example ``matplotlib``, ``ipython`` or ``jupyter``. + +Installation of the pre-release of version 5 +============================================ + +QuTiP version 5 has been in development for some time and brings many new features and heavily reworks the core functionalities of QuTiP. +It is available as a pre-release on PyPI. Anyone wanting to try the new features can install it with: + +.. code-block:: bash + + pip install --pre qutip + +We expect the pre-release to fully work. +If you find any bugs, confusing documentation or missing features, please create an issue on `github `_. + +This version breaks compatibility with QuTiP 4.7 in many small ways. +Please see the :doc:`changelog` for a list of changes, new features and deprecations. + + .. 
_install-from-source: Installing from Source @@ -192,7 +210,7 @@ To install OpenMP support, if available, run: This will attempt to load up OpenMP libraries during the compilation process, which depends on you having suitable C++ compiler and library support. If you are on Linux this is probably already done, but the compiler macOS ships with does not have OpenMP support. You will likely need to refer to external operating-system-specific guides for more detail here, as it may be very non-trivial to correctly configure. - + If you wish to contribute to the QuTiP project, then you will want to create your own fork of `the QuTiP git repository `_, clone this to a local folder, and install it into your Python environment using: .. code-block:: bash diff --git a/doc/requirements.txt b/doc/requirements.txt index cb4d830af3..68e490ddb4 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,50 +1,48 @@ -alabaster==0.7.12 -appnope==0.1.2 -Babel==2.9.1 +alabaster==0.7.13 +Babel==2.12.1 backcall==0.2.0 -certifi==2022.12.7 +certifi==2023.7.22 chardet==4.0.0 cycler==0.10.0 -Cython==0.29.23 -decorator==5.0.7 -docutils==0.16 -idna==2.10 -imagesize==1.2.0 -ipython==7.31.1 -ipython-genutils==0.2.0 -jedi==0.18.0 -Jinja2==2.11.3 -kiwisolver==1.3.1 -MarkupSafe==1.1.1 -matplotlib==3.3.4 -numpy==1.22.0 -numpydoc==1.1.0 -packaging==20.9 -parso==0.8.2 +Cython==0.29.33 +decorator==5.1.1 +docutils==0.18.1 +idna==3.4 +imagesize==1.4.1 +ipython==8.11.0 +jedi==0.18.2 +Jinja2==3.1.2 +kiwisolver==1.4.4 +MarkupSafe==2.1.2 +matplotlib==3.7.1 +numpy==1.24.2 +numpydoc==1.5.0 +packaging==23.0 +parso==0.8.3 pexpect==4.8.0 pickleshare==0.7.5 -Pillow==9.3.0 -prompt-toolkit==3.0.18 +Pillow==9.4.0 +prompt-toolkit==3.0.38 ptyprocess==0.7.0 -Pygments==2.8.1 -pyparsing==2.4.7 -python-dateutil==2.8.1 -pytz==2021.1 -requests==2.25.1 -scipy==1.6.2 -six==1.15.0 -snowballstemmer==2.1.0 -Sphinx==3.5.4 -sphinx-gallery==0.8.2 -sphinx-rtd-theme==0.5.2 -sphinxcontrib-applehelp==1.0.2 
-sphinxcontrib-bibtex==2.4.1 +Pygments==2.15.0 +pyparsing==3.0.9 +python-dateutil==2.8.2 +pytz==2023.3 +requests==2.31.0 +scipy==1.10.1 +six==1.16.0 +snowballstemmer==2.2.0 +Sphinx==6.1.3 +sphinx-gallery==0.12.2 +sphinx-rtd-theme==1.2.0 +sphinxcontrib-applehelp==1.0.3 +sphinxcontrib-bibtex==2.5.0 sphinxcontrib-devhelp==1.0.2 -sphinxcontrib-htmlhelp==1.0.3 +sphinxcontrib-htmlhelp==2.0.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.4 -traitlets==5.0.5 -urllib3==1.26.5 -wcwidth==0.2.5 -wheel==0.38.1 +sphinxcontrib-serializinghtml==1.1.5 +traitlets==5.9.0 +urllib3==1.26.14 +wcwidth==0.2.6 +wheel==0.38.4 diff --git a/doc/rtd-environment.yml b/doc/rtd-environment.yml new file mode 100644 index 0000000000..dd4e9f26ca --- /dev/null +++ b/doc/rtd-environment.yml @@ -0,0 +1,56 @@ +name: rtd-environment +channels: +- conda-forge +dependencies: +- alabaster==0.7.13 +- Babel==2.12.1 +- backcall==0.2.0 +- certifi==2022.12.7 +- chardet==4.0.0 +- cycler==0.10.0 +- Cython==0.29.33 +- decorator==5.1.1 +- docutils==0.18.1 +- idna==3.4 +- imagesize==1.4.1 +- ipython==8.11.0 +- jedi==0.18.2 +- Jinja2==3.1.2 +- kiwisolver==1.4.4 +- MarkupSafe==2.1.2 +- matplotlib==3.7.1 +- numpy==1.24.2 +- numpydoc==1.5.0 +- packaging==23.0 +- parso==0.8.3 +- pexpect==4.8.0 +- pickleshare==0.7.5 +- Pillow==9.4.0 +- prompt-toolkit==3.0.38 +- ptyprocess==0.7.0 +- Pygments==2.14.0 +- pyparsing==3.0.9 +- python-dateutil==2.8.2 +- pytz==2023.3 +- requests==2.28.2 +- scipy==1.10.1 +- six==1.16.0 +- snowballstemmer==2.2.0 +- Sphinx==6.1.3 +- sphinx-gallery==0.12.2 +- sphinx-rtd-theme==1.2.0 +- sphinxcontrib-applehelp==1.0.4 +- sphinxcontrib-bibtex==2.5.0 +- sphinxcontrib-devhelp==1.0.2 +- sphinxcontrib-htmlhelp==2.0.1 +- sphinxcontrib-jsmath==1.0.1 +- sphinxcontrib-qthelp==1.0.3 +- sphinxcontrib-serializinghtml==1.1.5 +- suitesparse +- traitlets==5.9.0 +- urllib3==1.26.14 +- wcwidth==0.2.6 +- wheel==0.38.4 +- pip +- pip: + - ..[full] diff --git a/qutip/__init__.py 
b/qutip/__init__.py index fc7f65b409..ff8d76a7e8 100644 --- a/qutip/__init__.py +++ b/qutip/__init__.py @@ -5,7 +5,6 @@ from qutip.settings import settings import qutip.version from qutip.version import version as __version__ - # ----------------------------------------------------------------------------- # Look to see if we are running with OPENMP # @@ -33,14 +32,13 @@ from .core import * from .solver import * -from .solve import nonmarkov -import qutip.solve.piqs as piqs -from .solve.stochastic import * +from .solver import nonmarkov +import qutip.piqs.piqs as piqs # graphics from .bloch import * from .visualization import * -from .orbital import * +from .animation import * from .bloch3d import * from .matplotlib_utilities import * @@ -53,7 +51,6 @@ from .partial_transpose import * from .continuous_variables import * from .distributions import * -from .three_level_atom import * # utilities diff --git a/qutip/animation.py b/qutip/animation.py new file mode 100644 index 0000000000..326d73ea59 --- /dev/null +++ b/qutip/animation.py @@ -0,0 +1,572 @@ +""" +Functions to animate results of quantum dynamics simulations, +""" +__all__ = ['anim_wigner_sphere', 'anim_hinton', 'anim_sphereplot', + 'anim_matrix_histogram', 'anim_fock_distribution', 'anim_wigner', + 'anim_spin_distribution', 'anim_qubism', 'anim_schmidt'] + +from . import (plot_wigner_sphere, hinton, sphereplot, matrix_histogram, + plot_fock_distribution, plot_wigner, plot_spin_distribution, + plot_qubism, plot_schmidt) +from .solver import Result + + +def _result_state(obj): + if isinstance(obj, Result): + obj = obj.states + if len(obj) == 0: + raise ValueError('Nothing to visualize. You might have forgotten ' + 'to set options={"store_states": True}.') + + return obj + + +def anim_wigner_sphere(wigners, reflections=False, *, cmap=None, + colorbar=True, fig=None, ax=None): + """Animate a coloured Bloch sphere. 
+ + Parameters + ---------- + wigners : list of transformations + The wigner transformation at `steps` different theta and phi. + + reflections : bool, default=False + If the reflections of the sphere should be plotted as well. + + cmap : a matplotlib colormap instance, optional + Color map to use when plotting. + + colorbar : bool, default=True + Whether (True) or not (False) a colorbar should be attached. + + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The ax context in which the plot will be drawn. + + Returns + ------- + fig, ani : tuple + A tuple of the matplotlib figure and the animation instance + used to produce the figure. + + Notes + ----- + Special thanks to Russell P Rundle for writing this function. + """ + + fig, ani = plot_wigner_sphere(wigners, reflections, cmap=cmap, + colorbar=colorbar, fig=fig, ax=ax) + + return fig, ani + + +def anim_hinton(rhos, x_basis=None, y_basis=None, color_style="scaled", + label_top=True, *, cmap=None, colorbar=True, + fig=None, ax=None): + """Draws an animation of Hinton diagram. + + Parameters + ---------- + rhos : :class:`qutip.solver.Result` or list of :class:`qutip.Qobj` + Input density matrix or superoperator. + + .. note:: + + Hinton plots of superoperators are currently only + supported for qubits. + + x_basis : list of strings, optional + list of x ticklabels to represent x basis of the input. + + y_basis : list of strings, optional + list of y ticklabels to represent y basis of the input. + + color_style : string, default="scaled" + + Determines how colors are assigned to each square: + + - If set to ``"scaled"`` (default), each color is chosen by + passing the absolute value of the corresponding matrix + element into `cmap` with the sign of the real part. 
+ - If set to ``"threshold"``, each square is plotted as + the maximum of `cmap` for the positive real part and as + the minimum for the negative part of the matrix element; + note that this generalizes `"threshold"` to complex numbers. + - If set to ``"phase"``, each color is chosen according to + the angle of the corresponding matrix element. + + label_top : bool, default=True + If True, x ticklabels will be placed on top, otherwise + they will appear below the plot. + + cmap : a matplotlib colormap instance, optional + Color map to use when plotting. + + colorbar : bool, default=True + Whether (True) or not (False) a colorbar should be attached. + + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The ax context in which the plot will be drawn. + + Returns + ------- + fig, ani : tuple + A tuple of the matplotlib figure and the animation instance + used to produce the figure. + + Raises + ------ + ValueError + Input argument is not a quantum object. + + """ + + rhos = _result_state(rhos) + + fig, ani = hinton(rhos, x_basis, y_basis, color_style, label_top, + cmap=cmap, colorbar=colorbar, fig=fig, ax=ax) + + return fig, ani + + +def anim_sphereplot(V, theta, phi, *, cmap=None, + colorbar=True, fig=None, ax=None): + """animation of a matrices of values on a sphere + + Parameters + ---------- + V : list of array instances + Data set to be plotted + + theta : float + Angle with respect to z-axis. Its range is between 0 and pi + + phi : float + Angle in x-y plane. Its range is between 0 and 2*pi + + cmap : a matplotlib colormap instance, optional + Color map to use when plotting. + + colorbar : bool, default=True + Whether (True) or not (False) a colorbar should be attached. + + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The axes context in which the plot will be drawn. 
+ + Returns + ------- + fig, ani : tuple + A tuple of the matplotlib figure and the animation instance + used to produce the figure. + """ + + fig, ani = sphereplot(V, theta, phi, cmap=cmap, + colorbar=colorbar, fig=fig, ax=ax) + + return fig, ani + + +def anim_matrix_histogram(Ms, x_basis=None, y_basis=None, limits=None, + bar_style='real', color_limits=None, + color_style='real', options=None, *, cmap=None, + colorbar=True, fig=None, ax=None): + """ + Draw an animation of a histogram for the matrix M, + with the given x and y labels. + + Parameters + ---------- + Ms : list of matrices or :class:`qutip.solver.Result` + The matrix to visualize + + x_basis : list of strings, optional + list of x ticklabels + + y_basis : list of strings, optional + list of y ticklabels + + limits : list/array with two float numbers, optional + The z-axis limits [min, max] + + bar_style : string, default="real" + + - If set to ``"real"`` (default), each bar is plotted + as the real part of the corresponding matrix element + - If set to ``"img"``, each bar is plotted + as the imaginary part of the corresponding matrix element + - If set to ``"abs"``, each bar is plotted + as the absolute value of the corresponding matrix element + - If set to ``"phase"`` (default), each bar is plotted + as the angle of the corresponding matrix element + + color_limits : list/array with two float numbers, optional + The limits of colorbar [min, max] + + color_style : string, default="real" + Determines how colors are assigned to each square: + + - If set to ``"real"`` (default), each color is chosen + according to the real part of the corresponding matrix element. + - If set to ``"img"``, each color is chosen according to + the imaginary part of the corresponding matrix element. + - If set to ``"abs"``, each color is chosen according to + the absolute value of the corresponding matrix element. + - If set to ``"phase"``, each color is chosen according to + the angle of the corresponding matrix element. 
+ + cmap : a matplotlib colormap instance, optional + Color map to use when plotting. + + colorbar : bool, default=True + show colorbar + + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The axes context in which the plot will be drawn. + + options : dict, optional + A dictionary containing extra options for the plot. + The names (keys) and values of the options are + described below: + + 'zticks' : list of numbers, optional + A list of z-axis tick locations. + + 'bars_spacing' : float, default=0.1 + spacing between bars. + + 'bars_alpha' : float, default=1. + transparency of bars, should be in range 0 - 1 + + 'bars_lw' : float, default=0.5 + linewidth of bars' edges. + + 'bars_edgecolor' : color, default='k' + The colors of the bars' edges. + Examples: 'k', (0.1, 0.2, 0.5) or '#0f0f0f80'. + + 'shade' : bool, default=True + Whether to shade the dark sides of the bars (True) or not (False). + The shading is relative to plot's source of light. + + 'azim' : float, default=-35 + The azimuthal viewing angle. + + 'elev' : float, default=35 + The elevation viewing angle. + + 'stick' : bool, default=False + Changes xlim and ylim in such a way that bars next to + XZ and YZ planes will stick to those planes. + This option has no effect if ``ax`` is passed as a parameter. + + 'cbar_pad' : float, default=0.04 + The fraction of the original axes between the colorbar + and the new image axes. + (i.e. the padding between the 3D figure and the colorbar). + + 'cbar_to_z' : bool, default=False + Whether to set the color of maximum and minimum z-values to the + maximum and minimum colors in the colorbar (True) or not (False). + + 'threshold': float, optional + Threshold for when bars of smaller height should be transparent. If + not set, all bars are colored according to the color map. 
+ + Returns + ------- + fig, ani : tuple + A tuple of the matplotlib figure and the animation instance + used to produce the figure. + + Raises + ------ + ValueError + Input argument is not valid. + + """ + + Ms = _result_state(Ms) + + fig, ani = matrix_histogram(Ms, x_basis, y_basis, limits, bar_style, + color_limits, color_style, options, cmap=cmap, + colorbar=colorbar, fig=fig, ax=ax) + + return fig, ani + + +def anim_fock_distribution(rhos, fock_numbers=None, color="green", + unit_y_range=True, *, fig=None, ax=None): + """ + Animation of the Fock distribution for a density matrix (or ket) + that describes an oscillator mode. + + Parameters + ---------- + rhos : :class:`qutip.solver.Result` or list of :class:`qutip.Qobj` + The density matrix (or ket) of the state to visualize. + + fock_numbers : list of strings, optional + list of x ticklabels to represent fock numbers + + color : color or list of colors, default="green" + The colors of the bar faces. + + unit_y_range : bool, default=True + Set y-axis limits [0, 1] or not + + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The axes context in which the plot will be drawn. + + Returns + ------- + fig, ani : tuple + A tuple of the matplotlib figure and the animation instance + used to produce the figure. + """ + + rhos = _result_state(rhos) + + fig, ani = plot_fock_distribution(rhos, fock_numbers, color, + unit_y_range, fig=fig, ax=ax) + + return fig, ani + + +def anim_wigner(rhos, xvec=None, yvec=None, method='clenshaw', + projection='2d', *, cmap=None, colorbar=False, + fig=None, ax=None): + """ + Animation of the Wigner function for a density matrix (or ket) + that describes an oscillator mode. + + Parameters + ---------- + rhos : :class:`qutip.solver.Result` or list of :class:`qutip.Qobj` + The density matrix (or ket) of the state to visualize. 
+ + xvec : array_like, optional + x-coordinates at which to calculate the Wigner function. + + yvec : array_like, optional + y-coordinates at which to calculate the Wigner function. Does not + apply to the 'fft' method. + + method : string {'clenshaw', 'iterative', 'laguerre', 'fft'}, + default='clenshaw' + The method used for calculating the wigner function. See the + documentation for qutip.wigner for details. + + projection: string {'2d', '3d'}, default='2d' + Specify whether the Wigner function is to be plotted as a + contour graph ('2d') or surface plot ('3d'). + + cmap : a matplotlib cmap instance, optional + The colormap. + + colorbar : bool, default=False + Whether (True) or not (False) a colorbar should be attached to the + Wigner function graph. + + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The axes context in which the plot will be drawn. + + Returns + ------- + fig, ani : tuple + A tuple of the matplotlib figure and the animation instance + used to produce the figure. + """ + + rhos = _result_state(rhos) + + fig, ani = plot_wigner(rhos, xvec, yvec, method, projection, + cmap=cmap, colorbar=colorbar, fig=fig, ax=ax) + + return fig, ani + + +def anim_spin_distribution(Ps, THETA, PHI, projection='2d', *, + cmap=None, colorbar=False, fig=None, ax=None): + """ + Animation of a spin distribution (given as meshgrid data). + + Parameters + ---------- + Ps : list of matrices + Distribution values as a meshgrid matrix. + + THETA : matrix + Meshgrid matrix for the theta coordinate. Its range is between 0 and pi + + PHI : matrix + Meshgrid matrix for the phi coordinate. Its range is between 0 and 2*pi + + projection: string {'2d', '3d'}, default='2d' + Specify whether the spin distribution function is to be plotted as a 2D + projection where the surface of the unit sphere is mapped on + the unit disk ('2d') or surface plot ('3d'). 
+
+    cmap : a matplotlib cmap instance, optional
+        The colormap.
+
+    colorbar : bool, default=False
+        Whether (True) or not (False) a colorbar should be attached to the
+        spin distribution graph.
+
+    fig : a matplotlib figure instance, optional
+        The figure canvas on which the plot will be drawn.
+
+    ax : a matplotlib axis instance, optional
+        The axis context in which the plot will be drawn.
+
+    Returns
+    -------
+    fig, ani : tuple
+        A tuple of the matplotlib figure and the animation instance
+        used to produce the figure.
+    """
+
+    fig, ani = plot_spin_distribution(Ps, THETA, PHI, projection, cmap=cmap,
+                                      colorbar=colorbar, fig=fig, ax=ax)
+
+    return fig, ani
+
+
+def anim_qubism(kets, theme='light', how='pairs', grid_iteration=1,
+                legend_iteration=0, *, fig=None, ax=None):
+    """
+    Animation of Qubism plot for pure states of many qudits.
+    Works best for spin chains, especially with even number of particles
+    of the same dimension. Allows to see entanglement between first
+    2k particles and the rest.
+
+    .. note::
+
+        colorblind_safe does not apply because of its unique colormap
+
+    Parameters
+    ----------
+    kets : :class:`qutip.solver.Result` or list of :class:`qutip.Qobj`
+        Pure states for animation.
+
+    theme : 'light' or 'dark', default='light'
+        Set coloring theme for mapping complex values into colors.
+        See: complex_array_to_rgb.
+
+    how : 'pairs', 'pairs_skewed' or 'before_after', default='pairs'
+        Type of Qubism plotting. Options:
+
+        - 'pairs' - typical coordinates,
+        - 'pairs_skewed' - for ferromagnetic/antiferromagnetic plots,
+        - 'before_after' - related to Schmidt plot (see also: plot_schmidt).
+
+    grid_iteration : int, default=1
+        Helper lines to be drawn on plot.
+        Show tiles for 2*grid_iteration particles vs all others.
+
+    legend_iteration : int or 'grid_iteration' or 'all', default=0
+        Show labels for first ``2*legend_iteration`` particles. Option
+        'grid_iteration' sets the same number of particles as for
+        grid_iteration.
+        Option 'all' makes label for all particles. Typically
+        it should be 0, 1, 2 or perhaps 3.
+
+    fig : a matplotlib figure instance, optional
+        The figure canvas on which the plot will be drawn.
+
+    ax : a matplotlib axis instance, optional
+        The axis context in which the plot will be drawn.
+
+    Returns
+    -------
+    fig, ani : tuple
+        A tuple of the matplotlib figure and the animation instance
+        used to produce the figure.
+
+    Notes
+    -----
+    See also [1]_.
+
+    References
+    ----------
+    .. [1] J. Rodriguez-Laguna, P. Migdal, M. Ibanez Berganza, M. Lewenstein
+        and G. Sierra, *Qubism: self-similar visualization of many-body
+        wavefunctions*, `New J. Phys. 14 053028
+        <https://dx.doi.org/10.1088/1367-2630/14/5/053028>`_, arXiv:1112.3560
+        (2012), open access.
+    """
+
+    kets = _result_state(kets)
+
+    fig, ani = plot_qubism(kets, theme, how, grid_iteration,
+                           legend_iteration, fig=fig, ax=ax)
+
+    return fig, ani
+
+
+def anim_schmidt(kets, theme='light', splitting=None,
+                 labels_iteration=(3, 2), *, fig=None, ax=None):
+    """
+    Animation of Schmidt decomposition.
+    Converts a state into a matrix (A_ij -> A_i^j),
+    where rows are first particles and columns - last.
+
+    See also: plot_qubism with how='before_after' for a similar plot.
+
+    .. note::
+
+        colorblind_safe does not apply because of its unique colormap
+
+    Parameters
+    ----------
+    kets : :class:`qutip.solver.Result` or list of :class:`qutip.Qobj`
+        Pure states for animation.
+
+    theme : 'light' or 'dark', default='light'
+        Set coloring theme for mapping complex values into colors.
+        See: complex_array_to_rgb.
+
+    splitting : int, optional
+        Plot for a number of first particles versus the rest.
+        If not given, it is (number of particles + 1) // 2.
+
+    labels_iteration : int or pair of ints, default=(3,2)
+        Number of particles to be shown as tick labels,
+        for first (vertical) and last (horizontal) particles, respectively.
+
+    fig : a matplotlib figure instance, optional
+        The figure canvas on which the plot will be drawn.
+ + ax : a matplotlib axis instance, optional + The axis context in which the plot will be drawn. + + Returns + ------- + fig, ani : tuple + A tuple of the matplotlib figure and the animation instance + used to produce the figure. + + """ + + kets = _result_state(kets) + + fig, ani = plot_schmidt(kets, theme, splitting, labels_iteration, + fig=fig, ax=ax) + + return fig, ani diff --git a/qutip/control.py b/qutip/control.py new file mode 100644 index 0000000000..559a6a8d27 --- /dev/null +++ b/qutip/control.py @@ -0,0 +1,13 @@ +"""Module replicating the qutip_qtrl package from within qutip.""" +import sys + +try: + import qutip_qtrl + del qutip_qtrl + sys.modules["qutip.control"] = sys.modules["qutip_qtrl"] +except ImportError: + raise ImportError( + "Importing 'qutip.control' requires the 'qutip_qtrl' package. " + "Install it with `pip install qutip-qtrl` (for more details, go to " + "https://qutip-qtrl.readthedocs.io/)." + ) diff --git a/qutip/control/__init__.py b/qutip/control/__init__.py deleted file mode 100644 index 5a270b50c0..0000000000 --- a/qutip/control/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from qutip.control.grape import * diff --git a/qutip/control/cy_grape.pyx b/qutip/control/cy_grape.pyx deleted file mode 100644 index c43f3f53e8..0000000000 --- a/qutip/control/cy_grape.pyx +++ /dev/null @@ -1,96 +0,0 @@ -#cython: language_level=3 - -import numpy as np -cimport numpy as np -cimport cython -cimport libc.math - -DTYPE = np.float64 -ctypedef np.float64_t DTYPE_t - -ITYPE = np.int32 -ctypedef np.int32_t ITYPE_t - -CTYPE = np.complex128 -ctypedef np.complex128_t CTYPE_t - -CTYPE = np.int64 -ctypedef np.int64_t LTYPE_t - -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef CTYPE_t cy_overlap(object op1, object op2): - - cdef Py_ssize_t row - cdef CTYPE_t tr = 0.0 - - op1 = op1.T.tocsr() - - cdef int col1, row1_idx_start, row1_idx_end - cdef np.ndarray[CTYPE_t, ndim=1, mode="c"] data1 = op1.data.conj() - cdef np.ndarray[ITYPE_t, ndim=1, mode="c"] 
idx1 = op1.indices - cdef np.ndarray[ITYPE_t, ndim=1, mode="c"] ptr1 = op1.indptr - - cdef int col2, row2_idx_start, row2_idx_end - cdef np.ndarray[CTYPE_t, ndim=1, mode="c"] data2 = op2.data - cdef np.ndarray[ITYPE_t, ndim=1, mode="c"] idx2 = op2.indices - cdef np.ndarray[ITYPE_t, ndim=1, mode="c"] ptr2 = op2.indptr - - cdef int num_rows = ptr1.shape[0]-1 - - for row in range(num_rows): - - row1_idx_start = ptr1[row] - row1_idx_end = ptr1[row + 1] - for row1_idx from row1_idx_start <= row1_idx < row1_idx_end: - col1 = idx1[row1_idx] - - row2_idx_start = ptr2[col1] - row2_idx_end = ptr2[col1 + 1] - for row2_idx from row2_idx_start <= row2_idx < row2_idx_end: - col2 = idx2[row2_idx] - - if col2 == row: - tr += data1[row1_idx] * data2[row2_idx] - - return tr / op1.shape[0] - - -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef cy_grape_inner(U, np.ndarray[DTYPE_t, ndim=3, mode="c"] u, - int r, int J, int M, U_b_list, U_f_list, H_ops, - float dt, float eps, float alpha, float beta, - int phase_sensitive, - int use_u_limits, float u_min, float u_max): - - cdef int j, k - - for m in range(M-1): - P = U_b_list[m] * U - for j in range(J): - Q = 1j * dt * H_ops[j] * U_f_list[m] - - if phase_sensitive: - du = - cy_overlap(P, Q) - else: - du = - 2 * cy_overlap(P, Q) * cy_overlap(U_f_list[m], P) - - if alpha > 0.0: - # penalty term for high power control signals u - du += -2 * alpha * u[r, j, m] * dt - - if beta: - # penalty term for late control signals u - du += -2 * beta * m ** 2 * u[r, j, m] * dt - - u[r + 1, j, m] = u[r, j, m] + eps * du.real - - if use_u_limits: - if u[r + 1, j, m] < u_min: - u[r + 1, j, m] = u_min - elif u[r + 1, j, m] > u_max: - u[r + 1, j, m] = u_max - - for j in range(J): - u[r + 1, j, M-1] = u[r + 1, j, M-2] diff --git a/qutip/control/dump.py b/qutip/control/dump.py deleted file mode 100644 index 7e707a65cd..0000000000 --- a/qutip/control/dump.py +++ /dev/null @@ -1,956 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Classes that enable the 
storing of historical objects created during the -pulse optimisation. -These are intented for debugging. -See the optimizer and dynamics objects for instrutcions on how to enable -data dumping. -""" - -import os -import copy -import numpy as np -from numpy.compat import asbytes -# QuTiP control modules -import qutip.control.io as qtrlio -# QuTiP logging -import qutip.logging_utils -logger = qutip.logging_utils.get_logger('qutip.control.dump') - -DUMP_DIR = "~/.qtrl_dump" - - -class Dump: - """ - A container for dump items. - The lists for dump items is depends on the type - Note: abstract class - - Attributes - ---------- - parent : some control object (Dynamics or Optimizer) - aka the host. Object that generates the data that is dumped and is - host to this dump object. - - dump_dir : str - directory where files (if any) will be written out - the path and be relative or absolute - use ~/ to specify user home directory - Note: files are only written when write_to_file is True - of writeout is called explicitly - Defaults to ~/.qtrl_dump - - level : string - level of data dumping: SUMMARY, FULL or CUSTOM - See property docstring for details - Set automatically if dump is created by the setting host dumping attrib - - write_to_file : bool - When set True data and summaries (as configured) will be written - interactively to file during the processing - Set during instantiation by the host based on its dump_to_file attrib - - dump_file_ext : str - Default file extension for any file names that are auto generated - - fname_base : str - First part of any auto generated file names. - This is usually overridden in the subclass - - dump_summary : bool - If True a summary is recorded each time a new item is added to the - the dump. - Default is True - - summary_sep : str - delimiter for the summary file. - default is a space - - data_sep : str - delimiter for the data files (arrays saved to file). - default is a space - - summary_file : str - File path for summary file. 
- Automatically generated. Can be set specifically - - """ - def __init__(self): - self.parent = None - self.reset() - - def reset(self): - if self.parent is not None: - self.log_level = self.parent.log_level - self.write_to_file = self.parent.dump_to_file - else: - self.write_to_file = False - self._dump_dir = None - self.dump_file_ext = "txt" - self._fname_base = 'dump' - self.dump_summary = True - self.summary_sep = ' ' - self.data_sep = ' ' - self._summary_file_path = None - self._summary_file_specified = False - - @property - def log_level(self): - return logger.level - - @log_level.setter - def log_level(self, lvl): - """ - Set the log_level attribute and set the level of the logger - that is call logger.setLevel(lvl) - """ - logger.setLevel(lvl) - - @property - def level(self): - """ - The level of data dumping that will occur. - - SUMMARY - A summary will be recorded - - FULL - All possible dumping - - CUSTOM - Some customised level of dumping - - When first set to CUSTOM this is equivalent to SUMMARY. 
It is then up - to the user to specify what specifically is dumped - """ - lvl = 'CUSTOM' - if (self.dump_summary and not self.dump_any): - lvl = 'SUMMARY' - elif (self.dump_summary and self.dump_all): - lvl = 'FULL' - - return lvl - - @level.setter - def level(self, value): - self._level = value - self._apply_level() - - @property - def dump_any(self): - raise NotImplementedError( - "This is an abstract class, " - "use subclass such as DynamicsDump or OptimDump") - - @property - def dump_all(self): - raise NotImplementedError( - "This is an abstract class, " - "use subclass such as DynamicsDump or OptimDump") - - @property - def dump_dir(self): - if self._dump_dir is None: - self.create_dump_dir() - return self._dump_dir - - @dump_dir.setter - def dump_dir(self, value): - self._dump_dir = value - if not self.create_dump_dir(): - self._dump_dir = None - - def create_dump_dir(self): - """ - Checks dump directory exists, creates it if not - """ - if self._dump_dir is None or len(self._dump_dir) == 0: - self._dump_dir = DUMP_DIR - - dir_ok, self._dump_dir, msg = qtrlio.create_dir( - self._dump_dir, desc='dump') - - if not dir_ok: - self.write_to_file = False - msg += "\ndump file output will be suppressed." 
- logger.error(msg) - - return dir_ok - - @property - def fname_base(self): - return self._fname_base - - @fname_base.setter - def fname_base(self, value): - if not isinstance(value, str): - raise ValueError("File name base must be a string") - self._fname_base = value - self._summary_file_path = None - - @property - def summary_file(self): - if self._summary_file_path is None: - fname = "{}-summary.{}".format(self._fname_base, - self.dump_file_ext) - self._summary_file_path = os.path.join(self.dump_dir, fname) - return self._summary_file_path - - @summary_file.setter - def summary_file(self, value): - if not isinstance(value, str): - raise ValueError("File path must be a string") - self._summary_file_specified = True - if os.path.abspath(value): - self._summary_file_path = value - elif '~' in value: - self._summary_file_path = os.path.expanduser(value) - else: - self._summary_file_path = os.path.join(self.dump_dir, value) - - -class OptimDump(Dump): - """ - A container for dumps of optimisation data generated during the pulse - optimisation. 
- - Attributes - ---------- - dump_summary : bool - When True summary items are appended to the iter_summary - - iter_summary : list of :class:`qutip.control.optimizer.OptimIterSummary` - Summary at each iteration - - dump_fid_err : bool - When True values are appended to the fid_err_log - - fid_err_log : list of float - Fidelity error at each call of the fid_err_func - - dump_grad_norm : bool - When True values are appended to the fid_err_log - - grad_norm_log : list of float - Gradient norm at each call of the grad_norm_log - - dump_grad : bool - When True values are appended to the grad_log - - grad_log : list of ndarray - Gradients at each call of the fid_grad_func - - """ - def __init__(self, optim, level='SUMMARY'): - from qutip.control.optimizer import Optimizer - if not isinstance(optim, Optimizer): - raise TypeError("Must instantiate with {} type".format( - Optimizer)) - self.parent = optim - self._level = level - self.reset() - - def reset(self): - Dump.reset(self) - self._apply_level() - self.iter_summary = [] - self.fid_err_log = [] - self.grad_norm_log = [] - self.grad_log = [] - self._fname_base = 'optimdump' - self._fid_err_file = None - self._grad_norm_file = None - - def clear(self): - del self.iter_summary[:] - del self.fid_err_log[:] - del self.grad_norm_log[:] - del self.grad_log[:] - - @property - def dump_any(self): - """True if anything other than the summary is to be dumped""" - return self.dump_fid_err or self.dump_grad_norm or self.dump_grad - - @property - def dump_all(self): - """True if everything (ignoring the summary) is to be dumped""" - return self.dump_fid_err and self.dump_grad_norm and self.dump_grad - - def _apply_level(self, level=None): - if level is None: - level = self._level - if not isinstance(level, str): - raise ValueError("Dump level must be a string") - level = level.upper() - if level == 'CUSTOM': - if self._level == 'CUSTOM': - # dumping level has not changed keep the same specific config - pass - else: - # Switching 
to custom, start from SUMMARY - level = 'SUMMARY' - - if level == 'SUMMARY': - self.dump_summary = True - self.dump_fid_err = False - self.dump_grad_norm = False - self.dump_grad = False - elif level == 'FULL': - self.dump_summary = True - self.dump_fid_err = True - self.dump_grad_norm = True - self.dump_grad = True - else: - raise ValueError("No option for dumping level '{}'".format(level)) - - def add_iter_summary(self): - """add copy of current optimizer iteration summary""" - optim = self.parent - if optim.iter_summary is None: - raise RuntimeError("Cannot add iter_summary as not available") - ois = copy.copy(optim.iter_summary) - ois.idx = len(self.iter_summary) - self.iter_summary.append(ois) - if self.write_to_file: - if ois.idx == 0: - f = open(self.summary_file, 'w') - f.write("{}\n{}\n".format( - ois.get_header_line(self.summary_sep), - ois.get_value_line(self.summary_sep))) - else: - f = open(self.summary_file, 'a') - f.write("{}\n".format( - ois.get_value_line(self.summary_sep))) - - f.close() - return ois - - @property - def fid_err_file(self): - if self._fid_err_file is None: - fname = "{}-fid_err_log.{}".format(self.fname_base, - self.dump_file_ext) - self._fid_err_file = os.path.join(self.dump_dir, fname) - return self._fid_err_file - - def update_fid_err_log(self, fid_err): - """add an entry to the fid_err log""" - self.fid_err_log.append(fid_err) - if self.write_to_file: - if len(self.fid_err_log) == 1: - mode = 'w' - else: - mode = 'a' - f = open(self.fid_err_file, mode) - f.write("{}\n".format(fid_err)) - f.close() - - @property - def grad_norm_file(self): - if self._grad_norm_file is None: - fname = "{}-grad_norm_log.{}".format(self.fname_base, - self.dump_file_ext) - self._grad_norm_file = os.path.join(self.dump_dir, fname) - return self._grad_norm_file - - def update_grad_norm_log(self, grad_norm): - """add an entry to the grad_norm log""" - self.grad_norm_log.append(grad_norm) - if self.write_to_file: - if len(self.grad_norm_log) == 1: - 
mode = 'w' - else: - mode = 'a' - f = open(self.grad_norm_file, mode) - f.write("{}\n".format(grad_norm)) - f.close() - - def update_grad_log(self, grad): - """add an entry to the grad log""" - self.grad_log.append(grad) - if self.write_to_file: - fname = "{}-fid_err_gradients{}.{}".format(self.fname_base, - len(self.grad_log), - self.dump_file_ext) - fpath = os.path.join(self.dump_dir, fname) - np.savetxt(fpath, grad, delimiter=self.data_sep) - - def writeout(self, f=None): - """write all the logs and the summary out to file(s) - - Parameters - ---------- - f : filename or filehandle - If specified then all summary and object data will go in one file. - If None is specified then type specific files will be generated - in the dump_dir - If a filehandle is specified then it must be a byte mode file - as numpy.savetxt is used, and requires this. - """ - fall = None - # If specific file given then write everything to it - if hasattr(f, 'write'): - if 'b' not in f.mode: - raise RuntimeError("File stream must be in binary mode") - # write all to this stream - fall = f - fs = f - closefall = False - closefs = False - elif f: - # Assume f is a filename - fall = open(f, 'wb') - fs = fall - closefs = False - closefall = True - else: - self.create_dump_dir() - closefall = False - if self.dump_summary: - fs = open(self.summary_file, 'wb') - closefs = True - - if self.dump_summary: - for ois in self.iter_summary: - if ois.idx == 0: - fs.write(asbytes("{}\n{}\n".format( - ois.get_header_line(self.summary_sep), - ois.get_value_line(self.summary_sep)))) - else: - fs.write(asbytes("{}\n".format( - ois.get_value_line(self.summary_sep)))) - - if closefs: - fs.close() - logger.info("Optim dump summary saved to {}".format( - self.summary_file)) - - if self.dump_fid_err: - if fall: - fall.write(asbytes("Fidelity errors:\n")) - np.savetxt(fall, self.fid_err_log) - else: - np.savetxt(self.fid_err_file, self.fid_err_log) - - if self.dump_grad_norm: - if fall: - 
fall.write(asbytes("gradients norms:\n")) - np.savetxt(fall, self.grad_norm_log) - else: - np.savetxt(self.grad_norm_file, self.grad_norm_log) - - if self.dump_grad: - g_num = 0 - for grad in self.grad_log: - g_num += 1 - if fall: - fall.write(asbytes("gradients (call {}):\n".format(g_num))) - np.savetxt(fall, grad) - else: - fname =\ - "{}-fid_err_gradients{}.{}".format(self.fname_base, - g_num, - self.dump_file_ext) - fpath = os.path.join(self.dump_dir, fname) - np.savetxt(fpath, grad, delimiter=self.data_sep) - - if closefall: - fall.close() - logger.info("Optim dump saved to {}".format(f)) - else: - if fall: - logger.info("Optim dump saved to specified stream") - else: - logger.info("Optim dump saved to {}".format(self.dump_dir)) - - -class DynamicsDump(Dump): - """ - A container for dumps of dynamics data. Mainly time evolution calculations. - - Attributes - ---------- - dump_summary : bool - If True a summary is recorded - - evo_summary : list of :class:`tslotcomp.EvoCompSummary` - Summary items are appended if dump_summary is True - at each recomputation of the evolution. - - dump_amps : bool - If True control amplitudes are dumped - - dump_dyn_gen : bool - If True the dynamics generators (Hamiltonians) are dumped - - dump_prop : bool - If True propagators are dumped - - dump_prop_grad : bool - If True propagator gradients are dumped - - dump_fwd_evo : bool - If True forward evolution operators are dumped - - dump_onwd_evo : bool - If True onward evolution operators are dumped - - dump_onto_evo : bool - If True onto (or backward) evolution operators are dumped - - evo_dumps : list of :class:`EvoCompDumpItem` - A new dump item is appended at each recomputation of the evolution. - That is if any of the calculation objects are to be dumped. 
- - """ - def __init__(self, dynamics, level='SUMMARY'): - from qutip.control.dynamics import Dynamics - if not isinstance(dynamics, Dynamics): - raise TypeError("Must instantiate with {} type".format( - Dynamics)) - self.parent = dynamics - self._level = level - self.reset() - - def reset(self): - Dump.reset(self) - self._apply_level() - self.evo_dumps = [] - self.evo_summary = [] - self._fname_base = 'dyndump' - - def clear(self): - del self.evo_dumps[:] - del self.evo_summary[:] - - @property - def dump_any(self): - """True if any of the calculation objects are to be dumped""" - return any([ - self.dump_amps, - self.dump_dyn_gen, - self.dump_prop, - self.dump_prop_grad, - self.dump_fwd_evo, - self.dump_onwd_evo, - self.dump_onto_evo, - ]) - - @property - def dump_all(self): - """True if all of the calculation objects are to be dumped""" - dyn = self.parent - return all([ - self.dump_amps, - self.dump_dyn_gen, - self.dump_prop, - self.dump_prop_grad, - self.dump_fwd_evo, - self.dump_onwd_evo == dyn.fid_computer.uses_onwd_evo, - self.dump_onto_evo == dyn.fid_computer.uses_onto_evo, - ]) - - def _apply_level(self, level=None): - dyn = self.parent - if level is None: - level = self._level - - if not isinstance(level, str): - raise ValueError("Dump level must be a string") - level = level.upper() - if level == 'CUSTOM': - if self._level == 'CUSTOM': - # dumping level has not changed keep the same specific config - pass - else: - # Switching to custom, start from SUMMARY - level = 'SUMMARY' - - if level == 'SUMMARY': - self.dump_summary = True - self.dump_amps = False - self.dump_dyn_gen = False - self.dump_prop = False - self.dump_prop_grad = False - self.dump_fwd_evo = False - self.dump_onwd_evo = False - self.dump_onto_evo = False - elif level == 'FULL': - self.dump_summary = True - self.dump_amps = True - self.dump_dyn_gen = True - self.dump_prop = True - self.dump_prop_grad = True - self.dump_fwd_evo = True - self.dump_onwd_evo = dyn.fid_computer.uses_onwd_evo - 
self.dump_onto_evo = dyn.fid_computer.uses_onto_evo - else: - raise ValueError("No option for dumping level '{}'".format(level)) - - def add_evo_dump(self): - """Add dump of current time evolution generating objects""" - dyn = self.parent - item = EvoCompDumpItem(self) - item.idx = len(self.evo_dumps) - self.evo_dumps.append(item) - if self.dump_amps: - item.ctrl_amps = copy.deepcopy(dyn.ctrl_amps) - if self.dump_dyn_gen: - item.dyn_gen = copy.deepcopy(dyn._dyn_gen) - if self.dump_prop: - item.prop = copy.deepcopy(dyn._prop) - if self.dump_prop_grad: - item.prop_grad = copy.deepcopy(dyn._prop_grad) - if self.dump_fwd_evo: - item.fwd_evo = copy.deepcopy(dyn._fwd_evo) - if self.dump_onwd_evo: - item.onwd_evo = copy.deepcopy(dyn._onwd_evo) - if self.dump_onto_evo: - item.onto_evo = copy.deepcopy(dyn._onto_evo) - - if self.write_to_file: - item.writeout() - return item - - def add_evo_comp_summary(self, dump_item_idx=None): - """add copy of current evo comp summary""" - dyn = self.parent - if dyn.tslot_computer.evo_comp_summary is None: - raise RuntimeError("Cannot add evo_comp_summary as not available") - ecs = copy.copy(dyn.tslot_computer.evo_comp_summary) - ecs.idx = len(self.evo_summary) - ecs.evo_dump_idx = dump_item_idx - if dyn.stats: - ecs.iter_num = dyn.stats.num_iter - ecs.fid_func_call_num = dyn.stats.num_fidelity_func_calls - ecs.grad_func_call_num = dyn.stats.num_grad_func_calls - - self.evo_summary.append(ecs) - if self.write_to_file: - if ecs.idx == 0: - f = open(self.summary_file, 'w') - f.write("{}\n{}\n".format( - ecs.get_header_line(self.summary_sep), - ecs.get_value_line(self.summary_sep))) - else: - f = open(self.summary_file, 'a') - f.write("{}\n".format(ecs.get_value_line(self.summary_sep))) - - f.close() - return ecs - - def writeout(self, f=None): - """ - Write all the dump items and the summary out to file(s). - - Parameters - ---------- - f : filename or filehandle - If specified then all summary and object data will go in one file. 
- If None is specified then type specific files will be generated in - the dump_dir. If a filehandle is specified then it must be a byte - mode file as numpy.savetxt is used, and requires this. - """ - fall = None - # If specific file given then write everything to it - if hasattr(f, 'write'): - if 'b' not in f.mode: - raise RuntimeError("File stream must be in binary mode") - # write all to this stream - fall = f - fs = f - closefall = False - closefs = False - elif f: - # Assume f is a filename - fall = open(f, 'wb') - fs = fall - closefs = False - closefall = True - else: - self.create_dump_dir() - closefall = False - if self.dump_summary: - fs = open(self.summary_file, 'wb') - closefs = True - - if self.dump_summary: - for ecs in self.evo_summary: - if ecs.idx == 0: - fs.write(asbytes("{}\n{}\n".format( - ecs.get_header_line(self.summary_sep), - ecs.get_value_line(self.summary_sep)))) - else: - fs.write(asbytes("{}\n".format( - ecs.get_value_line(self.summary_sep)))) - - if closefs: - fs.close() - logger.info("Dynamics dump summary saved to {}".format( - self.summary_file)) - - for di in self.evo_dumps: - di.writeout(fall) - - if closefall: - fall.close() - logger.info("Dynamics dump saved to {}".format(f)) - else: - if fall: - logger.info("Dynamics dump saved to specified stream") - else: - logger.info("Dynamics dump saved to {}".format(self.dump_dir)) - - -class DumpItem: - """ - An item in a dump list - """ - def __init__(self): - pass - - -class EvoCompDumpItem(DumpItem): - """ - A copy of all objects generated to calculate one time evolution. Note the - attributes are only set if the corresponding :class:`DynamicsDump` - ``dump_*`` attribute is set. 
- """ - def __init__(self, dump): - if not isinstance(dump, DynamicsDump): - raise TypeError("Must instantiate with {} type".format( - DynamicsDump)) - self.parent = dump - self.reset() - - def reset(self): - self.idx = None - self.ctrl_amps = None - self.dyn_gen = None - self.prop = None - self.prop_grad = None - self.fwd_evo = None - self.onwd_evo = None - self.onto_evo = None - - def writeout(self, f=None): - """ write all the objects out to files - - Parameters - ---------- - f : filename or filehandle - If specified then all object data will go in one file. - If None is specified then type specific files will be generated - in the dump_dir - If a filehandle is specified then it must be a byte mode file - as numpy.savetxt is used, and requires this. - """ - dump = self.parent - fall = None - closefall = True - closef = False - # If specific file given then write everything to it - if hasattr(f, 'write'): - if 'b' not in f.mode: - raise RuntimeError("File stream must be in binary mode") - # write all to this stream - fall = f - closefall = False - f.write(asbytes("EVOLUTION COMPUTATION {}\n".format(self.idx))) - elif f: - fall = open(f, 'wb') - else: - # otherwise files for each type will be created - fnbase = "{}-evo{}".format(dump._fname_base, self.idx) - closefall = False - - # ctrl amps - if self.ctrl_amps is not None: - if fall: - f = fall - f.write(asbytes("Ctrl amps\n")) - else: - fname = "{}-ctrl_amps.{}".format(fnbase, - dump.dump_file_ext) - f = open(os.path.join(dump.dump_dir, fname), 'wb') - closef = True - np.savetxt(f, self.ctrl_amps, fmt='%14.6g', - delimiter=dump.data_sep) - if closef: - f.close() - - # dynamics generators - if self.dyn_gen is not None: - k = 0 - if fall: - f = fall - f.write(asbytes("Dynamics Generators\n")) - else: - fname = "{}-dyn_gen.{}".format(fnbase, - dump.dump_file_ext) - f = open(os.path.join(dump.dump_dir, fname), 'wb') - closef = True - for dg in self.dyn_gen: - f.write(asbytes( - "dynamics generator for timeslot 
{}\n".format(k))) - np.savetxt(f, self.dyn_gen[k], delimiter=dump.data_sep) - k += 1 - if closef: - f.close() - - # Propagators - if self.prop is not None: - k = 0 - if fall: - f = fall - f.write(asbytes("Propagators\n")) - else: - fname = "{}-prop.{}".format(fnbase, - dump.dump_file_ext) - f = open(os.path.join(dump.dump_dir, fname), 'wb') - closef = True - for dg in self.dyn_gen: - f.write(asbytes("Propagator for timeslot {}\n".format(k))) - np.savetxt(f, self.prop[k], delimiter=dump.data_sep) - k += 1 - if closef: - f.close() - - # Propagator gradient - if self.prop_grad is not None: - k = 0 - if fall: - f = fall - f.write(asbytes("Propagator gradients\n")) - else: - fname = "{}-prop_grad.{}".format(fnbase, - dump.dump_file_ext) - f = open(os.path.join(dump.dump_dir, fname), 'wb') - closef = True - for k in range(self.prop_grad.shape[0]): - for j in range(self.prop_grad.shape[1]): - f.write(asbytes("Propagator gradient for timeslot {} " - "control {}\n".format(k, j))) - np.savetxt(f, self.prop_grad[k, j], - delimiter=dump.data_sep) - if closef: - f.close() - - # forward evolution - if self.fwd_evo is not None: - k = 0 - if fall: - f = fall - f.write(asbytes("Forward evolution\n")) - else: - fname = "{}-fwd_evo.{}".format(fnbase, - dump.dump_file_ext) - f = open(os.path.join(dump.dump_dir, fname), 'wb') - closef = True - for dg in self.dyn_gen: - f.write(asbytes("Evolution from 0 to {}\n".format(k))) - np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep) - k += 1 - if closef: - f.close() - - # onward evolution - if self.onwd_evo is not None: - k = 0 - if fall: - f = fall - f.write(asbytes("Onward evolution\n")) - else: - fname = "{}-onwd_evo.{}".format(fnbase, - dump.dump_file_ext) - f = open(os.path.join(dump.dump_dir, fname), 'wb') - closef = True - for dg in self.dyn_gen: - f.write(asbytes("Evolution from {} to end\n".format(k))) - np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep) - k += 1 - if closef: - f.close() - - # onto evolution - if 
self.onto_evo is not None: - k = 0 - if fall: - f = fall - f.write(asbytes("Onto evolution\n")) - else: - fname = "{}-onto_evo.{}".format(fnbase, - dump.dump_file_ext) - f = open(os.path.join(dump.dump_dir, fname), 'wb') - closef = True - for dg in self.dyn_gen: - f.write(asbytes("Evolution from {} onto target\n".format(k))) - np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep) - k += 1 - if closef: - f.close() - - if closefall: - fall.close() - -class DumpSummaryItem: - """ - A summary of the most recent iteration. Abstract class only. - - Attributes - ---------- - idx : int - Index in the summary list in which this is stored - """ - min_col_width = 11 - summary_property_names = () - summary_property_fmt_type = () - summary_property_fmt_prec = () - - @classmethod - def get_header_line(cls, sep=' '): - if sep == ' ': - line = '' - i = 0 - for a in cls.summary_property_names: - if i > 0: - line += sep - i += 1 - line += format(a, str(max(len(a), cls.min_col_width)) + 's') - else: - line = sep.join(cls.summary_property_names) - return line - - def reset(self): - self.idx = 0 - - def get_value_line(self, sep=' '): - line = "" - i = 0 - for a in zip(self.summary_property_names, - self.summary_property_fmt_type, - self.summary_property_fmt_prec): - if i > 0: - line += sep - i += 1 - v = getattr(self, a[0]) - w = max(len(a[0]), self.min_col_width) - if v is not None: - fmt = '' - if sep == ' ': - fmt += str(w) - else: - fmt += '0' - if a[2] > 0: - fmt += '.' 
+ str(a[2]) - fmt += a[1] - line += format(v, fmt) - else: - if sep == ' ': - line += format('None', str(w) + 's') - else: - line += 'None' - - return line diff --git a/qutip/control/dynamics.py b/qutip/control/dynamics.py deleted file mode 100644 index 70600392ed..0000000000 --- a/qutip/control/dynamics.py +++ /dev/null @@ -1,1751 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Classes that define the dynamics of the (quantum) system and target evolution -to be optimised. -The contols are also defined here, i.e. the dynamics generators (Hamiltonians, -Limbladians etc). The dynamics for the time slices are calculated here, along -with the evolution as determined by the control amplitudes. - -See the subclass descriptions and choose the appropriate class for the -application. The choice depends on the type of matrix used to define -the dynamics. 
- -These class implement functions for getting the dynamics generators for -the combined (drift + ctrls) dynamics with the approriate operator applied - -Note the methods in these classes were inspired by: -DYNAMO - Dynamic Framework for Quantum Optimal Control -See Machnes et.al., arXiv.1011.4874 -""" -import warnings -import numpy as np -import scipy.linalg as la -import scipy.sparse as sp -# QuTiP -from qutip import Qobj -from qutip.core import data as _data -from qutip.core.data.eigen import eigh -from qutip.settings import settings -# QuTiP control modules -import qutip.control.errors as errors -import qutip.control.tslotcomp as tslotcomp -import qutip.control.fidcomp as fidcomp -import qutip.control.propcomp as propcomp -import qutip.control.symplectic as sympl -import qutip.control.dump as qtrldump -# QuTiP logging -import qutip.logging_utils as logging -logger = logging.get_logger() - -DEF_NUM_TSLOTS = 10 -DEF_EVO_TIME = 1.0 - - -def _check_ctrls_container(ctrls): - """ - Check through the controls container. 
- Convert to an array if its a list of lists - return the processed container - raise type error if the container structure is invalid - """ - if isinstance(ctrls, (list, tuple)): - # Check to see if list of lists - try: - if isinstance(ctrls[0], (list, tuple)): - ctrls_ = np.empty((len(ctrls), len(ctrls[0])), dtype=object) - for i, ctrl in enumerate(ctrls): - ctrls_[i, :] = ctrl - ctrls = ctrls_ - except IndexError: - pass - - if isinstance(ctrls, np.ndarray): - if len(ctrls.shape) != 2: - raise TypeError("Incorrect shape for ctrl dyn gen array") - for k in range(ctrls.shape[0]): - for j in range(ctrls.shape[1]): - if not isinstance(ctrls[k, j], Qobj): - raise TypeError("All control dyn gen must be Qobj") - elif isinstance(ctrls, (list, tuple)): - for ctrl in ctrls: - if not isinstance(ctrl, Qobj): - raise TypeError("All control dyn gen must be Qobj") - else: - raise TypeError("Controls list or array not set correctly") - - return ctrls - - -def _check_drift_dyn_gen(drift): - if isinstance(drift, Qobj): - return - if not isinstance(drift, (list, tuple)): - raise TypeError("drift should be a Qobj or a list of Qobj") - for d in drift: - if not isinstance(d, Qobj): - raise TypeError("drift should be a Qobj or a list of Qobj") - - -def _attrib_deprecation(message, stacklevel=3): - """ - Issue deprecation warning - Using stacklevel=3 will ensure message refers the function - calling with the deprecated parameter, - """ - warnings.warn(message, FutureWarning, stacklevel=stacklevel) - - -def _func_deprecation(message, stacklevel=3): - """ - Issue deprecation warning - Using stacklevel=3 will ensure message refers the function - calling with the deprecated parameter, - """ - warnings.warn(message, FutureWarning, stacklevel=stacklevel) - - -class Dynamics(object): - """ - This is a base class only. See subclass descriptions and choose an - appropriate one for the application. - - Note that initialize_controls must be called before most of the methods - can be used. 
init_timeslots can be called sometimes earlier in order - to access timeslot related attributes - - This acts as a container for the operators that are used to calculate - time evolution of the system under study. That is the dynamics generators - (Hamiltonians, Lindbladians etc), the propagators from one timeslot to - the next, and the evolution operators. Due to the large number of matrix - additions and multiplications, for small systems at least, the optimisation - performance is much better using ndarrays to represent these operators. - However - - Attributes - ---------- - log_level : integer - level of messaging output from the logger. - Options are attributes of qutip.logging_utils, - in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, - assuming everything runs as expected. - The default NOTSET implies that the level will be taken from - the QuTiP settings file, which by default is WARN - - params: Dictionary - The key value pairs are the attribute name and value - Note: attributes are created if they do not exist already, - and are overwritten if they do. - - stats : Stats - Attributes of which give performance stats for the optimisation - set to None to reduce overhead of calculating stats. - Note it is (usually) shared with the Optimizer object - - tslot_computer : TimeslotComputer (subclass instance) - Used to manage when the timeslot dynamics - generators, propagators, gradients etc are updated - - prop_computer : PropagatorComputer (subclass instance) - Used to compute the propagators and their gradients - - fid_computer : FidelityComputer (subclass instance) - Used to computer the fidelity error and the fidelity error - gradient. - - memory_optimization : int - Level of memory optimisation. Setting to 0 (default) means that - execution speed is prioritized over memory. 
- Setting to 1 means that some memory prioritisation steps will be - taken, for instance using Qobj (and hence sparse arrays) as the - the internal operator data type, and not caching some operators - Potentially further memory saving maybe made with - memory_optimization > 1. - The options are processed in _set_memory_optimizations, see - this for more information. Individual memory saving options can be - switched by settting them directly (see below) - - oper_dtype : type - Data type for internal dynamics generators, propagators and time - evolution operators. This can be ndarray or Qobj. - Qobj may perform better for larger systems, and will also - perform better when (custom) fidelity measures use Qobj methods - such as partial trace. - See _choose_oper_dtype for how this is chosen when not specified - - cache_phased_dyn_gen : bool - If True then the dynamics generators will be saved with and - without the propagation prefactor (if there is one) - Defaults to True when memory_optimization=0, otherwise False - - cache_prop_grad : bool - If the True then the propagator gradients (for exact gradients) will - be computed when the propagator are computed and cache until - the are used by the fidelity computer. If False then the - fidelity computer will calculate them as needed. - Defaults to True when memory_optimization=0, otherwise False - - cache_dyn_gen_eigenvectors_adj: bool - If True then DynamicsUnitary will cached the adjoint of - the Hamiltion eignvector matrix - Defaults to True when memory_optimization=0, otherwise False - - sparse_eigen_decomp: bool - If True then DynamicsUnitary will use the sparse eigenvalue - decomposition. - Defaults to True when memory_optimization<=1, otherwise False - - num_tslots : integer - Number of timeslots (aka timeslices) - - num_ctrls : integer - Number of controls. - Note this is calculated as the length of ctrl_dyn_gen when first used. - And is recalculated during initialise_controls only. 
- - evo_time : float - Total time for the evolution - - tau : array[num_tslots] of float - Duration of each timeslot - Note that if this is set before initialize_controls is called - then num_tslots and evo_time are calculated from tau, otherwise - tau is generated from num_tslots and evo_time, that is - equal size time slices - - time : array[num_tslots+1] of float - Cumulative time for the evolution, that is the time at the start - of each time slice - - drift_dyn_gen : Qobj or list of Qobj - Drift or system dynamics generator (Hamiltonian) - Matrix defining the underlying dynamics of the system - Can also be a list of Qobj (length num_tslots) for time varying - drift dynamics - - ctrl_dyn_gen : List of Qobj - Control dynamics generator (Hamiltonians) - List of matrices defining the control dynamics - - initial : Qobj - Starting state / gate - The matrix giving the initial state / gate, i.e. at time 0 - Typically the identity for gate evolution - - target : Qobj - Target state / gate: - The matrix giving the desired state / gate for the evolution - - ctrl_amps : array[num_tslots, num_ctrls] of float - Control amplitudes - The amplitude (scale factor) for each control in each timeslot - - initial_ctrl_scaling : float - Scale factor applied to be applied the control amplitudes - when they are initialised - This is used by the PulseGens rather than in any fucntions in - this class - - initial_ctrl_offset : float - Linear offset applied to be applied the control amplitudes - when they are initialised - This is used by the PulseGens rather than in any fucntions in - this class - - dyn_gen : List of Qobj - Dynamics generators - the combined drift and control dynamics generators - for each timeslot - - prop : list of Qobj - Propagators - used to calculate time evolution from one - timeslot to the next - - prop_grad : array[num_tslots, num_ctrls] of Qobj - Propagator gradient (exact gradients only) - Array of Qobj that give the gradient - with respect to the control 
amplitudes in a timeslot - Note this attribute is only created when the selected - PropagatorComputer is an exact gradient type. - - fwd_evo : List of Qobj - Forward evolution (or propagation) - the time evolution operator from the initial state / gate to the - specified timeslot as generated by the dyn_gen - - onwd_evo : List of Qobj - Onward evolution (or propagation) - the time evolution operator from the specified timeslot to - end of the evolution time as generated by the dyn_gen - - onto_evo : List of Qobj - 'Backward' List of Qobj propagation - the overlap of the onward propagation with the inverse of the - target. - Note this is only used (so far) by the unitary dynamics fidelity - - evo_current : Boolean - Used to flag that the dynamics used to calculate the evolution - operators is current. It is set to False when the amplitudes - change - - fact_mat_round_prec : float - Rounding precision used when calculating the factor matrix - to determine if two eigenvalues are equivalent - Only used when the PropagatorComputer uses diagonalisation - - def_amps_fname : string - Default name for the output used when save_amps is called - - unitarity_check_level : int - If > 0 then unitarity of the system evolution is checked at at - evolution recomputation. - level 1 checks all propagators - level 2 checks eigen basis as well - Default is 0 - - unitarity_tol : - Tolerance used in checking if operator is unitary - Default is 1e-10 - - dump : :class:`qutip.control.dump.DynamicsDump` - Store of historical calculation data. - Set to None (Default) for no storing of historical data - Use dumping property to set level of data dumping - - dumping : string - level of data dumping: NONE, SUMMARY, FULL or CUSTOM - See property docstring for details - - dump_to_file : bool - If set True then data will be dumped to file during the calculations - dumping will be set to SUMMARY during init_evo if dump_to_file is True - and dumping not set. 
- Default is False - - dump_dir : string - Basically a link to dump.dump_dir. Exists so that it can be set through - dyn_params. - If dump is None then will return None or will set dumping to SUMMARY - when setting a path - - """ - def __init__(self, optimconfig, params=None): - self.config = optimconfig - self.params = params - self.reset() - - def reset(self): - # Link to optimiser object if self is linked to one - self.parent = None - # Main functional attributes - self.time = None - self.initial = None - self.target = None - self.ctrl_amps = None - self.initial_ctrl_scaling = 1.0 - self.initial_ctrl_offset = 0.0 - self.drift_dyn_gen = None - self.ctrl_dyn_gen = None - self._tau = None - self._evo_time = None - self._num_ctrls = None - self._num_tslots = None - # attributes used for processing evolution - self.memory_optimization = 0 - self.oper_dtype = None - self.cache_phased_dyn_gen = None - self.cache_prop_grad = None - self.cache_dyn_gen_eigenvectors_adj = None - self.sparse_eigen_decomp = None - self.dyn_dims = None - self.dyn_shape = None - self.sys_dims = None - self.sys_shape = None - self.time_depend_drift = False - self.time_depend_ctrl_dyn_gen = False - # These internal attributes will be of the internal operator data type - # used to compute the evolution - # This will be either ndarray or Qobj - self._drift_dyn_gen = None - self._ctrl_dyn_gen = None - self._phased_ctrl_dyn_gen = None - self._dyn_gen_phase = None - self._phase_application = None - self._initial = None - self._target = None - self._onto_evo_target = None - self._dyn_gen = None - self._phased_dyn_gen = None - self._prop = None - self._prop_grad = None - self._fwd_evo = None - self._onwd_evo = None - self._onto_evo = None - # The _qobj attribs are Qobj representations of the equivalent - # internal attribute. 
They are only set when the extenal accessors - # are used - self._onto_evo_target_qobj = None - self._dyn_gen_qobj = None - self._prop_qobj = None - self._prop_grad_qobj = None - self._fwd_evo_qobj = None - self._onwd_evo_qobj = None - self._onto_evo_qobj = None - # Atrributes used in diagonalisation - # again in internal operator data type (see above) - self._decomp_curr = None - self._prop_eigen = None - self._dyn_gen_eigenvectors = None - self._dyn_gen_eigenvectors_adj = None - self._dyn_gen_factormatrix = None - self.fact_mat_round_prec = 1e-10 - - # Debug and information attribs - self.stats = None - self.id_text = 'DYN_BASE' - self.def_amps_fname = "ctrl_amps.txt" - self.log_level = self.config.log_level - # Internal flags - self._dyn_gen_mapped = False - self._evo_initialized = False - self._timeslots_initialized = False - self._ctrls_initialized = False - self._ctrl_dyn_gen_checked = False - self._drift_dyn_gen_checked = False - # Unitary checking - self.unitarity_check_level = 0 - self.unitarity_tol = 1e-10 - # Data dumping - self.dump = None - self.dump_to_file = False - - self.apply_params() - - # Create the computing objects - self._create_computers() - - self.clear() - - def apply_params(self, params=None): - """ - Set object attributes based on the dictionary (if any) passed in the - instantiation, or passed as a parameter - This is called during the instantiation automatically. - The key value pairs are the attribute name and value - Note: attributes are created if they do not exist already, - and are overwritten if they do. 
- """ - if not params: - params = self.params - - if isinstance(params, dict): - self.params = params - for key in params: - setattr(self, key, params[key]) - - @property - def log_level(self): - return logger.level - - @log_level.setter - def log_level(self, lvl): - """ - Set the log_level attribute and set the level of the logger - that is call logger.setLevel(lvl) - """ - logger.setLevel(lvl) - - @property - def dumping(self): - """ - The level of data dumping that will occur during the time evolution - calculation. - - - NONE : No processing data dumped (Default) - - SUMMARY : A summary of each time evolution will be recorded - - FULL : All operators used or created in the calculation dumped - - CUSTOM : Some customised level of dumping - - When first set to CUSTOM this is equivalent to SUMMARY. It is then up - to the user to specify which operators are dumped. WARNING: FULL could - consume a lot of memory! - """ - if self.dump is None: - lvl = 'NONE' - else: - lvl = self.dump.level - - return lvl - - @dumping.setter - def dumping(self, value): - if value is None: - self.dump = None - else: - if not isinstance(value, str): - raise TypeError("Value must be string value") - lvl = value.upper() - if lvl == 'NONE': - self.dump = None - else: - if not isinstance(self.dump, qtrldump.DynamicsDump): - self.dump = qtrldump.DynamicsDump(self, level=lvl) - else: - self.dump.level = lvl - - @property - def dump_dir(self): - if self.dump: - return self.dump.dump_dir - else: - return None - - @dump_dir.setter - def dump_dir(self, value): - if not self.dump: - self.dumping = 'SUMMARY' - self.dump.dump_dir = value - - def _create_computers(self): - """ - Create the default timeslot, fidelity and propagator computers - """ - # The time slot computer. 
By default it is set to UpdateAll - # can be set to DynUpdate in the configuration - # (see class file for details) - if self.config.tslot_type == 'DYNAMIC': - self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self) - else: - self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self) - - self.prop_computer = propcomp.PropCompFrechet(self) - self.fid_computer = fidcomp.FidCompTraceDiff(self) - - def clear(self): - self.ctrl_amps = None - self.evo_current = False - if self.fid_computer is not None: - self.fid_computer.clear() - - @property - def num_tslots(self): - if not self._timeslots_initialized: - self.init_timeslots() - return self._num_tslots - - @num_tslots.setter - def num_tslots(self, value): - self._num_tslots = value - if self._timeslots_initialized: - self._tau = None - self.init_timeslots() - - @property - def evo_time(self): - if not self._timeslots_initialized: - self.init_timeslots() - return self._evo_time - - @evo_time.setter - def evo_time(self, value): - self._evo_time = value - if self._timeslots_initialized: - self._tau = None - self.init_timeslots() - - @property - def tau(self): - if not self._timeslots_initialized: - self.init_timeslots() - return self._tau - - @tau.setter - def tau(self, value): - self._tau = value - self.init_timeslots() - - def init_timeslots(self): - """ - Generate the timeslot duration array 'tau' based on the evo_time - and num_tslots attributes, unless the tau attribute is already set - in which case this step in ignored - Generate the cumulative time array 'time' based on the tau values - """ - # set the time intervals to be equal timeslices of the total if - # the have not been set already (as part of user config) - if self._num_tslots is None: - self._num_tslots = DEF_NUM_TSLOTS - if self._evo_time is None: - self._evo_time = DEF_EVO_TIME - - if self._tau is None: - self._tau = np.ones(self._num_tslots, dtype='f') * \ - self._evo_time/self._num_tslots - else: - self._num_tslots = len(self._tau) - self._evo_time = 
np.sum(self._tau) - - self.time = np.zeros(self._num_tslots+1, dtype=float) - # set the cumulative time by summing the time intervals - for t in range(self._num_tslots): - self.time[t+1] = self.time[t] + self._tau[t] - - self._timeslots_initialized = True - - def _set_memory_optimizations(self): - """ - Set various memory optimisation attributes based on the - memory_optimization attribute - If they have been set already, e.g. in apply_params - then they will not be overridden here - """ - logger.info("Setting memory optimisations for level {}".format( - self.memory_optimization)) - - if self.oper_dtype is None: - self._choose_oper_dtype() - logger.info("Internal operator data type choosen to be {}".format( - self.oper_dtype)) - else: - logger.info("Using operator data type {}".format( - self.oper_dtype)) - - if self.cache_phased_dyn_gen is None: - if self.memory_optimization > 0: - self.cache_phased_dyn_gen = False - else: - self.cache_phased_dyn_gen = True - logger.info("phased dynamics generator caching {}".format( - self.cache_phased_dyn_gen)) - - if self.cache_prop_grad is None: - if self.memory_optimization > 0: - self.cache_prop_grad = False - else: - self.cache_prop_grad = True - logger.info("propagator gradient caching {}".format( - self.cache_prop_grad)) - - if self.cache_dyn_gen_eigenvectors_adj is None: - if self.memory_optimization > 0: - self.cache_dyn_gen_eigenvectors_adj = False - else: - self.cache_dyn_gen_eigenvectors_adj = True - logger.info("eigenvector adjoint caching {}".format( - self.cache_dyn_gen_eigenvectors_adj)) - - if self.sparse_eigen_decomp is None: - if self.memory_optimization > 1: - self.sparse_eigen_decomp = True - else: - self.sparse_eigen_decomp = False - logger.info("use sparse eigen decomp {}".format( - self.sparse_eigen_decomp)) - - def _choose_oper_dtype(self): - """ - Attempt select most efficient internal operator data type - """ - - if self.memory_optimization > 0: - self.oper_dtype = Qobj - else: - # Method taken from 
Qobj.expm() - # if method is not explicitly given, try to make a good choice - # between sparse and dense solvers by considering the size of the - # system and the number of non-zero elements. - if self.time_depend_drift: - dg = self.drift_dyn_gen[0] - else: - dg = self.drift_dyn_gen - if self.time_depend_ctrl_dyn_gen: - ctrls = self.ctrl_dyn_gen[0, :] - else: - ctrls = self.ctrl_dyn_gen - for c in ctrls: - dg = dg + c - - N = dg.shape[0] - if isinstance(dg.data, _data.CSR): - n = _data.csr.nnz(dg.data) - else: - n = N**2 - - if N ** 2 < 100 * n: - # large number of nonzero elements, revert to dense solver - self.oper_dtype = np.ndarray - elif N > 400: - # large system, and quite sparse -> qutips sparse method - self.oper_dtype = Qobj - else: - # small system, but quite sparse -> qutips sparse/dense method - self.oper_dtype = np.ndarray - - return self.oper_dtype - - def _init_evo(self): - """ - Create the container lists / arrays for the: - dynamics generations, propagators, and evolutions etc - Set the time slices and cumulative time - """ - # check evolution operators - if not self._drift_dyn_gen_checked: - _check_drift_dyn_gen(self.drift_dyn_gen) - if not self._ctrl_dyn_gen_checked: - self.ctrl_dyn_gen = _check_ctrls_container(self.ctrl_dyn_gen) - - if not isinstance(self.initial, Qobj): - raise TypeError("initial must be a Qobj") - - if not isinstance(self.target, Qobj): - raise TypeError("target must be a Qobj") - - self.refresh_drift_attribs() - self.sys_dims = self.initial.dims - self.sys_shape = self.initial.shape - # Set the phase application method - self._init_phase() - - self._set_memory_optimizations() - if self.sparse_eigen_decomp and self.sys_shape[0] <= 2: - raise ValueError( - "Single qubit pulse optimization dynamics cannot use sparse" - " eigenvector decomposition because of limitations in" - " scipy.linalg.eigsh. 
Pleae set sparse_eigen_decomp to False" - " or increase the size of the system.") - - n_ts = self.num_tslots - n_ctrls = self.num_ctrls - if self.oper_dtype == Qobj: - self._initial = self.initial - self._target = self.target - self._drift_dyn_gen = self.drift_dyn_gen - self._ctrl_dyn_gen = self.ctrl_dyn_gen - elif self.oper_dtype == np.ndarray: - self._initial = self.initial.full() - self._target = self.target.full() - if self.time_depend_drift: - self._drift_dyn_gen = [d.full() for d in self.drift_dyn_gen] - else: - self._drift_dyn_gen = self.drift_dyn_gen.full() - if self.time_depend_ctrl_dyn_gen: - self._ctrl_dyn_gen = np.empty([n_ts, n_ctrls], dtype=object) - for k in range(n_ts): - for j in range(n_ctrls): - self._ctrl_dyn_gen[k, j] = \ - self.ctrl_dyn_gen[k, j].full() - else: - self._ctrl_dyn_gen = [ctrl.full() - for ctrl in self.ctrl_dyn_gen] - else: - raise ValueError( - "Unknown oper_dtype {!r}. The oper_dtype may be qutip.Qobj or" - " numpy.ndarray.".format(self.oper_dtype)) - - if self.cache_phased_dyn_gen: - if self.time_depend_ctrl_dyn_gen: - self._phased_ctrl_dyn_gen = np.empty([n_ts, n_ctrls], - dtype=object) - for k in range(n_ts): - for j in range(n_ctrls): - self._phased_ctrl_dyn_gen[k, j] = self._apply_phase( - self._ctrl_dyn_gen[k, j]) - else: - self._phased_ctrl_dyn_gen = [self._apply_phase(ctrl) - for ctrl in self._ctrl_dyn_gen] - - self._dyn_gen = [object for x in range(self.num_tslots)] - if self.cache_phased_dyn_gen: - self._phased_dyn_gen = [object for x in range(self.num_tslots)] - self._prop = [object for x in range(self.num_tslots)] - if self.prop_computer.grad_exact and self.cache_prop_grad: - self._prop_grad = np.empty([self.num_tslots, self.num_ctrls], - dtype=object) - # Time evolution operator (forward propagation) - self._fwd_evo = [object for x in range(self.num_tslots+1)] - self._fwd_evo[0] = self._initial - if self.fid_computer.uses_onwd_evo: - # Time evolution operator (onward propagation) - self._onwd_evo = [object for x in 
range(self.num_tslots)] - if self.fid_computer.uses_onto_evo: - # Onward propagation overlap with inverse target - self._onto_evo = [object for x in range(self.num_tslots+1)] - self._onto_evo[self.num_tslots] = self._get_onto_evo_target() - - if isinstance(self.prop_computer, propcomp.PropCompDiag): - self._create_decomp_lists() - - if ( - self.log_level <= logging.DEBUG - and isinstance(self, DynamicsUnitary) - ): - self.unitarity_check_level = 1 - - if self.dump_to_file: - if self.dump is None: - self.dumping = 'SUMMARY' - self.dump.write_to_file = True - self.dump.create_dump_dir() - logger.info("Dynamics dump will be written to:\n{}".format( - self.dump.dump_dir)) - - self._evo_initialized = True - - @property - def dyn_gen_phase(self): - """ - Some op that is applied to the dyn_gen before expontiating to - get the propagator. - See `phase_application` for how this is applied - """ - # Note that if this returns None then _apply_phase will never be - # called - return self._dyn_gen_phase - - @dyn_gen_phase.setter - def dyn_gen_phase(self, value): - self._dyn_gen_phase = value - - @property - def phase_application(self): - """ - phase_application : scalar(string), default='preop' - Determines how the phase is applied to the dynamics generators - - - 'preop' : P = expm(phase*dyn_gen) - - 'postop' : P = expm(dyn_gen*phase) - - 'custom' : Customised phase application - - The 'custom' option assumes that the _apply_phase method has been - set to a custom function. - """ - return self._phase_application - - @phase_application.setter - def phase_application(self, value): - self._set_phase_application(value) - - def _set_phase_application(self, value): - self._config_phase_application(value) - self._phase_application = value - - def _config_phase_application(self, ph_app=None): - """ - Set the appropriate function for the phase application - """ - err_msg = ("Invalid value '{}' for phase application. 
Must be either " - "'preop', 'postop' or 'custom'".format(ph_app)) - - if ph_app is None: - ph_app = self._phase_application - - try: - ph_app = ph_app.lower() - except AttributeError: - raise ValueError(err_msg) - - if ph_app == 'preop': - self._apply_phase = self._apply_phase_preop - elif ph_app == 'postop': - self._apply_phase = self._apply_phase_postop - elif ph_app == 'custom': - # Do nothing, assume _apply_phase set elsewhere - pass - else: - raise ValueError(err_msg) - - def _init_phase(self): - if self.dyn_gen_phase is not None: - self._config_phase_application() - else: - self.cache_phased_dyn_gen = False - - def _apply_phase(self, dg): - """ - This default method does nothing. - It will be set to another method automatically if `phase_application` - is 'preop' or 'postop'. It should be overridden repointed if - `phase_application` is 'custom' - It will never be called if `dyn_gen_phase` is None - """ - return dg - - def _apply_phase_preop(self, dg): - """ - Apply phasing operator to dynamics generator. - This called during the propagator calculation. - In this case it will be applied as phase*dg - """ - if hasattr(self.dyn_gen_phase, 'dot'): - phased_dg = self._dyn_gen_phase.dot(dg) - else: - phased_dg = self._dyn_gen_phase*dg - return phased_dg - - def _apply_phase_postop(self, dg): - """ - Apply phasing operator to dynamics generator. - This called during the propagator calculation. 
- In this case it will be applied as dg*phase - """ - if hasattr(self.dyn_gen_phase, 'dot'): - phased_dg = dg.dot(self._dyn_gen_phase) - else: - phased_dg = dg*self._dyn_gen_phase - return phased_dg - - def _create_decomp_lists(self): - """ - Create lists that will hold the eigen decomposition - used in calculating propagators and gradients - Note: used with PropCompDiag propagator calcs - """ - n_ts = self.num_tslots - self._decomp_curr = [False for x in range(n_ts)] - self._prop_eigen = [object for x in range(n_ts)] - self._dyn_gen_eigenvectors = [object for x in range(n_ts)] - if self.cache_dyn_gen_eigenvectors_adj: - self._dyn_gen_eigenvectors_adj = [object for x in range(n_ts)] - self._dyn_gen_factormatrix = [object for x in range(n_ts)] - - def initialize_controls(self, amps, init_tslots=True): - """ - Set the initial control amplitudes and time slices - Note this must be called after the configuration is complete - before any dynamics can be calculated - """ - if not isinstance(self.prop_computer, propcomp.PropagatorComputer): - raise errors.UsageError( - "No prop_computer (propagator computer) " - "set. A default should be assigned by the Dynamics subclass") - - if not isinstance(self.tslot_computer, tslotcomp.TimeslotComputer): - raise errors.UsageError( - "No tslot_computer (Timeslot computer)" - " set. A default should be assigned by the Dynamics class") - - if not isinstance(self.fid_computer, fidcomp.FidelityComputer): - raise errors.UsageError( - "No fid_computer (Fidelity computer)" - " set. A default should be assigned by the Dynamics subclass") - - self.ctrl_amps = None - if not self._timeslots_initialized: - init_tslots = True - if init_tslots: - self.init_timeslots() - self._init_evo() - self.tslot_computer.init_comp() - self.fid_computer.init_comp() - self._ctrls_initialized = True - self.update_ctrl_amps(amps) - - def check_ctrls_initialized(self): - if not self._ctrls_initialized: - raise errors.UsageError( - "Controls not initialised. 
" - "Ensure Dynamics.initialize_controls has been " - "executed with the initial control amplitudes.") - - def get_amp_times(self): - return self.time[:self.num_tslots] - - def save_amps(self, file_name=None, times=None, amps=None, verbose=False): - """ - Save a file with the current control amplitudes in each timeslot - The first column in the file will be the start time of the slot - - Parameters - ---------- - file_name : string - Name of the file - If None given the def_amps_fname attribuite will be used - - times : List type (or string) - List / array of the start times for each slot - If None given this will be retrieved through get_amp_times() - If 'exclude' then times will not be saved in the file, just - the amplitudes - - amps : Array[num_tslots, num_ctrls] - Amplitudes to be saved - If None given the ctrl_amps attribute will be used - - verbose : Boolean - If True then an info message will be logged - """ - self.check_ctrls_initialized() - - inctimes = True - if file_name is None: - file_name = self.def_amps_fname - if amps is None: - amps = self.ctrl_amps - if times is None: - times = self.get_amp_times() - else: - if isinstance(times, str): - if times.lower() == 'exclude': - inctimes = False - else: - logger.warn("Unknown option for times '{}' " - "when saving amplitudes".format(times)) - times = self.get_amp_times() - - try: - if inctimes: - shp = amps.shape - data = np.empty([shp[0], shp[1] + 1], dtype=float) - data[:, 0] = times - data[:, 1:] = amps - else: - data = amps - - np.savetxt(file_name, data, delimiter='\t', fmt='%14.6g') - - if verbose: - logger.info("Amplitudes saved to file: " + file_name) - except Exception as e: - logger.error("Failed to save amplitudes due to underling " - "error: {}".format(e)) - - def update_ctrl_amps(self, new_amps): - """ - Determine if any amplitudes have changed. 
If so, then mark the - timeslots as needing recalculation - The actual work is completed by the compare_amps method of the - timeslot computer - """ - - if self.log_level <= logging.DEBUG_INTENSE: - logger.log(logging.DEBUG_INTENSE, - "Updating amplitudes...\n" - "Current control amplitudes:\n" - + str(self.ctrl_amps) - + "\n(potenially) new amplitudes:\n" - + str(new_amps)) - - self.tslot_computer.compare_amps(new_amps) - - def flag_system_changed(self): - """ - Flag evolution, fidelity and gradients as needing recalculation - """ - self.evo_current = False - self.fid_computer.flag_system_changed() - - def get_drift_dim(self): - """ - Returns the size of the matrix that defines the drift dynamics - that is assuming the drift is NxN, then this returns N - """ - if self.dyn_shape is None: - self.refresh_drift_attribs() - return self.dyn_shape[0] - - def refresh_drift_attribs(self): - """Reset the dyn_shape, dyn_dims and time_depend_drift attribs""" - - if isinstance(self.drift_dyn_gen, (list, tuple)): - d0 = self.drift_dyn_gen[0] - self.time_depend_drift = True - else: - d0 = self.drift_dyn_gen - self.time_depend_drift = False - - if not isinstance(d0, Qobj): - raise TypeError( - "Unable to determine drift attributes, " - "because drift_dyn_gen is not Qobj (nor list of)" - ) - - self.dyn_shape = d0.shape - self.dyn_dims = d0.dims - - def get_num_ctrls(self): - """ - calculate the of controls from the length of the control list - sets the num_ctrls property, which can be used alternatively - subsequently - """ - _func_deprecation("'get_num_ctrls' has been replaced by " - "'num_ctrls' property") - return self.num_ctrls - - def _get_num_ctrls(self): - if not self._ctrl_dyn_gen_checked: - self.ctrl_dyn_gen = _check_ctrls_container(self.ctrl_dyn_gen) - self._ctrl_dyn_gen_checked = True - if isinstance(self.ctrl_dyn_gen, np.ndarray): - self._num_ctrls = self.ctrl_dyn_gen.shape[1] - self.time_depend_ctrl_dyn_gen = True - else: - self._num_ctrls = len(self.ctrl_dyn_gen) - - 
return self._num_ctrls - - @property - def num_ctrls(self): - """ - calculate the of controls from the length of the control list - sets the num_ctrls property, which can be used alternatively - subsequently - """ - if self._num_ctrls is None: - self._num_ctrls = self._get_num_ctrls() - return self._num_ctrls - - @property - def onto_evo_target(self): - if self._onto_evo_target is None: - self._get_onto_evo_target() - - if self._onto_evo_target_qobj is None: - if isinstance(self._onto_evo_target, Qobj): - self._onto_evo_target_qobj = self._onto_evo_target - else: - rev_dims = [self.sys_dims[1], self.sys_dims[0]] - self._onto_evo_target_qobj = Qobj(self._onto_evo_target, - dims=rev_dims) - - return self._onto_evo_target_qobj - - def get_owd_evo_target(self): - _func_deprecation("'get_owd_evo_target' has been replaced by " - "'onto_evo_target' property") - return self.onto_evo_target - - def _get_onto_evo_target(self): - """ - Get the inverse of the target. - Used for calculating the 'onto target' evolution - This is actually only relevant for unitary dynamics where - the target.dag() is what is required - However, for completeness, in general the inverse of the target - operator is is required - For state-to-state, the bra corresponding to the is required ket - """ - if self.target.shape[0] == self.target.shape[1]: - # Target is operator - targ = la.inv(self.target.full()) - if self.oper_dtype == Qobj: - rev_dims = [self.target.dims[1], self.target.dims[0]] - self._onto_evo_target = Qobj(targ, dims=rev_dims) - elif self.oper_dtype == np.ndarray: - self._onto_evo_target = targ - else: - assert False, f"Unknown oper_dtype {self.oper_dtype!r}" - else: - if self.oper_dtype == Qobj: - self._onto_evo_target = self.target.dag() - elif self.oper_dtype == np.ndarray: - self._onto_evo_target = self.target.dag().full() - else: - assert False, f"Unknown oper_dtype {self.oper_dtype!r}" - - return self._onto_evo_target - - def combine_dyn_gen(self, k): - """ - Computes the 
dynamics generator for a given timeslot - The is the combined Hamiltion for unitary systems - """ - _func_deprecation("'combine_dyn_gen' has been replaced by " - "'_combine_dyn_gen'") - self._combine_dyn_gen(k) - return self._dyn_gen(k) - - def _combine_dyn_gen(self, k): - """ - Computes the dynamics generator for a given timeslot - The is the combined Hamiltion for unitary systems - Also applies the phase (if any required by the propagation) - """ - if self.time_depend_drift: - dg = self._drift_dyn_gen[k] - else: - dg = self._drift_dyn_gen - for j in range(self._num_ctrls): - if self.time_depend_ctrl_dyn_gen: - dg = dg + self.ctrl_amps[k, j]*self._ctrl_dyn_gen[k, j] - else: - dg = dg + self.ctrl_amps[k, j]*self._ctrl_dyn_gen[j] - - self._dyn_gen[k] = dg - if self.cache_phased_dyn_gen: - self._phased_dyn_gen[k] = self._apply_phase(dg) - - def get_dyn_gen(self, k): - """ - Get the combined dynamics generator for the timeslot - Not implemented in the base class. Choose a subclass - """ - _func_deprecation("'get_dyn_gen' has been replaced by " - "'_get_phased_dyn_gen'") - return self._get_phased_dyn_gen(k) - - def _get_phased_dyn_gen(self, k): - if self.dyn_gen_phase is None: - return self._dyn_gen[k] - else: - if self._phased_dyn_gen is None: - return self._apply_phase(self._dyn_gen[k]) - else: - return self._phased_dyn_gen[k] - - def get_ctrl_dyn_gen(self, j): - """ - Get the dynamics generator for the control - Not implemented in the base class. 
Choose a subclass - """ - _func_deprecation("'get_ctrl_dyn_gen' has been replaced by " - "'_get_phased_ctrl_dyn_gen'") - return self._get_phased_ctrl_dyn_gen(0, j) - - def _get_phased_ctrl_dyn_gen(self, k, j): - if self._phased_ctrl_dyn_gen is not None: - if self.time_depend_ctrl_dyn_gen: - return self._phased_ctrl_dyn_gen[k, j] - else: - return self._phased_ctrl_dyn_gen[j] - else: - if self.time_depend_ctrl_dyn_gen: - if self.dyn_gen_phase is None: - return self._ctrl_dyn_gen[k, j] - else: - return self._apply_phase(self._ctrl_dyn_gen[k, j]) - else: - if self.dyn_gen_phase is None: - return self._ctrl_dyn_gen[j] - else: - return self._apply_phase(self._ctrl_dyn_gen[j]) - - @property - def dyn_gen(self): - """ - List of combined dynamics generators (Qobj) for each timeslot - """ - if self._dyn_gen is not None: - if self._dyn_gen_qobj is None: - if self.oper_dtype == Qobj: - self._dyn_gen_qobj = self._dyn_gen - else: - self._dyn_gen_qobj = [Qobj(dg, dims=self.dyn_dims) - for dg in self._dyn_gen] - return self._dyn_gen_qobj - - @property - def prop(self): - """ - List of propagators (Qobj) for each timeslot - """ - if self._prop is not None: - if self._prop_qobj is None: - if self.oper_dtype == Qobj: - self._prop_qobj = self._prop - else: - self._prop_qobj = [Qobj(dg, dims=self.dyn_dims) - for dg in self._prop] - return self._prop_qobj - - @property - def prop_grad(self): - """ - Array of propagator gradients (Qobj) for each timeslot, control - """ - if self._prop_grad is not None: - if self._prop_grad_qobj is None: - if self.oper_dtype == Qobj: - self._prop_grad_qobj = self._prop_grad - else: - self._prop_grad_qobj = np.empty( - [self.num_tslots, self.num_ctrls], - dtype=object) - for k in range(self.num_tslots): - for j in range(self.num_ctrls): - self._prop_grad_qobj[k, j] = Qobj( - self._prop_grad[k, j], - dims=self.dyn_dims) - return self._prop_grad_qobj - - def _get_prop_grad(self, k, j): - if self.cache_prop_grad: - prop_grad = self._prop_grad[k, j] - else: - 
prop_grad =\ - self.prop_computer._compute_prop_grad(k, j, compute_prop=False) - return prop_grad - - @property - def evo_init2t(self): - _attrib_deprecation( - "'evo_init2t' has been replaced by '_fwd_evo'") - return self._fwd_evo - - @property - def fwd_evo(self): - """ - List of evolution operators (Qobj) from the initial to the given - timeslot - """ - if self._fwd_evo is not None: - if self._fwd_evo_qobj is None: - if self.oper_dtype == Qobj: - self._fwd_evo_qobj = self._fwd_evo - else: - self._fwd_evo_qobj = [self.initial] - for k in range(1, self.num_tslots+1): - self._fwd_evo_qobj.append(Qobj(self._fwd_evo[k], - dims=self.sys_dims)) - return self._fwd_evo_qobj - - def _get_full_evo(self): - return self._fwd_evo[self._num_tslots] - - @property - def full_evo(self): - """Full evolution - time evolution at final time slot""" - return self.fwd_evo[self.num_tslots] - - @property - def evo_t2end(self): - _attrib_deprecation( - "'evo_t2end' has been replaced by '_onwd_evo'") - return self._onwd_evo - - @property - def onwd_evo(self): - """ - List of evolution operators (Qobj) from the initial to the given - timeslot - """ - if self._onwd_evo is not None: - if self._onwd_evo_qobj is None: - if self.oper_dtype == Qobj: - self._onwd_evo_qobj = self._fwd_evo - else: - self._onwd_evo_qobj = [Qobj(dg, dims=self.sys_dims) - for dg in self._onwd_evo] - return self._onwd_evo_qobj - - @property - def evo_t2targ(self): - _attrib_deprecation( - "'evo_t2targ' has been replaced by '_onto_evo'") - return self._onto_evo - - @property - def onto_evo(self): - """ - List of evolution operators (Qobj) from the initial to the given - timeslot - """ - if self._onto_evo is not None: - if self._onto_evo_qobj is None: - if self.oper_dtype == Qobj: - self._onto_evo_qobj = self._onto_evo - else: - self._onto_evo_qobj = [] - for k in range(0, self.num_tslots): - self._onto_evo_qobj.append(Qobj(self._onto_evo[k], - dims=self.sys_dims)) - self._onto_evo_qobj.append(self.onto_evo_target) - - 
return self._onto_evo_qobj - - def compute_evolution(self): - """ - Recalculate the time evolution operators - Dynamics generators (e.g. Hamiltonian) and - prop (propagators) are calculated as necessary - Actual work is completed by the recompute_evolution method - of the timeslot computer - """ - - # Check if values are already current, otherwise calculate all values - if not self.evo_current: - if self.log_level <= logging.DEBUG_VERBOSE: - logger.log(logging.DEBUG_VERBOSE, "Computing evolution") - self.tslot_computer.recompute_evolution() - self.evo_current = True - return True - return False - - def _ensure_decomp_curr(self, k): - """ - Checks to see if the diagonalisation has been completed since - the last update of the dynamics generators - (after the amplitude update) - If not then the diagonlisation is completed - """ - if self._decomp_curr is None: - raise errors.UsageError("Decomp lists have not been created") - if not self._decomp_curr[k]: - self._spectral_decomp(k) - - def _spectral_decomp(self, k): - """ - Calculate the diagonalization of the dynamics generator - generating lists of eigenvectors, propagators in the diagonalised - basis, and the 'factormatrix' used in calculating the propagator - gradient - Not implemented in this base class, because the method is specific - to the matrix type - """ - raise errors.UsageError("Decomposition cannot be completed by " - "this class. 
Try a(nother) subclass") - - def _is_unitary(self, A): - """ - Checks whether operator A is unitary - A can be either Qobj or ndarray - """ - if isinstance(A, Qobj): - unitary = np.allclose(np.eye(A.shape[0]), (A*A.dag()).full(), - atol=self.unitarity_tol) - else: - unitary = np.allclose(np.eye(len(A)), A.dot(A.T.conj()), - atol=self.unitarity_tol) - - return unitary - - def _calc_unitary_err(self, A): - if isinstance(A, Qobj): - err = np.sum(abs(np.eye(A.shape[0]) - (A*A.dag()).full())) - else: - err = np.sum(abs(np.eye(len(A)) - A.dot(A.T.conj()))) - - return err - - def unitarity_check(self): - """ - Checks whether all propagators are unitary - """ - for k in range(self.num_tslots): - if not self._is_unitary(self._prop[k]): - logger.warning( - "Progator of timeslot {} is not unitary".format(k)) - - -class DynamicsGenMat(Dynamics): - """ - This sub class can be used for any system where no additional - operator is applied to the dynamics generator before calculating - the propagator, e.g. classical dynamics, Lindbladian - """ - def reset(self): - Dynamics.reset(self) - self.id_text = 'GEN_MAT' - self.apply_params() - - -class DynamicsUnitary(Dynamics): - """ - This is the subclass to use for systems with dynamics described by - unitary matrices. E.g. closed systems with Hermitian Hamiltonians - Note a matrix diagonalisation is used to compute the exponent - The eigen decomposition is also used to calculate the propagator gradient. - The method is taken from DYNAMO (see file header) - - Attributes - ---------- - drift_ham : Qobj - This is the drift Hamiltonian for unitary dynamics - It is mapped to drift_dyn_gen during initialize_controls - - ctrl_ham : List of Qobj - These are the control Hamiltonians for unitary dynamics - It is mapped to ctrl_dyn_gen during initialize_controls - - H : List of Qobj - The combined drift and control Hamiltonians for each timeslot - These are the dynamics generators for unitary dynamics. 
- It is mapped to dyn_gen during initialize_controls - """ - - def reset(self): - Dynamics.reset(self) - self.id_text = 'UNIT' - self.drift_ham = None - self.ctrl_ham = None - self.H = None - self._dyn_gen_phase = -1j - self._phase_application = 'preop' - self.apply_params() - - def _create_computers(self): - """ - Create the default timeslot, fidelity and propagator computers - """ - # The time slot computer. By default it is set to _UpdateAll - # can be set to _DynUpdate in the configuration - # (see class file for details) - if self.config.tslot_type == 'DYNAMIC': - self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self) - else: - self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self) - # set the default fidelity computer - self.fid_computer = fidcomp.FidCompUnitary(self) - # set the default propagator computer - self.prop_computer = propcomp.PropCompDiag(self) - - def initialize_controls(self, amplitudes, init_tslots=True): - # Either the _dyn_gen or _ham names can be used - # This assumes that one or other has been set in the configuration - self._map_dyn_gen_to_ham() - Dynamics.initialize_controls(self, amplitudes, init_tslots=init_tslots) - - def _map_dyn_gen_to_ham(self): - if self.drift_dyn_gen is None: - self.drift_dyn_gen = self.drift_ham - else: - self.drift_ham = self.drift_dyn_gen - if self.ctrl_dyn_gen is None: - self.ctrl_dyn_gen = self.ctrl_ham - else: - self.ctrl_ham = self.ctrl_dyn_gen - self._dyn_gen_mapped = True - - @property - def num_ctrls(self): - if not self._dyn_gen_mapped: - self._map_dyn_gen_to_ham() - if self._num_ctrls is None: - self._num_ctrls = self._get_num_ctrls() - return self._num_ctrls - - def _get_onto_evo_target(self): - """ - Get the adjoint of the target. 
- Used for calculating the 'backward' evolution - """ - if self.oper_dtype == Qobj: - self._onto_evo_target = self.target.dag() - else: - self._onto_evo_target = self._target.T.conj() - return self._onto_evo_target - - def _spectral_decomp(self, k): - """ - Calculates the diagonalization of the dynamics generator - generating lists of eigenvectors, propagators in the diagonalised - basis, and the 'factormatrix' used in calculating the propagator - gradient - """ - - if self.oper_dtype == Qobj: - H = self._dyn_gen[k] - # Returns eigenvalues as array (row) - # and eigenvectors as rows of an array - _type = _data.CSR if self.sparse_eigen_decomp else _data.Dense - eig_val, eig_vec = _data.eigs(_data.to(_type, H.data)) - eig_vec = eig_vec.to_array() - - elif self.oper_dtype == np.ndarray: - H = self._dyn_gen[k] - # returns row vector of eigenvals, columns with the eigenvecs - eig_val, eig_vec = eigh(H) - - else: - assert False, f"Unknown oper_dtype {self.oper_dtype!r}" - - # assuming H is an nxn matrix, find n - n = self.get_drift_dim() - - # Calculate the propagator in the diagonalised basis - eig_val_tau = -1j*eig_val*self.tau[k] - prop_eig = np.exp(eig_val_tau) - - # Generate the factor matrix through the differences - # between each of the eigenvectors and the exponentiations - # create nxn matrix where each eigen val is repeated n times - # down the columns - o = np.ones([n, n]) - eig_val_cols = eig_val_tau*o - # calculate all the differences by subtracting it from its transpose - eig_val_diffs = eig_val_cols - eig_val_cols.T - # repeat for the propagator - prop_eig_cols = prop_eig*o - prop_eig_diffs = prop_eig_cols - prop_eig_cols.T - # the factor matrix is the elementwise quotient of the - # differeneces between the exponentiated eigen vals and the - # differences between the eigen vals - # need to avoid division by zero that would arise due to denegerate - # eigenvalues and the diagonals - degen_mask = np.abs(eig_val_diffs) < self.fact_mat_round_prec - 
eig_val_diffs[degen_mask] = 1 - factors = prop_eig_diffs / eig_val_diffs - # for degenerate eigenvalues the factor is just the exponent - factors[degen_mask] = prop_eig_cols[degen_mask] - - # Store eigenvectors, propagator and factor matric - # for use in propagator computations - self._decomp_curr[k] = True - if isinstance(factors, np.ndarray): - self._dyn_gen_factormatrix[k] = factors - else: - self._dyn_gen_factormatrix[k] = np.array(factors) - - if self.oper_dtype == Qobj: - self._prop_eigen[k] = Qobj(np.diagflat(prop_eig), - dims=self.dyn_dims) - self._dyn_gen_eigenvectors[k] = Qobj(eig_vec, - dims=self.dyn_dims) - # The _dyn_gen_eigenvectors_adj list is not used in - # memory optimised modes - if self._dyn_gen_eigenvectors_adj is not None: - self._dyn_gen_eigenvectors_adj[k] = \ - self._dyn_gen_eigenvectors[k].dag() - elif self.oper_dtype == np.ndarray: - self._prop_eigen[k] = np.diagflat(prop_eig) - self._dyn_gen_eigenvectors[k] = eig_vec - # The _dyn_gen_eigenvectors_adj list is not used in - # memory optimised modes - if self._dyn_gen_eigenvectors_adj is not None: - self._dyn_gen_eigenvectors_adj[k] = \ - self._dyn_gen_eigenvectors[k].conj().T - else: - assert False, f"Unknown oper_dtype {self.oper_dtype!r}" - - def _get_dyn_gen_eigenvectors_adj(self, k): - # The _dyn_gen_eigenvectors_adj list is not used in - # memory optimised modes - if self._dyn_gen_eigenvectors_adj is not None: - return self._dyn_gen_eigenvectors_adj[k] - if self.oper_dtype == Qobj: - return self._dyn_gen_eigenvectors[k].dag() - return self._dyn_gen_eigenvectors[k].conj().T - - def check_unitarity(self): - """ - Checks whether all propagators are unitary - For propagators found not to be unitary, the potential underlying - causes are investigated. 
- """ - for k in range(self.num_tslots): - prop_unit = self._is_unitary(self._prop[k]) - if not prop_unit: - logger.warning( - "Progator of timeslot {} is not unitary".format(k)) - if not prop_unit or self.unitarity_check_level > 1: - # Check Hamiltonian - H = self._dyn_gen[k] - if isinstance(H, Qobj): - herm = H.isherm - else: - diff = np.abs(H.T.conj() - H) - herm = np.all(diff < settings.core['atol']) - eigval_unit = self._is_unitary(self._prop_eigen[k]) - eigvec_unit = self._is_unitary(self._dyn_gen_eigenvectors[k]) - if self._dyn_gen_eigenvectors_adj is not None: - eigvecadj_unit = self._is_unitary( - self._dyn_gen_eigenvectors_adj[k]) - else: - eigvecadj_unit = None - msg = ("prop unit: {}; H herm: {}; " - "eigval unit: {}; eigvec unit: {}; " - "eigvecadj_unit: {}".format( - prop_unit, herm, eigval_unit, eigvec_unit, - eigvecadj_unit)) - logger.info(msg) - - -class DynamicsSymplectic(Dynamics): - """ - Symplectic systems - This is the subclass to use for systems where the dynamics is described - by symplectic matrices, e.g. coupled oscillators, quantum optics - - Attributes - ---------- - omega : array[drift_dyn_gen.shape] - matrix used in the calculation of propagators (time evolution) - with symplectic systems. - - """ - - def reset(self): - Dynamics.reset(self) - self.id_text = 'SYMPL' - self._omega = None - self._omega_qobj = None - self._phase_application = 'postop' - self.grad_exact = True - self.apply_params() - - def _create_computers(self): - """ - Create the default timeslot, fidelity and propagator computers - """ - # The time slot computer. 
By default it is set to _UpdateAll - # can be set to _DynUpdate in the configuration - # (see class file for details) - if self.config.tslot_type == 'DYNAMIC': - self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self) - else: - self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self) - - self.prop_computer = propcomp.PropCompFrechet(self) - self.fid_computer = fidcomp.FidCompTraceDiff(self) - - @property - def omega(self): - if self._omega is None: - self._get_omega() - if self._omega_qobj is None: - self._omega_qobj = Qobj(self._omega, dims=self.dyn_dims) - return self._omega_qobj - - def _get_omega(self): - if self._omega is None: - n = self.get_drift_dim() // 2 - omg = sympl.calc_omega(n) - if self.oper_dtype == Qobj: - self._omega = Qobj(omg, dims=self.dyn_dims) - self._omega_qobj = self._omega - else: - self._omega = omg - return self._omega - - def _set_phase_application(self, value): - Dynamics._set_phase_application(self, value) - if self._evo_initialized: - phase = self._get_dyn_gen_phase() - if phase is not None: - self._dyn_gen_phase = phase - - def _get_dyn_gen_phase(self): - if self._phase_application == 'postop': - phase = -self._get_omega() - elif self._phase_application == 'preop': - phase = self._get_omega() - elif self._phase_application == 'custom': - phase = None - # Assume phase set by user - else: - raise ValueError("No option for phase_application " - "'{}'".format(self._phase_application)) - return phase - - @property - def dyn_gen_phase(self): - r""" - The phasing operator for the symplectic group generators - usually refered to as \Omega - By default this is applied as 'postop' dyn_gen*-\Omega - If phase_application is 'preop' it is applied as \Omega*dyn_gen - """ - # Cannot be calculated until the dyn_shape is set - # that is after the drift dyn gen has been set. 
- if self._dyn_gen_phase is None: - self._dyn_gen_phase = self._get_dyn_gen_phase() - return self._dyn_gen_phase diff --git a/qutip/control/errors.py b/qutip/control/errors.py deleted file mode 100644 index 0997502a65..0000000000 --- a/qutip/control/errors.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Exception classes for the Quantum Control library -""" - - -class Error(Exception): - """Base class for all qutip control exceptions""" - def __str__(self): - return repr(self.message) - - -class UsageError(Error): - """ - A function has been used incorrectly. Most likely when a base class - was used when a sub class should have been. - funcname: function name where error occurred - msg: Explanation - """ - def __init__(self, msg): - self.message = msg - - -class FunctionalError(Error): - """ - A function behaved in an unexpected way - Attributes: - funcname: function name where error occurred - msg: Explanation - """ - def __init__(self, msg): - self.message = msg - - -class OptimizationTerminate(Error): - """ - Superclass for all early terminations from the optimisation algorithm - """ - pass - - -class GoalAchievedTerminate(OptimizationTerminate): - """ - Exception raised to terminate execution when the goal has been reached - during the optimisation algorithm - """ - def __init__(self, fid_err): - self.reason = "Goal achieved" - self.fid_err = fid_err - - -class MaxWallTimeTerminate(OptimizationTerminate): - """ - Exception raised to terminate execution when the optimisation time has - exceeded the maximum set in the config - """ - def __init__(self): - self.reason = "Max wall time exceeded" - - -class MaxFidFuncCallTerminate(OptimizationTerminate): - """ - Exception raised to terminate execution when the number of calls to the - fidelity error function has exceeded the 
maximum - """ - def __init__(self): - self.reason = "Number of fidelity error calls has exceeded the maximum" - - -class GradMinReachedTerminate(OptimizationTerminate): - """ - Exception raised to terminate execution when the minimum gradient normal - has been reached during the optimisation algorithm - """ - def __init__(self, gradient): - self.reason = "Gradient normal minimum reached" - self.gradient = gradient diff --git a/qutip/control/fidcomp.py b/qutip/control/fidcomp.py deleted file mode 100644 index 3ba9e65357..0000000000 --- a/qutip/control/fidcomp.py +++ /dev/null @@ -1,746 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Fidelity Computer - -These classes calculate the fidelity error - function to be minimised -and fidelity error gradient, which is used to direct the optimisation - -They may calculate the fidelity as an intermediary step, as in some case -e.g. unitary dynamics, this is more efficient - -The idea is that different methods for computing the fidelity can be tried -and compared using simple configuration switches. - -Note the methods in these classes were inspired by: -DYNAMO - Dynamic Framework for Quantum Optimal Control -See Machnes et.al., arXiv.1011.4874 -The unitary dynamics fidelity is taken directly frm DYNAMO -The other fidelity measures are extensions, and the sources are given -in the class descriptions. 
-""" - -import timeit -import warnings -import numpy as np -# QuTiP -from qutip import Qobj -# QuTiP control modules -import qutip.control.errors as errors -# QuTiP logging -import qutip.logging_utils as logging -logger = logging.get_logger() - - -def _attrib_deprecation(message, stacklevel=3): - """ - Issue deprecation warning - Using stacklevel=3 will ensure message refers the function - calling with the deprecated parameter, - """ - warnings.warn(message, FutureWarning, stacklevel=stacklevel) - - -def _func_deprecation(message, stacklevel=3): - """ - Issue deprecation warning - Using stacklevel=3 will ensure message refers the function - calling with the deprecated parameter, - """ - warnings.warn(message, FutureWarning, stacklevel=stacklevel) - - -def _trace(A): - """wrapper for calculating the trace""" - # input is an operator (Qobj, array, sparse etc), so - if isinstance(A, Qobj): - return A.tr() - else: - return np.trace(A) - - -class FidelityComputer(object): - """ - Base class for all Fidelity Computers. - This cannot be used directly. See subclass descriptions and choose - one appropriate for the application - Note: this must be instantiated with a Dynamics object, that is the - container for the data that the methods operate on - - Attributes - ---------- - log_level : integer - level of messaging output from the logger. - Options are attributes of qutip.logging_utils, - in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, - assuming everything runs as expected. 
- The default NOTSET implies that the level will be taken from - the QuTiP settings file, which by default is WARN - - dimensional_norm : float - Normalisation constant - - fid_norm_func : function - Used to normalise the fidelity - See SU and PSU options for the unitary dynamics - - grad_norm_func : function - Used to normalise the fidelity gradient - See SU and PSU options for the unitary dynamics - - uses_onwd_evo : boolean - flag to specify whether the onwd_evo evolution operator - (see Dynamics) is used by the FidelityComputer - - uses_onto_evo : boolean - flag to specify whether the onto_evo evolution operator - (see Dynamics) is used by the FidelityComputer - - fid_err : float - Last computed value of the fidelity error - - fidelity : float - Last computed value of the normalised fidelity - - fidelity_current : boolean - flag to specify whether the fidelity / fid_err are based on the - current amplitude values. Set False when amplitudes change - - fid_err_grad: array[num_tslot, num_ctrls] of float - Last computed values for the fidelity error gradients wrt the - control in the timeslot - - grad_norm : float - Last computed value for the norm of the fidelity error gradients - (sqrt of the sum of the squares) - - fid_err_grad_current : boolean - flag to specify whether the fidelity / fid_err are based on the - current amplitude values. 
Set False when amplitudes change - """ - def __init__(self, dynamics, params=None): - self.parent = dynamics - self.params = params - self.reset() - - def reset(self): - """ - reset any configuration data and - clear any temporarily held status data - """ - self.log_level = self.parent.log_level - self.id_text = 'FID_COMP_BASE' - self.dimensional_norm = 1.0 - self.fid_norm_func = None - self.grad_norm_func = None - self.uses_onwd_evo = False - self.uses_onto_evo = False - self.apply_params() - self.clear() - - def clear(self): - """ - clear any temporarily held status data - """ - self.fid_err = None - self.fidelity = None - self.fid_err_grad = None - self.grad_norm = np.inf - self.fidelity_current = False - self.fid_err_grad_current = False - self.grad_norm = 0.0 - - def apply_params(self, params=None): - """ - Set object attributes based on the dictionary (if any) passed in the - instantiation, or passed as a parameter - This is called during the instantiation automatically. - The key value pairs are the attribute name and value - Note: attributes are created if they do not exist already, - and are overwritten if they do. - """ - if not params: - params = self.params - - if isinstance(params, dict): - self.params = params - for key in params: - setattr(self, key, params[key]) - - @property - def log_level(self): - return logger.level - - @log_level.setter - def log_level(self, lvl): - """ - Set the log_level attribute and set the level of the logger - that is call logger.setLevel(lvl) - """ - logger.setLevel(lvl) - - def init_comp(self): - """ - initialises the computer based on the configuration of the Dynamics - """ - # optionally implemented in subclass - pass - - def get_fid_err(self): - """ - returns the absolute distance from the maximum achievable fidelity - """ - # must be implemented by subclass - raise errors.UsageError( - "No method defined for getting fidelity error." 
- " Suspect base class was used where sub class should have been") - - def get_fid_err_gradient(self): - """ - Returns the normalised gradient of the fidelity error - in a (nTimeslots x n_ctrls) array wrt the timeslot control amplitude - """ - # must be implemented by subclass - raise errors.UsageError("No method defined for getting fidelity" - " error gradient. Suspect base class was" - " used where sub class should have been") - - def flag_system_changed(self): - """ - Flag fidelity and gradients as needing recalculation - """ - self.fidelity_current = False - # Flag gradient as needing recalculating - self.fid_err_grad_current = False - - @property - def uses_evo_t2end(self): - _attrib_deprecation( - "'uses_evo_t2end' has been replaced by 'uses_onwd_evo'") - return self.uses_onwd_evo - - @uses_evo_t2end.setter - def uses_evo_t2end(self, value): - _attrib_deprecation( - "'uses_evo_t2end' has been replaced by 'uses_onwd_evo'") - self.uses_onwd_evo = value - - @property - def uses_evo_t2targ(self): - _attrib_deprecation( - "'uses_evo_t2targ' has been replaced by 'uses_onto_evo'") - return self.uses_onto_evo - - @uses_evo_t2targ.setter - def uses_evo_t2targ(self, value): - _attrib_deprecation( - "'uses_evo_t2targ' has been replaced by 'uses_onto_evo'") - self.uses_onto_evo = value - - -class FidCompUnitary(FidelityComputer): - """ - Computes fidelity error and gradient assuming unitary dynamics, e.g. - closed qubit systems - Note fidelity and gradient calculations were taken from DYNAMO - (see file header) - - Attributes - ---------- - phase_option : string - determines how global phase is treated in fidelity calculations: - PSU - global phase ignored - SU - global phase included - - fidelity_prenorm : complex - Last computed value of the fidelity before it is normalised - It is stored to use in the gradient normalisation calculation - - fidelity_prenorm_current : boolean - flag to specify whether fidelity_prenorm are based on the - current amplitude values. 
Set False when amplitudes change - """ - def reset(self): - FidelityComputer.reset(self) - self.id_text = 'UNIT' - self.uses_onto_evo = True - self._init_phase_option('PSU') - self.apply_params() - - def clear(self): - FidelityComputer.clear(self) - self.fidelity_prenorm = None - self.fidelity_prenorm_current = False - - def set_phase_option(self, phase_option=None): - """ - Deprecated - use phase_option - Phase options are - SU - global phase important - PSU - global phase is not important - """ - _func_deprecation("'set_phase_option' is deprecated. " - "Use phase_option property") - self._init_phase_option(phase_option) - - @property - def phase_option(self): - return self._phase_option - - @phase_option.setter - def phase_option(self, value): - """ - Phase options are - SU - global phase important - PSU - global phase is not important - """ - self._init_phase_option(value) - - def _init_phase_option(self, value): - self._phase_option = value - if value == 'PSU': - self.fid_norm_func = self.normalize_PSU - self.grad_norm_func = self.normalize_gradient_PSU - elif value == 'SU': - self.fid_norm_func = self.normalize_SU - self.grad_norm_func = self.normalize_gradient_SU - elif value is None: - raise errors.UsageError("phase_option cannot be set to None" - " for this FidelityComputer.") - else: - raise errors.UsageError( - "No option for phase_option '{}'".format(value)) - - def init_comp(self): - """ - Check configuration and initialise the normalisation - """ - if self.fid_norm_func is None or self.grad_norm_func is None: - raise errors.UsageError("The phase_option must be be set" - "for this fidelity computer") - self.init_normalization() - - def flag_system_changed(self): - """ - Flag fidelity and gradients as needing recalculation - """ - FidelityComputer.flag_system_changed(self) - # Flag the fidelity (prenormalisation) value as needing calculation - self.fidelity_prenorm_current = False - - def init_normalization(self): - """ - Calc norm of to scale subsequent 
norms - When considering unitary time evolution operators, this basically - results in calculating the trace of the identity matrix - and is hence equal to the size of the target matrix - There may be situations where this is not the case, and hence it - is not assumed to be so. - The normalisation function called should be set to either the - PSU - global phase ignored - SU - global phase respected - """ - dyn = self.parent - self.dimensional_norm = 1.0 - self.dimensional_norm = \ - self.fid_norm_func(dyn.target.dag()*dyn.target) - - def normalize_SU(self, A): - try: - if A.shape[0] == A.shape[1]: - # input is an operator (Qobj, array), so - norm = _trace(A) - else: - raise TypeError("Cannot compute trace (not square)") - except AttributeError: - # assume input is already scalar and hence assumed - # to be the prenormalised scalar value, e.g. fidelity - norm = A - return np.real(norm) / self.dimensional_norm - - def normalize_gradient_SU(self, grad): - """ - Normalise the gradient matrix passed as grad - This SU version respects global phase - """ - return np.real(grad) / self.dimensional_norm - - def normalize_PSU(self, A): - try: - if A.shape[0] == A.shape[1]: - # input is an operator (Qobj, array, sparse etc), so - norm = _trace(A) - else: - raise TypeError("Cannot compute trace (not square)") - except (AttributeError, IndexError): - # assume input is already scalar and hence assumed - # to be the prenormalised scalar value, e.g. 
fidelity - norm = A - return np.abs(norm) / self.dimensional_norm - - def normalize_gradient_PSU(self, grad): - """ - Normalise the gradient matrix passed as grad - This PSU version is independent of global phase - """ - fid_pn = self.get_fidelity_prenorm() - return np.real(grad * np.exp(-1j * np.angle(fid_pn)) - / self.dimensional_norm) - - def get_fid_err(self): - """ - Gets the absolute error in the fidelity - """ - return np.abs(1 - self.get_fidelity()) - - def get_fidelity(self): - """ - Gets the appropriately normalised fidelity value - The normalisation is determined by the fid_norm_func pointer - which should be set in the config - """ - if not self.fidelity_current: - self.fidelity = \ - self.fid_norm_func(self.get_fidelity_prenorm()) - self.fidelity_current = True - if self.log_level <= logging.DEBUG: - logger.debug("Fidelity (normalised): {}".format(self.fidelity)) - return self.fidelity - - def get_fidelity_prenorm(self): - """ - Gets the current fidelity value prior to normalisation - Note the gradient function uses this value - The value is cached, because it is used in the gradient calculation - """ - if not self.fidelity_prenorm_current: - dyn = self.parent - k = dyn.tslot_computer._get_timeslot_for_fidelity_calc() - dyn.compute_evolution() - if dyn.oper_dtype == Qobj: - f = dyn._onto_evo[k]*dyn._fwd_evo[k] - if isinstance(f, Qobj): - f = f.tr() - else: - f = _trace(dyn._onto_evo[k].dot(dyn._fwd_evo[k])) - self.fidelity_prenorm = f - self.fidelity_prenorm_current = True - if dyn.stats is not None: - dyn.stats.num_fidelity_computes += 1 - if self.log_level <= logging.DEBUG: - logger.debug("Fidelity (pre normalisation): {}".format( - self.fidelity_prenorm)) - return self.fidelity_prenorm - - def get_fid_err_gradient(self): - """ - Returns the normalised gradient of the fidelity error - in a (nTimeslots x n_ctrls) array - The gradients are cached in case they are requested - mutliple times between control updates - (although this is not typically found 
to happen) - """ - if not self.fid_err_grad_current: - dyn = self.parent - grad_prenorm = self.compute_fid_grad() - if self.log_level <= logging.DEBUG_INTENSE: - logger.log(logging.DEBUG_INTENSE, "pre-normalised fidelity " - "gradients:\n{}".format(grad_prenorm)) - # AJGP: Note this check should not be necessary if dynamics are - # unitary. However, if they are not then this gradient - # can still be used, however the interpretation is dubious - if self.get_fidelity() >= 1: - self.fid_err_grad = self.grad_norm_func(grad_prenorm) - else: - self.fid_err_grad = -self.grad_norm_func(grad_prenorm) - - self.fid_err_grad_current = True - if dyn.stats is not None: - dyn.stats.num_grad_computes += 1 - - self.grad_norm = np.sqrt(np.sum(self.fid_err_grad**2)) - if self.log_level <= logging.DEBUG_INTENSE: - logger.log(logging.DEBUG_INTENSE, "Normalised fidelity error " - "gradients:\n{}".format(self.fid_err_grad)) - - if self.log_level <= logging.DEBUG: - logger.debug("Gradient (sum sq norm): " - "{} ".format(self.grad_norm)) - - return self.fid_err_grad - - def compute_fid_grad(self): - """ - Calculates exact gradient of function wrt to each timeslot - control amplitudes. 
Note these gradients are not normalised - These are returned as a (nTimeslots x n_ctrls) array - """ - dyn = self.parent - n_ctrls = dyn.num_ctrls - n_ts = dyn.num_tslots - - # create n_ts x n_ctrls zero array for grad start point - grad = np.zeros([n_ts, n_ctrls], dtype=complex) - - dyn.tslot_computer.flag_all_calc_now() - dyn.compute_evolution() - - # loop through all ctrl timeslots calculating gradients - time_st = timeit.default_timer() - for j in range(n_ctrls): - for k in range(n_ts): - fwd_evo = dyn._fwd_evo[k] - onto_evo = dyn._onto_evo[k+1] - if dyn.oper_dtype == Qobj: - g = onto_evo*dyn._get_prop_grad(k, j)*fwd_evo - if isinstance(g, Qobj): - g = g.tr() - else: - g = _trace(onto_evo.dot( - dyn._get_prop_grad(k, j)).dot(fwd_evo)) - grad[k, j] = g - if dyn.stats is not None: - dyn.stats.wall_time_gradient_compute += \ - timeit.default_timer() - time_st - return grad - - -class FidCompTraceDiff(FidelityComputer): - """ - Computes fidelity error and gradient for general system dynamics - by calculating the the fidelity error as the trace of the overlap - of the difference between the target and evolution resulting from - the pulses with the transpose of the same. - This should provide a distance measure for dynamics described by matrices - Note the gradient calculation is taken from: - 'Robust quantum gates for open systems via optimal control: - Markovian versus non-Markovian dynamics' - Frederik F Floether, Pierre de Fouquieres, and Sophie G Schirmer - - Attributes - ---------- - scale_factor : float - The fidelity error calculated is of some arbitary scale. This - factor can be used to scale the fidelity error such that it may - represent some physical measure - If None is given then it is caculated as 1/2N, where N - is the dimension of the drift, when the Dynamics are initialised. 
- """ - - def reset(self): - FidelityComputer.reset(self) - self.id_text = 'TRACEDIFF' - self.scale_factor = None - self.uses_onwd_evo = True - if not self.parent.prop_computer.grad_exact: - raise errors.UsageError( - "This FidelityComputer can only be" - " used with an exact gradient PropagatorComputer.") - self.apply_params() - - def init_comp(self): - """ - initialises the computer based on the configuration of the Dynamics - Calculates the scale_factor is not already set - """ - if self.scale_factor is None: - self.scale_factor = 1.0 / (2.0*self.parent.get_drift_dim()) - if self.log_level <= logging.DEBUG: - logger.debug("Scale factor calculated as {}".format( - self.scale_factor)) - - def get_fid_err(self): - """ - Gets the absolute error in the fidelity - """ - if not self.fidelity_current: - dyn = self.parent - dyn.compute_evolution() - n_ts = dyn.num_tslots - evo_final = dyn._fwd_evo[n_ts] - evo_f_diff = dyn._target - evo_final - if self.log_level <= logging.DEBUG_VERBOSE: - logger.log(logging.DEBUG_VERBOSE, "Calculating TraceDiff " - "fidelity...\n Target:\n{}\n Evo final:\n{}\n" - "Evo final diff:\n{}".format(dyn._target, evo_final, - evo_f_diff)) - - # Calculate the fidelity error using the trace difference norm - # Note that the value should have not imagnary part, so using - # np.real, just avoids the complex casting warning - if dyn.oper_dtype == Qobj: - self.fid_err = self.scale_factor*np.real( - (evo_f_diff.dag()*evo_f_diff).tr()) - else: - self.fid_err = self.scale_factor*np.real(_trace( - evo_f_diff.conj().T.dot(evo_f_diff))) - - if np.isnan(self.fid_err): - self.fid_err = np.Inf - - if dyn.stats is not None: - dyn.stats.num_fidelity_computes += 1 - - self.fidelity_current = True - if self.log_level <= logging.DEBUG: - logger.debug("Fidelity error: {}".format(self.fid_err)) - - return self.fid_err - - def get_fid_err_gradient(self): - """ - Returns the normalised gradient of the fidelity error - in a (nTimeslots x n_ctrls) array - The gradients 
are cached in case they are requested - mutliple times between control updates - (although this is not typically found to happen) - """ - if not self.fid_err_grad_current: - dyn = self.parent - self.fid_err_grad = self.compute_fid_err_grad() - self.fid_err_grad_current = True - if dyn.stats is not None: - dyn.stats.num_grad_computes += 1 - - self.grad_norm = np.sqrt(np.sum(self.fid_err_grad**2)) - if self.log_level <= logging.DEBUG_INTENSE: - logger.log(logging.DEBUG_INTENSE, "fidelity error gradients:\n" - "{}".format(self.fid_err_grad)) - - if self.log_level <= logging.DEBUG: - logger.debug("Gradient norm: " - "{} ".format(self.grad_norm)) - - return self.fid_err_grad - - def compute_fid_err_grad(self): - """ - Calculate exact gradient of the fidelity error function - wrt to each timeslot control amplitudes. - Uses the trace difference norm fidelity - These are returned as a (nTimeslots x n_ctrls) array - """ - dyn = self.parent - n_ctrls = dyn.num_ctrls - n_ts = dyn.num_tslots - - # create n_ts x n_ctrls zero array for grad start point - grad = np.zeros([n_ts, n_ctrls]) - - dyn.tslot_computer.flag_all_calc_now() - dyn.compute_evolution() - - # loop through all ctrl timeslots calculating gradients - time_st = timeit.default_timer() - - evo_final = dyn._fwd_evo[n_ts] - evo_f_diff = dyn._target - evo_final - for j in range(n_ctrls): - for k in range(n_ts): - fwd_evo = dyn._fwd_evo[k] - if dyn.oper_dtype == Qobj: - evo_grad = dyn._get_prop_grad(k, j)*fwd_evo - if k+1 < n_ts: - evo_grad = dyn._onwd_evo[k+1]*evo_grad - # Note that the value should have not imagnary part, so - # using np.real, just avoids the complex casting warning - g = -2*self.scale_factor*np.real( - (evo_f_diff.dag()*evo_grad).tr()) - else: - evo_grad = dyn._get_prop_grad(k, j).dot(fwd_evo) - if k+1 < n_ts: - evo_grad = dyn._onwd_evo[k+1].dot(evo_grad) - g = -2*self.scale_factor*np.real(_trace( - evo_f_diff.conj().T.dot(evo_grad))) - if np.isnan(g): - g = np.Inf - - grad[k, j] = g - if dyn.stats is 
not None: - dyn.stats.wall_time_gradient_compute += \ - timeit.default_timer() - time_st - return grad - - -class FidCompTraceDiffApprox(FidCompTraceDiff): - """ - As FidCompTraceDiff, except uses the finite difference method to - compute approximate gradients - - Attributes - ---------- - epsilon : float - control amplitude offset to use when approximating the gradient wrt - a timeslot control amplitude - """ - def reset(self): - FidelityComputer.reset(self) - self.id_text = 'TDAPPROX' - self.uses_onwd_evo = True - self.scale_factor = None - self.epsilon = 0.001 - self.apply_params() - - def compute_fid_err_grad(self): - """ - Calculates gradient of function wrt to each timeslot - control amplitudes. Note these gradients are not normalised - They are calulated - These are returned as a (nTimeslots x n_ctrls) array - """ - dyn = self.parent - prop_comp = dyn.prop_computer - n_ctrls = dyn.num_ctrls - n_ts = dyn.num_tslots - - if self.log_level >= logging.DEBUG: - logger.debug("Computing fidelity error gradient") - # create n_ts x n_ctrls zero array for grad start point - grad = np.zeros([n_ts, n_ctrls]) - - dyn.tslot_computer.flag_all_calc_now() - dyn.compute_evolution() - curr_fid_err = self.get_fid_err() - - # loop through all ctrl timeslots calculating gradients - time_st = timeit.default_timer() - - for j in range(n_ctrls): - for k in range(n_ts): - fwd_evo = dyn._fwd_evo[k] - prop_eps = prop_comp._compute_diff_prop(k, j, self.epsilon) - if dyn.oper_dtype == Qobj: - evo_final_eps = fwd_evo*prop_eps - if k+1 < n_ts: - evo_final_eps = evo_final_eps*dyn._onwd_evo[k+1] - evo_f_diff_eps = dyn._target - evo_final_eps - # Note that the value should have not imagnary part, so - # using np.real, just avoids the complex casting warning - fid_err_eps = self.scale_factor*np.real( - (evo_f_diff_eps.dag()*evo_f_diff_eps).tr()) - else: - evo_final_eps = fwd_evo.dot(prop_eps) - if k+1 < n_ts: - evo_final_eps = evo_final_eps.dot(dyn._onwd_evo[k+1]) - evo_f_diff_eps = dyn._target 
- evo_final_eps - fid_err_eps = self.scale_factor*np.real(_trace( - evo_f_diff_eps.conj().T.dot(evo_f_diff_eps))) - - g = (fid_err_eps - curr_fid_err)/self.epsilon - if np.isnan(g): - g = np.Inf - - grad[k, j] = g - - if dyn.stats is not None: - dyn.stats.wall_time_gradient_compute += \ - timeit.default_timer() - time_st - - return grad diff --git a/qutip/control/grape.py b/qutip/control/grape.py deleted file mode 100644 index c174a96039..0000000000 --- a/qutip/control/grape.py +++ /dev/null @@ -1,603 +0,0 @@ -""" -This module contains functions that implement the GRAPE algorithm for -calculating pulse sequences for quantum systems. -""" - -__all__ = ['plot_grape_control_fields', - 'grape_unitary', 'cy_grape_unitary', 'grape_unitary_adaptive'] - -import warnings -import time -import numpy as np -from scipy.interpolate import interp1d -import scipy.sparse as sp - -from qutip import Qobj -from qutip.ui.progressbar import BaseProgressBar -from qutip.control.cy_grape import cy_overlap, cy_grape_inner - -import qutip.logging_utils -logger = qutip.logging_utils.get_logger('qutip.control.grape') - - -class GRAPEResult: - """ - Class for representing the result of a GRAPE simulation. - - Attributes - ---------- - u : array - GRAPE control pulse matrix. - - H_t : time-dependent Hamiltonian - The time-dependent Hamiltonian that realize the GRAPE pulse sequence. - - U_f : Qobj - The final unitary transformation that is realized by the evolution - of the system with the GRAPE generated pulse sequences. - """ - def __init__(self, u=None, H_t=None, U_f=None): - self.u = u - self.H_t = H_t - self.U_f = U_f - - -def plot_grape_control_fields(times, u, labels, uniform_axes=False): - """ - Plot a series of plots showing the GRAPE control fields given in the - given control pulse matrix u. - - Parameters - ---------- - times : array - Time coordinate array. - - u : array - Control pulse matrix. 
- - labels : list - List of labels for each control pulse sequence in the control pulse - matrix. - - uniform_axes : bool - Whether or not to plot all pulse sequences using the same y-axis scale. - """ - import matplotlib.pyplot as plt - R, J, M = u.shape - fig, axes = plt.subplots(J, 1, figsize=(8, 2 * J), squeeze=False) - y_max = abs(u).max() - for r in range(R): - for j in range(J): - if r == R - 1: - lw, lc, alpha = 2.0, 'k', 1.0 - axes[j, 0].set_ylabel(labels[j], fontsize=18) - axes[j, 0].set_xlabel(r'$t$', fontsize=18) - axes[j, 0].set_xlim(0, times[-1]) - else: - lw, lc, alpha = 0.5, 'b', 0.25 - axes[j, 0].step(times, u[r, j, :], lw=lw, color=lc, alpha=alpha) - if uniform_axes: - axes[j, 0].set_ylim(-y_max, y_max) - fig.tight_layout() - return fig, axes - - -def _overlap(A, B): - return (A.dag() * B).tr() / A.shape[0] - - -def grape_unitary(U, H0, H_ops, R, times, eps=None, u_start=None, - u_limits=None, interp_kind='linear', use_interp=False, - alpha=None, beta=None, phase_sensitive=True, - progress_bar=None): - """ - Calculate control pulses for the Hamiltonian operators in H_ops so that the - unitary U is realized. - - Experimental: Work in progress. - - Parameters - ---------- - U : Qobj - Target unitary evolution operator. - - H0 : Qobj - Static Hamiltonian (that cannot be tuned by the control fields). - - H_ops: list of Qobj - A list of operators that can be tuned in the Hamiltonian via the - control fields. - - R : int - Number of GRAPE iterations. - - time : array / list - Array of time coordinates for control pulse evalutation. - - u_start : array - Optional array with initial control pulse values. - - Returns - ------- - Instance of GRAPEResult, which contains the control pulses calculated - with GRAPE, a time-dependent Hamiltonian that is defined by the - control pulses, as well as the resulting propagator. 
- """ - progress_bar = progress_bar or BaseProgressBar() - if eps is None: - eps = 0.1 * (2 * np.pi) / (times[-1]) - - M = len(times) - J = len(H_ops) - - u = np.zeros((R, J, M)) - - if u_limits and len(u_limits) != 2: - raise ValueError("u_limits must be a list with two values") - - if u_limits: - warnings.warn("Caution: Using experimental feature u_limits") - - if u_limits and u_start: - # make sure that no values in u0 violates the u_limits conditions - u_start = np.array(u_start) - u_start[u_start < u_limits[0]] = u_limits[0] - u_start[u_start > u_limits[1]] = u_limits[1] - - if u_start is not None: - for idx, u0 in enumerate(u_start): - u[0, idx, :] = u0 - - if beta: - warnings.warn("Causion: Using experimental feature time-penalty") - - progress_bar.start(R) - for r in range(R - 1): - progress_bar.update(r) - - dt = times[1] - times[0] - - if use_interp: - ip_funcs = [interp1d(times, u[r, j, :], kind=interp_kind, - bounds_error=False, fill_value=u[r, j, -1]) - for j in range(J)] - - def _H_t(t, args=None): - return H0 + sum(float(ip_funcs[j](t)) * H_ops[j] - for j in range(J)) - - U_list = [(-1j * _H_t(times[idx]) * dt).expm() - for idx in range(M-1)] - - else: - def _H_idx(idx): - return H0 + sum([u[r, j, idx] * H_ops[j] for j in range(J)]) - - U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)] - - U_f_list = [] - U_b_list = [] - - U_f = 1 - U_b = 1 - for n in range(M - 1): - U_f = U_list[n] * U_f - U_f_list.append(U_f) - U_b_list.insert(0, U_b) - U_b = U_list[M - 2 - n].dag() * U_b - - for j in range(J): - for m in range(M-1): - P = U_b_list[m] * U - Q = 1j * dt * H_ops[j] * U_f_list[m] - - if phase_sensitive: - du = - _overlap(P, Q) - else: - du = - 2 * _overlap(P, Q) * _overlap(U_f_list[m], P) - - if alpha: - # penalty term for high power control signals u - du += -2 * alpha * u[r, j, m] * dt - - if beta: - # penalty term for late control signals u - du += -2 * beta * m * u[r, j, m] * dt - - u[r + 1, j, m] = u[r, j, m] + eps * du.real - - if 
u_limits: - if u[r + 1, j, m] < u_limits[0]: - u[r + 1, j, m] = u_limits[0] - elif u[r + 1, j, m] > u_limits[1]: - u[r + 1, j, m] = u_limits[1] - - u[r + 1, j, -1] = u[r + 1, j, -2] - - if use_interp: - ip_funcs = [interp1d(times, u[R - 1, j, :], kind=interp_kind, - bounds_error=False, fill_value=u[R - 1, j, -1]) - for j in range(J)] - - H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)] - for j in range(J)] - else: - H_td_func = [H0] + [[H_ops[j], u[-1, j, :]] for j in range(J)] - - progress_bar.finished() - - # return U_f_list[-1], H_td_func, u - return GRAPEResult(u=u, U_f=U_f_list[-1], H_t=H_td_func) - - -def cy_grape_unitary(U, H0, H_ops, R, times, eps=None, u_start=None, - u_limits=None, interp_kind='linear', use_interp=False, - alpha=None, beta=None, phase_sensitive=True, - progress_bar=None): - """ - Calculate control pulses for the Hamitonian operators in H_ops so that the - unitary U is realized. - - Experimental: Work in progress. - - Parameters - ---------- - U : Qobj - Target unitary evolution operator. - - H0 : Qobj - Static Hamiltonian (that cannot be tuned by the control fields). - - H_ops: list of Qobj - A list of operators that can be tuned in the Hamiltonian via the - control fields. - - R : int - Number of GRAPE iterations. - - time : array / list - Array of time coordinates for control pulse evalutation. - - u_start : array - Optional array with initial control pulse values. - - Returns - ------- - Instance of GRAPEResult, which contains the control pulses calculated - with GRAPE, a time-dependent Hamiltonian that is defined by the - control pulses, as well as the resulting propagator. 
- """ - progress_bar = progress_bar or BaseProgressBar() - - if eps is None: - eps = 0.1 * (2 * np.pi) / (times[-1]) - - M = len(times) - J = len(H_ops) - - u = np.zeros((R, J, M)) - - H_ops_data = [H_op.data for H_op in H_ops] - - if u_limits and len(u_limits) != 2: - raise ValueError("u_limits must be a list with two values") - - if u_limits: - warnings.warn("Causion: Using experimental feature u_limits") - - if u_limits and u_start: - # make sure that no values in u0 violates the u_limits conditions - u_start = np.array(u_start) - u_start[u_start < u_limits[0]] = u_limits[0] - u_start[u_start > u_limits[1]] = u_limits[1] - - if u_limits: - use_u_limits = 1 - u_min = u_limits[0] - u_max = u_limits[1] - else: - use_u_limits = 0 - u_min = 0.0 - u_max = 0.0 - - if u_start is not None: - for idx, u0 in enumerate(u_start): - u[0, idx, :] = u0 - - if beta: - warnings.warn("Causion: Using experimental feature time-penalty") - - alpha_val = alpha if alpha else 0.0 - beta_val = beta if beta else 0.0 - - progress_bar.start(R) - for r in range(R - 1): - progress_bar.update(r) - - dt = times[1] - times[0] - - if use_interp: - ip_funcs = [interp1d(times, u[r, j, :], kind=interp_kind, - bounds_error=False, fill_value=u[r, j, -1]) - for j in range(J)] - - def _H_t(t, args=None): - return H0 + sum([float(ip_funcs[j](t)) * H_ops[j] - for j in range(J)]) - - U_list = [(-1j * _H_t(times[idx]) * dt).expm().data - for idx in range(M-1)] - - else: - def _H_idx(idx): - return H0 + sum([u[r, j, idx] * H_ops[j] for j in range(J)]) - - U_list = [(-1j * _H_idx(idx) * dt).expm().data - for idx in range(M-1)] - - U_f_list = [] - U_b_list = [] - - U_f = 1 - U_b = sp.eye(*(U.shape)) - for n in range(M - 1): - - U_f = U_list[n] * U_f - U_f_list.append(U_f) - - U_b_list.insert(0, U_b) - U_b = U_list[M - 2 - n].T.conj().tocsr() * U_b - - cy_grape_inner(U.data, u, r, J, M, U_b_list, U_f_list, H_ops_data, - dt, eps, alpha_val, beta_val, phase_sensitive, - use_u_limits, u_min, u_max) - - if 
use_interp: - ip_funcs = [interp1d(times, u[R - 1, j, :], kind=interp_kind, - bounds_error=False, fill_value=u[R - 1, j, -1]) - for j in range(J)] - - H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)] - for j in range(J)] - else: - H_td_func = [H0] + [[H_ops[j], u[-1, j, :]] for j in range(J)] - - progress_bar.finished() - - return GRAPEResult(u=u, U_f=Qobj(U_f_list[-1], dims=U.dims), - H_t=H_td_func) - - -def grape_unitary_adaptive(U, H0, H_ops, R, times, eps=None, u_start=None, - u_limits=None, interp_kind='linear', - use_interp=False, alpha=None, beta=None, - phase_sensitive=False, overlap_terminate=1.0, - progress_bar=None): - """ - Calculate control pulses for the Hamiltonian operators in H_ops so that - the unitary U is realized. - - Experimental: Work in progress. - - Parameters - ---------- - U : Qobj - Target unitary evolution operator. - - H0 : Qobj - Static Hamiltonian (that cannot be tuned by the control fields). - - H_ops: list of Qobj - A list of operators that can be tuned in the Hamiltonian via the - control fields. - - R : int - Number of GRAPE iterations. - - time : array / list - Array of time coordinates for control pulse evalutation. - - u_start : array - Optional array with initial control pulse values. - - Returns - ------- - Instance of GRAPEResult, which contains the control pulses calculated - with GRAPE, a time-dependent Hamiltonian that is defined by the - control pulses, as well as the resulting propagator. 
- """ - progress_bar = progress_bar or BaseProgressBar() - - if eps is None: - eps = 0.1 * (2 * np.pi) / (times[-1]) - - eps_vec = np.array([eps / 2, eps, 2 * eps]) - eps_log = np.zeros(R) - overlap_log = np.zeros(R) - - best_k = 0 - _k_overlap = np.array([0.0, 0.0, 0.0]) - - M = len(times) - J = len(H_ops) - K = len(eps_vec) - Uf = [None for _ in range(K)] - - u = np.zeros((R, J, M, K)) - - if u_limits and len(u_limits) != 2: - raise ValueError("u_limits must be a list with two values") - - if u_limits: - warnings.warn("Causion: Using experimental feature u_limits") - - if u_limits and u_start: - # make sure that no values in u0 violates the u_limits conditions - u_start = np.array(u_start) - u_start[u_start < u_limits[0]] = u_limits[0] - u_start[u_start > u_limits[1]] = u_limits[1] - - if u_start is not None: - for idx, u0 in enumerate(u_start): - for k in range(K): - u[0, idx, :, k] = u0 - - if beta: - warnings.warn("Causion: Using experimental feature time-penalty") - - if phase_sensitive: - def _fidelity_function(x): return x - else: - def _fidelity_function(x): return abs(x)**2 - - best_k = 1 - _r = 0 - _prev_overlap = 0 - - progress_bar.start(R) - for r in range(R - 1): - progress_bar.update(r) - - _r = r - eps_log[r] = eps_vec[best_k] - - logger.debug("eps_vec: {}".format(eps_vec)) - - _t0 = time.time() - - dt = times[1] - times[0] - - if use_interp: - ip_funcs = [interp1d(times, u[r, j, :, best_k], kind=interp_kind, - bounds_error=False, - fill_value=u[r, j, -1, best_k]) - for j in range(J)] - - def _H_t(t, args=None): - return H0 + sum([float(ip_funcs[j](t)) * H_ops[j] - for j in range(J)]) - - U_list = [(-1j * _H_t(times[idx]) * dt).expm() - for idx in range(M-1)] - - else: - def _H_idx(idx): - return H0 + sum([u[r, j, idx, best_k] * H_ops[j] - for j in range(J)]) - - U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)] - - logger.debug("Time 1: %fs" % (time.time() - _t0)) - _t0 = time.time() - - U_f_list = [] - U_b_list = [] - - U_f = 1 - 
U_b = 1 - for m in range(M - 1): - - U_f = U_list[m] * U_f - U_f_list.append(U_f) - - U_b_list.insert(0, U_b) - U_b = U_list[M - 2 - m].dag() * U_b - - logger.debug("Time 2: %fs" % (time.time() - _t0)) - _t0 = time.time() - - for j in range(J): - for m in range(M-1): - P = U_b_list[m] * U - Q = 1j * dt * H_ops[j] * U_f_list[m] - - if phase_sensitive: - du = - cy_overlap(P.data, Q.data) - else: - du = (- 2 * cy_overlap(P.data, Q.data) * - cy_overlap(U_f_list[m].data, P.data)) - - if alpha: - # penalty term for high power control signals u - du += -2 * alpha * u[r, j, m, best_k] * dt - - if beta: - # penalty term for late control signals u - du += -2 * beta * k ** 2 * u[r, j, k] * dt - - for k, eps_val in enumerate(eps_vec): - u[r + 1, j, m, k] = u[r, j, m, k] + eps_val * du.real - - if u_limits: - if u[r + 1, j, m, k] < u_limits[0]: - u[r + 1, j, m, k] = u_limits[0] - elif u[r + 1, j, m, k] > u_limits[1]: - u[r + 1, j, m, k] = u_limits[1] - - u[r + 1, j, -1, :] = u[r + 1, j, -2, :] - - logger.debug("Time 3: %fs", time.time() - _t0) - _t0 = time.time() - - for k, eps_val in enumerate(eps_vec): - - def _H_idx(idx): - return H0 + sum([u[r + 1, j, idx, k] * H_ops[j] - for j in range(J)]) - - U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)] - - Uf[k] = 1 - for U in U_list: - Uf[k] = U * Uf[k] - - _k_overlap[k] = _fidelity_function(cy_overlap(Uf[k].data, - U.data)).real - - best_k = np.argmax(_k_overlap) - logger.debug("k_overlap: %s, %e", repr(_k_overlap), best_k) - - if _prev_overlap > _k_overlap[best_k]: - logger.debug("Regression, stepping back with smaller eps.") - - u[r + 1, :, :, :] = u[r, :, :, :] - eps_vec /= 2 - else: - - if best_k == 0: - eps_vec /= 2 - - elif best_k == 2: - eps_vec *= 2 - - _prev_overlap = _k_overlap[best_k] - - overlap_log[r] = _k_overlap[best_k] - - if overlap_terminate < 1.0: - if _k_overlap[best_k] > overlap_terminate: - logger.info("Reached target fidelity, terminating.") - break - - logger.debug("Time 4: %fs", time.time() 
- _t0) - _t0 = time.time() - - if use_interp: - ip_funcs = [interp1d(times, u[_r, j, :, best_k], kind=interp_kind, - bounds_error=False, fill_value=u[R - 1, j, -1]) - for j in range(J)] - - H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)] - for j in range(J)] - else: - H_td_func = [H0] + [[H_ops[j], u[_r, j, :, best_k]] for j in range(J)] - - progress_bar.finished() - - result = GRAPEResult(u=u[:_r, :, :, best_k], U_f=Uf[best_k], - H_t=H_td_func) - - result.eps = eps_log - result.overlap = overlap_log - - return result diff --git a/qutip/control/io.py b/qutip/control/io.py deleted file mode 100644 index ae9e5a2d96..0000000000 --- a/qutip/control/io.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import errno - - -def create_dir(dir_name, desc='output'): - """ - Checks if the given directory exists, if not it is created - - Returns - ------- - dir_ok : boolean - True if directory exists (previously or created) - False if failed to create the directory - - dir_name : string - Path to the directory, which may be been made absolute - - msg : string - Error msg if directory creation failed - """ - dir_ok = True - if '~' in dir_name: - dir_name = os.path.expanduser(dir_name) - elif not os.path.isabs(dir_name): - # Assume relative path from cwd given - dir_name = os.path.abspath(dir_name) - - msg = "{} directory is ready".format(desc) - errmsg = "Failed to create {} directory:\n{}\n".format(desc, - dir_name) - if os.path.exists(dir_name): - if os.path.isfile(dir_name): - dir_ok = False - errmsg += "A file already exists with the same name" - else: - try: - os.makedirs(dir_name) - msg += ("directory {} created (recursively)".format(dir_name)) - except OSError as e: - if e.errno == errno.EEXIST: - msg += ( - "Assume directory {} created " - "(recursively) by some other process. 
".format(dir_name) - ) - else: - dir_ok = False - errmsg += "Underling error (makedirs) :({}) {}".format( - type(e).__name__, e) - - if dir_ok: - return dir_ok, dir_name, msg - else: - return dir_ok, dir_name, errmsg diff --git a/qutip/control/loadparams.py b/qutip/control/loadparams.py deleted file mode 100644 index 1320a7e0be..0000000000 --- a/qutip/control/loadparams.py +++ /dev/null @@ -1,193 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Loads parameters for config, termconds, dynamics and Optimiser objects from a -parameter (ini) file with appropriate sections and options, these being -Sections: optimconfig, termconds, dynamics, optimizer -The options are assumed to be properties for these classes -Note that new attributes will be created, even if they are not usually -defined for that object -""" - -import numpy as np -from configparser import ConfigParser -# QuTiP logging -from qutip import Qobj -import qutip.logging_utils as logging -logger = logging.get_logger() - - -def load_parameters(file_name, config=None, term_conds=None, - dynamics=None, optim=None, pulsegen=None, - obj=None, section=None): - """ - Import parameters for the optimisation objects - Will throw a ValueError if file_name does not exist - """ - parser = ConfigParser() - - readFiles = parser.read(str(file_name)) - if len(readFiles) == 0: - raise ValueError("Parameter file '{}' not found".format(file_name)) - - if config is not None: - s = 'optimconfig' - try: - attr_names = parser.options(s) - for a in attr_names: - set_param(parser, s, a, config, a) - except Exception as e: - logger.warn("Unable to load {} parameters:({}) {}".format( - s, type(e).__name__, e)) - - if term_conds is not None: - s = 'termconds' - try: - attr_names = parser.options(s) - for a in attr_names: - set_param(parser, s, a, term_conds, a) - except 
Exception as e: - logger.warn("Unable to load {} parameters:({}) {}".format( - s, type(e).__name__, e)) - - if dynamics is not None: - s = 'dynamics' - try: - attr_names = parser.options(s) - for a in attr_names: - set_param(parser, s, a, dynamics, a) - except Exception as e: - logger.warn("Unable to load {} parameters:({}) {}".format( - s, type(e).__name__, e)) - - if optim is not None: - s = 'optimizer' - try: - attr_names = parser.options(s) - for a in attr_names: - set_param(parser, s, a, optim, a) - except Exception as e: - logger.warn("Unable to load {} parameters:({}) {}".format( - s, type(e).__name__, e)) - - if pulsegen is not None: - s = 'pulsegen' - try: - attr_names = parser.options(s) - for a in attr_names: - set_param(parser, s, a, pulsegen, a) - except Exception as e: - logger.warn("Unable to load {} parameters:({}) {}".format( - s, type(e).__name__, e)) - - if obj is not None: - if not isinstance(section, str): - raise ValueError( - "Section name must be given when loading " - "parameters of general object" - ) - s = section - try: - attr_names = parser.options(s) - for a in attr_names: - set_param(parser, s, a, obj, a) - except Exception as e: - logger.warn("Unable to load {} parameters:({}) {}".format( - s, type(e).__name__, e)) - - -def set_param(parser, section, option, obj, attrib_name): - """ - Set the object attribute value based on the option value from the - config file. 
- If the attribute exists already, then its datatype - is used to call the appropriate parser.get method - Otherwise the parameter is assumed to be a string - - """ - val = parser.get(section, attrib_name) - - dtype = None - if hasattr(obj, attrib_name): - a = getattr(obj, attrib_name) - dtype = type(a) - else: - logger.warn("Unable to load parameter {}.{}\n" - "Attribute does not exist".format(section, attrib_name)) - return - - if isinstance(a, Qobj): - try: - q = Qobj(eval(val)) - except: - raise ValueError("Value '{}' cannot be used to generate a Qobj" - " in parameter file [{}].{}".format( - val, section, option)) - setattr(obj, attrib_name, q) - elif isinstance(a, np.ndarray): - try: - arr = np.array(eval(val), dtype=a.dtype) - except: - raise ValueError("Value '{}' cannot be used to generate an ndarray" - " in parameter file [{}].{}".format( - val, section, option)) - setattr(obj, attrib_name, arr) - elif isinstance(a, list): - try: - l = list(eval(val)) - except: - raise ValueError("Value '{}' cannot be used to generate a list" - " in parameter file [{}].{}".format( - val, section, option)) - setattr(obj, attrib_name, l) - elif dtype == float: - try: - f = parser.getfloat(section, attrib_name) - except: - try: - f = eval(val) - except: - raise ValueError( - "Value '{}' cannot be cast or evaluated as a " - "float in parameter file [{}].{}".format( - val, section, option)) - setattr(obj, attrib_name, f) - elif dtype == complex: - try: - c = complex(val) - except: - raise ValueError("Value '{}' cannot be cast as complex" - " in parameter file [{}].{}".format( - val, section, option)) - setattr(obj, attrib_name, c) - elif dtype == int: - try: - i = parser.getint(section, attrib_name) - except: - raise ValueError("Value '{}' cannot be cast as an int" - " in parameter file [{}].{}".format( - val, section, option)) - setattr(obj, attrib_name, i) - elif dtype == bool: - try: - b = parser.getboolean(section, attrib_name) - except: - raise ValueError("Value '{}' 
cannot be cast as a bool" - " in parameter file [{}].{}".format( - val, section, option)) - setattr(obj, attrib_name, b) - else: - try: - val = parser.getfloat(section, attrib_name) - except: - try: - val = parser.getboolean(section, attrib_name) - except: - pass - - setattr(obj, attrib_name, val) diff --git a/qutip/control/optimconfig.py b/qutip/control/optimconfig.py deleted file mode 100644 index dd4d5ecbfd..0000000000 --- a/qutip/control/optimconfig.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Configuration parameters for control pulse optimisation -""" - -import numpy as np -# QuTiP logging -import qutip.logging_utils -logger = qutip.logging_utils.get_logger('qutip.control.optimconfig') -import qutip.control.io as qtrlio - -class OptimConfig(object): - """ - Configuration parameters for control pulse optimisation - - Attributes - ---------- - log_level : integer - level of messaging output from the logger. - Options are attributes of qutip.logging_utils, - in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, - assuming everything runs as expected. - The default NOTSET implies that the level will be taken from - the QuTiP settings file, which by default is WARN - - dyn_type : string - Dynamics type, i.e. the type of matrix used to describe - the dynamics. Options are UNIT, GEN_MAT, SYMPL - (see Dynamics classes for details) - - prop_type : string - Propagator type i.e. 
the method used to calculate the - propagtors and propagtor gradient for each timeslot - options are DEF, APPROX, DIAG, FRECHET, AUG_MAT - DEF will use the default for the specific dyn_type - (see PropagatorComputer classes for details) - - fid_type : string - Fidelity error (and fidelity error gradient) computation method - Options are DEF, UNIT, TRACEDIFF, TD_APPROX - DEF will use the default for the specific dyn_type - (See FidelityComputer classes for details) - """ - - def __init__(self): - self.reset() - - def reset(self): - self.log_level = logger.getEffectiveLevel() - self.alg = 'GRAPE' # Alts: 'CRAB' - self.optim_method = 'DEF' - self.dyn_type = 'DEF' - self.fid_type = 'DEF' - self.fid_type = 'DEF' - self.tslot_type = 'DEF' - self.init_pulse_type = 'DEF' - - @property - def log_level(self): - return logger.level - - @log_level.setter - def log_level(self, lvl): - """ - Set the log_level attribute and set the level of the logger - that is call logger.setLevel(lvl) - """ - logger.setLevel(lvl) - - def check_create_output_dir(self, output_dir, desc='output'): - """ - Checks if the given directory exists, if not it is created - Returns - ------- - dir_ok : boolean - True if directory exists (previously or created) - False if failed to create the directory - - output_dir : string - Path to the directory, which may be been made absolute - - msg : string - Error msg if directory creation failed - """ - return qtrlio.create_dir(output_dir, desc=desc) - - -# create global instance -optimconfig = OptimConfig() diff --git a/qutip/control/optimizer.py b/qutip/control/optimizer.py deleted file mode 100644 index 2cd0094046..0000000000 --- a/qutip/control/optimizer.py +++ /dev/null @@ -1,1308 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Classes here are expected to implement a run_optimization function -that 
will use some method for optimising the control pulse, as defined -by the control amplitudes. The system that the pulse acts upon are defined -by the Dynamics object that must be passed in the instantiation. - -The methods are typically N dimensional function optimisers that -find the minima of a fidelity error function. Note the number of variables -for the fidelity function is the number of control timeslots, -i.e. n_ctrls x Ntimeslots -The methods will call functions on the Dynamics.fid_computer object, -one or many times per interation, -to get the fidelity error and gradient wrt to the amplitudes. -The optimisation will stop when one of the termination conditions are met, -for example: the fidelity aim has be reached, a local minima has been found, -the maximum time allowed has been exceeded - -These function optimisation methods are so far from SciPy.optimize -The two methods implemented are: - - BFGS - Broyden–Fletcher–Goldfarb–Shanno algorithm - - This a quasi second order Newton method. It uses successive calls to - the gradient function to make an estimation of the curvature (Hessian) - and hence direct its search for the function minima - The SciPy implementation is pure Python and hance is execution speed is - not high - use subclass: OptimizerBFGS - - L-BFGS-B - Bounded, limited memory BFGS - - This a version of the BFGS method where the Hessian approximation is - only based on a set of the most recent gradient calls. It generally - performs better where the are a large number of variables - The SciPy implementation of L-BFGS-B is wrapper around a well - established and actively maintained implementation in Fortran - Its is therefore very fast. - # See SciPy documentation for credit and details on the - # scipy.optimize.fmin_l_bfgs_b function - use subclass: OptimizerLBFGSB - -The baseclass Optimizer implements the function wrappers to the -fidelity error, gradient, and iteration callback functions. 
-These are called from the within the SciPy optimisation functions. -The subclasses implement the algorithm specific pulse optimisation function. -""" - -import functools -import numpy as np -import timeit -import warnings -from packaging.version import parse as _parse_version -import scipy -import scipy.optimize as spopt -import copy -import collections -import timeit - -import numpy as np -import scipy.optimize as spopt - -from qutip import Qobj -import qutip.control.optimresult as optimresult -import qutip.control.termcond as termcond -import qutip.control.errors as errors -import qutip.control.dynamics as dynamics -import qutip.control.pulsegen as pulsegen -import qutip.control.dump as qtrldump - -import qutip.logging_utils as logging -logger = logging.get_logger() - -# Older versions of SciPy use the method numpy.ndarray.tostring(), which has -# been deprecated since Numpy 1.19 in favour of the identical-in-all-but-name -# tobytes() method. This is simply a deprecated call in SciPy, there's nothing -# we or our users can do about it, and the function shouldn't actually be -# removed from Numpy until at least 1.22, by which point we'll have been able -# to drop support for SciPy 1.4. -if _parse_version(scipy.__version__) < _parse_version("1.5"): - @functools.wraps(spopt.fmin_l_bfgs_b) - def fmin_l_bfgs_b(*args, **kwargs): - with warnings.catch_warnings(): - message = r"tostring\(\) is deprecated\. Use tobytes\(\) instead\." - warnings.filterwarnings("ignore", message=message, - category=DeprecationWarning) - return spopt.fmin_l_bfgs_b(*args, **kwargs) -else: - fmin_l_bfgs_b = spopt.fmin_l_bfgs_b - - -def _is_string(var): - try: - if isinstance(var, basestring): - return True - except NameError: - try: - if isinstance(var, str): - return True - except: - return False - except: - return False - - - -class Optimizer(object): - """ - Base class for all control pulse optimisers. This class should not be - instantiated, use its subclasses. 
This class implements the fidelity, - gradient and interation callback functions. All subclass objects must be - initialised with a - - - ``OptimConfig`` instance - various configuration options - - ``Dynamics`` instance - describes the dynamics of the (quantum) system - to be control optimised - - Attributes - ---------- - log_level : integer - level of messaging output from the logger. Options are attributes of - qutip.logging_utils, in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, assuming - everything runs as expected. The default NOTSET implies that the level - will be taken from the QuTiP settings file, which by default is WARN. - - params: Dictionary - The key value pairs are the attribute name and value. Note: attributes - are created if they do not exist already, and are overwritten if they - do. - - alg : string - Algorithm to use in pulse optimisation. Options are: - - - 'GRAPE' (default) - GRadient Ascent Pulse Engineering - - 'CRAB' - Chopped RAndom Basis - - alg_params : Dictionary - Options that are specific to the pulse optim algorithm ``alg``. - - disp_conv_msg : bool - Set true to display a convergence message - (for scipy.optimize.minimize methods anyway) - - optim_method : string - a scipy.optimize.minimize method that will be used to optimise - the pulse for minimum fidelity error - - method_params : Dictionary - Options for the optim_method. 
- Note that where there is an equivalent attribute of this instance - or the termination_conditions (for example maxiter) - it will override an value in these options - - approx_grad : bool - If set True then the method will approximate the gradient itself - (if it has requirement and facility for this) - This will mean that the fid_err_grad_wrapper will not get called - Note it should be left False when using the Dynamics - to calculate approximate gradients - Note it is set True automatically when the alg is CRAB - - amp_lbound : float or list of floats - lower boundaries for the control amplitudes - Can be a scalar value applied to all controls - or a list of bounds for each control - - amp_ubound : float or list of floats - upper boundaries for the control amplitudes - Can be a scalar value applied to all controls - or a list of bounds for each control - - bounds : List of floats - Bounds for the parameters. - If not set before the run_optimization call then the list - is built automatically based on the amp_lbound and amp_ubound - attributes. - Setting this attribute directly allows specific bounds to be set - for individual parameters. - Note: Only some methods use bounds - - dynamics : Dynamics (subclass instance) - describes the dynamics of the (quantum) system to be control optimised - (see Dynamics classes for details) - - config : OptimConfig instance - various configuration options - (see OptimConfig for details) - - termination_conditions : TerminationCondition instance - attributes determine when the optimisation will end - - pulse_generator : PulseGen (subclass instance) - (can be) used to create initial pulses - not used by the class, but set by pulseoptim.create_pulse_optimizer - - stats : Stats - attributes of which give performance stats for the optimisation - set to None to reduce overhead of calculating stats. 
- Note it is (usually) shared with the Dynamics instance - - dump : :class:`qutip.control.dump.OptimDump` - Container for data dumped during the optimisation. - Can be set by specifying the dumping level or set directly. - Note this is mainly intended for user and a development debugging - but could be used for status information during a long optimisation. - - dumping : string - level of data dumping: NONE, SUMMARY, FULL or CUSTOM - See property docstring for details - - dump_to_file : bool - If set True then data will be dumped to file during the optimisation - dumping will be set to SUMMARY during init_optim - if dump_to_file is True and dumping not set. - Default is False - - dump_dir : string - Basically a link to dump.dump_dir. Exists so that it can be set through - optim_params. - If dump is None then will return None or will set dumping to SUMMARY - when setting a path - - iter_summary : :class:`OptimIterSummary` - Summary of the most recent iteration. - Note this is only set if dummping is on - """ - - def __init__(self, config, dyn, params=None): - self.dynamics = dyn - self.config = config - self.params = params - self.reset() - dyn.parent = self - - def reset(self): - self.log_level = self.config.log_level - self.id_text = 'OPTIM' - self.termination_conditions = None - self.pulse_generator = None - self.disp_conv_msg = False - self.iteration_steps = None - self.record_iteration_steps = False - self.alg = 'GRAPE' - self.alg_params = None - self.method = 'l_bfgs_b' - self.method_params = None - self.method_options = None - self.approx_grad = False - self.amp_lbound = None - self.amp_ubound = None - self.bounds = None - self.num_iter = 0 - self.num_fid_func_calls = 0 - self.num_grad_func_calls = 0 - self.stats = None - self.wall_time_optim_start = 0.0 - - self.dump_to_file = False - self.dump = None - self.iter_summary = None - - # AJGP 2015-04-21: - # These (copying from config) are here for backward compatibility - if hasattr(self.config, 'amp_lbound'): 
- if self.config.amp_lbound: - self.amp_lbound = self.config.amp_lbound - if hasattr(self.config, 'amp_ubound'): - if self.config.amp_ubound: - self.amp_ubound = self.config.amp_ubound - - self.apply_params() - - @property - def log_level(self): - return logger.level - - @log_level.setter - def log_level(self, lvl): - """ - Set the log_level attribute and set the level of the logger - that is call logger.setLevel(lvl) - """ - logger.setLevel(lvl) - - def apply_params(self, params=None): - """ - Set object attributes based on the dictionary (if any) passed in the - instantiation, or passed as a parameter - This is called during the instantiation automatically. - The key value pairs are the attribute name and value - Note: attributes are created if they do not exist already, - and are overwritten if they do. - """ - if not params: - params = self.params - - if isinstance(params, dict): - self.params = params - for key in params: - setattr(self, key, params[key]) - - @property - def dumping(self): - """ - The level of data dumping that will occur during the optimisation - - - NONE : No processing data dumped (Default) - - SUMMARY : A summary at each iteration will be recorded - - FULL : All logs will be generated and dumped - - CUSTOM : Some customised level of dumping - - When first set to CUSTOM this is equivalent to SUMMARY. 
It is then up - to the user to specify which logs are dumped - """ - if self.dump is None: - lvl = 'NONE' - else: - lvl = self.dump.level - - return lvl - - @dumping.setter - def dumping(self, value): - if value is None: - self.dump = None - else: - if not isinstance(value, str): - raise TypeError("Value must be string value") - lvl = value.upper() - if lvl == 'NONE': - self.dump = None - else: - if not isinstance(self.dump, qtrldump.OptimDump): - self.dump = qtrldump.OptimDump(self, level=lvl) - else: - self.dump.level = lvl - - @property - def dump_dir(self): - if self.dump: - return self.dump.dump_dir - else: - return None - - @dump_dir.setter - def dump_dir(self, value): - if not self.dump: - self.dumping = 'SUMMARY' - self.dump.dump_dir = value - - def _create_result(self): - """ - create the result object - and set the initial_amps attribute as the current amplitudes - """ - result = optimresult.OptimResult() - result.initial_fid_err = self.dynamics.fid_computer.get_fid_err() - result.initial_amps = self.dynamics.ctrl_amps.copy() - result.evo_full_initial = self.dynamics.full_evo.copy() - result.time = self.dynamics.time.copy() - result.optimizer = self - return result - - def init_optim(self, term_conds): - """ - Check optimiser attribute status and passed parameters before - running the optimisation. - This is called by run_optimization, but could called independently - to check the configuration. 
- """ - if term_conds is not None: - self.termination_conditions = term_conds - term_conds = self.termination_conditions - - if not isinstance(term_conds, termcond.TerminationConditions): - raise errors.UsageError("No termination conditions for the " - "optimisation function") - - if not isinstance(self.dynamics, dynamics.Dynamics): - raise errors.UsageError("No dynamics object attribute set") - self.dynamics.check_ctrls_initialized() - - self.apply_method_params() - - if term_conds.fid_err_targ is None and term_conds.fid_goal is None: - raise errors.UsageError("Either the goal or the fidelity " - "error tolerance must be set") - - if term_conds.fid_err_targ is None: - term_conds.fid_err_targ = np.abs(1 - term_conds.fid_goal) - - if term_conds.fid_goal is None: - term_conds.fid_goal = 1 - term_conds.fid_err_targ - - if self.alg == 'CRAB': - self.approx_grad = True - - if self.stats is not None: - self.stats.clear() - - if self.dump_to_file: - if self.dump is None: - self.dumping = 'SUMMARY' - self.dump.write_to_file = True - self.dump.create_dump_dir() - logger.info("Optimiser dump will be written to:\n{}".format( - self.dump.dump_dir)) - - if self.dump: - self.iter_summary = OptimIterSummary() - else: - self.iter_summary = None - - self.num_iter = 0 - self.num_fid_func_calls = 0 - self.num_grad_func_calls = 0 - self.iteration_steps = None - - def _build_method_options(self): - """ - Creates the method_options dictionary for the scipy.optimize.minimize - function based on the attributes of this object and the - termination_conditions - It assumes that apply_method_params has already been run and - hence the method_options attribute may already contain items. 
- These values will NOT be overridden - """ - tc = self.termination_conditions - if self.method_options is None: - self.method_options = {} - mo = self.method_options - - if 'max_metric_corr' in mo and 'maxcor' not in mo: - mo['maxcor'] = mo['max_metric_corr'] - elif hasattr(self, 'max_metric_corr') and 'maxcor' not in mo: - mo['maxcor'] = self.max_metric_corr - if 'accuracy_factor' in mo and 'ftol' not in mo: - mo['ftol'] = mo['accuracy_factor'] - elif hasattr(tc, 'accuracy_factor') and 'ftol' not in mo: - mo['ftol'] = tc.accuracy_factor - if tc.max_iterations > 0 and 'maxiter' not in mo: - mo['maxiter'] = tc.max_iterations - if tc.max_fid_func_calls > 0 and 'maxfev' not in mo: - mo['maxfev'] = tc.max_fid_func_calls - if tc.min_gradient_norm > 0 and 'gtol' not in mo: - mo['gtol'] = tc.min_gradient_norm - if 'disp' not in mo: - mo['disp'] = self.disp_conv_msg - return mo - - def apply_method_params(self, params=None): - """ - Loops through all the method_params - (either passed here or the method_params attribute) - If the name matches an attribute of this object or the - termination conditions object, then the value of this attribute - is set. 
Otherwise it is assumed to a method_option for the - scipy.optimize.minimize function - """ - if not params: - params = self.method_params - - if isinstance(params, dict): - self.method_params = params - unused_params = {} - for key in params: - val = params[key] - if hasattr(self, key): - setattr(self, key, val) - if hasattr(self.termination_conditions, key): - setattr(self.termination_conditions, key, val) - else: - unused_params[key] = val - - if len(unused_params) > 0: - if not isinstance(self.method_options, dict): - self.method_options = unused_params - else: - self.method_options.update(unused_params) - - def _build_bounds_list(self): - cfg = self.config - dyn = self.dynamics - n_ctrls = dyn.num_ctrls - self.bounds = [] - for t in range(dyn.num_tslots): - for c in range(n_ctrls): - if isinstance(self.amp_lbound, list): - lb = self.amp_lbound[c] - else: - lb = self.amp_lbound - if isinstance(self.amp_ubound, list): - ub = self.amp_ubound[c] - else: - ub = self.amp_ubound - - if lb is not None and np.isinf(lb): - lb = None - if ub is not None and np.isinf(ub): - ub = None - - self.bounds.append((lb, ub)) - - def run_optimization(self, term_conds=None): - """ - This default function optimisation method is a wrapper to the - scipy.optimize.minimize function. - - It will attempt to minimise the fidelity error with respect to some - parameters, which are determined by _get_optim_var_vals (see below) - - The optimisation end when one of the passed termination conditions - has been met, e.g. target achieved, wall time, or - function call or iteration count exceeded. Note these - conditions include gradient minimum met (local minima) for - methods that use a gradient. - - The function minimisation method is taken from the optim_method - attribute. Note that not all of these methods have been tested. - Note that some of these use a gradient and some do not. - See the scipy documentation for details. 
Options specific to the - method can be passed setting the method_params attribute. - - If the parameter term_conds=None, then the termination_conditions - attribute must already be set. It will be overwritten if the - parameter is not None - - The result is returned in an OptimResult object, which includes - the final fidelity, time evolution, reason for termination etc - """ - self.init_optim(term_conds) - term_conds = self.termination_conditions - dyn = self.dynamics - cfg = self.config - self.optim_var_vals = self._get_optim_var_vals() - st_time = timeit.default_timer() - self.wall_time_optimize_start = st_time - - if self.stats is not None: - self.stats.wall_time_optim_start = st_time - self.stats.wall_time_optim_end = 0.0 - self.stats.num_iter = 0 - - if self.bounds is None: - self._build_bounds_list() - - self._build_method_options() - - result = self._create_result() - - if self.approx_grad: - jac = None - else: - jac = self.fid_err_grad_wrapper - - if self.log_level <= logging.INFO: - msg = ("Optimising pulse(s) using {} with " - "minimise '{}' method").format(self.alg, self.method) - if self.approx_grad: - msg += " (approx grad)" - logger.info(msg) - - try: - opt_res = spopt.minimize( - self.fid_err_func_wrapper, self.optim_var_vals, - method=self.method, - jac=jac, - bounds=self.bounds, - options=self.method_options, - callback=self.iter_step_callback_func) - - amps = self._get_ctrl_amps(opt_res.x) - dyn.update_ctrl_amps(amps) - result.termination_reason = opt_res.message - # Note the iterations are counted in this object as well - # so there are compared here for interest sake only - if self.num_iter != opt_res.nit: - logger.info("The number of iterations counted {} " - " does not match the number reported {} " - "by {}".format(self.num_iter, opt_res.nit, - self.method)) - result.num_iter = opt_res.nit - - except errors.OptimizationTerminate as except_term: - self._interpret_term_exception(except_term, result) - - end_time = timeit.default_timer() - 
self._add_common_result_attribs(result, st_time, end_time) - - return result - - def _get_optim_var_vals(self): - """ - Generate the 1d array that holds the current variable values - of the function to be optimised - By default (as used in GRAPE) these are the control amplitudes - in each timeslot - """ - return self.dynamics.ctrl_amps.reshape([-1]) - - def _get_ctrl_amps(self, optim_var_vals): - """ - Get the control amplitudes from the current variable values - of the function to be optimised. - that is the 1d array that is passed from the optimisation method - Note for GRAPE these are the function optimiser parameters - (and this is the default) - - Returns - ------- - float array[dynamics.num_tslots, dynamics.num_ctrls] - """ - amps = optim_var_vals.reshape(self.dynamics.ctrl_amps.shape) - - return amps - - def fid_err_func_wrapper(self, *args): - """ - Get the fidelity error achieved using the ctrl amplitudes passed - in as the first argument. - - This is called by generic optimisation algorithm as the - func to the minimised. The argument is the current - variable values, i.e. control amplitudes, passed as - a flat array. Hence these are reshaped as [nTimeslots, n_ctrls] - and then used to update the stored ctrl values (if they have changed) - - The error is checked against the target, and the optimisation is - terminated if the target has been achieved. 
- """ - self.num_fid_func_calls += 1 - # *** update stats *** - if self.stats is not None: - self.stats.num_fidelity_func_calls = self.num_fid_func_calls - if self.log_level <= logging.DEBUG: - logger.debug("fidelity error call {}".format( - self.stats.num_fidelity_func_calls)) - - amps = self._get_ctrl_amps(args[0].copy()) - self.dynamics.update_ctrl_amps(amps) - - tc = self.termination_conditions - err = self.dynamics.fid_computer.get_fid_err() - - if self.iter_summary: - self.iter_summary.fid_func_call_num = self.num_fid_func_calls - self.iter_summary.fid_err = err - - if self.dump and self.dump.dump_fid_err: - self.dump.update_fid_err_log(err) - - if err <= tc.fid_err_targ: - raise errors.GoalAchievedTerminate(err) - - if self.num_fid_func_calls > tc.max_fid_func_calls: - raise errors.MaxFidFuncCallTerminate() - - return err - - def fid_err_grad_wrapper(self, *args): - """ - Get the gradient of the fidelity error with respect to all of the - variables, i.e. the ctrl amplidutes in each timeslot - - This is called by generic optimisation algorithm as the gradients of - func to the minimised wrt the variables. The argument is the current - variable values, i.e. control amplitudes, passed as - a flat array. Hence these are reshaped as [nTimeslots, n_ctrls] - and then used to update the stored ctrl values (if they have changed) - - Although the optimisation algorithms have a check within them for - function convergence, i.e. 
local minima, the sum of the squares - of the normalised gradient is checked explicitly, and the - optimisation is terminated if this is below the min_gradient_norm - condition - """ - # *** update stats *** - self.num_grad_func_calls += 1 - if self.stats is not None: - self.stats.num_grad_func_calls = self.num_grad_func_calls - if self.log_level <= logging.DEBUG: - logger.debug("gradient call {}".format( - self.stats.num_grad_func_calls)) - amps = self._get_ctrl_amps(args[0].copy()) - self.dynamics.update_ctrl_amps(amps) - fid_comp = self.dynamics.fid_computer - # gradient_norm_func is a pointer to the function set in the config - # that returns the normalised gradients - grad = fid_comp.get_fid_err_gradient() - - if self.iter_summary: - self.iter_summary.grad_func_call_num = self.num_grad_func_calls - self.iter_summary.grad_norm = fid_comp.grad_norm - - if self.dump: - if self.dump.dump_grad_norm: - self.dump.update_grad_norm_log(fid_comp.grad_norm) - - if self.dump.dump_grad: - self.dump.update_grad_log(grad) - - tc = self.termination_conditions - if fid_comp.grad_norm < tc.min_gradient_norm: - raise errors.GradMinReachedTerminate(fid_comp.grad_norm) - return grad.flatten() - - def iter_step_callback_func(self, *args): - """ - Check the elapsed wall time for the optimisation run so far. 
- Terminate if this has exceeded the maximum allowed time - """ - self.num_iter += 1 - - if self.log_level <= logging.DEBUG: - logger.debug("Iteration callback {}".format(self.num_iter)) - - wall_time = timeit.default_timer() - self.wall_time_optimize_start - - if self.iter_summary: - self.iter_summary.iter_num = self.num_iter - self.iter_summary.wall_time = wall_time - - if self.dump and self.dump.dump_summary: - self.dump.add_iter_summary() - - tc = self.termination_conditions - - if wall_time > tc.max_wall_time: - raise errors.MaxWallTimeTerminate() - - # *** update stats *** - if self.stats is not None: - self.stats.num_iter = self.num_iter - - def _interpret_term_exception(self, except_term, result): - """ - Update the result object based on the exception that occurred - during the optimisation - """ - result.termination_reason = except_term.reason - if isinstance(except_term, errors.GoalAchievedTerminate): - result.goal_achieved = True - elif isinstance(except_term, errors.MaxWallTimeTerminate): - result.wall_time_limit_exceeded = True - elif isinstance(except_term, errors.GradMinReachedTerminate): - result.grad_norm_min_reached = True - elif isinstance(except_term, errors.MaxFidFuncCallTerminate): - result.max_fid_func_exceeded = True - - def _add_common_result_attribs(self, result, st_time, end_time): - """ - Update the result object attributes which are common to all - optimisers and outcomes - """ - dyn = self.dynamics - result.num_iter = self.num_iter - result.num_fid_func_calls = self.num_fid_func_calls - result.wall_time = end_time - st_time - result.fid_err = dyn.fid_computer.get_fid_err() - result.grad_norm_final = dyn.fid_computer.grad_norm - result.final_amps = dyn.ctrl_amps - final_evo = dyn.full_evo - if isinstance(final_evo, Qobj): - result.evo_full_final = final_evo - else: - result.evo_full_final = Qobj(final_evo, dims=dyn.sys_dims) - # *** update stats *** - if self.stats is not None: - self.stats.wall_time_optim_end = end_time - 
self.stats.calculate() - result.stats = copy.copy(self.stats) - - -class OptimizerBFGS(Optimizer): - """ - Implements the run_optimization method using the BFGS algorithm - """ - def reset(self): - Optimizer.reset(self) - self.id_text = 'BFGS' - - def run_optimization(self, term_conds=None): - """ - Optimise the control pulse amplitudes to minimise the fidelity error - using the BFGS (Broyden–Fletcher–Goldfarb–Shanno) algorithm - The optimisation end when one of the passed termination conditions - has been met, e.g. target achieved, gradient minimum met - (local minima), wall time / iteration count exceeded. - - Essentially this is wrapper to the: - scipy.optimize.fmin_bfgs - function - - If the parameter term_conds=None, then the termination_conditions - attribute must already be set. It will be overwritten if the - parameter is not None - - The result is returned in an OptimResult object, which includes - the final fidelity, time evolution, reason for termination etc - """ - self.init_optim(term_conds) - term_conds = self.termination_conditions - dyn = self.dynamics - self.optim_var_vals = self._get_optim_var_vals() - self._build_method_options() - - st_time = timeit.default_timer() - self.wall_time_optimize_start = st_time - - if self.stats is not None: - self.stats.wall_time_optim_start = st_time - self.stats.wall_time_optim_end = 0.0 - self.stats.num_iter = 1 - - if self.approx_grad: - fprime = None - else: - fprime = self.fid_err_grad_wrapper - - if self.log_level <= logging.INFO: - msg = ("Optimising pulse(s) using {} with " - "'fmin_bfgs' method").format(self.alg) - if self.approx_grad: - msg += " (approx grad)" - logger.info(msg) - - result = self._create_result() - try: - optim_var_vals, cost, grad, invHess, nFCalls, nGCalls, warn = \ - spopt.fmin_bfgs(self.fid_err_func_wrapper, - self.optim_var_vals, - fprime=fprime, - callback=self.iter_step_callback_func, - gtol=term_conds.min_gradient_norm, - maxiter=term_conds.max_iterations, - full_output=True, 
disp=True) - - amps = self._get_ctrl_amps(optim_var_vals) - dyn.update_ctrl_amps(amps) - if warn == 1: - result.max_iter_exceeded = True - result.termination_reason = "Iteration count limit reached" - elif warn == 2: - result.grad_norm_min_reached = True - result.termination_reason = "Gradient normal minimum reached" - - except errors.OptimizationTerminate as except_term: - self._interpret_term_exception(except_term, result) - - end_time = timeit.default_timer() - self._add_common_result_attribs(result, st_time, end_time) - - return result - - -class OptimizerLBFGSB(Optimizer): - """ - Implements the run_optimization method using the L-BFGS-B algorithm - - Attributes - ---------- - max_metric_corr : integer - The maximum number of variable metric corrections used to define - the limited memory matrix. That is the number of previous - gradient values that are used to approximate the Hessian - see the scipy.optimize.fmin_l_bfgs_b documentation for description - of m argument - """ - - def reset(self): - Optimizer.reset(self) - self.id_text = 'LBFGSB' - self.max_metric_corr = 10 - self.msg_level = None - - def init_optim(self, term_conds): - """ - Check optimiser attribute status and passed parameters before - running the optimisation. - This is called by run_optimization, but could called independently - to check the configuration. 
- """ - if term_conds is None: - term_conds = self.termination_conditions - - # AJGP 2015-04-21: - # These (copying from config) are here for backward compatibility - if hasattr(self.config, 'max_metric_corr'): - if self.config.max_metric_corr: - self.max_metric_corr = self.config.max_metric_corr - if hasattr(self.config, 'accuracy_factor'): - if self.config.accuracy_factor: - term_conds.accuracy_factor = self.config.accuracy_factor - - Optimizer.init_optim(self, term_conds) - - if not isinstance(self.msg_level, int): - if self.log_level < logging.DEBUG: - self.msg_level = 2 - elif self.log_level <= logging.DEBUG: - self.msg_level = 1 - else: - self.msg_level = 0 - - def run_optimization(self, term_conds=None): - """ - Optimise the control pulse amplitudes to minimise the fidelity error - using the L-BFGS-B algorithm, which is the constrained - (bounded amplitude values), limited memory, version of the - Broyden–Fletcher–Goldfarb–Shanno algorithm. - - The optimisation end when one of the passed termination conditions - has been met, e.g. target achieved, gradient minimum met - (local minima), wall time / iteration count exceeded. - - Essentially this is wrapper to the: - scipy.optimize.fmin_l_bfgs_b function - This in turn is a warpper for well established implementation of - the L-BFGS-B algorithm written in Fortran, which is therefore - very fast. See SciPy documentation for credit and details on - this function. - - If the parameter term_conds=None, then the termination_conditions - attribute must already be set. 
It will be overwritten if the - parameter is not None - - The result is returned in an OptimResult object, which includes - the final fidelity, time evolution, reason for termination etc - """ - self.init_optim(term_conds) - term_conds = self.termination_conditions - dyn = self.dynamics - cfg = self.config - self.optim_var_vals = self._get_optim_var_vals() - self._build_method_options() - - st_time = timeit.default_timer() - self.wall_time_optimize_start = st_time - - if self.stats is not None: - self.stats.wall_time_optim_start = st_time - self.stats.wall_time_optim_end = 0.0 - self.stats.num_iter = 1 - - bounds = self._build_bounds_list() - result = self._create_result() - - if self.approx_grad: - fprime = None - else: - fprime = self.fid_err_grad_wrapper - - if 'accuracy_factor' in self.method_options: - factr = self.method_options['accuracy_factor'] - elif 'ftol' in self.method_options: - factr = self.method_options['ftol'] - elif hasattr(term_conds, 'accuracy_factor'): - factr = term_conds.accuracy_factor - else: - factr = 1e7 - - if 'max_metric_corr' in self.method_options: - m = self.method_options['max_metric_corr'] - elif 'maxcor' in self.method_options: - m = self.method_options['maxcor'] - elif hasattr(self, 'max_metric_corr'): - m = self.max_metric_corr - else: - m = 10 - - if self.log_level <= logging.INFO: - msg = ("Optimising pulse(s) using {} with " - "'fmin_l_bfgs_b' method").format(self.alg) - if self.approx_grad: - msg += " (approx grad)" - logger.info(msg) - try: - optim_var_vals, fid, res_dict = fmin_l_bfgs_b( - self.fid_err_func_wrapper, self.optim_var_vals, - fprime=fprime, - approx_grad=self.approx_grad, - callback=self.iter_step_callback_func, - bounds=self.bounds, m=m, factr=factr, - pgtol=term_conds.min_gradient_norm, - disp=self.msg_level, - maxfun=term_conds.max_fid_func_calls, - maxiter=term_conds.max_iterations) - - amps = self._get_ctrl_amps(optim_var_vals) - dyn.update_ctrl_amps(amps) - warn = res_dict['warnflag'] - if warn == 0: - 
result.grad_norm_min_reached = True - result.termination_reason = "function converged" - elif warn == 1: - result.max_iter_exceeded = True - result.termination_reason = ("Iteration or fidelity " - "function call limit reached") - elif warn == 2: - result.termination_reason = res_dict['task'] - - result.num_iter = res_dict['nit'] - except errors.OptimizationTerminate as except_term: - self._interpret_term_exception(except_term, result) - - end_time = timeit.default_timer() - self._add_common_result_attribs(result, st_time, end_time) - - return result - - -class OptimizerCrab(Optimizer): - """ - Optimises the pulse using the CRAB algorithm [1]. - It uses the scipy.optimize.minimize function with the method specified - by the optim_method attribute. See Optimizer.run_optimization for details - It minimises the fidelity error function with respect to the CRAB - basis function coefficients. - - AJGP ToDo: Add citation here - """ - - def reset(self): - Optimizer.reset(self) - self.id_text = 'CRAB' - self.num_optim_vars = 0 - - def init_optim(self, term_conds): - """ - Check optimiser attribute status and passed parameters before - running the optimisation. - This is called by run_optimization, but could called independently - to check the configuration. 
- """ - Optimizer.init_optim(self, term_conds) - dyn = self.dynamics - - self.num_optim_vars = 0 - pulse_gen_valid = True - # check the pulse generators match the ctrls - # (in terms of number) - # and count the number of parameters - if self.pulse_generator is None: - pulse_gen_valid = False - err_msg = "pulse_generator attribute is None" - elif not isinstance(self.pulse_generator, collections.abc.Iterable): - pulse_gen_valid = False - err_msg = "pulse_generator is not iterable" - - elif len(self.pulse_generator) != dyn.num_ctrls: - pulse_gen_valid = False - err_msg = ("the number of pulse generators {} does not equal " - "the number of controls {}".format( - len(self.pulse_generator), dyn.num_ctrls)) - - if pulse_gen_valid: - for p_gen in self.pulse_generator: - if not isinstance(p_gen, pulsegen.PulseGenCrab): - pulse_gen_valid = False - err_msg =\ - "pulse_generator contained object of type '{}'".format( - p_gen.__class__.__name__) - break - self.num_optim_vars += p_gen.num_optim_vars - - if not pulse_gen_valid: - raise errors.UsageError( - "The pulse_generator attribute must be set to a list of " - "PulseGenCrab - one for each control. Here " + err_msg) - - def _build_bounds_list(self): - """ - No bounds necessary here, as the bounds for the CRAB parameters - do not have much physical meaning. - This needs to override the default method, otherwise the shape - will be wrong - """ - return None - - def _get_optim_var_vals(self): - """ - Generate the 1d array that holds the current variable values - of the function to be optimised - For CRAB these are the basis coefficients - - Returns - ------- - ndarray (1d) of float - """ - pvals = [] - for pgen in self.pulse_generator: - pvals.extend(pgen.get_optim_var_vals()) - - return np.array(pvals) - - def _get_ctrl_amps(self, optim_var_vals): - """ - Get the control amplitudes from the current variable values - of the function to be optimised. 
- that is the 1d array that is passed from the optimisation method - For CRAB the amplitudes will need to calculated by expanding the - series - - Returns - ------- - float array[dynamics.num_tslots, dynamics.num_ctrls] - """ - dyn = self.dynamics - - if self.log_level <= logging.DEBUG: - changed_params = self.optim_var_vals != optim_var_vals - logger.debug( - "{} out of {} optimisation parameters changed".format( - changed_params.sum(), len(optim_var_vals))) - - amps = np.empty([dyn.num_tslots, dyn.num_ctrls]) - j = 0 - param_idx_st = 0 - for p_gen in self.pulse_generator: - param_idx_end = param_idx_st + p_gen.num_optim_vars - pg_pvals = optim_var_vals[param_idx_st:param_idx_end] - p_gen.set_optim_var_vals(pg_pvals) - amps[:, j] = p_gen.gen_pulse() - param_idx_st = param_idx_end - j += 1 - - self.optim_var_vals = optim_var_vals - return amps - - -class OptimizerCrabFmin(OptimizerCrab): - """ - Optimises the pulse using the CRAB algorithm [1]_, [2]_. - It uses the ``scipy.optimize.fmin`` function which is effectively a wrapper - for the Nelder-Mead method. It minimises the fidelity error function with - respect to the CRAB basis function coefficients. This is the default - Optimizer for CRAB. - - References - ---------- - .. [1] P. Doria, T. Calarco & S. Montangero. Phys. Rev. Lett. 106, 190501 - (2011). - .. [2] T. Caneva, T. Calarco, & S. Montangero. Phys. Rev. A 84, 022326 - (2011). - """ - - def reset(self): - OptimizerCrab.reset(self) - self.id_text = 'CRAB_FMIN' - self.xtol = 1e-4 - self.ftol = 1e-4 - - def run_optimization(self, term_conds=None): - """ - This function optimisation method is a wrapper to the - scipy.optimize.fmin function. - - It will attempt to minimise the fidelity error with respect to some - parameters, which are determined by _get_optim_var_vals which - in the case of CRAB are the basis function coefficients - - The optimisation end when one of the passed termination conditions - has been met, e.g. 
target achieved, wall time, or - function call or iteration count exceeded. Specifically to the fmin - method, the optimisation will stop when change parameter values - is less than xtol or the change in function value is below ftol. - - If the parameter term_conds=None, then the termination_conditions - attribute must already be set. It will be overwritten if the - parameter is not None - - The result is returned in an OptimResult object, which includes - the final fidelity, time evolution, reason for termination etc - """ - self.init_optim(term_conds) - term_conds = self.termination_conditions - dyn = self.dynamics - cfg = self.config - self.optim_var_vals = self._get_optim_var_vals() - self._build_method_options() - - st_time = timeit.default_timer() - self.wall_time_optimize_start = st_time - - if self.stats is not None: - self.stats.wall_time_optim_start = st_time - self.stats.wall_time_optim_end = 0.0 - self.stats.num_iter = 1 - - result = self._create_result() - - if self.log_level <= logging.INFO: - logger.info("Optimising pulse(s) using {} with " - "'fmin' (Nelder-Mead) method".format(self.alg)) - - try: - ret = spopt.fmin( - self.fid_err_func_wrapper, self.optim_var_vals, - xtol=self.xtol, ftol=self.ftol, - maxiter=term_conds.max_iterations, - maxfun=term_conds.max_fid_func_calls, - full_output=True, disp=self.disp_conv_msg, - retall=self.record_iteration_steps, - callback=self.iter_step_callback_func) - - final_param_vals = ret[0] - num_iter = ret[2] - warn_flag = ret[4] - if self.record_iteration_steps: - self.iteration_steps = ret[5] - amps = self._get_ctrl_amps(final_param_vals) - dyn.update_ctrl_amps(amps) - - # Note the iterations are counted in this object as well - # so there are compared here for interest sake only - if self.num_iter != num_iter: - logger.info("The number of iterations counted {} " - " does not match the number reported {} " - "by {}".format(self.num_iter, num_iter, - self.method)) - result.num_iter = num_iter - if warn_flag == 
0: - result.termination_reason = \ - "Function converged (within tolerance)" - elif warn_flag == 1: - result.termination_reason = \ - "Maximum number of function evaluations reached" - result.max_fid_func_exceeded = True - elif warn_flag == 2: - result.termination_reason = \ - "Maximum number of iterations reached" - result.max_iter_exceeded = True - else: - result.termination_reason = \ - "Unknown (warn_flag={})".format(warn_flag) - - except errors.OptimizationTerminate as except_term: - self._interpret_term_exception(except_term, result) - - end_time = timeit.default_timer() - self._add_common_result_attribs(result, st_time, end_time) - - return result - - -class OptimIterSummary(qtrldump.DumpSummaryItem): - """ - A summary of the most recent iteration of the pulse optimisation - - Attributes - ---------- - iter_num : int - Iteration number of the pulse optimisation - - fid_func_call_num : int - Fidelity function call number of the pulse optimisation - - grad_func_call_num : int - Gradient function call number of the pulse optimisation - - fid_err : float - Fidelity error - - grad_norm : float - fidelity gradient (wrt the control parameters) vector norm - that is the magnitude of the gradient - - wall_time : float - Time spent computing the pulse optimisation so far - (in seconds of elapsed time) - """ - # Note there is some duplication here with Optimizer attributes - # this exists solely to be copied into the summary dump - min_col_width = 11 - summary_property_names = ( - "idx", "iter_num", "fid_func_call_num", "grad_func_call_num", - "fid_err", "grad_norm", "wall_time" - ) - - summary_property_fmt_type = ( - 'd', 'd', 'd', 'd', - 'g', 'g', 'g' - ) - - summary_property_fmt_prec = ( - 0, 0, 0, 0, - 4, 4, 2 - ) - - def __init__(self): - self.reset() - - def reset(self): - qtrldump.DumpSummaryItem.reset(self) - self.iter_num = None - self.fid_func_call_num = None - self.grad_func_call_num = None - self.fid_err = None - self.grad_norm = None - self.wall_time = 0.0 
diff --git a/qutip/control/optimresult.py b/qutip/control/optimresult.py deleted file mode 100644 index 35cc6bc7e4..0000000000 --- a/qutip/control/optimresult.py +++ /dev/null @@ -1,104 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Class containing the results of the pulse optimisation -""" - -import numpy as np - - -class OptimResult(object): - """ - Attributes give the result of the pulse optimisation attempt - - Attributes - ---------- - termination_reason : string - Description of the reason for terminating the optimisation - - fidelity : float - final (normalised) fidelity that was achieved - - initial_fid_err : float - fidelity error before optimisation starting - - fid_err : float - final fidelity error that was achieved - - goal_achieved : boolean - True is the fidely error achieved was below the target - - grad_norm_final : float - Final value of the sum of the squares of the (normalised) fidelity - error gradients - - grad_norm_min_reached : float - True if the optimisation terminated due to the minimum value - of the gradient being reached - - num_iter : integer - Number of iterations of the optimisation algorithm completed - - max_iter_exceeded : boolean - True if the iteration limit was reached - - max_fid_func_exceeded : boolean - True if the fidelity function call limit was reached - - wall_time : float - time elapsed during the optimisation - - wall_time_limit_exceeded : boolean - True if the wall time limit was reached - - time : array[num_tslots+1] of float - Time are the start of each timeslot - with the final value being the total evolution time - - initial_amps : array[num_tslots, n_ctrls] - The amplitudes at the start of the optimisation - - final_amps : array[num_tslots, n_ctrls] - The amplitudes at the end of the optimisation - - evo_full_final : Qobj - The evolution operator 
from t=0 to t=T based on the final amps - - evo_full_initial : Qobj - The evolution operator from t=0 to t=T based on the initial amps - - stats : Stats - Object contaning the stats for the run (if any collected) - - optimizer : Optimizer - Instance of the Optimizer used to generate the result - """ - def __init__(self): - self.reset() - - def reset(self): - self.fidelity = 0.0 - self.initial_fid_err = np.Inf - self.fid_err = np.Inf - self.goal_achieved = False - self.grad_norm_final = 0.0 - self.grad_norm_min_reached = False - self.num_iter = 0 - self.max_iter_exceeded = False - self.num_fid_func_calls = 0 - self.max_fid_func_exceeded = False - self.wall_time = 0.0 - self.wall_time_limit_exceeded = False - self.termination_reason = "not started yet" - self.time = None - self.initial_amps = None - self.final_amps = None - self.evo_full_final = None - self.evo_full_initial = None - self.stats = None - self.optimizer = None diff --git a/qutip/control/propcomp.py b/qutip/control/propcomp.py deleted file mode 100644 index aee23b4030..0000000000 --- a/qutip/control/propcomp.py +++ /dev/null @@ -1,405 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Propagator Computer -Classes used to calculate the propagators, -and also the propagator gradient when exact gradient methods are used - -Note the methods in the _Diag class was inspired by: -DYNAMO - Dynamic Framework for Quantum Optimal Control -See Machnes et.al., arXiv.1011.4874 -""" - -import warnings -import numpy as np -import scipy.linalg as la -import scipy.sparse as sp -# QuTiP -from qutip import Qobj -# QuTiP control modules -from qutip.control import errors -# QuTiP logging -import qutip.logging_utils as logging -logger = logging.get_logger() - - -def _func_deprecation(message, stacklevel=3): - """ - Issue deprecation warning - Using stacklevel=3 will 
ensure message refers the function - calling with the deprecated parameter, - """ - warnings.warn(message, FutureWarning, stacklevel=stacklevel) - - -class PropagatorComputer: - """ - Base for all Propagator Computer classes - that are used to calculate the propagators, - and also the propagator gradient when exact gradient methods are used - Note: they must be instantiated with a Dynamics object, that is the - container for the data that the functions operate on - This base class cannot be used directly. See subclass descriptions - and choose the appropriate one for the application - - Attributes - ---------- - log_level : integer - level of messaging output from the logger. - Options are attributes of qutip_utils.logging, - in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, - assuming everything runs as expected. - The default NOTSET implies that the level will be taken from - the QuTiP settings file, which by default is WARN - - grad_exact : boolean - indicates whether the computer class instance is capable - of computing propagator gradients. It is used to determine - whether to create the Dynamics prop_grad array - """ - def __init__(self, dynamics, params=None): - self.parent = dynamics - self.params = params - self.reset() - - def reset(self): - """ - reset any configuration data - """ - self.id_text = 'PROP_COMP_BASE' - self.log_level = self.parent.log_level - self._grad_exact = False - - def apply_params(self, params=None): - """ - Set object attributes based on the dictionary (if any) passed in the - instantiation, or passed as a parameter - This is called during the instantiation automatically. - The key value pairs are the attribute name and value - Note: attributes are created if they do not exist already, - and are overwritten if they do. 
- """ - if not params: - params = self.params - - if isinstance(params, dict): - self.params = params - for key in params: - setattr(self, key, params[key]) - - @property - def log_level(self): - return logger.level - - @log_level.setter - def log_level(self, lvl): - """ - Set the log_level attribute and set the level of the logger - that is call logger.setLevel(lvl) - """ - logger.setLevel(lvl) - - def grad_exact(self): - return self._grad_exact - - def compute_propagator(self, k): - _func_deprecation("'compute_propagator' has been replaced " - "by '_compute_propagator'") - return self._compute_propagator(k) - - def _compute_propagator(self, k): - """ - calculate the progator between X(k) and X(k+1) - Uses matrix expm of the dyn_gen at that point (in time) - Assumes that the dyn_gen have been been calculated, - i.e. drift and ctrls combined - Return the propagator - """ - dyn = self.parent - dgt = dyn._get_phased_dyn_gen(k)*dyn.tau[k] - if dyn.oper_dtype == Qobj: - prop = dgt.expm() - else: - prop = la.expm(dgt) - return prop - - def compute_diff_prop(self, k, j, epsilon): - _func_deprecation("'compute_diff_prop' has been replaced " - "by '_compute_diff_prop'") - return self._compute_diff_prop(k, j, epsilon) - - def _compute_diff_prop(self, k, j, epsilon): - """ - Calculate the propagator from the current point to a trial point - a distance 'epsilon' (change in amplitude) - in the direction the given control j in timeslot k - Returns the propagator - """ - raise errors.UsageError("Not implemented in the baseclass." - " Choose a subclass") - - def compute_prop_grad(self, k, j, compute_prop=True): - _func_deprecation("'compute_prop_grad' has been replaced " - "by '_compute_prop_grad'") - return self._compute_prop_grad(self, k, j, compute_prop=compute_prop) - - def _compute_prop_grad(self, k, j, compute_prop=True): - """ - Calculate the gradient of propagator wrt the control amplitude - in the timeslot. 
- """ - raise errors.UsageError("Not implemented in the baseclass." - " Choose a subclass") - - -class PropCompApproxGrad(PropagatorComputer): - """ - This subclass can be used when the propagator is calculated simply - by expm of the dynamics generator, i.e. when gradients will be calculated - using approximate methods. - """ - - def reset(self): - """ - reset any configuration data - """ - PropagatorComputer.reset(self) - self.id_text = 'APPROX' - self.grad_exact = False - self.apply_params() - - def _compute_diff_prop(self, k, j, epsilon): - """ - Calculate the propagator from the current point to a trial point - a distance 'epsilon' (change in amplitude) - in the direction the given control j in timeslot k - Returns the propagator - """ - dyn = self.parent - dgt_eps = dyn.tau[k] * ( - dyn._get_phased_dyn_gen(k) - + epsilon*dyn._get_phased_ctrl_dyn_gen(k, j) - ) - - if dyn.oper_dtype == Qobj: - prop_eps = dgt_eps.expm() - else: - prop_eps = la.expm(dgt_eps) - - return prop_eps - - -class PropCompDiag(PropagatorComputer): - """ - Coumputes the propagator exponentiation using diagonalisation of - of the dynamics generator - """ - def reset(self): - """ - reset any configuration data - """ - PropagatorComputer.reset(self) - self.id_text = 'DIAG' - self.grad_exact = True - self.apply_params() - - def _compute_propagator(self, k): - """ - Calculates the exponentiation of the dynamics generator (H) - As part of the calc the the eigen decomposition is required, which - is reused in the propagator gradient calculation - """ - dyn = self.parent - dyn._ensure_decomp_curr(k) - - if dyn.oper_dtype == Qobj: - prop = ( - dyn._dyn_gen_eigenvectors[k] - * dyn._prop_eigen[k] - * dyn._get_dyn_gen_eigenvectors_adj(k) - ) - else: - prop = ( - dyn._dyn_gen_eigenvectors[k] - .dot(dyn._prop_eigen[k]) - .dot(dyn._get_dyn_gen_eigenvectors_adj(k)) - ) - - return prop - - def _compute_prop_grad(self, k, j, compute_prop=True): - """ - Calculate the gradient of propagator wrt the control 
amplitude - in the timeslot. - - Returns: - [prop], prop_grad - """ - dyn = self.parent - dyn._ensure_decomp_curr(k) - - if compute_prop: - prop = self._compute_propagator(k) - - if dyn.oper_dtype == Qobj: - # put control dyn_gen in combined dg diagonal basis - cdg = ( - dyn._get_dyn_gen_eigenvectors_adj(k) - * dyn._get_phased_ctrl_dyn_gen(k, j) - * dyn._dyn_gen_eigenvectors[k]) - # multiply (elementwise) by timeslice and factor matrix - cdg = Qobj(np.multiply(cdg.full()*dyn.tau[k], - dyn._dyn_gen_factormatrix[k]), - dims=dyn.dyn_dims) - # Return to canonical basis - prop_grad = ( - dyn._dyn_gen_eigenvectors[k] * cdg - * dyn._get_dyn_gen_eigenvectors_adj(k)) - else: - # put control dyn_gen in combined dg diagonal basis - cdg = ( - dyn._get_dyn_gen_eigenvectors_adj(k) - .dot(dyn._get_phased_ctrl_dyn_gen(k, j)) - .dot(dyn._dyn_gen_eigenvectors[k])) - # multiply (elementwise) by timeslice and factor matrix - cdg = np.multiply(cdg * dyn.tau[k], dyn._dyn_gen_factormatrix[k]) - # Return to canonical basis - prop_grad = ( - dyn._dyn_gen_eigenvectors[k].dot(cdg) - .dot(dyn._get_dyn_gen_eigenvectors_adj(k))) - - if compute_prop: - return prop, prop_grad - else: - return prop_grad - - -class PropCompAugMat(PropagatorComputer): - """ - Augmented Matrix (deprecated - see _Frechet) - - It should work for all systems, e.g. open, symplectic - There will be other PropagatorComputer subclasses that are more efficient - The _Frechet class should provide exactly the same functionality - more efficiently. 
- - Note the propagator gradient calculation using the augmented matrix - is taken from: - 'Robust quantum gates for open systems via optimal control: - Markovian versus non-Markovian dynamics' - Frederik F Floether, Pierre de Fouquieres, and Sophie G Schirmer - """ - def reset(self): - PropagatorComputer.reset(self) - self.id_text = 'AUG_MAT' - self.grad_exact = True - self.apply_params() - - def _get_aug_mat(self, k, j): - """ - Generate the matrix [[A, E], [0, A]] where - A is the overall dynamics generator - E is the control dynamics generator - for a given timeslot and control - returns this augmented matrix - """ - dyn = self.parent - dg = dyn._get_phased_dyn_gen(k) - - if dyn.oper_dtype == Qobj: - A = dg.data*dyn.tau[k] - E = dyn._get_phased_ctrl_dyn_gen(k, j).data*dyn.tau[k] - Z = sp.csr_matrix(dg.data.shape) - aug = Qobj(sp.vstack([sp.hstack([A, E]), sp.hstack([Z, A])])) - else: - A = dg*dyn.tau[k] - E = dyn._get_phased_ctrl_dyn_gen(k, j)*dyn.tau[k] - Z = np.zeros(dg.shape) - aug = np.vstack([np.hstack([A, E]), np.hstack([Z, A])]) - return aug - - def _compute_prop_grad(self, k, j, compute_prop=True): - """ - Calculate the gradient of propagator wrt the control amplitude - in the timeslot using the exponentiation of the the augmented - matrix. 
- The propagtor is calculated for 'free' in this method - and hence it is returned if compute_prop==True - Returns: - [prop], prop_grad - """ - dyn = self.parent - dg = dyn._get_phased_dyn_gen(k) - aug = self._get_aug_mat(k, j) - - if dyn.oper_dtype == Qobj: - aug_exp = aug.expm() - prop_grad = Qobj(aug_exp[:dg.shape[0], dg.shape[1]:], - dims=dyn.dyn_dims) - if compute_prop: - prop = Qobj(aug_exp[:dg.shape[0], :dg.shape[1]], - dims=dyn.dyn_dims) - else: - aug_exp = la.expm(aug) - prop_grad = aug_exp[:dg.shape[0], dg.shape[1]:] - if compute_prop: - prop = aug_exp[:dg.shape[0], :dg.shape[1]] - - if compute_prop: - return prop, prop_grad - else: - return prop_grad - - -class PropCompFrechet(PropagatorComputer): - """ - Frechet method for calculating the propagator: exponentiating the combined - dynamics generator and the propagator gradient. It should work for all - systems, e.g. unitary, open, symplectic. There are other - :obj:`PropagatorComputer` subclasses that may be more efficient. - """ - def reset(self): - PropagatorComputer.reset(self) - self.id_text = 'FRECHET' - self.grad_exact = True - self.apply_params() - - def _compute_prop_grad(self, k, j, compute_prop=True): - """ - Calculate the gradient of propagator wrt the control amplitude - in the timeslot using the expm_frechet method - The propagtor is calculated (almost) for 'free' in this method - and hence it is returned if compute_prop==True - Returns: - [prop], prop_grad - """ - dyn = self.parent - - if dyn.oper_dtype == Qobj: - A = dyn._get_phased_dyn_gen(k).full()*dyn.tau[k] - E = dyn._get_phased_ctrl_dyn_gen(k, j).full()*dyn.tau[k] - if compute_prop: - prop_dense, prop_grad_dense = la.expm_frechet(A, E) - prop = Qobj(prop_dense, dims=dyn.dyn_dims) - prop_grad = Qobj(prop_grad_dense, dims=dyn.dyn_dims) - else: - prop_grad_dense = la.expm_frechet(A, E, compute_expm=False) - prop_grad = Qobj(prop_grad_dense, dims=dyn.dyn_dims) - else: - A = dyn._get_phased_dyn_gen(k)*dyn.tau[k] - E = 
dyn._get_phased_ctrl_dyn_gen(k, j)*dyn.tau[k] - if compute_prop: - prop, prop_grad = la.expm_frechet(A, E) - else: - prop_grad = la.expm_frechet(A, E, compute_expm=False) - if compute_prop: - return prop, prop_grad - else: - return prop_grad diff --git a/qutip/control/pulsegen.py b/qutip/control/pulsegen.py deleted file mode 100644 index b01ee3a829..0000000000 --- a/qutip/control/pulsegen.py +++ /dev/null @@ -1,1250 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Pulse generator - Generate pulses for the timeslots -Each class defines a gen_pulse function that produces a float array of -size num_tslots. Each class produces a differ type of pulse. -See the class and gen_pulse function descriptions for details -""" - -import numpy as np - -import qutip.control.dynamics as dynamics -import qutip.control.errors as errors - -import qutip.logging_utils as logging -logger = logging.get_logger() - -def create_pulse_gen(pulse_type='RND', dyn=None, pulse_params=None): - """ - Create and return a pulse generator object matching the given type. - The pulse generators each produce a different type of pulse, - see the gen_pulse function description for details. - These are the random pulse options: - - RND - Independent random value in each timeslot - RNDFOURIER - Fourier series with random coefficients - RNDWAVES - Summation of random waves - RNDWALK1 - Random change in amplitude each timeslot - RNDWALK2 - Random change in amp gradient each timeslot - - These are the other non-periodic options: - - LIN - Linear, i.e. 
contant gradient over the time - ZERO - special case of the LIN pulse, where the gradient is 0 - - These are the periodic options - - SINE - Sine wave - SQUARE - Square wave - SAW - Saw tooth wave - TRIANGLE - Triangular wave - - If a Dynamics object is passed in then this is used in instantiate - the PulseGen, meaning that some timeslot and amplitude properties - are copied over. - - """ - if pulse_type == 'RND': - return PulseGenRandom(dyn, params=pulse_params) - if pulse_type == 'RNDFOURIER': - return PulseGenRndFourier(dyn, params=pulse_params) - if pulse_type == 'RNDWAVES': - return PulseGenRndWaves(dyn, params=pulse_params) - if pulse_type == 'RNDWALK1': - return PulseGenRndWalk1(dyn, params=pulse_params) - if pulse_type == 'RNDWALK2': - return PulseGenRndWalk2(dyn, params=pulse_params) - elif pulse_type == 'LIN': - return PulseGenLinear(dyn, params=pulse_params) - elif pulse_type == 'ZERO': - return PulseGenZero(dyn, params=pulse_params) - elif pulse_type == 'SINE': - return PulseGenSine(dyn, params=pulse_params) - elif pulse_type == 'SQUARE': - return PulseGenSquare(dyn, params=pulse_params) - elif pulse_type == 'SAW': - return PulseGenSaw(dyn, params=pulse_params) - elif pulse_type == 'TRIANGLE': - return PulseGenTriangle(dyn, params=pulse_params) - elif pulse_type == 'GAUSSIAN': - return PulseGenGaussian(dyn, params=pulse_params) - elif pulse_type == 'CRAB_FOURIER': - return PulseGenCrabFourier(dyn, params=pulse_params) - elif pulse_type == 'GAUSSIAN_EDGE': - return PulseGenGaussianEdge(dyn, params=pulse_params) - else: - raise ValueError("No option for pulse_type '{}'".format(pulse_type)) - - -class PulseGen: - """ - Pulse generator - Base class for all Pulse generators - The object can optionally be instantiated with a Dynamics object, - in which case the timeslots and amplitude scaling and offset - are copied from that. 
- Otherwise the class can be used independently by setting: - tau (array of timeslot durations) - or - num_tslots and pulse_time for equally spaced timeslots - - Attributes - ---------- - num_tslots : integer - Number of timeslots, aka timeslices - (copied from Dynamics if given) - - pulse_time : float - total duration of the pulse - (copied from Dynamics.evo_time if given) - - scaling : float - linear scaling applied to the pulse - (copied from Dynamics.initial_ctrl_scaling if given) - - offset : float - linear offset applied to the pulse - (copied from Dynamics.initial_ctrl_offset if given) - - tau : array[num_tslots] of float - Duration of each timeslot - (copied from Dynamics if given) - - lbound : float - Lower boundary for the pulse amplitudes - Note that the scaling and offset attributes can be used to fully - bound the pulse for all generators except some of the random ones - This bound (if set) may result in additional shifting / scaling - Default is -Inf - - ubound : float - Upper boundary for the pulse amplitudes - Note that the scaling and offset attributes can be used to fully - bound the pulse for all generators except some of the random ones - This bound (if set) may result in additional shifting / scaling - Default is Inf - - periodic : boolean - True if the pulse generator produces periodic pulses - - random : boolean - True if the pulse generator produces random pulses - - log_level : integer - level of messaging output from the logger. - Options are attributes of qutip.logging_utils, - in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, - assuming everything runs as expected. 
- The default NOTSET implies that the level will be taken from - the QuTiP settings file, which by default is WARN - """ - def __init__(self, dyn=None, params=None): - self.parent = dyn - self.params = params - self.reset() - - def reset(self): - """ - reset attributes to default values - """ - if isinstance(self.parent, dynamics.Dynamics): - dyn = self.parent - self.num_tslots = dyn.num_tslots - self.pulse_time = dyn.evo_time - self.scaling = dyn.initial_ctrl_scaling - self.offset = dyn.initial_ctrl_offset - self.tau = dyn.tau - self.log_level = dyn.log_level - else: - self.num_tslots = 100 - self.pulse_time = 1.0 - self.scaling = 1.0 - self.tau = None - self.offset = 0.0 - - self._uses_time = False - self.time = None - self._pulse_initialised = False - self.periodic = False - self.random = False - self.lbound = None - self.ubound = None - self.ramping_pulse = None - - self.apply_params() - - def apply_params(self, params=None): - """ - Set object attributes based on the dictionary (if any) passed in the - instantiation, or passed as a parameter - This is called during the instantiation automatically. - The key value pairs are the attribute name and value - """ - if not params: - params = self.params - - if isinstance(params, dict): - self.params = params - for key in params: - setattr(self, key, params[key]) - - @property - def log_level(self): - return logger.level - - @log_level.setter - def log_level(self, lvl): - """ - Set the log_level attribute and set the level of the logger - that is call logger.setLevel(lvl) - """ - logger.setLevel(lvl) - - def gen_pulse(self): - """ - returns the pulse as an array of vales for each timeslot - Must be implemented by subclass - """ - # must be implemented by subclass - raise errors.UsageError( - "No method defined for generating a pulse. 
" - " Suspect base class was used where sub class should have been") - - def init_pulse(self): - """ - Initialise the pulse parameters - """ - if self.tau is None: - self.tau = np.ones(self.num_tslots, dtype='f') * \ - self.pulse_time/self.num_tslots - - if self._uses_time: - self.time = np.zeros(self.num_tslots, dtype=float) - for k in range(self.num_tslots-1): - self.time[k+1] = self.time[k] + self.tau[k] - - self._pulse_initialised = True - - if self.lbound is not None: - if np.isinf(self.lbound): - self.lbound = None - if self.ubound is not None: - if np.isinf(self.ubound): - self.ubound = None - - if self.ubound is not None and self.lbound is not None: - if self.ubound < self.lbound: - raise ValueError("ubound cannot be less the lbound") - - def _apply_bounds_and_offset(self, pulse): - """ - Ensure that the randomly generated pulse fits within the bounds - (after applying the offset) - Assumes that pulses passed are centered around zero (on average) - """ - if self.lbound is None and self.ubound is None: - return pulse + self.offset - - max_amp = max(pulse) - min_amp = min(pulse) - if ( - (self.ubound is None or max_amp + self.offset <= self.ubound) - and (self.lbound is None or min_amp + self.offset >= self.lbound) - ): - return pulse + self.offset - - # Some shifting / scaling is required. 
- if self.ubound is None or self.lbound is None: - # One of the bounds is inf, so just shift the pulse - if self.lbound is None: - # max_amp + offset must exceed the ubound - return pulse + self.ubound - max_amp - else: - # min_amp + offset must exceed the lbound - return pulse + self.lbound - min_amp - else: - bound_range = self.ubound - self.lbound - amp_range = max_amp - min_amp - if max_amp - min_amp > bound_range: - # pulse range is too high, it must be scaled - pulse = pulse * bound_range / amp_range - - # otherwise the pulse should fit anyway - return pulse + self.lbound - min(pulse) - - def _apply_ramping_pulse(self, pulse, ramping_pulse=None): - if ramping_pulse is None: - ramping_pulse = self.ramping_pulse - if ramping_pulse is not None: - pulse = pulse*ramping_pulse - - return pulse - - -class PulseGenZero(PulseGen): - """ - Generates a flat pulse - """ - def gen_pulse(self): - """ - Generate a pulse with the same value in every timeslot. - The value will be zero, unless the offset is not zero, - in which case it will be the offset - """ - pulse = np.zeros(self.num_tslots) - return self._apply_bounds_and_offset(pulse) - - -class PulseGenRandom(PulseGen): - """ - Generates random pulses as simply random values for each timeslot - """ - def reset(self): - PulseGen.reset(self) - self.random = True - self.apply_params() - - def gen_pulse(self): - """ - Generate a pulse of random values between 1 and -1 - Values are scaled using the scaling property - and shifted using the offset property - Returns the pulse as an array of vales for each timeslot - """ - pulse = (2*np.random.random(self.num_tslots) - 1) * self.scaling - - return self._apply_bounds_and_offset(pulse) - - -class PulseGenRndFourier(PulseGen): - """ - Generates pulses by summing sine waves as a Fourier series - with random coefficients - - Attributes - ---------- - scaling : float - The pulses should fit approximately within -/+scaling - (before the offset is applied) - as it is used to set a 
maximum for each component wave - Use bounds to be sure - (copied from Dynamics.initial_ctrl_scaling if given) - - min_wavelen : float - Minimum wavelength of any component wave - Set by default to 1/10th of the pulse time - """ - - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - self.random = True - self._uses_time = True - try: - self.min_wavelen = self.pulse_time / 10.0 - except AttributeError: - self.min_wavelen = 0.1 - self.apply_params() - - def gen_pulse(self, min_wavelen=None): - """ - Generate a random pulse based on a Fourier series with a minimum - wavelength - """ - if min_wavelen is not None: - self.min_wavelen = min_wavelen - min_wavelen = self.min_wavelen - - if min_wavelen > self.pulse_time: - raise ValueError("Minimum wavelength cannot be greater than " - "the pulse time") - if not self._pulse_initialised: - self.init_pulse() - - # use some phase to avoid the first pulse being always 0 - - sum_wave = np.zeros(self.tau.shape) - wavelen = 2.0*self.pulse_time - - t = self.time - wl = [] - while wavelen > min_wavelen: - wl.append(wavelen) - wavelen = wavelen/2.0 - - num_comp_waves = len(wl) - amp_scale = np.sqrt(8)*self.scaling / float(num_comp_waves) - - for wavelen in wl: - amp = amp_scale*(np.random.rand()*2 - 1) - phase_off = np.random.rand()*np.pi/2.0 - curr_wave = amp*np.sin(2*np.pi*t/wavelen + phase_off) - sum_wave += curr_wave - - return self._apply_bounds_and_offset(sum_wave) - - -class PulseGenRndWaves(PulseGen): - """ - Generates pulses by summing sine waves with random frequencies - amplitudes and phase offset - - Attributes - ---------- - scaling : float - The pulses should fit approximately within -/+scaling - (before the offset is applied) - as it is used to set a maximum for each component wave - Use bounds to be sure - (copied from Dynamics.initial_ctrl_scaling if given) - - num_comp_waves : integer - Number of component waves. 
That is the number of waves that - are summed to make the pulse signal - Set to 20 by default. - - min_wavelen : float - Minimum wavelength of any component wave - Set by default to 1/10th of the pulse time - - max_wavelen : float - Maximum wavelength of any component wave - Set by default to twice the pulse time - """ - - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - self.random = True - self._uses_time = True - self.num_comp_waves = 20 - try: - self.min_wavelen = self.pulse_time / 10.0 - except AttributeError: - self.min_wavelen = 0.1 - try: - self.max_wavelen = 2*self.pulse_time - except AttributeError: - self.max_wavelen = 10.0 - self.apply_params() - - def gen_pulse(self, num_comp_waves=None, - min_wavelen=None, max_wavelen=None): - """ - Generate a random pulse by summing sine waves with random freq, - amplitude and phase offset - """ - - if num_comp_waves is not None: - self.num_comp_waves = num_comp_waves - if min_wavelen is not None: - self.min_wavelen = min_wavelen - if max_wavelen is not None: - self.max_wavelen = max_wavelen - - num_comp_waves = self.num_comp_waves - min_wavelen = self.min_wavelen - max_wavelen = self.max_wavelen - - if min_wavelen > self.pulse_time: - raise ValueError("Minimum wavelength cannot be greater than " - "the pulse time") - if max_wavelen <= min_wavelen: - raise ValueError("Maximum wavelength must be greater than " - "the minimum wavelength") - - if not self._pulse_initialised: - self.init_pulse() - - # use some phase to avoid the first pulse being always 0 - - sum_wave = np.zeros(self.tau.shape) - - t = self.time - wl_range = max_wavelen - min_wavelen - amp_scale = np.sqrt(8)*self.scaling / float(num_comp_waves) - for n in range(num_comp_waves): - amp = amp_scale*(np.random.rand()*2 - 1) - phase_off = np.random.rand()*np.pi/2.0 - wavelen = min_wavelen + np.random.rand()*wl_range - curr_wave = amp*np.sin(2*np.pi*t/wavelen + phase_off) - sum_wave += curr_wave - - return 
self._apply_bounds_and_offset(sum_wave) - - -class PulseGenRndWalk1(PulseGen): - """ - Generates pulses by using a random walk algorithm - - Attributes - ---------- - scaling : float - Used as the range for the starting amplitude - Note must used bounds if values must be restricted. - Also scales the max_d_amp value - (copied from Dynamics.initial_ctrl_scaling if given) - - max_d_amp : float - Maximum amount amplitude will change between timeslots - Note this is also factored by the scaling attribute - """ - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - self.random = True - self.max_d_amp = 0.1 - self.apply_params() - - def gen_pulse(self, max_d_amp=None): - """ - Generate a pulse by changing the amplitude a random amount between - -max_d_amp and +max_d_amp at each timeslot. The walk will start at - a random amplitude between -/+scaling. - """ - if max_d_amp is not None: - self.max_d_amp = max_d_amp - max_d_amp = self.max_d_amp*self.scaling - - if not self._pulse_initialised: - self.init_pulse() - - walk = np.zeros(self.tau.shape) - amp = self.scaling*(np.random.rand()*2 - 1) - for k in range(len(walk)): - walk[k] = amp - amp += (np.random.rand()*2 - 1)*max_d_amp - - return self._apply_bounds_and_offset(walk) - - -class PulseGenRndWalk2(PulseGen): - """ - Generates pulses by using a random walk algorithm - Note this is best used with bounds as the walks tend to wander far - - Attributes - ---------- - scaling : float - Used as the range for the starting amplitude - Note must used bounds if values must be restricted. 
- Also scales the max_d2_amp value - (copied from Dynamics.initial_ctrl_scaling if given) - - max_d2_amp : float - Maximum amount amplitude gradient will change between timeslots - Note this is also factored by the scaling attribute - """ - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - self.random = True - self.max_d2_amp = 0.01 - self.apply_params() - - def gen_pulse(self, init_grad_range=None, max_d2_amp=None): - """ - Generate a pulse by changing the amplitude gradient a random amount - between -max_d2_amp and +max_d2_amp at each timeslot. - The walk will start at a random amplitude between -/+scaling. - The gradient will start at 0 - """ - if max_d2_amp is not None: - self.max_d2_amp = max_d2_amp - - max_d2_amp = self.max_d2_amp - - if not self._pulse_initialised: - self.init_pulse() - - walk = np.zeros(self.tau.shape) - amp = self.scaling*(np.random.rand()*2 - 1) - print("Start amp {}".format(amp)) - grad = 0.0 - print("Start grad {}".format(grad)) - for k in range(len(walk)): - walk[k] = amp - grad += (np.random.rand()*2 - 1)*max_d2_amp - amp += grad - # print("grad {}".format(grad)) - - return self._apply_bounds_and_offset(walk) - - -class PulseGenLinear(PulseGen): - """ - Generates linear pulses - - Attributes - ---------- - gradient : float - Gradient of the line. - Note this is calculated from the start_val and end_val if these - are given - - start_val : float - Start point of the line. That is the starting amplitude - - end_val : float - End point of the line. 
- That is the amplitude at the start of the last timeslot - """ - - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - - self.gradient = None - self.start_val = -1.0 - self.end_val = 1.0 - self.apply_params() - - def init_pulse(self, gradient=None, start_val=None, end_val=None): - """ - Calculate the gradient if pulse is defined by start and - end point values - """ - PulseGen.init_pulse(self) - if start_val is not None and end_val is not None: - self.start_val = start_val - self.end_val = end_val - - if self.start_val is not None and self.end_val is not None: - self.gradient = float(self.end_val - self.start_val) / \ - (self.pulse_time - self.tau[-1]) - - def gen_pulse(self, gradient=None, start_val=None, end_val=None): - """ - Generate a linear pulse using either the gradient and start value - or using the end point to calulate the gradient - Note that the scaling and offset parameters are still applied, - so unless these values are the default 1.0 and 0.0, then the - actual gradient etc will be different - Returns the pulse as an array of vales for each timeslot - """ - if (gradient is not None or - start_val is not None or end_val is not None): - self.init_pulse(gradient, start_val, end_val) - if not self._pulse_initialised: - self.init_pulse() - - pulse = np.empty(self.num_tslots) - t = 0.0 - for k in range(self.num_tslots): - y = self.gradient*t + self.start_val - pulse[k] = self.scaling*y - t = t + self.tau[k] - - return self._apply_bounds_and_offset(pulse) - - -class PulseGenPeriodic(PulseGen): - """ - Intermediate class for all periodic pulse generators - All of the periodic pulses range from -1 to 1 - All have a start phase that can be set between 0 and 2pi - - Attributes - ---------- - num_waves : float - Number of complete waves (cycles) that occur in the pulse. 
- wavelen and freq calculated from this if it is given - - wavelen : float - Wavelength of the pulse (assuming the speed is 1) - freq is calculated from this if it is given - - freq : float - Frequency of the pulse - - start_phase : float - Phase of the pulse signal when t=0 - """ - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - self.periodic = True - self.num_waves = None - self.freq = 1.0 - self.wavelen = None - self.start_phase = 0.0 - self.apply_params() - - def init_pulse(self, num_waves=None, wavelen=None, - freq=None, start_phase=None): - """ - Calculate the wavelength, frequency, number of waves etc - from the each other and the other parameters - If num_waves is given then the other parameters are worked from this - Otherwise if the wavelength is given then it is the driver - Otherwise the frequency is used to calculate wavelength and num_waves - """ - PulseGen.init_pulse(self) - - if start_phase is not None: - self.start_phase = start_phase - - if num_waves is not None or wavelen is not None or freq is not None: - self.num_waves = num_waves - self.wavelen = wavelen - self.freq = freq - - if self.num_waves is not None: - self.freq = float(self.num_waves) / self.pulse_time - self.wavelen = 1.0/self.freq - elif self.wavelen is not None: - self.freq = 1.0/self.wavelen - self.num_waves = self.wavelen*self.pulse_time - else: - self.wavelen = 1.0/self.freq - self.num_waves = self.wavelen*self.pulse_time - - -class PulseGenSine(PulseGenPeriodic): - """ - Generates sine wave pulses - """ - def gen_pulse(self, num_waves=None, wavelen=None, - freq=None, start_phase=None): - """ - Generate a sine wave pulse - If no params are provided then the class object attributes are used. - If they are provided, then these will reinitialise the object attribs. 
- returns the pulse as an array of vales for each timeslot - """ - if start_phase is not None: - self.start_phase = start_phase - - if num_waves is not None or wavelen is not None or freq is not None: - self.init_pulse(num_waves, wavelen, freq, start_phase) - - if not self._pulse_initialised: - self.init_pulse() - - pulse = np.empty(self.num_tslots) - t = 0.0 - for k in range(self.num_tslots): - phase = 2*np.pi*self.freq*t + self.start_phase - pulse[k] = self.scaling*np.sin(phase) - t = t + self.tau[k] - return self._apply_bounds_and_offset(pulse) - - -class PulseGenSquare(PulseGenPeriodic): - """ - Generates square wave pulses - """ - def gen_pulse(self, num_waves=None, wavelen=None, - freq=None, start_phase=None): - """ - Generate a square wave pulse - If no parameters are pavided then the class object attributes are used. - If they are provided, then these will reinitialise the object attribs - """ - if start_phase is not None: - self.start_phase = start_phase - - if num_waves is not None or wavelen is not None or freq is not None: - self.init_pulse(num_waves, wavelen, freq, start_phase) - - if not self._pulse_initialised: - self.init_pulse() - - pulse = np.empty(self.num_tslots) - t = 0.0 - for k in range(self.num_tslots): - phase = 2*np.pi*self.freq*t + self.start_phase - x = phase/(2*np.pi) - y = 4*np.floor(x) - 2*np.floor(2*x) + 1 - pulse[k] = self.scaling*y - t = t + self.tau[k] - return self._apply_bounds_and_offset(pulse) - - -class PulseGenSaw(PulseGenPeriodic): - """ - Generates saw tooth wave pulses - """ - def gen_pulse(self, num_waves=None, wavelen=None, - freq=None, start_phase=None): - """ - Generate a saw tooth wave pulse - If no parameters are pavided then the class object attributes are used. 
- If they are provided, then these will reinitialise the object attribs - """ - if start_phase is not None: - self.start_phase = start_phase - - if num_waves is not None or wavelen is not None or freq is not None: - self.init_pulse(num_waves, wavelen, freq, start_phase) - - if not self._pulse_initialised: - self.init_pulse() - - pulse = np.empty(self.num_tslots) - t = 0.0 - for k in range(self.num_tslots): - phase = 2*np.pi*self.freq*t + self.start_phase - x = phase/(2*np.pi) - y = 2*(x - np.floor(0.5 + x)) - pulse[k] = self.scaling*y - t = t + self.tau[k] - return self._apply_bounds_and_offset(pulse) - - -class PulseGenTriangle(PulseGenPeriodic): - """ - Generates triangular wave pulses - """ - def gen_pulse(self, num_waves=None, wavelen=None, - freq=None, start_phase=None): - """ - Generate a sine wave pulse - If no parameters are pavided then the class object attributes are used. - If they are provided, then these will reinitialise the object attribs - """ - if start_phase is not None: - self.start_phase = start_phase - - if num_waves is not None or wavelen is not None or freq is not None: - self.init_pulse(num_waves, wavelen, freq, start_phase) - - if not self._pulse_initialised: - self.init_pulse() - - pulse = np.empty(self.num_tslots) - t = 0.0 - for k in range(self.num_tslots): - phase = 2*np.pi*self.freq*t + self.start_phase + np.pi/2.0 - x = phase/(2*np.pi) - y = 2*np.abs(2*(x - np.floor(0.5 + x))) - 1 - pulse[k] = self.scaling*y - t = t + self.tau[k] - - return self._apply_bounds_and_offset(pulse) - - -class PulseGenGaussian(PulseGen): - """ - Generates pulses with a Gaussian profile - """ - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - self._uses_time = True - self.mean = 0.5*self.pulse_time - self.variance = 0.5*self.pulse_time - self.apply_params() - - def gen_pulse(self, mean=None, variance=None): - """ - Generate a pulse with Gaussian shape. 
The peak is centre around the - mean and the variance determines the breadth - The scaling and offset attributes are applied as an amplitude - and fixed linear offset. Note that the maximum amplitude will be - scaling + offset. - """ - if not self._pulse_initialised: - self.init_pulse() - - if mean: - Tm = mean - else: - Tm = self.mean - if variance: - Tv = variance - else: - Tv = self.variance - t = self.time - T = self.pulse_time - - pulse = self.scaling*np.exp(-(t-Tm)**2/(2*Tv)) - return self._apply_bounds_and_offset(pulse) - - -class PulseGenGaussianEdge(PulseGen): - """ - Generate pulses with inverted Gaussian ramping in and out - It's intended use for a ramping modulation, which is often required in - experimental setups. - - Attributes - ---------- - decay_time : float - Determines the ramping rate. It is approximately the time - required to bring the pulse to full amplitude - It is set to 1/10 of the pulse time by default - """ - - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - self._uses_time = True - self.decay_time = self.pulse_time / 10.0 - self.apply_params() - - def gen_pulse(self, decay_time=None): - """ - Generate a pulse that starts and ends at zero and 1.0 in between - then apply scaling and offset - The tailing in and out is an inverted Gaussian shape - """ - if not self._pulse_initialised: - self.init_pulse() - - t = self.time - if decay_time: - Td = decay_time - else: - Td = self.decay_time - T = self.pulse_time - pulse = 1.0 - np.exp(-t**2/Td) - np.exp(-(t-T)**2/Td) - pulse = pulse*self.scaling - - return self._apply_bounds_and_offset(pulse) - - -### The following are pulse generators for the CRAB algorithm ### -# AJGP 2015-05-14: -# The intention is to have a more general base class that allows -# setting of general basis functions - -class PulseGenCrab(PulseGen): - """ - Base class for all CRAB pulse generators - Note these are more involved in the optimisation process as they are - used to produce 
piecewise control amplitudes each time new optimisation - parameters are tried - - Attributes - ---------- - num_coeffs : integer - Number of coefficients used for each basis function - - num_basis_funcs : integer - Number of basis functions - In this case set at 2 and should not be changed - - coeffs : float array[num_coeffs, num_basis_funcs] - The basis coefficient values - - randomize_coeffs : bool - If True (default) then the coefficients are set to some random values - when initialised, otherwise they will all be equal to self.scaling - """ - def __init__(self, dyn=None, num_coeffs=None, params=None): - self.parent = dyn - self.num_coeffs = num_coeffs - self.params = params - self.reset() - - def reset(self): - """ - reset attributes to default values - """ - PulseGen.reset(self) - self.NUM_COEFFS_WARN_LVL = 20 - self.DEF_NUM_COEFFS = 4 - self._BSC_ALL = 1 - self._BSC_GT_MEAN = 2 - self._BSC_LT_MEAN = 3 - - self._uses_time = True - self.time = None - self.num_basis_funcs = 2 - self.num_optim_vars = 0 - self.coeffs = None - self.randomize_coeffs = True - self._num_coeffs_estimated = False - self.guess_pulse_action = 'MODULATE' - self.guess_pulse = None - self.guess_pulse_func = None - self.apply_params() - - def init_pulse(self, num_coeffs=None): - """ - Set the initial freq and coefficient values - """ - PulseGen.init_pulse(self) - self.init_coeffs(num_coeffs=num_coeffs) - - if self.guess_pulse is not None: - self.init_guess_pulse() - self._init_bounds() - - if self.log_level <= logging.DEBUG and not self._num_coeffs_estimated: - logger.debug( - "CRAB pulse initialised with {} coefficients per basis " - "function, which means a total of {} " - "optimisation variables for this pulse".format( - self.num_coeffs, self.num_optim_vars)) - -# def generate_guess_pulse(self) -# if isinstance(self.guess_pulsegen, PulseGen): -# self.guess_pulse = self.guess_pulsegen.gen_pulse() -# return self.guess_pulse - - def init_coeffs(self, num_coeffs=None): - """ - Generate the 
initial ceofficent values. - - Parameters - ---------- - num_coeffs : integer - Number of coefficients used for each basis function - If given this overides the default and sets the attribute - of the same name. - """ - if num_coeffs: - self.num_coeffs = num_coeffs - - self._num_coeffs_estimated = False - if not self.num_coeffs: - if isinstance(self.parent, dynamics.Dynamics): - dim = self.parent.get_drift_dim() - self.num_coeffs = self.estimate_num_coeffs(dim) - self._num_coeffs_estimated = True - else: - self.num_coeffs = self.DEF_NUM_COEFFS - self.num_optim_vars = self.num_coeffs*self.num_basis_funcs - - if self._num_coeffs_estimated: - if self.log_level <= logging.INFO: - logger.info( - "The number of CRAB coefficients per basis function " - "has been estimated as {}, which means a total of {} " - "optimisation variables for this pulse. Based on the " - "dimension ({}) of the system".format( - self.num_coeffs, self.num_optim_vars, dim)) - # Issue warning if beyond the recommended level - if self.log_level <= logging.WARN: - if self.num_coeffs > self.NUM_COEFFS_WARN_LVL: - logger.warn( - "The estimated number of coefficients {} exceeds " - "the amount ({}) recommended for efficient " - "optimisation. You can set this level explicitly " - "to suppress this message.".format( - self.num_coeffs, self.NUM_COEFFS_WARN_LVL)) - - if self.randomize_coeffs: - r = np.random.random([self.num_coeffs, self.num_basis_funcs]) - self.coeffs = (2*r - 1.0) * self.scaling - else: - self.coeffs = np.ones([self.num_coeffs, - self.num_basis_funcs])*self.scaling - - def estimate_num_coeffs(self, dim): - """ - Estimate the number coefficients based on the dimensionality of the - system. 
- Returns - ------- - num_coeffs : int - estimated number of coefficients - """ - num_coeffs = max(2, dim - 1) - return num_coeffs - - def get_optim_var_vals(self): - """ - Get the parameter values to be optimised - Returns - ------- - list (or 1d array) of floats - """ - return self.coeffs.ravel().tolist() - - def set_optim_var_vals(self, param_vals): - """ - Set the values of the any of the pulse generation parameters - based on new values from the optimisation method - Typically this will be the basis coefficients - """ - # Type and size checking avoided here as this is in the - # main optmisation call sequence - self.set_coeffs(param_vals) - - def set_coeffs(self, param_vals): - self.coeffs = param_vals.reshape( - [self.num_coeffs, self.num_basis_funcs]) - - def init_guess_pulse(self): - - self.guess_pulse_func = None - if not self.guess_pulse_action: - logger.WARN("No guess pulse action given, hence ignored.") - elif self.guess_pulse_action.upper() == 'MODULATE': - self.guess_pulse_func = self.guess_pulse_modulate - elif self.guess_pulse_action.upper() == 'ADD': - self.guess_pulse_func = self.guess_pulse_add - else: - logger.WARN("No option for guess pulse action '{}' " - ", hence ignored.".format(self.guess_pulse_action)) - - def guess_pulse_add(self, pulse): - pulse = pulse + self.guess_pulse - return pulse - - def guess_pulse_modulate(self, pulse): - pulse = (1.0 + pulse)*self.guess_pulse - return pulse - - def _init_bounds(self): - add_guess_pulse_scale = False - if self.lbound is None and self.ubound is None: - # no bounds to apply - self._bound_scale_cond = None - elif self.lbound is None: - # only upper bound - if self.ubound > 0: - self._bound_mean = 0.0 - self._bound_scale = self.ubound - else: - add_guess_pulse_scale = True - self._bound_scale =\ - self.scaling*self.num_coeffs + self.get_guess_pulse_scale() - self._bound_mean = -abs(self._bound_scale) + self.ubound - self._bound_scale_cond = self._BSC_GT_MEAN - - elif self.ubound is None: - # only 
lower bound - if self.lbound < 0: - self._bound_mean = 0.0 - self._bound_scale = abs(self.lbound) - else: - self._bound_scale = self.scaling*self.num_coeffs + \ - self.get_guess_pulse_scale() - self._bound_mean = abs(self._bound_scale) + self.lbound - self._bound_scale_cond = self._BSC_LT_MEAN - - else: - # lower and upper bounds - self._bound_mean = 0.5*(self.ubound + self.lbound) - self._bound_scale = 0.5*(self.ubound - self.lbound) - self._bound_scale_cond = self._BSC_ALL - - def get_guess_pulse_scale(self): - scale = 0.0 - if self.guess_pulse is not None: - scale = max(np.amax(self.guess_pulse) - np.amin(self.guess_pulse), - np.amax(self.guess_pulse)) - return scale - - def _apply_bounds(self, pulse): - """ - Scaling the amplitudes using the tanh function if there are bounds - """ - if self._bound_scale_cond == self._BSC_ALL: - pulse = np.tanh(pulse)*self._bound_scale + self._bound_mean - return pulse - elif self._bound_scale_cond == self._BSC_GT_MEAN: - scale_where = pulse > self._bound_mean - pulse[scale_where] = (np.tanh(pulse[scale_where])*self._bound_scale - + self._bound_mean) - return pulse - elif self._bound_scale_cond == self._BSC_LT_MEAN: - scale_where = pulse < self._bound_mean - pulse[scale_where] = (np.tanh(pulse[scale_where])*self._bound_scale - + self._bound_mean) - return pulse - else: - return pulse - - -class PulseGenCrabFourier(PulseGenCrab): - """ - Generates a pulse using the Fourier basis functions, i.e. 
sin and cos - - Attributes - ---------- - freqs : float array[num_coeffs] - Frequencies for the basis functions - randomize_freqs : bool - If True (default) the some random offset is applied to the frequencies - """ - - def reset(self): - """ - reset attributes to default values - """ - PulseGenCrab.reset(self) - self.freqs = None - self.randomize_freqs = True - - def init_pulse(self, num_coeffs=None): - """ - Set the initial freq and coefficient values - """ - PulseGenCrab.init_pulse(self) - - self.init_freqs() - - def init_freqs(self): - """ - Generate the frequencies - These are the Fourier harmonics with a uniformly distributed - random offset - """ - self.freqs = np.empty(self.num_coeffs) - ff = 2*np.pi / self.pulse_time - for i in range(self.num_coeffs): - self.freqs[i] = ff*(i + 1) - - if self.randomize_freqs: - self.freqs += np.random.random(self.num_coeffs) - 0.5 - - def gen_pulse(self, coeffs=None): - """ - Generate a pulse using the Fourier basis with the freqs and - coeffs attributes. - - Parameters - ---------- - coeffs : float array[num_coeffs, num_basis_funcs] - The basis coefficient values - If given this overides the default and sets the attribute - of the same name. 
- """ - if coeffs: - self.coeffs = coeffs - - if not self._pulse_initialised: - self.init_pulse() - - pulse = np.zeros(self.num_tslots) - - for i in range(self.num_coeffs): - phase = self.freqs[i]*self.time - pulse += (self.coeffs[i, 0]*np.sin(phase) - + self.coeffs[i, 1]*np.cos(phase)) - - if self.guess_pulse_func: - pulse = self.guess_pulse_func(pulse) - if self.ramping_pulse is not None: - pulse = self._apply_ramping_pulse(pulse) - - return self._apply_bounds(pulse) diff --git a/qutip/control/pulseoptim.py b/qutip/control/pulseoptim.py deleted file mode 100644 index b96eec4cfa..0000000000 --- a/qutip/control/pulseoptim.py +++ /dev/null @@ -1,1884 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Wrapper functions that will manage the creation of the objects, -build the configuration, and execute the algorithm required to optimise -a set of ctrl pulses for a given (quantum) system. -The fidelity error is some measure of distance of the system evolution -from the given target evolution in the time allowed for the evolution. -The functions minimise this fidelity error wrt the piecewise control -amplitudes in the timeslots - -There are currently two quantum control pulse optmisations algorithms -implemented in this library. There are accessible through the methods -in this module. Both the algorithms use the scipy.optimize methods -to minimise the fidelity error with respect to to variables that define -the pulse. - -GRAPE ------ -The default algorithm (as it was implemented here first) is GRAPE -GRadient Ascent Pulse Engineering [1][2]. It uses a gradient based method such -as BFGS to minimise the fidelity error. This makes convergence very quick -when an exact gradient can be calculated, but this limits the factors that can -taken into account in the fidelity. 
- -CRAB ----- -The CRAB [3][4] algorithm was developed at the University of Ulm. -In full it is the Chopped RAndom Basis algorithm. -The main difference is that it reduces the number of optimisation variables -by defining the control pulses by expansions of basis functions, -where the variables are the coefficients. Typically a Fourier series is chosen, -i.e. the variables are the Fourier coefficients. -Therefore it does not need to compute an explicit gradient. -By default it uses the Nelder-Mead method for fidelity error minimisation. - -References ----------- -1. N Khaneja et. al. - Optimal control of coupled spin dynamics: Design of NMR pulse sequences - by gradient ascent algorithms. J. Magn. Reson. 172, 296–305 (2005). -2. Shai Machnes et.al - DYNAMO - Dynamic Framework for Quantum Optimal Control - arXiv.1011.4874 -3. Doria, P., Calarco, T. & Montangero, S. - Optimal Control Technique for Many-Body Quantum Dynamics. - Phys. Rev. Lett. 106, 1–4 (2011). -4. Caneva, T., Calarco, T. & Montangero, S. - Chopped random-basis quantum optimization. - Phys. Rev. A - At. Mol. Opt. Phys. 84, (2011). 
-""" - -import numpy as np -import warnings - -# QuTiP -from qutip import Qobj -# QuTiP control modules -import qutip.control.optimconfig as optimconfig -import qutip.control.dynamics as dynamics -import qutip.control.termcond as termcond -import qutip.control.optimizer as optimizer -import qutip.control.stats as stats -import qutip.control.errors as errors -import qutip.control.fidcomp as fidcomp -import qutip.control.propcomp as propcomp -import qutip.control.pulsegen as pulsegen -import qutip.logging_utils as logging -logger = logging.get_logger() - - -def _param_deprecation(message, stacklevel=3): - """ - Issue deprecation warning - Using stacklevel=3 will ensure message refers the function - calling with the deprecated parameter, - """ - warnings.warn(message, FutureWarning, stacklevel=stacklevel) - - -def _upper_safe(s): - try: - s = s.upper() - except AttributeError: - pass - return s - - -def optimize_pulse( - drift, ctrls, initial, target, - num_tslots=None, evo_time=None, tau=None, - amp_lbound=None, amp_ubound=None, - fid_err_targ=1e-10, min_grad=1e-10, - max_iter=500, max_wall_time=180, - alg='GRAPE', alg_params=None, - optim_params=None, optim_method='DEF', method_params=None, - optim_alg=None, max_metric_corr=None, accuracy_factor=None, - dyn_type='GEN_MAT', dyn_params=None, - prop_type='DEF', prop_params=None, - fid_type='DEF', fid_params=None, - phase_option=None, fid_err_scale_factor=None, - tslot_type='DEF', tslot_params=None, - amp_update_mode=None, - init_pulse_type='DEF', init_pulse_params=None, - pulse_scaling=1.0, pulse_offset=0.0, - ramping_pulse_type=None, ramping_pulse_params=None, - log_level=logging.NOTSET, out_file_ext=None, gen_stats=False): - """ - Optimise a control pulse to minimise the fidelity error. The dynamics of - the system in any given timeslot are governed by the combined dynamics - generator, i.e. the sum of the ``drift + ctrl_amp[j]*ctrls[j]``. 
- - The control pulse is an ``[n_ts, n_ctrls]`` array of piecewise amplitudes - Starting from an initial (typically random) pulse, a multivariable - optimisation algorithm attempts to determines the optimal values for the - control pulse to minimise the fidelity error. The fidelity error is some - measure of distance of the system evolution from the given target evolution - in the time allowed for the evolution. - - Parameters - ---------- - drift : Qobj or list of Qobj - The underlying dynamics generator of the system can provide list (of - length ``num_tslots``) for time dependent drift. - - ctrls : List of Qobj or array like [num_tslots, evo_time] - A list of control dynamics generators. These are scaled by the - amplitudes to alter the overall dynamics. Array-like input can be - provided for time dependent control generators. - - initial : Qobj - Starting point for the evolution. Typically the identity matrix. - - target : Qobj - Target transformation, e.g. gate or state, for the time evolution. - - num_tslots : integer or None - Number of timeslots. ``None`` implies that timeslots will be given in - the tau array. - - evo_time : float or None - Total time for the evolution. ``None`` implies that timeslots will be - given in the tau array. - - tau : array[num_tslots] of floats or None - Durations for the timeslots. If this is given then ``num_tslots`` and - ``evo_time`` are derived from it. ``None`` implies that timeslot - durations will be equal and calculated as ``evo_time/num_tslots``. - - amp_lbound : float or list of floats - Lower boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - amp_ubound : float or list of floats - Upper boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - fid_err_targ : float - Fidelity error target. Pulse optimisation will terminate when the - fidelity error falls below this value. 
- - mim_grad : float - Minimum gradient. When the sum of the squares of the gradients wrt to - the control amplitudes falls below this value, the optimisation - terminates, assuming local minima. - - max_iter : integer - Maximum number of iterations of the optimisation algorithm. - - max_wall_time : float - Maximum allowed elapsed time for the optimisation algorithm. - - alg : string - Algorithm to use in pulse optimisation. Options are: - - - 'GRAPE' (default) - GRadient Ascent Pulse Engineering - - 'CRAB' - Chopped RAndom Basis - - alg_params : Dictionary - Options that are specific to the algorithm see above. - - optim_params : Dictionary - The key value pairs are the attribute name and value used to set - attribute values. Note: attributes are created if they do not exist - already, and are overwritten if they do. Note: ``method_params`` are - applied afterwards and so may override these. - - optim_method : string - A ``scipy.optimize.minimize`` method that will be used to optimise the - pulse for minimum fidelity error. Note that ``FMIN``, ``FMIN_BFGS`` & - ``FMIN_L_BFGS_B`` will all result in calling these specific - ``scipy.optimize methods``. Note the ``LBFGSB`` is equivalent to - ``FMIN_L_BFGS_B`` for backwards compatibility reasons. Supplying DEF - will given alg dependent result: - - - GRAPE - Default ``optim_method`` is ``FMIN_L_BFGS_B`` - - CRAB - Default ``optim_method`` is ``FMIN`` - - method_params : dict - Parameters for the ``optim_method``. Note that where there is an - attribute of the :obj:`~qutip.control.optimizer.Optimizer` object or - the termination_conditions matching the key that attribute. - Otherwise, and in some case also, they are assumed to be method_options - for the ``scipy.optimize.minimize`` method. - - optim_alg : string - Deprecated. Use ``optim_method``. - - max_metric_corr : integer - Deprecated. Use ``method_params`` instead. - - accuracy_factor : float - Deprecated. Use ``method_params`` instead. 
- - dyn_type : string - Dynamics type, i.e. the type of matrix used to describe the dynamics. - Options are ``UNIT``, ``GEN_MAT``, ``SYMPL`` - (see :obj:`~qutip.control.dynamics.Dynamics` classes for details). - - dyn_params : dict - Parameters for the :obj:`~qutip.control.dynamics.Dynamics` object. - The key value pairs are assumed to be attribute name value pairs. - They applied after the object is created. - - prop_type : string - Propagator type i.e. the method used to calculate the propagators and - propagator gradient for each timeslot options are DEF, APPROX, DIAG, - FRECHET, AUG_MAT. DEF will use the default for the specific - ``dyn_type`` (see :obj:`~qutip.control.propcomp.PropagatorComputer` - classes for details). - - prop_params : dict - Parameters for the :obj:`~qutip.control.propcomp.PropagatorComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - fid_type : string - Fidelity error (and fidelity error gradient) computation method. - Options are DEF, UNIT, TRACEDIFF, TD_APPROX. DEF will use the default - for the specific ``dyn_type`` - (See :obj:`~qutip.control.fidcomp.FidelityComputer` classes for - details). - - fid_params : dict - Parameters for the :obj:`~qutip.control.fidcomp.FidelityComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - phase_option : string - Deprecated. Pass in ``fid_params`` instead. - - fid_err_scale_factor : float - Deprecated. Use ``scale_factor`` key in ``fid_params`` instead. - - tslot_type : string - Method for computing the dynamics generators, propagators and evolution - in the timeslots. Options: DEF, UPDATE_ALL, DYNAMIC. UPDATE_ALL is - the only one that currently works. - (See :obj:`~qutip.control.tslotcomp.TimeslotComputer` classes for - details.) - - tslot_params : dict - Parameters for the :obj:`~qutip.control.tslotcomp.TimeslotComputer` - object. 
The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - amp_update_mode : string - Deprecated. Use ``tslot_type`` instead. - - init_pulse_type : string - Type / shape of pulse(s) used to initialise the control amplitudes. - Options (GRAPE) include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW. - Default is RND. (see :obj:`~qutip.control.pulsegen.PulseGen` classes - for details). For the CRAB the this the ``guess_pulse_type``. - - init_pulse_params : dict - Parameters for the initial / guess pulse generator object. - The key value pairs are assumed to be attribute name value pairs. - They applied after the object is created. - - pulse_scaling : float - Linear scale factor for generated initial / guess pulses. By default - initial pulses are generated with amplitudes in the range (-1.0, 1.0). - These will be scaled by this parameter. - - pulse_offset : float - Linear offset for the pulse. That is this value will be added to any - initial / guess pulses generated. - - ramping_pulse_type : string - Type of pulse used to modulate the control pulse. It's intended use - for a ramping modulation, which is often required in experimental - setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was - added for this purpose. - - ramping_pulse_params : dict - Parameters for the ramping pulse generator object. The key value pairs - are assumed to be attribute name value pairs. They applied after the - object is created. - - log_level : integer - Level of messaging output from the logger. Options are attributes of - :obj:`qutip.logging_utils`, in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL. - Anything WARN or above is effectively 'quiet' execution, assuming - everything runs as expected. The default NOTSET implies that the level - will be taken from the QuTiP settings file, which by default is WARN. 
- - out_file_ext : string or None - Files containing the initial and final control pulse amplitudes are - saved to the current directory. The default name will be postfixed - with this extension. Setting this to None will suppress the output of - files. - - gen_stats : boolean - If set to True then statistics for the optimisation run will be - generated - accessible through attributes of the stats object. - - Returns - ------- - opt : OptimResult - Returns instance of :obj:`~qutip.control.optimresult.OptimResult`, - which has attributes giving the reason for termination, final fidelity - error, final evolution final amplitudes, statistics etc. - """ - if log_level == logging.NOTSET: - log_level = logger.getEffectiveLevel() - else: - logger.setLevel(log_level) - - # The parameters types are checked in create_pulse_optimizer - # so no need to do so here - # However, the deprecation management is repeated here - # so that the stack level is correct - if optim_alg is not None: - optim_method = optim_alg - _param_deprecation( - "The 'optim_alg' parameter is deprecated. " - "Use 'optim_method' instead") - - if max_metric_corr is not None: - if isinstance(method_params, dict): - if 'max_metric_corr' not in method_params: - method_params['max_metric_corr'] = max_metric_corr - else: - method_params = {'max_metric_corr': max_metric_corr} - _param_deprecation( - "The 'max_metric_corr' parameter is deprecated. " - "Use 'max_metric_corr' in method_params instead") - - if accuracy_factor is not None: - if isinstance(method_params, dict): - if 'accuracy_factor' not in method_params: - method_params['accuracy_factor'] = accuracy_factor - else: - method_params = {'accuracy_factor': accuracy_factor} - _param_deprecation( - "The 'accuracy_factor' parameter is deprecated. 
" - "Use 'accuracy_factor' in method_params instead") - - # phase_option - if phase_option is not None: - if isinstance(fid_params, dict): - if 'phase_option' not in fid_params: - fid_params['phase_option'] = phase_option - else: - fid_params = {'phase_option': phase_option} - _param_deprecation( - "The 'phase_option' parameter is deprecated. " - "Use 'phase_option' in fid_params instead") - - # fid_err_scale_factor - if fid_err_scale_factor is not None: - if isinstance(fid_params, dict): - if 'fid_err_scale_factor' not in fid_params: - fid_params['scale_factor'] = fid_err_scale_factor - else: - fid_params = {'scale_factor': fid_err_scale_factor} - _param_deprecation( - "The 'fid_err_scale_factor' parameter is deprecated. " - "Use 'scale_factor' in fid_params instead") - - # amp_update_mode - if amp_update_mode is not None: - amp_update_mode_up = _upper_safe(amp_update_mode) - if amp_update_mode_up == 'ALL': - tslot_type = 'UPDATE_ALL' - else: - tslot_type = amp_update_mode - _param_deprecation( - "The 'amp_update_mode' parameter is deprecated. 
" - "Use 'tslot_type' instead") - - optim = create_pulse_optimizer( - drift, ctrls, initial, target, - num_tslots=num_tslots, evo_time=evo_time, tau=tau, - amp_lbound=amp_lbound, amp_ubound=amp_ubound, - fid_err_targ=fid_err_targ, min_grad=min_grad, - max_iter=max_iter, max_wall_time=max_wall_time, - alg=alg, alg_params=alg_params, optim_params=optim_params, - optim_method=optim_method, method_params=method_params, - dyn_type=dyn_type, dyn_params=dyn_params, - prop_type=prop_type, prop_params=prop_params, - fid_type=fid_type, fid_params=fid_params, - init_pulse_type=init_pulse_type, init_pulse_params=init_pulse_params, - pulse_scaling=pulse_scaling, pulse_offset=pulse_offset, - ramping_pulse_type=ramping_pulse_type, - ramping_pulse_params=ramping_pulse_params, - log_level=log_level, gen_stats=gen_stats) - - dyn = optim.dynamics - - dyn.init_timeslots() - # Generate initial pulses for each control - init_amps = np.zeros([dyn.num_tslots, dyn.num_ctrls]) - - if alg == 'CRAB': - for j in range(dyn.num_ctrls): - pgen = optim.pulse_generator[j] - pgen.init_pulse() - init_amps[:, j] = pgen.gen_pulse() - else: - pgen = optim.pulse_generator - for j in range(dyn.num_ctrls): - init_amps[:, j] = pgen.gen_pulse() - - # Initialise the starting amplitudes - dyn.initialize_controls(init_amps) - - if log_level <= logging.INFO: - msg = "System configuration:\n" - dg_name = "dynamics generator" - if dyn_type == 'UNIT': - dg_name = "Hamiltonian" - if dyn.time_depend_drift: - msg += "Initial drift {}:\n".format(dg_name) - msg += str(dyn.drift_dyn_gen[0]) - else: - msg += "Drift {}:\n".format(dg_name) - msg += str(dyn.drift_dyn_gen) - for j in range(dyn.num_ctrls): - msg += "\nControl {} {}:\n".format(j+1, dg_name) - msg += str(dyn.ctrl_dyn_gen[j]) - msg += "\nInitial state / operator:\n" - msg += str(dyn.initial) - msg += "\nTarget state / operator:\n" - msg += str(dyn.target) - logger.info(msg) - - if out_file_ext is not None: - # Save initial amplitudes to a text file - pulsefile = 
"ctrl_amps_initial_" + out_file_ext - dyn.save_amps(pulsefile) - if log_level <= logging.INFO: - logger.info("Initial amplitudes output to file: " + pulsefile) - - # Start the optimisation - result = optim.run_optimization() - - if out_file_ext is not None: - # Save final amplitudes to a text file - pulsefile = "ctrl_amps_final_" + out_file_ext - dyn.save_amps(pulsefile) - if log_level <= logging.INFO: - logger.info("Final amplitudes output to file: " + pulsefile) - - return result - -def optimize_pulse_unitary( - H_d, H_c, U_0, U_targ, - num_tslots=None, evo_time=None, tau=None, - amp_lbound=None, amp_ubound=None, - fid_err_targ=1e-10, min_grad=1e-10, - max_iter=500, max_wall_time=180, - alg='GRAPE', alg_params=None, - optim_params=None, optim_method='DEF', method_params=None, - optim_alg=None, max_metric_corr=None, accuracy_factor=None, - phase_option='PSU', - dyn_params=None, prop_params=None, fid_params=None, - tslot_type='DEF', tslot_params=None, - amp_update_mode=None, - init_pulse_type='DEF', init_pulse_params=None, - pulse_scaling=1.0, pulse_offset=0.0, - ramping_pulse_type=None, ramping_pulse_params=None, - log_level=logging.NOTSET, out_file_ext=None, gen_stats=False): - """ - Optimise a control pulse to minimise the fidelity error, assuming that the - dynamics of the system are generated by unitary operators. This function - is simply a wrapper for optimize_pulse, where the appropriate options for - unitary dynamics are chosen and the parameter names are in the format - familiar to unitary dynamics The dynamics of the system in any given - timeslot are governed by the combined Hamiltonian, i.e. the sum of the - ``H_d + ctrl_amp[j]*H_c[j]`` The control pulse is an ``[n_ts, n_ctrls]`` - array of piecewise amplitudes Starting from an initial (typically random) - pulse, a multivariable optimisation algorithm attempts to determines the - optimal values for the control pulse to minimise the fidelity error The - maximum fidelity for a unitary system is 1, i.e. 
when the time evolution - resulting from the pulse is equivalent to the target. And therefore the - fidelity error is ``1 - fidelity``. - - Parameters - ---------- - H_d : Qobj or list of Qobj - Drift (aka system) the underlying Hamiltonian of the system can provide - list (of length ``num_tslots``) for time dependent drift. - - H_c : List of Qobj or array like [num_tslots, evo_time] - A list of control Hamiltonians. These are scaled by the amplitudes to - alter the overall dynamics. Array-like input can be provided for time - dependent control generators. - - U_0 : Qobj - Starting point for the evolution. Typically the identity matrix. - - U_targ : Qobj - Target transformation, e.g. gate or state, for the time evolution. - - num_tslots : integer or None - Number of timeslots. ``None`` implies that timeslots will be given in - the tau array. - - evo_time : float or None - Total time for the evolution. ``None`` implies that timeslots will be - given in the tau array. - - tau : array[num_tslots] of floats or None - Durations for the timeslots. If this is given then ``num_tslots`` and - ``evo_time`` are derived from it. ``None`` implies that timeslot - durations will be equal and calculated as ``evo_time/num_tslots``. - - amp_lbound : float or list of floats - Lower boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - amp_ubound : float or list of floats - Upper boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - fid_err_targ : float - Fidelity error target. Pulse optimisation will terminate when the - fidelity error falls below this value. - - mim_grad : float - Minimum gradient. When the sum of the squares of the gradients wrt to - the control amplitudes falls below this value, the optimisation - terminates, assuming local minima. - - max_iter : integer - Maximum number of iterations of the optimisation algorithm. 
- - max_wall_time : float - Maximum allowed elapsed time for the optimisation algorithm. - - alg : string - Algorithm to use in pulse optimisation. Options are: - - - 'GRAPE' (default) - GRadient Ascent Pulse Engineering - - 'CRAB' - Chopped RAndom Basis - - alg_params : Dictionary - options that are specific to the algorithm see above - - optim_params : Dictionary - The key value pairs are the attribute name and value used to set - attribute values. Note: attributes are created if they do not exist - already, and are overwritten if they do. Note: ``method_params`` are - applied afterwards and so may override these. - - optim_method : string - A ``scipy.optimize.minimize`` method that will be used to optimise the - pulse for minimum fidelity error Note that ``FMIN``, ``FMIN_BFGS`` & - ``FMIN_L_BFGS_B`` will all result in calling these specific - scipy.optimize methods Note the ``LBFGSB`` is equivalent to - ``FMIN_L_BFGS_B`` for backwards compatibility reasons. Supplying - ``DEF`` will given algorithm-dependent result: - - - GRAPE - Default ``optim_method`` is FMIN_L_BFGS_B - - CRAB - Default ``optim_method`` is FMIN - - method_params : dict - Parameters for the ``optim_method``. Note that where there is an - attribute of the :obj:`~qutip.control.optimizer.Optimizer` object or - the ``termination_conditions`` matching the key that attribute. - Otherwise, and in some case also, they are assumed to be - method_options for the ``scipy.optimize.minimize`` method. - - optim_alg : string - Deprecated. Use ``optim_method``. - - max_metric_corr : integer - Deprecated. Use ``method_params`` instead. - - accuracy_factor : float - Deprecated. Use ``method_params`` instead. - - phase_option : string - Determines how global phase is treated in fidelity calculations - (``fid_type='UNIT'`` only). Options: - - - PSU - global phase ignored - - SU - global phase included - - dyn_params : dict - Parameters for the :obj:`~qutip.control.dynamics.Dynamics` object. 
- The key value pairs are assumed to be attribute name value pairs. - They applied after the object is created. - - prop_params : dict - Parameters for the :obj:`~qutip.control.propcomp.PropagatorComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - fid_params : dict - Parameters for the :obj:`~qutip.control.fidcomp.FidelityComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - tslot_type : string - Method for computing the dynamics generators, propagators and evolution - in the timeslots. Options: ``DEF``, ``UPDATE_ALL``, ``DYNAMIC``. - ``UPDATE_ALL`` is the only one that currently works. (See - :obj:`~qutip.control.tslotcomp.TimeslotComputer` classes for details.) - - tslot_params : dict - Parameters for the :obj:`~qutip.control.tslotcomp.TimeslotComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - amp_update_mode : string - Deprecated. Use ``tslot_type`` instead. - - init_pulse_type : string - Type / shape of pulse(s) used to initialise the control amplitudes. - Options (GRAPE) include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW. - DEF is RND. (see :obj:`~qutip.control.pulsegen.PulseGen` classes for - details.) For the CRAB the this the guess_pulse_type. - - init_pulse_params : dict - Parameters for the initial / guess pulse generator object. The key - value pairs are assumed to be attribute name value pairs. They applied - after the object is created. - - pulse_scaling : float - Linear scale factor for generated initial / guess pulses. By default - initial pulses are generated with amplitudes in the range (-1.0, 1.0). - These will be scaled by this parameter. - - pulse_offset : float - Linear offset for the pulse. That is this value will be added to any - initial / guess pulses generated. 
- - ramping_pulse_type : string - Type of pulse used to modulate the control pulse. It's intended use - for a ramping modulation, which is often required in experimental - setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was - added for this purpose. - - ramping_pulse_params : dict - Parameters for the ramping pulse generator object. The key value pairs - are assumed to be attribute name value pairs. They applied after the - object is created. - - log_level : integer - Level of messaging output from the logger. Options are attributes of - :obj:`qutip.logging_utils` in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, assuming - everything runs as expected. The default NOTSET implies that the level - will be taken from the QuTiP settings file, which by default is WARN. - - out_file_ext : string or None - Files containing the initial and final control pulse amplitudes are - saved to the current directory. The default name will be postfixed - with this extension. Setting this to ``None`` will suppress the output - of files. - - gen_stats : boolean - If set to ``True`` then statistics for the optimisation run will be - generated - accessible through attributes of the stats object. - - Returns - ------- - opt : OptimResult - Returns instance of :obj:`~qutip.control.optimresult.OptimResult`, - which has attributes giving the reason for termination, final fidelity - error, final evolution final amplitudes, statistics etc. - """ - - # parameters are checked in create pulse optimiser - - # The deprecation management is repeated here - # so that the stack level is correct - if optim_alg is not None: - optim_method = optim_alg - _param_deprecation( - "The 'optim_alg' parameter is deprecated. 
" - "Use 'optim_method' instead") - - if max_metric_corr is not None: - if isinstance(method_params, dict): - if 'max_metric_corr' not in method_params: - method_params['max_metric_corr'] = max_metric_corr - else: - method_params = {'max_metric_corr': max_metric_corr} - _param_deprecation( - "The 'max_metric_corr' parameter is deprecated. " - "Use 'max_metric_corr' in method_params instead") - - if accuracy_factor is not None: - if isinstance(method_params, dict): - if 'accuracy_factor' not in method_params: - method_params['accuracy_factor'] = accuracy_factor - else: - method_params = {'accuracy_factor': accuracy_factor} - _param_deprecation( - "The 'accuracy_factor' parameter is deprecated. " - "Use 'accuracy_factor' in method_params instead") - - # amp_update_mode - if amp_update_mode is not None: - amp_update_mode_up = _upper_safe(amp_update_mode) - if amp_update_mode_up == 'ALL': - tslot_type = 'UPDATE_ALL' - else: - tslot_type = amp_update_mode - _param_deprecation( - "The 'amp_update_mode' parameter is deprecated. 
" - "Use 'tslot_type' instead") - - # phase_option is still valid for this method - # pass it via the fid_params - if phase_option is not None: - if fid_params is None: - fid_params = {'phase_option': phase_option} - else: - if 'phase_option' not in fid_params: - fid_params['phase_option'] = phase_option - - return optimize_pulse( - drift=H_d, ctrls=H_c, initial=U_0, target=U_targ, - num_tslots=num_tslots, evo_time=evo_time, tau=tau, - amp_lbound=amp_lbound, amp_ubound=amp_ubound, - fid_err_targ=fid_err_targ, min_grad=min_grad, - max_iter=max_iter, max_wall_time=max_wall_time, - alg=alg, alg_params=alg_params, optim_params=optim_params, - optim_method=optim_method, method_params=method_params, - dyn_type='UNIT', dyn_params=dyn_params, - prop_params=prop_params, fid_params=fid_params, - init_pulse_type=init_pulse_type, - init_pulse_params=init_pulse_params, - pulse_scaling=pulse_scaling, pulse_offset=pulse_offset, - ramping_pulse_type=ramping_pulse_type, - ramping_pulse_params=ramping_pulse_params, - log_level=log_level, out_file_ext=out_file_ext, - gen_stats=gen_stats) - - -def opt_pulse_crab( - drift, ctrls, initial, target, - num_tslots=None, evo_time=None, tau=None, - amp_lbound=None, amp_ubound=None, - fid_err_targ=1e-5, - max_iter=500, max_wall_time=180, - alg_params=None, - num_coeffs=None, init_coeff_scaling=1.0, - optim_params=None, optim_method='fmin', method_params=None, - dyn_type='GEN_MAT', dyn_params=None, - prop_type='DEF', prop_params=None, - fid_type='DEF', fid_params=None, - tslot_type='DEF', tslot_params=None, - guess_pulse_type=None, guess_pulse_params=None, - guess_pulse_scaling=1.0, guess_pulse_offset=0.0, - guess_pulse_action='MODULATE', - ramping_pulse_type=None, ramping_pulse_params=None, - log_level=logging.NOTSET, out_file_ext=None, gen_stats=False): - """ - Optimise a control pulse to minimise the fidelity error. - The dynamics of the system in any given timeslot are governed - by the combined dynamics generator, - i.e. 
the sum of the drift+ctrl_amp[j]*ctrls[j] - The control pulse is an [n_ts, n_ctrls] array of piecewise amplitudes. - The CRAB algorithm uses basis function coefficents as the variables to - optimise. It does NOT use any gradient function. - A multivariable optimisation algorithm attempts to determines the - optimal values for the control pulse to minimise the fidelity error - The fidelity error is some measure of distance of the system evolution - from the given target evolution in the time allowed for the evolution. - - Parameters - ---------- - drift : Qobj or list of Qobj - the underlying dynamics generator of the system - can provide list (of length num_tslots) for time dependent drift - - ctrls : List of Qobj or array like [num_tslots, evo_time] - a list of control dynamics generators. These are scaled by - the amplitudes to alter the overall dynamics - Array like imput can be provided for time dependent control generators - - initial : Qobj - Starting point for the evolution. Typically the identity matrix. - - target : Qobj - Target transformation, e.g. gate or state, for the time evolution. - - num_tslots : integer or None - Number of timeslots. ``None`` implies that timeslots will be given in - the tau array. - - evo_time : float or None - Total time for the evolution. ``None`` implies that timeslots will be - given in the tau array. - - tau : array[num_tslots] of floats or None - Durations for the timeslots. If this is given then ``num_tslots`` and - ``evo_time`` are dervived from it. - ``None`` implies that timeslot durations will be equal and calculated - as ``evo_time/num_tslots``. - - amp_lbound : float or list of floats - Lower boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - amp_ubound : float or list of floats - Upper boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. 
- - fid_err_targ : float - Fidelity error target. Pulse optimisation will terminate when the - fidelity error falls below this value. - - max_iter : integer - Maximum number of iterations of the optimisation algorithm. - - max_wall_time : float - Maximum allowed elapsed time for the optimisation algorithm. - - alg_params : Dictionary - Options that are specific to the algorithm see above. - - optim_params : Dictionary - The key value pairs are the attribute name and value used to set - attribute values. Note: attributes are created if they do not exist - already, and are overwritten if they do. Note: method_params are - applied afterwards and so may override these. - - coeff_scaling : float - Linear scale factor for the random basis coefficients. By default - these range from -1.0 to 1.0. Note this is overridden by alg_params - (if given there). - - num_coeffs : integer - Number of coefficients used for each basis function. Note this is - calculated automatically based on the dimension of the dynamics if not - given. It is crucial to the performane of the algorithm that it is set - as low as possible, while still giving high enough frequencies. Note - this is overridden by alg_params (if given there). - - optim_method : string - Multi-variable optimisation method. The only tested options are 'fmin' - and 'Nelder-mead'. In theory any non-gradient method implemented in - scipy.optimize.mininize could be used. - - method_params : dict - Parameters for the optim_method. Note that where there is an attribute - of the :class:`~qutip.control.optimizer.Optimizer` object or the - termination_conditions matching the key that attribute. Otherwise, - and in some case also, they are assumed to be method_options for the - ``scipy.optimize.minimize`` method. The commonly used parameter are: - - - xtol - limit on variable change for convergence - - ftol - limit on fidelity error change for convergence - - dyn_type : string - Dynamics type, i.e. 
the type of matrix used to describe the dynamics. - Options are UNIT, GEN_MAT, SYMPL (see Dynamics classes for details). - - dyn_params : dict - Parameters for the :class:`qutip.control.dynamics.Dynamics` object. - The key value pairs are assumed to be attribute name value pairs. - They applied after the object is created. - - prop_type : string - Propagator type i.e. the method used to calculate the propagtors and - propagtor gradient for each timeslot options are DEF, APPROX, DIAG, - FRECHET, AUG_MAT DEF will use the default for the specific dyn_type - (see :obj:`~qutip.control.propcomp.PropagatorComputer` classes for - details). - - prop_params : dict - Parameters for the :obj:`~qutip.control.propcomp.PropagatorComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - fid_type : string - Fidelity error (and fidelity error gradient) computation method. - Options are DEF, UNIT, TRACEDIFF, TD_APPROX. DEF will use the default - for the specific dyn_type. - (See :obj:`~qutip.control.fidcomp.FidelityComputer` classes for - details). - - fid_params : dict - Parameters for the :obj:`~qutip.control.fidcomp.FidelityComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - tslot_type : string - Method for computing the dynamics generators, propagators and evolution - in the timeslots. Options: DEF, UPDATE_ALL, DYNAMIC UPDATE_ALL is the - only one that currently works. - (See :obj:`~qutip.control.tslotcomp.TimeslotComputer` classes - for details). - - tslot_params : dict - Parameters for the :obj:`~qutip.control.tslotcomp.TimeslotComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - guess_pulse_type : string, default None - Type / shape of pulse(s) used modulate the control amplitudes. 
- Options include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW, GAUSSIAN. - - guess_pulse_params : dict - Parameters for the guess pulse generator object. The key value pairs - are assumed to be attribute name value pairs. They applied after the - object is created. - - guess_pulse_action : string, default 'MODULATE' - Determines how the guess pulse is applied to the pulse generated by the - basis expansion. Options are: MODULATE, ADD. - - pulse_scaling : float - Linear scale factor for generated guess pulses. By default initial - pulses are generated with amplitudes in the range (-1.0, 1.0). These - will be scaled by this parameter. - - pulse_offset : float - Linear offset for the pulse. That is this value will be added to any - guess pulses generated. - - ramping_pulse_type : string - Type of pulse used to modulate the control pulse. It's intended use - for a ramping modulation, which is often required in experimental - setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was - added for this purpose. - - ramping_pulse_params : dict - Parameters for the ramping pulse generator object. The key value pairs - are assumed to be attribute name value pairs. They applied after the - object is created. - - log_level : integer - level of messaging output from the logger. Options are attributes of - :obj:`qutip.logging_utils`, in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, assuming - everything runs as expected. The default NOTSET implies that the level - will be taken from the QuTiP settings file, which by default is WARN. - - out_file_ext : string or None - Files containing the initial and final control pulse. Amplitudes are - saved to the current directory. The default name will be postfixed - with this extension. Setting this to ``None`` will suppress the output - of files. 
- - gen_stats : boolean - If set to ``True`` then statistics for the optimisation run will be - generated - accessible through attributes of the stats object. - - Returns - ------- - opt : OptimResult - Returns instance of OptimResult, which has attributes giving the - reason for termination, final fidelity error, final evolution - final amplitudes, statistics etc - """ - - # The parameters are checked in create_pulse_optimizer - # so no need to do so here - - if log_level == logging.NOTSET: - log_level = logger.getEffectiveLevel() - else: - logger.setLevel(log_level) - - # build the algorithm options - if not isinstance(alg_params, dict): - alg_params = {'num_coeffs': num_coeffs, - 'init_coeff_scaling': init_coeff_scaling} - else: - if (num_coeffs is not None and 'num_coeffs' not in alg_params): - alg_params['num_coeffs'] = num_coeffs - if ( - init_coeff_scaling is not None - and 'init_coeff_scaling' not in alg_params - ): - alg_params['init_coeff_scaling'] = init_coeff_scaling - - # Build the guess pulse options - # Any options passed in the guess_pulse_params take precedence - # over the parameter values. 
- if guess_pulse_type: - if not isinstance(guess_pulse_params, dict): - guess_pulse_params = {} - if (guess_pulse_scaling is not None - and 'scaling' not in guess_pulse_params): - guess_pulse_params['scaling'] = guess_pulse_scaling - if (guess_pulse_offset is not None - and 'offset' not in guess_pulse_params): - guess_pulse_params['offset'] = guess_pulse_offset - if (guess_pulse_action is not None - and 'pulse_action' not in guess_pulse_params): - guess_pulse_params['pulse_action'] = guess_pulse_action - - return optimize_pulse( - drift, ctrls, initial, target, - num_tslots=num_tslots, evo_time=evo_time, tau=tau, - amp_lbound=amp_lbound, amp_ubound=amp_ubound, - fid_err_targ=fid_err_targ, min_grad=0.0, - max_iter=max_iter, max_wall_time=max_wall_time, - alg='CRAB', alg_params=alg_params, optim_params=optim_params, - optim_method=optim_method, method_params=method_params, - dyn_type=dyn_type, dyn_params=dyn_params, - prop_type=prop_type, prop_params=prop_params, - fid_type=fid_type, fid_params=fid_params, - tslot_type=tslot_type, tslot_params=tslot_params, - init_pulse_type=guess_pulse_type, - init_pulse_params=guess_pulse_params, - ramping_pulse_type=ramping_pulse_type, - ramping_pulse_params=ramping_pulse_params, - log_level=log_level, out_file_ext=out_file_ext, gen_stats=gen_stats) - - -def opt_pulse_crab_unitary( - H_d, H_c, U_0, U_targ, - num_tslots=None, evo_time=None, tau=None, - amp_lbound=None, amp_ubound=None, - fid_err_targ=1e-5, - max_iter=500, max_wall_time=180, - alg_params=None, - num_coeffs=None, init_coeff_scaling=1.0, - optim_params=None, optim_method='fmin', method_params=None, - phase_option='PSU', - dyn_params=None, prop_params=None, fid_params=None, - tslot_type='DEF', tslot_params=None, - guess_pulse_type=None, guess_pulse_params=None, - guess_pulse_scaling=1.0, guess_pulse_offset=0.0, - guess_pulse_action='MODULATE', - ramping_pulse_type=None, ramping_pulse_params=None, - log_level=logging.NOTSET, out_file_ext=None, gen_stats=False): - """ - 
Optimise a control pulse to minimise the fidelity error, assuming that the - dynamics of the system are generated by unitary operators. This function - is simply a wrapper for optimize_pulse, where the appropriate options for - unitary dynamics are chosen and the parameter names are in the format - familiar to unitary dynamics. The dynamics of the system in any given - timeslot are governed by the combined Hamiltonian, i.e. the sum of the - ``H_d + ctrl_amp[j]*H_c[j]`` The control pulse is an ``[n_ts, n_ctrls]`` - array of piecewise amplitudes. - - The CRAB algorithm uses basis function coefficents as the variables to - optimise. It does NOT use any gradient function. A multivariable - optimisation algorithm attempts to determines the optimal values for the - control pulse to minimise the fidelity error. The fidelity error is some - measure of distance of the system evolution from the given target evolution - in the time allowed for the evolution. - - Parameters - ---------- - - H_d : Qobj or list of Qobj - Drift (aka system) the underlying Hamiltonian of the system can provide - list (of length num_tslots) for time dependent drift. - - H_c : List of Qobj or array like [num_tslots, evo_time] - A list of control Hamiltonians. These are scaled by the amplitudes to - alter the overall dynamics. Array like imput can be provided for time - dependent control generators. - - U_0 : Qobj - Starting point for the evolution. Typically the identity matrix. - - U_targ : Qobj - Target transformation, e.g. gate or state, for the time evolution. - - num_tslots : integer or None - Number of timeslots. ``None`` implies that timeslots will be given in - the tau array. - - evo_time : float or None - Total time for the evolution. ``None`` implies that timeslots will be - given in the tau array. - - tau : array[num_tslots] of floats or None - Durations for the timeslots. If this is given then ``num_tslots`` and - ``evo_time`` are derived from it. 
``None`` implies that timeslot - durations will be equal and calculated as ``evo_time/num_tslots``. - - amp_lbound : float or list of floats - Lower boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - amp_ubound : float or list of floats - Upper boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - fid_err_targ : float - Fidelity error target. Pulse optimisation will terminate when the - fidelity error falls below this value. - - max_iter : integer - Maximum number of iterations of the optimisation algorithm. - - max_wall_time : float - Maximum allowed elapsed time for the optimisation algorithm. - - alg_params : Dictionary - Options that are specific to the algorithm see above. - - optim_params : Dictionary - The key value pairs are the attribute name and value used to set - attribute values. Note: attributes are created if they do not exist - already, and are overwritten if they do. Note: ``method_params`` are - applied afterwards and so may override these. - - coeff_scaling : float - Linear scale factor for the random basis coefficients. By default - these range from -1.0 to 1.0. Note this is overridden by - ``alg_params`` (if given there). - - num_coeffs : integer - Number of coefficients used for each basis function. Note this is - calculated automatically based on the dimension of the dynamics if not - given. It is crucial to the performance of the algorithm that it is set - as low as possible, while still giving high enough frequencies. Note - this is overridden by ``alg_params`` (if given there). - - optim_method : string - Multi-variable optimisation method. The only tested options are 'fmin' - and 'Nelder-mead'. In theory any non-gradient method implemented in - ``scipy.optimize.minimize`` could be used. - - method_params : dict - Parameters for the ``optim_method``. 
Note that where there is an - attribute of the :obj:`~qutip.control.optimizer.Optimizer` object or - the termination_conditions matching the key that attribute. Otherwise, - and in some case also, they are assumed to be method_options for the - ``scipy.optimize.minimize`` method. The commonly used parameter are: - - - xtol - limit on variable change for convergence - - ftol - limit on fidelity error change for convergence - - phase_option : string - Determines how global phase is treated in fidelity calculations - (``fid_type='UNIT'`` only). Options: - - - PSU - global phase ignored - - SU - global phase included - - dyn_params : dict - Parameters for the :obj:`~qutip.control.dynamics.Dynamics` object. - The key value pairs are assumed to be attribute name value pairs. - They applied after the object is created. - - prop_params : dict - Parameters for the :obj:`~qutip.control.propcomp.PropagatorComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - fid_params : dict - Parameters for the :obj:`~qutip.control.fidcomp.FidelityComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - tslot_type : string - Method for computing the dynamics generators, propagators and evolution - in the timeslots. Options: DEF, UPDATE_ALL, DYNAMIC. UPDATE_ALL is - the only one that currently works. - (See :obj:`~qutip.control.tslotcomp.TimeslotComputer` classes for - details). - - tslot_params : dict - Parameters for the :obj:`~qutip.control.tslotcomp.TimeslotComputer` - object. The key value pairs are assumed to be attribute name value - pairs. They applied after the object is created. - - guess_pulse_type : string, optional - Type / shape of pulse(s) used modulate the control amplitudes. - Options include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW, GAUSSIAN. 
- - guess_pulse_params : dict - Parameters for the guess pulse generator object. The key value pairs - are assumed to be attribute name value pairs. They applied after the - object is created. - - guess_pulse_action : string, 'MODULATE' - Determines how the guess pulse is applied to the pulse generated by the - basis expansion. Options are: MODULATE, ADD. - - pulse_scaling : float - Linear scale factor for generated guess pulses. By default initial - pulses are generated with amplitudes in the range (-1.0, 1.0). These - will be scaled by this parameter. - - pulse_offset : float - Linear offset for the pulse. That is this value will be added to any - guess pulses generated. - - ramping_pulse_type : string - Type of pulse used to modulate the control pulse. It's intended use - for a ramping modulation, which is often required in experimental - setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was - added for this purpose. - - ramping_pulse_params : dict - Parameters for the ramping pulse generator object. The key value pairs - are assumed to be attribute name value pairs. They applied after the - object is created. - - log_level : integer - Level of messaging output from the logger. Options are attributes of - :obj:`qutip.logging_utils`, in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL. - Anything WARN or above is effectively 'quiet' execution, assuming - everything runs as expected. The default NOTSET implies that the level - will be taken from the QuTiP settings file, which by default is WARN. - - out_file_ext : string or None - Files containing the initial and final control pulse amplitudes are - saved to the current directory. The default name will be postfixed - with this extension. Setting this to None will suppress the output of - files. 
- - gen_stats : boolean - If set to ``True`` then statistics for the optimisation run will be - generated - accessible through attributes of the stats object. - - Returns - ------- - opt : OptimResult - Returns instance of :obj:`~qutip.control.optimresult.OptimResult`, - which has attributes giving the reason for termination, final fidelity - error, final evolution final amplitudes, statistics etc. - """ - - # The parameters are checked in create_pulse_optimizer - # so no need to do so here - - if log_level == logging.NOTSET: - log_level = logger.getEffectiveLevel() - else: - logger.setLevel(log_level) - - # build the algorithm options - if not isinstance(alg_params, dict): - alg_params = {'num_coeffs': num_coeffs, - 'init_coeff_scaling': init_coeff_scaling} - else: - if (num_coeffs is not None - and 'num_coeffs' not in alg_params): - alg_params['num_coeffs'] = num_coeffs - if (init_coeff_scaling is not None - and 'init_coeff_scaling' not in alg_params): - alg_params['init_coeff_scaling'] = init_coeff_scaling - - # Build the guess pulse options - # Any options passed in the guess_pulse_params take precedence - # over the parameter values. 
- if guess_pulse_type: - if not isinstance(guess_pulse_params, dict): - guess_pulse_params = {} - if (guess_pulse_scaling is not None - and 'scaling' not in guess_pulse_params): - guess_pulse_params['scaling'] = guess_pulse_scaling - if (guess_pulse_offset is not None - and 'offset' not in guess_pulse_params): - guess_pulse_params['offset'] = guess_pulse_offset - if (guess_pulse_action is not None - and 'pulse_action' not in guess_pulse_params): - guess_pulse_params['pulse_action'] = guess_pulse_action - - return optimize_pulse_unitary( - H_d, H_c, U_0, U_targ, - num_tslots=num_tslots, evo_time=evo_time, tau=tau, - amp_lbound=amp_lbound, amp_ubound=amp_ubound, - fid_err_targ=fid_err_targ, min_grad=0.0, - max_iter=max_iter, max_wall_time=max_wall_time, - alg='CRAB', alg_params=alg_params, optim_params=optim_params, - optim_method=optim_method, method_params=method_params, - phase_option=phase_option, - dyn_params=dyn_params, prop_params=prop_params, fid_params=fid_params, - tslot_type=tslot_type, tslot_params=tslot_params, - init_pulse_type=guess_pulse_type, - init_pulse_params=guess_pulse_params, - ramping_pulse_type=ramping_pulse_type, - ramping_pulse_params=ramping_pulse_params, - log_level=log_level, out_file_ext=out_file_ext, gen_stats=gen_stats) - - -def create_pulse_optimizer( - drift, ctrls, initial, target, - num_tslots=None, evo_time=None, tau=None, - amp_lbound=None, amp_ubound=None, - fid_err_targ=1e-10, min_grad=1e-10, - max_iter=500, max_wall_time=180, - alg='GRAPE', alg_params=None, - optim_params=None, optim_method='DEF', method_params=None, - optim_alg=None, max_metric_corr=None, accuracy_factor=None, - dyn_type='GEN_MAT', dyn_params=None, - prop_type='DEF', prop_params=None, - fid_type='DEF', fid_params=None, - phase_option=None, fid_err_scale_factor=None, - tslot_type='DEF', tslot_params=None, - amp_update_mode=None, - init_pulse_type='DEF', init_pulse_params=None, - pulse_scaling=1.0, pulse_offset=0.0, - ramping_pulse_type=None, 
ramping_pulse_params=None, - log_level=logging.NOTSET, gen_stats=False): - """ - Generate the objects of the appropriate subclasses required for the pulse - optmisation based on the parameters given Note this method may be - preferable to calling optimize_pulse if more detailed configuration is - required before running the optmisation algorthim, or the algorithm will be - run many times, for instances when trying to finding global the optimum or - minimum time optimisation - - Parameters - ---------- - drift : Qobj or list of Qobj - The underlying dynamics generator of the system can provide list (of - length num_tslots) for time dependent drift. - - ctrls : List of Qobj or array like [num_tslots, evo_time] - A list of control dynamics generators. These are scaled by the - amplitudes to alter the overall dynamics. Array-like input can be - provided for time dependent control generators. - - initial : Qobj - Starting point for the evolution. Typically the identity matrix. - - target : Qobj - Target transformation, e.g. gate or state, for the time evolution. - - num_tslots : integer or None - Number of timeslots. ``None`` implies that timeslots will be given in - the tau array. - - evo_time : float or None - Total time for the evolution. ``None`` implies that timeslots will be - given in the tau array. - - tau : array[num_tslots] of floats or None - Durations for the timeslots. If this is given then ``num_tslots`` and - ``evo_time`` are dervived from it. ``None`` implies that timeslot - durations will be equal and calculated as ``evo_time/num_tslots``. - - amp_lbound : float or list of floats - Lower boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - amp_ubound : float or list of floats - Upper boundaries for the control amplitudes. Can be a scalar value - applied to all controls or a list of bounds for each control. - - fid_err_targ : float - Fidelity error target. 
Pulse optimisation will terminate when the - fidelity error falls below this value. - - mim_grad : float - Minimum gradient. When the sum of the squares of the gradients wrt to - the control amplitudes falls below this value, the optimisation - terminates, assuming local minima. - - max_iter : integer - Maximum number of iterations of the optimisation algorithm. - - max_wall_time : float - Maximum allowed elapsed time for the optimisation algorithm. - - alg : string - Algorithm to use in pulse optimisation. - Options are: - - - 'GRAPE' (default) - GRadient Ascent Pulse Engineering - - 'CRAB' - Chopped RAndom Basis - - alg_params : Dictionary - options that are specific to the algorithm see above - - optim_params : Dictionary - The key value pairs are the attribute name and value used to set - attribute values. Note: attributes are created if they do not exist - already, and are overwritten if they do. Note: method_params are - applied afterwards and so may override these. - - optim_method : string - a scipy.optimize.minimize method that will be used to optimise - the pulse for minimum fidelity error - Note that FMIN, FMIN_BFGS & FMIN_L_BFGS_B will all result - in calling these specific scipy.optimize methods - Note the LBFGSB is equivalent to FMIN_L_BFGS_B for backwards - capatibility reasons. - Supplying DEF will given alg dependent result: - - - GRAPE - Default optim_method is FMIN_L_BFGS_B - - CRAB - Default optim_method is Nelder-Mead - - method_params : dict - Parameters for the optim_method. - Note that where there is an attribute of the - :class:`~qutip.control.optimizer.Optimizer` object or the - termination_conditions matching the key that attribute. Otherwise, - and in some case also, they are assumed to be method_options - for the scipy.optimize.minimize method. - - optim_alg : string - Deprecated. Use optim_method. - - max_metric_corr : integer - Deprecated. Use method_params instead - - accuracy_factor : float - Deprecated. 
Use method_params instead - - dyn_type : string - Dynamics type, i.e. the type of matrix used to describe - the dynamics. Options are UNIT, GEN_MAT, SYMPL - (see Dynamics classes for details) - - dyn_params : dict - Parameters for the Dynamics object - The key value pairs are assumed to be attribute name value pairs - They applied after the object is created - - prop_type : string - Propagator type i.e. the method used to calculate the - propagtors and propagtor gradient for each timeslot - options are DEF, APPROX, DIAG, FRECHET, AUG_MAT - DEF will use the default for the specific dyn_type - (see PropagatorComputer classes for details) - - prop_params : dict - Parameters for the PropagatorComputer object - The key value pairs are assumed to be attribute name value pairs - They applied after the object is created - - fid_type : string - Fidelity error (and fidelity error gradient) computation method - Options are DEF, UNIT, TRACEDIFF, TD_APPROX - DEF will use the default for the specific dyn_type - (See FidelityComputer classes for details) - - fid_params : dict - Parameters for the FidelityComputer object - The key value pairs are assumed to be attribute name value pairs - They applied after the object is created - - phase_option : string - Deprecated. Pass in fid_params instead. - - fid_err_scale_factor : float - Deprecated. Use scale_factor key in fid_params instead. - - tslot_type : string - Method for computing the dynamics generators, propagators and - evolution in the timeslots. - Options: DEF, UPDATE_ALL, DYNAMIC - UPDATE_ALL is the only one that currently works - (See TimeslotComputer classes for details) - - tslot_params : dict - Parameters for the TimeslotComputer object. - The key value pairs are assumed to be attribute name value pairs. - They applied after the object is created. - - amp_update_mode : string - Deprecated. Use tslot_type instead. - - init_pulse_type : string - type / shape of pulse(s) used to initialise the - the control amplitudes. 
- Options (GRAPE) include: - - RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW - DEF is RND - - (see PulseGen classes for details) - For the CRAB the this the guess_pulse_type. - - init_pulse_params : dict - Parameters for the initial / guess pulse generator object. - The key value pairs are assumed to be attribute name value pairs. - They applied after the object is created. - - pulse_scaling : float - Linear scale factor for generated initial / guess pulses - By default initial pulses are generated with amplitudes in the - range (-1.0, 1.0). These will be scaled by this parameter - - pulse_offset : float - Linear offset for the pulse. That is this value will be added - to any initial / guess pulses generated. - - ramping_pulse_type : string - Type of pulse used to modulate the control pulse. - It's intended use for a ramping modulation, which is often required in - experimental setups. - This is only currently implemented in CRAB. - GAUSSIAN_EDGE was added for this purpose. - - ramping_pulse_params : dict - Parameters for the ramping pulse generator object. - The key value pairs are assumed to be attribute name value pairs. - They applied after the object is created - - log_level : integer - level of messaging output from the logger. - Options are attributes of qutip.logging_utils, - in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, - assuming everything runs as expected. - The default NOTSET implies that the level will be taken from - the QuTiP settings file, which by default is WARN - - gen_stats : boolean - if set to True then statistics for the optimisation - run will be generated - accessible through attributes - of the stats object - - Returns - ------- - opt : Optimizer - Instance of an Optimizer, through which the - Config, Dynamics, PulseGen, and TerminationConditions objects - can be accessed as attributes. 
- The PropagatorComputer, FidelityComputer and TimeslotComputer objects - can be accessed as attributes of the Dynamics object, - e.g. optimizer.dynamics.fid_computer The optimisation can be run - through the optimizer.run_optimization - - """ - - # check parameters - ctrls = dynamics._check_ctrls_container(ctrls) - dynamics._check_drift_dyn_gen(drift) - - if not isinstance(initial, Qobj): - raise TypeError("initial must be a Qobj") - - if not isinstance(target, Qobj): - raise TypeError("target must be a Qobj") - - # Deprecated parameter management - if optim_alg is not None: - optim_method = optim_alg - _param_deprecation( - "The 'optim_alg' parameter is deprecated. " - "Use 'optim_method' instead") - - if max_metric_corr is not None: - if isinstance(method_params, dict): - if 'max_metric_corr' not in method_params: - method_params['max_metric_corr'] = max_metric_corr - else: - method_params = {'max_metric_corr': max_metric_corr} - _param_deprecation( - "The 'max_metric_corr' parameter is deprecated. " - "Use 'max_metric_corr' in method_params instead") - - if accuracy_factor is not None: - if isinstance(method_params, dict): - if 'accuracy_factor' not in method_params: - method_params['accuracy_factor'] = accuracy_factor - else: - method_params = {'accuracy_factor': accuracy_factor} - _param_deprecation( - "The 'accuracy_factor' parameter is deprecated. " - "Use 'accuracy_factor' in method_params instead") - - # phase_option - if phase_option is not None: - if isinstance(fid_params, dict): - if 'phase_option' not in fid_params: - fid_params['phase_option'] = phase_option - else: - fid_params = {'phase_option': phase_option} - _param_deprecation( - "The 'phase_option' parameter is deprecated. 
" - "Use 'phase_option' in fid_params instead") - - # fid_err_scale_factor - if fid_err_scale_factor is not None: - if isinstance(fid_params, dict): - if 'fid_err_scale_factor' not in fid_params: - fid_params['scale_factor'] = fid_err_scale_factor - else: - fid_params = {'scale_factor': fid_err_scale_factor} - _param_deprecation( - "The 'fid_err_scale_factor' parameter is deprecated. " - "Use 'scale_factor' in fid_params instead") - - # amp_update_mode - if amp_update_mode is not None: - amp_update_mode_up = _upper_safe(amp_update_mode) - if amp_update_mode_up == 'ALL': - tslot_type = 'UPDATE_ALL' - else: - tslot_type = amp_update_mode - _param_deprecation( - "The 'amp_update_mode' parameter is deprecated. " - "Use 'tslot_type' instead") - - # set algorithm defaults - alg_up = _upper_safe(alg) - if alg is None: - raise errors.UsageError( - "Optimisation algorithm must be specified through 'alg' parameter") - elif alg_up == 'GRAPE': - if optim_method is None or optim_method.upper() == 'DEF': - optim_method = 'FMIN_L_BFGS_B' - if init_pulse_type is None or init_pulse_type.upper() == 'DEF': - init_pulse_type = 'RND' - elif alg_up == 'CRAB': - if optim_method is None or optim_method.upper() == 'DEF': - optim_method = 'FMIN' - if prop_type is None or prop_type.upper() == 'DEF': - prop_type = 'APPROX' - if init_pulse_type is None or init_pulse_type.upper() == 'DEF': - init_pulse_type = None - else: - raise errors.UsageError( - "No option for pulse optimisation algorithm alg={}".format(alg)) - - cfg = optimconfig.OptimConfig() - cfg.optim_method = optim_method - cfg.dyn_type = dyn_type - cfg.prop_type = prop_type - cfg.fid_type = fid_type - cfg.init_pulse_type = init_pulse_type - - if log_level == logging.NOTSET: - log_level = logger.getEffectiveLevel() - else: - logger.setLevel(log_level) - - cfg.log_level = log_level - - # Create the Dynamics instance - if dyn_type == 'GEN_MAT' or dyn_type is None or dyn_type == '': - dyn = dynamics.DynamicsGenMat(cfg) - elif dyn_type 
== 'UNIT': - dyn = dynamics.DynamicsUnitary(cfg) - elif dyn_type == 'SYMPL': - dyn = dynamics.DynamicsSymplectic(cfg) - else: - raise errors.UsageError("No option for dyn_type: " + dyn_type) - dyn.apply_params(dyn_params) - dyn._drift_dyn_gen_checked = True - dyn._ctrl_dyn_gen_checked = True - - # Create the PropagatorComputer instance - # The default will be typically be the best option - if prop_type == 'DEF' or prop_type is None or prop_type == '': - # Do nothing use the default for the Dynamics - pass - elif prop_type == 'APPROX': - if not isinstance(dyn.prop_computer, propcomp.PropCompApproxGrad): - dyn.prop_computer = propcomp.PropCompApproxGrad(dyn) - elif prop_type == 'DIAG': - if not isinstance(dyn.prop_computer, propcomp.PropCompDiag): - dyn.prop_computer = propcomp.PropCompDiag(dyn) - elif prop_type == 'AUG_MAT': - if not isinstance(dyn.prop_computer, propcomp.PropCompAugMat): - dyn.prop_computer = propcomp.PropCompAugMat(dyn) - elif prop_type == 'FRECHET': - if not isinstance(dyn.prop_computer, propcomp.PropCompFrechet): - dyn.prop_computer = propcomp.PropCompFrechet(dyn) - else: - raise errors.UsageError("No option for prop_type: " + prop_type) - dyn.prop_computer.apply_params(prop_params) - - # Create the FidelityComputer instance - # The default will be typically be the best option - # Note: the FidCompTraceDiffApprox is a subclass of FidCompTraceDiff - # so need to check this type first - fid_type_up = _upper_safe(fid_type) - if fid_type_up == 'DEF' or fid_type_up is None or fid_type_up == '': - # None given, use the default for the Dynamics - pass - elif fid_type_up == 'TDAPPROX': - if not isinstance(dyn.fid_computer, fidcomp.FidCompTraceDiffApprox): - dyn.fid_computer = fidcomp.FidCompTraceDiffApprox(dyn) - elif fid_type_up == 'TRACEDIFF': - if not isinstance(dyn.fid_computer, fidcomp.FidCompTraceDiff): - dyn.fid_computer = fidcomp.FidCompTraceDiff(dyn) - elif fid_type_up == 'UNIT': - if not isinstance(dyn.fid_computer, fidcomp.FidCompUnitary): - 
dyn.fid_computer = fidcomp.FidCompUnitary(dyn) - else: - raise errors.UsageError("No option for fid_type: " + fid_type) - dyn.fid_computer.apply_params(fid_params) - - # Currently the only working option for tslot computer is - # TSlotCompUpdateAll. - # so just apply the parameters - dyn.tslot_computer.apply_params(tslot_params) - - # Create the Optimiser instance - optim_method_up = _upper_safe(optim_method) - if optim_method is None or optim_method_up == '': - raise errors.UsageError("Optimisation method must be specified " - "via 'optim_method' parameter") - elif optim_method_up == 'FMIN_BFGS': - optim = optimizer.OptimizerBFGS(cfg, dyn) - elif optim_method_up == 'LBFGSB' or optim_method_up == 'FMIN_L_BFGS_B': - optim = optimizer.OptimizerLBFGSB(cfg, dyn) - elif optim_method_up == 'FMIN': - if alg_up == 'CRAB': - optim = optimizer.OptimizerCrabFmin(cfg, dyn) - else: - raise errors.UsageError( - "Invalid optim_method '{}' for '{}' algorthim".format( - optim_method, alg)) - else: - # Assume that the optim_method is a valid - # scipy.optimize.minimize method - # Choose an optimiser based on the algorithm - if alg_up == 'CRAB': - optim = optimizer.OptimizerCrab(cfg, dyn) - else: - optim = optimizer.Optimizer(cfg, dyn) - - optim.alg = alg - optim.method = optim_method - optim.amp_lbound = amp_lbound - optim.amp_ubound = amp_ubound - optim.apply_params(optim_params) - - # Create the TerminationConditions instance - tc = termcond.TerminationConditions() - tc.fid_err_targ = fid_err_targ - tc.min_gradient_norm = min_grad - tc.max_iterations = max_iter - tc.max_wall_time = max_wall_time - optim.termination_conditions = tc - - optim.apply_method_params(method_params) - - if gen_stats: - # Create a stats object - # Note that stats object is optional - # if the Dynamics and Optimizer stats attribute is not set - # then no stats will be collected, which could improve performance - if amp_update_mode == 'DYNAMIC': - sts = stats.StatsDynTsUpdate() - else: - sts = stats.Stats() 
- - dyn.stats = sts - optim.stats = sts - - # Configure the dynamics - dyn.drift_dyn_gen = drift - dyn.ctrl_dyn_gen = ctrls - dyn.initial = initial - dyn.target = target - if tau is None: - # Check that parameters have been supplied to generate the - # timeslot durations - try: - evo_time / num_tslots - except Exception: - raise errors.UsageError( - "Either the timeslot durations should be supplied as an " - "array 'tau' or the number of timeslots 'num_tslots' " - "and the evolution time 'evo_time' must be given.") - - dyn.num_tslots = num_tslots - dyn.evo_time = evo_time - else: - dyn.tau = tau - - # this function is called, so that the num_ctrls attribute will be set - n_ctrls = dyn.num_ctrls - - ramping_pgen = None - if ramping_pulse_type: - ramping_pgen = pulsegen.create_pulse_gen( - pulse_type=ramping_pulse_type, dyn=dyn, - pulse_params=ramping_pulse_params) - if alg_up == 'CRAB': - # Create a pulse generator for each ctrl - crab_pulse_params = None - num_coeffs = None - init_coeff_scaling = None - if isinstance(alg_params, dict): - num_coeffs = alg_params.get('num_coeffs') - init_coeff_scaling = alg_params.get('init_coeff_scaling') - if 'crab_pulse_params' in alg_params: - crab_pulse_params = alg_params.get('crab_pulse_params') - - guess_pulse_type = init_pulse_type - if guess_pulse_type: - guess_pulse_action = None - guess_pgen = pulsegen.create_pulse_gen( - pulse_type=guess_pulse_type, dyn=dyn) - guess_pgen.scaling = pulse_scaling - guess_pgen.offset = pulse_offset - if init_pulse_params is not None: - guess_pgen.apply_params(init_pulse_params) - guess_pulse_action = init_pulse_params.get('pulse_action') - - optim.pulse_generator = [] - for j in range(n_ctrls): - crab_pgen = pulsegen.PulseGenCrabFourier( - dyn=dyn, num_coeffs=num_coeffs) - if init_coeff_scaling is not None: - crab_pgen.scaling = init_coeff_scaling - if isinstance(crab_pulse_params, dict): - crab_pgen.apply_params(crab_pulse_params) - - lb = None - if amp_lbound: - if isinstance(amp_lbound, 
list): - try: - lb = amp_lbound[j] - except Exception: - lb = amp_lbound[-1] - else: - lb = amp_lbound - ub = None - if amp_ubound: - if isinstance(amp_ubound, list): - try: - ub = amp_ubound[j] - except Exception: - ub = amp_ubound[-1] - else: - ub = amp_ubound - crab_pgen.lbound = lb - crab_pgen.ubound = ub - - if guess_pulse_type: - guess_pgen.lbound = lb - guess_pgen.ubound = ub - crab_pgen.guess_pulse = guess_pgen.gen_pulse() - if guess_pulse_action: - crab_pgen.guess_pulse_action = guess_pulse_action - - if ramping_pgen: - crab_pgen.ramping_pulse = ramping_pgen.gen_pulse() - - optim.pulse_generator.append(crab_pgen) - # This is just for the debug message now - pgen = optim.pulse_generator[0] - - else: - # Create a pulse generator of the type specified - pgen = pulsegen.create_pulse_gen(pulse_type=init_pulse_type, dyn=dyn, - pulse_params=init_pulse_params) - pgen.scaling = pulse_scaling - pgen.offset = pulse_offset - pgen.lbound = amp_lbound - pgen.ubound = amp_ubound - - optim.pulse_generator = pgen - - if log_level <= logging.DEBUG: - logger.debug( - "Optimisation config summary...\n" - " object classes:\n" - " optimizer: " + optim.__class__.__name__ + - "\n dynamics: " + dyn.__class__.__name__ + - "\n tslotcomp: " + dyn.tslot_computer.__class__.__name__ + - "\n fidcomp: " + dyn.fid_computer.__class__.__name__ + - "\n propcomp: " + dyn.prop_computer.__class__.__name__ + - "\n pulsegen: " + pgen.__class__.__name__) - return optim diff --git a/qutip/control/stats.py b/qutip/control/stats.py deleted file mode 100644 index d5e3b33b4c..0000000000 --- a/qutip/control/stats.py +++ /dev/null @@ -1,383 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Statistics for the optimisation -Note that some of the stats here are redundant copies from the optimiser -used here for calculations -""" -import numpy as np 
-import datetime - - -class Stats(object): - """ - Base class for all optimisation statistics - Used for configurations where all timeslots are updated each iteration - e.g. exact gradients - Note that all times are generated using timeit.default_timer() and are - in seconds - - Attributes - ---------- - dyn_gen_name : string - Text used in some report functions. - Makes sense to set it to 'Hamiltonian' when using unitary dynamics - Default is simply 'dynamics generator' - - num_iter : integer - Number of iterations of the optimisation algorithm - - wall_time_optim_start : float - Start time for the optimisation - - wall_time_optim_end : float - End time for the optimisation - - wall_time_optim : float - Time elasped during the optimisation - - wall_time_dyn_gen_compute : float - Total wall (elasped) time computing combined dynamics generator - (for example combining drift and control Hamiltonians) - - wall_time_prop_compute : float - Total wall (elasped) time computing propagators, that is the - time evolution from one timeslot to the next - Includes calculating the propagator gradient for exact gradients - - wall_time_fwd_prop_compute : float - Total wall (elasped) time computing combined forward propagation, - that is the time evolution from the start to a specific timeslot. - Excludes calculating the propagators themselves - - wall_time_onwd_prop_compute : float - Total wall (elasped) time computing combined onward propagation, - that is the time evolution from a specific timeslot to the end time. - Excludes calculating the propagators themselves - - wall_time_gradient_compute : float - Total wall (elasped) time computing the fidelity error gradient. 
- Excludes calculating the propagator gradients (in exact gradient - methods) - - num_fidelity_func_calls : integer - Number of calls to fidelity function by the optimisation algorithm - - num_grad_func_calls : integer - Number of calls to gradient function by the optimisation algorithm - - num_tslot_recompute : integer - Number of time the timeslot evolution is recomputed - (It is only computed if any amplitudes changed since the last call) - - num_fidelity_computes : integer - Number of time the fidelity is computed - (It is only computed if any amplitudes changed since the last call) - - num_grad_computes : integer - Number of time the gradient is computed - (It is only computed if any amplitudes changed since the last call) - - num_ctrl_amp_updates : integer - Number of times the control amplitudes are updated - - mean_num_ctrl_amp_updates_per_iter : float - Mean number of control amplitude updates per iteration - - num_timeslot_changes : integer - Number of times the amplitudes of a any control in a timeslot changes - - mean_num_timeslot_changes_per_update : float - Mean average number of timeslot amplitudes that are changed per update - - num_ctrl_amp_changes : integer - Number of times individual control amplitudes that are changed - - mean_num_ctrl_amp_changes_per_update : float - Mean average number of control amplitudes that are changed per update - """ - def __init__(self): - self.reset() - - def reset(self): - self.dyn_gen_name = "dynamics generator" - self.clear() - - def clear(self): - self.num_iter = 0 - # Duration attributes - self.wall_time_optim_start = 0.0 - self.wall_time_optim_end = 0.0 - self.wall_time_optim = 0.0 - self.wall_time_dyn_gen_compute = 0.0 - self.wall_time_prop_compute = 0.0 - self.wall_time_fwd_prop_compute = 0.0 - self.wall_time_onwd_prop_compute = 0.0 - self.wall_time_gradient_compute = 0.0 - # Fidelity and gradient function calls and computes - self.num_fidelity_func_calls = 0 - self.num_grad_func_calls = 0 - 
self.num_tslot_recompute = 0 - self.num_fidelity_computes = 0 - self.num_grad_computes = 0 - # Control amplitudes - self.num_ctrl_amp_updates = 0 - self.mean_num_ctrl_amp_updates_per_iter = 0.0 - self.num_timeslot_changes = 0 - self.mean_num_timeslot_changes_per_update = 0.0 - self.num_ctrl_amp_changes = 0 - self.mean_num_ctrl_amp_changes_per_update = 0.0 - - def calculate(self): - """ - Perform the calculations (e.g. averages) that are required on the stats - Should be called before calling report - """ - # If the optimation is still running then the optimisation - # time is the time so far - if self.wall_time_optim_end > 0.0: - self.wall_time_optim = \ - self.wall_time_optim_end - self.wall_time_optim_start - - try: - self.mean_num_ctrl_amp_updates_per_iter = \ - self.num_ctrl_amp_updates / float(self.num_iter) - except Exception: - self.mean_num_ctrl_amp_updates_per_iter = np.NaN - - try: - self.mean_num_timeslot_changes_per_update = \ - self.num_timeslot_changes / float(self.num_ctrl_amp_updates) - except Exception: - self.mean_num_timeslot_changes_per_update = np.NaN - - try: - self.mean_num_ctrl_amp_changes_per_update = \ - self.num_ctrl_amp_changes / float(self.num_ctrl_amp_updates) - except Exception: - self.mean_num_ctrl_amp_changes_per_update = np.NaN - - def _format_datetime(self, t, tot=0.0): - dtStr = str(datetime.timedelta(seconds=t)) - if tot > 0: - percent = 100*t/tot - dtStr += " ({:03.2f}%)".format(percent) - return dtStr - - def report(self): - """ - Print a report of the stats to the console - """ - print("\n------------------------------------" - "\n---- Control optimisation stats ----") - self.report_timings() - self.report_func_calls() - self.report_amp_updates() - print("------------------------------------") - - def report_timings(self): - print("**** Timings (HH:MM:SS.US) ****") - tot = self.wall_time_optim - print("Total wall time elapsed during optimisation: " + - self._format_datetime(tot)) - print("Wall time computing Hamiltonians: " + 
- self._format_datetime(self.wall_time_dyn_gen_compute, tot)) - print("Wall time computing propagators: " + - self._format_datetime(self.wall_time_prop_compute, tot)) - print("Wall time computing forward propagation: " + - self._format_datetime(self.wall_time_fwd_prop_compute, tot)) - print("Wall time computing onward propagation: " + - self._format_datetime(self.wall_time_onwd_prop_compute, tot)) - print("Wall time computing gradient: " + - self._format_datetime(self.wall_time_gradient_compute, tot)) - print("") - - def report_func_calls(self): - print("**** Iterations and function calls ****") - print("Number of iterations: {}".format(self.num_iter)) - print("Number of fidelity function calls: " - "{}".format(self.num_fidelity_func_calls)) - print("Number of times fidelity is computed: " - "{}".format(self.num_fidelity_computes)) - print("Number of gradient function calls: " - "{}".format(self.num_grad_func_calls)) - print("Number of times gradients are computed: " - "{}".format(self.num_grad_computes)) - print("Number of times timeslot evolution is recomputed: " - "{}".format(self.num_tslot_recompute)) - print("") - - def report_amp_updates(self): - print("**** Control amplitudes ****") - print("Number of control amplitude updates: " - "{}".format(self.num_ctrl_amp_updates)) - print("Mean number of updates per iteration: " - "{}".format(self.mean_num_ctrl_amp_updates_per_iter)) - print("Number of timeslot values changed: " - "{}".format(self.num_timeslot_changes)) - print("Mean number of timeslot changes per update: " - "{}".format(self.mean_num_timeslot_changes_per_update)) - print("Number of amplitude values changed: " - "{}".format(self.num_ctrl_amp_changes)) - print("Mean number of amplitude changes per update: " - "{}".format(self.mean_num_ctrl_amp_changes_per_update)) - - -class StatsDynTsUpdate(Stats): - """ - Optimisation stats class for configurations where all timeslots are not - necessarily updated at each iteration. 
In this case it may be interesting - to know how many Hamiltions etc are computed each ctrl amplitude update - - Attributes - ---------- - num_dyn_gen_computes : integer - Total number of dynamics generator (Hamiltonian) computations, - that is combining drift and control dynamics to calculate the - combined dynamics generator for the timeslot - - mean_num_dyn_gen_computes_per_update : float - # Mean average number of dynamics generator computations per update - - mean_wall_time_dyn_gen_compute : float - # Mean average time to compute a timeslot dynamics generator - - num_prop_computes : integer - Total number of propagator (and propagator gradient for exact - gradient types) computations - - mean_num_prop_computes_per_update : float - Mean average number of propagator computations per update - - mean_wall_time_prop_compute : float - Mean average time to compute a propagator (and its gradient) - - num_fwd_prop_step_computes : integer - Total number of steps (matrix product) computing forward propagation - - mean_num_fwd_prop_step_computes_per_update : float - Mean average number of steps computing forward propagation - - mean_wall_time_fwd_prop_compute : float - Mean average time to compute forward propagation - - num_onwd_prop_step_computes : integer - Total number of steps (matrix product) computing onward propagation - - mean_num_onwd_prop_step_computes_per_update : float - Mean average number of steps computing onward propagation - - mean_wall_time_onwd_prop_compute - Mean average time to compute onward propagation - """ - - def __init__(self): - self.reset() - - def reset(self): - Stats.reset(self) - # Dynamics generators (Hamiltonians) - self.num_dyn_gen_computes = 0 - self.mean_num_dyn_gen_computes_per_update = 0.0 - self.mean_wall_time_dyn_gen_compute = 0.0 - # **** Propagators ***** - self.num_prop_computes = 0 - self.mean_num_prop_computes_per_update = 0.0 - self.mean_wall_time_prop_compute = 0.0 - # **** Forward propagation **** - 
self.num_fwd_prop_step_computes = 0 - self.mean_num_fwd_prop_step_computes_per_update = 0.0 - self.mean_wall_time_fwd_prop_compute = 0.0 - # **** onward propagation **** - self.num_onwd_prop_step_computes = 0 - self.mean_num_onwd_prop_step_computes_per_update = 0.0 - self.mean_wall_time_onwd_prop_compute = 0.0 - - def calculate(self): - Stats.calculate(self) - self.mean_num_dyn_gen_computes_per_update = \ - self.num_dyn_gen_computes / float(self.num_ctrl_amp_updates) - - self.mean_wall_time_dyn_gen_compute = \ - (self.wall_time_dyn_gen_compute / - float(self.num_dyn_gen_computes)) - - self.mean_num_prop_computes_per_update = \ - self.num_prop_computes / float(self.num_ctrl_amp_updates) - - self.mean_wall_time_prop_compute = \ - self.wall_time_prop_compute / float(self.num_prop_computes) - - self.mean_num_fwd_prop_step_computes_per_update = \ - (self.num_fwd_prop_step_computes / - float(self.num_ctrl_amp_updates)) - - self.mean_wall_time_fwd_prop_compute = \ - (self.wall_time_fwd_prop_compute / - float(self.num_fwd_prop_step_computes)) - - self.mean_num_onwd_prop_step_computes_per_update = \ - (self.num_onwd_prop_step_computes / - float(self.num_ctrl_amp_updates)) - - self.mean_wall_time_onwd_prop_compute = \ - (self.wall_time_onwd_prop_compute / - float(self.num_onwd_prop_step_computes)) - - def report(self): - """ - Print a report of the stats to the console - """ - - print("\n------------------------------------" - "\n---- Control optimisation stats ----") - self.report_timings() - self.report_func_calls() - self.report_amp_updates() - self.report_dyn_gen_comps() - self.report_fwd_prop() - self.report_onwd_prop() - print("------------------------------------") - - def report_dyn_gen_comps(self): - print("**** {} Computations ****".format(self.dyn_gen_name)) - print("Total number of {} computations: " - "{}".format(self.dyn_gen_name, self.num_dyn_gen_computes)) - print("Mean number of {} computations per update: " - "{}".format(self.dyn_gen_name, - 
self.mean_num_dyn_gen_computes_per_update)) - print("Mean wall time to compute {}s: " - "{} s".format(self.dyn_gen_name, - self.mean_wall_time_dyn_gen_compute)) - print("**** Propagator Computations ****") - print("Total number of propagator computations: " - "{}".format(self.num_prop_computes)) - print("Mean number of propagator computations per update: " - "{}".format(self.mean_num_prop_computes_per_update)) - print("Mean wall time to compute propagator " - "{} s".format(self.mean_wall_time_prop_compute)) - - def report_fwd_prop(self): - print("**** Forward Propagation ****") - print("Total number of forward propagation step computations: " - "{}".format(self.num_fwd_prop_step_computes)) - print("Mean number of forward propagation step computations" - " per update: " - "{}".format(self.mean_num_fwd_prop_step_computes_per_update)) - print("Mean wall time to compute forward propagation " - "{} s".format(self.mean_wall_time_fwd_prop_compute)) - - def report_onwd_prop(self): - print("**** Onward Propagation ****") - print("Total number of onward propagation step computations: " - "{}".format(self.num_onwd_prop_step_computes)) - print("Mean number of onward propagation step computations" - " per update: " - "{}".format(self.mean_num_onwd_prop_step_computes_per_update)) - print("Mean wall time to compute onward propagation " - "{} s".format(self.mean_wall_time_onwd_prop_compute)) diff --git a/qutip/control/symplectic.py b/qutip/control/symplectic.py deleted file mode 100644 index 20189ebcaa..0000000000 --- a/qutip/control/symplectic.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Utility functions for symplectic matrices -""" - -import numpy as np - - -def calc_omega(n): - """ - Calculate the 2n x 2n Omega matrix - Used as dynamics generator phase to calculate symplectic propagators 
- - Parameters - ---------- - n : scalar(int) - number of modes in oscillator system - - Returns - ------- - array(float) - Symplectic phase Omega - """ - - omg = np.zeros((2*n, 2*n)) - for j in range(2*n): - for k in range(2*n): - if k == j+1: - omg[j, k] = (1 + (-1)**j)/2 - if k == j-1: - omg[j, k] = -(1 - (-1)**j)/2 - - return omg diff --git a/qutip/control/termcond.py b/qutip/control/termcond.py deleted file mode 100644 index a390b3c9d8..0000000000 --- a/qutip/control/termcond.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Classes containing termination conditions for the control pulse optimisation -i.e. attributes that will be checked during the optimisation, that -will determine if the algorithm has completed its task / exceeded limits -""" - - -class TerminationConditions: - """ - Base class for all termination conditions - Used to determine when to stop the optimisation algorithm - Note different subclasses should be used to match the type of - optimisation being used - - Attributes - ---------- - fid_err_targ : float - Target fidelity error - - fid_goal : float - goal fidelity, e.g. 1 - self.fid_err_targ - It its typical to set this for unitary systems - - max_wall_time : float - # maximum time for optimisation (seconds) - - min_gradient_norm : float - Minimum normalised gradient after which optimisation will terminate - - max_iterations : integer - Maximum iterations of the optimisation algorithm - - max_fid_func_calls : integer - Maximum number of calls to the fidelity function during - the optimisation algorithm - - accuracy_factor : float - Determines the accuracy of the result. - Typical values for accuracy_factor are: 1e12 for low accuracy; - 1e7 for moderate accuracy; 10.0 for extremely high accuracy - scipy.optimize.fmin_l_bfgs_b factr argument. 
- Only set for specific methods (fmin_l_bfgs_b) that uses this - Otherwise the same thing is passed as method_option ftol - (although the scale is different) - Hence it is not defined here, but may be set by the user - """ - def __init__(self): - self.reset() - - def reset(self): - self.fid_err_targ = 1e-5 - self.fid_goal = None - self.max_wall_time = 60*60.0 - self.min_gradient_norm = 1e-5 - self.max_iterations = 1e10 - self.max_fid_func_calls = 1e10 diff --git a/qutip/control/tslotcomp.py b/qutip/control/tslotcomp.py deleted file mode 100644 index 1ce070d26c..0000000000 --- a/qutip/control/tslotcomp.py +++ /dev/null @@ -1,704 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Alexander Pitchford -# @email1: agp1@aber.ac.uk -# @email2: alex.pitchford@gmail.com -# @organization: Aberystwyth University -# @supervisor: Daniel Burgarth - -""" -Timeslot Computer -These classes determine which dynamics generators, propagators and evolutions -are recalculated when there is a control amplitude update. -The timeslot computer processes the lists held by the dynamics object - -The default (UpdateAll) updates all of these each amp update, on the -assumption that all amplitudes are changed each iteration. This is typical -when using optimisation methods like BFGS in the GRAPE algorithm - -The alternative (DynUpdate) assumes that only a subset of amplitudes -are updated each iteration and attempts to minimise the number of expensive -calculations accordingly. This would be the appropriate class for Krotov type -methods. Note that the Stats_DynTsUpdate class must be used for stats -in conjunction with this class. -NOTE: AJGP 2011-10-2014: This _DynUpdate class currently has some bug, -no pressing need to fix it presently - -If all amplitudes change at each update, then the behavior of the classes is -equivalent. _UpdateAll is easier to understand and potentially slightly faster -in this situation. 
- -Note the methods in the _DynUpdate class were inspired by: -DYNAMO - Dynamic Framework for Quantum Optimal Control -See Machnes et.al., arXiv.1011.4874 -""" - -import warnings -import numpy as np -import timeit -# QuTiP -from qutip import Qobj -# QuTiP control modules -import qutip.control.errors as errors -import qutip.control.dump as qtrldump -# QuTiP logging -import qutip.logging_utils as logging -logger = logging.get_logger() - - -def _func_deprecation(message, stacklevel=3): - """ - Issue deprecation warning - Using stacklevel=3 will ensure message refers the function - calling with the deprecated parameter, - """ - warnings.warn(message, FutureWarning, stacklevel=stacklevel) - - -class TimeslotComputer: - """ - Base class for all Timeslot Computers - Note: this must be instantiated with a Dynamics object, that is the - container for the data that the methods operate on - - Attributes - ---------- - log_level : integer - level of messaging output from the logger. - Options are attributes of qutip.logging_utils, - in decreasing levels of messaging, are: - DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL - Anything WARN or above is effectively 'quiet' execution, - assuming everything runs as expected. 
- The default NOTSET implies that the level will be taken from - the QuTiP settings file, which by default is WARN - - evo_comp_summary : EvoCompSummary - A summary of the most recent evolution computation - Used in the stats and dump - Will be set to None if neither stats or dump are set - """ - def __init__(self, dynamics, params=None): - from qutip.control.dynamics import Dynamics - if not isinstance(dynamics, Dynamics): - raise TypeError("Must instantiate with {} type".format( - Dynamics)) - self.parent = dynamics - self.params = params - self.reset() - - def reset(self): - self.log_level = self.parent.log_level - self.id_text = 'TS_COMP_BASE' - self.evo_comp_summary = None - - def apply_params(self, params=None): - """ - Set object attributes based on the dictionary (if any) passed in the - instantiation, or passed as a parameter - This is called during the instantiation automatically. - The key value pairs are the attribute name and value - Note: attributes are created if they do not exist already, - and are overwritten if they do. 
- """ - if not params: - params = self.params - - if isinstance(params, dict): - self.params = params - for key in params: - setattr(self, key, params[key]) - - def flag_all_calc_now(self): - pass - - def init_comp(self): - pass - - @property - def log_level(self): - return logger.level - - @log_level.setter - def log_level(self, lvl): - """ - Set the log_level attribute and set the level of the logger - that is call logger.setLevel(lvl) - """ - logger.setLevel(lvl) - - def dump_current(self): - """Store a copy of the current time evolution""" - dyn = self.parent - dump = dyn.dump - if not isinstance(dump, qtrldump.DynamicsDump): - raise RuntimeError("Cannot dump current evolution, " - "as dynamics.dump is not set") - - anything_dumped = False - item_idx = None - if dump.dump_any: - dump_item = dump.add_evo_dump() - item_idx = dump_item.idx - anything_dumped = True - - if dump.dump_summary: - dump.add_evo_comp_summary(dump_item_idx=item_idx) - anything_dumped = True - - if not anything_dumped: - logger.warning("Dump set, but nothing dumped, check dump config") - - -class TSlotCompUpdateAll(TimeslotComputer): - """ - Timeslot Computer - Update All - Updates all dynamics generators, propagators and evolutions when - ctrl amplitudes are updated - """ - def reset(self): - TimeslotComputer.reset(self) - self.id_text = 'ALL' - self.apply_params() - - def compare_amps(self, new_amps): - """ - Determine if any amplitudes have changed. 
If so, then mark the - timeslots as needing recalculation - Returns: True if amplitudes are the same, False if they have changed - """ - changed = False - dyn = self.parent - - if (dyn.stats or dyn.dump): - if self.evo_comp_summary: - self.evo_comp_summary.reset() - else: - self.evo_comp_summary = EvoCompSummary() - ecs = self.evo_comp_summary - - if dyn.ctrl_amps is None: - # Flag fidelity and gradients as needing recalculation - changed = True - if ecs: - ecs.num_amps_changed = len(new_amps.flat) - ecs.num_timeslots_changed = new_amps.shape[0] - else: - # create boolean array with same shape as ctrl_amps - # True where value in new_amps differs, otherwise false - changed_amps = dyn.ctrl_amps != new_amps - if np.any(changed_amps): - # Flag fidelity and gradients as needing recalculation - changed = True - if self.log_level <= logging.DEBUG: - logger.debug("{} amplitudes changed".format( - changed_amps.sum())) - - if ecs: - ecs.num_amps_changed = changed_amps.sum() - ecs.num_timeslots_changed = np.any(changed_amps, 1).sum() - - else: - if self.log_level <= logging.DEBUG: - logger.debug("No amplitudes changed") - - # *** update stats *** - if dyn.stats: - dyn.stats.num_ctrl_amp_updates += bool(ecs.num_amps_changed) - dyn.stats.num_ctrl_amp_changes += ecs.num_amps_changed - dyn.stats.num_timeslot_changes += ecs.num_timeslots_changed - - if changed: - dyn.ctrl_amps = new_amps - dyn.flag_system_changed() - return False - else: - return True - - def recompute_evolution(self): - """ - Recalculates the evolution operators. - Dynamics generators (e.g. 
Hamiltonian) and - prop (propagators) are calculated as necessary - """ - - dyn = self.parent - prop_comp = dyn.prop_computer - n_ts = dyn.num_tslots - n_ctrls = dyn.num_ctrls - - # Clear the public lists - # These are only set if (external) users access them - dyn._dyn_gen_qobj = None - dyn._prop_qobj = None - dyn._prop_grad_qobj = None - dyn._fwd_evo_qobj = None - dyn._onwd_evo_qobj = None - dyn._onto_evo_qobj = None - - if (dyn.stats or dyn.dump) and not self.evo_comp_summary: - self.evo_comp_summary = EvoCompSummary() - ecs = self.evo_comp_summary - - if dyn.stats is not None: - dyn.stats.num_tslot_recompute += 1 - if self.log_level <= logging.DEBUG: - logger.log(logging.DEBUG, "recomputing evolution {} " - "(UpdateAll)".format( - dyn.stats.num_tslot_recompute)) - - # calculate the Hamiltonians - if ecs: - time_start = timeit.default_timer() - for k in range(n_ts): - dyn._combine_dyn_gen(k) - if dyn._decomp_curr is not None: - dyn._decomp_curr[k] = False - - if ecs: - ecs.wall_time_dyn_gen_compute = \ - timeit.default_timer() - time_start - - # calculate the propagators and the propagotor gradients - if ecs: - time_start = timeit.default_timer() - for k in range(n_ts): - if prop_comp.grad_exact and dyn.cache_prop_grad: - for j in range(n_ctrls): - if j == 0: - dyn._prop[k], dyn._prop_grad[k, j] = \ - prop_comp._compute_prop_grad(k, j) - if self.log_level <= logging.DEBUG_INTENSE: - logger.log(logging.DEBUG_INTENSE, - "propagator {}:\n{:10.3g}".format( - k, self._prop[k])) - else: - dyn._prop_grad[k, j] = \ - prop_comp._compute_prop_grad(k, j, - compute_prop=False) - else: - dyn._prop[k] = prop_comp._compute_propagator(k) - - if ecs: - ecs.wall_time_prop_compute = \ - timeit.default_timer() - time_start - - if ecs: - time_start = timeit.default_timer() - # compute the forward propagation - R = range(n_ts) - for k in R: - if dyn.oper_dtype == Qobj: - dyn._fwd_evo[k+1] = dyn._prop[k]*dyn._fwd_evo[k] - else: - dyn._fwd_evo[k+1] = dyn._prop[k].dot(dyn._fwd_evo[k]) - 
- if ecs: - ecs.wall_time_fwd_prop_compute = \ - timeit.default_timer() - time_start - time_start = timeit.default_timer() - # compute the onward propagation - if dyn.fid_computer.uses_onwd_evo: - dyn._onwd_evo[n_ts-1] = dyn._prop[n_ts-1] - R = range(n_ts-2, -1, -1) - for k in R: - if dyn.oper_dtype == Qobj: - dyn._onwd_evo[k] = dyn._onwd_evo[k+1]*dyn._prop[k] - else: - dyn._onwd_evo[k] = dyn._onwd_evo[k+1].dot(dyn._prop[k]) - - if dyn.fid_computer.uses_onto_evo: - R = range(n_ts-1, -1, -1) - for k in R: - if dyn.oper_dtype == Qobj: - dyn._onto_evo[k] = dyn._onto_evo[k+1]*dyn._prop[k] - else: - dyn._onto_evo[k] = dyn._onto_evo[k+1].dot(dyn._prop[k]) - - if ecs: - ecs.wall_time_onwd_prop_compute = \ - timeit.default_timer() - time_start - - if dyn.stats: - dyn.stats.wall_time_dyn_gen_compute += \ - ecs.wall_time_dyn_gen_compute - dyn.stats.wall_time_prop_compute += \ - ecs.wall_time_prop_compute - dyn.stats.wall_time_fwd_prop_compute += \ - ecs.wall_time_fwd_prop_compute - dyn.stats.wall_time_onwd_prop_compute += \ - ecs.wall_time_onwd_prop_compute - - if dyn.unitarity_check_level: - dyn.check_unitarity() - - if dyn.dump: - self.dump_current() - - def get_timeslot_for_fidelity_calc(self): - """ - Returns the timeslot index that will be used calculate current fidelity - value. - This (default) method simply returns the last timeslot - """ - _func_deprecation("'get_timeslot_for_fidelity_calc' is deprecated. " - "Use '_get_timeslot_for_fidelity_calc'") - return self._get_timeslot_for_fidelity_calc - - def _get_timeslot_for_fidelity_calc(self): - """ - Returns the timeslot index that will be used calculate current fidelity - value. - This (default) method simply returns the last timeslot - """ - return self.parent.num_tslots - - -class TSlotCompDynUpdate(TimeslotComputer): - """ - Timeslot Computer - Dynamic Update - ******************************** - ***** CURRENTLY HAS ISSUES ***** - ***** AJGP 2014-10-02 - ***** and is therefore not being maintained - ***** i.e. 
changes made to _UpdateAll are not being implemented here - ******************************** - Updates only the dynamics generators, propagators and evolutions as - required when a subset of the ctrl amplitudes are updated. - Will update all if all amps have changed. - """ - - def reset(self): - self.dyn_gen_recalc = None - self.prop_recalc = None - self.evo_init2t_recalc = None - self.evo_t2targ_recalc = None - self.dyn_gen_calc_now = None - self.prop_calc_now = None - self.evo_init2t_calc_now = None - self.evo_t2targ_calc_now = None - TimeslotComputer.reset(self) - self.id_text = 'DYNAMIC' - self.apply_params() - - def init_comp(self): - """ - Initialise the flags - """ - #### - # These maps are used to determine what needs to be updated - #### - # Note _recalc means the value needs updating at some point - # e.g. here no values have been set, except the initial and final - # evolution operator vals (which never change) and hence all other - # values are set as requiring calculation. - n_ts = self.parent.num_tslots - self.dyn_gen_recalc = np.ones(n_ts, dtype=bool) - # np.ones(n_ts, dtype=bool) - self.prop_recalc = np.ones(n_ts, dtype=bool) - self.evo_init2t_recalc = np.ones(n_ts + 1, dtype=bool) - self.evo_init2t_recalc[0] = False - self.evo_t2targ_recalc = np.ones(n_ts + 1, dtype=bool) - self.evo_t2targ_recalc[-1] = False - - # The _calc_now map is used to during the calcs to specify - # which values need updating immediately - self.dyn_gen_calc_now = np.zeros(n_ts, dtype=bool) - self.prop_calc_now = np.zeros(n_ts, dtype=bool) - self.evo_init2t_calc_now = np.zeros(n_ts + 1, dtype=bool) - self.evo_t2targ_calc_now = np.zeros(n_ts + 1, dtype=bool) - - def compare_amps(self, new_amps): - """ - Determine which timeslots will have changed Hamiltonians - i.e. 
any where control amplitudes have changed for that slot - and mark (using masks) them and corresponding exponentiations and - time evo operators for update - Returns: True if amplitudes are the same, False if they have changed - """ - dyn = self.parent - n_ts = dyn.num_tslots - # create boolean array with same shape as ctrl_amps - # True where value in New_amps differs, otherwise false - if self.parent.ctrl_amps is None: - changed_amps = np.ones(new_amps.shape, dtype=bool) - else: - changed_amps = self.parent.ctrl_amps != new_amps - - if self.log_level <= logging.DEBUG_VERBOSE: - logger.log(logging.DEBUG_VERBOSE, "changed_amps:\n{}".format( - changed_amps)) - # create Boolean vector with same length as number of timeslots - # True where any of the amplitudes have changed, otherwise false - changed_ts_mask = np.any(changed_amps, 1) - # if any of the amplidudes have changed then mark for recalc - if np.any(changed_ts_mask): - self.dyn_gen_recalc[changed_ts_mask] = True - self.prop_recalc[changed_ts_mask] = True - dyn.ctrl_amps = new_amps - if self.log_level <= logging.DEBUG: - logger.debug("Control amplitudes updated") - # find first and last changed dynamics generators - first_changed = None - for i in range(n_ts): - if changed_ts_mask[i]: - last_changed = i - if first_changed is None: - first_changed = i - - # set all fwd evo ops after first changed Ham to be recalculated - self.evo_init2t_recalc[first_changed + 1:] = True - # set all bkwd evo ops up to (incl) last changed Ham to be - # recalculated - self.evo_t2targ_recalc[:last_changed + 1] = True - - # Flag fidelity and gradients as needing recalculation - dyn.flag_system_changed() - - # *** update stats *** - if dyn.stats is not None: - dyn.stats.num_ctrl_amp_updates += 1 - dyn.stats.num_ctrl_amp_changes += changed_amps.sum() - dyn.stats.num_timeslot_changes += changed_ts_mask.sum() - - return False - else: - return True - - def flag_all_calc_now(self): - """ - Flags all Hamiltonians, propagators and 
propagations to be - calculated now - """ - # set flags for calculations - self.dyn_gen_calc_now[:] = True - self.prop_calc_now[:] = True - self.evo_init2t_calc_now[:-1] = True - self.evo_t2targ_calc_now[1:] = True - - def recompute_evolution(self): - """ - Recalculates the evo_init2t (forward) and evo_t2targ (onward) time - evolution operators - DynGen (Hamiltonians etc) and prop (propagator) are calculated - as necessary - """ - if self.log_level <= logging.DEBUG_VERBOSE: - logger.log(logging.DEBUG_VERBOSE, "recomputing evolution " - "(DynUpdate)") - - dyn = self.parent - n_ts = dyn.num_tslots - # find the op slots that have been marked for update now - # and need recalculation - evo_init2t_recomp_now = self.evo_init2t_calc_now & \ - self.evo_init2t_recalc - evo_t2targ_recomp_now = self.evo_t2targ_calc_now & \ - self.evo_t2targ_recalc - - # to recomupte evo_init2t, will need to start - # at a cell that has been computed - if np.any(evo_init2t_recomp_now): - for k in range(n_ts, 0, -1): - if evo_init2t_recomp_now[k] and self.evo_init2t_recalc[k-1]: - evo_init2t_recomp_now[k-1] = True - - # for evo_t2targ, will also need to start - # at a cell that has been computed - if np.any(evo_t2targ_recomp_now): - for k in range(0, n_ts): - if evo_t2targ_recomp_now[k] and self.evo_t2targ_recalc[k+1]: - evo_t2targ_recomp_now[k+1] = True - - # determine which dyn gen and prop need recalculating now in order to - # calculate the forwrd and onward evolutions - prop_recomp_now = (evo_init2t_recomp_now[1:] - | evo_t2targ_recomp_now[:-1] - | self.prop_calc_now[:]) & self.prop_recalc[:] - dyn_gen_recomp_now = (prop_recomp_now[:] | self.dyn_gen_calc_now[:]) \ - & self.dyn_gen_recalc[:] - - if np.any(dyn_gen_recomp_now): - time_start = timeit.default_timer() - for k in range(n_ts): - if dyn_gen_recomp_now[k]: - # calculate the dynamics generators - dyn.dyn_gen[k] = dyn.compute_dyn_gen(k) - self.dyn_gen_recalc[k] = False - if dyn.stats is not None: - dyn.stats.num_dyn_gen_computes += 
dyn_gen_recomp_now.sum() - dyn.stats.wall_time_dyn_gen_compute += \ - timeit.default_timer() - time_start - - if np.any(prop_recomp_now): - time_start = timeit.default_timer() - for k in range(n_ts): - if prop_recomp_now[k]: - # calculate exp(H) and other per H computations needed for - # the gradient function - dyn.prop[k] = dyn._compute_propagator(k) - self.prop_recalc[k] = False - if dyn.stats is not None: - dyn.stats.num_prop_computes += prop_recomp_now.sum() - dyn.stats.wall_time_prop_compute += \ - timeit.default_timer() - time_start - - # compute the forward propagation - if np.any(evo_init2t_recomp_now): - time_start = timeit.default_timer() - R = range(1, n_ts + 1) - for k in R: - if evo_init2t_recomp_now[k]: - dyn.evo_init2t[k] = \ - dyn.prop[k-1].dot(dyn.evo_init2t[k-1]) - self.evo_init2t_recalc[k] = False - if dyn.stats is not None: - dyn.stats.num_fwd_prop_step_computes += \ - evo_init2t_recomp_now.sum() - dyn.stats.wall_time_fwd_prop_compute += \ - timeit.default_timer() - time_start - - if np.any(evo_t2targ_recomp_now): - time_start = timeit.default_timer() - # compute the onward propagation - R = range(n_ts-1, -1, -1) - for k in R: - if evo_t2targ_recomp_now[k]: - dyn.evo_t2targ[k] = dyn.evo_t2targ[k+1].dot(dyn.prop[k]) - self.evo_t2targ_recalc[k] = False - if dyn.stats is not None: - dyn.stats.num_onwd_prop_step_computes += \ - evo_t2targ_recomp_now.sum() - dyn.stats.wall_time_onwd_prop_compute += \ - timeit.default_timer() - time_start - - # Clear calc now flags - self.dyn_gen_calc_now[:] = False - self.prop_calc_now[:] = False - self.evo_init2t_calc_now[:] = False - self.evo_t2targ_calc_now[:] = False - - def get_timeslot_for_fidelity_calc(self): - """ - Returns the timeslot index that will be used calculate current fidelity - value. Attempts to find a timeslot where the least number of propagator - calculations will be required. 
- Flags the associated evolution operators for calculation now - """ - dyn = self.parent - n_ts = dyn.num_tslots - kBothEvoCurrent = -1 - kFwdEvoCurrent = -1 - kUse = -1 - # If no specific timeslot set in config, then determine dynamically - if kUse < 0: - for k in range(n_ts): - # find first timeslot where both evo_init2t and - # evo_t2targ are current - if not self.evo_init2t_recalc[k]: - kFwdEvoCurrent = k - if not self.evo_t2targ_recalc[k]: - kBothEvoCurrent = k - break - - if kBothEvoCurrent >= 0: - kUse = kBothEvoCurrent - elif kFwdEvoCurrent >= 0: - kUse = kFwdEvoCurrent - else: - raise errors.FunctionalError("No timeslot found matching " - "criteria") - - self.evo_init2t_calc_now[kUse] = True - self.evo_t2targ_calc_now[kUse] = True - return kUse - - -class EvoCompSummary(qtrldump.DumpSummaryItem): - """ - A summary of the most recent time evolution computation - Used in stats calculations and for data dumping - - Attributes - ---------- - evo_dump_idx : int - Index of the linked :class:`dump.EvoCompDumpItem` - None if no linked item - - iter_num : int - Iteration number of the pulse optimisation - None if evolution compute outside of a pulse optimisation - - fid_func_call_num : int - Fidelity function call number of the pulse optimisation - None if evolution compute outside of a pulse optimisation - - grad_func_call_num : int - Gradient function call number of the pulse optimisation - None if evolution compute outside of a pulse optimisation - - num_amps_changed : int - Number of control timeslot amplitudes changed since previous - evolution calculation - - num_timeslots_changed : int - Number of timeslots in which any amplitudes changed since previous - evolution calculation - - wall_time_dyn_gen_compute : float - Time spent computing dynamics generators - (in seconds of elapsed time) - - wall_time_prop_compute : float - Time spent computing propagators (including and propagator gradients) - (in seconds of elapsed time) - - wall_time_fwd_prop_compute : 
float - Time spent computing the forward evolution of the system - see :property:`dynamics.fwd_evo` - (in seconds of elapsed time) - - wall_time_onwd_prop_compute : float - Time spent computing the 'backward' evolution of the system - see :property:`dynamics.onwd_evo` and :property:`dynamics.onto_evo` - (in seconds of elapsed time) - """ - - min_col_width = 11 - summary_property_names = ( - "idx", "evo_dump_idx", - "iter_num", "fid_func_call_num", "grad_func_call_num", - "num_amps_changed", "num_timeslots_changed", - "wall_time_dyn_gen_compute", "wall_time_prop_compute", - "wall_time_fwd_prop_compute", "wall_time_onwd_prop_compute") - - summary_property_fmt_type = ( - 'd', 'd', - 'd', 'd', 'd', - 'd', 'd', - 'g', 'g', - 'g', 'g' - ) - - summary_property_fmt_prec = ( - 0, 0, - 0, 0, 0, - 0, 0, - 3, 3, - 3, 3 - ) - - def __init__(self): - self.reset() - - def reset(self): - qtrldump.DumpSummaryItem.reset(self) - self.evo_dump_idx = None - self.iter_num = None - self.fid_func_call_num = None - self.grad_func_call_num = None - self.num_amps_changed = 0 - self.num_timeslots_changed = 0 - self.wall_time_dyn_gen_compute = 0.0 - self.wall_time_prop_compute = 0.0 - self.wall_time_fwd_prop_compute = 0.0 - self.wall_time_onwd_prop_compute = 0.0 diff --git a/qutip/core/_brtensor.pyx b/qutip/core/_brtensor.pyx index ee4d3f2e9f..530cf63d1b 100644 --- a/qutip/core/_brtensor.pyx +++ b/qutip/core/_brtensor.pyx @@ -265,14 +265,14 @@ cdef class _BlochRedfieldElement(_BaseElement): return _br_term_data(A_eig, self.spectrum, self.skew, cutoff) raise ValueError('Invalid tensortype') - cpdef object qobj(self, double t): + cpdef object qobj(self, t): return Qobj(self.data(t), dims=self.dims, type="super", copy=False, superrep="super") - cpdef double complex coeff(self, double t) except *: + cpdef object coeff(self, t): return 1. 
- cpdef Data data(self, double t): + cpdef Data data(self, t): cdef size_t i cdef double cutoff = self.sec_cutoff * self._compute_spectrum(t) A_eig = self.H.to_eigbasis(t, self.a_op._call(t)) @@ -281,7 +281,7 @@ cdef class _BlochRedfieldElement(_BaseElement): return BR_eig return self.H.from_eigbasis(t, BR_eig) - cdef Data matmul_data_t(self, double t, Data state, Data out=None): + cdef Data matmul_data_t(self, t, Data state, Data out=None): cdef size_t i cdef double cutoff = self.sec_cutoff * self._compute_spectrum(t) cdef Data A_eig, BR_eig diff --git a/qutip/core/_brtools.pyx b/qutip/core/_brtools.pyx index a7e379c964..45144053c4 100644 --- a/qutip/core/_brtools.pyx +++ b/qutip/core/_brtools.pyx @@ -54,11 +54,6 @@ cdef class SpectraCoefficient(Coefficient): return self -@cython.overflowcheck(True) -cdef size_t _mul_checked(size_t a, size_t b) except? -1: - return a * b - - cdef Data _apply_trans(Data original, int trans): """helper function for matmul_var_data, apply transform.""" cdef Data out @@ -201,7 +196,7 @@ cdef class _EigenBasisTransform: def __init__(self, QobjEvo oper, bint sparse=False): if oper.dims[0] != oper.dims[1]: raise ValueError - if type(oper(0).data) is _data.CSR and not sparse: + if type(oper(0).data) in (_data.CSR, _data.Dia) and not sparse: oper = oper.to(Dense) self.oper = oper self.isconstant = oper.isconstant @@ -250,10 +245,10 @@ cdef class _EigenBasisTransform: return self._evecs_inv cdef Data _S_converter(self, double t): - return _data.kron(self.evecs(t).transpose(), self._inv(t)) + return _data.kron_transpose(self.evecs(t), self._inv(t)) cdef Data _S_converter_inverse(self, double t): - return _data.kron(self._inv(t).transpose(), self.evecs(t)) + return _data.kron_transpose(self._inv(t), self.evecs(t)) cpdef Data to_eigbasis(self, double t, Data fock): """ diff --git a/qutip/core/coefficient.py b/qutip/core/coefficient.py index 545ce271ff..0c6ed689be 100644 --- a/qutip/core/coefficient.py +++ b/qutip/core/coefficient.py @@ -10,7 
+10,6 @@ import importlib import warnings import numbers -from contextlib import contextmanager from collections import defaultdict from setuptools import setup, Extension try: @@ -24,7 +23,7 @@ from .data import Data from .cy.coefficient import ( Coefficient, InterCoefficient, FunctionCoefficient, StrFunctionCoefficient, - ConjCoefficient, NormCoefficient + ConjCoefficient, NormCoefficient, ConstantCoefficient ) @@ -32,26 +31,37 @@ "clean_compiled_coefficient"] -@contextmanager -def _ignore_import_warning_for_pyximporter(): - """ - A helper for ignoring PyxImporter import warnings generated by Python 3.10+ - because PyxImporter has no .find_spec method. - """ - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=ImportWarning) - yield - - class StringParsingWarning(Warning): pass +def _return(base, **kwargs): + return base + + +# The `coefficient` function is a dispatcher for the type of the `base` to the +# function that creates the `Coefficient` object. `coefficient_builders` stores +# the map `type -> function(base, **kw)`. Optional modules can add their +# `Coefficient` specializations here. +coefficient_builders = { + Coefficient: _return, + np.ndarray: InterCoefficient, + scipy.interpolate.PPoly: InterCoefficient.from_PPoly, + scipy.interpolate.BSpline: InterCoefficient.from_Bspline, +} + + def coefficient(base, *, tlist=None, args={}, args_ctypes={}, - order=3, compile_opt=None, function_style=None): - """Coefficient for time dependent systems. + order=3, compile_opt=None, function_style=None, + boundary_conditions=None, **kwargs): + """Build ``Coefficient`` for time dependent systems: + + ``` + QobjEvo = Qobj + Qobj * Coefficient + Qobj * Coefficient + ... + ``` - The coefficients are either a function, a string or a numpy array. + The coefficients can be a function, a string or a numpy array. Other + packages may add support for other kinds of coefficients. 
For function based coefficients, the function signature must be either: @@ -88,8 +98,7 @@ def f2_t(t, args): real imag conj abs norm arg proj numpy as np, scipy.special as spe (python interface) - and cython_special (cython interface) - [https://docs.scipy.org/doc/scipy/reference/special.cython_special.html]. + and cython_special (scipy cython interface) *Examples* coeff = coefficient('exp(-1j*w1*t)', args={"w1":1.}) @@ -116,23 +125,51 @@ def f2_t(t, args): created from ``ndarray``). Other interpolation methods from scipy are converted to a function-based coefficient (the same kind of coefficient created from callables). - """ - if isinstance(base, Coefficient): - return base - elif isinstance(base, np.ndarray): - return InterCoefficient(base, tlist, order) + Parameters + ---------- + base : object + Base object to make into a Coefficient. + + args : dict, optional + Dictionary of arguments to pass to the function or string coefficient. - elif isinstance(base, scipy.interpolate.PPoly): - return InterCoefficient.from_PPoly(base) + order : int, default=3 + Order of the spline for array based coefficient. - elif isinstance(base, scipy.interpolate.BSpline): - return InterCoefficient.from_Bspline(base) + tlist : iterable, optional + Times for each element of an array based coefficient. - elif isinstance(base, str): - return coeff_from_str(base, args, args_ctypes, compile_opt) + function_style : str, ["dict", "pythonic", None] + Function signature of function based coefficients. - elif callable(base): + args_ctypes : dict, optional + C type for the args when compiling array based coefficients. + + compile_opt : CompilationOptions, optional + Sets of options for the compilation of string based coefficients. + + boundary_conditions: 2-tuple, str or None, optional + Specify boundary conditions for spline interpolation. + + **kwargs + Extra arguments to pass to the coefficients. 
+ """ + kwargs.update({ + "tlist": tlist, + 'args': args, + 'args_ctypes': args_ctypes, + 'order': order, + 'compile_opt': compile_opt, + 'function_style': function_style, + 'boundary_conditions': boundary_conditions + }) + + for type_ in coefficient_builders: + if isinstance(base, type_): + return coefficient_builders[type_](base, **kwargs) + + if callable(base): op = FunctionCoefficient(base, args.copy(), style=function_style) if not isinstance(op(0), numbers.Number): raise TypeError("The coefficient function must return a number") @@ -153,9 +190,18 @@ def conj(coeff): return ConjCoefficient(coeff) +def const(value): + """ return a Coefficient with a constant value. + """ + return ConstantCoefficient(value) + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # %%%%%%%%% Everything under this is for string compilation %%%%%%%%% # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +WARN_MISSING_MODULE = [0] + + class CompilationOptions(QutipOptions): """ Compilation options: @@ -216,6 +262,7 @@ class CompilationOptions(QutipOptions): _use_cython = True except ImportError: _use_cython = False + WARN_MISSING_MODULE[0] = 1 _options = { "use_cython": _use_cython, @@ -305,7 +352,7 @@ def proj(x): "spe": scipy.special} -def coeff_from_str(base, args, args_ctypes, compile_opt=None): +def coeff_from_str(base, args, args_ctypes, compile_opt=None, **_): """ Entry point for string based coefficients - Test if the string is valid @@ -326,6 +373,12 @@ def coeff_from_str(base, args, args_ctypes, compile_opt=None): coeff = None # Do we compile? if not compile_opt['use_cython']: + if WARN_MISSING_MODULE[0]: + warnings.warn( + "Both `cython` and `filelock` are required for compilation of " + "string coefficents. Falling back on `eval`.") + # Only warns once. 
+ WARN_MISSING_MODULE[0] = 0 return StrFunctionCoefficient(base, args) # Parsing tries to make the code in common pattern parsed, variables, constants, raw = try_parse(base, args, @@ -341,7 +394,7 @@ def coeff_from_str(base, args, args_ctypes, compile_opt=None): # Previously compiled coefficient not available: create the cython code code = make_cy_code(parsed, variables, constants, raw, compile_opt) - try : + try: coeff = compile_code(code, file_name, parsed, compile_opt) except PermissionError: pass @@ -353,13 +406,15 @@ def coeff_from_str(base, args, args_ctypes, compile_opt=None): return coeff(base, keys, const, args) +coefficient_builders[str] = coeff_from_str + + def try_import(file_name, parsed_in): """ Import the compiled coefficient if existing and check for name collision. """ try: - with _ignore_import_warning_for_pyximporter(): - mod = importlib.import_module(file_name) + mod = importlib.import_module(file_name) except ModuleNotFoundError: # Coefficient does not exist, to compile as file_name return None @@ -499,9 +554,8 @@ def compile_code(code, file_name, parsed, c_opt): include_dirs=[np.get_include()], language='c++' ) - with _ignore_import_warning_for_pyximporter(): - ext_modules = cythonize(coeff_file, force=True) - setup(ext_modules=ext_modules) + ext_modules = cythonize(coeff_file, force=True) + setup(ext_modules=ext_modules) except Exception as e: if c_opt['clean_on_error']: for file in glob.glob(file_name + "*"): @@ -732,6 +786,6 @@ class DummySelf: loc_env = {"t": 0, 'self': DummySelf} try: exec(code, str_env, loc_env) - except Exception as e: + except Exception: return False return True diff --git a/qutip/core/cy/_element.pxd b/qutip/core/cy/_element.pxd index 59301e1b7c..745df68e9d 100644 --- a/qutip/core/cy/_element.pxd +++ b/qutip/core/cy/_element.pxd @@ -6,10 +6,11 @@ from qutip.core.data.base cimport idxint from libcpp cimport bool cdef class _BaseElement: - cpdef Data data(self, double t) - cpdef object qobj(self, double t) - cpdef 
double complex coeff(self, double t) except * - cdef Data matmul_data_t(_BaseElement self, double t, Data state, Data out=?) + cdef Data _data + cpdef Data data(self, t) + cpdef object qobj(self, t) + cpdef object coeff(self, t) + cdef Data matmul_data_t(_BaseElement self, t, Data state, Data out=?) cdef class _ConstantElement(_BaseElement): @@ -22,21 +23,21 @@ cdef class _EvoElement(_BaseElement): cdef class _FuncElement(_BaseElement): - cdef object _func - cdef dict _args - cdef tuple _previous - cdef bint _f_pythonic - cdef set _f_parameters + cdef readonly object _func + cdef readonly dict _args + cdef readonly tuple _previous + cdef readonly bint _f_pythonic + cdef readonly set _f_parameters cdef class _MapElement(_BaseElement): - cdef _FuncElement _base - cdef list _transform - cdef double complex _coeff + cdef readonly _FuncElement _base + cdef readonly list _transform + cdef readonly double complex _coeff cdef class _ProdElement(_BaseElement): - cdef _BaseElement _left - cdef _BaseElement _right - cdef list _transform - cdef bool _conj + cdef readonly _BaseElement _left + cdef readonly _BaseElement _right + cdef readonly list _transform + cdef readonly bool _conj diff --git a/qutip/core/cy/_element.pyx b/qutip/core/cy/_element.pyx index 8d46d546c8..de2f574690 100644 --- a/qutip/core/cy/_element.pyx +++ b/qutip/core/cy/_element.pyx @@ -1,5 +1,9 @@ #cython: language_level=3 -#cython: boundscheck=False, wraparound=False, initializedcheck=False, cdvision=True +#cython: boundscheck=False +#cython: wraparound=False +#cython: initializedcheck=False +#cython: cdvision=True +#cython: c_api_binop_methods=True from .. import data as _data from qutip.core.cy.coefficient import coefficient_function_parameters @@ -55,7 +59,7 @@ cdef class _BaseElement: All :obj:`~_BaseElement` instances are immutable and methods that would modify an object return a new instance instead. 
""" - cpdef Data data(self, double t): + cpdef Data data(self, t): """ Returns the underlying :obj:`~Data` of the :obj:`~Qobj` component of the term at time ``t``. @@ -75,7 +79,7 @@ cdef class _BaseElement: "Sub-classes of _BaseElement should implement .data(t)." ) - cpdef object qobj(self, double t): + cpdef object qobj(self, t): """ Returns the :obj:`~Qobj` component of the term at time ``t``. @@ -93,7 +97,7 @@ cdef class _BaseElement: "Sub-classes of _BaseElement should implement .qobj(t)." ) - cpdef double complex coeff(self, double t) except *: + cpdef object coeff(self, t): """ Returns the complex coefficient of the term at time ``t``. @@ -111,7 +115,7 @@ cdef class _BaseElement: "Sub-classes of _BaseElement should implement .coeff(t)." ) - cdef Data matmul_data_t(_BaseElement self, double t, Data state, Data out=None): + cdef Data matmul_data_t(_BaseElement self, t, Data state, Data out=None): """ Possibly in-place multiplication and addition. Multiplies a given state by the elemen's value at time ``t`` and adds the result to ``out``. @@ -256,6 +260,12 @@ cdef class _BaseElement: "Sub-classes of _BaseElement should implement .replace_arguments(t)." ) + def __call__(self, t, args=None): + if args: + cache = [] + self = self.replace_arguments(args, cache) + return self.qobj(t) * self.coeff(t) + cdef class _ConstantElement(_BaseElement): """ @@ -267,6 +277,7 @@ cdef class _ConstantElement(_BaseElement): """ def __init__(self, qobj): self._qobj = qobj + self._data = self._qobj.data def __mul__(left, right): if type(left) is _ConstantElement: @@ -283,13 +294,13 @@ cdef class _ConstantElement(_BaseElement): ) return NotImplemented - cpdef Data data(self, double t): - return self._qobj.data + cpdef Data data(self, t): + return self._data - cpdef object qobj(self, double t): + cpdef object qobj(self, t): return self._qobj - cpdef double complex coeff(self, double t) except *: + cpdef object coeff(self, t): return 1. 
def linear_map(self, f, anti=False): @@ -298,6 +309,9 @@ cdef class _ConstantElement(_BaseElement): def replace_arguments(self, args, cache=None): return self + def __call__(self, t, args=None): + return self._qobj + cdef class _EvoElement(_BaseElement): """ @@ -309,6 +323,7 @@ cdef class _EvoElement(_BaseElement): """ def __init__(self, qobj, coefficient): self._qobj = qobj + self._data = self._qobj.data self._coefficient = coefficient def __mul__(left, right): @@ -333,13 +348,13 @@ cdef class _EvoElement(_BaseElement): return NotImplemented return _EvoElement(left._qobj * right._qobj, coefficient) - cpdef Data data(self, double t): - return self._qobj.data + cpdef Data data(self, t): + return self._data - cpdef object qobj(self, double t): + cpdef object qobj(self, t): return self._qobj - cpdef double complex coeff(self, double t) except *: + cpdef object coeff(self, t): return self._coefficient(t) def linear_map(self, f, anti=False): @@ -435,10 +450,10 @@ cdef class _FuncElement(_BaseElement): def __matmul__(left, right): return _ProdElement(left, right, []) - cpdef Data data(self, double t): + cpdef Data data(self, t): return self.qobj(t).data - cpdef object qobj(self, double t): + cpdef object qobj(self, t): cdef double _t cdef object _qobj _t, _qobj = self._previous @@ -451,7 +466,7 @@ cdef class _FuncElement(_BaseElement): self._previous = (t, _qobj) return _qobj - cpdef double complex coeff(self, double t) except *: + cpdef object coeff(self, t): return 1. 
def linear_map(self, f, anti=False): @@ -514,16 +529,16 @@ cdef class _MapElement(_BaseElement): def __matmul__(left, right): return _ProdElement(left, right, []) - cpdef Data data(self, double t): + cpdef Data data(self, t): return self.qobj(t).data - cpdef object qobj(self, double t): + cpdef object qobj(self, t): out = self._base.qobj(t) for func in self._transform: out = func(out) return out - cpdef double complex coeff(self, double t) except *: + cpdef object coeff(self, t): return self._coeff def linear_map(self, f, anti=False): @@ -572,20 +587,20 @@ cdef class _ProdElement(_BaseElement): def __matmul__(left, right): return _ProdElement(left, right, []) - cpdef Data data(self, double t): + cpdef Data data(self, t): return self.qobj(t).data - cpdef object qobj(self, double t): + cpdef object qobj(self, t): out = self._left.qobj(t) @ self._right.qobj(t) for func in self._transform: out = func(out) return out - cpdef double complex coeff(self, double t) except *: + cpdef object coeff(self, t): cdef double complex out = self._left.coeff(t) * self._right.coeff(t) return conj(out) if self._conj else out - cdef Data matmul_data_t(_ProdElement self, double t, Data state, Data out=None): + cdef Data matmul_data_t(_ProdElement self, t, Data state, Data out=None): cdef Data temp if not self._transform: temp = self._right.matmul_data_t(t, state) diff --git a/qutip/core/cy/coefficient.pxd b/qutip/core/cy/coefficient.pxd index d584514839..f81858c2ef 100644 --- a/qutip/core/cy/coefficient.pxd +++ b/qutip/core/cy/coefficient.pxd @@ -1,5 +1,5 @@ #cython: language_level=3 cdef class Coefficient: - cdef dict args + cdef readonly dict args cdef double complex _call(self, double t) except * cpdef Coefficient copy(self) diff --git a/qutip/core/cy/coefficient.pyx b/qutip/core/cy/coefficient.pyx index 819a22c1e5..5ecc6bf9cb 100644 --- a/qutip/core/cy/coefficient.pyx +++ b/qutip/core/cy/coefficient.pyx @@ -1,4 +1,6 @@ #cython: language_level=3 +#cython: c_api_binop_methods=True + 
import inspect import pickle import scipy @@ -13,6 +15,12 @@ cdef extern from "" namespace "std" nogil: double norm(double complex x) +__all__ = [ + "Coefficient", "InterCoefficient", "FunctionCoefficient", + "StrFunctionCoefficient", "ConjCoefficient", "NormCoefficient" +] + + def coefficient_function_parameters(func, style=None): """ Return the function style (either "pythonic" or not) and a list of @@ -75,8 +83,8 @@ cdef class Coefficient: :obj:`Coefficient` are immutable. """ - def __init__(self): - raise NotImplementedError("Only sub-classes should be initiated.") + def __init__(self, args, **_): + self.args = args def replace_arguments(self, _args=None, **kwargs): """ @@ -98,7 +106,7 @@ cdef class Coefficient: """ return self - def __call__(self, double t, dict _args=None, **kwargs): + def __call__(self, t, dict _args=None, **kwargs): """ Return the coefficient value at time `t`. Stored arguments can overwriten with `_args` or as keywords parameters. @@ -121,8 +129,8 @@ cdef class Coefficient: cdef double complex _call(self, double t) except *: """Core computation of the :obj:`Coefficient`.""" - raise NotImplementedError("All Coefficient sub-classes " - "should overwrite this.") + # All Coefficient sub-classes should overwrite this or __call__ + return complex(self(t)) def __add__(left, right): if ( @@ -185,7 +193,7 @@ cdef class FunctionCoefficient(Coefficient): _UNSET = object() def __init__(self, func, dict args, style=None, _f_pythonic=_UNSET, - _f_parameters=_UNSET): + _f_parameters=_UNSET, **_): if _f_pythonic is self._UNSET or _f_parameters is self._UNSET: if not (_f_pythonic is self._UNSET and _f_parameters is self._UNSET): @@ -204,7 +212,9 @@ cdef class FunctionCoefficient(Coefficient): self._f_pythonic = _f_pythonic self._f_parameters = _f_parameters - cdef complex _call(self, double t) except *: + def __call__(self, t, dict _args=None, **kwargs): + if _args is not None or kwargs: + return self.replace_arguments(_args, **kwargs)(t) if 
self._f_pythonic: return self.func(t, **self.args) return self.func(t, self.args) @@ -321,7 +331,7 @@ cdef class StrFunctionCoefficient(Coefficient): "np": np, "spe": scipy.special} - def __init__(self, base, dict args): + def __init__(self, base, dict args, **_): args2var = "\n".join([" {} = args['{}']".format(key, key) for key in args]) code = f""" @@ -385,6 +395,12 @@ cdef class InterCoefficient(Coefficient): order : int Order of the interpolation. Order ``0`` uses the previous (i.e. left) value. The order will be reduced to ``len(tlist) - 1`` if it is larger. + + boundary_conditions : 2-Tuple, str or None, optional + Boundary conditions for spline evaluation. Default value is `None`. + Correspond to `bc_type` of scipy.interpolate.make_interp_spline. + Refer to Scipy's documentation for further details: + https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.make_interp_spline.html """ cdef int order cdef double dt @@ -392,7 +408,7 @@ cdef class InterCoefficient(Coefficient): cdef complex[:, :] poly cdef object np_arrays - def __init__(self, coeff_arr, tlist, int order): + def __init__(self, coeff_arr, tlist, int order, boundary_conditions, **_): tlist = np.array(tlist, dtype=np.float64) coeff_arr = np.array(coeff_arr, dtype=np.complex128) @@ -416,7 +432,8 @@ cdef class InterCoefficient(Coefficient): elif order >= 2: # Use scipy to compute the spline and transform it to polynomes # as used in scipy's PPoly which is easier for us to use. 
- spline = make_interp_spline(tlist, coeff_arr, k=order) + spline = make_interp_spline(tlist, coeff_arr, k=order, + bc_type=boundary_conditions) # Scipy can move knots, we add them to tlist tlist = np.sort(np.unique(np.concatenate([spline.t, tlist]))) a = np.arange(spline.k+1) @@ -499,11 +516,11 @@ cdef class InterCoefficient(Coefficient): return out @classmethod - def from_PPoly(cls, ppoly): + def from_PPoly(cls, ppoly, **_): return cls.restore(ppoly.x, ppoly.c) @classmethod - def from_Bspline(cls, spline): + def from_Bspline(cls, spline, **_): tlist = np.unique(spline.t) a = np.arange(spline.k+1) a[0] = 1 @@ -717,3 +734,43 @@ cdef class NormCoefficient(Coefficient): cpdef Coefficient copy(self): """Return a copy of the :obj:`Coefficient`.""" return NormCoefficient(self.base.copy()) + + +@cython.auto_pickle(True) +cdef class ConstantCoefficient(Coefficient): + """ + A time-independent coefficient. + + :obj:`ConstantCoefficient` is returned by ``qutip.coefficent.const(value)``. + """ + cdef complex value + + def __init__(self, complex value): + self.value = value + + def replace_arguments(self, _args=None, **kwargs): + """ + Replace the arguments (``args``) of a coefficient. + + Returns a new :obj:`Coefficient` if the coefficient has arguments, or + the original coefficient if it does not. Arguments to replace may be + supplied either in a dictionary as the first position argument, or + passed as keywords, or as a combination of the two. Arguments not + replaced retain their previous values. + + Parameters + ---------- + _args : dict + Dictionary of arguments to replace. + + **kwargs + Arguments to replace. 
+ """ + return self + + cdef complex _call(self, double t) except *: + return self.value + + cpdef Coefficient copy(self): + """Return a copy of the :obj:`Coefficient`.""" + return self diff --git a/qutip/core/cy/qobjevo.pxd b/qutip/core/cy/qobjevo.pxd index de310b6910..07e0d3df7c 100644 --- a/qutip/core/cy/qobjevo.pxd +++ b/qutip/core/cy/qobjevo.pxd @@ -15,10 +15,10 @@ cdef class QobjEvo: cpdef Data _call(QobjEvo self, double t) - cdef double _prepare(QobjEvo self, double t, Data state=*) + cdef object _prepare(QobjEvo self, object t, Data state=*) - cpdef double complex expect_data(QobjEvo self, double t, Data state) except * + cpdef object expect_data(QobjEvo self, object t, Data state) cdef double complex _expect_dense(QobjEvo self, double t, Dense state) except * - cpdef Data matmul_data(QobjEvo self, double t, Data state, Data out=*) + cpdef Data matmul_data(QobjEvo self, object t, Data state, Data out=*) diff --git a/qutip/core/cy/qobjevo.pyx b/qutip/core/cy/qobjevo.pyx index 70fd6df1ae..388551dd39 100644 --- a/qutip/core/cy/qobjevo.pyx +++ b/qutip/core/cy/qobjevo.pyx @@ -99,6 +99,14 @@ cdef class QobjEvo: ``qutip.settings.core["function_coefficient_style"]`` is used. Otherwise the supplied value overrides the global setting. + + boundary_conditions : 2-Tuple, str or None, optional + Boundary conditions for spline evaluation. Default value is `None`. + Correspond to `bc_type` of scipy.interpolate.make_interp_spline. 
+ Refer to Scipy's documentation for further details: + https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.make_interp_spline.html + + Attributes ---------- dims : list @@ -182,7 +190,7 @@ cdef class QobjEvo: """ def __init__(QobjEvo self, Q_object, args=None, tlist=None, order=3, copy=True, compress=True, - function_style=None): + function_style=None, boundary_conditions=None): if isinstance(Q_object, QobjEvo): self.dims = Q_object.dims.copy() self.shape = Q_object.shape @@ -217,21 +225,31 @@ cdef class QobjEvo: self.elements.append( self._read_element( op, copy=copy, tlist=tlist, args=args, order=order, - function_style=function_style + function_style=function_style, + boundary_conditions=boundary_conditions ) ) else: self.elements.append( self._read_element( Q_object, copy=copy, tlist=tlist, args=args, order=order, - function_style=function_style + function_style=function_style, + boundary_conditions=boundary_conditions ) ) if compress: self.compress() - def _read_element(self, op, copy, tlist, args, order, function_style): + def __repr__(self): + cls = self.__class__.__name__ + repr_str = f'{cls}: dims = {self.dims}, shape = {self.shape}, ' + repr_str += f'type = {self.type}, superrep = {self.superrep}, ' + repr_str += f'isconstant = {self.isconstant}, num_elements = {self.num_elements}' + return repr_str + + def _read_element(self, op, copy, tlist, args, order, function_style, + boundary_conditions): """ Read a Q_object item and return an element for that item. 
""" if isinstance(op, Qobj): out = _ConstantElement(op.copy() if copy else op) @@ -239,7 +257,8 @@ cdef class QobjEvo: elif isinstance(op, list): out = _EvoElement( op[0].copy() if copy else op[0], - coefficient(op[1], tlist=tlist, args=args, order=order) + coefficient(op[1], tlist=tlist, args=args, order=order, + boundary_conditions=boundary_conditions) ) qobj = op[0] elif isinstance(op, _BaseElement): @@ -285,6 +304,33 @@ cdef class QobjEvo: return out + @classmethod + def _restore(cls, elements, dims, shape, type, superrep, flags): + """Recreate a QobjEvo without using __init__. """ + cdef QobjEvo out = cls.__new__(cls) + out.elements = elements + out.dims = dims + out.shape = shape + out.type = type + out.superrep = superrep + out._issuper, out._isoper = flags + return out + + def _getstate(self): + """ Obtain the state """ + # For jax pytree representation + # auto_pickle create similar method __getstate__, but since it's + # automatically created, it could change depending on cython version + # etc., so we create our own. + return { + "elements": self.elements, + "dims": self.dims, + "shape": self.shape, + "type": self.type, + "superrep": self.superrep, + "flags": (self._issuper, self._isoper,) + } + def __call__(self, double t, dict _args=None, **kwargs): """ Get the :class:`~Qobj` at ``t``. 
@@ -353,7 +399,7 @@ cdef class QobjEvo: ) return out - cdef double _prepare(QobjEvo self, double t, Data state=None): + cdef object _prepare(QobjEvo self, object t, Data state=None): """ Precomputation before computing getting the element at `t`""" # We keep the function for feedback eventually return t @@ -417,19 +463,19 @@ cdef class QobjEvo: if isinstance(other, QobjEvo): if other.dims != self.dims: raise TypeError("incompatible dimensions" + - str(self.dims) + ", " + str(other.dims)) + str(self.dims) + ", " + str(other.dims)) for element in ( other).elements: self.elements.append(element) elif isinstance(other, Qobj): if other.dims != self.dims: raise TypeError("incompatible dimensions" + - str(self.dims) + ", " + str(other.dims)) + str(self.dims) + ", " + str(other.dims)) self.elements.append(_ConstantElement(other)) elif ( isinstance(other, numbers.Number) and self.dims[0] == self.dims[1] ): - self.elements.append(_ConstantElement(other * qutip.qeye(self.dims[0]))) + self.elements.append(_ConstantElement(other * qutip.qeye_like(self))) else: return NotImplemented return self @@ -763,6 +809,35 @@ cdef class QobjEvo: self.elements = cleaned_elements + def to_list(QobjEvo self): + """ + Restore the QobjEvo to a list form. + + Returns + ------- + list_qevo: list + The QobjEvo as a list, element are either :class:`Qobj` for + constant parts, ``[Qobj, Coefficient]`` for coefficient based term. + The original format of the :class:`Coefficient` is not restored. + Lastly if the original `QobjEvo` is constructed with a function + returning a Qobj, the term is returned as a pair of the original + function and args (``dict``). 
+ """ + out = [] + for element in self.elements: + if isinstance(element, _ConstantElement): + out.append(element.qobj(0)) + elif isinstance(element, _EvoElement): + coeff = element._coefficient + out.append([element.qobj(0), coeff]) + elif isinstance(element, _FuncElement): + func = element._func + args = element._args + out.append([func, args]) + else: + out.append([element, {}]) + return out + ########################################################################### # properties # ########################################################################### @@ -797,7 +872,7 @@ cdef class QobjEvo: ########################################################################### # operation methods # ########################################################################### - def expect(QobjEvo self, double t, state): + def expect(QobjEvo self, object t, state, check_real=True): """ Expectation value of this operator at time ``t`` with the state. @@ -805,9 +880,15 @@ cdef class QobjEvo: ---------- t : float Time of the operator to apply. + state : Qobj right matrix of the product + check_real : bool (True) + Whether to convert the result to a `real` when the imaginary part + is smaller than the real part by a dactor of + ``settings.core['rtol']``. 
+ Returns ------- expect : float or complex @@ -830,11 +911,14 @@ cdef class QobjEvo: raise ValueError("incompatible dimensions " + str(self.dims) + ", " + str(state.dims)) out = self.expect_data(t, state.data) - if out == 0 or (out.real and fabs(out.imag / out.real) < herm_rtol): + if ( + check_real and + (out == 0 or (out.real and fabs(out.imag / out.real) < herm_rtol)) + ): return out.real return out - cpdef double complex expect_data(QobjEvo self, double t, Data state) except *: + cpdef object expect_data(QobjEvo self, object t, Data state): """ Expectation is defined as ``state.adjoint() @ self @ state`` if ``state`` is a vector, or ``state`` is an operator and ``self`` is a @@ -844,7 +928,7 @@ cdef class QobjEvo: if type(state) is Dense: return self._expect_dense(t, state) cdef _BaseElement part - cdef double complex out = 0., coeff + cdef object out = 0. cdef Data part_data cdef object expect_func t = self._prepare(t, state) @@ -857,9 +941,8 @@ cdef class QobjEvo: for element in self.elements: part = (<_BaseElement> element) - coeff = part.coeff(t) part_data = part.data(t) - out += coeff * expect_func(part_data, state) + out += part.coeff(t) * expect_func(part_data, state) return out cdef double complex _expect_dense(QobjEvo self, double t, Dense state) except *: @@ -891,7 +974,7 @@ cdef class QobjEvo: out += coeff * expect_data_dense(part_data, state) return out - def matmul(self, double t, state): + def matmul(self, t, state): """ Product of this operator at time ``t`` to the state. 
``self(t) @ state`` @@ -916,11 +999,11 @@ cdef class QobjEvo: ", " + str(state.dims[0])) return Qobj(self.matmul_data(t, state.data), - dims=[self.dims[0],state.dims[1]], + dims=[self.dims[0], state.dims[1]], copy=False - ) + ) - cpdef Data matmul_data(QobjEvo self, double t, Data state, Data out=None): + cpdef Data matmul_data(QobjEvo self, object t, Data state, Data out=None): """Compute ``out += self(t) @ state``""" cdef _BaseElement part t = self._prepare(t, state) diff --git a/qutip/core/data/__init__.pxd b/qutip/core/data/__init__.pxd index c1b4e6809d..9e1a83faba 100644 --- a/qutip/core/data/__init__.pxd +++ b/qutip/core/data/__init__.pxd @@ -5,6 +5,7 @@ from qutip.core.data cimport dense, csr from qutip.core.data.base cimport Data, idxint from qutip.core.data.dense cimport Dense from qutip.core.data.csr cimport CSR +from qutip.core.data.dia cimport Dia from qutip.core.data.add cimport * from qutip.core.data.adjoint cimport * diff --git a/qutip/core/data/__init__.py b/qutip/core/data/__init__.py index 557aff5c04..e546d947ce 100644 --- a/qutip/core/data/__init__.py +++ b/qutip/core/data/__init__.py @@ -3,6 +3,7 @@ from . import dense, csr from .dense import Dense from .csr import CSR +from .dia import Dia from .base import Data from .add import * @@ -25,6 +26,7 @@ from .tidyup import * from .trace import * from .solve import * +from .extract import * # For operations with mulitple related versions, we just import the module. from . import norm, permute @@ -36,9 +38,14 @@ to.add_conversions([ (Dense, CSR, dense.from_csr, 1), (CSR, Dense, csr.from_dense, 1.4), + (Dia, Dense, dia.from_dense, 1.4), + (Dense, Dia, dense.from_dia, 1.2), + (Dia, CSR, dia.from_csr, 1), + (CSR, Dia, csr.from_dia, 1), ]) to.register_aliases(['csr', 'CSR'], CSR) to.register_aliases(['Dense', 'dense'], Dense) +to.register_aliases(['DIA', 'Dia', 'dia', 'diag'], Dia) from . 
import _creator_utils @@ -46,6 +53,7 @@ create.add_creators([ (_creator_utils.is_data, _creator_utils.data_copy, 100), (_creator_utils.isspmatrix_csr, CSR, 80), + (_creator_utils.isspmatrix_dia, Dia, 80), (_creator_utils.is_nparray, Dense, 80), (_creator_utils.issparse, CSR, 20), (_creator_utils.true, Dense, -np.inf), diff --git a/qutip/core/data/_creator_utils.py b/qutip/core/data/_creator_utils.py index 3f3779e219..da3841c756 100644 --- a/qutip/core/data/_creator_utils.py +++ b/qutip/core/data/_creator_utils.py @@ -2,13 +2,20 @@ Define functions to use as Data creator for `create` in `convert.py`. """ -from scipy.sparse import isspmatrix_csr, issparse +from scipy.sparse import isspmatrix_csr, issparse, isspmatrix_dia import numpy as np from .csr import CSR from .base import Data from .dense import Dense -__all__ = ['is_data', 'is_nparray', 'data_copy', 'isspmatrix_csr', 'issparse'] +__all__ = [ + 'data_copy', + 'is_data', + 'is_nparray', + 'isspmatrix_csr', + 'isspmatrix_dia', + 'issparse' +] def is_data(arg): diff --git a/qutip/core/data/add.pxd b/qutip/core/data/add.pxd index edb30f14ae..dbed69900a 100644 --- a/qutip/core/data/add.pxd +++ b/qutip/core/data/add.pxd @@ -2,11 +2,14 @@ from qutip.core.data.csr cimport CSR from qutip.core.data.dense cimport Dense +from qutip.core.data.dia cimport Dia cdef void add_dense_eq_order_inplace(Dense left, Dense right, double complex scale) cpdef CSR add_csr(CSR left, CSR right, double complex scale=*) cpdef Dense add_dense(Dense left, Dense right, double complex scale=*) +cpdef Dia add_dia(Dia left, Dia right, double complex scale=*) cpdef Dense iadd_dense(Dense left, Dense right, double complex scale=*) cpdef CSR sub_csr(CSR left, CSR right) cpdef Dense sub_dense(Dense left, Dense right) +cpdef Dia sub_dia(Dia left, Dia right) diff --git a/qutip/core/data/add.pyx b/qutip/core/data/add.pyx index 4a24a193a4..45515af87a 100644 --- a/qutip/core/data/add.pyx +++ b/qutip/core/data/add.pyx @@ -9,29 +9,25 @@ from qutip.settings 
import settings from qutip.core.data.base cimport idxint, Data from qutip.core.data.dense cimport Dense +from qutip.core.data.dia cimport Dia +from qutip.core.data.tidyup cimport tidyup_dia from qutip.core.data.csr cimport ( CSR, Accumulator, acc_alloc, acc_free, acc_scatter, acc_gather, acc_reset, ) -from qutip.core.data cimport csr, dense +from qutip.core.data cimport csr, dense, dia cnp.import_array() -cdef extern from *: - void *PyDataMem_NEW(size_t size) - void *PyDataMem_NEW_ZEROED(size_t size, size_t elsize) - void PyDataMem_FREE(void *ptr) - - __all__ = [ - 'add', 'add_csr', 'add_dense', 'iadd_dense', - 'sub', 'sub_csr', 'sub_dense', + 'add', 'add_csr', 'add_dense', 'iadd_dense', 'add_dia', + 'sub', 'sub_csr', 'sub_dense', 'sub_dia', ] cdef int _ONE=1 -cdef void _check_shape(Data left, Data right) nogil except *: +cdef void _check_shape(Data left, Data right) except * nogil: if left.shape[0] != right.shape[0] or left.shape[1] != right.shape[1]: raise ValueError( "incompatible matrix shapes " @@ -217,6 +213,79 @@ cpdef Dense iadd_dense(Dense left, Dense right, double complex scale=1): return left +cpdef Dia add_dia(Dia left, Dia right, double complex scale=1): + _check_shape(left, right) + cdef idxint diag_left=0, diag_right=0, out_diag=0, i + cdef double complex *ptr_out, + cdef double complex *ptr_left + cdef double complex *ptr_right + cdef bint sorted=True + cdef Dia out = dia.empty(left.shape[0], left.shape[1], left.num_diag + right.num_diag) + cdef int length, size=left.shape[1] + + ptr_out = out.data + ptr_left = left.data + ptr_right = right.data + + with nogil: + while diag_left < left.num_diag and diag_right < right.num_diag: + if left.offsets[diag_left] == right.offsets[diag_right]: + out.offsets[out_diag] = left.offsets[diag_left] + blas.zcopy(&size, ptr_left, &_ONE, ptr_out, &_ONE) + blas.zaxpy(&size, &scale, ptr_right, &_ONE, ptr_out, &_ONE) + ptr_left += size + diag_left += 1 + ptr_right += size + diag_right += 1 + elif left.offsets[diag_left] 
<= right.offsets[diag_right]: + out.offsets[out_diag] = left.offsets[diag_left] + blas.zcopy(&size, ptr_left, &_ONE, ptr_out, &_ONE) + ptr_left += size + diag_left += 1 + else: + out.offsets[out_diag] = right.offsets[diag_right] + blas.zcopy(&size, ptr_right, &_ONE, ptr_out, &_ONE) + if scale != 1: + blas.zscal(&size, &scale, ptr_out, &_ONE) + ptr_right += size + diag_right += 1 + if out_diag != 0 and out.offsets[out_diag-1] >= out.offsets[out_diag]: + sorted=False + ptr_out += size + out_diag += 1 + + if diag_left < left.num_diag: + for i in range(left.num_diag - diag_left): + out.offsets[out_diag] = left.offsets[diag_left + i] + if out_diag != 0 and out.offsets[out_diag-1] >= out.offsets[out_diag]: + sorted=False + out_diag += 1 + + length = size * (left.num_diag - diag_left) + blas.zcopy(&length, ptr_left, &_ONE, ptr_out, &_ONE) + + + if diag_right < right.num_diag: + for i in range(right.num_diag - diag_right): + out.offsets[out_diag] = right.offsets[diag_right + i] + if out_diag != 0 and out.offsets[out_diag-1] >= out.offsets[out_diag]: + sorted=False + out_diag += 1 + + length = size * (right.num_diag - diag_right) + blas.zcopy(&length, ptr_right, &_ONE, ptr_out, &_ONE) + if scale != 1: + blas.zscal(&length, &scale, ptr_out, &_ONE) + + out.num_diag = out_diag + + if not sorted: + dia.clean_dia(out, True) + if settings.core['auto_tidyup']: + tidyup_dia(out, settings.core['auto_tidyup_atol'], True) + return out + + cpdef CSR sub_csr(CSR left, CSR right): return add_csr(left, right, -1) @@ -225,13 +294,17 @@ cpdef Dense sub_dense(Dense left, Dense right): return add_dense(left, right, -1) +cpdef Dia sub_dia(Dia left, Dia right): + return add_dia(left, right, -1) + + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect add = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('left', 
_inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('scale', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=1), ]), @@ -250,12 +323,13 @@ add.__doc__ =\ add.add_specialisations([ (Dense, Dense, Dense, add_dense), (CSR, CSR, CSR, add_csr), + (Dia, Dia, Dia, add_dia), ], _defer=True) sub = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_ONLY), ]), name='sub', module=__name__, @@ -271,6 +345,7 @@ sub.__doc__ =\ sub.add_specialisations([ (Dense, Dense, Dense, sub_dense), (CSR, CSR, CSR, sub_csr), + (Dia, Dia, Dia, sub_dia), ], _defer=True) del _inspect, _Dispatcher diff --git a/qutip/core/data/adjoint.pxd b/qutip/core/data/adjoint.pxd index 793aac6409..9330a9b2f9 100644 --- a/qutip/core/data/adjoint.pxd +++ b/qutip/core/data/adjoint.pxd @@ -2,6 +2,7 @@ from qutip.core.data.csr cimport CSR from qutip.core.data.dense cimport Dense +from qutip.core.data.dia cimport Dia cpdef CSR adjoint_csr(CSR matrix) cpdef CSR transpose_csr(CSR matrix) @@ -10,3 +11,7 @@ cpdef CSR conj_csr(CSR matrix) cpdef Dense adjoint_dense(Dense matrix) cpdef Dense transpose_dense(Dense matrix) cpdef Dense conj_dense(Dense matrix) + +cpdef Dia adjoint_dia(Dia matrix) +cpdef Dia transpose_dia(Dia matrix) +cpdef Dia conj_dia(Dia matrix) diff --git a/qutip/core/data/adjoint.pyx b/qutip/core/data/adjoint.pyx index ebff9e4d6c..68ee57637e 100644 --- a/qutip/core/data/adjoint.pyx +++ b/qutip/core/data/adjoint.pyx @@ -8,16 +8,17 @@ cimport cython from qutip.core.data.base cimport idxint from qutip.core.data.csr cimport CSR from qutip.core.data.dense cimport Dense -from qutip.core.data cimport csr, dense +from qutip.core.data.dia cimport Dia +from qutip.core.data cimport csr, dense, 
dia # Import std::conj as `_conj` to avoid clashing with our 'conj' dispatcher. cdef extern from "" namespace "std" nogil: double complex _conj "conj"(double complex x) __all__ = [ - 'adjoint', 'adjoint_csr', 'adjoint_dense', - 'conj', 'conj_csr', 'conj_dense', - 'transpose', 'transpose_csr', 'transpose_dense', + 'adjoint', 'adjoint_csr', 'adjoint_dense', 'adjoint_dia', + 'conj', 'conj_csr', 'conj_dense', 'conj_dia', + 'transpose', 'transpose_csr', 'transpose_dense', 'transpose_dia', ] @@ -109,12 +110,58 @@ cpdef Dense conj_dense(Dense matrix): return out +cpdef Dia adjoint_dia(Dia matrix): + cdef Dia out = dia.empty(matrix.shape[1], matrix.shape[0], matrix.num_diag) + cdef size_t i, new_i, + cdef idxint new_offset, j + with nogil: + out.num_diag = matrix.num_diag + for i in range(matrix.num_diag): + new_i = matrix.num_diag - i - 1 + new_offset = out.offsets[new_i] = -matrix.offsets[i] + for j in range(out.shape[1]): + if (j < new_offset) or (j - new_offset >= matrix.shape[1]): + out.data[new_i * out.shape[1] + j] = 0. + else: + out.data[new_i * out.shape[1] + j] = _conj(matrix.data[i * matrix.shape[1] + j - new_offset]) + return out + + +cpdef Dia transpose_dia(Dia matrix): + cdef Dia out = dia.empty(matrix.shape[1], matrix.shape[0], matrix.num_diag) + cdef size_t i, new_i, + cdef idxint new_offset, j + with nogil: + out.num_diag = matrix.num_diag + for i in range(matrix.num_diag): + new_i = matrix.num_diag - i - 1 + new_offset = out.offsets[new_i] = -matrix.offsets[i] + for j in range(out.shape[1]): + if (j < new_offset) or (j - new_offset >= matrix.shape[1]): + out.data[new_i * out.shape[1] + j] = 0. 
+ else: + out.data[new_i * out.shape[1] + j] = matrix.data[i * matrix.shape[1] + j - new_offset] + return out + + +cpdef Dia conj_dia(Dia matrix): + cdef Dia out = dia.empty_like(matrix) + cdef size_t i, j + with nogil: + out.num_diag = matrix.num_diag + for i in range(matrix.num_diag): + out.offsets[i] = matrix.offsets[i] + for j in range(matrix.shape[1]): + out.data[i * matrix.shape[1] + j] = _conj(matrix.data[i * matrix.shape[1] + j]) + return out + + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect adjoint = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='adjoint', module=__name__, @@ -125,11 +172,12 @@ adjoint.__doc__ = """Hermitian adjoint (matrix conjugate transpose).""" adjoint.add_specialisations([ (Dense, Dense, adjoint_dense), (CSR, CSR, adjoint_csr), + (Dia, Dia, adjoint_dia), ], _defer=True) transpose = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='transpose', module=__name__, @@ -140,11 +188,12 @@ transpose.__doc__ = """Transpose of a matrix.""" transpose.add_specialisations([ (Dense, Dense, transpose_dense), (CSR, CSR, transpose_csr), + (Dia, Dia, transpose_dia), ], _defer=True) conj = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='conj', module=__name__, @@ -155,6 +204,7 @@ conj.__doc__ = """Element-wise conjugation of a matrix.""" conj.add_specialisations([ (Dense, Dense, conj_dense), (CSR, CSR, conj_csr), + (Dia, Dia, conj_dia), ], _defer=True) del _inspect, _Dispatcher diff --git a/qutip/core/data/base.pyx b/qutip/core/data/base.pyx index a463bf3834..5802f3b62d 100644 --- a/qutip/core/data/base.pyx +++ 
b/qutip/core/data/base.pyx @@ -1,4 +1,5 @@ #cython: language_level=3 +#cython: c_api_binop_methods=True import numpy as np cimport numpy as cnp diff --git a/qutip/core/data/constant.py b/qutip/core/data/constant.py index a60341a364..8061bb8832 100644 --- a/qutip/core/data/constant.py +++ b/qutip/core/data/constant.py @@ -3,13 +3,17 @@ # (e.g. `create`) should not be here, but should be defined within the # higher-level components of QuTiP instead. -from . import csr, dense +from . import csr, dense, dia from .csr import CSR +from .dia import Dia from .dense import Dense +from .base import Data from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect -__all__ = ['zeros', 'identity'] +__all__ = ['zeros', 'identity', 'zeros_like', 'identity_like', + 'zeros_like_dense', 'identity_like_dense', + 'zeros_like_data', 'identity_like_data'] zeros = _Dispatcher( _inspect.Signature([ @@ -37,6 +41,7 @@ """ zeros.add_specialisations([ (CSR, csr.zeros), + (Dia, dia.zeros), (Dense, dense.zeros), ], _defer=True) @@ -67,7 +72,61 @@ """ identity.add_specialisations([ (CSR, csr.identity), + (Dia, dia.identity), (Dense, dense.identity), ], _defer=True) + +def zeros_like_data(data, /): + """ + Create an zeros matrix of the same type and shape. + """ + return zeros[type(data)](*data.shape) + + +def zeros_like_dense(data, /): + """ + Create an zeros matrix of the same type and shape. + """ + return dense.zeros(*data.shape, fortran=data.fortran) + + +def identity_like_data(data, /): + """ + Create an identity matrix of the same type and shape. + """ + if not data.shape[0] == data.shape[1]: + raise ValueError("Can't create and identity like a non square matrix.") + return identity[type(data)](data.shape[0]) + + +def identity_like_dense(data, /): + """ + Create an identity matrix of the same type and shape. 
+ """ + if not data.shape[0] == data.shape[1]: + raise ValueError("Can't create and identity like a non square matrix.") + return dense.identity(data.shape[0], fortran=data.fortran) + + +identity_like = _Dispatcher( + identity_like_data, name='identity_like', + module=__name__, inputs=("data",), out=False, +) +identity_like.add_specialisations([ + (Data, identity_like_data), + (Dense, identity_like_dense), +], _defer=True) + + +zeros_like = _Dispatcher( + zeros_like_data, name='zeros_like', + module=__name__, inputs=("data",), out=False, +) +zeros_like.add_specialisations([ + (Data, zeros_like_data), + (Dense, zeros_like_dense), +], _defer=True) + + del _Dispatcher, _inspect diff --git a/qutip/core/data/convert.pyx b/qutip/core/data/convert.pyx index 1bf7713cc2..d2681551b1 100644 --- a/qutip/core/data/convert.pyx +++ b/qutip/core/data/convert.pyx @@ -18,12 +18,50 @@ import numbers import numpy as np from scipy.sparse import dok_matrix, csgraph - cimport cython +from qutip.core.data.base cimport Data __all__ = ['to', 'create'] +class _Epsilon: + """ + Constant for an small weight non-null weight. + Use to set `Data` specialisation just over direct specialisation. + """ + def __repr__(self): + return "EPSILON" + + def __eq__(self, other): + if isinstance(other, _Epsilon): + return True + return NotImplemented + + def __add__(self, other): + if isinstance(other, _Epsilon): + return self + return other + + def __radd__(self, other): + if isinstance(other, _Epsilon): + return self + return other + + def __lt__(self, other): + """ positive number > _Epsilon > 0 """ + if isinstance(other, _Epsilon): + return False + return other > 0. + + def __gt__(self, other): + if isinstance(other, _Epsilon): + return False + return other <= 0. 
+ + +EPSILON = _Epsilon() + + def _raise_if_unconnected(dtype_list, weights): unconnected = {} for i, type_ in enumerate(dtype_list): @@ -70,19 +108,23 @@ cdef class _converter: + ">") +def identity_converter(arg): + return arg + + cdef class _partial_converter: """Convert from any known data-layer type into the type `x.to`.""" - cdef dict converters + cdef object converter cdef readonly type to - def __init__(self, converters, to_type): - self.converters = dict(converters) + def __init__(self, converter, to_type): + self.converter = converter self.to = to_type def __call__(self, arg): try: - return self.converters[type(arg)](arg) + return self.converter[self.to, type(arg)](arg) except KeyError: raise TypeError("unknown type of input: " + str(arg)) from None @@ -201,6 +243,8 @@ cdef class _to: safe just to leave this blank; it is always at best an approximation. The currently defined weights are accessible in the `weights` attribute of this object. + Weight of ~0.001 are should be used in case when no conversion + is needed or ``converter = lambda mat : mat``. """ for arg in converters: if len(arg) == 3: @@ -259,6 +303,11 @@ cdef class _to: self.weight[(to_t, from_t)] = weight self._convert[(to_t, from_t)] =\ _converter(convert[::-1], to_t, from_t) + for dtype in self.dtypes: + self.weight[(dtype, Data)] = 1. + self.weight[(Data, dtype)] = EPSILON + self._convert[(dtype, Data)] = _partial_converter(self, dtype) + self._convert[(Data, dtype)] = identity_converter for dispatcher in self.dispatchers: dispatcher.rebuild_lookup() @@ -310,7 +359,7 @@ cdef class _to: type. 
""" if type(dtype) is type: - if dtype not in self.dtypes: + if dtype not in self.dtypes and dtype is not Data: raise ValueError( "Type is not a data-layer type: " + repr(dtype)) return dtype @@ -334,10 +383,7 @@ cdef class _to: raise KeyError(arg) to_t = self.parse(arg[0]) if len(arg) == 1: - converters = { - from_t: self._convert[to_t, from_t] for from_t in self.dtypes - } - return _partial_converter(converters, to_t) + return _partial_converter(self, to_t) from_t = self.parse(arg[1]) return self._convert[to_t, from_t] diff --git a/qutip/core/data/csr.pxd b/qutip/core/data/csr.pxd index 44a76256c8..4c813564ef 100644 --- a/qutip/core/data/csr.pxd +++ b/qutip/core/data/csr.pxd @@ -12,6 +12,7 @@ cimport numpy as cnp from qutip.core.data cimport base from qutip.core.data.dense cimport Dense +from qutip.core.data.dia cimport Dia cdef class CSR(base.Data): cdef double complex *data @@ -161,3 +162,7 @@ cpdef CSR from_dense(Dense matrix) cdef CSR from_coo_pointers(base.idxint *rows, base.idxint *cols, double complex *data, base.idxint n_rows, base.idxint n_cols, base.idxint nnz, double tol=*) +cpdef CSR from_dia(Dia matrix) + +cpdef CSR _from_csr_blocks(base.idxint[:] block_rows, base.idxint[:] block_cols, CSR[:] block_ops, + base.idxint n_blocks, base.idxint block_size) diff --git a/qutip/core/data/csr.pyx b/qutip/core/data/csr.pyx index 18989ff235..182c79714b 100644 --- a/qutip/core/data/csr.pyx +++ b/qutip/core/data/csr.pyx @@ -24,11 +24,8 @@ except ImportError: from scipy.sparse._data import _data_matrix as scipy_data_matrix from scipy.linalg cimport cython_blas as blas -from qutip.core.data cimport base, Dense -from qutip.core.data.add cimport add_csr, sub_csr +from qutip.core.data cimport base, Dense, Dia from qutip.core.data.adjoint cimport adjoint_csr, transpose_csr, conj_csr -from qutip.core.data.mul cimport mul_csr, neg_csr -from qutip.core.data.matmul cimport matmul_csr from qutip.core.data.trace cimport trace_csr from qutip.core.data.tidyup cimport tidyup_csr 
from .base import idxint_dtype @@ -533,6 +530,9 @@ cpdef CSR empty(base.idxint rows, base.idxint cols, base.idxint size): PyDataMem_NEW(size * sizeof(base.idxint)) out.row_index =\ PyDataMem_NEW(row_size * sizeof(base.idxint)) + if not out.data: raise MemoryError() + if not out.col_index: raise MemoryError() + if not out.row_index: raise MemoryError() # Set the number of non-zero elements to 0. out.row_index[rows] = 0 return out @@ -642,6 +642,32 @@ cdef CSR from_coo_pointers( return out +cpdef CSR from_dia(Dia matrix): + if matrix.num_diag == 0: + return zeros(*matrix.shape) + mat = matrix.as_scipy() + ordered = np.argsort(mat.offsets) + nnz = len(mat.offsets) * max(mat.shape) + ptrs = np.zeros(mat.shape[0]+1, dtype=idxint_dtype) + indices = np.zeros(nnz, dtype=idxint_dtype) + data = np.zeros(nnz, dtype=complex) + + ptr = 0 + for row in range(mat.shape[0]): + for idx in ordered: + off = mat.offsets[idx] + if off + row < 0: + continue + elif off + row >= mat.shape[1]: + break + indices[ptr] = off + row + data[ptr] = mat.data[idx, off + row] + ptr += 1 + ptrs[row + 1] = ptr + + return CSR((data, indices, ptrs), matrix.shape, copy=False) + + cdef inline base.idxint _diagonal_length( base.idxint offset, base.idxint n_rows, base.idxint n_cols, ) nogil: @@ -847,3 +873,122 @@ def diags(diagonals, offsets=None, shape=None): return diags_( diagonals_, np.array(offsets_, dtype=idxint_dtype), n_rows, n_cols, ) + + +cpdef CSR _from_csr_blocks( + base.idxint[:] block_rows, base.idxint[:] block_cols, CSR[:] block_ops, + base.idxint n_blocks, base.idxint block_size +): + """ + Construct a CSR from non-overlapping blocks. + + Each operator in ``block_ops`` should be a square CSR operator with + shape ``(block_size, block_size)``. The output operator consists of + ``n_blocks`` by ``n_blocks`` blocks and thus has shape + ``(n_blocks * block_size, n_blocks * block_size)``. + + None of the operators should overlap (i.e. the list of block row and + column pairs should be unique). 
+ + Parameters + ---------- + block_rows : sequence of base.idxint integers + The block row for each operator. The block row should be in + ``range(0, n_blocks)``. + + block_cols : sequence of base.idxint integers + The block column for each operator. The block column should be in + ``range(0, n_blocks)``. + + block_ops : sequence of CSR matrixes + The operators corresponding to the rows and columns in ``block_rows`` + and ``block_cols``. + + n_blocks : base.idxint + Number of blocks. The shape of the final matrix is + (n_blocks * block, n_blocks * block). + + block_size : base.idxint + Size of each block. The shape of matrices in ``block_ops`` is + ``(block_size, block_size)``. + """ + cdef CSR op + cdef base.idxint shape = n_blocks * block_size + cdef base.idxint nnz_ = 0 + cdef base.idxint n_ops = len(block_ops) + cdef base.idxint i, j + cdef base.idxint row_idx, col_idx + + # check arrays are the same length + if len(block_rows) != n_ops or len(block_cols) != n_ops: + raise ValueError( + "The arrays block_rows, block_cols and block_ops should have" + " the same length." + ) + + if n_ops == 0: + return zeros(shape, shape) + + # check op shapes and calculate nnz + for op in block_ops: + nnz_ += nnz(op) + if op.shape[0] != block_size or op.shape[1] != block_size: + raise ValueError( + "Block operators (block_ops) do not have the correct shape." + ) + + # check ops are ordered by (row, column) + row_idx = block_rows[0] + col_idx = block_cols[0] + for i in range(1, n_ops): + if ( + block_rows[i] < row_idx or + (block_rows[i] == row_idx and block_cols[i] <= col_idx) + ): + raise ValueError( + "The arrays block_rows and block_cols must be sorted" + " by (row, column)." 
+ ) + row_idx = block_rows[i] + col_idx = block_cols[i] + + if nnz_ == 0: + return zeros(shape, shape) + + cdef CSR out = empty(shape, shape, nnz_) + cdef base.idxint op_idx = 0 + cdef base.idxint prev_op_idx = 0 + cdef base.idxint end = 0 + cdef base.idxint row_pos, col_pos + cdef base.idxint op_row, op_row_start, op_row_end, op_row_len + + out.row_index[0] = 0 + + for row_idx in range(n_blocks): + prev_op_idx = op_idx + while op_idx < n_ops: + if block_rows[op_idx] != row_idx: + break + op_idx += 1 + + row_pos = row_idx * block_size + for op_row in range(block_size): + for i in range(prev_op_idx, op_idx): + op = block_ops[i] + if nnz(op) == 0: + # empty CSR matrices have uninitialized row_index entries. + # it's unclear whether users should ever see such matrixes + # but we support them here anyway. + continue + col_idx = block_cols[i] + col_pos = col_idx * block_size + op_row_start = op.row_index[op_row] + op_row_end = op.row_index[op_row + 1] + op_row_len = op_row_end - op_row_start + for j in range(op_row_len): + out.col_index[end + j] = op.col_index[op_row_start + j] + col_pos + out.data[end + j] = op.data[op_row_start + j] + end += op_row_len + out.row_index[row_pos + op_row + 1] = end + + return out diff --git a/qutip/core/data/dense.pxd b/qutip/core/data/dense.pxd index 3c3dad938a..f509def92b 100644 --- a/qutip/core/data/dense.pxd +++ b/qutip/core/data/dense.pxd @@ -3,11 +3,12 @@ cimport numpy as cnp from . 
cimport base -from .csr cimport CSR +from qutip.core.data.csr cimport CSR +from qutip.core.data.dia cimport Dia cdef class Dense(base.Data): cdef double complex *data - cdef bint fortran + cdef readonly bint fortran cdef object _np cdef bint _deallocate cdef void _fix_flags(Dense self, object array, bint make_owner=*) @@ -28,3 +29,4 @@ cpdef Dense zeros(base.idxint rows, base.idxint cols, bint fortran=*) cpdef Dense identity(base.idxint dimension, double complex scale=*, bint fortran=*) cpdef Dense from_csr(CSR matrix, bint fortran=*) +cpdef Dense from_dia(Dia matrix) diff --git a/qutip/core/data/dense.pyx b/qutip/core/data/dense.pyx index 99c4f28a49..1fc7f35670 100644 --- a/qutip/core/data/dense.pyx +++ b/qutip/core/data/dense.pyx @@ -11,11 +11,8 @@ cimport numpy as cnp from scipy.linalg cimport cython_blas as blas from .base import EfficiencyWarning -from qutip.core.data cimport base, CSR -from qutip.core.data.add cimport add_dense, sub_dense +from qutip.core.data cimport base, CSR, Dia from qutip.core.data.adjoint cimport adjoint_dense, transpose_dense, conj_dense -from qutip.core.data.mul cimport mul_dense, neg_dense -from qutip.core.data.matmul cimport matmul_dense from qutip.core.data.trace cimport trace_dense cnp.import_array() @@ -123,6 +120,7 @@ cdef class Dense(base.Data): cdef Dense out = Dense.__new__(Dense) cdef size_t size = self.shape[0]*self.shape[1]*sizeof(double complex) cdef double complex *ptr = PyDataMem_NEW(size) + if not ptr: raise MemoryError() memcpy(ptr, self.data, size) out.shape = self.shape out.data = ptr @@ -165,6 +163,7 @@ cdef class Dense(base.Data): """ cdef size_t size = self.shape[0]*self.shape[1]*sizeof(double complex) cdef double complex *ptr = PyDataMem_NEW(size) + if not ptr: raise MemoryError() memcpy(ptr, self.data, size) cdef object out =\ cnp.PyArray_SimpleNewFromData(2, [self.shape[0], self.shape[1]], @@ -247,6 +246,7 @@ cpdef Dense empty(base.idxint rows, base.idxint cols, bint fortran=True): cdef Dense out = 
Dense.__new__(Dense) out.shape = (rows, cols) out.data = PyDataMem_NEW(rows * cols * sizeof(double complex)) + if not out.data: raise MemoryError() out._deallocate = True out.fortran = fortran return out @@ -267,6 +267,7 @@ cpdef Dense zeros(base.idxint rows, base.idxint cols, bint fortran=True): out.shape = (rows, cols) out.data =\ PyDataMem_NEW_ZEROED(rows * cols, sizeof(double complex)) + if not out.data: raise MemoryError() out.fortran = fortran out._deallocate = True return out @@ -293,6 +294,7 @@ cpdef Dense from_csr(CSR matrix, bint fortran=False): PyDataMem_NEW_ZEROED(out.shape[0]*out.shape[1], sizeof(double complex)) ) + if not out.data: raise MemoryError() out.fortran = fortran out._deallocate = True cdef size_t row, ptr_in, ptr_out, row_stride, col_stride @@ -306,6 +308,10 @@ cpdef Dense from_csr(CSR matrix, bint fortran=False): return out +cpdef Dense from_dia(Dia matrix): + return Dense(matrix.to_array(), copy=False) + + cdef inline base.idxint _diagonal_length( base.idxint offset, base.idxint n_rows, base.idxint n_cols, ) nogil: diff --git a/qutip/core/data/dia.pxd b/qutip/core/data/dia.pxd new file mode 100644 index 0000000000..34bc36eadd --- /dev/null +++ b/qutip/core/data/dia.pxd @@ -0,0 +1,37 @@ +#cython: language_level=3 + +# from cpython cimport mem +# from libcpp.algorithm cimport sort +# from libc.math cimport fabs + +# cdef extern from *: +# void *PyMem_Calloc(size_t n, size_t elsize) + +# import numpy as np +# cimport numpy as cnp + +from qutip.core.data cimport base +from qutip.core.data.dense cimport Dense +from qutip.core.data.csr cimport CSR + +cdef class Dia(base.Data): + cdef double complex *data + cdef base.idxint *offsets + cdef readonly size_t num_diag, _max_diag + cdef object _scipy + cdef bint _deallocate + cpdef Dia copy(Dia self) + cpdef object as_scipy(Dia self, bint full=*) + cpdef double complex trace(Dia self) + cpdef Dia adjoint(Dia self) + cpdef Dia conj(Dia self) + cpdef Dia transpose(Dia self) + +cpdef Dia 
fast_from_scipy(object sci) +cpdef Dia empty(base.idxint rows, base.idxint cols, base.idxint num_diag) +cpdef Dia empty_like(Dia other) +cpdef Dia zeros(base.idxint rows, base.idxint cols) +cpdef Dia identity(base.idxint dimension, double complex scale=*) +cpdef Dia from_dense(Dense matrix) +cpdef Dia from_csr(CSR matrix) +cpdef Dia clean_dia(Dia matrix, bint inplace=*) diff --git a/qutip/core/data/dia.pyx b/qutip/core/data/dia.pyx new file mode 100644 index 0000000000..2b37d09159 --- /dev/null +++ b/qutip/core/data/dia.pyx @@ -0,0 +1,537 @@ +#cython: language_level=3 +#cython: boundscheck=False, wraparound=False, initializedcheck=False + +from libc.string cimport memset, memcpy + +from libcpp cimport bool +from libcpp.algorithm cimport sort +from libc.math cimport fabs +cimport cython + +from cpython cimport mem + +import numbers +import warnings + +import numpy as np +cimport numpy as cnp +import scipy.sparse +from scipy.sparse import dia_matrix as scipy_dia_matrix +try: + from scipy.sparse.data import _data_matrix as scipy_data_matrix +except ImportError: + # The file data was renamed to _data from scipy 1.8.0 + from scipy.sparse._data import _data_matrix as scipy_data_matrix +from scipy.linalg cimport cython_blas as blas + +from qutip.core.data cimport base, Dense, CSR +from qutip.core.data.adjoint import adjoint_dia, transpose_dia, conj_dia +from qutip.core.data.trace import trace_dia +from qutip.core.data.tidyup import tidyup_dia +from .base import idxint_dtype +from qutip.settings import settings + +cnp.import_array() + +cdef extern from *: + void PyArray_ENABLEFLAGS(cnp.ndarray arr, int flags) + void *PyDataMem_NEW(size_t size) + void PyDataMem_FREE(void *ptr) + +__all__ = ['Dia'] + +cdef object _dia_matrix(data, offsets, shape): + """ + Factory method of scipy diag_matrix: we skip all the index type-checking + because this takes tens of microseconds, and we already know we're in + a sensible format. 
+ """ + cdef object out = scipy_dia_matrix.__new__(scipy_dia_matrix) + # `_data_matrix` is the first object in the inheritance chain which + # doesn't have a really slow __init__. + scipy_data_matrix.__init__(out) + out.data = data + out.offsets = offsets + out._shape = shape + return out + + +cdef tuple _count_element(complex[:] line, size_t length): + cdef size_t pre = 0, post = length + while pre < length and line[pre] == 0.: + pre += 1 + while post and line[post-1] == 0.: + post -= 1 + return (post, length - pre) + + +cdef class Dia(base.Data): + def __cinit__(self, *args, **kwargs): + self._deallocate = True + + def __init__(self, arg=None, shape=None, bint copy=True, bint tidyup=False): + cdef size_t ptr + cdef base.idxint col + cdef object data, offsets + + if isinstance(arg, scipy.sparse.spmatrix): + arg = arg.todia() + if shape is not None and shape != arg.shape: + raise ValueError("".join([ + "shapes do not match: ", str(shape), " and ", str(arg.shape), + ])) + shape = arg.shape + # + arg = (arg.data, arg.offsets) + if not isinstance(arg, tuple): + raise TypeError("arg must be a scipy matrix or tuple") + if len(arg) != 2: + raise ValueError("arg must be a (data, offsets) tuple") + data = np.array(arg[0], dtype=np.complex128, copy=copy, order='C') + offsets = np.array(arg[1], dtype=idxint_dtype, copy=copy, order='C') + + self.num_diag = offsets.shape[0] + self._max_diag = self.num_diag + + if shape is None: + warnings.warn("instantiating Dia matrix of unknown shape") + nrows = 0 + ncols = 0 + for i in range(self.num_diag): + pre, post = _count_element(data[i], data.shape[1]) + if offsets[i] >= 0: + row = post + col = post + offsets[i] + else: + row = pre - offsets[i] + col = pre + nrows = nrows if nrows > row else row + ncols = ncols if ncols > col else col + self.shape = (nrows, ncols) + else: + if not isinstance(shape, tuple): + raise TypeError("shape must be a 2-tuple of positive ints") + if not (len(shape) == 2 + and isinstance(shape[0], 
numbers.Integral) + and isinstance(shape[1], numbers.Integral) + and shape[0] > 0 + and shape[1] > 0): + raise ValueError("shape must be a 2-tuple of positive ints") + self.shape = shape + + # Scipy support ``data`` with diag of any length. They can be sorter if + # the last columns are empty or have extra unused columns at the end. + if data.shape[0] != 0 and data.shape[1] != self.shape[1]: + new_data = np.zeros((self.num_diag, self.shape[1]), dtype=np.complex128, order='C') + copy_length = min(data.shape[1], self.shape[1]) + new_data[:, :copy_length] = data[:, :copy_length] + data = new_data + + self._deallocate = False + self.data = cnp.PyArray_GETPTR1(data, 0) + self.offsets = cnp.PyArray_GETPTR1(offsets, 0) + + self._scipy = _dia_matrix(data, offsets, self.shape) + if tidyup: + tidyup_dia(self, settings.core['auto_tidyup_atol'], True) + + def __reduce__(self): + return (fast_from_scipy, (self.as_scipy(),)) + + cpdef Dia copy(self): + """ + Return a complete (deep) copy of this object. + + If the type currently has a scipy backing, such as that produced by + `as_scipy`, this will not be copied. The backing is a view onto our + data, and a straight copy of this view would be incorrect. We do not + create a new view at copy time, since the user may only access this + through a creation method, and creating it ahead of time would incur an + unnecessary speed penalty for users who do not need it (including + low-level C code). + """ + cdef Dia out = empty_like(self) + out.num_diag = self.num_diag + memcpy(out.data, self.data, self.num_diag * self.shape[1] * sizeof(double complex)) + memcpy(out.offsets, self.offsets, self.num_diag * sizeof(base.idxint)) + + return out + + cpdef object to_array(self): + """ + Get a copy of this data as a full 2D, C-contiguous NumPy array. This + is not a view onto the data, and changes to new array will not affect + the original data structure. 
+ """ + cdef cnp.npy_intp *dims = [self.shape[0], self.shape[1]] + cdef object out = cnp.PyArray_ZEROS(2, dims, cnp.NPY_COMPLEX128, 0) + cdef size_t col, i, nrows = self.shape[0] + cdef base.idxint diag + for i in range(self.num_diag): + diag = self.offsets[i] + for col in range(self.shape[1]): + if col - diag < 0 or col - diag >= nrows: + continue + out[(col-diag), col] = self.data[i * self.shape[1] + col] + return out + + cpdef object as_scipy(self, bint full=False): + """ + Get a view onto this object as a `scipy.sparse.dia_matrix`. The + underlying data structures are exposed, such that modifications to the + `data` and `offsets` buffers in the resulting object will + modify this object too. + """ + # We store a reference to the scipy matrix not only for caching this + # relatively expensive method, but also because we transferred + # ownership of our data to the numpy arrays, and we can't allow them to + # be collected while we're alive. + if self._scipy is not None: + return self._scipy + cdef cnp.npy_intp num_diag = self.num_diag if not full else self._max_diag + cdef cnp.npy_intp size = self.shape[1] + data = cnp.PyArray_SimpleNewFromData(2, [num_diag, size], + cnp.NPY_COMPLEX128, + self.data) + offsets = cnp.PyArray_SimpleNewFromData(1, [num_diag], + base.idxint_DTYPE, + self.offsets) + PyArray_ENABLEFLAGS(data, cnp.NPY_ARRAY_OWNDATA) + PyArray_ENABLEFLAGS(offsets, cnp.NPY_ARRAY_OWNDATA) + self._deallocate = False + self._scipy = _dia_matrix(data, offsets, self.shape) + return self._scipy + + cpdef double complex trace(self): + return trace_dia(self) + + cpdef Dia adjoint(self): + return adjoint_dia(self) + + cpdef Dia conj(self): + return conj_dia(self) + + cpdef Dia transpose(self): + return transpose_dia(self) + + def __repr__(self): + return "".join([ + "Dia(shape=", str(self.shape), ", num_diag=", str(self.num_diag), ")", + ]) + + def __str__(self): + return self.__repr__() + + def __dealloc__(self): + # If we have a reference to a scipy type, then 
we've passed ownership + # of the data to numpy, so we let it handle refcounting and we don't + # need to deallocate anything ourselves. + if not self._deallocate: + return + if self.data != NULL: + PyDataMem_FREE(self.data) + if self.offsets != NULL: + PyDataMem_FREE(self.offsets) + + +cpdef Dia fast_from_scipy(object sci): + """ + Fast path construction from scipy.sparse.csr_matrix. This does _no_ type + checking on any of the inputs, and should consequently be considered very + unsafe. This is primarily for use in the unpickling operation. + """ + cdef Dia out = Dia.__new__(Dia) + out.shape = sci.shape + out._deallocate = False + out._scipy = sci + out.data = cnp.PyArray_GETPTR1(sci.data, 0) + out.offsets = cnp.PyArray_GETPTR1(sci.offsets, 0) + out.num_diag = sci.offsets.shape[0] + out._max_diag = sci.offsets.shape[0] + return out + + +cpdef Dia empty(base.idxint rows, base.idxint cols, base.idxint num_diag): + """ + Allocate an empty Dia matrix of the given shape, ``with num_diag`` + diagonals. + + This does not initialise any of the memory returned. + """ + if num_diag < 0: + raise ValueError("num_diag must be a positive integer.") + cdef Dia out = Dia.__new__(Dia) + out.shape = (rows, cols) + out.num_diag = 0 + # Python doesn't like allocating nothing. + if num_diag == 0: + num_diag += 1 + out._max_diag = num_diag + out.data =\ + PyDataMem_NEW(cols * num_diag * sizeof(double complex)) + out.offsets =\ + PyDataMem_NEW(num_diag * sizeof(base.idxint)) + if not out.data: raise MemoryError() + if not out.offsets: raise MemoryError() + return out + + +cpdef Dia empty_like(Dia other): + return empty(other.shape[0], other.shape[1], other.num_diag) + + +cpdef Dia zeros(base.idxint rows, base.idxint cols): + """ + Allocate the zero matrix with a given shape. There will not be any room in + the `data` and `col_index` buffers to add new elements. 
+ """ + # We always allocate matrices with at least one element to ensure that we + # actually _are_ asking for memory (Python doesn't like allocating nothing) + cdef Dia out = empty(rows, cols, 0) + memset(out.data, 0, out.shape[1] * sizeof(double complex)) + out.offsets[0] = 0 + return out + + +cpdef Dia identity(base.idxint dimension, double complex scale=1): + """ + Return a square matrix of the specified dimension, with a constant along + the diagonal. By default this will be the identity matrix, but if `scale` + is passed, then the result will be `scale` times the identity. + """ + cdef Dia out = empty(dimension, dimension, 1) + for i in range(dimension): + out.data[i] = scale + out.offsets[0] = 0 + out.num_diag = 1 + return out + + +@cython.boundscheck(True) +cpdef Dia from_dense(Dense matrix): + cdef Dia out = empty(matrix.shape[0], matrix.shape[1], matrix.shape[0] + matrix.shape[1] - 1) + memset(out.data, 0, out._max_diag * out.shape[1] * sizeof(double complex)) + cdef size_t diag_, ptr_in, ptr_out=0, stride + cdef row, col + + out.num_diag = matrix.shape[0] + matrix.shape[1] - 1 + for i in range(matrix.shape[0] + matrix.shape[1] - 1): + out.offsets[i] = i -matrix.shape[0] + 1 + strideR = 1 if matrix.fortran else matrix.shape[1] + strideC = 1 if not matrix.fortran else matrix.shape[0] + + for row in range(matrix.shape[0]): + for col in range(matrix.shape[1]): + out.data[(col - row + matrix.shape[0] - 1) * out.shape[1] + col] = matrix.data[row * strideR + col * strideC] + + if settings.core["auto_tidyup"]: + tidyup_dia(out, settings.core["auto_tidyup_atol"], True) + + return out + + +cpdef Dia from_csr(CSR matrix): + out_diag = set() + for row in range(matrix.shape[0]): + for ptr in range(matrix.row_index[row], matrix.row_index[row+1]): + out_diag.add(matrix.col_index[ptr] - row) + data = np.zeros((len(out_diag), matrix.shape[1]), dtype=complex) + diags = np.sort(np.fromiter(out_diag, idxint_dtype, len(out_diag))) + for row in range(matrix.shape[0]): + for 
ptr in range(matrix.row_index[row], matrix.row_index[row+1]): + diag = matrix.col_index[ptr] - row + idx = np.searchsorted(diags, diag) + data[idx, matrix.col_index[ptr]] = matrix.data[ptr] + return Dia((data, diags), shape=matrix.shape, copy=False) + + +cpdef Dia clean_dia(Dia matrix, bint inplace=False): + """ + Sort and sum duplicates of offsets. + Set out of bound values to zeros. + """ + cdef Dia out = matrix if inplace else matrix.copy() + cdef base.idxint diag=0, new_diag=0, start, end, col + cdef double complex zONE=1. + cdef bint has_duplicate + cdef int length=out.shape[1], ONE=1 + + if out.num_diag == 0: + return out + + # We sort using insertion sort on the offsets, summing data of duplicated. + # This does not scale well with large number of diagonal + for new_diag in range(out.num_diag): + smallest_offsets = out.offsets[new_diag] + smallest_diag = new_diag + comp_diag = new_diag + 1 + has_duplicate = False + for comp_diag in range(new_diag + 1, out.num_diag): + if out.offsets[comp_diag] < smallest_offsets: + smallest_offsets = out.offsets[comp_diag] + smallest_diag = comp_diag + has_duplicate = False + elif out.offsets[comp_diag] == smallest_offsets: + blas.zaxpy( + &length, &zONE, + &out.data[comp_diag * out.shape[1]], &ONE, + &out.data[smallest_diag * out.shape[1]], &ONE + ) + out.offsets[comp_diag] = out.shape[1] + + if smallest_offsets == out.shape[1]: + new_diag -= 1 + break + if new_diag == smallest_diag: + continue + blas.zswap(&length, &out.data[new_diag * out.shape[1]], &ONE, &out.data[smallest_diag * out.shape[1]], &ONE) + out.offsets[smallest_diag] = out.offsets[new_diag] + out.offsets[new_diag] = smallest_offsets + + out.num_diag = new_diag + 1 + for diag in range(out.num_diag): + start = max(0, out.offsets[diag]) + end = min(out.shape[1], out.shape[0] + out.offsets[diag]) + for col in range(start): + out.data[diag * out.shape[1] + col] = 0. + for col in range(end, out.shape[1]): + out.data[diag * out.shape[1] + col] = 0. 
+ + if out._scipy is not None: + out._scipy.data = out._scipy.data[:out.num_diag] + out._scipy.offsets = out._scipy.offsets[:out.num_diag] + return out + + +cdef inline base.idxint _diagonal_length( + base.idxint offset, base.idxint n_rows, base.idxint n_cols, +) nogil: + if offset > 0: + return n_rows if offset <= n_cols - n_rows else n_cols - offset + return n_cols if offset > n_cols - n_rows else n_rows + offset + + +cdef Dia diags_( + list diagonals, base.idxint[:] offsets, + base.idxint n_rows, base.idxint n_cols, +): + """ + Construct a Dia matrix from a list of diagonals and their offsets. The + offsets are assumed to be in sorted order. This is the C-only interface to + diag.diags, and inputs are not sanity checked (use the Python interface for + that). + + Parameters + ---------- + diagonals : list of indexable of double complex + The entries (including zeros) that should be placed on the diagonals in + the output matrix. Each entry must have enough entries in it to fill + the relevant diagonal (not checked). + + offsets : idxint[:] + The indices of the diagonals. These should be sorted and without + duplicates. `offsets[i]` is the location of the values `diagonals[i]`. + An offset of 0 is the main diagonal, positive values are above the main + diagonal and negative ones are below the main diagonal. + + n_rows, n_cols : idxint + The shape of the output. The result does not need to be square, but + the diagonals must be of the correct length to fit in. + """ + out = empty(n_rows, n_cols, len(offsets)) + out.num_diag = len(offsets) + for i in range(len(offsets)): + out.offsets[i] = offsets[i] + offset = max(offsets[i], 0) + for col in range(len(diagonals[i])): + out.data[i * out.shape[1] + col + offset] = diagonals[i][col] + return out + +@cython.wraparound(True) +def diags(diagonals, offsets=None, shape=None): + """ + Construct a Dia matrix from diagonals and their offsets. 
Using this + function in single-argument form produces a square matrix with the given + values on the main diagonal. + + With lists of diagonals and offsets, the matrix will be the smallest + possible square matrix if shape is not given, but in all cases the + diagonals must fit exactly with no extra or missing elements. Duplicated + diagonals will be summed together in the output. + + Parameters + ---------- + diagonals : sequence of array_like of complex or array_like of complex + The entries (including zeros) that should be placed on the diagonals in + the output matrix. Each entry must have enough entries in it to fill + the relevant diagonal and no more. + + offsets : sequence of integer or integer, optional + The indices of the diagonals. `offsets[i]` is the location of the + values `diagonals[i]`. An offset of 0 is the main diagonal, positive + values are above the main diagonal and negative ones are below the main + diagonal. + + shape : tuple, optional + The shape of the output as (``rows``, ``columns``). The result does + not need to be square, but the diagonals must be of the correct length + to fit in exactly. + """ + cdef base.idxint n_rows, n_cols, offset + try: + diagonals = list(diagonals) + if diagonals and np.isscalar(diagonals[0]): + # Catch the case where we're being called as (for example) + # diags([1, 2, 3], 0) + # with a single diagonal and offset. 
+ diagonals = [diagonals] + except TypeError: + raise TypeError("diagonals must be a list of arrays of complex") from None + if offsets is None: + if len(diagonals) == 0: + offsets = [] + elif len(diagonals) == 1: + offsets = [0] + else: + raise TypeError("offsets must be supplied if passing more than one diagonal") + offsets = np.atleast_1d(offsets) + if offsets.ndim > 1: + raise ValueError("offsets must be a 1D array of integers") + if len(diagonals) != len(offsets): + raise ValueError("number of diagonals does not match number of offsets") + if len(diagonals) == 0: + if shape is None: + raise ValueError("cannot construct matrix with no diagonals without a shape") + else: + n_rows, n_cols = shape + return zeros(n_rows, n_cols) + order = np.argsort(offsets) + diagonals_ = [] + offsets_ = [] + prev, cur = None, None + for i in order: + cur = offsets[i] + if cur == prev: + diagonals_[-1] += np.asarray(diagonals[i], dtype=np.complex128) + else: + offsets_.append(cur) + diagonals_.append(np.asarray(diagonals[i], dtype=np.complex128)) + prev = cur + if shape is None: + n_rows = n_cols = abs(offsets_[0]) + len(diagonals_[0]) + else: + try: + n_rows, n_cols = shape + except (TypeError, ValueError): + raise TypeError("shape must be a 2-tuple of positive integers") + if n_rows < 0 or n_cols < 0: + raise ValueError("shape must be a 2-tuple of positive integers") + for i in range(len(diagonals_)): + offset = offsets_[i] + if len(diagonals_[i]) != _diagonal_length(offset, n_rows, n_cols): + raise ValueError("given diagonals do not have the correct lengths") + if n_rows == 0 and n_cols == 0: + raise ValueError("can't produce a 0x0 matrix") + return diags_( + diagonals_, np.array(offsets_, dtype=idxint_dtype), n_rows, n_cols, + ) diff --git a/qutip/core/data/dispatch.pyx b/qutip/core/data/dispatch.pyx index e8abebd7bc..181c7acc2b 100644 --- a/qutip/core/data/dispatch.pyx +++ b/qutip/core/data/dispatch.pyx @@ -1,4 +1,5 @@ #cython: language_level=3 +#cython: boundscheck=False 
import functools import inspect @@ -6,215 +7,15 @@ import itertools import warnings from .convert import to as _to +from .convert import EPSILON cimport cython from libc cimport math from libcpp cimport bool +from qutip.core.data.base cimport Data __all__ = ['Dispatcher'] -cdef class _bind: - """ - Cythonised implementation of inspect.Signature.bind, supporting faster - binding and handling of default arguments. On construction, the signature - of the base function is extracted, and all parameters are parsed into - positional or keyword slots, using positional wherever possible, and their - defaults are stored. - """ - # Instance of inpsect.Signature representing the function. - cdef object signature - cdef object inputs - # Mapping of (str: int), where str is the name of any argument which may be - # specified by a keyword, and int is its location in `self._pos` if - # available, or -1 if it is keyword-only. - cdef dict _locations - # Default values (or inspect.Parameter.empty) for every parameter which - # may be passed as a positional argument. - cdef list _pos - # For each dispatcher input which can be given as a positional argument, - # these two lists hold the index of the input in the given tuple of input - # names and the corresponding index into the positional arguments after - # binding is complete. {_pos_inputs_pos[n]: _pos_inputs_input[n]} is - # effectively the same mapping as _kw_inputs (but for positional - # arguments), but we use two lists rather than a dictionary for speed. - cdef list _pos_inputs_input, _pos_inputs_pos - # Default values (or inspect.Parameter.empty) for every parameter which - # _must_ be passed as a keyword argument. - cdef dict _kw - # Mapping of (name, index into the input tuple) for each input which must - # be specified as a keyword argument - cdef list _kw_inputs - # Names of the keyword arguments which have default values. 
- cdef set _default_kw_names - # Respectively, numbers of positional parameters, keyword-only parameters - # and dispatcher inputs. - cdef Py_ssize_t n_args, n_kwargs, n_inputs - # Respectively, numbers of inputs which are positional and keyword-only. - cdef Py_ssize_t _n_pos_inputs, _n_kw_inputs - # Respectively, numbers of positional arguments without a default, and - # number of keyword-only arguments which _have_ a default. - cdef Py_ssize_t _n_pos_no_default, _n_kw_default - - - def __init__(self, signature, tuple inputs): - for arg in inputs: - if arg not in signature.parameters: - raise AttributeError("No argument matches '{}'.".format(arg)) - self.signature = signature - self.inputs = inputs - self._locations = {} - self._pos = [] - self._pos_inputs_input = [] - self._pos_inputs_pos = [] - self._kw = {} - self._kw_inputs = [] - self._default_kw_names = set() - self._n_pos_no_default = 0 - # signature.parameters is ordered for all Python versions. - for i, (name, parameter) in enumerate(signature.parameters.items()): - kind = parameter.kind - if (kind == parameter.VAR_POSITIONAL - or kind == parameter.VAR_KEYWORD): - raise TypeError("Cannot dispatch with *args or **kwargs.") - if kind == parameter.KEYWORD_ONLY: - self._kw[name] = parameter.default - self._locations[name] = -1 - if parameter.default is not parameter.empty: - self._default_kw_names.add(name) - if name in inputs: - self._kw_inputs.append((name, inputs.index(name))) - else: - self._pos.append(parameter.default) - if kind != parameter.POSITIONAL_ONLY: - self._locations[name] = i - if parameter.default is parameter.empty: - self._n_pos_no_default += 1 - if name in inputs: - # Effectively a mapping. 
- self._pos_inputs_input.append(inputs.index(name)) - self._pos_inputs_pos.append(len(self._pos) - 1) - self.n_args = len(self._pos) - self.n_kwargs = len(self._kw) - self.n_inputs = len(inputs) - self._n_pos_inputs = len(self._pos_inputs_input) - self._n_kw_inputs = len(self._kw_inputs) - self._n_kw_default = len(self._default_kw_names) - - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef tuple bind(self, tuple args, dict kwargs): - """ - Cython reimplementation of inspect.Signature.bind for binding the - collected `*args` and `**kwargs` of a generic call to a specific - signature, checking that everything matches. - - The output is a parsed tuple (args, kwargs), where `args` is a list and - `kwargs` is a dict, however all arguments which _can_ be passed - positionally will be moved from `kwargs` into `args` for the output. - The resultant `args` and `kwargs` can be unpacked into the underlying - function call safely. Default values are not filled in by this method. - - This is necessary to allow a generic `Dispatcher` class to work with - all type signatures. The result of this function can be fed to - `_bind.convert_types` and `_bind.dispatch_types`. - """ - cdef Py_ssize_t location, got_pos=0, got_kw=0 - cdef Py_ssize_t n_passed_args = len(args) - if n_passed_args > self.n_args: - raise TypeError( - "Too many arguments: expected at most {} but received {}." - .format(n_passed_args, self.n_args) - ) - # _pos and _kw are filled with their defaults at initialisation, and - # _pos is already the correct length (no-default parameters have - # sentinel values). - cdef list out_pos = self._pos.copy() - cdef dict out_kw = self._kw.copy() - got_pos = self.n_args - self._n_pos_no_default - got_kw = self._n_kw_default - # Positional arguments unambiguously fill the first positional slots. - for location in range(n_passed_args): - out_pos[location] = args[location] - # How many arguments that we didn't have defaults for did we just get? 
- # Positionals with a default are always after those without one. - if n_passed_args > self._n_pos_no_default: - got_pos += self._n_pos_no_default - else: - got_pos += n_passed_args - # Everything else has been passed by keyword, but it may be allowed to - # be passed positionally, which is the case we prefer. dict.items() is - # (relatively) expensive, so we use a boolean test for the fast path. - cdef str kw - cdef object arg - if kwargs: - for kw, arg in kwargs.items(): - try: - location = self._locations[kw] - except KeyError: - raise TypeError("Unknown argument '{}'.".format(kw)) from None - # _locations[kw] = -1 if kw is keyword-only, otherwise the - # corresponding positional location. - if location >= 0: - if location < n_passed_args: - raise TypeError("Multiple values for '{}'".format(kw)) - out_pos[location] = arg - if location < self._n_pos_no_default: - got_pos += 1 - else: - out_kw[kw] = arg - if kw not in self._default_kw_names: - got_kw += 1 - if got_pos < self.n_args: - raise TypeError("Too few positional arguments passed.") - if got_kw < self.n_kwargs: - raise TypeError("Not all keyword arguments were filled.") - return out_pos, out_kw - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef list dispatch_types(self, list args, dict kwargs): - """ - Get a list of the types which the dispatch should operate over, given - the output of `_bind.bind`. The return value is a list of the _input_ - types to be dispatched on, in order that they were specified at - `Dispatcher` creation. The output type is not returned as part of this - list, because there is no way for `_bind` to know it. 
- """ - cdef list dispatch = [None] * self.n_inputs - cdef str kw - cdef Py_ssize_t location, i - for location in range(self._n_pos_inputs): - dispatch[self._pos_inputs_input[location]]\ - = type(args[self._pos_inputs_pos[location]]) - for i in range(self._n_kw_inputs): - kw, location = self._kw_inputs[i] - dispatch[location] = type(kwargs[kw]) - return dispatch - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef tuple convert_types(self, list args, dict kwargs, tuple converters): - """ - Apply the type converters `converters` to the parsed arguments `args` - and `kwargs`. - - If there are `n` inputs which are dispatched on, `converters` should be - a tuple whose first `n` elements are converters (such as those obtained - from `data.to[to_type, from_type]`) to the desired types. - - `args` and `kwargs` should be the output of `_bind.bind`; the function - will likely fail if called on unparsed arguments. - """ - cdef str kw - cdef Py_ssize_t location, i - for location in range(self._n_pos_inputs): - args[location] = converters[location](args[location]) - for i in range(self._n_kw_inputs): - kw, location = self._kw_inputs[i] - kwargs[kw] = converters[location](kwargs[kw]) - return args, kwargs - cdef double _conversion_weight(tuple froms, tuple tos, dict weight_map, bint out) except -1: """ @@ -222,6 +23,8 @@ cdef double _conversion_weight(tuple froms, tuple tos, dict weight_map, bint out element-wise to the types in `tos`. `weight_map` is a mapping of `(to_type, from_type): real`; it should almost certainly be `data.to.weight`. + + Specialisations that support any types input should use ``Data``. 
""" cdef double weight = 0.0 cdef Py_ssize_t i, n=len(froms) @@ -231,9 +34,9 @@ cdef double _conversion_weight(tuple froms, tuple tos, dict weight_map, bint out ) if out: n = n - 1 - weight += weight_map[froms[n], tos[n]] + weight = weight + weight_map[froms[n], tos[n]] for i in range(n): - weight += weight_map[tos[i], froms[i]] + weight = weight + weight_map[tos[i], froms[i]] return weight @@ -248,22 +51,19 @@ cdef class _constructed_specialisation: See `self.__signature__` or `self.__text_signature__` for the call signature of this object. """ - cdef _bind _parameters - cdef bint _output + cdef readonly bint _output cdef object _call - cdef Py_ssize_t _n_dispatch + cdef readonly Py_ssize_t _n_inputs, _n_dispatch cdef readonly tuple types - cdef tuple _converters - cdef str _short_name + cdef readonly tuple _converters + cdef readonly str _short_name cdef public str __doc__ cdef public str __name__ - cdef public str __module__ + # cdef public str __module__ cdef public object __signature__ cdef readonly str __text_signature__ - cdef readonly bint direct - def __init__(self, base, Dispatcher dispatcher, types, converters, out, - direct): + def __init__(self, base, Dispatcher dispatcher, types, converters, out): self.__doc__ = inspect.getdoc(dispatcher) self._short_name = dispatcher.__name__ self.__name__ = ( @@ -271,43 +71,34 @@ cdef class _constructed_specialisation: + "_" + "_".join([x.__name__ for x in types]) ) - self.__module__ = dispatcher.__module__ + # self.__module__ = dispatcher.__module__ self.__signature__ = dispatcher.__signature__ self.__text_signature__ = dispatcher.__text_signature__ - self._parameters = dispatcher._parameters self._output = out self._call = base - self._n_dispatch = len(types) self.types = types - self.direct = direct self._converters = converters + self._n_dispatch = len(converters) + self._n_inputs = len(converters) - out - cdef object prebound(self, list args, dict kwargs): - """ - Call this specialisation with pre-parsed 
arguments and keyword - arguments. `args` and `kwargs` must be the output of the relevant - `_bind.bind` method for this function. - """ - self._parameters.convert_types(args, kwargs, self._converters) - out = self._call(*args, **kwargs) + @cython.wraparound(False) + def __call__(self, *args, **kwargs): + cdef int i + cdef list _args = list(args) + for i in range(self._n_inputs): + _args[i] = self._converters[i](args[i]) + out = self._call(*_args, **kwargs) if self._output: out = self._converters[self._n_dispatch - 1](out) return out - def __call__(self, *args, **kwargs): - cdef list args_ - cdef dict kwargs_ - args_, kwargs_ = self._parameters.bind(args, kwargs) - return self.prebound(args_, kwargs_) - def __repr__(self): if len(self.types) == 1: spec = self.types[0].__name__ else: spec = "(" + ", ".join(x.__name__ for x in self.types) + ")" - direct = ("" if self.direct else "in") + "direct" return "".join([ - "<", direct, " specialisation ", spec, " of ", self._short_name, ">" + "" ]) @@ -328,17 +119,16 @@ cdef class Dispatcher: where `type1`, `type2`, etc are the dispatched arguments (with the output type on the end, if this is a dispatcher over the output type. """ - cdef _bind _parameters cdef readonly dict _specialisations - cdef Py_ssize_t _n_dispatch + cdef readonly Py_ssize_t _n_dispatch, _n_inputs cdef readonly dict _lookup - cdef set _dtypes - cdef bint _pass_on_dtype + cdef readonly set _dtypes + cdef readonly bint _pass_on_dtype cdef readonly tuple inputs cdef readonly bint output cdef public str __doc__ cdef public str __name__ - cdef public str __module__ + # cdef public str __module__ cdef public object __signature__ cdef readonly str __text_signature__ @@ -361,9 +151,8 @@ cdef class Dispatcher: an instance of `inspect.Signature`, which will be used instead. inputs : iterable of str - The parameters which should be dispatched over. These can be - positional or keyword arguments, but must feature in the signature - provided. 
+ The parameters which should be dispatched over. These must be + positional arguments, and must feature in the signature provided. out : bool, optional (False) Whether to dispatch on the output of the function. Defaults to @@ -380,6 +169,10 @@ cdef class Dispatcher: this. If not given and `signature_source` is _not_ an instance of `inspect.Signature`, then we will attempt to read `__module__` from there instead. + + .. note:: + + Commented for now because of a bug in cython 3 (cython#5472) """ if isinstance(inputs, str): inputs = (inputs,) @@ -394,24 +187,32 @@ cdef class Dispatcher: self.__signature__ = signature_source else: self.__signature__ = inspect.signature(signature_source) - self._parameters = _bind(self.__signature__, inputs) + for input in self.inputs: + if ( + self.__signature__._parameters[input].kind + != inspect.Parameter.POSITIONAL_ONLY + ): + raise ValueError("inputs parameters must be positional only.") + if list(self.__signature__._parameters).index(input) >= len(inputs): + raise ValueError("inputs must be the first positional parameters.") if name is not None: self.__name__ = name elif not isinstance(signature_source, inspect.Signature): self.__name__ = signature_source.__name__ else: self.__name__ = 'dispatcher' - if module is not None: - self.__module__ = module - elif not isinstance(signature_source, inspect.Signature): - self.__module__ = signature_source.__module__ + # if module is not None: + # self.__module__ = module + # elif not isinstance(signature_source, inspect.Signature): + # self.__module__ = signature_source.__module__ self.__text_signature__ = self.__name__ + str(self.__signature__) if not isinstance(signature_source, inspect.Signature): self.__doc__ = inspect.getdoc(signature_source) self.output = out self._specialisations = {} self._lookup = {} - self._n_dispatch = self._parameters.n_inputs + self.output + self._n_inputs = len(self.inputs) + self._n_dispatch = len(self.inputs) + self.output self._pass_on_dtype = 'dtype' in 
self.__signature__.parameters # Add ourselves to the list of dispatchers to be updated. _to.dispatchers.append(self) @@ -471,12 +272,16 @@ cdef class Dispatcher: raise ValueError( "specialisation " + str(arg) + " has wrong number of parameters: needed types for " - + str(self._parameters.inputs) + + str(self.inputs) + (", an output type" if self.output else "") + " and a callable" ) for i in range(self._n_dispatch): - if (not _defer) and arg[i] not in _to.dtypes: + if ( + not _defer + and arg[i] not in _to.dtypes + and arg[i] is not Data + ): raise ValueError(str(arg[i]) + " is not a known data type") if not callable(arg[self._n_dispatch]): raise TypeError(str(arg[-1]) + " is not callable") @@ -484,6 +289,46 @@ cdef class Dispatcher: if not _defer: self.rebuild_lookup() + cdef object _find_specialization(self, tuple in_types, bint output): + # The complexity of building the table here is very poor, but it's a + # cost we pay very infrequently, and until it's proved to be a + # bottle-neck in real code, we stick with the simple algorithm. + cdef double weight, cur + cdef tuple types, out_types, displayed_type + cdef object function + cdef int n_dispatch + weight = math.INFINITY + types = None + function = None + n_dispatch = len(in_types) + for out_types, out_function in self._specialisations.items(): + cur = _conversion_weight( + in_types, out_types[:n_dispatch], _to.weight, out=output) + if cur < weight: + weight = cur + types = out_types + function = out_function + + if cur == math.INFINITY: + raise ValueError("No valid specialisations found") + + if weight in [EPSILON, 0.] 
and not (output and types[-1] is Data): + self._lookup[in_types] = function + else: + if output: + converters = tuple( + [_to[pair] for pair in zip(types[:-1], in_types[:-1])] + + [_to[in_types[-1], types[-1]]] + ) + else: + converters = tuple(_to[pair] for pair in zip(types, in_types)) + displayed_type = in_types + if len(in_types) < len(types): + displayed_type = displayed_type + (types[-1],) + self._lookup[in_types] =\ + _constructed_specialisation(function, self, displayed_type, + converters, output) + def rebuild_lookup(self): """ Manually trigger a rebuild of the lookup table for this dispatcher. @@ -493,58 +338,17 @@ cdef class Dispatcher: You most likely do not need to call this function yourself. """ - cdef double weight, cur - cdef tuple types, out_types - cdef object function if not self._specialisations: return self._dtypes = _to.dtypes.copy() - # The complexity of building the table here is very poor, but it's a - # cost we pay very infrequently, and until it's proved to be a - # bottle-neck in real code, we stick with the simple algorithm. for in_types in itertools.product(self._dtypes, repeat=self._n_dispatch): - weight = math.INFINITY - types = None - function = None - for out_types, out_function in self._specialisations.items(): - cur = _conversion_weight(in_types, out_types, _to.weight, - out=self.output) - if cur < weight: - weight = cur - types = out_types - function = out_function - if self.output: - converters = tuple( - [_to[pair] for pair in zip(types[:-1], in_types[:-1])] - + [_to[in_types[-1], types[-1]]] - ) - else: - converters = tuple(_to[pair] for pair in zip(types, in_types)) - self._lookup[in_types] =\ - _constructed_specialisation(function, self, in_types, - converters, self.output, - weight == 0) + self._find_specialization(in_types, self.output) # Now build the lookup table in the case that we dispatch on the output # type as well, but the user has called us without specifying it. 
# TODO: option to control default output type choice if unspecified? if self.output: for in_types in itertools.product(self._dtypes, repeat=self._n_dispatch-1): - weight = math.INFINITY - types = None - function = None - for out_types, out_function in self._specialisations.items(): - cur = _conversion_weight(in_types, out_types[:-1], - _to.weight, out=False) - if cur < weight: - weight = cur - types = out_types - function = out_function - converters = tuple(_to[pair] for pair in zip(types, in_types)) - self._lookup[in_types] =\ - _constructed_specialisation(function, self, - in_types + (types[-1],), - converters, False, - weight == 0) + self._find_specialization(in_types, False) def __getitem__(self, types): """ @@ -564,20 +368,25 @@ cdef class Dispatcher: return "" def __call__(self, *args, dtype=None, **kwargs): - cdef list args_, dispatch - cdef dict kwargs_ + cdef list dispatch = [] + cdef int i if self._pass_on_dtype: kwargs['dtype'] = dtype if not (self._pass_on_dtype or self.output) and dtype is not None: raise TypeError("unknown argument 'dtype'") - args_, kwargs_ = self._parameters.bind(args, kwargs) - dispatch = self._parameters.dispatch_types(args_, kwargs_) + if len(args) < self._n_inputs: + raise TypeError( + "All dispatched data input must be passed " + "as positional arguments." 
+ ) + for i in range(self._n_inputs): + dispatch.append(type(args[i])) + if self.output and dtype is not None: dtype = _to.parse(dtype) dispatch.append(dtype) - cdef _constructed_specialisation function try: function = self._lookup[tuple(dispatch)] except KeyError: raise TypeError("unknown types to dispatch on: " + str(dispatch)) from None - return function.prebound(args_, kwargs_) + return function(*args, **kwargs) diff --git a/qutip/core/data/eigen.py b/qutip/core/data/eigen.py index c860fc2003..a77f092b99 100644 --- a/qutip/core/data/eigen.py +++ b/qutip/core/data/eigen.py @@ -207,7 +207,7 @@ def _eigs_fix_eigvals(data, eigvals, sort): return eigvals, num_large, num_small -def eigs_csr(data, isherm=None, vecs=True, sort='low', eigvals=0, +def eigs_csr(data, /, isherm=None, vecs=True, sort='low', eigvals=0, tol=0, maxiter=100000): """ Return eigenvalues and eigenvectors for a CSR matrix. This specialisation @@ -257,7 +257,7 @@ def eigs_csr(data, isherm=None, vecs=True, sort='low', eigvals=0, return (evals, Dense(evecs, copy=False)) if vecs else evals -def eigs_dense(data, isherm=None, vecs=True, sort='low', eigvals=0): +def eigs_dense(data, /, isherm=None, vecs=True, sort='low', eigvals=0): """ Return eigenvalues and eigenvectors for a Dense matrix. Takes no special keyword arguments; see the primary documentation in :func:`.eigs`. 
@@ -412,7 +412,7 @@ def svd_dense(data, vecs=True, **kw): svd = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('data', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('data', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('vecs', _inspect.Parameter.POSITIONAL_OR_KEYWORD), ]), name='svd', diff --git a/qutip/core/data/expect.pxd b/qutip/core/data/expect.pxd index 4e7d2af104..fd2c108fe2 100644 --- a/qutip/core/data/expect.pxd +++ b/qutip/core/data/expect.pxd @@ -1,16 +1,22 @@ #cython: language_level=3 #cython: boundscheck=False, wraparound=False, initializedcheck=False -from qutip.core.data cimport CSR, Dense, Data +from qutip.core.data cimport CSR, Dense, Data, Dia -cpdef double complex expect_csr(CSR op, CSR state) nogil except * -cpdef double complex expect_super_csr(CSR op, CSR state) nogil except * +cpdef double complex expect_csr(CSR op, CSR state) except * nogil +cpdef double complex expect_super_csr(CSR op, CSR state) except * nogil -cpdef double complex expect_csr_dense(CSR op, Dense state) nogil except * -cpdef double complex expect_super_csr_dense(CSR op, Dense state) nogil except * +cpdef double complex expect_csr_dense(CSR op, Dense state) except * nogil +cpdef double complex expect_super_csr_dense(CSR op, Dense state) except * nogil -cpdef double complex expect_dense(Dense op, Dense state) nogil except * -cpdef double complex expect_super_dense(Dense op, Dense state) nogil except * +cpdef double complex expect_dense(Dense op, Dense state) except * nogil +cpdef double complex expect_super_dense(Dense op, Dense state) except * nogil + +cpdef double complex expect_dia(Dia op, Dia state) except * +cpdef double complex expect_super_dia(Dia op, Dia state) except * + +cpdef double complex expect_dia_dense(Dia op, Dense state) except * +cpdef double complex expect_super_dia_dense(Dia op, Dense state) except * cdef double complex expect_data_dense(Data op, Dense state) except * cdef double complex expect_super_data_dense(Data op, 
Dense state) except * diff --git a/qutip/core/data/expect.pyx b/qutip/core/data/expect.pyx index 9b2089943e..a05969ddbb 100644 --- a/qutip/core/data/expect.pyx +++ b/qutip/core/data/expect.pyx @@ -12,15 +12,19 @@ cdef extern from "" namespace "std" nogil: double complex conj(double complex x) from qutip.core.data.base cimport idxint, Data -from qutip.core.data cimport csr, CSR, Dense +from qutip.core.data cimport csr, CSR, Dense, Dia +from .inner import inner +from .trace import trace, trace_oper_ket +from .matmul import matmul __all__ = [ - 'expect', 'expect_csr', 'expect_csr_dense', 'expect_dense', - 'expect_super', 'expect_super_csr', - 'expect_super_csr_dense', 'expect_super_dense', + 'expect', 'expect_csr', 'expect_dense', 'expect_dia', 'expect_data', + 'expect_csr_dense', 'expect_dia_dense', + 'expect_super', 'expect_super_csr', 'expect_super_dia', 'expect_super_dense', + 'expect_super_csr_dense', 'expect_super_dia_dense', 'expect_super_data', ] -cdef void _check_shape_ket(Data op, Data state) nogil except *: +cdef void _check_shape_ket(Data op, Data state) except * nogil: if ( op.shape[1] != state.shape[0] # Matrix multiplication or state.shape[1] != 1 # State is ket @@ -29,16 +33,16 @@ cdef void _check_shape_ket(Data op, Data state) nogil except *: raise ValueError("incorrect input shapes " + str(op.shape) + " and " + str(state.shape)) -cdef void _check_shape_dm(Data op, Data state) nogil except *: +cdef void _check_shape_dm(Data op, Data state) except * nogil: if ( op.shape[1] != state.shape[0] # Matrix multiplication - or state.shape[0] != state.shape[1] # State is square + or state.shape[0] != state.shape[1] # State is square or op.shape[0] != op.shape[1] # Op is square ): raise ValueError("incorrect input shapes " + str(op.shape) + " and " + str(state.shape)) -cdef void _check_shape_super(Data op, Data state) nogil except *: +cdef void _check_shape_super(Data op, Data state) except * nogil: if state.shape[1] != 1: raise ValueError("expected a 
column-stacked matrix") if ( @@ -48,7 +52,7 @@ cdef void _check_shape_super(Data op, Data state) nogil except *: raise ValueError("incompatible shapes " + str(op.shape) + ", " + str(state.shape)) -cdef double complex _expect_csr_ket(CSR op, CSR state) nogil except *: +cdef double complex _expect_csr_ket(CSR op, CSR state) except * nogil: """ Perform the operation state.adjoint() @ op @ state @@ -71,7 +75,7 @@ cdef double complex _expect_csr_ket(CSR op, CSR state) nogil except *: out += mul * sum return out -cdef double complex _expect_csr_dm(CSR op, CSR state) nogil except *: +cdef double complex _expect_csr_dm(CSR op, CSR state) except * nogil: """ Perform the operation tr(op @ state) @@ -90,7 +94,7 @@ cdef double complex _expect_csr_dm(CSR op, CSR state) nogil except *: return out -cpdef double complex expect_super_csr(CSR op, CSR state) nogil except *: +cpdef double complex expect_super_csr(CSR op, CSR state) except * nogil: """ Perform the operation `tr(op @ state)` where `op` is supplied as a superoperator, and `state` is a column-stacked operator. @@ -108,7 +112,7 @@ cpdef double complex expect_super_csr(CSR op, CSR state) nogil except *: return out -cpdef double complex expect_csr(CSR op, CSR state) nogil except *: +cpdef double complex expect_csr(CSR op, CSR state) except * nogil: """ Get the expectation value of the operator `op` over the state `state`. The state can be either a ket or a density matrix. 
@@ -122,7 +126,7 @@ cpdef double complex expect_csr(CSR op, CSR state) nogil except *: return _expect_csr_ket(op, state) return _expect_csr_dm(op, state) -cdef double complex _expect_csr_dense_ket(CSR op, Dense state) nogil except *: +cdef double complex _expect_csr_dense_ket(CSR op, Dense state) except * nogil: _check_shape_ket(op, state) cdef double complex out=0, sum cdef size_t row, ptr @@ -135,7 +139,7 @@ cdef double complex _expect_csr_dense_ket(CSR op, Dense state) nogil except *: out += sum * conj(state.data[row]) return out -cdef double complex _expect_csr_dense_dm(CSR op, Dense state) nogil except *: +cdef double complex _expect_csr_dense_dm(CSR op, Dense state) except * nogil: _check_shape_dm(op, state) cdef double complex out=0 cdef size_t row, ptr_op, ptr_state=0, row_stride, col_stride @@ -150,7 +154,7 @@ cdef double complex _expect_csr_dense_dm(CSR op, Dense state) nogil except *: return out -cdef double complex _expect_dense_ket(Dense op, Dense state) nogil except *: +cdef double complex _expect_dense_ket(Dense op, Dense state) except * nogil: _check_shape_ket(op, state) cdef double complex out=0, sum cdef size_t row, col, op_row_stride, op_col_stride @@ -165,7 +169,7 @@ cdef double complex _expect_dense_ket(Dense op, Dense state) nogil except *: out += sum * conj(state.data[row]) return out -cdef double complex _expect_dense_dense_dm(Dense op, Dense state) nogil except *: +cdef double complex _expect_dense_dense_dm(Dense op, Dense state) except * nogil: _check_shape_dm(op, state) cdef double complex out=0 cdef size_t row, col, op_row_stride, op_col_stride @@ -182,7 +186,7 @@ cdef double complex _expect_dense_dense_dm(Dense op, Dense state) nogil except * return out -cpdef double complex expect_csr_dense(CSR op, Dense state) nogil except *: +cpdef double complex expect_csr_dense(CSR op, Dense state) except * nogil: """ Get the expectation value of the operator `op` over the state `state`. The state can be either a ket or a density matrix. 
@@ -197,7 +201,7 @@ cpdef double complex expect_csr_dense(CSR op, Dense state) nogil except *: return _expect_csr_dense_dm(op, state) -cpdef double complex expect_dense(Dense op, Dense state) nogil except *: +cpdef double complex expect_dense(Dense op, Dense state) except * nogil: """ Get the expectation value of the operator `op` over the state `state`. The state can be either a ket or a density matrix. @@ -212,7 +216,7 @@ cpdef double complex expect_dense(Dense op, Dense state) nogil except *: return _expect_dense_dense_dm(op, state) -cpdef double complex expect_super_csr_dense(CSR op, Dense state) nogil except *: +cpdef double complex expect_super_csr_dense(CSR op, Dense state) except * nogil: """ Perform the operation `tr(op @ state)` where `op` is supplied as a superoperator, and `state` is a column-stacked operator. @@ -228,7 +232,7 @@ cpdef double complex expect_super_csr_dense(CSR op, Dense state) nogil except *: return out -cpdef double complex expect_super_dense(Dense op, Dense state) nogil except *: +cpdef double complex expect_super_dense(Dense op, Dense state) except * nogil: """ Perform the operation `tr(op @ state)` where `op` is supplied as a superoperator, and `state` is a column-stacked operator. @@ -249,13 +253,141 @@ cpdef double complex expect_super_dense(Dense op, Dense state) nogil except *: return out +cpdef double complex expect_dia(Dia op, Dia state) except *: + cdef double complex expect = 0. + cdef idxint diag_bra, diag_op, diag_ket, i, length + cdef idxint start_op, start_state, end_op, end_state + if state.shape[1] == 1: + _check_shape_ket(op, state) + # Since the ket is sparse and possibly unsorted. Taking the n'th + # element of the state require a loop on the diags. Thus 3 loops are + # needed. 
+ for diag_ket in range(state.num_diag): + #if -state.offsets[diag_ket] >= op.shape[1]: + # continue + for diag_bra in range(state.num_diag): + for diag_op in range(op.num_diag): + if state.offsets[diag_ket] - state.offsets[diag_bra] + op.offsets[diag_op] == 0: + expect += ( + conj(state.data[diag_bra * state.shape[1]]) + * state.data[diag_ket * state.shape[1]] + * op.data[diag_op * op.shape[1] - state.offsets[diag_ket]] + ) + else: + _check_shape_dm(op, state) + for diag_op in range(op.num_diag): + for diag_state in range(state.num_diag): + if op.offsets[diag_op] == -state.offsets[diag_state]: + + start_op = max(0, op.offsets[diag_op]) + start_state = max(0, state.offsets[diag_state]) + end_op = min(op.shape[1], op.shape[0] + op.offsets[diag_op]) + end_state = min(state.shape[1], state.shape[0] + state.offsets[diag_state]) + length = min(end_op - start_op, end_state - start_state) + + for i in range(length): + expect += ( + op.data[diag_op * op.shape[1] + i + start_op] + * state.data[diag_state * state.shape[1] + i + start_state] + ) + return expect + + +cpdef double complex expect_dia_dense(Dia op, Dense state) except *: + cdef double complex expect = 0. 
+ cdef idxint i, diag_op, start_op, end_op, strideR, stride, start_state + if state.shape[1] == 1: + _check_shape_ket(op, state) + for diag_op in range(op.num_diag): + start_op = max(0, op.offsets[diag_op]) + end_op = min(op.shape[1], op.shape[0] + op.offsets[diag_op]) + for i in range(start_op, end_op): + expect += ( + op.data[diag_op * op.shape[1] + i] + * state.data[i] + * conj(state.data[i - op.offsets[diag_op]]) + ) + else: + _check_shape_dm(op, state) + stride = state.shape[0] + 1 + strideR = state.shape[0] if state.fortran else 1 + for diag_op in range(op.num_diag): + start_op = max(0, op.offsets[diag_op]) + end_op = min(op.shape[1], op.shape[0] + op.offsets[diag_op]) + start_state = -op.offsets[diag_op] * strideR + for i in range(start_op, end_op): + expect += ( + op.data[diag_op * op.shape[1] + i] + * state.data[start_state + i * stride] + ) + return expect + + +cpdef double complex expect_super_dia(Dia op, Dia state) except *: + cdef double complex expect = 0. + _check_shape_super(op, state) + cdef idxint diag_op, diag_state + cdef idxint stride = sqrt(state.shape[0]) + 1 + for diag_op in range(op.num_diag): + for diag_state in range(state.num_diag): + if ( + -state.offsets[diag_state] < op.shape[1] + and -op.offsets[diag_op] - state.offsets[diag_state] >= 0 + and (-op.offsets[diag_op] - state.offsets[diag_state]) % stride == 0 + ): + expect += state.data[diag_state * state.shape[1]] * op.data[diag_op * op.shape[1] - state.offsets[diag_state]] + + return expect + + +cpdef double complex expect_super_dia_dense(Dia op, Dense state) except *: + cdef double complex expect = 0. 
+ _check_shape_super(op, state) + cdef idxint col, diag_op, start, end + cdef idxint stride = sqrt(state.shape[0]) + 1 + for diag_op in range(op.num_diag): + start = max(0, op.offsets[diag_op]) + end = min(op.shape[1], op.shape[0] + op.offsets[diag_op]) + col = (((start - op.offsets[diag_op] - 1) // stride) + 1) * stride + op.offsets[diag_op] + while col < end: + expect += op.data[diag_op * op.shape[1] + col] * state.data[col] + col += stride + return expect + + +def expect_data(Data op, Data state): + """ + Get the expectation value of the operator `op` over the state `state`. The + state can be either a ket or a density matrix. + + The expectation of a state is defined as the operation: + state.adjoint() @ op @ state + and of a density matrix: + tr(op @ state) + """ + if state.shape[1] == 1: + _check_shape_ket(op, state) + return inner(state, matmul(op, state)) + _check_shape_dm(op, state) + return trace(matmul(op, state)) + + +def expect_super_data(Data op, Data state): + """ + Perform the operation `tr(op @ state)` where `op` is supplied as a + superoperator, and `state` is a column-stacked operator. 
+ """ + _check_shape_super(op, state) + return trace_oper_ket(matmul(op, state)) + + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect expect = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('op', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('state', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('op', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('state', _inspect.Parameter.POSITIONAL_ONLY), ]), name='expect', module=__name__, @@ -276,12 +408,15 @@ expect.add_specialisations([ (CSR, CSR, expect_csr), (CSR, Dense, expect_csr_dense), (Dense, Dense, expect_dense), + (Dia, Dense, expect_dia_dense), + (Dia, Dia, expect_dia), + (Data, Data, expect_data), ], _defer=True) expect_super = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('op', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('state', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('op', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('state', _inspect.Parameter.POSITIONAL_ONLY), ]), name='expect_super', module=__name__, @@ -298,6 +433,9 @@ expect_super.add_specialisations([ (CSR, CSR, expect_super_csr), (CSR, Dense, expect_super_csr_dense), (Dense, Dense, expect_super_dense), + (Dia, Dense, expect_super_dia_dense), + (Dia, Dia, expect_super_dia), + (Data, Data, expect_super_data), ], _defer=True) del _inspect, _Dispatcher diff --git a/qutip/core/data/expm.py b/qutip/core/data/expm.py index 27586d1678..22e69ba5f2 100644 --- a/qutip/core/data/expm.py +++ b/qutip/core/data/expm.py @@ -4,12 +4,14 @@ from .dense import Dense from .csr import CSR -from .properties import isdiag_csr +from .dia import Dia +from . 
import dia +from .properties import isdiag_csr, isdiag_dia from qutip.settings import settings from .base import idxint_dtype __all__ = [ - 'expm', 'expm_csr', 'expm_csr_dense', 'expm_dense', + 'expm', 'expm_csr', 'expm_csr_dense', 'expm_dense', 'expm_dia', 'logm', 'logm_dense', ] @@ -37,6 +39,42 @@ def expm_csr(matrix: CSR) -> CSR: tidyup=settings.core['auto_tidyup']) +def expm_dia(matrix: Dia) -> Dia: + if matrix.shape[0] != matrix.shape[1]: + raise ValueError("can only exponentiate square matrix") + if matrix.num_diag == 0: + out = dia.identity(matrix.shape[0]) + elif matrix.num_diag > 1: + csc = matrix.as_scipy().tocsc() + out = Dia( + scipy.sparse.linalg.expm(csc).todia(), + tidyup=settings.core['auto_tidyup'], copy=False + ) + elif isdiag_dia(matrix): + matrix_sci = matrix.as_scipy() + data = np.exp(matrix_sci.data[0, :]) + out = dia.diags(data, shape=matrix.shape) + else: + mat = matrix.as_scipy() + size = matrix.shape[0] + offset = mat.offsets[0] + n_offset = offset + a_offset = abs(offset) + data = mat.data[0, max(0, offset): min(size, size + offset)] + data_0 = data + out_oufsets = np.arange(0, size, a_offset, dtype=idxint_dtype) + out_oufsets *= np.sign(offset) + out_data = np.zeros((len(out_oufsets), size), dtype=complex) + out_data[0, :] += 1. 
+ for i in range(1, len(out_oufsets)): + out_data[i, max(0, n_offset): min(size, size + n_offset)] = data + data = data_0[:-abs(n_offset)] * data[a_offset:] / (i+1) + n_offset += offset + out = Dia((out_data, out_oufsets), shape=matrix.shape, copy=False) + + return out + + def expm_csr_dense(matrix: CSR) -> Dense: if matrix.shape[0] != matrix.shape[1]: raise ValueError("can only exponentiate square matrix") @@ -55,7 +93,7 @@ def expm_dense(matrix: Dense) -> Dense: expm = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='expm', module=__name__, @@ -67,6 +105,7 @@ def expm_dense(matrix: Dense) -> Dense: (CSR, CSR, expm_csr), (CSR, Dense, expm_csr_dense), (Dense, Dense, expm_dense), + (Dia, Dia, expm_dia), ], _defer=True) @@ -78,7 +117,7 @@ def logm_dense(matrix: Dense) -> Dense: logm = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='logm', module=__name__, diff --git a/qutip/core/data/extract.py b/qutip/core/data/extract.py new file mode 100644 index 0000000000..a5bbfeecb1 --- /dev/null +++ b/qutip/core/data/extract.py @@ -0,0 +1,130 @@ +from . import Dense, CSR, Dia +from .dispatch import Dispatcher as _Dispatcher +import inspect as _inspect +try: + from scipy.sparse import csr_array +except ImportError: + csr_array = None +from scipy.sparse import csr_matrix + + +__all__ = ["extract"] + + +def extract_dense(matrix, format=None, copy=True): + """ + Return an array representation of the Dense data object. + + Parameters + ---------- + matrix : Data + The matrix to convert to the given format. + + format : str {"ndarray"}, default="ndarray" + Type of the output. + + copy : bool, default: True + Whether to return a copy of the data. If False, + a view of the data is returned when possible. 
+ """ + if format not in [None, "ndarray"]: + raise ValueError( + "Dense can only be extracted to 'ndarray'" + ) + if copy: + return matrix.to_array() + else: + return matrix.as_ndarray() + + +def extract_csr(matrix, format=None, copy=True): + """ + Return the scipy's object ``csr_matrix``. + + Parameters + ---------- + matrix : Data + The matrix to convert to common type. + + format : str, {"csr_matrix"} + Type of the output. + + copy : bool, default: True + Whether to pass a copy of the object or not. + """ + if format not in [None, "scipy_csr", "csr_matrix"]: + raise ValueError( + "CSR can only be extracted to 'csr_matrix'" + ) + csr_mat = matrix.as_scipy() + if copy: + csr_mat = csr_mat.copy() + return csr_mat + + +def extract_dia(matrix, format=None, copy=True): + """ + Return the scipy's object ``dia_matrix``. + + Parameters + ---------- + matrix : Data + The matrix to convert to common type. + + format : str, {"dia_matrix"} + Type of the output. + + copy : bool, default: True + Whether to pass a copy of the object or not. + """ + if format not in [None, "scipy_dia", "dia_matrix"]: + raise ValueError( + "Dia can only be extracted to 'dia_matrix'" + ) + dia_mat = matrix.as_scipy() + if copy: + dia_mat = dia_mat.copy() + return dia_mat + + +extract = _Dispatcher( + _inspect.Signature([ + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter( + 'format', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=None + ), + _inspect.Parameter( + 'copy', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=True + ) + ]), + name='extract', + module=__name__, + inputs=('matrix',), + out=False, +) +extract.__doc__ =\ + """ + Return the common representation of the data layer object: scipy's + ``csr_matrix`` for ``CSR``, numpy array for ``Dense``, Jax's ``Array`` for + ``JaxArray``, etc. + + Parameters + ---------- + matrix : Data + The matrix to convert to common type. 
+ + format : str, default: None + Type of the output, "ndarray" for ``Dense``, "csr_matrix" for ``CSR``. + A ValueError will be raised if the format is not supported. + + copy : bool, default: True + Whether to pass a copy of the object. + """ +extract.add_specialisations([ + (CSR, extract_csr), + (Dia, extract_dia), + (Dense, extract_dense), +], _defer=True) + + +del _Dispatcher, _inspect diff --git a/qutip/core/data/inner.pxd b/qutip/core/data/inner.pxd index 1d5000dd7a..5c202060a6 100644 --- a/qutip/core/data/inner.pxd +++ b/qutip/core/data/inner.pxd @@ -2,5 +2,5 @@ from qutip.core.data.csr cimport CSR -cpdef double complex inner_csr(CSR left, CSR right, bint scalar_is_ket=*) nogil except * -cpdef double complex inner_op_csr(CSR left, CSR op, CSR right, bint scalar_is_ket=*) nogil except * +cpdef double complex inner_csr(CSR left, CSR right, bint scalar_is_ket=*) except * nogil +cpdef double complex inner_op_csr(CSR left, CSR op, CSR right, bint scalar_is_ket=*) except * nogil diff --git a/qutip/core/data/inner.pyx b/qutip/core/data/inner.pyx index 9491970044..d488ae5210 100644 --- a/qutip/core/data/inner.pyx +++ b/qutip/core/data/inner.pyx @@ -8,15 +8,20 @@ from qutip.core.data.base cimport idxint, Data from qutip.core.data cimport csr, dense from qutip.core.data.csr cimport CSR from qutip.core.data.dense cimport Dense +from qutip.core.data.dia cimport Dia from qutip.core.data.matmul cimport matmul_dense +from .matmul import matmul +from .trace import trace +from .adjoint import adjoint __all__ = [ - 'inner', 'inner_csr', 'inner_dense', - 'inner_op', 'inner_op_csr', 'inner_op_dense', + 'inner', 'inner_csr', 'inner_dense', 'inner_dia', 'inner_data', + 'inner_op', 'inner_op_csr', 'inner_op_dense', 'inner_op_dia', + 'inner_op_data', ] -cdef void _check_shape_inner(Data left, Data right) nogil except *: +cdef void _check_shape_inner(Data left, Data right) except * nogil: if ( (left.shape[0] != 1 and left.shape[1] != 1) or right.shape[1] != 1 @@ -28,7 +33,7 @@ cdef 
void _check_shape_inner(Data left, Data right) nogil except *: + str(right.shape) ) -cdef void _check_shape_inner_op(Data left, Data op, Data right) nogil except *: +cdef void _check_shape_inner_op(Data left, Data op, Data right) except * nogil: cdef bint left_shape = left.shape[0] == 1 or left.shape[1] == 1 cdef bint left_op = ( (left.shape[0] == 1 and left.shape[1] == op.shape[0]) @@ -67,7 +72,7 @@ cdef double complex _inner_csr_ket_ket(CSR left, CSR right) nogil: out += conj(left.data[ptr_l]) * right.data[ptr_r] return out -cpdef double complex inner_csr(CSR left, CSR right, bint scalar_is_ket=False) nogil except *: +cpdef double complex inner_csr(CSR left, CSR right, bint scalar_is_ket=False) except * nogil: """ Compute the complex inner product . The shape of `left` is used to determine if it has been supplied as a ket or a bra. The result of @@ -90,7 +95,46 @@ cpdef double complex inner_csr(CSR left, CSR right, bint scalar_is_ket=False) no return _inner_csr_bra_ket(left, right) return _inner_csr_ket_ket(left, right) -cpdef double complex inner_dense(Dense left, Dense right, bint scalar_is_ket=False) nogil except *: +cpdef double complex inner_dia(Dia left, Dia right, bint scalar_is_ket=False) except * nogil: + """ + Compute the complex inner product . The shape of `left` is + used to determine if it has been supplied as a ket or a bra. The result of + this function will be identical if passed `left` or `adjoint(left)`. + + The parameter `scalar_is_ket` is only intended for the case where `left` + and `right` are both of shape (1, 1). In this case, `left` will be assumed + to be a ket unless `scalar_is_ket` is False. This parameter is ignored at + all other times. + """ + _check_shape_inner(left, right) + cdef double complex inner = 0. 
+ cdef idxint diag_left, diag_right + cdef bint is_ket + if right.shape[0] == 1: + is_ket = scalar_is_ket + else: + is_ket = left.shape[0] == right.shape[0] + + if is_ket: + for diag_right in range(right.num_diag): + for diag_left in range(left.num_diag): + if left.offsets[diag_left] - right.offsets[diag_right] == 0: + inner += ( + conj(left.data[diag_left * left.shape[1]]) + * right.data[diag_right * right.shape[1]] + ) + else: + for diag_right in range(right.num_diag): + for diag_left in range(left.num_diag): + if left.offsets[diag_left] + right.offsets[diag_right] == 0: + inner += ( + left.data[diag_left * left.shape[1] + left.offsets[diag_left]] + * right.data[diag_right * right.shape[1]] + ) + + return inner + +cpdef double complex inner_dense(Dense left, Dense right, bint scalar_is_ket=False) except * nogil: """ Compute the complex inner product . The shape of `left` is used to determine if it has been supplied as a ket or a bra. The result of @@ -149,8 +193,52 @@ cdef double complex _inner_op_csr_ket_ket(CSR left, CSR op, CSR right) nogil: out += conj(left.data[ptr_l]) * sum return out +cpdef double complex inner_op_dia(Dia left, Dia op, Dia right, + bint scalar_is_ket=False) except * nogil: + """ + Compute the complex inner product . The shape of `left` is + used to determine if it has been supplied as a ket or a bra. The result of + this function will be identical if passed `left` or `adjoint(left)`. + + The parameter `scalar_is_ket` is only intended for the case where `left` + and `right` are both of shape (1, 1). In this case, `left` will be assumed + to be a ket unless `scalar_is_ket` is False. This parameter is ignored at + all other times. 
+ """ + _check_shape_inner_op(left, op, right) + cdef double complex inner = 0., val + cdef idxint diag_left, diag_op, diag_right + cdef int is_ket + if op.shape[0] == 1: + is_ket = scalar_is_ket + else: + is_ket = left.shape[0] == op.shape[0] + + if is_ket: + for diag_right in range(right.num_diag): + for diag_left in range(left.num_diag): + for diag_op in range(op.num_diag): + if -left.offsets[diag_left] + right.offsets[diag_right] + op.offsets[diag_op] == 0: + inner += ( + conj(left.data[diag_left]) + * right.data[diag_right] + * op.data[diag_op * op.shape[1] - right.offsets[diag_right]] + ) + else: + for diag_right in range(right.num_diag): + for diag_left in range(left.num_diag): + for diag_op in range(op.num_diag): + if left.offsets[diag_left] + right.offsets[diag_right] + op.offsets[diag_op] == 0: + inner += ( + left.data[diag_left * left.shape[1] + left.offsets[diag_left]] + * right.data[diag_right] + * op.data[diag_op * op.shape[1] - right.offsets[diag_right]] + ) + + return inner + cpdef double complex inner_op_csr(CSR left, CSR op, CSR right, - bint scalar_is_ket=False) nogil except *: + bint scalar_is_ket=False) except * nogil: """ Compute the complex inner product . The shape of `left` is used to determine if it has been supplied as a ket or a bra. The result of @@ -188,13 +276,52 @@ cpdef double complex inner_op_dense(Dense left, Dense op, Dense right, return inner_dense(left, matmul_dense(op, right), scalar_is_ket) +cpdef inner_data(Data left, Data right, bint scalar_is_ket=False): + """ + Compute the complex inner product . The shape of `left` is + used to determine if it has been supplied as a ket or a bra. The result of + this function will be identical if passed `left` or `adjoint(left)`. + + The parameter `scalar_is_ket` is only intended for the case where `left` + and `right` are both of shape (1, 1). In this case, `left` will be assumed + to be a ket unless `scalar_is_ket` is False. This parameter is ignored at + all other times. 
+ """ + _check_shape_inner(left, right) + if left.shape[0] == left.shape[1] == right.shape[1] == 1: + return ( + trace(left).conjugate() * trace(right) if scalar_is_ket + else trace(left) * trace(right) + ) + + if left.shape[0] != 1: + left = adjoint(left) + # We use trace so we don't force convertion to complex. + return trace(matmul(left, right)) + + +cpdef inner_op_data(Data left, Data op, Data right, bint scalar_is_ket=False): + """ + Compute the complex inner product . The shape of `left` is + used to determine if it has been supplied as a ket or a bra. The result of + this function will be identical if passed `left` or `adjoint(left)`. + + The parameter `scalar_is_ket` is only intended for the case where `left` + and `right` are both of shape (1, 1). In this case, `left` will be assumed + to be a ket unless `scalar_is_ket` is False. This parameter is ignored at + all other times. + """ + _check_shape_inner_op(left, op, right) + return inner_data(left, matmul(op, right), scalar_is_ket) + + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect inner = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('scalar_is_ket', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False), ]), @@ -231,14 +358,16 @@ inner.__doc__ =\ """ inner.add_specialisations([ (CSR, CSR, inner_csr), + (Dia, Dia, inner_dia), (Dense, Dense, inner_dense), + (Data, Data, inner_data), ], _defer=True) inner_op = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('op', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('left', 
_inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('op', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('scalar_is_ket', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False), ]), @@ -280,7 +409,9 @@ inner_op.__doc__ =\ """ inner_op.add_specialisations([ (CSR, CSR, CSR, inner_op_csr), + (Dia, Dia, Dia, inner_op_dia), (Dense, Dense, Dense, inner_op_dense), + (Data, Data, Data, inner_op_data), ], _defer=True) del _inspect, _Dispatcher diff --git a/qutip/core/data/kron.pyx b/qutip/core/data/kron.pyx index fd94adc369..fb3d3d69c6 100644 --- a/qutip/core/data/kron.pyx +++ b/qutip/core/data/kron.pyx @@ -2,15 +2,20 @@ #cython: boundscheck=False, wraparound=False, initializedcheck=False cimport cython +from libc.string cimport memset -from qutip.core.data.base cimport idxint +from qutip.core.data.base cimport idxint, Data from qutip.core.data.csr cimport CSR from qutip.core.data.dense cimport Dense -from qutip.core.data cimport csr +from .adjoint import transpose +from qutip.core.data.dia cimport Dia +from qutip.core.data cimport csr, dia +from qutip.core.data.convert import to as _to import numpy __all__ = [ - 'kron', 'kron_csr', 'kron_dense' + 'kron', 'kron_csr', 'kron_dense', 'kron_dia', + 'kron_transpose', 'kron_transpose_dense', 'kron_transpose_data', ] @@ -65,13 +70,102 @@ cpdef CSR kron_csr(CSR left, CSR right): return out +cdef inline void _vec_kron( + double complex * ptr_l, double complex * ptr_r, double complex * ptr_out, + idxint size_l, idxint size_r, idxint step +): + cdef idxint i, j + for i in range(size_l): + for j in range(size_r): + ptr_out[i*step+j] = ptr_l[i] * ptr_r[j] + + +cpdef Dia kron_dia(Dia left, Dia right): + cdef idxint nrows_l=left.shape[0], nrows_r=right.shape[0] + cdef idxint ncols_l=left.shape[1], ncols_r=right.shape[1] + cdef idxint nrows=_mul_checked(nrows_l, nrows_r) + cdef idxint ncols=_mul_checked(ncols_l, ncols_r) + cdef idxint 
max_diag=_mul_checked(right.num_diag, left.num_diag) + cdef idxint num_diag=0, diag_left, diag_right, delta, col_left, col_right + cdef idxint start_left, end_left, start_right, end_right + cdef Dia out + + if right.shape[0] == right.shape[1]: + out = dia.empty(nrows, ncols, max_diag) + memset( + out.data, 0, + max_diag * out.shape[1] * sizeof(double complex) + ) + for diag_left in range(left.num_diag): + for diag_right in range(right.num_diag): + out.offsets[num_diag] = ( + left.offsets[diag_left] * right.shape[0] + + right.offsets[diag_right] + ) + start_left = max(0, left.offsets[diag_left]) + end_left = min(left.shape[1], left.offsets[diag_left] + left.shape[0]) + _vec_kron( + left.data + (diag_left * ncols_l) + max(0, left.offsets[diag_left]), + right.data + (diag_right * ncols_r) + max(0, right.offsets[diag_right]), + out.data + (num_diag * ncols) + max(0, left.offsets[diag_left]) * right.shape[0] + max(0, right.offsets[diag_right]), + end_left - start_left, + right.shape[1] - abs(right.offsets[diag_right]), + right.shape[1] + ) + num_diag += 1 + out.num_diag = num_diag + + else: + max_diag = _mul_checked(max_diag, ncols_l) + if max_diag < nrows: + out = dia.empty(nrows, ncols, max_diag) + delta = right.shape[0] - right.shape[1] + for diag_left in range(left.num_diag): + for diag_right in range(right.num_diag): + start_left = max(0, left.offsets[diag_left]) + end_left = min(left.shape[1], left.shape[0] + left.offsets[diag_left]) + for col_left in range(start_left, end_left): + memset( + out.data + (num_diag * out.shape[1]), 0, + out.shape[1] * sizeof(double complex) + ) + + out.offsets[num_diag] = ( + left.offsets[diag_left] * right.shape[0] + + right.offsets[diag_right] + - col_left * delta + ) + + start_right = max(0, right.offsets[diag_right]) + end_right = min(right.shape[1], right.shape[0] + right.offsets[diag_right]) + for col_right in range(start_right, end_right): + out.data[num_diag * out.shape[1] + col_left * right.shape[1] + col_right] = ( + 
right.data[diag_right * right.shape[1] + col_right] + * left.data[diag_left * left.shape[1] + col_left] + ) + num_diag += 1 + out.num_diag = num_diag + + else: + # The output is not sparse enough and the empty data array would be + # larger than the dense array. + # Fall back to the dense operation + left_dense = _to(Dense, left) + right_dense = _to(Dense, right) + out_dense = kron_dense(left_dense, right_dense) + out = _to(Dia, out_dense) + + out = dia.clean_dia(out, True) + return out + + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect kron = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_ONLY), ]), name='kron', module=__name__, @@ -86,6 +180,37 @@ kron.__doc__ =\ kron.add_specialisations([ (CSR, CSR, CSR, kron_csr), (Dense, Dense, Dense, kron_dense), + (Dia, Dia, Dia, kron_dia), ], _defer=True) + +cpdef Data kron_transpose_data(Data left, Data right): + return kron(transpose(left), right) + + +cpdef Dense kron_transpose_dense(Dense left, Dense right): + return Dense(numpy.kron(left.as_ndarray().T, right.as_ndarray()), copy=False) + + +kron_transpose = _Dispatcher( + _inspect.Signature([ + _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_ONLY), + ]), + name='kron_transpose', + module=__name__, + inputs=('left', 'right'), + out=True, +) +kron_transpose.__doc__ =\ + """ + Compute the Kronecker product of two matrices with transposing the first + one. This is used to represent superoperators.
+ """ +kron_transpose.add_specialisations([ + (Data, Data, Data, kron_transpose_data), + (Dense, Dense, Dense, kron_transpose_dense), +], _defer=True) + + del _inspect, _Dispatcher diff --git a/qutip/core/data/linalg.py b/qutip/core/data/linalg.py index f55077381c..b4e687923b 100644 --- a/qutip/core/data/linalg.py +++ b/qutip/core/data/linalg.py @@ -6,7 +6,7 @@ __all__ = ['inv', 'inv_csr', 'inv_dense'] -def inv_dense(data): +def inv_dense(data, /): """Compute the inverse of a matrix""" if not isinstance(data, Dense): raise TypeError("expected data in Dense format but got " @@ -17,7 +17,7 @@ def inv_dense(data): return Dense(scipy.linalg.inv(data.as_ndarray()), copy=False) -def inv_csr(data): +def inv_csr(data, /): """Compute the inverse of a sparse matrix""" if not isinstance(data, CSR): raise TypeError("expected data in CSR format but got " diff --git a/qutip/core/data/make.py b/qutip/core/data/make.py index 8b34d6565c..39c5247169 100644 --- a/qutip/core/data/make.py +++ b/qutip/core/data/make.py @@ -1,7 +1,11 @@ from .dispatch import Dispatcher as _Dispatcher -from . import csr, dense, CSR, Dense +from . import csr, dense, dia, CSR, Dense, Dia +import numpy as np -__all__ = ['diag', 'one_element_csr', 'one_element_dense', 'one_element'] +__all__ = [ + 'diag', + 'one_element_csr', 'one_element_dense', 'one_element_dia', 'one_element' +] def _diag_signature(diagonals, offsets=0, shape=None): @@ -36,6 +40,7 @@ def _diag_signature(diagonals, offsets=0, shape=None): diag = _Dispatcher(_diag_signature, name='diag', inputs=(), out=True) diag.add_specialisations([ (CSR, csr.diags), + (Dia, dia.diags), (Dense, dense.diags), ], _defer=True) @@ -93,9 +98,34 @@ def one_element_dense(shape, position, value=1.0): return data +def one_element_dia(shape, position, value=1.0): + """ + Create a matrix with only one nonzero element. + + Parameters + ---------- + shape : tuple + The shape of the output as (``rows``, ``columns``). 
+ + position : tuple + The position of the non zero in the matrix as (``rows``, ``columns``). + + value : complex, optional + The value of the non-null element. + """ + if not (0 <= position[0] < shape[0] and 0 <= position[1] < shape[1]): + raise ValueError("Position of the elements out of bound: " + + str(position) + " in " + str(shape)) + data = np.zeros((1, shape[1]), dtype=complex) + data[0, position[1]] = value + offsets = np.array([position[1]-position[0]]) + return Dia((data, offsets), copy=False, shape=shape) + + one_element = _Dispatcher(one_element_dense, name='one_element', inputs=(), out=True) one_element.add_specialisations([ (CSR, one_element_csr), (Dense, one_element_dense), + (Dia, one_element_dia), ], _defer=True) diff --git a/qutip/core/data/matmul.pxd b/qutip/core/data/matmul.pxd index 54d21798d0..2df6e7bf25 100644 --- a/qutip/core/data/matmul.pxd +++ b/qutip/core/data/matmul.pxd @@ -2,13 +2,17 @@ from qutip.core.data.csr cimport CSR from qutip.core.data.dense cimport Dense +from qutip.core.data.dia cimport Dia from qutip.core.data.base cimport Data cpdef CSR matmul_csr(CSR left, CSR right, double complex scale=*, CSR out=*) cpdef Dense matmul_dense(Dense left, Dense right, double complex scale=*, Dense out=*) cpdef Dense matmul_csr_dense_dense(CSR left, Dense right, double complex scale=*, Dense out=*) +cpdef Dia matmul_dia(Dia left, Dia right, double complex scale=*) +cpdef Dense matmul_dia_dense_dense(Dia left, Dense right, double complex scale=*, Dense out=*) cdef Dense matmul_data_dense(Data left, Dense right) cdef void imatmul_data_dense(Data left, Dense right, double complex scale, Dense out) cpdef Dense multiply_dense(Dense left, Dense right) cpdef CSR multiply_csr(CSR left, CSR right) +cpdef Dia multiply_dia(Dia left, Dia right) diff --git a/qutip/core/data/matmul.pyx b/qutip/core/data/matmul.pyx index 0accc05286..06942c1314 100644 --- a/qutip/core/data/matmul.pyx +++ b/qutip/core/data/matmul.pyx @@ -3,6 +3,8 @@ from libc.string cimport 
memset, memcpy from libc.math cimport fabs +from libc.stdlib cimport abs +from libcpp.algorithm cimport lower_bound import warnings from qutip.settings import settings @@ -19,8 +21,12 @@ from qutip.core.data.base import idxint_dtype from qutip.core.data.base cimport idxint, Data from qutip.core.data.dense cimport Dense from qutip.core.data.csr cimport CSR -from qutip.core.data cimport csr, dense +from qutip.core.data.dia cimport Dia +from qutip.core.data.tidyup cimport tidyup_dia +from qutip.core.data cimport csr, dense, dia from qutip.core.data.add cimport iadd_dense, add_csr +from qutip.core.data.mul cimport imul_dense +from qutip.core.data.dense import OrderEfficiencyWarning cnp.import_array() @@ -37,13 +43,23 @@ cdef extern from "src/matmul_csr_vector.hpp" nogil: double complex *vec, double complex scale, double complex *out, T nrows) +cdef extern from "src/matmul_diag_vector.hpp" nogil: + void _matmul_diag_vector[T]( + double complex *data, double complex *vec, double complex *out, + T length, double complex scale) + void _matmul_diag_block[T]( + double complex *data, double complex *vec, double complex *out, + T length, T width) + + __all__ = [ - 'matmul', 'matmul_csr', 'matmul_dense', 'matmul_csr_dense_dense', - 'multiply', 'multiply_csr', 'multiply_dense', + 'matmul', 'matmul_csr', 'matmul_dense', 'matmul_dia', + 'matmul_csr_dense_dense', 'matmul_dia_dense_dense', 'matmul_dense_dia_dense', + 'multiply', 'multiply_csr', 'multiply_dense', 'multiply_dia', ] -cdef void _check_shape(Data left, Data right, Data out=None) nogil except *: +cdef void _check_shape(Data left, Data right, Data out=None) except * nogil: if left.shape[1] != right.shape[0]: raise ValueError( "incompatible matrix shapes " @@ -196,7 +212,7 @@ cpdef Dense matmul_csr_dense_dense(CSR left, Dense right, "out matrix is {}-ordered".format('Fortran' if out.fortran else 'C') + " but input is {}-ordered".format('Fortran' if right.fortran else 'C') ) - warnings.warn(msg, dense.OrderEfficiencyWarning) 
+ warnings.warn(msg, OrderEfficiencyWarning) # Rather than making loads of copies of the same code, we just moan at # the user and then transpose one of the arrays. We prefer to have # `right` in Fortran-order for cache efficiency. @@ -308,6 +324,198 @@ cpdef Dense matmul_dense(Dense left, Dense right, double complex scale=1, Dense return out +cpdef Dia matmul_dia(Dia left, Dia right, double complex scale=1): + _check_shape(left, right, None) + # We could probably do faster than this... + npoffsets = np.unique(np.add.outer(left.as_scipy().offsets, right.as_scipy().offsets)) + npoffsets = npoffsets[np.logical_and(npoffsets > -left.shape[0], npoffsets < right.shape[1])] + cdef idxint[:] offsets = npoffsets + if len(npoffsets) == 0: + return dia.zeros(left.shape[0], right.shape[1]) + cdef idxint *ptr = &offsets[0] + cdef size_t num_diag = offsets.shape[0], diag_out, diag_left, diag_right + cdef idxint start_left, end_left, start_out, end_out, start, end, col, off_out + npdata = np.zeros((num_diag, right.shape[1]), dtype=complex) + cdef double complex[:, ::1] data = npdata + + with nogil: + for diag_left in range(left.num_diag): + for diag_right in range(right.num_diag): + off_out = left.offsets[diag_left] + right.offsets[diag_right] + if off_out <= -left.shape[0] or off_out >= right.shape[1]: + continue + diag_out = (lower_bound(ptr, ptr + num_diag, off_out) - ptr) + + start_left = max(0, left.offsets[diag_left]) + right.offsets[diag_right] + start_right = max(0, right.offsets[diag_right]) + start_out = max(0, off_out) + end_left = min(left.shape[1], left.shape[0] + left.offsets[diag_left]) + right.offsets[diag_right] + end_right = min(right.shape[1], right.shape[0] + right.offsets[diag_right]) + end_out = min(right.shape[1], left.shape[0] + off_out) + start = max(start_left, start_right, start_out) + end = min(end_left, end_right, end_out) + + for col in range(start, end): + data[diag_out, col] += ( + scale + * left.data[diag_left * left.shape[1] + col - 
right.offsets[diag_right]] + * right.data[diag_right * right.shape[1] + col] + ) + return Dia((npdata, npoffsets), shape=(left.shape[0], right.shape[1]), copy=False) + + +cpdef Dense matmul_dia_dense_dense(Dia left, Dense right, double complex scale=1, Dense out=None): + _check_shape(left, right, out) + cdef Dense tmp + if out is not None and scale == 1.: + tmp = out + out = None + else: + tmp = dense.zeros(left.shape[0], right.shape[1], right.fortran) + + cdef idxint start_left, end_left, start_out, end_out, length, i, start_right + cdef idxint col, strideR_in, strideC_in, strideR_out, strideC_out + cdef size_t diag + + with nogil: + strideR_in = right.shape[1] if not right.fortran else 1 + strideC_in = right.shape[0] if right.fortran else 1 + strideR_out = tmp.shape[1] if not tmp.fortran else 1 + strideC_out = tmp.shape[0] if tmp.fortran else 1 + + if ( + (left.shape[0] == left.shape[1]) + and (strideC_in == 1) + and (strideC_out == 1) + ): + #Fast track for easy case + for diag in range(left.num_diag): + _matmul_diag_block( + right.data + max(0, left.offsets[diag]) * strideR_in, + left.data + diag * left.shape[1] + max(0, left.offsets[diag]), + tmp.data + max(0, -left.offsets[diag]) * strideR_out, + left.shape[1] - abs(left.offsets[diag]), + right.shape[1] + ) + + elif (strideR_in == 1) and (strideR_out == 1): + for col in range(right.shape[1]): + for diag in range(left.num_diag): + start_left = max(0, left.offsets[diag]) + end_left = min(left.shape[1], left.shape[0] + left.offsets[diag]) + start_out = max(0, -left.offsets[diag]) + end_out = min(left.shape[0], left.shape[1] - left.offsets[diag]) + length = min(end_left - start_left, end_out - start_out) + start_right = start_left + col * strideC_in + start_left += diag * left.shape[1] + start_out += col * strideC_out + _matmul_diag_vector( + left.data + start_left, + right.data + start_right, + tmp.data + start_out, + length, 1. 
+ ) + + else: + for col in range(right.shape[1]): + for diag in range(left.num_diag): + start_left = max(0, left.offsets[diag]) + end_left = min(left.shape[1], left.shape[0] + left.offsets[diag]) + start_out = max(0, -left.offsets[diag]) + end_out = min(left.shape[0], left.shape[1] - left.offsets[diag]) + length = min(end_left - start_left, end_out - start_out) + for i in range(length): + tmp.data[(start_out + i) * strideR_out + col * strideC_out] += ( + left.data[diag * left.shape[1] + i + start_left] + * right.data[(start_left + i) * strideR_in + col * strideC_in] + ) + + if out is None and scale == 1.: + out = tmp + elif out is None: + imul_dense(tmp, scale) + out = tmp + else: + iadd_dense(out, tmp, scale) + + return out + + +cpdef Dense matmul_dense_dia_dense(Dense left, Dia right, double complex scale=1, Dense out=None): + _check_shape(left, right, out) + cdef Dense tmp + if out is not None and scale == 1.: + tmp = out + out = None + else: + tmp = dense.zeros(left.shape[0], right.shape[1], left.fortran) + + cdef idxint start_left, end_right, start_out, end_out, length, i, start_right + cdef idxint row, strideR_in, strideC_in, strideR_out, strideC_out + cdef size_t diag + + with nogil: + strideR_in = left.shape[1] if not left.fortran else 1 + strideC_in = left.shape[0] if left.fortran else 1 + strideR_out = tmp.shape[1] if not tmp.fortran else 1 + strideC_out = tmp.shape[0] if tmp.fortran else 1 + + if ( + (right.shape[0] == right.shape[1]) + and (strideR_in == 1) + and (strideR_out == 1) + ): + #Fast track for easy case + for diag in range(right.num_diag): + _matmul_diag_block( + left.data + max(0, -right.offsets[diag]) * strideC_in, + right.data + diag * right.shape[1] + max(0, right.offsets[diag]), + tmp.data + max(0, right.offsets[diag]) * strideC_out, + right.shape[1] - abs(right.offsets[diag]), + left.shape[0] + ) + + elif (strideC_in == 1) and (strideC_out == 1): + for row in range(left.shape[0]): + for diag in range(right.num_diag): + start_right = 
max(0, right.offsets[diag]) + end_right = min(right.shape[1], right.shape[0] + right.offsets[diag]) + start_out = max(0, right.offsets[diag]) + length = end_right - start_right + start_left = max(0, -right.offsets[diag]) + row * strideR_in + start_right += diag * right.shape[1] + start_out = max(0, right.offsets[diag]) + row * strideR_out + _matmul_diag_vector( + right.data + start_right, + left.data + start_left, + tmp.data + start_out, + length, 1. + ) + + else: + for row in range(left.shape[0]): + for diag in range(right.num_diag): + start_right = max(0, right.offsets[diag]) + end_right = min(right.shape[1], right.shape[0] + right.offsets[diag]) + start_left = max(0, -right.offsets[diag]) + length = end_right - start_right + for i in range(length): + tmp.data[(start_right + i) * strideC_out + row * strideR_out] += ( + right.data[diag * right.shape[1] + i + start_right] + * left.data[(start_left + i) * strideC_in + row * strideR_in] + ) + + if out is None and scale == 1.: + out = tmp + elif out is None: + imul_dense(tmp, scale) + out = tmp + else: + iadd_dense(out, tmp, scale) + + return out + + cpdef CSR multiply_csr(CSR left, CSR right): """Element-wise multiplication of CSR matrices.""" if left.shape[0] != right.shape[0] or left.shape[1] != right.shape[1]: @@ -371,6 +579,70 @@ cpdef CSR multiply_csr(CSR left, CSR right): return out +cpdef Dia multiply_dia(Dia left, Dia right): + if left.shape[0] != right.shape[0] or left.shape[1] != right.shape[1]: + raise ValueError( + "incompatible matrix shapes " + + str(left.shape) + + " and " + + str(right.shape) + ) + cdef idxint diag_left=0, diag_right=0, out_diag=0, col + cdef bint sorted=True + cdef Dia out = dia.empty(left.shape[0], left.shape[1], min(left.num_diag, right.num_diag)) + + with nogil: + for diag_left in range(1, left.num_diag): + if left.offsets[diag_left-1] > left.offsets[diag_left]: + sorted = False + continue + if sorted: + for diag_right in range(1, right.num_diag): + if right.offsets[diag_right-1] 
> right.offsets[diag_right]: + sorted = False + continue + + if sorted: + diag_left = 0 + diag_right = 0 + while diag_left < left.num_diag and diag_right < right.num_diag: + if left.offsets[diag_left] == right.offsets[diag_right]: + out.offsets[out_diag] = left.offsets[diag_left] + for col in range(out.shape[1]): + if col >= left.shape[1] or col >= right.shape[1]: + out.data[out_diag * out.shape[1] + col] = 0 + else: + out.data[out_diag * out.shape[1] + col] = ( + left.data[diag_left * left.shape[1] + col] * + right.data[diag_right * right.shape[1] + col] + ) + out_diag += 1 + diag_left += 1 + diag_right += 1 + elif left.offsets[diag_left] < right.offsets[diag_right]: + diag_left += 1 + else: + diag_right += 1 + + else: + for diag_left in range(left.num_diag): + for diag_right in range(right.num_diag): + if left.offsets[diag_left] == right.offsets[diag_right]: + out.offsets[out_diag] = left.offsets[diag_left] + for col in range(right.shape[1]): + out.data[out_diag * out.shape[1] + col] = ( + left.data[diag_left * left.shape[1] + col] * + right.data[diag_right * right.shape[1] + col] + ) + out_diag += 1 + break + out.num_diag = out_diag + + if settings.core['auto_tidyup']: + tidyup_dia(out, settings.core['auto_tidyup_atol'], True) + return out + + cpdef Dense multiply_dense(Dense left, Dense right): """Element-wise multiplication of Dense matrices.""" if left.shape[0] != right.shape[0] or left.shape[1] != right.shape[1]: @@ -397,8 +669,8 @@ import inspect as _inspect matmul = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('scale', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=1), ]), @@ -429,13 +701,16 @@ matmul.add_specialisations([ (CSR, CSR, CSR, matmul_csr), (CSR, Dense, Dense, 
matmul_csr_dense_dense), (Dense, Dense, Dense, matmul_dense), + (Dia, Dia, Dia, matmul_dia), + (Dia, Dense, Dense, matmul_dia_dense_dense), + (Dense, Dia, Dense, matmul_dense_dia_dense), ], _defer=True) multiply = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('left', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('right', _inspect.Parameter.POSITIONAL_ONLY), ]), name='multiply', module=__name__, @@ -447,6 +722,7 @@ multiply.__doc__ =\ multiply.add_specialisations([ (CSR, CSR, CSR, multiply_csr), (Dense, Dense, Dense, multiply_dense), + (Dia, Dia, Dia, multiply_dia), ], _defer=True) @@ -459,6 +735,8 @@ cdef Dense matmul_data_dense(Data left, Dense right): out = matmul_csr_dense_dense(left, right) elif type(left) is Dense: out = matmul_dense(left, right) + elif type(left) is Dia: + out = matmul_dia_dense_dense(left, right) else: out = matmul(left, right) return out @@ -467,7 +745,9 @@ cdef Dense matmul_data_dense(Data left, Dense right): cdef void imatmul_data_dense(Data left, Dense right, double complex scale, Dense out): if type(left) is CSR: matmul_csr_dense_dense(left, right, scale, out) + elif type(left) is Dia: + matmul_dia_dense_dense(left, right, scale, out) elif type(left) is Dense: matmul_dense(left, right, scale, out) else: - iadd_dense(out, matmul(left, right), scale) + iadd_dense(out, matmul(left, right, dtype=Dense), scale) diff --git a/qutip/core/data/mul.pxd b/qutip/core/data/mul.pxd index 2c29259631..6e7b560eef 100644 --- a/qutip/core/data/mul.pxd +++ b/qutip/core/data/mul.pxd @@ -1,6 +1,6 @@ #cython: language_level=3 -from qutip.core.data cimport CSR, Dense, Data +from qutip.core.data cimport CSR, Dense, Data, Dia cpdef CSR imul_csr(CSR matrix, double complex value) cpdef CSR mul_csr(CSR matrix, double complex value) @@ -10,4 +10,8 @@ cpdef Dense imul_dense(Dense matrix, double complex 
value) cpdef Dense mul_dense(Dense matrix, double complex value) cpdef Dense neg_dense(Dense matrix) +cpdef Dia imul_dia(Dia matrix, double complex value) +cpdef Dia mul_dia(Dia matrix, double complex value) +cpdef Dia neg_dia(Dia matrix) + cpdef Data imul_data(Data matrix, double complex value) diff --git a/qutip/core/data/mul.pyx b/qutip/core/data/mul.pyx index 27d8e47663..d81e68882c 100644 --- a/qutip/core/data/mul.pyx +++ b/qutip/core/data/mul.pyx @@ -1,21 +1,21 @@ #cython: language_level=3 #cython: boundscheck=False, wrapround=False, initializedcheck=False -from qutip.core.data cimport idxint, csr, CSR, dense, Dense, Data +from qutip.core.data cimport idxint, csr, CSR, dense, Dense, Data, Dia, dia +from scipy.linalg.cython_blas cimport zscal __all__ = [ - 'mul', 'mul_csr', 'mul_dense', - 'imul', 'imul_csr', 'imul_dense', 'imul_data', - 'neg', 'neg_csr', 'neg_dense', + 'mul', 'mul_csr', 'mul_dense', 'mul_dia', + 'imul', 'imul_csr', 'imul_dense', 'imul_dia', 'imul_data', + 'neg', 'neg_csr', 'neg_dense', 'neg_dia', ] cpdef CSR imul_csr(CSR matrix, double complex value): """Multiply this CSR `matrix` by a complex scalar `value`.""" - cdef idxint ptr - with nogil: - for ptr in range(csr.nnz(matrix)): - matrix.data[ptr] *= value + cdef idxint l = csr.nnz(matrix) + cdef int ONE=1 + zscal(&l, &value, matrix.data, &ONE) return matrix cpdef CSR mul_csr(CSR matrix, double complex value): @@ -39,12 +39,43 @@ cpdef CSR neg_csr(CSR matrix): return out +cpdef Dia imul_dia(Dia matrix, double complex value): + """Multiply this Dia `matrix` by a complex scalar `value`.""" + cdef idxint l = matrix.num_diag * matrix.shape[1] + cdef int ONE=1 + zscal(&l, &value, matrix.data, &ONE) + return matrix + +cpdef Dia mul_dia(Dia matrix, double complex value): + """Multiply this Dia `matrix` by a complex scalar `value`.""" + if value == 0: + return dia.zeros(matrix.shape[0], matrix.shape[1]) + cdef Dia out = dia.empty_like(matrix) + cdef idxint ptr, diag, l = matrix.num_diag * 
matrix.shape[1] + with nogil: + for ptr in range(l): + out.data[ptr] = value * matrix.data[ptr] + for ptr in range(matrix.num_diag): + out.offsets[ptr] = matrix.offsets[ptr] + out.num_diag = matrix.num_diag + return out + +cpdef Dia neg_dia(Dia matrix): + """Unary negation of this Dia `matrix`. Return a new object.""" + cdef Dia out = matrix.copy() + cdef idxint ptr, l = matrix.num_diag * matrix.shape[1] + with nogil: + for ptr in range(l): + out.data[ptr] = -matrix.data[ptr] + return out + + cpdef Dense imul_dense(Dense matrix, double complex value): """Multiply this Dense `matrix` by a complex scalar `value`.""" cdef size_t ptr - with nogil: - for ptr in range(matrix.shape[0]*matrix.shape[1]): - matrix.data[ptr] *= value + cdef int ONE=1 + cdef idxint l = matrix.shape[0]*matrix.shape[1] + zscal(&l, &value, matrix.data, &ONE) return matrix cpdef Dense mul_dense(Dense matrix, double complex value): @@ -71,7 +102,7 @@ import inspect as _inspect mul = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('value', _inspect.Parameter.POSITIONAL_OR_KEYWORD), ]), name='mul', @@ -83,6 +114,7 @@ mul.__doc__ =\ """Multiply a matrix element-wise by a scalar.""" mul.add_specialisations([ (CSR, CSR, mul_csr), + (Dia, Dia, mul_dia), (Dense, Dense, mul_dense), ], _defer=True) @@ -91,7 +123,7 @@ imul = _Dispatcher( # give expected results if used as: # mat = imul(mat, x) _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('value', _inspect.Parameter.POSITIONAL_OR_KEYWORD), ]), name='imul', @@ -103,12 +135,13 @@ imul.__doc__ =\ """Multiply inplace a matrix element-wise by a scalar.""" imul.add_specialisations([ (CSR, CSR, imul_csr), + (Dia, Dia, imul_dia), (Dense, Dense, imul_dense), ], _defer=True) neg = 
_Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='neg', module=__name__, @@ -119,6 +152,7 @@ neg.__doc__ =\ """Unary element-wise negation of a matrix.""" neg.add_specialisations([ (CSR, CSR, neg_csr), + (Dia, Dia, neg_dia), (Dense, Dense, neg_dense), ], _defer=True) @@ -130,5 +164,7 @@ cpdef Data imul_data(Data matrix, double complex value): return imul_csr(matrix, value) elif type(matrix) is Dense: return imul_dense(matrix, value) + elif type(matrix) is Dia: + return imul_dia(matrix, value) else: return imul(matrix, value) diff --git a/qutip/core/data/norm.pxd b/qutip/core/data/norm.pxd index 5e1b7ebe0d..ed85c4138b 100644 --- a/qutip/core/data/norm.pxd +++ b/qutip/core/data/norm.pxd @@ -1,15 +1,24 @@ #cython: language_level=3 #cython: boundscheck=False, wraparound=False, initializedcheck=False -from qutip.core.data cimport CSR, Dense, Data +from qutip.core.data cimport CSR, Dense, Data, Dia cpdef double one_csr(CSR matrix) except -1 cpdef double trace_csr(CSR matrix) except -1 cpdef double max_csr(CSR matrix) nogil cpdef double frobenius_csr(CSR matrix) nogil -cpdef double l2_csr(CSR matrix) nogil except -1 +cpdef double l2_csr(CSR matrix) except -1 nogil cpdef double frobenius_dense(Dense matrix) nogil -cpdef double l2_dense(Dense matrix) nogil except -1 +cpdef double l2_dense(Dense matrix) except -1 nogil -cpdef double frobenius_data(Data state) +cpdef double one_dia(Dia matrix) except -1 +cpdef double max_dia(Dia matrix) nogil +cpdef double frobenius_dia(Dia matrix) nogil +cpdef double l2_dia(Dia matrix) except -1 nogil + +cpdef double frobenius_data(Data state) except -1 + +cdef inline int int_max(int a, int b) nogil: + # Name collision between the ``max`` builtin and norm.max + return b if b > a else a diff --git a/qutip/core/data/norm.pyx b/qutip/core/data/norm.pyx index 998d49d1d2..7f3ecb1a1e 100644 --- 
a/qutip/core/data/norm.pyx +++ b/qutip/core/data/norm.pyx @@ -7,8 +7,9 @@ from cpython cimport mem from scipy.linalg cimport cython_blas as blas import scipy +import numpy as np -from qutip.core.data cimport CSR, Dense, csr, dense, Data +from qutip.core.data cimport CSR, Dense, csr, Data, Dia from qutip.core.data.adjoint cimport adjoint_csr, adjoint_dense from qutip.core.data.matmul cimport matmul_csr @@ -92,7 +93,7 @@ cpdef double frobenius_csr(CSR matrix) nogil: cdef int n=csr.nnz(matrix), inc=1 return blas.dznrm2(&n, &matrix.data[0], &inc) -cpdef double l2_csr(CSR matrix) nogil except -1: +cpdef double l2_csr(CSR matrix) except -1 nogil: if matrix.shape[0] != 1 and matrix.shape[1] != 1: raise ValueError("L2 norm is only defined on vectors") return frobenius_csr(matrix) @@ -128,18 +129,57 @@ cpdef double frobenius_dense(Dense matrix) nogil: cdef int inc = 1 return blas.dznrm2(&n, matrix.data, &inc) -cpdef double l2_dense(Dense matrix) nogil except -1: +cpdef double l2_dense(Dense matrix) except -1 nogil: if matrix.shape[0] != 1 and matrix.shape[1] != 1: raise ValueError("L2 norm is only defined on vectors") return frobenius_dense(matrix) +cpdef double frobenius_dia(Dia matrix) nogil: + cdef int offset, diag, start, end, col=1 + cdef double total=0, cur + for diag in range(matrix.num_diag): + offset = matrix.offsets[diag] + start = int_max(0, offset) + end = min(matrix.shape[1], matrix.shape[0] + offset) + for col in range(start, end): + total += abssq(matrix.data[diag * matrix.shape[1] + col]) + return math.sqrt(total) + +cpdef double l2_dia(Dia matrix) except -1 nogil: + if matrix.shape[0] != 1 and matrix.shape[1] != 1: + raise ValueError("L2 norm is only defined on vectors") + return frobenius_dia(matrix) + +cpdef double max_dia(Dia matrix) nogil: + cdef int offset, diag, start, end, col=1 + cdef double total=0, cur + for diag in range(matrix.num_diag): + offset = matrix.offsets[diag] + start = int_max(0, offset) + end = min(matrix.shape[1], matrix.shape[0] + 
offset) + for col in range(start, end): + cur = abssq(matrix.data[diag * matrix.shape[1] + col]) + total = cur if cur > total else total + return math.sqrt(total) + +cpdef double one_dia(Dia matrix) except -1: + cdef int offset, diag, start, end, col=1 + cols_one = np.zeros(matrix.shape[1], dtype=float) + for diag in range(matrix.num_diag): + offset = matrix.offsets[diag] + start = int_max(0, offset) + end = min(matrix.shape[1], matrix.shape[0] + offset) + for col in range(start, end): + cols_one[col] += abs(matrix.data[diag * matrix.shape[1] + col]) + return np.max(cols_one) + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect l2 = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('vector', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('vector', _inspect.Parameter.POSITIONAL_ONLY), ]), name='l2', module=__name__, @@ -154,11 +194,12 @@ l2.__doc__ =\ """ l2.add_specialisations([ (Dense, l2_dense), + (Dia, l2_dia), (CSR, l2_csr), ], _defer=True) _norm_signature = _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]) frobenius = _Dispatcher(_norm_signature, name='frobenius', module=__name__, inputs=('matrix',)) @@ -171,6 +212,7 @@ frobenius.__doc__ =\ """ frobenius.add_specialisations([ (Dense, frobenius_dense), + (Dia, frobenius_dia), (CSR, frobenius_csr), ], _defer=True) @@ -183,6 +225,7 @@ max.__doc__ =\ """ max.add_specialisations([ (Dense, max_dense), + (Dia, max_dia), (CSR, max_csr), ], _defer=True) @@ -195,13 +238,14 @@ one.__doc__ =\ """ one.add_specialisations([ (Dense, one_dense), + (Dia, one_dia), (CSR, one_csr), ], _defer=True) trace = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), inputs=('matrix',), name='trace', @@ -220,7 +264,7 @@ trace.add_specialisations([ ], 
_defer=True) -cpdef double frobenius_data(Data state): +cpdef double frobenius_data(Data state) except -1: if type(state) is Dense: return frobenius_dense(state) elif type(state) is CSR: diff --git a/qutip/core/data/permute.pyx b/qutip/core/data/permute.pyx index 72f44af2af..dc4517677b 100644 --- a/qutip/core/data/permute.pyx +++ b/qutip/core/data/permute.pyx @@ -320,7 +320,7 @@ import inspect as _inspect dimensions = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('dimensions', _inspect.Parameter.POSITIONAL_OR_KEYWORD), _inspect.Parameter('order', _inspect.Parameter.POSITIONAL_OR_KEYWORD), ]), @@ -367,7 +367,7 @@ dimensions.add_specialisations([ indices = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('row_perm', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=None), _inspect.Parameter('col_perm', _inspect.Parameter.POSITIONAL_OR_KEYWORD, diff --git a/qutip/core/data/pow.pyx b/qutip/core/data/pow.pyx index b90a1a44b7..7fc6d4568e 100644 --- a/qutip/core/data/pow.pyx +++ b/qutip/core/data/pow.pyx @@ -3,14 +3,15 @@ cimport cython -from qutip.core.data cimport csr, dense +from qutip.core.data cimport csr, dense, dia from qutip.core.data.csr cimport CSR from qutip.core.data.dense cimport Dense -from qutip.core.data.matmul cimport matmul_csr +from qutip.core.data.dia cimport Dia +from qutip.core.data.matmul cimport matmul_csr, matmul_dia import numpy as np __all__ = [ - 'pow', 'pow_csr', 'pow_dense', + 'pow', 'pow_csr', 'pow_dense', 'pow_dia', ] @@ -41,6 +42,33 @@ cpdef CSR pow_csr(CSR matrix, unsigned long long n): return out +@cython.nonecheck(False) +@cython.cdivision(True) +cpdef Dia pow_dia(Dia matrix, unsigned long long n): + if matrix.shape[0] != matrix.shape[1]: + raise 
ValueError("matrix power only works with square matrices") + if n == 0: + return dia.identity(matrix.shape[0]) + if n == 1: + return matrix.copy() + # We do the matrix power in terms of powers of two, so we can do it + # ceil(lg(n)) + bits(n) - 1 matrix mulitplications, where `bits` is the + # number of set bits in the input. + # + # We don't have to do matrix.copy() or pow.copy() here, because we've + # guaranteed that we won't be returning without at least one matrix + # multiplcation, which will allocate a new matrix. + cdef Dia pow = matrix + cdef Dia out = pow if n & 1 else None + n >>= 1 + while n: + pow = matmul_dia(pow, pow) + if n & 1: + out = pow if out is None else matmul_dia(out, pow) + n >>= 1 + return out + + cpdef Dense pow_dense(Dense matrix, unsigned long long n): if matrix.shape[0] != matrix.shape[1]: raise ValueError("matrix power only works with square matrices") @@ -56,7 +84,7 @@ import inspect as _inspect pow = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('n', _inspect.Parameter.POSITIONAL_OR_KEYWORD), ]), name='pow', @@ -81,6 +109,7 @@ pow.__doc__ =\ pow.add_specialisations([ (CSR, CSR, pow_csr), (Dense, Dense, pow_dense), + (Dia, Dia, pow_dia), ], _defer=True) del _inspect, _Dispatcher diff --git a/qutip/core/data/project.pyx b/qutip/core/data/project.pyx index b88771e365..45fa7ac956 100644 --- a/qutip/core/data/project.pyx +++ b/qutip/core/data/project.pyx @@ -1,17 +1,17 @@ #cython: language_level=3 #cython: boundscheck=False, wraparound=False, initializedcheck=False -from libc.string cimport memcpy +from libc.string cimport memcpy, memset from qutip.core.data.base cimport idxint -from qutip.core.data cimport csr, dense, Dense +from qutip.core.data cimport csr, dense, Dense, dia, Dia from qutip.core.data.csr cimport CSR cdef extern from "" namespace "std" nogil: double complex conj(double 
complex x) __all__ = [ - 'project', 'project_csr', 'project_dense', + 'project', 'project_csr', 'project_dense', 'project_dia', ] @@ -67,6 +67,7 @@ cdef void _project_bra_csr(CSR bra, CSR out) nogil: for row_out in range(row_out, out.shape[0]): out.row_index[row_out + 1] = cur + cpdef CSR project_csr(CSR state): """ Calculate the projection |state> tolsq: + return False + return True + + cpdef bint isdiag_csr(CSR matrix) nogil: cdef size_t row, ptr_start, ptr_end=matrix.row_index[0] for row in range(matrix.shape[0]): @@ -199,6 +261,21 @@ cpdef bint isdiag_dense(Dense matrix) nogil: return True +cpdef bint iszero_dia(Dia matrix, double tol=-1) nogil: + cdef size_t diag, start, end, col + if tol < 0: + with gil: + tol = settings.core["atol"] + cdef double tolsq = tol*tol + for diag in range(matrix.num_diag): + start = max(0, matrix.offsets[diag]) + end = min(matrix.shape[1], matrix.shape[0] + matrix.offsets[diag]) + for col in range(start, end): + if _abssq(matrix.data[diag * matrix.shape[1] + col]) > tolsq: + return False + return True + + cpdef bint iszero_csr(CSR matrix, double tol=-1) nogil: cdef size_t ptr if tol < 0: @@ -210,6 +287,7 @@ cpdef bint iszero_csr(CSR matrix, double tol=-1) nogil: return False return True + cpdef bint iszero_dense(Dense matrix, double tol=-1) nogil: cdef size_t ptr if tol < 0: @@ -227,7 +305,7 @@ import inspect as _inspect isherm = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('tol', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=-1), ]), @@ -255,12 +333,14 @@ isherm.__doc__ =\ `qutip.settings.atol` is used instead. 
""" isherm.add_specialisations([ + (Dense, isherm_dense), + (Dia, isherm_dia), (CSR, isherm_csr), ], _defer=True) isdiag = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='isdiag', module=__name__, @@ -277,12 +357,14 @@ isdiag.__doc__ =\ The matrix to test for diagonality. """ isdiag.add_specialisations([ + (Dense, isdiag_dense), + (Dia, isdiag_dia), (CSR, isdiag_csr), ], _defer=True) iszero = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('tol', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=-1), ]), @@ -311,6 +393,7 @@ iszero.__doc__ =\ """ iszero.add_specialisations([ (CSR, iszero_csr), + (Dia, iszero_dia), (Dense, iszero_dense), ], _defer=True) diff --git a/qutip/core/data/ptrace.pyx b/qutip/core/data/ptrace.pyx index 3e58e82d94..00765608c7 100644 --- a/qutip/core/data/ptrace.pyx +++ b/qutip/core/data/ptrace.pyx @@ -6,15 +6,16 @@ import numbers import numpy as np cimport numpy as cnp +cimport cython -from qutip.core.data cimport csr, dense, idxint, CSR, Dense, Data +from qutip.core.data cimport csr, dense, idxint, CSR, Dense, Data, Dia, dia from qutip.core.data.base import idxint_dtype from qutip.settings import settings cnp.import_array() __all__ = [ - 'ptrace', 'ptrace_csr', 'ptrace_dense', 'ptrace_csr_dense', + 'ptrace', 'ptrace_csr', 'ptrace_dense', 'ptrace_csr_dense', 'ptrace_dia', ] cdef tuple _parse_inputs(object dims, object sel, tuple shape): @@ -121,6 +122,37 @@ cpdef CSR ptrace_csr(CSR matrix, object dims, object sel): size, size, p, tol) +def ptrace_dia(matrix, dims, sel): + if len(sel) == len(dims): + return matrix.copy() + dims, sel = _parse_inputs(dims, sel, matrix.shape) + mat = matrix.as_scipy() + cdef idxint[:, ::1] tensor_table = np.zeros((dims.shape[0], 3), 
dtype=idxint_dtype) + cdef idxint pos_row[2] + cdef idxint pos_col[2] + size = _populate_tensor_table(dims, sel, tensor_table) + data = {} + for i, offset in enumerate(mat.offsets): + start = max(0, offset) + end = min(matrix.shape[0] + offset, matrix.shape[1]) + for col in range(start, end): + _i2_k_t(col - offset, tensor_table, pos_row) + _i2_k_t(col, tensor_table, pos_col) + if pos_row[1] == pos_col[1]: + new_offset = pos_col[0] - pos_row[0] + if new_offset not in data: + data[new_offset] = np.zeros(size, dtype=complex) + data[new_offset][pos_col[0]] += mat.data[i, col] + + if len(data) == 0: + return dia.zeros(size, size) + offsets = np.array(list(data.keys()), dtype=idxint_dtype) + data = np.array(list(data.values()), dtype=complex) + out = Dia((data, offsets), shape=(size, size), copy=False) + out = dia.clean_dia(out, True) + return out + + cpdef Dense ptrace_csr_dense(CSR matrix, object dims, object sel): dims, sel = _parse_inputs(dims, sel, matrix.shape) @@ -169,7 +201,7 @@ import inspect as _inspect ptrace = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('dims', _inspect.Parameter.POSITIONAL_OR_KEYWORD), _inspect.Parameter('sel', _inspect.Parameter.POSITIONAL_OR_KEYWORD), ]), @@ -211,6 +243,7 @@ ptrace.add_specialisations([ (CSR, CSR, ptrace_csr), (CSR, Dense, ptrace_csr_dense), (Dense, Dense, ptrace_dense), + (Dia, Dia, ptrace_dia), ], _defer=True) del _inspect, _Dispatcher diff --git a/qutip/core/data/reshape.pxd b/qutip/core/data/reshape.pxd index 4efae3a0e4..e8118b3495 100644 --- a/qutip/core/data/reshape.pxd +++ b/qutip/core/data/reshape.pxd @@ -1,12 +1,16 @@ #cython: language_level=3 from qutip.core.data.base cimport idxint -from qutip.core.data cimport CSR, Dense +from qutip.core.data cimport CSR, Dense, Dia cpdef CSR reshape_csr(CSR matrix, idxint n_rows_out, idxint n_cols_out) cpdef CSR 
column_stack_csr(CSR matrix) cpdef CSR column_unstack_csr(CSR matrix, idxint rows) +cpdef Dia reshape_dia(Dia matrix, idxint n_rows_out, idxint n_cols_out) +cpdef Dia column_stack_dia(Dia matrix) +cpdef Dia column_unstack_dia(Dia matrix, idxint rows) + cpdef Dense reshape_dense(Dense matrix, idxint n_rows_out, idxint n_cols_out) cpdef Dense column_stack_dense(Dense matrix, bint inplace=*) cpdef Dense column_unstack_dense(Dense matrix, idxint rows, bint inplace=*) diff --git a/qutip/core/data/reshape.pyx b/qutip/core/data/reshape.pyx index 6871e12c9b..4b9683ac60 100644 --- a/qutip/core/data/reshape.pyx +++ b/qutip/core/data/reshape.pyx @@ -8,13 +8,13 @@ cimport cython import warnings from qutip.core.data.base cimport idxint -from qutip.core.data cimport csr, dense, CSR, Dense, Data +from qutip.core.data cimport csr, dense, CSR, Dense, Data, Dia __all__ = [ - 'reshape', 'reshape_csr', 'reshape_dense', - 'column_stack', 'column_stack_csr', 'column_stack_dense', - 'column_unstack', 'column_unstack_csr', 'column_unstack_dense', - 'split_columns', 'split_columns_dense', 'split_columns_csr', + 'reshape', 'reshape_csr', 'reshape_dense', 'reshape_dia', + 'column_stack', 'column_stack_csr', 'column_stack_dense', 'column_stack_dia', + 'column_unstack', 'column_unstack_csr', 'column_unstack_dense', 'column_unstack_dia', + 'split_columns', 'split_columns_dense', 'split_columns_csr', 'split_columns_dia', ] @@ -74,6 +74,14 @@ cpdef Dense reshape_dense(Dense matrix, idxint n_rows_out, idxint n_cols_out): return out +cpdef Dia reshape_dia(Dia matrix, idxint n_rows_out, idxint n_cols_out): + _reshape_check_input(matrix, n_rows_out, n_cols_out) + # Once reshaped, diagonals are no longer ligned up. 
+ return Dia( + matrix.as_scipy().reshape((n_rows_out, n_cols_out)).todia(), copy=False + ) + + cpdef CSR column_stack_csr(CSR matrix): if matrix.shape[1] == 1: return matrix.copy() @@ -94,6 +102,12 @@ cpdef Dense column_stack_dense(Dense matrix, bint inplace=False): return reshape_dense(matrix.transpose(), matrix.shape[0]*matrix.shape[1], 1) +cpdef Dia column_stack_dia(Dia matrix): + if matrix.shape[1] == 1: + return matrix.copy() + return reshape_dia(matrix.transpose(), matrix.shape[0]*matrix.shape[1], 1) + + cdef void _column_unstack_check_shape(Data matrix, idxint rows) except *: if matrix.shape[1] != 1: raise ValueError("input is not a single column") @@ -108,6 +122,7 @@ cpdef CSR column_unstack_csr(CSR matrix, idxint rows): cdef idxint cols = matrix.shape[0] // rows return reshape_csr(matrix, cols, rows).transpose() + cpdef Dense column_unstack_dense(Dense matrix, idxint rows, bint inplace=False): _column_unstack_check_shape(matrix, rows) cdef idxint cols = matrix.shape[0] // rows @@ -121,21 +136,33 @@ cpdef Dense column_unstack_dense(Dense matrix, idxint rows, bint inplace=False): return out +cpdef Dia column_unstack_dia(Dia matrix, idxint rows): + _column_unstack_check_shape(matrix, rows) + cdef idxint cols = matrix.shape[0] // rows + return reshape_dia(matrix, cols, rows).transpose() + + cpdef list split_columns_dense(Dense matrix, copy=True): return [Dense(matrix.as_ndarray()[:, k], copy=copy) for k in range(matrix.shape[1])] + cpdef list split_columns_csr(CSR matrix, copy=True): return [CSR(matrix.as_scipy()[:, k], copy=copy) for k in range(matrix.shape[1])] +cpdef list split_columns_dia(Dia matrix, copy=None): + as_array = matrix.to_array() + return [Dense(as_array[:, k], copy=False) for k in range(matrix.shape[1])] + + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect reshape = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', 
_inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('n_rows_out', _inspect.Parameter.POSITIONAL_OR_KEYWORD), _inspect.Parameter('n_cols_out', _inspect.Parameter.POSITIONAL_OR_KEYWORD), ]), @@ -160,6 +187,7 @@ reshape.__doc__ =\ reshape.add_specialisations([ (CSR, CSR, reshape_csr), (Dense, Dense, reshape_dense), + (Dia, Dia, reshape_dia), ], _defer=True) @@ -169,7 +197,7 @@ reshape.add_specialisations([ column_stack = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='column_stack', module=__name__, @@ -204,11 +232,12 @@ column_stack.__doc__ =\ column_stack.add_specialisations([ (CSR, CSR, column_stack_csr), (Dense, Dense, column_stack_dense), + (Dia, Dia, column_stack_dia), ], _defer=True) column_unstack = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('rows', _inspect.Parameter.POSITIONAL_OR_KEYWORD), ]), name='column_unstack', @@ -244,12 +273,13 @@ column_unstack.__doc__ =\ column_unstack.add_specialisations([ (CSR, CSR, column_unstack_csr), (Dense, Dense, column_unstack_dense), + (Dia, Dia, column_unstack_dia), ], _defer=True) split_columns = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('copy', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=1), ]), @@ -268,9 +298,9 @@ split_columns.__doc__ =\ matrix : Data The matrix to unstack the columns of. - copy : bool, optional - The number of rows there should be in the output matrix. This must - divide into the total number of elements in the input. + copy : bool, optional [True] + Whether to return copy of the data or a view. + View may not be a possible in all cases. 
Returns ------- @@ -280,6 +310,7 @@ split_columns.__doc__ =\ split_columns.add_specialisations([ (CSR, split_columns_csr), (Dense, split_columns_dense), + (Dia, split_columns_dia), ], _defer=True) del _inspect, _Dispatcher diff --git a/qutip/core/data/solve.py b/qutip/core/data/solve.py index a164d4f745..31fc6c9659 100644 --- a/qutip/core/data/solve.py +++ b/qutip/core/data/solve.py @@ -1,20 +1,26 @@ -from qutip.core.data import CSR, Data, csr, Dense +from qutip.core.data import CSR, Data, csr, Dense, Dia import qutip.core.data as _data import scipy.sparse.linalg as splinalg import numpy as np from qutip.settings import settings +import warnings +from typing import Union + if settings.has_mkl: from qutip._mkl.spsolve import mkl_spsolve else: mkl_spsolve = None +__all__ = ["solve_csr_dense", "solve_dia_dense", "solve_dense", "solve"] + + def _splu(A, B, **kwargs): lu = splinalg.splu(A, **kwargs) return lu.solve(B) -def solve_csr_dense(matrix: CSR, target: Dense, method=None, +def solve_csr_dense(matrix: Union[CSR, Dia], target: Dense, method=None, options: dict={}) -> Dense: """ Solve ``Ax=b`` for ``x``. @@ -22,7 +28,7 @@ def solve_csr_dense(matrix: CSR, target: Dense, method=None, Parameters: ----------- - matrix : CSR + matrix : CSR, Dia The matrix ``A``. 
target : Data @@ -59,6 +65,10 @@ def solve_csr_dense(matrix: CSR, target: Dense, method=None, if method == "splu": solver = _splu + elif method == "lstsq": + solver = splinalg.lsqr + elif method == "solve": + solver = splinalg.spsolve elif hasattr(splinalg, method): solver = getattr(splinalg, method) elif method == "mkl_spsolve" and mkl_spsolve is None: @@ -70,10 +80,15 @@ def solve_csr_dense(matrix: CSR, target: Dense, method=None, options = options.copy() M = matrix.as_scipy() - if options.pop("csc", False): + if options.pop("csc", False) or isinstance(matrix, Dia): M = M.tocsc() - out = solver(M, b, **options) + with warnings.catch_warnings(): + warnings.simplefilter("error") + try: + out = solver(M, b, **options) + except splinalg.MatrixRankWarning: + raise ValueError("Matrix is singular") if isinstance(out, tuple) and len(out) == 2: # iterative method return a success flag @@ -98,6 +113,9 @@ def solve_csr_dense(matrix: CSR, target: Dense, method=None, return Dense(out, copy=False) +solve_dia_dense = solve_csr_dense + + def solve_dense(matrix: Dense, target: Data, method=None, options: dict={}) -> Dense: """ @@ -135,7 +153,10 @@ def solve_dense(matrix: Dense, target: Data, method=None, b = target.to_array() if method in ["solve", None]: - out = np.linalg.solve(matrix.as_ndarray(), b) + try: + out = np.linalg.solve(matrix.as_ndarray(), b) + except np.linalg.LinAlgError: + raise ValueError("Matrix is singular") elif method == "lstsq": out, *_ = np.linalg.lstsq( matrix.as_ndarray(), @@ -154,8 +175,8 @@ def solve_dense(matrix: Dense, target: Data, method=None, solve = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), - _inspect.Parameter('target', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), + _inspect.Parameter('target', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('method', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=None), 
_inspect.Parameter('options', _inspect.Parameter.POSITIONAL_OR_KEYWORD, @@ -182,6 +203,7 @@ def solve_dense(matrix: Dense, target: Data, method=None, equation Ax=b from scipy.sparse.linalg (CSR ``matrix``) or numpy.linalg (Dense ``matrix``) can be used. Sparse cases also accept `splu` and `mkl_spsolve`. + `solve` and `lstsq` will work for any data-type. options : dict Keywork options to pass to the solver. Refer to the documenentation @@ -200,6 +222,7 @@ def solve_dense(matrix: Dense, target: Data, method=None, """ solve.add_specialisations([ (CSR, Dense, Dense, solve_csr_dense), + (Dia, Dense, Dense, solve_dia_dense), (Dense, Dense, Dense, solve_dense), ], _defer=True) diff --git a/qutip/core/data/src/matmul_diag_vector.cpp b/qutip/core/data/src/matmul_diag_vector.cpp new file mode 100644 index 0000000000..fbbf370b2e --- /dev/null +++ b/qutip/core/data/src/matmul_diag_vector.cpp @@ -0,0 +1,84 @@ +#include + +#include "matmul_diag_vector.hpp" + +template +void _matmul_diag_vector( + const std::complex * _RESTRICT data, + const std::complex * _RESTRICT vec, + std::complex * _RESTRICT out, + const IntT length, + const std::complex scale +){ + const double * data_dbl = reinterpret_cast(data); + const double * vec_dbl = reinterpret_cast(vec); + double * out_dbl = reinterpret_cast(out); + // Gcc does not vectorize complex automatically? 
+ for (IntT i=0; i( + const std::complex * _RESTRICT, + const std::complex * _RESTRICT, + std::complex * _RESTRICT, + const int, + const std::complex); +template void _matmul_diag_vector<>( + const std::complex * _RESTRICT, + const std::complex * _RESTRICT, + std::complex * _RESTRICT, + const long, + const std::complex); +template void _matmul_diag_vector<>( + const std::complex * _RESTRICT, + const std::complex * _RESTRICT, + std::complex * _RESTRICT, + const long long, + const std::complex); + +template +void _matmul_diag_block( + const std::complex * _RESTRICT data, + const std::complex * _RESTRICT vec, + std::complex * _RESTRICT out, + const IntT length, + const IntT width +){ + const double * data_dbl = reinterpret_cast(data); + const double * vec_dbl = reinterpret_cast(vec); + double * out_dbl = reinterpret_cast(out); + IntT ptr = 0; + // Gcc does not vectorize complex automatically? + for (IntT i=0; i( + const std::complex * _RESTRICT, + const std::complex * _RESTRICT, + std::complex * _RESTRICT, + const int, + const int); +template void _matmul_diag_block<>( + const std::complex * _RESTRICT, + const std::complex * _RESTRICT, + std::complex * _RESTRICT, + const long, + const long); +template void _matmul_diag_block<>( + const std::complex * _RESTRICT, + const std::complex * _RESTRICT, + std::complex * _RESTRICT, + const long long, + const long long); diff --git a/qutip/core/data/src/matmul_diag_vector.hpp b/qutip/core/data/src/matmul_diag_vector.hpp new file mode 100644 index 0000000000..f8e79aa624 --- /dev/null +++ b/qutip/core/data/src/matmul_diag_vector.hpp @@ -0,0 +1,26 @@ +#include + +#if defined(__GNUC__) || defined(_MSC_VER) +# define _RESTRICT __restrict +#else +# define _RESTRICT +#endif + +template +void _matmul_diag_vector( + const std::complex * _RESTRICT data, + const std::complex * _RESTRICT vec, + std::complex * _RESTRICT out, + const IntT length, + const std::complex scale +); + + +template +void _matmul_diag_block( + const std::complex * 
_RESTRICT data, + const std::complex * _RESTRICT vec, + std::complex * _RESTRICT out, + const IntT length, + const IntT width +); diff --git a/qutip/core/data/tidyup.pxd b/qutip/core/data/tidyup.pxd index 4444714507..c1c6acc264 100644 --- a/qutip/core/data/tidyup.pxd +++ b/qutip/core/data/tidyup.pxd @@ -1,7 +1,8 @@ #cython: language_level=3 #cython: boundscheck=False, wraparound=False, initializedcheck=False -from qutip.core.data cimport CSR, Dense +from qutip.core.data cimport CSR, Dense, Dia cpdef CSR tidyup_csr(CSR matrix, double tol, bint inplace=*) cpdef Dense tidyup_dense(Dense matrix, double tol, bint inplace=*) +cpdef Dia tidyup_dia(Dia matrix, double tol, bint inplace=*) diff --git a/qutip/core/data/tidyup.pyx b/qutip/core/data/tidyup.pyx index a9b80798f1..ee9d7c8dc7 100644 --- a/qutip/core/data/tidyup.pyx +++ b/qutip/core/data/tidyup.pyx @@ -4,15 +4,16 @@ from libc.math cimport fabs cimport numpy as cnp +from scipy.linalg cimport cython_blas as blas -from qutip.core.data cimport csr, dense, CSR, Dense +from qutip.core.data cimport csr, dense, CSR, Dense, dia, Dia, base cdef extern from "" namespace "std" nogil: # abs is templated such that Cython treats std::abs as complex->complex double abs(double complex x) __all__ = [ - 'tidyup', 'tidyup_csr', 'tidyup_dense', + 'tidyup', 'tidyup_csr', 'tidyup_dense', 'tidyup_dia', ] @@ -55,6 +56,47 @@ cpdef Dense tidyup_dense(Dense matrix, double tol, bint inplace=True): return out +cpdef Dia tidyup_dia(Dia matrix, double tol, bint inplace=True): + cdef Dia out = matrix if inplace else matrix.copy() + cdef base.idxint diag=0, new_diag=0, ONE=1, start, end, col + cdef bint re, im, has_data + cdef double complex value + cdef int length + + while diag < out.num_diag: + start = max(0, out.offsets[diag]) + end = min(out.shape[1], out.shape[0] + out.offsets[diag]) + has_data = False + for col in range(start, end): + re = False + im = False + if fabs(out.data[diag * out.shape[1] + col].real) < tol: + re = True + 
out.data[diag * out.shape[1] + col].real = 0 + if fabs(out.data[diag * out.shape[1] + col].imag) < tol: + im = True + out.data[diag * out.shape[1] + col].imag = 0 + has_data |= not (re & im) + + if has_data and new_diag < diag: + length = out.shape[1] + blas.zcopy( + &length, + &out.data[diag * out.shape[1]], &ONE, + &out.data[new_diag * out.shape[1]], &ONE + ) + out.offsets[new_diag] = out.offsets[diag] + + if has_data: + new_diag += 1 + diag += 1 + out.num_diag = new_diag + if out._scipy is not None: + out._scipy.data = out._scipy.data[:new_diag] + out._scipy.offsets = out._scipy.offsets[:new_diag] + return out + + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect @@ -63,7 +105,7 @@ import inspect as _inspect tidyup = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), _inspect.Parameter('tol', _inspect.Parameter.POSITIONAL_OR_KEYWORD), _inspect.Parameter('inplace', _inspect.Parameter.POSITIONAL_OR_KEYWORD, default=True), @@ -101,6 +143,7 @@ tidyup.__doc__ =\ tidyup.add_specialisations([ (CSR, tidyup_csr), (Dense, tidyup_dense), + (Dia, tidyup_dia), ], _defer=True) del _inspect, _Dispatcher diff --git a/qutip/core/data/trace.pxd b/qutip/core/data/trace.pxd index bdac487763..cf2ef75641 100644 --- a/qutip/core/data/trace.pxd +++ b/qutip/core/data/trace.pxd @@ -1,6 +1,11 @@ #cython: language_level=3 -from qutip.core.data cimport CSR, Dense +from qutip.core.data cimport CSR, Dense, Dia -cpdef double complex trace_csr(CSR matrix) nogil except * -cpdef double complex trace_dense(Dense matrix) nogil except * +cpdef double complex trace_csr(CSR matrix) except * nogil +cpdef double complex trace_dense(Dense matrix) except * nogil +cpdef double complex trace_dia(Dia matrix) except * nogil + +cpdef double complex trace_oper_ket_csr(CSR matrix) except * nogil +cpdef double complex trace_oper_ket_dense(Dense matrix) except * 
nogil +cpdef double complex trace_oper_ket_dia(Dia matrix) except * nogil diff --git a/qutip/core/data/trace.pyx b/qutip/core/data/trace.pyx index 18756de52d..210444dc31 100644 --- a/qutip/core/data/trace.pyx +++ b/qutip/core/data/trace.pyx @@ -2,22 +2,34 @@ #cython: boundscheck=False, wraparound=False, initializedcheck=False cimport cython +from libc.math cimport sqrt -from qutip.core.data cimport Data, CSR, Dense +from qutip.core.data cimport Data, CSR, Dense, Dia +from qutip.core.data cimport base +from .reshape import column_unstack __all__ = [ - 'trace', 'trace_csr', 'trace_dense', + 'trace', 'trace_csr', 'trace_dense', 'trace_dia', + 'trace_oper_ket', 'trace_oper_ket_csr', 'trace_oper_ket_dense', + 'trace_oper_ket_dia', 'trace_oper_ket_data', ] -cdef void _check_shape(Data matrix) nogil except *: +cdef void _check_shape(Data matrix) except * nogil: if matrix.shape[0] != matrix.shape[1]: raise ValueError("".join([ "matrix shape ", str(matrix.shape), " is not square.", ])) -cpdef double complex trace_csr(CSR matrix) nogil except *: +cdef void _check_shape_oper_ket(int N, Data matrix) except * nogil: + if matrix.shape[0] != N * N or matrix.shape[1] != 1: + raise ValueError("".join([ + "matrix ", str(matrix.shape), " is not a stacked square matrix." 
+ ])) + + +cpdef double complex trace_csr(CSR matrix) except * nogil: _check_shape(matrix) cdef size_t row, ptr cdef double complex trace = 0 @@ -28,7 +40,7 @@ cpdef double complex trace_csr(CSR matrix) nogil except *: break return trace -cpdef double complex trace_dense(Dense matrix) nogil except *: +cpdef double complex trace_dense(Dense matrix) except * nogil: _check_shape(matrix) cdef double complex trace = 0 cdef size_t ptr = 0 @@ -38,13 +50,64 @@ cpdef double complex trace_dense(Dense matrix) nogil except *: ptr += stride return trace +cpdef double complex trace_dia(Dia matrix) except * nogil: + _check_shape(matrix) + cdef double complex trace = 0 + cdef size_t diag, j + for diag in range(matrix.num_diag): + if matrix.offsets[diag] == 0: + for j in range(matrix.shape[1]): + trace += matrix.data[diag * matrix.shape[1] + j] + break + return trace + + +cpdef double complex trace_oper_ket_csr(CSR matrix) except * nogil: + cdef size_t N = sqrt(matrix.shape[0]) + _check_shape_oper_ket(N, matrix) + cdef size_t row + cdef double complex trace = 0 + cdef size_t stride = N + 1 + for row in range(N): + if matrix.row_index[row * stride] != matrix.row_index[row * stride + 1]: + trace += matrix.data[matrix.row_index[row * stride]] + return trace + +cpdef double complex trace_oper_ket_dense(Dense matrix) except * nogil: + cdef size_t N = sqrt(matrix.shape[0]) + _check_shape_oper_ket(N, matrix) + cdef double complex trace = 0 + cdef size_t ptr = 0 + cdef size_t stride = N + 1 + for ptr in range(N): + trace += matrix.data[ptr * stride] + return trace + + +cpdef double complex trace_oper_ket_dia(Dia matrix) except * nogil: + cdef size_t N = sqrt(matrix.shape[0]) + _check_shape_oper_ket(N, matrix) + cdef double complex trace = 0 + cdef size_t diag = 0 + cdef size_t stride = N + 1 + for diag in range(matrix.num_diag): + if -matrix.offsets[diag] % stride == 0: + trace += matrix.data[diag * matrix.shape[1]] + return trace + + +cpdef trace_oper_ket_data(Data matrix): + cdef size_t 
N = sqrt(matrix.shape[0]) + _check_shape_oper_ket(N, matrix) + return trace(column_unstack(matrix, N)) + from .dispatch import Dispatcher as _Dispatcher import inspect as _inspect trace = _Dispatcher( _inspect.Signature([ - _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_OR_KEYWORD), + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), ]), name='trace', module=__name__, @@ -55,7 +118,26 @@ trace.__doc__ =\ """Compute the trace (sum of diagonal elements) of a square matrix.""" trace.add_specialisations([ (CSR, trace_csr), + (Dia, trace_dia), (Dense, trace_dense), ], _defer=True) +trace_oper_ket = _Dispatcher( + _inspect.Signature([ + _inspect.Parameter('matrix', _inspect.Parameter.POSITIONAL_ONLY), + ]), + name='trace_oper_ket', + module=__name__, + inputs=('matrix',), + out=False, +) +trace_oper_ket.__doc__ =\ + """Compute the trace (sum of diagonal elements) of a stacked square matrix.""" +trace_oper_ket.add_specialisations([ + (CSR, trace_oper_ket_csr), + (Dia, trace_oper_ket_dia), + (Dense, trace_oper_ket_dense), + (Data, trace_oper_ket_data), +], _defer=True) + del _inspect, _Dispatcher diff --git a/qutip/core/gates.py b/qutip/core/gates.py index 28726e292c..5b3d988d10 100644 --- a/qutip/core/gates.py +++ b/qutip/core/gates.py @@ -4,7 +4,7 @@ import numpy as np import scipy.sparse as sp -from . import Qobj, qeye, sigmax, fock_dm, qdiags +from . import Qobj, qeye, sigmax, fock_dm, qdiags, qeye_like __all__ = [ @@ -690,7 +690,7 @@ def _powers(op, N): Generator that yields powers of an operator `op`, through to `N`.
""" - acc = qeye(op.dims[0]) + acc = qeye_like(op) yield acc for _ in range(N - 1): diff --git a/qutip/core/metrics.py b/qutip/core/metrics.py index 08c635326c..acd43eba49 100644 --- a/qutip/core/metrics.py +++ b/qutip/core/metrics.py @@ -17,7 +17,7 @@ from .superop_reps import (to_kraus, to_choi, _to_superpauli, to_super, kraus_to_choi) from .superoperator import operator_to_vector, vector_to_operator -from .operators import qeye +from .operators import qeye, qeye_like from .states import ket2dm from .semidefinite import dnorm_problem, dnorm_sparse_problem from . import data as _data @@ -494,7 +494,7 @@ def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False, ): # Make an identity the same size as A and B to # compare against. - I = qeye(A.dims[0]) + I = qeye_like(A) # Compare to B first, so that an error is raised # as soon as possible. Bd = B.dag() @@ -538,18 +538,12 @@ def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False, # If we're still here, we need to actually solve the problem. # Assume square... - dim = np.prod(J.dims[0][0]) - - # The constraints only depend on the dimension, so - # we can cache them efficiently. - problem, Jr, Ji, *_ = dnorm_problem(dim) + dim = int(np.prod(J.dims[0][0])) # Load the parameters with the Choi matrix passed in. J_dat = _data.to('csr', J.data).as_scipy() if not sparse: - # The parameters and constraints only depend on the dimension, so - # we can cache them efficiently. problem, Jr, Ji = dnorm_problem(dim) # Load the parameters with the Choi matrix passed in. @@ -561,9 +555,6 @@ def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False, J_dat.indptr), shape=J_dat.shape).toarray() else: - - # The parameters do not depend solely on the dimension, - # so we can not cache them efficiently. 
problem = dnorm_sparse_problem(dim, J_dat) problem.solve(solver=solver, verbose=verbose) diff --git a/qutip/core/operators.py b/qutip/core/operators.py index 6a3a990bc0..f51f1de878 100644 --- a/qutip/core/operators.py +++ b/qutip/core/operators.py @@ -5,10 +5,11 @@ __all__ = ['jmat', 'spin_Jx', 'spin_Jy', 'spin_Jz', 'spin_Jm', 'spin_Jp', 'spin_J_set', 'sigmap', 'sigmam', 'sigmax', 'sigmay', 'sigmaz', - 'destroy', 'create', 'qeye', 'identity', 'position', 'momentum', - 'num', 'squeeze', 'squeezing', 'displace', 'commutator', - 'qutrit_ops', 'qdiags', 'phase', 'qzero', 'enr_destroy', - 'enr_identity', 'charge', 'tunneling', 'qft'] + 'destroy', 'create', 'fdestroy', 'fcreate', 'qeye', 'qeye_like', + 'identity', 'position', 'momentum', 'num', 'squeeze', 'squeezing', + 'swap', 'displace', 'commutator', 'qutrit_ops', 'qdiags', 'phase', + 'qzero', 'qzero_like', 'enr_destroy', 'enr_identity', 'charge', + 'tunneling', 'qft'] import numbers @@ -18,9 +19,11 @@ from . import data as _data from .qobj import Qobj from .dimensions import flatten +from .. import settings -def qdiags(diagonals, offsets=None, dims=None, shape=None, *, dtype=_data.CSR): +def qdiags(diagonals, offsets=None, dims=None, shape=None, *, + dtype=None): """ Constructs an operator from an array of diagonals. @@ -58,12 +61,13 @@ def qdiags(diagonals, offsets=None, dims=None, shape=None, *, dtype=_data.CSR): [ 0. 0. 0. 0. ]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia offsets = [0] if offsets is None else offsets data = _data.diag[dtype](diagonals, offsets, shape) return Qobj(data, dims=dims, type='oper', copy=False) -def jmat(j, which=None, *, dtype=_data.CSR): +def jmat(j, which=None, *, dtype=None): """Higher-order spin operators: Parameters @@ -107,12 +111,12 @@ def jmat(j, which=None, *, dtype=_data.CSR): [ 0. 0. 0.] [ 0. 0. -1.]]] - Notes ----- If no 'args' input, then returns array of ['x','y','z'] operators. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.CSR if int(2 * j) != 2 * j or j < 0: raise ValueError('j must be a non-negative integer or half-integer') @@ -131,11 +135,11 @@ def jmat(j, which=None, *, dtype=_data.CSR): return Qobj(_jplus(j, dtype=dtype).adjoint(), dims=dims, type='oper', isherm=False, isunitary=False, copy=False) if which == 'x': - A = _jplus(j, dtype=dtype) + A = _jplus(j, dtype=dtype) return Qobj(_data.add(A, A.adjoint()), dims=dims, type='oper', isherm=True, isunitary=False, copy=False) * 0.5 if which == 'y': - A = _data.mul(_jplus(j, dtype=dtype), -0.5j) + A = _data.mul(_jplus(j, dtype=dtype), -0.5j) return Qobj(_data.add(A, A.adjoint()), dims=dims, type='oper', isherm=True, isunitary=False, copy=False) if which == 'z': @@ -144,7 +148,7 @@ def jmat(j, which=None, *, dtype=_data.CSR): raise ValueError('Invalid spin operator: ' + which) -def _jplus(j, *, dtype=_data.CSR): +def _jplus(j, *, dtype=None): """ Internal functions for generating the data representing the J-plus operator. @@ -154,10 +158,11 @@ def _jplus(j, *, dtype=_data.CSR): return _data.diag[dtype](data, 1) -def _jz(j, *, dtype=_data.CSR): +def _jz(j, *, dtype=None): """ Internal functions for generating the data representing the J-z operator. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.CSR N = int(2*j + 1) data = np.array([j-k for k in range(N)], dtype=complex) return _data.diag[dtype](data, 0) @@ -166,7 +171,7 @@ def _jz(j, *, dtype=_data.CSR): # # Spin j operators: # -def spin_Jx(j, *, dtype=_data.CSR): +def spin_Jx(j, *, dtype=None): """Spin-j x operator Parameters @@ -187,7 +192,7 @@ def spin_Jx(j, *, dtype=_data.CSR): return jmat(j, 'x', dtype=dtype) -def spin_Jy(j, *, dtype=_data.CSR): +def spin_Jy(j, *, dtype=None): """Spin-j y operator Parameters @@ -208,7 +213,7 @@ def spin_Jy(j, *, dtype=_data.CSR): return jmat(j, 'y', dtype=dtype) -def spin_Jz(j, *, dtype=_data.CSR): +def spin_Jz(j, *, dtype=None): """Spin-j z operator Parameters @@ -229,7 +234,7 @@ def spin_Jz(j, *, dtype=_data.CSR): return jmat(j, 'z', dtype=dtype) -def spin_Jm(j, *, dtype=_data.CSR): +def spin_Jm(j, *, dtype=None): """Spin-j annihilation operator Parameters @@ -250,7 +255,7 @@ def spin_Jm(j, *, dtype=_data.CSR): return jmat(j, '-', dtype=dtype) -def spin_Jp(j, *, dtype=_data.CSR): +def spin_Jp(j, *, dtype=None): """Spin-j creation operator Parameters @@ -271,7 +276,7 @@ def spin_Jp(j, *, dtype=_data.CSR): return jmat(j, '+', dtype=dtype) -def spin_J_set(j, *, dtype=_data.CSR): +def spin_J_set(j, *, dtype=None): """Set of spin-j operators (x, y, z) Parameters @@ -384,7 +389,7 @@ def sigmaz(): return _SIGMAZ.copy() -def destroy(N, offset=0, *, dtype=_data.CSR): +def destroy(N, offset=0, *, dtype=None): """ Destruction (lowering) operator. 
@@ -416,13 +421,14 @@ def destroy(N, offset=0, *, dtype=_data.CSR): [ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j] [ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia if not isinstance(N, (int, np.integer)): # raise error if N not integer raise ValueError("Hilbert space dimension must be integer value") data = np.sqrt(np.arange(offset+1, N+offset, dtype=complex)) return qdiags(data, 1, dtype=dtype) -def create(N, offset=0, *, dtype=_data.CSR): +def create(N, offset=0, *, dtype=None): """ Creation (raising) operator. @@ -458,12 +464,155 @@ def create(N, offset=0, *, dtype=_data.CSR): [ 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j 0.00000000+0.j] [ 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j 0.00000000+0.j]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia if not isinstance(N, (int, np.integer)): # raise error if N not integer raise ValueError("Hilbert space dimension must be integer value") data = np.sqrt(np.arange(offset+1, N+offset, dtype=complex)) return qdiags(data, -1, dtype=dtype) +def fdestroy(n_sites, site, dtype=None): + """ + Fermionic destruction operator. + We use the Jordan-Wigner transformation, + making use of the Jordan-Wigner ZZ..Z strings, + to construct this as follows: + + .. math:: + + a_j = \\sigma_z^{\\otimes j} \\otimes + (\\frac{\\sigma_x + i \\sigma_y}{2}) + \\otimes I^{\\otimes N-j-1} + + Parameters + ---------- + n_sites : int + Number of sites in Fock space. + + site : int (default 0) + The site in Fock space to add a fermion to. + Corresponds to j in the above JW transform. + + Returns + ------- + oper : qobj + Qobj for destruction operator. + + Examples + -------- + >>> fdestroy(2) # doctest: +SKIP + Quantum object: dims=[[2 2], [2 2]], shape=(4, 4), \ + type='oper', isherm=False + Qobj data = + [[0. 0. 1. 0.] + [0. 0. 0. 1.] + [0. 0. 0. 0.] + [0. 0. 0. 
0.]] + """ + return _f_op(n_sites, site, 'destruction', dtype=dtype) + + +def fcreate(n_sites, site, dtype=None): + """ + Fermionic creation operator. + We use the Jordan-Wigner transformation, + making use of the Jordan-Wigner ZZ..Z strings, + to construct this as follows: + + .. math:: + + a_j = \\sigma_z^{\\otimes j} + \\otimes (frac{sigma_x - i sigma_y}{2}) + \\otimes I^{\\otimes N-j-1} + + + Parameters + ---------- + n_sites : int + Number of sites in Fock space. + + site : int + The site in Fock space to add a fermion to. + Corresponds to j in the above JW transform. + + Returns + ------- + oper : qobj + Qobj for raising operator. + + Examples + -------- + >>> fcreate(2) # doctest: +SKIP + Quantum object: dims = [[2, 2], [2, 2]], shape = (4, 4), \ + type = oper, isherm = False + Qobj data = + [[0. 0. 0. 0.] + [0. 0. 0. 0.] + [1. 0. 0. 0.] + [0. 1. 0. 0.]] + """ + return _f_op(n_sites, site, 'creation', dtype=dtype) + + +def _f_op(n_sites, site, action, dtype=None): + """ Makes fermionic creation and destruction operators. + We use the Jordan-Wigner transformation, + making use of the Jordan-Wigner ZZ..Z strings, + to construct this as follows: + + .. math:: + + a_j = \\sigma_z^{\\otimes j} + \\otimes (frac{sigma_x \\pm i sigma_y}{2}) + \\otimes I^{\\otimes N-j-1} + + Parameters + ---------- + action : str + The type of operator to build. + Can only be 'creation' or 'destruction' + + n_sites : int + Number of sites in Fock space. + + site : int + The site in Fock space to create/destroy a fermion on. + Corresponds to j in the above JW transform. + + Returns + ------- + oper : qobj + Qobj for destruction operator. 
+ """ + # get `tensor` and sigma z objects + from .tensor import tensor + s_z = 2 * jmat(0.5, 'z', dtype=dtype) + + # sanity check + if site < 0: + raise ValueError(f'The specified site {site} cannot be \ + less than 0.') + elif 0 >= n_sites: + raise ValueError(f'The specified number of sites {n_sites} \ + cannot be equal to or less than 0.') + elif site >= n_sites: + raise ValueError(f'The specified site {site} is not in \ + the range of {n_sites} sites.') + + # figure out which operator to build + if action.lower() == 'creation': + operator = create(2, dtype=dtype) + elif action.lower() == 'destruction': + operator = destroy(2, dtype=dtype) + else: + raise TypeError("Unknown operator '%s'. `action` must be \ + either 'creation' or 'destruction.'" % action) + + eye = identity(2, dtype=dtype) + opers = [s_z] * site + [operator] + [eye] * (n_sites - site - 1) + return tensor(opers) + + def _implicit_tensor_dimensions(dimensions): """ Total flattened size and operator dimensions for operator creation routines @@ -493,7 +642,7 @@ def _implicit_tensor_dimensions(dimensions): return np.prod(flat), [dimensions, dimensions] -def qzero(dimensions, *, dtype=_data.CSR): +def qzero(dimensions, *, dtype=None): """ Zero operator. @@ -515,6 +664,7 @@ def qzero(dimensions, *, dtype=_data.CSR): Zero operator Qobj. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR size, dimensions = _implicit_tensor_dimensions(dimensions) # A sparse matrix with no data is equal to a zero matrix. type_ = 'super' if isinstance(dimensions[0][0], list) else 'oper' @@ -522,7 +672,31 @@ def qzero(dimensions, *, dtype=_data.CSR): isherm=True, isunitary=False, copy=False) -def qeye(dimensions, *, dtype=_data.CSR): +def qzero_like(qobj): + """ + Zero operator of the same dims and type as the reference. + + Parameters + ---------- + qobj : Qobj, QobjEvo + Reference quantum object to copy the dims from. + + Returns + ------- + qzero : qobj + Zero operator Qobj. 
+ + """ + from .cy.qobjevo import QobjEvo + if isinstance(qobj, QobjEvo): + qobj = qobj(0) + return Qobj( + _data.zeros_like(qobj.data), dims=qobj.dims, type=qobj.type, + superrep=qobj.superrep, isherm=True, isunitary=False, copy=False + ) + + +def qeye(dimensions, *, dtype=None): """ Identity operator. @@ -562,6 +736,7 @@ def qeye(dimensions, *, dtype=_data.CSR): [0. 0. 0. 1.]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia size, dimensions = _implicit_tensor_dimensions(dimensions) type_ = 'super' if isinstance(dimensions[0][0], list) else 'oper' return Qobj(_data.identity[dtype](size), dims=dimensions, type=type_, @@ -572,7 +747,32 @@ def qeye(dimensions, *, dtype=_data.CSR): identity = qeye -def position(N, offset=0, *, dtype=_data.CSR): +def qeye_like(qobj): + """ + Identity operator with the same dims and type as the reference quantum + object. + + Parameters + ---------- + qobj : Qobj, QobjEvo + Reference quantum object to copy the dims from. + + Returns + ------- + oper : qobj + Identity operator Qobj. + + """ + from .cy.qobjevo import QobjEvo + if isinstance(qobj, QobjEvo): + qobj = qobj(0) + return Qobj( + _data.identity_like(qobj.data), dims=qobj.dims, type=qobj.type, + superrep=qobj.superrep, isherm=True, isunitary=True, copy=False + ) + + +def position(N, offset=0, *, dtype=None): """ Position operator x=1/sqrt(2)*(a+a.dag()) @@ -600,7 +800,7 @@ def position(N, offset=0, *, dtype=_data.CSR): return position -def momentum(N, offset=0, *, dtype=_data.CSR): +def momentum(N, offset=0, *, dtype=None): """ Momentum operator p=-1j/sqrt(2)*(a-a.dag()) @@ -628,7 +828,7 @@ def momentum(N, offset=0, *, dtype=_data.CSR): return momentum -def num(N, offset=0, *, dtype=_data.CSR): +def num(N, offset=0, *, dtype=None): """ Quantum object for number operator. 
@@ -660,11 +860,12 @@ def num(N, offset=0, *, dtype=_data.CSR): [0 0 2 0] [0 0 0 3]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia data = np.arange(offset, offset + N, dtype=complex) return qdiags(data, 0, dtype=dtype) -def squeeze(N, z, offset=0, *, dtype=_data.CSR): +def squeeze(N, z, offset=0, *, dtype=None): """Single-mode squeezing operator. Parameters @@ -701,7 +902,7 @@ def squeeze(N, z, offset=0, *, dtype=_data.CSR): [ 0.00000000+0.j -0.30142443+0.j 0.00000000+0.j 0.95349007+0.j]] """ - asq = destroy(N, offset=offset) ** 2 + asq = destroy(N, offset=offset, dtype=dtype) ** 2 op = 0.5*np.conj(z)*asq - 0.5*z*asq.dag() return op.expm(dtype=dtype) @@ -735,7 +936,7 @@ def squeezing(a1, a2, z): return b.expm() -def displace(N, alpha, offset=0, *, dtype=_data.Dense): +def displace(N, alpha, offset=0, *, dtype=None): """Single-mode displacement operator. Parameters @@ -771,6 +972,7 @@ def displace(N, alpha, offset=0, *, dtype=_data.Dense): [ 0.00626025+0.j 0.07418172+0.j 0.41083747+0.j 0.90866411+0.j]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense a = destroy(N, offset=offset) return (alpha * a.dag() - np.conj(alpha) * a).expm(dtype=dtype) @@ -790,7 +992,7 @@ def commutator(A, B, kind="normal"): raise TypeError("Unknown commutator kind '%s'" % kind) -def qutrit_ops(*, dtype=_data.CSR): +def qutrit_ops(*, dtype=None): """ Operators for a three level system (qutrit). @@ -808,6 +1010,7 @@ def qutrit_ops(*, dtype=_data.CSR): """ from .states import qutrit_basis + dtype = dtype or settings.core["default_dtype"] or _data.CSR out = np.empty((6,), dtype=object) one, two, three = qutrit_basis(dtype=dtype) out[0] = one * one.dag() @@ -819,7 +1022,7 @@ def qutrit_ops(*, dtype=_data.CSR): return out -def phase(N, phi0=0, *, dtype=_data.Dense): +def phase(N, phi0=0, *, dtype=None): """ Single-mode Pegg-Barnett phase operator. 
@@ -845,6 +1048,7 @@ def phase(N, phi0=0, *, dtype=_data.Dense): The Pegg-Barnett phase operator is Hermitian on a truncated Hilbert space. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense phim = phi0 + (2 * np.pi * np.arange(N)) / N # discrete phase angles n = np.arange(N)[:, np.newaxis] states = np.array([np.sqrt(kk) / np.sqrt(N) * np.exp(1j * n * kk) @@ -853,7 +1057,7 @@ def phase(N, phi0=0, *, dtype=_data.Dense): return Qobj(ops, dims=[[N], [N]], type='oper', copy=False).to(dtype) -def enr_destroy(dims, excitations, *, dtype=_data.CSR): +def enr_destroy(dims, excitations, *, dtype=None): """ Generate annilation operators for modes in a excitation-number-restricted state space. For example, consider a system consisting of 4 modes, each @@ -899,6 +1103,7 @@ def enr_destroy(dims, excitations, *, dtype=_data.CSR): quantum system described by dims. """ from .states import enr_state_dictionaries + dtype = dtype or settings.core["default_dtype"] or _data.CSR nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations) @@ -917,7 +1122,7 @@ def enr_destroy(dims, excitations, *, dtype=_data.CSR): return [Qobj(a, dims=[dims, dims]).to(dtype) for a in a_ops] -def enr_identity(dims, excitations, *, dtype=_data.CSR): +def enr_identity(dims, excitations, *, dtype=None): """ Generate the identity operator for the excitation-number restricted state space defined by the `dims` and `exciations` arguments. See the @@ -947,6 +1152,7 @@ def enr_identity(dims, excitations, *, dtype=_data.CSR): exication-number-restricted state space defined by `dims` and `exciations`. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dia from .states import enr_state_dictionaries nstates, _, _ = enr_state_dictionaries(dims, excitations) return Qobj(_data.identity[dtype](nstates), @@ -957,7 +1163,7 @@ def enr_identity(dims, excitations, *, dtype=_data.CSR): copy=False) -def charge(Nmax, Nmin=None, frac=1, *, dtype=_data.CSR): +def charge(Nmax, Nmin=None, frac=1, *, dtype=None): """ Generate the diagonal charge operator over charge states from Nmin to Nmax. @@ -987,6 +1193,7 @@ def charge(Nmax, Nmin=None, frac=1, *, dtype=_data.CSR): .. versionadded:: 3.2 """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia if Nmin is None: Nmin = -Nmax diag = frac * np.arange(Nmin, Nmax+1, dtype=float) @@ -995,7 +1202,7 @@ def charge(Nmax, Nmin=None, frac=1, *, dtype=_data.CSR): return out -def tunneling(N, m=1, *, dtype=_data.CSR): +def tunneling(N, m=1, *, dtype=None): r""" Tunneling operator with elements of the form :math:`\\sum |N>"] - return "\n".join(out) + if len(out)-2: + return "\n".join(out) + else: + return "".join(out) def __enter__(self): self._backup = getattr(settings, self._settings_name) @@ -89,6 +93,12 @@ class CoreOptions(QutipOptions): on the signature of the supplied function. If the function signature is exactly ``f(t, args)`` then ``dict`` is used. Otherwise ``pythonic`` is used. + + default_dtype : Nonetype, str, type {None} + When set, functions creating :class:`Qobj`, such as :func:"qeye" or + :func:"rand_herm", will use the specified data type. Any data-layer + known to ``qutip.data.to`` is accepted. When ``None``, these functions + will default to a sensible data type. 
""" _options = { # use auto tidyup @@ -103,6 +113,8 @@ class CoreOptions(QutipOptions): "auto_tidyup_atol": 1e-14, # signature style expected by function coefficients "function_coefficient_style": "auto", + # Default Qobj dtype for Qobj create function + "default_dtype": None, } _settings_name = "core" diff --git a/qutip/core/qobj.py b/qutip/core/qobj.py index 83a778b052..2560a90b54 100644 --- a/qutip/core/qobj.py +++ b/qutip/core/qobj.py @@ -824,6 +824,26 @@ def full(self, order='C', squeeze=False): out = np.asarray(self.data.to_array(), order=order) return out.squeeze() if squeeze else out + def data_as(self, format=None, copy=True): + """Matrix from quantum object. + + Parameters + ---------- + format : str, default: None + Type of the output, "ndarray" for ``Dense``, "csr_matrix" for + ``CSR``. A ValueError will be raised if the format is not + supported. + + copy : bool {False, True} + Whether to return a copy + + Returns + ------- + data : numpy.ndarray, scipy.sparse.matrix_csr, etc. + Matrix in the type of the underlying libraries. + """ + return _data.extract(self.data, format, copy) + def diag(self): """Diagonal elements of quantum object. 
@@ -1538,7 +1558,7 @@ def eigenstates(self, sparse=False, sort='low', eigvals=0, isherm=self._isherm, sort=sort, eigvals=eigvals, tol=tol, maxiter=maxiter) - elif isinstance(self.data, _data.CSR): + elif isinstance(self.data, (_data.CSR, _data.Dia)): evals, evecs = _data.eigs(_data.to(_data.Dense, self.data), isherm=self._isherm, sort=sort, eigvals=eigvals) @@ -1603,7 +1623,7 @@ def eigenenergies(self, sparse=False, sort='low', isherm=self._isherm, sort=sort, eigvals=eigvals, tol=tol, maxiter=maxiter) - elif isinstance(self.data, _data.CSR): + elif isinstance(self.data, (_data.CSR, _data.Dia)): return _data.eigs(_data.to(_data.Dense, self.data), vecs=False, isherm=self._isherm, sort=sort, eigvals=eigvals) @@ -1754,7 +1774,7 @@ def _calculate_isunitary(self): if not self.isoper or self._data.shape[0] != self._data.shape[1]: return False cmp = _data.matmul(self._data, self._data.adjoint()) - iden = _data.identity(self.shape[0], dtype=type(cmp)) + iden = _data.identity_like(cmp) return _data.iszero(_data.sub(cmp, iden), tol=settings.core['atol']) diff --git a/qutip/core/semidefinite.py b/qutip/core/semidefinite.py index ba96aeec23..ce8fcabf89 100644 --- a/qutip/core/semidefinite.py +++ b/qutip/core/semidefinite.py @@ -3,56 +3,47 @@ """ This module implements internal-use functions for semidefinite programming. 
""" - import collections import functools import numpy as np import scipy.sparse as sp + # Conditionally import CVXPY try: import cvxpy + + __all__ = ["dnorm_problem", "dnorm_sparse_problem"] except ImportError: cvxpy = None + __all__ = [] -from .tensor import tensor_swap -from .operators import qeye -from ..logging_utils import get_logger -logger = get_logger('qutip.core.semidefinite') +from .operators import swap -Complex = collections.namedtuple('Complex', ['re', 'im']) +Complex = collections.namedtuple("Complex", ["re", "im"]) -def complex_var(rows=1, cols=1, name=None): +def _complex_var(rows=1, cols=1, name=None): return Complex( re=cvxpy.Variable((rows, cols), name=(name + "_re") if name else None), im=cvxpy.Variable((rows, cols), name=(name + "_im") if name else None), ) -def herm(*Xs): - return sum([[X.re == X.re.T, X.im == -X.im.T] for X in Xs], []) - - -def pos_noherm(*Xs): - return [ - cvxpy.bmat([ - [X.re, -X.im], - [X.im, X.re] - ]) >> 0 - for X in Xs - ] - - -def pos(*Xs): - return pos_noherm(*Xs) + herm(*Xs) - - -def dens(*rhos): - return pos(*rhos) + [ - cvxpy.trace(rho.re) == 1 - for rho in rhos +def _make_constraints(*rhos): + """ + Create constraints to ensure definied density operators. 
+ """ + # rhos traces are 1 + constraints = [cvxpy.trace(rho.re) == 1 for rho in rhos] + # rhos are Hermitian + for rho in rhos: + constraints += [rho.re == rho.re.T] + [rho.im == -rho.im.T] + # Non negative + constraints += [ + cvxpy.bmat([[rho.re, -rho.im], [rho.im, rho.re]]) >> 0 for rho in rhos ] + return constraints def _arr_to_complex(A): @@ -61,7 +52,7 @@ def _arr_to_complex(A): return Complex(re=A, im=np.zeros_like(A)) -def kron(A, B): +def _kron(A, B): if isinstance(A, np.ndarray): A = _arr_to_complex(A) if isinstance(B, np.ndarray): @@ -73,98 +64,68 @@ def kron(A, B): ) -def conj(W, A): +def _conj(W, A): U, V = W.re, W.im A, B = A.re, A.im return Complex( re=(U @ A @ U.T - U @ B @ V.T - V @ A @ V.T - V @ B @ U.T), - im=(U @ A @ V.T + U @ B @ U.T + V @ A @ U.T - V @ B @ V.T) - ) - - -def bmat(B): - return Complex( - re=cvxpy.bmat([[element.re for element in row] for row in B]), - im=cvxpy.bmat([[element.re for element in row] for row in B]), + im=(U @ A @ V.T + U @ B @ U.T + V @ A @ U.T - V @ B @ V.T), ) -def dag(X): - return Complex(re=X.re.T, im=-X.im.T) - - -def memoize(fn): - cache = {} - - @functools.wraps(fn) - def memoized(*args): - if args in cache: - return cache[args] - else: - ret = fn(*args) - cache[args] = ret - return ret - - memoized.reset_cache = cache.clear - return memoized - - -def qudit_swap(dim): - # We should likely generalize this and include it in qip.gates. - W = qeye([dim, dim]) - return tensor_swap(W, (0, 1)) - - -@memoize +@functools.lru_cache def initialize_constraints_on_dnorm_problem(dim): # Start assembling constraints and variables. constraints = [] # Make a complex variable for X. - X = complex_var(dim ** 2, dim ** 2, "X") + X = _complex_var(dim**2, dim**2, "X") # Make complex variables for rho0 and rho1. 
-    rho0 = complex_var(dim, dim, "rho0")
-    rho1 = complex_var(dim, dim, "rho1")
-    constraints += dens(rho0, rho1)
+    rho0 = _complex_var(dim, dim, "rho0")
+    rho1 = _complex_var(dim, dim, "rho1")
+    constraints += _make_constraints(rho0, rho1)
 
     # Finally, add the tricky positive semidefinite constraint.
     # Since we're using column-stacking, but Watrous used row-stacking,
     # we need to swap the order in Rho0 and Rho1. This is not straightforward,
     # as CVXPY requires that the constant be the first argument. To solve this,
     # We conjugate by SWAP.
-    W = qudit_swap(dim).full()
+    W = swap(dim, dim).full()
     W = Complex(re=W.real, im=W.imag)
 
-    Rho0 = conj(W, kron(np.eye(dim), rho0))
-    Rho1 = conj(W, kron(np.eye(dim), rho1))
-
-    Y = cvxpy.bmat([
-        [Rho0.re, X.re, -Rho0.im, -X.im],
-        [X.re.T, Rho1.re, X.im.T, -Rho1.im],
-
-        [Rho0.im, X.im, Rho0.re, X.re],
-        [-X.im.T, Rho1.im, X.re.T, Rho1.re],
-    ])
+    Rho0 = _conj(W, _kron(np.eye(dim), rho0))
+    Rho1 = _conj(W, _kron(np.eye(dim), rho1))
+
+    Y = cvxpy.bmat(
+        [
+            [Rho0.re, X.re, -Rho0.im, -X.im],
+            [X.re.T, Rho1.re, X.im.T, -Rho1.im],
+            [Rho0.im, X.im, Rho0.re, X.re],
+            [-X.im.T, Rho1.im, X.re.T, Rho1.re],
+        ]
+    )
 
     constraints += [Y >> 0]
 
-    logger.debug("Using %d constraints.", len(constraints))
-
     return X, constraints
 
 
 def dnorm_problem(dim):
+    """
+    Create the cvxpy ``Problem`` for the dnorm metric using dense arrays
+    """
     X, constraints = initialize_constraints_on_dnorm_problem(dim)
 
     Jr = cvxpy.Parameter((dim**2, dim**2))
     Ji = cvxpy.Parameter((dim**2, dim**2))
 
     # The objective, however, depends on J.
-    objective = cvxpy.Maximize(cvxpy.trace(
-        Jr.T @ X.re + Ji.T @ X.im
-    ))
+    objective = cvxpy.Maximize(cvxpy.trace(Jr.T @ X.re + Ji.T @ X.im))
 
     problem = cvxpy.Problem(objective, constraints)
     return problem, Jr, Ji
 
 
 def dnorm_sparse_problem(dim, J_dat):
+    """
+    Create the cvxpy ``Problem`` for the dnorm metric using sparse arrays
+    """
     X, constraints = initialize_constraints_on_dnorm_problem(dim)
 
     J_val = J_dat.tocoo()
 
@@ -181,10 +142,11 @@ def adapt_sparse_params(A_val, dim):
         A_cols = np.arange(A_nnz.size)
         # We are pushing the data on the location of the nonzero elements
         # to the nonzero rows of A_indexer
-        A_Indexer = sp.coo_matrix((A_data, (A_rows, A_cols)),
-                                  shape=(side_size**2, A_nnz.size))
+        A_Indexer = sp.coo_matrix(
+            (A_data, (A_rows, A_cols)), shape=(side_size**2, A_nnz.size)
+        )
         # We get finaly the sparse matrix A which we wanted
-        A = cvxpy.reshape(A_Indexer @ A_nnz, (side_size, side_size), order='C')
+        A = cvxpy.reshape(A_Indexer @ A_nnz, (side_size, side_size), order="C")
         A_nnz.value = A_val.data
         return A
 
@@ -195,9 +157,7 @@ def adapt_sparse_params(A_val, dim):
     Ji = adapt_sparse_params(Ji_val, dim)
 
     # The objective, however, depends on J.
-    objective = cvxpy.Maximize(cvxpy.trace(
-        Jr.T @ X.re + Ji.T @ X.im
-    ))
+    objective = cvxpy.Maximize(cvxpy.trace(Jr.T @ X.re + Ji.T @ X.im))
 
     problem = cvxpy.Problem(objective, constraints)
     return problem
diff --git a/qutip/core/states.py b/qutip/core/states.py
index 85b57631d6..a3c310998a 100644
--- a/qutip/core/states.py
+++ b/qutip/core/states.py
@@ -19,6 +19,7 @@
 from .qobj import Qobj
 from .operators import jmat, displace, qdiags
 from .tensor import tensor
+from .. 
import settings def _promote_to_zero_list(arg, length): """ @@ -42,7 +43,7 @@ def _promote_to_zero_list(arg, length): raise TypeError("Dimensions must be an integer or list of integers.") -def basis(dimensions, n=None, offset=None, *, dtype=_data.Dense): +def basis(dimensions, n=None, offset=None, *, dtype=None): """Generates the vector representation of a Fock state. Parameters @@ -104,6 +105,7 @@ def basis(dimensions, n=None, offset=None, *, dtype=_data.Dense): basis(N, 1) = ground state """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense # Promote all parameters to lists to simplify later logic. if not isinstance(dimensions, list): dimensions = [dimensions] @@ -129,7 +131,7 @@ def basis(dimensions, n=None, offset=None, *, dtype=_data.Dense): copy=False) -def qutrit_basis(*, dtype=_data.Dense): +def qutrit_basis(*, dtype=None): """Basis states for a three level system (qutrit) dtype : type or str @@ -142,6 +144,7 @@ def qutrit_basis(*, dtype=_data.Dense): Array of qutrit basis vectors """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense out = np.empty((3,), dtype=object) out[:] = [ basis(3, 0, dtype=dtype), @@ -153,7 +156,7 @@ def qutrit_basis(*, dtype=_data.Dense): _COHERENT_METHODS = ('operator', 'analytic') -def coherent(N, alpha, offset=0, method=None, *, dtype=_data.Dense): +def coherent(N, alpha, offset=0, method=None, *, dtype=None): """Generates a coherent state with eigenvalue alpha. Constructed using displacement operator on vacuum state. @@ -207,6 +210,7 @@ def coherent(N, alpha, offset=0, method=None, *, dtype=_data.Dense): but would in that case give more accurate coefficients. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense if offset < 0: raise ValueError('Offset must be non-negative') @@ -240,7 +244,7 @@ def coherent(N, alpha, offset=0, method=None, *, dtype=_data.Dense): ) -def coherent_dm(N, alpha, offset=0, method='operator', *, dtype=_data.Dense): +def coherent_dm(N, alpha, offset=0, method='operator', *, dtype=None): """Density matrix representation of a coherent state. Constructed via outer product of :func:`qutip.states.coherent` @@ -293,10 +297,11 @@ def coherent_dm(N, alpha, offset=0, method='operator', *, dtype=_data.Dense): but would in that case give more accurate coefficients. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return coherent(N, alpha, offset=offset, method=method, dtype=dtype).proj() -def fock_dm(dimensions, n=None, offset=None, *, dtype=_data.CSR): +def fock_dm(dimensions, n=None, offset=None, *, dtype=None): """Density matrix representation of a Fock state Constructed via outer product of :func:`qutip.states.fock`. @@ -337,10 +342,11 @@ def fock_dm(dimensions, n=None, offset=None, *, dtype=_data.CSR): [ 0.+0.j 0.+0.j 0.+0.j]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia return basis(dimensions, n, offset=offset, dtype=dtype).proj() -def fock(dimensions, n=None, offset=None, *, dtype=_data.Dense): +def fock(dimensions, n=None, offset=None, *, dtype=None): """Bosonic Fock (number) state. Same as :func:`qutip.states.basis`. @@ -383,7 +389,7 @@ def fock(dimensions, n=None, offset=None, *, dtype=_data.Dense): return basis(dimensions, n, offset=offset, dtype=dtype) -def thermal_dm(N, n, method='operator', *, dtype=_data.CSR): +def thermal_dm(N, n, method='operator', *, dtype=None): """Density matrix for a thermal state of n particles Parameters @@ -440,6 +446,7 @@ def thermal_dm(N, n, method='operator', *, dtype=_data.CSR): if truncated too aggressively. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dia if n == 0: return fock_dm(N, 0, dtype=dtype) else: @@ -461,7 +468,7 @@ def thermal_dm(N, n, method='operator', *, dtype=_data.CSR): return out -def maximally_mixed_dm(N, *, dtype=_data.CSR): +def maximally_mixed_dm(N, *, dtype=None): """ Returns the maximally mixed density matrix for a Hilbert space of dimension N. @@ -480,6 +487,7 @@ def maximally_mixed_dm(N, *, dtype=_data.CSR): dm : qobj Thermal state density matrix. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia if not isinstance(N, numbers.Integral) or N <= 0: raise ValueError("N must be integer N > 0") return Qobj(_data.identity[dtype](N, scale=1/N), dims=[[N], [N]], @@ -518,7 +526,7 @@ def ket2dm(Q): raise TypeError("Input is not a ket or bra vector.") -def projection(N, n, m, offset=None, *, dtype=_data.CSR): +def projection(N, n, m, offset=None, *, dtype=None): r""" The projection operator that projects state :math:`\lvert m\rangle` on state :math:`\lvert n\rangle`. @@ -544,11 +552,12 @@ def projection(N, n, m, offset=None, *, dtype=_data.CSR): oper : qobj Requested projection operator. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR return basis(N, n, offset=offset, dtype=dtype) @ \ basis(N, m, offset=offset, dtype=dtype).dag() -def qstate(string, *, dtype=_data.Dense): +def qstate(string, *, dtype=None): r"""Creates a tensor product for a set of qubits in either the 'up' :math:`\lvert0\rangle` or 'down' :math:`\lvert1\rangle` state. @@ -585,6 +594,7 @@ def qstate(string, *, dtype=_data.Dense): [ 0.] 
[ 0.]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense n = len(string) if n != (string.count('u') + string.count('d')): raise TypeError('String input to QSTATE must consist ' + @@ -612,7 +622,7 @@ def _character_to_qudit(x): return _qubit_dict[x] if x in _qubit_dict else int(x) -def ket(seq, dim=2, *, dtype=_data.Dense): +def ket(seq, dim=2, *, dtype=None): """ Produces a multiparticle ket state for a list or string, where each element stands for state of the respective particle. @@ -690,12 +700,13 @@ def ket(seq, dim=2, *, dtype=_data.Dense): [ 0.] [ 0.]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense ns = [_character_to_qudit(x) for x in seq] dim = [dim]*len(ns) if isinstance(dim, numbers.Integral) else dim return basis(dim, ns, dtype=dtype) -def bra(seq, dim=2, *, dtype=_data.Dense): +def bra(seq, dim=2, *, dtype=None): """ Produces a multiparticle bra state for a list or string, where each element stands for state of the respective particle. @@ -749,6 +760,7 @@ def bra(seq, dim=2, *, dtype=_data.Dense): Qobj data = [[ 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]] """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return ket(seq, dim=dim, dtype=dtype).dag() @@ -866,7 +878,7 @@ def state_index_number(dims, index): return np.unravel_index(index, dims) -def state_number_qobj(dims, state, *, dtype=_data.Dense): +def state_number_qobj(dims, state, *, dtype=None): """ Return a Qobj representation of a quantum state specified by the state array `state`. @@ -906,6 +918,7 @@ def state_number_qobj(dims, state, *, dtype=_data.Dense): .. note:: Deprecated in QuTiP 5.0, use :func:`basis` instead. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense warnings.warn("basis() is a drop-in replacement for this", DeprecationWarning) return basis(dims, state, dtype=dtype) @@ -943,7 +956,7 @@ def enr_state_dictionaries(dims, excitations): return nstates, state2idx, idx2state -def enr_fock(dims, excitations, state, *, dtype=_data.Dense): +def enr_fock(dims, excitations, state, *, dtype=None): """ Generate the Fock state representation in a excitation-number restricted state space. The `dims` argument is a list of integers that define the @@ -977,6 +990,7 @@ def enr_fock(dims, excitations, state, *, dtype=_data.Dense): restricted state space defined by `dims` and `exciations`. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense nstates, state2idx, _ = enr_state_dictionaries(dims, excitations) try: data =_data.one_element[dtype]((nstates, 1), @@ -990,7 +1004,7 @@ def enr_fock(dims, excitations, state, *, dtype=_data.Dense): return Qobj(data, dims=[dims, [1]*len(dims)], type='ket', copy=False) -def enr_thermal_dm(dims, excitations, n, *, dtype=_data.CSR): +def enr_thermal_dm(dims, excitations, n, *, dtype=None): """ Generate the density operator for a thermal state in the excitation-number- restricted state space defined by the `dims` and `exciations` arguments. @@ -1023,6 +1037,7 @@ def enr_thermal_dm(dims, excitations, n, *, dtype=_data.CSR): dm : Qobj Thermal state density matrix. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR nstates, _, idx2state = enr_state_dictionaries(dims, excitations) if not isinstance(n, (list, np.ndarray)): n = np.ones(len(dims)) * n @@ -1037,7 +1052,7 @@ def enr_thermal_dm(dims, excitations, n, *, dtype=_data.CSR): return out -def phase_basis(N, m, phi0=0, *, dtype=_data.Dense): +def phase_basis(N, m, phi0=0, *, dtype=None): """ Basis vector for the mth phase of the Pegg-Barnett phase operator. @@ -1068,13 +1083,14 @@ def phase_basis(N, m, phi0=0, *, dtype=_data.Dense): Hilbert space. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense phim = phi0 + (2.0 * np.pi * m) / N n = np.arange(N)[:, np.newaxis] data = np.exp(1.0j * n * phim) / np.sqrt(N) return Qobj(data, dims=[[N], [1]], type='ket', copy=False).to(dtype) -def zero_ket(N, dims=None, *, dtype=_data.Dense): +def zero_ket(N, dims=None, *, dtype=None): """ Creates the zero ket vector with shape Nx1 and dimensions `dims`. @@ -1096,10 +1112,11 @@ def zero_ket(N, dims=None, *, dtype=_data.Dense): Zero ket on given Hilbert space. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return Qobj(_data.zeros[dtype](N, 1), dims=dims, type='ket', copy=False) -def spin_state(j, m, type='ket', *, dtype=_data.Dense): +def spin_state(j, m, type='ket', *, dtype=None): r"""Generates the spin state :math:`\lvert j, m\rangle`, i.e. the eigenstate of the spin-j Sz operator with eigenvalue m. @@ -1123,6 +1140,7 @@ def spin_state(j, m, type='ket', *, dtype=_data.Dense): state : qobj Qobj quantum object for spin state """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense J = 2*j + 1 if type == 'ket': @@ -1135,7 +1153,7 @@ def spin_state(j, m, type='ket', *, dtype=_data.Dense): raise ValueError(f"Invalid value keyword argument type='{type}'") -def spin_coherent(j, theta, phi, type='ket', *, dtype=_data.Dense): +def spin_coherent(j, theta, phi, type='ket', *, dtype=None): r"""Generate the coherent spin state :math:`\lvert \theta, \phi\rangle`. 
Parameters @@ -1161,6 +1179,7 @@ def spin_coherent(j, theta, phi, type='ket', *, dtype=_data.Dense): state : qobj Qobj quantum object for spin coherent state """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense if type not in ['ket', 'bra', 'dm']: raise ValueError("Invalid value keyword argument 'type'") Sp = jmat(j, '+') @@ -1183,7 +1202,7 @@ def spin_coherent(j, theta, phi, type='ket', *, dtype=_data.Dense): '11': np.sqrt(0.5) * (basis([2, 2], [0, 1]) - basis([2, 2], [1, 0])), } -def bell_state(state='00', *, dtype=_data.Dense): +def bell_state(state='00', *, dtype=None): r""" Returns the selected Bell state: @@ -1215,10 +1234,11 @@ def bell_state(state='00', *, dtype=_data.Dense): Bell_state : qobj Bell state """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return _BELL_STATES[state].copy().to(dtype) -def singlet_state(*, dtype=_data.Dense): +def singlet_state(*, dtype=None): r""" Returns the two particle singlet-state: @@ -1239,10 +1259,11 @@ def singlet_state(*, dtype=_data.Dense): Bell_state : qobj :math:`\lvert B_{11}\rangle` Bell state """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return bell_state('11').to(dtype) -def triplet_states(*, dtype=_data.Dense): +def triplet_states(*, dtype=None): r""" Returns a list of the two particle triplet-states: @@ -1271,7 +1292,7 @@ def triplet_states(*, dtype=_data.Dense): ] -def w_state(N=3, *, dtype=_data.Dense): +def w_state(N=3, *, dtype=None): """ Returns the N-qubit W-state: ``[ |100..0> + |010..0> + |001..0> + ... 
|000..1> ] / sqrt(n)`` @@ -1298,7 +1319,7 @@ def w_state(N=3, *, dtype=_data.Dense): return np.sqrt(1 / N) * state -def ghz_state(N=3, *, dtype=_data.Dense): +def ghz_state(N=3, *, dtype=None): """ Returns the N-qubit GHZ-state: ``[ |00...00> + |11...11> ] / sqrt(2)`` diff --git a/qutip/core/superoperator.py b/qutip/core/superoperator.py index 328862b8f3..3ed59fc623 100644 --- a/qutip/core/superoperator.py +++ b/qutip/core/superoperator.py @@ -100,7 +100,7 @@ def liouvillian(H=None, c_ops=None, data_only=False, chi=None): spI = _data.identity(op_shape[0], dtype=type(H.data)) data = _data.mul(_data.kron(spI, H.data), -1j) - data = _data.add(data, _data.kron(H.data.transpose(), spI), scale=1j) + data = _data.add(data, _data.kron_transpose(H.data, spI), scale=1j) for c_op, chi_ in zip(c_ops, chi): c = c_op.data @@ -108,7 +108,7 @@ def liouvillian(H=None, c_ops=None, data_only=False, chi=None): cdc = _data.matmul(cd, c) data = _data.add(data, _data.kron(c.conj(), c), np.exp(1j*chi_)) data = _data.add(data, _data.kron(spI, cdc), -0.5) - data = _data.add(data, _data.kron(cdc.transpose(), spI), -0.5) + data = _data.add(data, _data.kron_transpose(cdc, spI), -0.5) if data_only: return data @@ -310,8 +310,8 @@ def spost(A): """ if not A.isoper: raise TypeError('Input is not a quantum operator') - data = _data.kron(A.data.transpose(), - _data.identity(A.shape[0], dtype=type(A.data))) + Id = _data.identity(A.shape[0], dtype=type(A.data)) + data = _data.kron_transpose(A.data, _data.identity_like(A.data)) return Qobj(data, dims=[A.dims, A.dims], type='super', @@ -336,7 +336,7 @@ def spre(A): """ if not A.isoper: raise TypeError('Input is not a quantum operator') - data = _data.kron(_data.identity(A.shape[0], dtype=type(A.data)), A.data) + data = _data.kron(_data.identity_like(A.data), A.data) return Qobj(data, dims=[A.dims, A.dims], type='super', @@ -379,7 +379,7 @@ def sprepost(A, B): _drop_projected_dims(B.dims[1])], [_drop_projected_dims(A.dims[1]), 
_drop_projected_dims(B.dims[0])]] - return Qobj(_data.kron(B.data.transpose(), A.data), + return Qobj(_data.kron_transpose(B.data, A.data), dims=dims, type='super', superrep='super', diff --git a/qutip/core/tensor.py b/qutip/core/tensor.py index 969d2ad842..3a3661752c 100644 --- a/qutip/core/tensor.py +++ b/qutip/core/tensor.py @@ -333,7 +333,7 @@ def tensor_contract(qobj, *pairs): # We don't need to check for tensor idxs versus dims idxs here, # as column- versus row-stacking will never move an index for the # vectorized operator spaces all the way from the left to the right. - l_mtx_dims, r_mtx_dims = map(np.product, map(flatten, contracted_dims)) + l_mtx_dims, r_mtx_dims = map(np.prod, map(flatten, contracted_dims)) # Reshape back into a 2D matrix. qmtx = qtens.reshape((l_mtx_dims, r_mtx_dims)) diff --git a/qutip/ipynbtools.py b/qutip/ipynbtools.py index e2d906b637..48e2eeb14b 100644 --- a/qutip/ipynbtools.py +++ b/qutip/ipynbtools.py @@ -1,7 +1,7 @@ """ This module contains utility functions for using QuTiP with IPython notebooks. 
""" -from qutip.ui.progressbar import BaseProgressBar +from qutip.ui.progressbar import BaseProgressBar, HTMLProgressBar from .settings import _blas_info, available_cpu_count import IPython @@ -11,16 +11,16 @@ try: from ipyparallel import Client __all__ = ['version_table', 'plot_animation', - 'parallel_map', 'HTMLProgressBar'] + 'parallel_map'] except: - __all__ = ['version_table', 'plot_animation', 'HTMLProgressBar'] + __all__ = ['version_table', 'plot_animation'] else: try: from IPython.parallel import Client __all__ = ['version_table', 'plot_animation', - 'parallel_map', 'HTMLProgressBar'] + 'parallel_map'] except: - __all__ = ['version_table', 'plot_animation', 'HTMLProgressBar'] + __all__ = ['version_table', 'plot_animation'] from IPython.display import HTML, Javascript, display @@ -39,10 +39,15 @@ import qutip import numpy import scipy -import Cython import matplotlib import IPython +try: + import Cython + _cython_available = True +except ImportError: + _cython_available = False + def version_table(verbose=False): """ @@ -67,13 +72,14 @@ def version_table(verbose=False): ("Numpy", numpy.__version__), ("SciPy", scipy.__version__), ("matplotlib", matplotlib.__version__), - ("Cython", Cython.__version__), ("Number of CPUs", available_cpu_count()), ("BLAS Info", _blas_info()), ("IPython", IPython.__version__), ("Python", sys.version), ("OS", "%s [%s]" % (os.name, sys.platform)) ] + if _cython_available: + packages.append(("Cython", Cython.__version__)) for name, version in packages: html += "%s%s" % (name, version) @@ -97,57 +103,6 @@ def version_table(verbose=False): return HTML(html) -class HTMLProgressBar(BaseProgressBar): - """ - A simple HTML progress bar for using in IPython notebooks. 
Based on - IPython ProgressBar demo notebook: - https://github.com/ipython/ipython/tree/master/examples/notebooks - - Example usage: - - n_vec = linspace(0, 10, 100) - pbar = HTMLProgressBar(len(n_vec)) - for n in n_vec: - pbar.update(n) - compute_with_n(n) - """ - - def __init__(self, iterations=0, chunk_size=1.0): - self.divid = str(uuid.uuid4()) - self.textid = str(uuid.uuid4()) - self.pb = HTML("""\ -
-
 
-
-

-""" % (self.divid, self.textid)) - display(self.pb) - super(HTMLProgressBar, self).start(iterations, chunk_size) - - def start(self, iterations=0, chunk_size=1.0): - super(HTMLProgressBar, self).start(iterations, chunk_size) - - def update(self, n): - p = (n / self.N) * 100.0 - if p >= self.p_chunk: - lbl = ("Elapsed time: %s. " % self.time_elapsed() + - "Est. remaining time: %s." % self.time_remaining_est(p)) - js_code = ("$('div#%s').width('%i%%');" % (self.divid, p) + - "$('p#%s').text('%s');" % (self.textid, lbl)) - display(Javascript(js_code)) - # display(Javascript("$('div#%s').width('%i%%')" % (self.divid, - # p))) - self.p_chunk += self.p_chunk_size - - def finished(self): - self.t_done = time.time() - lbl = "Elapsed time: %s" % self.time_elapsed() - js_code = ("$('div#%s').width('%i%%');" % (self.divid, 100.0) + - "$('p#%s').text('%s');" % (self.textid, lbl)) - display(Javascript(js_code)) - - def _visualize_parfor_data(metadata): """ Visualizing the task scheduling meta data collected from AsyncResults. 
@@ -317,16 +272,15 @@ def parallel_map(task, values, task_args=None, task_kwargs=None, view.wait(ar_list) else: if progress_bar is True: - progress_bar = HTMLProgressBar() - - n = len(ar_list) - progress_bar.start(n) + progress_bar = HTMLProgressBar(len(ar_list)) + prev_finished = 0 while True: n_finished = sum([ar.progress for ar in ar_list]) - progress_bar.update(n_finished) + for _ in range(prev_finished, n_finished): + progress_bar.update() + prev_finished = n_finished if view.wait(ar_list, timeout=0.5): - progress_bar.update(n) break progress_bar.finished() diff --git a/qutip/legacy/__init__.py b/qutip/legacy/__init__.py new file mode 100644 index 0000000000..918689196a --- /dev/null +++ b/qutip/legacy/__init__.py @@ -0,0 +1,5 @@ +import warnings +warnings.warn("Function in legacy are untested.") +del warnings + +from .nonmarkov.memorycascade import MemoryCascade diff --git a/qutip/solve/__init__.py b/qutip/legacy/nonmarkov/__init__.py similarity index 100% rename from qutip/solve/__init__.py rename to qutip/legacy/nonmarkov/__init__.py diff --git a/qutip/solve/nonmarkov/memorycascade.py b/qutip/legacy/nonmarkov/memorycascade.py similarity index 65% rename from qutip/solve/nonmarkov/memorycascade.py rename to qutip/legacy/nonmarkov/memorycascade.py index 35996d21cd..0cdf18cd3b 100644 --- a/qutip/solve/nonmarkov/memorycascade.py +++ b/qutip/legacy/nonmarkov/memorycascade.py @@ -16,7 +16,23 @@ import numpy as np import warnings -import qutip as qt +from qutip import ( + sprepost, + Qobj, + spre, + spost, + liouvillian, + qeye, + mesolve, + propagator, + composite, + isket, + ket2dm, + tensor_contract, +) + + +__all__ = ["MemoryCascade"] class MemoryCascade: @@ -49,35 +65,39 @@ class MemoryCascade: Integrator method to use. Defaults to 'propagator' which tends to be faster for long times (i.e., large Hilbert space). - parallel : bool - Run integrator in parallel if True. Only implemented for 'propagator' - as the integrator method. 
- options : dict Generic solver options. """ - def __init__(self, H_S, L1, L2, S_matrix=None, c_ops_markov=None, - integrator='propagator', parallel=False, options=None): + def __init__( + self, + H_S, + L1, + L2, + S_matrix=None, + c_ops_markov=None, + integrator="propagator", + options=None, + ): if options is None: - self.options = {} + self.options = {"progress_bar": False} else: self.options = options self.H_S = H_S self.sysdims = H_S.dims - if isinstance(L1, qt.Qobj): + if isinstance(L1, Qobj): self.L1 = [L1] else: self.L1 = L1 - if isinstance(L2, qt.Qobj): + if isinstance(L2, Qobj): self.L2 = [L2] else: self.L2 = L2 if not len(self.L1) == len(self.L2): - raise ValueError('L1 and L2 has to be of equal length.') - if isinstance(c_ops_markov, qt.Qobj): + raise ValueError("L1 and L2 has to be of equal length.") + if isinstance(c_ops_markov, Qobj): self.c_ops_markov = [c_ops_markov] else: self.c_ops_markov = c_ops_markov @@ -87,12 +107,19 @@ def __init__(self, H_S, L1, L2, S_matrix=None, c_ops_markov=None, else: self.S_matrix = S_matrix # create system identity superoperator - self.Id = qt.qeye(H_S.shape[0]) + self.Id = qeye(H_S.shape[0]) self.Id.dims = self.sysdims - self.Id = qt.sprepost(self.Id, self.Id) - self.store_states = self.options['store_states'] + self.Id = sprepost(self.Id, self.Id) + self.store_states = self.options.get("store_states", False) self.integrator = integrator - self.parallel = parallel + self._generators = {} + + def generator(self, k): + if k not in self._generators: + self._generators[k] = _generator( + k, self.H_S, self.L1, self.L2, self.S_matrix, self.c_ops_markov + ) + return self._generators[k] def propagator(self, t, tau, notrace=False): """ @@ -116,25 +143,27 @@ def propagator(self, t, tau, notrace=False): : :class:`qutip.Qobj` time-propagator for reduced system dynamics """ - k = int(t/tau)+1 - s = t-(k-1)*tau - G1, E0 = _generator(k, self.H_S, self.L1, self.L2, self.S_matrix, - self.c_ops_markov) - E = _integrate(G1, E0, 0., s, 
integrator=self.integrator, - parallel=self.parallel, opt=self.options) + k = int(t / tau) + 1 + s = t - (k - 1) * tau + G1 = self.generator(k) + E0 = qeye(G1.dims[0]) + E = _integrate( + G1, E0, 0.0, s, integrator=self.integrator, opt=self.options + ) if k > 1: - G2, null = _generator(k-1, self.H_S, self.L1, self.L2, - self.S_matrix, self.c_ops_markov) - G2 = qt.composite(G2, self.Id) - E = _integrate(G2, E, s, tau, integrator=self.integrator, - parallel=self.parallel, opt=self.options) - E.dims = E0.dims + G2 = self.generator(k - 1) + G2 = composite(G2, self.Id) + E = _integrate( + G2, E, s, tau, integrator=self.integrator, opt=self.options + ) + if not notrace: E = _genptrace(E, k) return E - def outfieldpropagator(self, blist, tlist, tau, c1=None, c2=None, - notrace=False): + def outfieldpropagator( + self, blist, tlist, tau, c1=None, c2=None, notrace=False + ): r""" Compute propagator for computing output field expectation values for times t1,t2,... and @@ -181,58 +210,64 @@ def outfieldpropagator(self, blist, tlist, tau, c1=None, c2=None, if c1 is None and len(self.L1) == 1: c1 = self.L1[0] else: - raise ValueError('Argument c1 has to be specified when more than' + - 'one collapse operator couples to the feedback' + - 'loop.') + raise ValueError( + "Argument c1 has to be specified when more than" + + "one collapse operator couples to the feedback" + + "loop." + ) if c2 is None and len(self.L2) == 1: c2 = self.L2[0] else: - raise ValueError('Argument c1 has to be specified when more than' + - 'one collapse operator couples to the feedback' + - 'loop.') + raise ValueError( + "Argument c1 has to be specified when more than" + + "one collapse operator couples to the feedback" + + "loop." 
+ ) klist = [] slist = [] for t in tlist: - klist.append(int(t/tau)+1) - slist.append(t-(klist[-1]-1)*tau) + klist.append(int(t / tau) + 1) + slist.append(t - (klist[-1] - 1) * tau) kmax = max(klist) zipped = sorted(zip(slist, klist, blist)) slist = [s for (s, k, b) in zipped] klist = [k for (s, k, b) in zipped] blist = [b for (s, k, b) in zipped] - G1, E0 = _generator(kmax, self.H_S, self.L1, self.L2, self.S_matrix, - self.c_ops_markov) - sprev = 0. + G1 = self.generator(kmax) + sprev = 0.0 + E0 = qeye(G1.dims[0]) E = E0 for i, s in enumerate(slist): - E = _integrate(G1, E, sprev, s, integrator=self.integrator, - parallel=self.parallel, opt=self.options) + E = _integrate( + G1, E, sprev, s, integrator=self.integrator, opt=self.options + ) + l2 = _localop(c2, klist[i], kmax) if klist[i] == 1: - l1 = 0.*qt.Qobj() + l1 = l2 * 0.0 else: - l1 = _localop(c1, klist[i]-1, kmax) - l2 = _localop(c2, klist[i], kmax) + l1 = _localop(c1, klist[i] - 1, kmax) if blist[i] == 0: superop = self.Id elif blist[i] == 1: - superop = qt.spre(l1+l2) + superop = spre(l1 + l2) elif blist[i] == 2: - superop = qt.spost(l1.dag()+l2.dag()) + superop = spost(l1.dag() + l2.dag()) elif blist[i] == 3: - superop = qt.spre(l1) + superop = spre(l1) elif blist[i] == 4: - superop = qt.spost(l1.dag()) + superop = spost(l1.dag()) else: - raise ValueError('Allowed values in blist are 0, 1, 2, 3 ' + - 'and 4.') - superop.dims = E.dims - E = superop*E + raise ValueError( + "Allowed values in blist are 0, 1, 2, 3 and 4." 
+ ) + + E = superop @ E sprev = s - E = _integrate(G1, E, slist[-1], tau, integrator=self.integrator, - parallel=self.parallel, opt=self.options) + E = _integrate( + G1, E, slist[-1], tau, integrator=self.integrator, opt=self.options + ) - E.dims = E0.dims if not notrace: E = _genptrace(E, kmax) return E @@ -257,12 +292,11 @@ def rhot(self, rho0, t, tau): : :class:`qutip.Qobj` density matrix at time :math:`t` """ - if qt.isket(rho0): - rho0 = qt.ket2dm(rho0) + if isket(rho0): + rho0 = ket2dm(rho0) E = self.propagator(t, tau) - rhovec = qt.operator_to_vector(rho0) - return qt.vector_to_operator(E*rhovec) + return E(rho0) def outfieldcorr(self, rho0, blist, tlist, tau, c1=None, c2=None): r""" @@ -306,9 +340,11 @@ def outfieldcorr(self, rho0, blist, tlist, tau, c1=None, c2=None): : complex expectation value of field correlation function """ + if isket(rho0): + rho0 = ket2dm(rho0) + E = self.outfieldpropagator(blist, tlist, tau) - rhovec = qt.operator_to_vector(rho0) - return (qt.vector_to_operator(E*rhovec)).tr() + return (E(rho0)).tr() def _localop(op, l, k): @@ -317,15 +353,14 @@ def _localop(op, l, k): with identity operators on all the other k-1 systems """ if l < 1 or l > k: - raise IndexError('index l out of range') - h = op - I = qt.qeye(op.shape[0]) - I.dims = op.dims - for i in range(1, l): - h = qt.tensor(I, h) - for i in range(l+1, k+1): - h = qt.tensor(h, I) - return h + raise IndexError("index l out of range") + out = op + if l > 1: + out = qeye(op.dims[0] * (l - 1)) & out + if l < k: + out = out & qeye(op.dims[0] * (k - l)) + + return out def _genptrace(E, k): @@ -333,9 +368,9 @@ def _genptrace(E, k): Perform a gneralized partial trace on a superoperator E, tracing out all subsystems but one. 
""" - for l in range(k-1): + for l in range(k - 1): nsys = len(E.dims[0][0]) - E = qt.tensor_contract(E, (0, 2*nsys+1), (nsys, 3*nsys+1)) + E = tensor_contract(E, (0, 2 * nsys + 1), (nsys, 3 * nsys + 1)) return E @@ -343,63 +378,56 @@ def _generator(k, H, L1, L2, S=None, c_ops_markov=None): """ Create a Liouvillian for a cascaded chain of k system copies """ - id = qt.qeye(H.dims[0][0]) - Id = qt.sprepost(id, id) + id = qeye(H.dims[0][0]) + Id = sprepost(id, id) if S is None: S = np.identity(len(L1)) # create Lindbladian - L = qt.Qobj() - E0 = Id + # first system - L += qt.liouvillian(None, [_localop(c, 1, k) for c in L2]) + L = liouvillian(None, [_localop(c, 1, k) for c in L2]) for l in range(1, k): - # Identiy superoperator - E0 = qt.composite(E0, Id) # Bare Hamiltonian Hl = _localop(H, l, k) - L += qt.liouvillian(Hl, []) + L += liouvillian(Hl, []) # Markovian Decay channels if c_ops_markov is not None: for c in c_ops_markov: cl = _localop(c, l, k) - L += qt.liouvillian(None, [cl]) + L += liouvillian(None, [cl]) # Cascade coupling c1 = np.array([_localop(c, l, k) for c in L1]) - c2 = np.array([_localop(c, l+1, k) for c in L2]) + c2 = np.array([_localop(c, l + 1, k) for c in L2]) c2dag = np.array([c.dag() for c in c2]) - Hcasc = -0.5j*np.dot(c2dag, np.dot(S, c1)) + Hcasc = -0.5j * np.dot(c2dag, np.dot(S, c1)) Hcasc += Hcasc.dag() Lvec = c2 + np.dot(S, c1) - L += qt.liouvillian(Hcasc, [c for c in Lvec]) + L += liouvillian(Hcasc, [c for c in Lvec]) # last system - L += qt.liouvillian(_localop(H, k, k), [_localop(c, k, k) for c in L1]) + L += liouvillian(_localop(H, k, k), [_localop(c, k, k) for c in L1]) if c_ops_markov is not None: for c in c_ops_markov: cl = _localop(c, k, k) - L += qt.liouvillian(None, [cl]) - E0.dims = L.dims - # return generator and identity superop E0 - return L, E0 + L += liouvillian(None, [cl]) + # return generator + return L -def _integrate(L, E0, ti, tf, integrator='propagator', parallel=False, - opt=None): +def _integrate(L, E0, ti, tf, 
integrator="propagator", opt=None): """ Basic ode integrator """ opt = opt or {} if tf > ti: - if integrator == 'mesolve': - if parallel: - warnings.warn('parallelization not implemented for "mesolve"') - opt.store_final_state = True - sol = qt.mesolve(L, E0, [ti, tf], [], [], options=opt) + if integrator == "mesolve": + opt["store_final_state"] = True + sol = mesolve(L, E0, [ti, tf], options=opt) return sol.final_state - elif integrator == 'propagator': - return qt.propagator(L, (tf-ti), [], [], parallel=parallel, - options=opt)*E0 + elif integrator == "propagator": + return propagator(L, (tf - ti), options=opt) @ E0 else: - raise ValueError('integrator keyword must be either "propagator"' + - 'or "mesolve"') + raise ValueError( + 'integrator keyword must be either "propagator" or "mesolve"' + ) else: return E0 diff --git a/qutip/solve/rcsolve.py b/qutip/legacy/rcsolve.py similarity index 75% rename from qutip/solve/rcsolve.py rename to qutip/legacy/rcsolve.py index b357bc6e02..b171946ba2 100644 --- a/qutip/solve/rcsolve.py +++ b/qutip/legacy/rcsolve.py @@ -14,8 +14,8 @@ from numpy import matrix from numpy import linalg from .. import ( - spre, spost, sprepost, thermal_dm, tensor, identity, destroy, sigmax, - sigmaz, basis, qeye, mesolve + spre, spost, sprepost, thermal_dm, tensor, destroy, + qeye, mesolve, qeye_like, qzero_like ) @@ -56,12 +56,15 @@ def rcsolve(Hsys, psi0, tlist, e_ops, Q, wc, alpha, N, w_th, sparse=False, output: Result System evolution. 
""" + if psi0.isket: + psi0 = psi0.proj() + if options is None: options = {} dot_energy, dot_state = Hsys.eigenstates(sparse=sparse) deltaE = dot_energy[1] - dot_energy[0] - if (w_th < deltaE/2): + if (w_th < deltaE / 2): warnings.warn("Given w_th might not provide accurate results") gamma = deltaE / (2 * np.pi * wc) wa = 2 * np.pi * gamma * wc # reaction coordinate frequency @@ -69,11 +72,8 @@ def rcsolve(Hsys, psi0, tlist, e_ops, Q, wc, alpha, N, w_th, sparse=False, nb = (1 / (np.exp(wa/w_th) - 1)) # Reaction coordinate hamiltonian/operators - - dimensions = Q.dims - a = tensor(destroy(N), qeye(dimensions[1])) - unit = tensor(qeye(N), qeye(dimensions[1])) - Nmax = N * dimensions[1][0] + a = tensor(destroy(N), qeye_like(Hsys)) + unit = tensor(qeye(N), qeye_like(Hsys)) Q_exp = tensor(qeye(N), Q) Hsys_exp = tensor(qeye(N), Hsys) e_ops_exp = [tensor(qeye(N), kk) for kk in e_ops] @@ -86,29 +86,22 @@ def rcsolve(Hsys, psi0, tlist, e_ops, Q, wc, alpha, N, w_th, sparse=False, # interaction H1 = (g * (a.dag() + a) * Q_exp) H = H0 + H1 - L = 0 - PsipreEta = 0 - PsipreX = 0 + PsipreEta = qzero_like(H) + PsipreX = qzero_like(H) all_energy, all_state = H.eigenstates(sparse=sparse) - Apre = spre((a + a.dag())) - Apost = spost(a + a.dag()) + Nmax = len(all_energy) for j in range(Nmax): for k in range(Nmax): A = xa.matrix_element(all_state[j].dag(), all_state[k]) delE = (all_energy[j] - all_energy[k]) if abs(A) > 0.0: if abs(delE) > 0.0: - X = (0.5 * np.pi * gamma*(all_energy[j] - all_energy[k]) - * (np.cosh((all_energy[j] - all_energy[k]) / - (2 * w_th)) - / (np.sinh((all_energy[j] - all_energy[k]) / - (2 * w_th)))) * A) - eta = (0.5 * np.pi * gamma * - (all_energy[j] - all_energy[k]) * A) - PsipreX = PsipreX + X * all_state[j] * all_state[k].dag() - PsipreEta = PsipreEta + (eta * all_state[j] - * all_state[k].dag()) + eta = 0.5 * np.pi * gamma * delE * A + X = eta / np.tanh(delE / (2 * w_th)) + proj = all_state[j] * all_state[k].dag() + PsipreX = PsipreX + X * proj + PsipreEta 
= PsipreEta + eta * proj else: X = 0.5 * np.pi * gamma * A * 2 * w_th PsipreX = PsipreX + X * all_state[j] * all_state[k].dag() diff --git a/qutip/logging_utils.py b/qutip/logging_utils.py deleted file mode 100644 index c91b380b50..0000000000 --- a/qutip/logging_utils.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -This module contains internal-use functions for configuring and writing to -debug logs, using Python's internal logging functionality by default. -""" - -# IMPORTS -from __future__ import absolute_import -import inspect -import logging - -from qutip.settings import settings - -# EXPORTS -NOTSET = logging.NOTSET -DEBUG_INTENSE = logging.DEBUG - 4 -DEBUG_VERBOSE = logging.DEBUG - 2 -DEBUG = logging.DEBUG -INFO = logging.INFO -WARN = logging.WARN -ERROR = logging.ERROR -CRITICAL = logging.CRITICAL - -__all__ = ['get_logger'] - -# META-LOGGING - -metalogger = logging.getLogger(__name__) -metalogger.addHandler(logging.NullHandler()) - - -# FUNCTIONS - -def get_logger(name=None): - """ - Returns a Python logging object with handlers configured - in accordance with ~/.qutiprc. By default, this will do - something sensible to integrate with IPython when running - in that environment, and will print to stdout otherwise. - - Note that this function uses a bit of magic, and thus should - not be considered part of the QuTiP API. Rather, this function - is for internal use only. - - Parameters - ---------- - - name : str - Name of the logger to be created. If not passed, - the name will automatically be set to the name of the - calling module. - """ - if name is None: - try: - calling_frame = inspect.stack()[1][0] - calling_module = inspect.getmodule(calling_frame) - name = (calling_module.__name__ - if calling_module is not None else '') - - except Exception: - metalogger.warn('Error creating logger.', exc_info=1) - name = '' - - logger = logging.getLogger(name) - - policy = settings.log_handler - - if policy == 'default': - # Let's try to see if we're in IPython mode. 
- policy = 'basic' if settings.ipython else 'stream' - - metalogger.debug("Creating logger for {} with policy {}.".format( - name, policy - )) - - if policy == 'basic': - # Add no handlers, just let basicConfig do it all. - # This is nice for working with IPython, since - # it will use its own handlers instead of our StreamHandler - # below. - if settings.debug: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig() - - elif policy == 'stream': - formatter = logging.Formatter( - '[%(asctime)s] %(name)s[%(process)s]: ' - '%(funcName)s: %(levelname)s: %(message)s', - '%Y-%m-%d %H:%M:%S') - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - - # We're handling things here, so no propagation out. - logger.propagate = False - - elif policy == 'null': - # We need to add a NullHandler so that debugging works - # at all, but this policy leaves it to the user to - # make their own handlers. This is particularly useful - # for capturing to logfiles. - logger.addHandler(logging.NullHandler()) - - if settings.debug: - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.WARN) - - return logger diff --git a/qutip/measurement.py b/qutip/measurement.py index 0211180a73..da90387dd0 100644 --- a/qutip/measurement.py +++ b/qutip/measurement.py @@ -2,6 +2,15 @@ Module for measuring quantum objects. """ +__all__ = [ + 'measurement_statistics_povm', + 'measurement_statistics_observable', + 'measure_observable', + 'measure_povm', + 'measurement_statistics', + 'measure' +] + import numpy as np from . import Qobj, expect, identity, tensor diff --git a/qutip/orbital.py b/qutip/orbital.py deleted file mode 100644 index 209b65b406..0000000000 --- a/qutip/orbital.py +++ /dev/null @@ -1,81 +0,0 @@ -__all__ = ['orbital'] - -import numpy as np -from scipy.special import sph_harm - - -def orbital(theta, phi, *args): - r"""Calculates an angular wave function on a sphere. 
- ``psi = orbital(theta,phi,ket1,ket2,...)`` calculates - the angular wave function on a sphere at the mesh of points - defined by theta and phi which is - :math:`\sum_{lm} c_{lm} Y_{lm}(theta,phi)` where :math:`C_{lm}` are the - coefficients specified by the list of kets. Each ket has 2l+1 components - for some integer l. The first entry of the ket defines the coefficient - c_{l,-l}, while the last entry of the ket defines the - coefficient c_{l, l}. - - Parameters - ---------- - theta : int/float/list/array - Polar angles in [0, pi] - - phi : int/float/list/array - Azimuthal angles in [0, 2*pi] - - args : list/array - ``list`` of ket vectors. - - Returns - ------- - ``array`` for angular wave function evaluated at all - possible combinations of theta and phi - - """ - if isinstance(args[0], list): - # use the list in args[0] - args = args[0] - - # convert to numpy array - theta = np.atleast_1d(theta) - phi = np.atleast_1d(phi) - # check that arrays are only 1D - if len(theta.shape) != 1: - raise ValueError('Polar angles theta must be 1D list') - if len(phi.shape) != 1: - raise ValueError('Azimuthal angles phi must be 1D list') - - # make meshgrid - phi_mesh, theta_mesh = np.meshgrid(phi, theta) - # setup empty wavefunction - psi = np.zeros([theta.shape[0], phi.shape[0]], dtype=complex) - # iterate through provided kets - for k in range(len(args)): - ket = args[k] - if ket.type == 'bra': - ket = ket.conj() - elif not ket.type == 'ket': - raise TypeError('Invalid type for input ket in orbital') - # Extract l value from the state - l = (ket.shape[0] - 1) / 2.0 - if l != np.floor(l): - raise ValueError( - 'Kets must have odd number of components in orbital') - l = int(l) - # get factors from ket - factors = ket.full() - # iterate through the possible m - - for i in range(len(factors)): - # set correct m - m = i - l - # calculate spherical harmonics - # note that theta and phi are interchanged in scipy implementation - res = sph_harm(m, l, phi_mesh, theta_mesh) - psi 
+= factors[i] * res - - # flatten output if only one row - if psi.shape[1] == 1: - psi = psi.flatten() - - return psi diff --git a/qutip/partial_transpose.py b/qutip/partial_transpose.py index 1601bffbcc..61871369d8 100644 --- a/qutip/partial_transpose.py +++ b/qutip/partial_transpose.py @@ -74,7 +74,7 @@ def _partial_transpose_sparse(rho, mask): """ data = sp.lil_matrix((rho.shape[0], rho.shape[1]), dtype=complex) - rho_data = rho.data.as_scipy() + rho_data = rho.to("CSR").data.as_scipy() for m in range(len(rho_data.indptr) - 1): @@ -117,6 +117,6 @@ def _partial_transpose_reference(rho, mask): n_pt = state_number_index( rho.dims[0], np.choose(mask, [psi_B, psi_A])) - A_pt[m_pt, n_pt] = rho.data.as_scipy()[m, n] + A_pt[m_pt, n_pt] = rho.to("CSR").data.as_scipy()[m, n] return Qobj(A_pt, dims=rho.dims) diff --git a/qutip/solve/nonmarkov/__init__.py b/qutip/piqs/__init__.py similarity index 100% rename from qutip/solve/nonmarkov/__init__.py rename to qutip/piqs/__init__.py diff --git a/qutip/solve/_piqs.pyx b/qutip/piqs/_piqs.pyx similarity index 100% rename from qutip/solve/_piqs.pyx rename to qutip/piqs/_piqs.pyx diff --git a/qutip/solve/piqs.py b/qutip/piqs/piqs.py similarity index 99% rename from qutip/solve/piqs.py rename to qutip/piqs/piqs.py index 2784e986cb..a8f14e7668 100644 --- a/qutip/solve/piqs.py +++ b/qutip/piqs/piqs.py @@ -22,7 +22,6 @@ sigmap, sigmam, ) from ..entropy import entropy_vn -from .solver import SolverOptions, Result from ._piqs import Dicke as _Dicke from ._piqs import ( jmm1_dictionary, @@ -314,12 +313,16 @@ def purity_dicke(rho): return dicke_function_trace(f, rho) +class Result: + pass + + class Dicke(object): """The Dicke class which builds the Lindbladian and Liouvillian matrix. 
Examples -------- - >>> from piqs import Dicke, jspin + >>> from qutip.piqs import Dicke, jspin >>> N = 2 >>> jx, jy, jz = jspin(N) >>> jp = jspin(N, "+") @@ -499,7 +502,7 @@ def liouvillian(self): liouv = lindblad + hamiltonian_superoperator return liouv - def pisolve(self, initial_state, tlist, options=None): + def pisolve(self, initial_state, tlist): """ Solve for diagonal Hamiltonians and initial states faster. @@ -512,13 +515,10 @@ def pisolve(self, initial_state, tlist, options=None): tlist: ndarray A 1D numpy array of list of timesteps to integrate - options : :class:`qutip.solver.SolverOptions` - The options for the solver. - Returns ======= result: list - A dictionary of the type `qutip.solver.Result` which holds the + A dictionary of the type `qutip.piqs.Result` which holds the results of the evolution. """ if isdiagonal(initial_state) == False: @@ -546,7 +546,7 @@ def pisolve(self, initial_state, tlist, options=None): self.collective_pumping, self.collective_dephasing, ) - result = pim.solve(initial_state, tlist, options=None) + result = pim.solve(initial_state, tlist) return result def c_ops(self): @@ -1789,12 +1789,10 @@ def coefficient_matrix(self): sparse_M[k, int(current_col)] = taus[tau] return sparse_M.tocsr() - def solve(self, rho0, tlist, options=None): + def solve(self, rho0, tlist): """ Solve the ODE for the evolution of diagonal states and Hamiltonians. """ - if options is None: - options = SolverOptions() output = Result() output.solver = "pisolve" output.times = tlist diff --git a/qutip/random_objects.py b/qutip/random_objects.py index b0bcf85c18..be759ae9db 100644 --- a/qutip/random_objects.py +++ b/qutip/random_objects.py @@ -24,6 +24,7 @@ to_super, to_choi, to_chi, to_kraus, to_stinespring) from .core import data as _data from .core.dimensions import flatten +from . 
import settings _RAND = default_rng() @@ -210,7 +211,7 @@ def _merge_shuffle_blocks(blocks, generator): def rand_herm(dimensions, density=0.30, distribution="fill", *, - eigenvalues=(), seed=None, dtype=_data.CSR): + eigenvalues=(), seed=None, dtype=None): """Creates a random sparse Hermitian quantum object. Parameters @@ -274,13 +275,16 @@ def rand_herm(dimensions, density=0.30, distribution="fill", *, while _data.csr.nnz(out) < 0.95 * nvals: out = _rand_jacobi_rotation(out, generator) out = Qobj(out, type='oper', dims=dims, isherm=True, copy=False) + dtype = dtype or settings.core["default_dtype"] or _data.CSR else: pos_def = distribution == "pos_def" if density < 0.5: M = _rand_herm_sparse(N, density, pos_def, generator) + dtype = dtype or settings.core["default_dtype"] or _data.CSR else: M = _rand_herm_dense(N, density, pos_def, generator) + dtype = dtype or settings.core["default_dtype"] or _data.Dense out = Qobj(M, type='oper', dims=dims, isherm=True, copy=False) @@ -332,7 +336,7 @@ def _rand_herm_dense(N, density, pos_def, generator): def rand_unitary(dimensions, density=1, distribution="haar", *, - seed=None, dtype=_data.Dense): + seed=None, dtype=None): r"""Creates a random sparse unitary quantum object. Parameters @@ -366,6 +370,7 @@ def rand_unitary(dimensions, density=1, distribution="haar", *, oper : qobj Unitary quantum operator. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense N, dims = _implicit_tensor_dimensions(dimensions) if distribution not in ["haar", "exp"]: raise ValueError("distribution must be one of {'haar', 'exp'}") @@ -434,7 +439,7 @@ def _rand_unitary_haar(N, generator): def rand_ket(dimensions, density=1, distribution="haar", *, - seed=None, dtype=_data.Dense): + seed=None, dtype=None): """Creates a random ket vector. Parameters @@ -469,6 +474,7 @@ def rand_ket(dimensions, density=1, distribution="haar", *, oper : qobj Ket quantum state vector. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense generator = _get_generator(seed) N, dims = _implicit_tensor_dimensions(dimensions) if distribution not in ["haar", "fill"]: @@ -494,7 +500,8 @@ def rand_ket(dimensions, density=1, distribution="haar", *, def rand_dm(dimensions, density=0.75, distribution="ginibre", *, - eigenvalues=(), rank=None, seed=None, dtype=_data.CSR): + eigenvalues=(), rank=None, seed=None, + dtype=None): r"""Creates a random density matrix of the desired dimensions. Parameters @@ -541,6 +548,7 @@ def rand_dm(dimensions, density=0.75, distribution="ginibre", *, oper : qobj Density matrix quantum operator. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense generator = _get_generator(seed) N, dims = _implicit_tensor_dimensions(dimensions) distributions = set(["eigen", "ginibre", "hs", "pure", "herm"]) @@ -620,7 +628,8 @@ def _rand_dm_ginibre(N, rank, generator): return rho -def rand_kraus_map(dimensions, *, seed=None, dtype=_data.Dense): +def rand_kraus_map(dimensions, *, seed=None, + dtype=None): """ Creates a random CPTP map on an N-dimensional Hilbert space in Kraus form. @@ -647,6 +656,7 @@ def rand_kraus_map(dimensions, *, seed=None, dtype=_data.Dense): N^2 x N x N qobj operators. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense N, dims = _implicit_tensor_dimensions(dimensions) # Random unitary (Stinespring Dilation) @@ -657,7 +667,8 @@ def rand_kraus_map(dimensions, *, seed=None, dtype=_data.Dense): for x in oper_list] -def rand_super(dimensions, *, superrep="super", seed=None, dtype=_data.Dense): +def rand_super(dimensions, *, superrep="super", seed=None, + dtype=None): """ Returns a randomly drawn superoperator acting on operators acting on N dimensions. @@ -681,6 +692,7 @@ def rand_super(dimensions, *, superrep="super", seed=None, dtype=_data.Dense): Storage representation. Any data-layer known to `qutip.data.to` is accepted. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense generator = _get_generator(seed) from .solver.propagator import propagator N, dims = _implicit_tensor_dimensions(dimensions, superoper=True) @@ -698,7 +710,8 @@ def rand_super(dimensions, *, superrep="super", seed=None, dtype=_data.Dense): def rand_super_bcsz(dimensions, enforce_tp=True, rank=None, *, - superrep="super", seed=None, dtype=_data.CSR): + superrep="super", seed=None, + dtype=None): """ Returns a random superoperator drawn from the Bruzda et al ensemble for CPTP maps [BCSZ08]_. Note that due to @@ -741,6 +754,7 @@ def rand_super_bcsz(dimensions, enforce_tp=True, rank=None, *, A superoperator acting on vectorized dim × dim density operators, sampled from the BCSZ distribution. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR generator = _get_generator(seed) N, dims = _implicit_tensor_dimensions(dimensions, superoper=True) @@ -800,7 +814,7 @@ def rand_super_bcsz(dimensions, enforce_tp=True, rank=None, *, def rand_stochastic(dimensions, density=0.75, kind='left', - *, seed=None, dtype=_data.CSR): + *, seed=None, dtype=None): """Generates a random stochastic matrix. Parameters @@ -830,6 +844,7 @@ def rand_stochastic(dimensions, density=0.75, kind='left', oper : qobj Quantum operator form of stochastic matrix. 
""" + dtype = dtype or settings.core["default_dtype"] or _data.Dense generator = _get_generator(seed) N, dims = _implicit_tensor_dimensions(dimensions) num_elems = max([int(np.ceil(N*(N+1)*density)/2), N]) diff --git a/qutip/settings.py b/qutip/settings.py index 7537942884..29ee053b75 100644 --- a/qutip/settings.py +++ b/qutip/settings.py @@ -281,8 +281,9 @@ def colorblind_safe(self, value): def __str__(self): lines = ["Qutip settings:"] for attr in self.__dir__(): - if not attr.startswith('_') and attr != "core": + if not attr.startswith('_') and attr not in ["core", "compile"]: lines.append(f" {attr}: {self.__getattribute__(attr)}") + lines.append(f" compile: {self.compile.__repr__(full=False)}") return '\n'.join(lines) def __repr__(self): diff --git a/qutip/simdiag.py b/qutip/simdiag.py index 370ade6552..2f875ff82b 100644 --- a/qutip/simdiag.py +++ b/qutip/simdiag.py @@ -64,7 +64,7 @@ def simdiag(ops, evals: bool = True, *, will often be eigenvectors of only the first operator. Returns - -------- + ------- eigs : tuple Tuple of arrays representing eigvecs and eigvals of quantum objects corresponding to simultaneous eigenvectors and eigenvalues for each diff --git a/qutip/solve/_stochastic.pyx b/qutip/solve/_stochastic.pyx deleted file mode 100644 index 91799251bf..0000000000 --- a/qutip/solve/_stochastic.pyx +++ /dev/null @@ -1,2345 +0,0 @@ -#cython: language_level=3 - -import numpy as np -cimport numpy as np -cimport cython -from cpython.exc cimport PyErr_CheckSignals -from libc.math cimport fabs -from qutip.core.cy.qobjevo cimport QobjEvo -from .. 
import Qobj, unstack_columns -import scipy.sparse as sp -from scipy.sparse.linalg import LinearOperator -from scipy.linalg.cython_blas cimport zaxpy, zdotu, zdotc, zcopy, zdscal, zscal -from scipy.linalg.cython_blas cimport dznrm2 as raw_dznrm2 - -from qutip.core.data cimport dense, Dense -import qutip.core.data as _data - -cdef extern from "" namespace "std" nogil: - double complex conj(double complex x) - -cdef int ZERO=0 -cdef double DZERO=0 -cdef complex ZZERO=0j -cdef int ONE=1 - -"""Some of blas wrapper""" -@cython.boundscheck(False) -cdef void _axpy(complex a, complex[::1] x, complex[::1] y): - """ y += a*x""" - cdef int l = x.shape[0] - zaxpy(&l, &a, &x[0], &ONE, &y[0], &ONE) - -@cython.boundscheck(False) -cdef void copy(complex[::1] x, complex[::1] y): - """ y = x """ - cdef int l = x.shape[0] - zcopy(&l, &x[0], &ONE, &y[0], &ONE) - -@cython.boundscheck(False) -cdef complex _dot(complex[::1] x, complex[::1] y): - """ = x_i * y_i """ - cdef int l = x.shape[0] - return zdotu(&l, &x[0], &ONE, &y[0], &ONE) - -@cython.boundscheck(False) -cdef complex _dotc(complex[::1] x, complex[::1] y): - """ = conj(x_i) * y_i """ - cdef int l = x.shape[0] - return zdotc(&l, &x[0], &ONE, &y[0], &ONE) - -@cython.boundscheck(False) -cdef double _dznrm2(complex[::1] vec): - """ = sqrt( x_i**2 ) """ - cdef int l = vec.shape[0] - return raw_dznrm2(&l, &vec[0], &ONE) - -@cython.boundscheck(False) -cdef void _scale(double a, complex[::1] x): - """ x *= a """ - cdef int l = x.shape[0] - zdscal(&l, &a, &x[0], &ONE) - -@cython.boundscheck(False) -cdef void _zscale(complex a, complex[::1] x): - """ x *= a """ - cdef int l = x.shape[0] - zscal(&l, &a, &x[0], &ONE) - -@cython.boundscheck(False) -cdef void _zero(complex[::1] x): - """ x *= 0 """ - cdef int l = x.shape[0] - zdscal(&l, &DZERO, &x[0], &ONE) - -@cython.boundscheck(False) -cdef void _zero_2d(complex[:,::1] x): - """ x *= 0 """ - cdef int l = x.shape[0]*x.shape[1] - zdscal(&l, &DZERO, &x[0,0], &ONE) - -@cython.boundscheck(False) 
-cdef void _zero_3d(complex[:,:,::1] x): - """ x *= 0 """ - cdef int l = x.shape[0]*x.shape[1]*x.shape[2] - zdscal(&l, &DZERO, &x[0,0,0], &ONE) - -@cython.boundscheck(False) -cdef void _zero_4d(complex[:,:,:,::1] x): - """ x *= 0 """ - cdef int l = x.shape[0]*x.shape[1]*x.shape[2]*x.shape[3] - zdscal(&l, &DZERO, &x[0,0,0,0], &ONE) - -@cython.boundscheck(False) -@cython.initializedcheck(False) -cdef Dense _dense_wrap(double complex [::1] x): - return dense.wrap(&x[0], x.shape[0], 1) - -# %%%%%%%%%%%%%%%%%%%%%%%%% -# functions for ensuring that the states stay physical -@cython.cdivision(True) -@cython.boundscheck(False) -cdef void _normalize_inplace(complex[::1] vec): - """ make norm of vec equal to 1""" - cdef int l = vec.shape[0] - cdef double norm = 1.0/_dznrm2(vec) - zdscal(&l, &norm, &vec[0], &ONE) - -# to move eventually, 10x faster than scipy's norm. -@cython.cdivision(True) -@cython.boundscheck(False) -def normalize_inplace(complex[::1] vec): - """ make norm of vec equal to 1""" - cdef int l = vec.shape[0] - cdef double norm = 1.0/_dznrm2(vec) - zdscal(&l, &norm, &vec[0], &ONE) - return fabs(norm-1) - -@cython.cdivision(True) -@cython.boundscheck(False) -cdef void _normalize_rho(complex[::1] rho): - """ Ensure that the density matrix trace is one and - that the composing states are normalized. - """ - cdef int l = rho.shape[0] - cdef int N = np.sqrt(l) - cdef complex[::1,:] mat = np.reshape(rho, (N,N), order="F") - cdef complex[::1,:] eivec - cdef double[::1] eival = np.zeros(N) - eivec = _data.eigs(Dense(mat, copy=False), True, True)[1].full() - - _zero(rho) - cdef int i, j, k - cdef double sum - - sum = 0. - for i in range(N): - _normalize_inplace(eivec[:,i]) - if eival[i] < 0: - eival[i] = 0. 
- sum += eival[i] - if sum != 1.: - for i in range(N): - eival[i] /= sum - - for i in range(N): - for j in range(N): - for k in range(N): - rho[j+N*k] += conj(eivec[k,i])*eivec[j,i]*eival[i] - -# Available solvers: -cpdef enum Solvers: - # order 0.5 - EULER_SOLVER = 50 - # order 0.5 strong, 1.0 weak? - PC_SOLVER = 101 - PC_2_SOLVER = 104 - # order 1.0 - PLATEN_SOLVER = 100 - MILSTEIN_SOLVER = 102 - MILSTEIN_IMP_SOLVER = 103 - # order 1.5 - EXPLICIT1_5_SOLVER = 150 - TAYLOR1_5_SOLVER = 152 - TAYLOR1_5_IMP_SOLVER = 153 - # order 2.0 - TAYLOR2_0_SOLVER = 202 - - # Special solvers - PHOTOCURRENT_SOLVER = 60 - PHOTOCURRENT_PC_SOLVER = 110 - ROUCHON_SOLVER = 120 - - # For initialisation - SOLVER_NOT_SET = 0 - - -cdef class TaylorNoise: - """ Object to build the Stratonovich integral for order 2.0 strong taylor. - Complex enough that I fell it should be kept separated from the main solver. - """ - cdef: - int p - double rho, alpha - double aFactor, bFactor - double BFactor, CFactor - double dt, dt_sqrt - - @cython.cdivision(True) - def __init__(self, int p, double dt): - self.p = p - self.dt = dt - self.dt_sqrt = dt**.5 - cdef double pi = np.pi - cdef int i - - cdef double rho = 0. - for i in range(1,p+1): - rho += (i+0.)**-2 - rho = 1./3.-2*rho/(pi**2) - self.rho = (rho)**.5 - self.aFactor = -(2)**.5/pi - - cdef double alpha = 0. - for i in range(1,p+1): - alpha += (i+0.)**-4 - alpha = pi/180-alpha/(2*pi**2)/pi - self.alpha = (alpha)**.5 - self.bFactor = (0.5)**.5/pi**2 - - self.BFactor = 1/(4*pi**2) - self.CFactor = -1/(2*pi**2) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cpdef void order2(self, double[::1] noise, double[::1] dws): - cdef int p = self.p - cdef int r, l - cdef double s = 1/6. - cdef double a = 0 - cdef double b = 0 - cdef double AA = 0 - cdef double BB = 0 - cdef double CC = 0 - - for r in range(p): - a += noise[3+r]/(r+1.) - b += noise[3+r+p]/(r+1.)/(r+1.) 
- BB += (1/(r+1.)/(r+1.)) *\ - (noise[3+r]*noise[3+r]+noise[3+r+p]*noise[3+r+p]) - for l in range(p): - if r != l: - CC += (r+1.)/((r+1.)*(r+1.)-(l+1.)*(l+1.)) *\ - (1/(l+1.)*noise[3+r]*noise[3+l] - \ - (l+1.)/(r+1.)*noise[3+r+p]*noise[3+l+p]) - - a = self.aFactor * a + self.rho * noise[1] - b = self.bFactor * b + self.alpha * noise[2] - AA = 0.25*a*a - BB *= self.BFactor - CC *= self.CFactor - - dws[0] = noise[0] # dw - dws[1] = 0.5*(noise[0]+a) # dz - dws[2] = noise[0]*(noise[0]*s -0.25*a -0.5*b) +BB +CC # j011 - dws[3] = noise[0]*(noise[0]*s + b) -AA -BB # j101 - dws[4] = noise[0]*(noise[0]*s +0.25*a -0.5*b) +AA -CC # j110 - - -cdef class StochasticSolver: - """ stochastic solver class base - Does most of the initialisation, drive the simulation and contain the - stochastic integration algorythm that do not depend on the physics. - - This class is not to be used as is, the function computing the evolution's - derivative are specified in it's child class which define the deterministic - and stochastic contributions. - - PYTHON METHODS: - set_solver: - Receive the data for the integration. - Prepare buffers - - cy_sesolve_single_trajectory: - Run one trajectory. - - - INTERNAL METHODS - make_noise: - create the stochastic noise - run: - evolution between timestep (substep) - solver's method: - stochastic integration algorithm - euler - milstein - taylor - ... 
- - CHILD: - SSESolver: stochastic schrodinger evolution - SMESolver: stochastic master evolution - PcSSESolver: photocurrent stochastic schrodinger evolution - PcSMESolver: photocurrent stochastic master evolution - PmSMESolver: positive map stochastic master evolution - GenericSSolver: general (user defined) stochastic evolution - - CHILD METHODS: - set_data: - Read data about the system - d1: - deterministic part - d2: - non-deterministic part - derivatives: - d1, d2 and their derivatives up to dt**1.5 - multiple sc_ops - derivativesO2: - d1, d2 and there derivatives up to dt**2.0 - one sc_ops - """ - cdef int l_vec, num_ops - cdef Solvers solver - cdef int num_step, num_substeps, num_dw - cdef int normalize - cdef double dt - cdef int noise_type - cdef object custom_noise - cdef double[::1] dW_factor - cdef unsigned int[::1] seed - cdef object sso - - # buffer to not redo the initialisation at each substep - cdef complex[:, ::1] buffer_1d - cdef complex[:, :, ::1] buffer_2d - cdef complex[:, :, :, ::1] buffer_3d - cdef complex[:, :, :, ::1] buffer_4d - cdef complex[:, ::1] expect_buffer_1d - cdef complex[:, ::1] expect_buffer_2d - cdef complex[:, :, ::1] expect_buffer_3d - cdef complex[:, ::1] func_buffer_1d - cdef complex[:, ::1] func_buffer_2d - cdef complex[:, :, ::1] func_buffer_3d - - cdef TaylorNoise order2noise - - def __init__(self): - self.l_vec = 0 - self.num_ops = 0 - self.solver = SOLVER_NOT_SET - - def set_solver(self, sso): - """ Prepare the solver from the info in StochasticSolverOptions - - Parameters - ---------- - sso : StochasticSolverOptions - Data of the stochastic system - """ - self.set_data(sso) - self.sso = sso - - self.solver = sso.solver_code - self.dt = sso.dt - self.num_substeps = sso.nsubsteps - self.normalize = sso.normalize - self.num_step = len(sso.times) - self.num_dw = len(sso.sops) - if self.solver in [EXPLICIT1_5_SOLVER, - TAYLOR1_5_SOLVER, - TAYLOR1_5_IMP_SOLVER]: - self.num_dw *= 2 - if self.solver in [TAYLOR2_0_SOLVER]: - 
self.num_dw *= 3 + 2*sso.p - self.order2noise = TaylorNoise(sso.p, self.dt) - # prepare buffers for the solvers - nb_solver = [0,0,0,0] - nb_func = [0,0,0] - nb_expect = [0,0,0] - - # %%%%%%%%%%%%%%%%%%%%%%%%% - # Depending on the solver, determine the numbers of buffers of each - # shape to prepare. (~30% slower when not preallocating buffer) - # nb_solver : buffer to contain the states used by solver - # nb_func : buffer for states used used by d1, d2 and derivative functions - # nb_expect : buffer to store expectation values. - if self.solver is EULER_SOLVER: - nb_solver = [0,1,0,0] - elif self.solver is PHOTOCURRENT_SOLVER: - nb_solver = [1,0,0,0] - nb_func = [1,0,0] - elif self.solver is PLATEN_SOLVER: - nb_solver = [2,5,0,0] - elif self.solver is PC_SOLVER: - nb_solver = [4,1,1,0] - elif self.solver is MILSTEIN_SOLVER: - nb_solver = [0,1,1,0] - elif self.solver is MILSTEIN_IMP_SOLVER: - nb_solver = [1,1,1,0] - elif self.solver is PC_2_SOLVER: - nb_solver = [5,1,1,0] - elif self.solver is PHOTOCURRENT_PC_SOLVER: - nb_solver = [1,1,0,0] - nb_func = [1,0,0] - elif self.solver is ROUCHON_SOLVER: - nb_solver = [2,0,0,0] - elif self.solver is EXPLICIT1_5_SOLVER: - nb_solver = [5,8,3,0] - elif self.solver is TAYLOR1_5_SOLVER: - nb_solver = [2,3,1,1] - elif self.solver is TAYLOR1_5_IMP_SOLVER: - nb_solver = [2,3,1,1] - elif self.solver is TAYLOR2_0_SOLVER: - nb_solver = [11,0,0,0] - - if self.solver in [PC_SOLVER, MILSTEIN_SOLVER, MILSTEIN_IMP_SOLVER, - PC_2_SOLVER, TAYLOR1_5_SOLVER, TAYLOR1_5_IMP_SOLVER]: - if sso.me: - nb_func = [1,0,0] - nb_expect = [1,1,0] - else: - nb_func = [2,1,1] - nb_expect = [2,1,1] - elif self.solver in [PHOTOCURRENT_SOLVER, PHOTOCURRENT_PC_SOLVER]: - nb_expect = [1,0,0] - elif self.solver is TAYLOR2_0_SOLVER: - if sso.me: - nb_func = [2,0,0] - nb_expect = [2,0,0] - else: - nb_func = [14,0,0] - nb_expect = [0,0,0] - elif self.solver is ROUCHON_SOLVER: - nb_expect = [1,0,0] - else: - if not sso.me: - nb_func = [1,0,0] - - self.buffer_1d = 
np.zeros((nb_solver[0], self.l_vec), dtype=complex) - self.buffer_2d = np.zeros((nb_solver[1], self.num_ops, self.l_vec), - dtype=complex) - self.buffer_3d = np.zeros((nb_solver[2], self.num_ops, self.num_ops, self.l_vec), - dtype=complex) - if nb_solver[3]: - self.buffer_4d = np.zeros((self.num_ops, self.num_ops, self.num_ops, self.l_vec), - dtype=complex) - - self.expect_buffer_1d = np.zeros((nb_expect[0], self.num_ops), dtype=complex) - if nb_expect[1]: - self.expect_buffer_2d = np.zeros((self.num_ops, self.num_ops), dtype=complex) - if nb_expect[2]: - self.expect_buffer_3d = np.zeros((self.num_ops, self.num_ops, self.num_ops), dtype=complex) - - self.func_buffer_1d = np.zeros((nb_func[0], self.l_vec), dtype=complex) - if nb_func[1]: - self.func_buffer_2d = np.zeros((self.num_ops, self.l_vec), dtype=complex) - if nb_func[2]: - self.func_buffer_3d = np.zeros((self.num_ops, self.num_ops, self.l_vec), dtype=complex) - - self.noise_type = sso.noise_type - self.dW_factor = np.array(sso.dW_factors, dtype=np.float64) - if self.noise_type == 1: - self.custom_noise = sso.noise - elif self.noise_type == 0: - self.seed = sso.noise - - def set_data(self, sso): - """Set solver specific operator""" - pass - - cdef np.ndarray[double, ndim=3] make_noise(self, int n): - """Create the random numbers for the stochastic process""" - if self.solver in [PHOTOCURRENT_SOLVER, PHOTOCURRENT_PC_SOLVER] and self.noise_type == 0: - # photocurrent, just seed, - np.random.seed(self.seed[n]) - return np.zeros((self.num_step, self.num_substeps, self.num_dw)) - if self.noise_type == 0: - np.random.seed(self.seed[n]) - return np.random.randn(self.num_step, self.num_substeps, self.num_dw) *\ - np.sqrt(self.dt) - elif self.noise_type == 1: - return self.custom_noise[n,:,:,:] - - @cython.boundscheck(False) - @cython.wraparound(False) - def cy_sesolve_single_trajectory(self, int n): - """ Run the one of the trajectories of the stochastic system. 
- - Parameters - ---------- - n : int - Number of the iterations - - sso : StochasticSolverOptions - Data of the stochastic system - - Returns - ------- - states_list : list of qobj - State of the system at each time - - noise : array - noise at each step of the solver - - measurements : array - measurements value at each timestep for each m_ops - - expect : array - expectation value at each timestep for each e_ops - - """ - sso = self.sso - cdef double[::1] times = sso.times - cdef complex[::1] rho_t - cdef double t - cdef int m_idx, t_idx, e_idx - cdef np.ndarray[double, ndim=3] noise = self.make_noise(n) - cdef int tlast = times.shape[0] - - rho_t = sso.rho0.copy() - dims = sso.state0.dims - - expect = np.zeros((len(sso.ce_ops), len(sso.times)), dtype=complex) - measurements = np.zeros((len(times), len(sso.cm_ops)), dtype=complex) - states_list = [] - for t_idx, t in enumerate(times): - PyErr_CheckSignals() - if sso.ce_ops: - for e_idx, e in enumerate(sso.ce_ops): - s = e.expect_data(t, _dense_wrap(rho_t)) - expect[e_idx, t_idx] = s - if sso.store_states or not sso.ce_ops: - if sso.me: - states_list.append(Qobj(unstack_columns(np.asarray(rho_t)), - dims=dims)) - else: - states_list.append(Qobj(np.asarray(rho_t), dims=dims)) - - if t_idx != tlast-1: - rho_t = self.run(t, self.dt, noise[t_idx, :, :], - rho_t, self.num_substeps) - - if sso.store_measurement: - for m_idx, m in enumerate(sso.cm_ops): - m_expt = m.expect_data(t, _dense_wrap(rho_t)) - measurements[t_idx, m_idx] = m_expt + self.dW_factor[m_idx] * \ - sum(noise[t_idx, :, m_idx]) / (self.dt * self.num_substeps) - - if sso.method == 'heterodyne': - measurements = measurements.reshape(len(times), len(sso.cm_ops)//2, 2) - - return states_list, noise, measurements, expect - - @cython.boundscheck(False) - cdef complex[::1] run(self, double t, double dt, double[:, ::1] noise, - complex[::1] vec, int num_substeps): - """ Do one time full step""" - cdef complex[::1] out = np.zeros(self.l_vec, dtype=complex) - 
cdef int i - if self.solver is EULER_SOLVER: - for i in range(num_substeps): - self.euler(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is PHOTOCURRENT_SOLVER: - for i in range(num_substeps): - self.photocurrent(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is PLATEN_SOLVER: - for i in range(num_substeps): - self.platen(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is PC_SOLVER: - for i in range(num_substeps): - self.pred_corr(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is MILSTEIN_SOLVER: - for i in range(num_substeps): - self.milstein(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is MILSTEIN_IMP_SOLVER: - for i in range(num_substeps): - self.milstein_imp(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is PC_2_SOLVER: - for i in range(num_substeps): - self.pred_corr_a(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is PHOTOCURRENT_PC_SOLVER: - for i in range(num_substeps): - self.photocurrent_pc(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is ROUCHON_SOLVER: - for i in range(num_substeps): - self.rouchon(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is EXPLICIT1_5_SOLVER: - for i in range(num_substeps): - self.platen15(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is TAYLOR1_5_SOLVER: - for i in range(num_substeps): - self.taylor15(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is TAYLOR1_5_IMP_SOLVER: - for i in range(num_substeps): - self.taylor15_imp(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - elif self.solver is TAYLOR2_0_SOLVER: - for i in range(num_substeps): - self.taylor20(t + i*dt, dt, noise[i, :], vec, out) - out, vec = vec, out - - if self.normalize: - 
self._normalize_inplace(vec) - return vec - - cdef void _normalize_inplace(self, complex[::1] vec): - _normalize_inplace(vec) - - # Dummy functions - # Needed for compilation since ssesolve is not stand-alone - cdef void d1(self, double t, complex[::1] v, complex[::1] out): - """ deterministic part of the evolution - depend on schrodinger vs master vs photocurrent - """ - pass - - cdef void d2(self, double t, complex[::1] v, complex[:, ::1] out): - """ stochastic part of the evolution - depend on schrodinger vs master vs photocurrent - """ - pass - - cdef void implicit(self, double t, np.ndarray[complex, ndim=1] dvec, - complex[::1] out, np.ndarray[complex, ndim=1] guess) except *: - """ Do the step X(t+dt) = f(X(t+dt)) + g(X(t)) """ - pass - - cdef void derivatives(self, double t, int deg, complex[::1] rho, - complex[::1] a, complex[:, ::1] b, - complex[:, :, ::1] Lb, complex[:,::1] La, - complex[:, ::1] L0b, complex[:, :, :, ::1] LLb, - complex[::1] L0a): - """ Obtain the multiple terms for stochastic taylor expension - Up to order 1.5 - multiple sc_ops - """ - pass - - cdef void derivativesO2(self, double t, complex[::1] rho, - complex[::1] a, complex[::1] b, complex[::1] Lb, - complex[::1] La, complex[::1] L0b, complex[::1] LLb, - complex[::1] L0a, - complex[::1] LLa, complex[::1] LL0b, - complex[::1] L0Lb, complex[::1] LLLb): - """ Obtain the multiple terms for stochastic taylor expension - Up to order 2.0 - One sc_ops - """ - pass - - cdef void photocurrent(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """Special integration scheme: - photocurrent collapse + euler evolution - """ - pass - - cdef void photocurrent_pc(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """Special integration scheme: - photocurrent collapse + predictor-corrector evolution - """ - pass - - cdef void rouchon(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """Special 
integration scheme: - Force valid density matrix using positive map - Pierre Rouchon, Jason F. Ralph - arXiv:1410.5345 [quant-ph] - """ - pass - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef void euler(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """Integration scheme: - Basic Euler order 0.5 - dV = d1 dt + d2_i dW_i - Numerical Solution of Stochastic Differential Equations - By Peter E. Kloeden, Eckhard Platen - """ - cdef int i, j - cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] - _zero_2d(d2) - copy(vec, out) - self.d1(t, vec, out) - self.d2(t, vec, d2) - for i in range(self.num_ops): - _axpy(noise[i], d2[i,:], out) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void platen(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """ - Platen rhs function for both master eq and schrodinger eq. - dV = -iH* (V+Vt)/2 * dt + (d1(V)+d1(Vt))/2 * dt - + (2*d2_i(V)+d2_i(V+)+d2_i(V-))/4 * dW_i - + (d2_i(V+)-d2_i(V-))/4 * (dW_i**2 -dt) * dt**(-.5) - - Vt = V -iH*V*dt + d1*dt + d2_i*dW_i - V+/- = V -iH*V*dt + d1*dt +/- d2_i*dt**.5 - The Theory of Open Quantum Systems - Chapter 7 Eq. (7.47), H.-P Breuer, F. 
Petruccione - """ - cdef int i, j, k - cdef double sqrt_dt = np.sqrt(dt) - cdef double sqrt_dt_inv = 0.25/sqrt_dt - cdef double dw, dw2 - cdef complex[::1] d1 = self.buffer_1d[0,:] - cdef complex[::1] Vt = self.buffer_1d[1,:] - cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] - cdef complex[:, ::1] Vm = self.buffer_2d[1,:,:] - cdef complex[:, ::1] Vp = self.buffer_2d[2,:,:] - cdef complex[:, ::1] d2p = self.buffer_2d[3,:,:] - cdef complex[:, ::1] d2m = self.buffer_2d[4,:,:] - _zero(d1) - _zero_2d(d2) - - self.d1(t, vec, d1) - self.d2(t, vec, d2) - _axpy(1.0,vec,d1) - copy(d1,Vt) - copy(d1,out) - _scale(0.5,out) - for i in range(self.num_ops): - copy(d1,Vp[i,:]) - copy(d1,Vm[i,:]) - _axpy( sqrt_dt,d2[i,:],Vp[i,:]) - _axpy(-sqrt_dt,d2[i,:],Vm[i,:]) - _axpy(noise[i],d2[i,:],Vt) - _zero(d1) - self.d1(t, Vt, d1) - _axpy(0.5,d1,out) - _axpy(0.5,vec,out) - for i in range(self.num_ops): - _zero_2d(d2p) - _zero_2d(d2m) - self.d2(t, Vp[i,:], d2p) - self.d2(t, Vm[i,:], d2m) - dw = noise[i] * 0.25 - _axpy(dw,d2m[i,:],out) - _axpy(2*dw,d2[i,:],out) - _axpy(dw,d2p[i,:],out) - for j in range(self.num_ops): - if i == j: - dw2 = sqrt_dt_inv * (noise[i]*noise[i] - dt) - else: - dw2 = sqrt_dt_inv * noise[i] * noise[j] - _axpy(dw2,d2p[j,:],out) - _axpy(-dw2,d2m[j,:],out) - - @cython.wraparound(False) - @cython.boundscheck(False) - @cython.cdivision(True) - cdef void pred_corr(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """ - Chapter 15.5 Eq. (5.4) - Numerical Solution of Stochastic Differential Equations - By Peter E. Kloeden, Eckhard Platen - """ - # a=0. 
b=0.5 - cdef double dt_2 = dt*0.5 - cdef complex[::1] euler = self.buffer_1d[0,:] - cdef complex[::1] a_pred = self.buffer_1d[1,:] - cdef complex[::1] b_pred = self.buffer_1d[2,:] - cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] - cdef complex[:, :, ::1] dd2 = self.buffer_3d[0,:,:,:] - _zero(a_pred) - _zero(b_pred) - _zero_2d(d2) - _zero_3d(dd2) - self.derivatives(t, 1, vec, a_pred, d2, dd2, None, None, None, None) - copy(vec, euler) - copy(vec, out) - _axpy(1.0, a_pred, euler) - for i in range(self.num_ops): - _axpy(noise[i], d2[i,:], b_pred) - _axpy(-dt_2, dd2[i,i,:], a_pred) - _axpy(1.0, a_pred, out) - _axpy(1.0, b_pred, euler) - _axpy(0.5, b_pred, out) - _zero_2d(d2) - self.d2(t + dt, euler, d2) - for i in range(self.num_ops): - _axpy(noise[i]*0.5, d2[i,:], out) - - @cython.wraparound(False) - @cython.boundscheck(False) - @cython.cdivision(True) - cdef void pred_corr_a(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """ - Chapter 15.5 Eq. (5.4) - Numerical Solution of Stochastic Differential Equations - By Peter E. 
Kloeden, Eckhard Platen - """ - # a=0.5, b=0.5 - cdef int i, j, k - cdef complex[::1] euler = self.buffer_1d[0,:] - cdef complex[::1] a_pred = self.buffer_1d[1,:] - _zero(a_pred) - cdef complex[::1] a_corr = self.buffer_1d[2,:] - _zero(a_corr) - cdef complex[::1] b_pred = self.buffer_1d[3,:] - _zero(b_pred) - cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] - _zero_2d(d2) - cdef complex[:, :, ::1] dd2 = self.buffer_3d[0,:,:,:] - _zero_3d(dd2) - - cdef double dt_2 = dt*0.5 - self.derivatives(t, 1, vec, a_pred, d2, dd2, None, None, None, None) - copy(vec, euler) - _axpy(1.0, a_pred, euler) - for i in range(self.num_ops): - _axpy(noise[i], d2[i,:], b_pred) - _axpy(-dt_2, dd2[i,i,:], a_pred) - _axpy(1.0, b_pred, euler) - copy(vec, out) - _axpy(0.5, a_pred, out) - _axpy(0.5, b_pred, out) - _zero_2d(d2) - _zero_3d(dd2) - self.derivatives(t, 1, euler, a_corr, d2, dd2, None, None, None, None) - for i in range(self.num_ops): - _axpy(noise[i]*0.5, d2[i,:], out) - _axpy(-dt_2, dd2[i,i,:], a_corr) - _axpy(0.5, a_corr, out) - - @cython.wraparound(False) - @cython.boundscheck(False) - @cython.cdivision(True) - cdef void milstein(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """ - Chapter 10.3 Eq. (3.1) - Numerical Solution of Stochastic Differential Equations - By Peter E. 
Kloeden, Eckhard Platen - - dV = -iH*V*dt + d1*dt + d2_i*dW_i - + 0.5*d2_i' d2_j*(dW_i*dw_j -dt*delta_ij) - """ - cdef int i, j, k - cdef double dw - cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] - cdef complex[:, :, ::1] dd2 = self.buffer_3d[0,:,:,:] - _zero_2d(d2) - _zero_3d(dd2) - copy(vec,out) - self.derivatives(t, 1, vec, out, d2, dd2, None, None, None, None) - for i in range(self.num_ops): - _axpy(noise[i],d2[i,:],out) - for i in range(self.num_ops): - for j in range(i, self.num_ops): - if (i == j): - dw = (noise[i] * noise[i] - dt) * 0.5 - else: - dw = (noise[i] * noise[j]) - _axpy(dw,dd2[i,j,:],out) - - @cython.wraparound(False) - @cython.boundscheck(False) - @cython.cdivision(True) - cdef void milstein_imp(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out) except *: - """ - Chapter 12.2 Eq. (2.9) - Numerical Solution of Stochastic Differential Equations - By Peter E. Kloeden, Eckhard Platen - """ - cdef int i, j, k - cdef double dw - cdef np.ndarray[complex, ndim=1] guess = np.zeros((self.l_vec, ), - dtype=complex) - cdef np.ndarray[complex, ndim=1] dvec = np.zeros((self.l_vec, ), - dtype=complex) - cdef complex[::1] a = self.buffer_1d[0,:] - cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] - cdef complex[:, :, ::1] dd2 = self.buffer_3d[0,:,:,:] - _zero(a) - _zero_2d(d2) - _zero_3d(dd2) - self.derivatives(t, 1, vec, a, d2, dd2, None, None, None, None) - copy(vec, dvec) - _axpy(0.5, a, dvec) - for i in range(self.num_ops): - _axpy(noise[i], d2[i,:], dvec) - for i in range(self.num_ops): - for j in range(i, self.num_ops): - if (i == j): - dw = (noise[i] * noise[i] - dt) * 0.5 - else: - dw = (noise[i] * noise[j]) - _axpy(dw, dd2[i,j,:], dvec) - copy(dvec, guess) - _axpy(0.5, a, guess) - self.implicit(t+dt, dvec, out, guess) - - @cython.wraparound(False) - @cython.boundscheck(False) - @cython.cdivision(True) - cdef void taylor15(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """ - Chapter 
12.2 Eq. (2.18), - Numerical Solution of Stochastic Differential Equations - By Peter E. Kloeden, Eckhard Platen - """ - cdef complex[::1] a = self.buffer_1d[0, :] - cdef complex[:, ::1] b = self.buffer_2d[0, :, :] - cdef complex[:, :, ::1] Lb = self.buffer_3d[0, :, :, :] - cdef complex[:, ::1] L0b = self.buffer_2d[1,:,:] - cdef complex[:, ::1] La = self.buffer_2d[2,:,:] - cdef complex[:, :, :, ::1] LLb = self.buffer_4d[:, :, :, :] - cdef complex[::1] L0a = self.buffer_1d[1, :] - _zero(a) - _zero_2d(b) - _zero_3d(Lb) - _zero_2d(L0b) - _zero_2d(La) - _zero_4d(LLb) - _zero(L0a) - self.derivatives(t, 2, vec, a, b, Lb, La, L0b, LLb, L0a) - - cdef int i,j,k - cdef double[::1] dz, dw - dw = np.empty(self.num_ops) - dz = np.empty(self.num_ops) - # The dt of dz is included in the d1 part (Ldt) and the noise (dt**.5) - for i in range(self.num_ops): - dw[i] = noise[i] - dz[i] = 0.5 *(noise[i] + 1./np.sqrt(3) * noise[i+self.num_ops]) - copy(vec,out) - _axpy(1.0, a, out) - _axpy(0.5, L0a, out) - - for i in range(self.num_ops): - _axpy(dw[i], b[i,:], out) - _axpy(0.5*(dw[i]*dw[i]-dt), Lb[i,i,:], out) - _axpy(dz[i], La[i,:], out) - _axpy(dw[i]-dz[i], L0b[i,:], out) - _axpy(0.5 * ((1/3.) * dw[i] * dw[i] - dt) * dw[i], - LLb[i,i,i,:], out) - for j in range(i+1,self.num_ops): - _axpy((dw[i]*dw[j]), Lb[i,j,:], out) - _axpy(0.5*(dw[j]*dw[j]-dt)*dw[i], LLb[i,j,j,:], out) - _axpy(0.5*(dw[i]*dw[i]-dt)*dw[j], LLb[i,i,j,:], out) - for k in range(j+1,self.num_ops): - _axpy(dw[i]*dw[j]*dw[k], LLb[i,j,k,:], out) - - @cython.wraparound(False) - @cython.boundscheck(False) - @cython.cdivision(True) - cdef void taylor15_imp(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out) except *: - """ - Chapter 12.2 Eq. (2.18), - Numerical Solution of Stochastic Differential Equations - By Peter E. 
Kloeden, Eckhard Platen - """ - cdef complex[::1] a = self.buffer_1d[0, :] - cdef complex[:, ::1] b = self.buffer_2d[0, :, :] - cdef complex[:, :, ::1] Lb = self.buffer_3d[0, :, :, :] - cdef complex[:, ::1] L0b = self.buffer_2d[1,:,:] - cdef complex[:, ::1] La = self.buffer_2d[2,:,:] - cdef complex[:, :, :, ::1] LLb = self.buffer_4d[:, :, :, :] - cdef complex[::1] L0a = self.buffer_1d[1, :] - _zero(a) - _zero_2d(b) - _zero_3d(Lb) - _zero_2d(L0b) - _zero_2d(La) - _zero_4d(LLb) - _zero(L0a) - cdef np.ndarray[complex, ndim=1] guess = np.zeros((self.l_vec, ), - dtype=complex) - cdef np.ndarray[complex, ndim=1] vec_t = np.zeros((self.l_vec, ), - dtype=complex) - self.derivatives(t, 3, vec, a, b, Lb, La, L0b, LLb, L0a) - - cdef int i,j,k - cdef double[::1] dz, dw - dw = np.empty(self.num_ops) - dz = np.empty(self.num_ops) - # The dt of dz is included in the d1 part (Ldt) and the noise (dt**.5) - for i in range(self.num_ops): - dw[i] = noise[i] - dz[i] = 0.5 *(noise[i] + 1./np.sqrt(3) * noise[i+self.num_ops]) - copy(vec, vec_t) - _axpy(0.5, a, vec_t) - for i in range(self.num_ops): - _axpy(dw[i], b[i,:], vec_t) - _axpy(0.5*(dw[i]*dw[i]-dt), Lb[i,i,:], vec_t) - _axpy(dz[i]-dw[i]*0.5, La[i,:], vec_t) - _axpy(dw[i]-dz[i] , L0b[i,:], vec_t) - _axpy(0.5 * ((1/3.) * dw[i] * dw[i] - dt) * dw[i], - LLb[i,i,i,:], vec_t) - for j in range(i+1,self.num_ops): - _axpy((dw[i]*dw[j]), Lb[i,j,:], vec_t) - _axpy(0.5*(dw[j]*dw[j]-dt)*dw[i], LLb[i,j,j,:], vec_t) - _axpy(0.5*(dw[i]*dw[i]-dt)*dw[j], LLb[i,i,j,:], vec_t) - for k in range(j+1,self.num_ops): - _axpy(dw[i]*dw[j]*dw[k], LLb[i,j,k,:], vec_t) - copy(vec_t, guess) - _axpy(0.5, a, guess) - - self.implicit(t+dt, vec_t, out, guess) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void platen15(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - """ - Chapter 11.2 Eq. (2.13) - Numerical Solution of Stochastic Differential Equations - By Peter E. 
Kloeden, Eckhard Platen - """ - cdef int i, j, k - cdef double sqrt_dt = np.sqrt(dt) - cdef double sqrt_dt_inv = 1./sqrt_dt - cdef double ddz, ddw, ddd - cdef double[::1] dz, dw - dw = np.empty(self.num_ops) - dz = np.empty(self.num_ops) - for i in range(self.num_ops): - dw[i] = noise[i] - dz[i] = 0.5 *(noise[i] + 1./np.sqrt(3) * noise[i+self.num_ops]) - - cdef complex[::1] d1 = self.buffer_1d[0,:] - cdef complex[::1] d1p = self.buffer_1d[1,:] - cdef complex[::1] d1m = self.buffer_1d[2,:] - cdef complex[::1] V = self.buffer_1d[3,:] - cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] - cdef complex[:, ::1] dd2 = self.buffer_2d[1,:,:] - cdef complex[:, ::1] d2p = self.buffer_2d[2,:,:] - cdef complex[:, ::1] d2m = self.buffer_2d[3,:,:] - cdef complex[:, ::1] d2pp = self.buffer_2d[4,:,:] - cdef complex[:, ::1] d2mm = self.buffer_2d[5,:,:] - cdef complex[:, ::1] v2p = self.buffer_2d[6,:,:] - cdef complex[:, ::1] v2m = self.buffer_2d[7,:,:] - cdef complex[:, :, ::1] p2p = self.buffer_3d[0,:,:,:] - cdef complex[:, : ,::1] p2m = self.buffer_3d[1,:,:,:] - - _zero(d1) - _zero_2d(d2) - _zero_2d(dd2) - self.d1(t, vec, d1) - self.d2(t, vec, d2) - self.d2(t + dt, vec, dd2) - # Euler part - copy(vec,out) - _axpy(1., d1, out) - for i in range(self.num_ops): - _axpy(dw[i], d2[i,:], out) - - _zero(V) - _axpy(1., vec, V) - _axpy(1./self.num_ops, d1, V) - - _zero_2d(v2p) - _zero_2d(v2m) - for i in range(self.num_ops): - _axpy(1., V, v2p[i,:]) - _axpy(sqrt_dt, d2[i,:], v2p[i,:]) - _axpy(1., V, v2m[i,:]) - _axpy(-sqrt_dt, d2[i,:], v2m[i,:]) - - _zero_3d(p2p) - _zero_3d(p2m) - for i in range(self.num_ops): - _zero_2d(d2p) - _zero_2d(d2m) - self.d2(t, v2p[i,:], d2p) - self.d2(t, v2m[i,:], d2m) - ddw = (dw[i]*dw[i]-dt)*0.25/sqrt_dt # 1.0 - _axpy( ddw, d2p[i,:], out) - _axpy(-ddw, d2m[i,:], out) - for j in range(self.num_ops): - _axpy( 1., v2p[i,:], p2p[i,j,:]) - _axpy( sqrt_dt, d2p[j,:], p2p[i,j,:]) - _axpy( 1., v2p[i,:], p2m[i,j,:]) - _axpy(-sqrt_dt, d2p[j,:], p2m[i,j,:]) - - 
_axpy(-0.5*(self.num_ops), d1, out) - for i in range(self.num_ops): - ddz = dz[i]*0.5/sqrt_dt # 1.5 - ddd = 0.25*(dw[i]*dw[i]/3-dt)*dw[i]/dt # 1.5 - _zero(d1p) - _zero(d1m) - _zero_2d(d2m) - _zero_2d(d2p) - _zero_2d(d2pp) - _zero_2d(d2mm) - self.d1(t + dt/self.num_ops, v2p[i,:], d1p) - self.d1(t + dt/self.num_ops, v2m[i,:], d1m) - self.d2(t, v2p[i,:], d2p) - self.d2(t, v2m[i,:], d2m) - self.d2(t, p2p[i,i,:], d2pp) - self.d2(t, p2m[i,i,:], d2mm) - - _axpy( ddz+0.25, d1p, out) - _axpy(-ddz+0.25, d1m, out) - - _axpy((dw[i]-dz[i]), dd2[i,:], out) - _axpy((dz[i]-dw[i]), d2[i,:], out) - - _axpy( ddd, d2pp[i,:], out) - _axpy(-ddd, d2mm[i,:], out) - _axpy(-ddd, d2p[i,:], out) - _axpy( ddd, d2m[i,:], out) - - for j in range(self.num_ops): - ddw = 0.5*(dw[j]-dz[j]) # 1.5 - _axpy(ddw, d2p[j,:], out) - _axpy(-2*ddw, d2[j,:], out) - _axpy(ddw, d2m[j,:], out) - - if j>i: - ddw = 0.5*(dw[i]*dw[j])/sqrt_dt # 1.0 - _axpy( ddw, d2p[j,:], out) - _axpy(-ddw, d2m[j,:], out) - - ddw = 0.25*(dw[j]*dw[j]-dt)*dw[i]/dt # 1.5 - _zero_2d(d2pp) - _zero_2d(d2mm) - self.d2(t, p2p[j,i,:], d2pp) - self.d2(t, p2m[j,i,:], d2mm) - _axpy( ddw, d2pp[j,:], out) - _axpy(-ddw, d2mm[j,:], out) - _axpy(-ddw, d2p[j,:], out) - _axpy( ddw, d2m[j,:], out) - - for k in range(j+1,self.num_ops): - ddw = 0.5*dw[i]*dw[j]*dw[k]/dt # 1.5 - _axpy( ddw, d2pp[k,:], out) - _axpy(-ddw, d2mm[k,:], out) - _axpy(-ddw, d2p[k,:], out) - _axpy( ddw, d2m[k,:], out) - - if j=0 d2 euler dwi - a[:] a >=0 d1 euler dt - Lb[i,i,:] bi'bi >=1 milstein (dwi^2-dt)/2 - Lb[i,j,:] bj'bi >=1 milstein dwi*dwj - - L0b[i,:] ab' +db/dt +bbb"/2 >=2 taylor15 dwidt-dzi - La[i,:] ba' >=2 taylor15 dzi - LLb[i,i,i,:] bi(bibi"+bi'bi') >=2 taylor15 (dwi^2/3-dt)dwi/2 - LLb[i,j,j,:] bi(bjbj"+bj'bj') >=2 taylor15 (dwj^2-dt)dwj/2 - LLb[i,j,k,:] bi(bjbk"+bj'bk') >=2 taylor15 dwi*dwj*dwk - L0a[:] aa' +da/dt +bba"/2 2 taylor15 dt^2/2 - """ - cdef int i, j, k, l - cdef double dt = self.dt - cdef QobjEvo c_op - cdef complex e, de_bb - - cdef complex[::1] e_real = 
self.expect_buffer_1d[0,:] - cdef complex[:, ::1] de_b = self.expect_buffer_2d[:,:] - cdef complex[::1] de_a = self.expect_buffer_1d[1,:] - cdef complex[:, :, ::1] dde_bb = self.expect_buffer_3d[:,:,:] - _zero_3d(dde_bb) - cdef complex[:, ::1] Cvec = self.func_buffer_2d[:,:] - cdef complex[:, :, ::1] Cb = self.func_buffer_3d[:,:,:] - cdef complex[::1] temp = self.func_buffer_1d[0,:] - cdef complex[::1] temp2 = self.func_buffer_1d[1,:] - _zero(temp) - _zero(temp2) - _zero_2d(Cvec) - _zero_3d(Cb) - - # a b - self.L.matmul_data(t, _dense_wrap(vec), _dense_wrap(a)) - for i in range(self.num_ops): - c_op = self.c_ops[i] - c_op.matmul_data(t, _dense_wrap(vec), _dense_wrap(Cvec[i, :])) - e = _dotc(vec,Cvec[i,:]) - e_real[i] = e.real - _axpy(1., Cvec[i,:], b[i,:]) - _axpy(-e_real[i], vec, b[i,:]) - _axpy(-0.5 * e_real[i] * e_real[i] * dt, vec, a[:]) - _axpy(e_real[i] * dt, Cvec[i,:], a[:]) - - #Lb bb' - if deg >= 1: - for i in range(self.num_ops): - c_op = self.c_ops[i] - for j in range(self.num_ops): - c_op.matmul_data(t, _dense_wrap(b[j, :]), _dense_wrap(Cb[i,j,:])) - for k in range(self.l_vec): - temp[k] = conj(b[j,k]) - temp2[k] = 0. 
- c_op.matmul_data(t, _dense_wrap(temp), _dense_wrap(temp2)) - de_b[i,j] = (_dotc(vec, Cb[i,j,:]) + _dot(b[j,:], Cvec[i,:]) + \ - conj(_dotc(b[j,:], Cvec[i,:]) + _dotc(vec, temp2))) * 0.5 - _axpy(1., Cb[i,j,:], Lb[i,j,:]) - _axpy(-e_real[i], b[j,:], Lb[i,j,:]) - _axpy(-de_b[i,j], vec, Lb[i,j,:]) - - for k in range(self.num_ops): - dde_bb[i,j,k] += (_dot(b[j,:], Cb[i,k,:]) + \ - _dot(b[k,:], Cb[i,j,:]) + \ - conj(_dotc(b[k,:], temp2)))*.5 - dde_bb[i,k,j] += conj(_dotc(b[k,:], temp2))*.5 - - #L0b La LLb - if deg >= 2: - for i in range(self.num_ops): - #ba' - self.L.matmul_data(t, _dense_wrap(b[i,:]), _dense_wrap(La[i,:])) - for j in range(self.num_ops): - _axpy(-0.5 * e_real[j] * e_real[j] * dt, b[i,:], La[i,:]) - _axpy(-e_real[j] * de_b[i,j] * dt, vec, La[i,:]) - _axpy(e_real[j] * dt, Cb[i,j,:], La[i,:]) - _axpy(de_b[i,j] * dt, Cvec[i,:], La[i,:]) - - #ab' + db/dt + bbb"/2 - c_op = self.c_ops[i] - c_op.matmul_data(t, _dense_wrap(a), _dense_wrap(L0b[i,:])) - for k in range(self.l_vec): - temp[k] = conj(a[k]) - temp2[k] = 0. - c_op.matmul_data(t, _dense_wrap(temp), _dense_wrap(temp2)) - de_a[i] = (_dotc(vec, L0b[i,:]) + _dot(a, Cvec[i,:]) + \ - conj(_dotc(a, Cvec[i,:]) + _dotc(vec, temp2))) * 0.5 - _axpy(-e_real[i], a, L0b[i,:]) - _axpy(-de_a[i], vec, L0b[i,:]) - - temp = np.zeros(self.l_vec, dtype=complex) - c_op.matmul_data(t + self.dt, _dense_wrap(vec), _dense_wrap(temp)) - e = _dotc(vec,temp) - _axpy(1., temp, L0b[i,:]) - _axpy(-e.real, vec, L0b[i,:]) - _axpy(-1., b[i,:], L0b[i,:]) - - for j in range(self.num_ops): - _axpy(-de_b[i,j]*dt, b[j,:], L0b[i,:]) - _axpy(-dde_bb[i,j,j]*dt, vec, L0b[i,:]) - - #b(bb"+b'b') - for j in range(i,self.num_ops): - for k in range(j, self.num_ops): - c_op.matmul_data(t, _dense_wrap(Lb[j,k,:]), - _dense_wrap(LLb[i,j,k,:])) - for l in range(self.l_vec): - temp[l] = conj(Lb[j,k,l]) - temp2[l] = 0. 
- c_op.matmul_data(t, _dense_wrap(temp), _dense_wrap(temp2)) - de_bb = (_dotc(vec, LLb[i,j,k,:]) + \ - _dot(Lb[j,k,:], Cvec[i,:]) + \ - conj(_dotc(Lb[j,k,:], Cvec[i,:]) +\ - _dotc(vec, temp2)))*0.5 - _axpy(-e_real[i], Lb[j,k,:], LLb[i,j,k,:]) - _axpy(-de_bb, vec, LLb[i,j,k,:]) - _axpy(-dde_bb[i,j,k], vec, LLb[i,j,k,:]) - _axpy(-de_b[i,j], b[k,:], LLb[i,j,k,:]) - _axpy(-de_b[i,k], b[j,:], LLb[i,j,k,:]) - - #da/dt + aa' + bba" - if deg == 2: - self.d1(t + dt, vec, L0a) - _axpy(-1.0, a, L0a) - self.L.matmul_data(t, _dense_wrap(a), _dense_wrap(L0a)) - for j in range(self.num_ops): - c_op = self.c_ops[j] - temp = np.zeros(self.l_vec, dtype=complex) - c_op.matmul_data(t, _dense_wrap(a), _dense_wrap(temp)) - _axpy(-0.5 * e_real[j] * e_real[j] * dt, a[:], L0a[:]) - _axpy(-e_real[j] * de_a[j] * dt, vec, L0a[:]) - _axpy(e_real[j] * dt, temp, L0a[:]) - _axpy(de_a[j] * dt, Cvec[j,:], L0a[:]) - for i in range(self.num_ops): - _axpy(-0.5*(e_real[i] * dde_bb[i,j,j] + - de_b[i,j] * de_b[i,j]) * dt * dt, vec, L0a[:]) - _axpy(-e_real[i] * de_b[i,j] * dt * dt, b[j,:], L0a[:]) - _axpy(0.5*dde_bb[i,j,j] * dt * dt, Cvec[i,:], L0a[:]) - _axpy(de_b[i,j] * dt * dt, Cb[i,j,:], L0a[:]) - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef void _c_vec_conj(self, double t, QobjEvo c_op, - complex[::1] vec, complex[::1] out): - cdef int k - cdef complex[::1] temp = self.func_buffer_1d[13,:] - for k in range(self.l_vec): - temp[k] = conj(vec[k]) - out[k] = 0. - c_op.matmul_data(t, _dense_wrap(temp), _dense_wrap(out)) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void derivativesO2(self, double t, complex[::1] psi, - complex[::1] a, complex[::1] b, complex[::1] Lb, - complex[::1] La, complex[::1] L0b, complex[::1] LLb, - complex[::1] L0a, - complex[::1] LLa, complex[::1] LL0b, - complex[::1] L0Lb, complex[::1] LLLb): - """ - Combinaisons of a and b derivative for m sc_ops up to order dt**2.0 - Use Stratonovich-Taylor expansion. 
- One one sc_ops - dY ~ a dt + bi dwi - - b[:] b d2 euler dw - a[:] a- Lb/2 d1 euler dt - Lb[:] b'b milstein dw^2/2 - L0b[:] ab'- b'b'b/2 taylor1.5 dwdt-dz - La[:] ba'- (b'b'b+b"bb)/2 taylor1.5 dz - LLb[:] (b"bb+b'b'b) taylor1.5 dw^3/6 - L0a[:] a_a'_ + da/dt -Lb_a'_/2 taylor1.5 dt^2/2 - - LLa[:] ... taylor2.0 dwdt-dz - LL0b[:] ... taylor2.0 dz - L0Lb[:] ... taylor2.0 dw^3/6 - LLLb[:] ... taylor2.0 dt^2/2 - """ - cdef double dt = self.dt - cdef QobjEvo c_op = self.c_ops[0] - cdef complex e, de_b, de_Lb, de_LLb, dde_bb, dde_bLb - cdef complex de_a, dde_ba, de_La, de_L0b - - cdef complex[::1] Cpsi = self.func_buffer_1d[0,:] - cdef complex[::1] Cb = self.func_buffer_1d[1,:] - cdef complex[::1] Cbc = self.func_buffer_1d[2,:] - cdef complex[::1] CLb = self.func_buffer_1d[3,:] - cdef complex[::1] CLbc = self.func_buffer_1d[4,:] - cdef complex[::1] CLLb = self.func_buffer_1d[5,:] - cdef complex[::1] CLLbc = self.func_buffer_1d[6,:] - cdef complex[::1] Ca = self.func_buffer_1d[7,:] - cdef complex[::1] Cac = self.func_buffer_1d[8,:] - cdef complex[::1] CLa = self.func_buffer_1d[9,:] - cdef complex[::1] CLac = self.func_buffer_1d[10,:] - cdef complex[::1] CL0b = self.func_buffer_1d[11,:] - cdef complex[::1] CL0bc = self.func_buffer_1d[12,:] - _zero(Cpsi) - _zero(Cb) - _zero(CLb) - _zero(CLLb) - _zero(Ca) - _zero(CLa) - _zero(CL0b) - - # b - c_op.matmul_data(t, _dense_wrap(psi), _dense_wrap(Cpsi)) - e = _dotc(psi, Cpsi).real - _axpy(1., Cpsi, b) - _axpy(-e, psi, b) - - # Lb - c_op.matmul_data(t, _dense_wrap(b), _dense_wrap(Cb)) - self._c_vec_conj(t, c_op, b, Cbc) - de_b = (_dotc(psi, Cb) + _dot(b, Cpsi) + \ - conj(_dotc(b, Cpsi) + _dotc(psi, Cbc))) * 0.5 - _axpy(1., Cb, Lb) - _axpy(-e, b, Lb) - _axpy(-de_b, psi, Lb) - - # LLb = b'b'b + b"bb - c_op.matmul_data(t, _dense_wrap(Lb), _dense_wrap(CLb)) - self._c_vec_conj(t, c_op, Lb, CLbc) - de_Lb = (_dotc(psi, CLb) + _dot(Lb, Cpsi) + \ - conj(_dotc(Lb, Cpsi) + _dotc(psi, CLbc)))*0.5 - _axpy(1, CLb, LLb) # b'b'b - _axpy(-e, Lb, LLb) 
# b'b'b - _axpy(-de_Lb, psi, LLb) # b'b'b - dde_bb += (_dot(b, Cb) + conj(_dotc(b, Cbc))) - _axpy(-dde_bb, psi, LLb) # b"bb - _axpy(-de_b*2, b, LLb) # b"bb - - # LLLb = b"'bbb + 3* b"b'bb + b'(b"bb + b'b'b) - c_op.matmul_data(t, _dense_wrap(LLb), _dense_wrap(CLLb)) - self._c_vec_conj(t, c_op, LLb, CLLbc) - de_LLb = (_dotc(psi, CLLb) + _dot(LLb, Cpsi) + \ - conj(_dotc(LLb, Cpsi) + _dotc(psi, CLLbc)))*0.5 - dde_bLb += (_dot(b, CLb) + _dot(Lb, Cb) + conj(_dotc(Lb, Cbc)) + \ - conj(_dotc(b, CLbc)))*.5 - _axpy(1, CLLb, LLLb) # b'(b"bb + b'b'b) - _axpy(-e, LLb, LLLb) # b'(b"bb + b'b'b) - _axpy(-de_LLb, psi, LLLb) # b'(b"bb + b'b'b) - _axpy(-dde_bLb*3, psi, LLLb) # b"bLb - _axpy(-de_Lb*3, b, LLLb) # b"bLb - _axpy(-de_b*3, Lb, LLLb) # b"bLb - _axpy(-dde_bb*3, b, LLLb) # b"'bbb - - # a - self.L.matmul_data(t, _dense_wrap(psi), _dense_wrap(a)) - _axpy(-0.5 * e * e * dt, psi, a) - _axpy(e * dt, Cpsi, a) - _axpy(-0.5 * dt, Lb, a) - - #La - self.L.matmul_data(t, _dense_wrap(b), _dense_wrap(La)) - _axpy(-0.5 * e * e * dt, b, La) - _axpy(-e * de_b * dt, psi, La) - _axpy(e * dt, Cb, La) - _axpy(de_b * dt, Cpsi, La) - _axpy(-0.5 * dt, LLb, La) - - #LLa - _axpy(-2 * e * de_b * dt, b, LLa) - _axpy(-de_b * de_b * dt, psi, LLa) - _axpy(-e * dde_bb * dt, psi, LLa) - _axpy( 2 * de_b * dt, Cb, LLa) - _axpy( dde_bb * dt, Cpsi, LLa) - - self.L.matmul_data(t, _dense_wrap(Lb), _dense_wrap(LLa)) - _axpy(-de_Lb * e * dt, psi, LLa) - _axpy(-0.5 * e * e * dt, Lb, LLa) - _axpy( de_Lb * dt, Cpsi, LLa) - _axpy( e * dt, CLb, LLa) - - _axpy(-0.5 * dt, LLLb, LLa) - - # L0b = b'a - c_op.matmul_data(t, _dense_wrap(a), _dense_wrap(Ca)) - self._c_vec_conj(t, c_op, a, Cac) - de_a = (_dotc(psi, Ca) + _dot(a, Cpsi) + \ - conj(_dotc(a, Cpsi) + _dotc(psi, Cac))) * 0.5 - _axpy(1.0, Ca, L0b) - _axpy(-e, a, L0b) - _axpy(-de_a, psi, L0b) - - # LL0b = b"ba + b'La - dde_ba += (_dot(b, Ca) + _dot(a, Cb) + conj(_dotc(a, Cbc)) + \ - conj(_dotc(b, Cac)))*.5 - _axpy(-dde_ba, psi, LL0b) - _axpy(-de_a, b, LL0b) - 
_axpy(-de_b, a, LL0b) - c_op.matmul_data(t, _dense_wrap(La), _dense_wrap(CLa)) - self._c_vec_conj(t, c_op, La, CLac) - de_La = (_dotc(psi, CLa) + _dot(La, Cpsi) + \ - conj(_dotc(La, Cpsi) + _dotc(psi, CLac))) * 0.5 - _axpy(1., CLa, LL0b) - _axpy(-e, La, LL0b) - _axpy(-de_La, psi, LL0b) - - # L0Lb = b"ba + b'L0b - _axpy(-dde_ba, psi, L0Lb) - _axpy(-de_a, b, L0Lb) - _axpy(-de_b, a, L0Lb) - c_op.matmul_data(t, _dense_wrap(L0b), _dense_wrap(CL0b)) - self._c_vec_conj(t, c_op, L0b, CL0bc) - de_L0b = (_dotc(psi, CL0b) + _dot(L0b, Cpsi) + \ - conj(_dotc(L0b, Cpsi) + _dotc(psi, CL0bc))) * 0.5 - _axpy(1., CL0b, L0Lb) - _axpy(-e, L0b, L0Lb) - _axpy(-de_L0b, psi, L0Lb) - - # _L0_ _a_ = da/dt + a'_a_ -_L0_Lb/2 - self.d1(t + dt, psi, L0a) # da/dt - _axpy(-0.5 * dt, Lb, L0a) # da/dt - _axpy(-1.0, a, L0a) # da/dt - self.L.matmul_data(t, _dense_wrap(a), _dense_wrap(L0a)) # a'_a_ - _axpy(-0.5 * e * e * dt, a, L0a) # a'_a_ - _axpy(-e * de_a * dt, psi, L0a) # a'_a_ - _axpy(e * dt, Ca, L0a) # a'_a_ - _axpy(de_a * dt, Cpsi, L0a) # a'_a_ - _axpy(-0.5 * dt, L0Lb, L0a) # _L0_Lb/2 - - cdef void implicit(self, double t, np.ndarray[complex, ndim=1] dvec, - complex[::1] out, - np.ndarray[complex, ndim=1] guess) except *: - # np.ndarray to memoryview is OK but not the reverse - # scipy function only take np array, not memoryview - self.imp_t = t - spout, check = sp.linalg.bicgstab(self.imp, dvec, x0=guess, - tol=self.tol, atol=1e-12) - cdef int i - copy(spout, out) - - -cdef class SMESolver(StochasticSolver): - """stochastic master equation system""" - cdef QobjEvo L - cdef object imp - cdef object c_ops - cdef int N_root - cdef double tol - - def set_data(self, sso): - L = sso.LH - c_ops = sso.sops - self.l_vec = L.shape[0] - self.num_ops = len(c_ops) - self.L = L - self.c_ops = [] - self.N_root = np.sqrt(self.l_vec) - for i, op in enumerate(c_ops): - self.c_ops.append(op) - if sso.solver_code in [MILSTEIN_IMP_SOLVER, TAYLOR1_5_IMP_SOLVER]: - self.tol = sso.tol - self.imp = sso.imp - - cdef 
void _normalize_inplace(self, complex[::1] vec): - _normalize_rho(vec) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef complex expect(self, complex[::1] rho): - cdef complex e = 0. - cdef int k - for k in range(self.N_root): - e += rho[k*(self.N_root+1)] - return e - - @cython.boundscheck(False) - cdef void d1(self, double t, complex[::1] rho, complex[::1] out): - self.L.matmul_data(t, _dense_wrap(rho), _dense_wrap(out)) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void d2(self, double t, complex[::1] rho, complex[:, ::1] out): - cdef int i, k - cdef QobjEvo c_op - cdef complex expect - for i in range(self.num_ops): - c_op = self.c_ops[i] - c_op.matmul_data(t, _dense_wrap(rho), _dense_wrap(out[i,:])) - expect = self.expect(out[i,:]) - _axpy(-expect, rho, out[i,:]) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void derivatives(self, double t, int deg, complex[::1] rho, - complex[::1] a, complex[:, ::1] b, - complex[:, :, ::1] Lb, complex[:,::1] La, - complex[:, ::1] L0b, complex[:, :, :, ::1] LLb, - complex[::1] L0a): - """ - combinaisons of a and b derivative for m sc_ops up to order dt**1.5 - dY ~ a dt + bi dwi - deg use noise - b[i.:] bi >=0 d2 euler dwi - a[:] a >=0 d1 euler dt - Lb[i,i,:] bi'bi >=1 milstein (dwi^2-dt)/2 - Lb[i,j,:] bj'bi >=1 milstein dwi*dwj - L0b[i,:] ab' +db/dt +bbb"/2 >=2 taylor15 dwidt-dzi - La[i,:] ba' >=2 taylor15 dzi - LLb[i,i,i,:] bi(bibi"+bi'bi') >=2 taylor15 (dwi^2/3-dt)dwi/2 - LLb[i,j,j,:] bi(bjbj"+bj'bj') >=2 taylor15 (dwj^2-dt)dwj/2 - LLb[i,j,k,:] bi(bjbk"+bj'bk') >=2 taylor15 dwi*dwj*dwk - L0a[:] aa' +da/dt +bba"/2 2 taylor15 dt^2/2 - """ - cdef int i, j, k - cdef QobjEvo c_op - cdef QobjEvo c_opj - cdef complex trApp, trAbb, trAa - cdef complex[::1] trAp = self.expect_buffer_1d[0,:] - cdef complex[:, ::1] trAb = self.expect_buffer_2d - cdef complex[::1] temp = self.func_buffer_1d[0,:] - 
#_zero(temp) - - # a - self.L.matmul_data(t, _dense_wrap(rho), _dense_wrap(a)) - - # b - for i in range(self.num_ops): - c_op = self.c_ops[i] - # bi - c_op.matmul_data(t, _dense_wrap(rho), _dense_wrap(b[i,:])) - trAp[i] = self.expect(b[i,:]) - _axpy(-trAp[i], rho, b[i,:]) - - # Libj = bibj', i<=j - # sc_ops must commute (Libj = Ljbi) - if deg >= 1: - for i in range(self.num_ops): - c_op = self.c_ops[i] - for j in range(i, self.num_ops): - c_op.matmul_data(t, _dense_wrap(b[j,:]), _dense_wrap(Lb[i,j,:])) - trAb[i,j] = self.expect(Lb[i,j,:]) - _axpy(-trAp[i], b[j,:], Lb[i,j,:]) - _axpy(-trAb[i,j], rho, Lb[i,j,:]) - - # L0b La LLb - if deg >= 2: - for i in range(self.num_ops): - c_op = self.c_ops[i] - # Lia = bia' - self.L.matmul_data(t, _dense_wrap(b[i,:]), _dense_wrap(La[i,:])) - - # L0bi = abi' + dbi/dt + Sum_j bjbjbi"/2 - # db/dt - c_op.matmul_data(t + self.dt, _dense_wrap(rho), _dense_wrap(L0b[i,:])) - trApp = self.expect(L0b[i,:]) - _axpy(-trApp, rho, L0b[i,:]) - _axpy(-1, b[i,:], L0b[i,:]) - # ab' - _zero(temp) # = np.zeros((self.l_vec, ), dtype=complex) - c_op.matmul_data(t, _dense_wrap(a), _dense_wrap(temp)) - trAa = self.expect(temp) - _axpy(1., temp, L0b[i,:]) - _axpy(-trAp[i], a[:], L0b[i,:]) - _axpy(-trAa, rho, L0b[i,:]) - # bbb" : trAb[i,j] only defined for j>=i - for j in range(i): - _axpy(-trAb[j,i]*self.dt, b[j,:], L0b[i,:]) # L contain dt - for j in range(i,self.num_ops): - _axpy(-trAb[i,j]*self.dt, b[j,:], L0b[i,:]) # L contain dt - - # LLb - # LiLjbk = bi(bj'bk'+bjbk"), i<=j<=k - # sc_ops must commute (LiLjbk = LjLibk = LkLjbi) - for j in range(i,self.num_ops): - for k in range(j,self.num_ops): - c_op.matmul_data(t, _dense_wrap(Lb[j,k,:]), _dense_wrap(LLb[i,j,k,:])) - trAbb = self.expect(LLb[i,j,k,:]) - _axpy(-trAp[i], Lb[j,k,:], LLb[i,j,k,:]) - _axpy(-trAbb, rho, LLb[i,j,k,:]) - _axpy(-trAb[i,k], b[j,:], LLb[i,j,k,:]) - _axpy(-trAb[i,j], b[k,:], LLb[i,j,k,:]) - - # L0a = a'a + da/dt + bba"/2 (a" = 0) - if deg == 2: - self.L.matmul_data(t, 
_dense_wrap(a), _dense_wrap(L0a)) - self.L.matmul_data(t+self.dt, _dense_wrap(rho), _dense_wrap(L0a)) - _axpy(-1, a, L0a) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void derivativesO2(self, double t, complex[::1] rho, - complex[::1] a, complex[::1] b, complex[::1] Lb, - complex[::1] La, complex[::1] L0b, complex[::1] LLb, - complex[::1] L0a, - complex[::1] LLa, complex[::1] LL0b, - complex[::1] L0Lb, complex[::1] LLLb): - """ - Combinaisons of a and b derivative for m sc_ops up to order dt**2.0 - Use Stratonovich-Taylor expansion. - One one sc_ops - dY ~ a dt + bi dwi - - b[:] b d2 euler dw - a[:] a- Lb/2 d1 euler dt - Lb[:] b'b milstein dw^2/2 - L0b[:] ab'- b'b'b/2 taylor1.5 dwdt-dz - La[:] ba'- (b'b'b+b"bb)/2 taylor1.5 dz - LLb[:] (b"bb+b'b'b) taylor1.5 dw^3/6 - L0a[:] a_a'_ + da/dt -Lb_a'_/2 taylor1.5 dt^2/2 - - LLa[:] ... taylor2.0 dwdt-dz - LL0b[:] ... taylor2.0 dz - L0Lb[:] ... taylor2.0 dw^3/6 - LLLb[:] ... taylor2.0 dt^2/2 - """ - cdef int i, j, k - cdef QobjEvo c_op = self.c_ops[0] - cdef QobjEvo c_opj - cdef complex trAp, trApt - cdef complex trAb, trALb, trALLb - cdef complex trAa, trALa - cdef complex trAL0b - - cdef complex[::1] temp = self.func_buffer_1d[0,:] - cdef complex[::1] temp2 = self.func_buffer_1d[1,:] - - # b - c_op.matmul_data(t, _dense_wrap(rho), _dense_wrap(b)) - trAp = self.expect(b) - _axpy(-trAp, rho, b) - - # Lb = b'b - c_op.matmul_data(t, _dense_wrap(b), _dense_wrap(Lb)) - trAb = self.expect(Lb) - _axpy(-trAp, b, Lb) - _axpy(-trAb, rho, Lb) - - # LLb = b'Lb+b"bb - c_op.matmul_data(t, _dense_wrap(Lb), _dense_wrap(LLb)) - trALb = self.expect(LLb) - _axpy(-trAp, Lb, LLb) - _axpy(-trALb, rho, LLb) - _axpy(-trAb*2, b, LLb) - - # LLLb = b'LLb + 3 b"bLb + b"'bbb - c_op.matmul_data(t, _dense_wrap(LLb), _dense_wrap(LLLb)) - trALLb = self.expect(LLLb) - _axpy(-trAp, LLb, LLLb) - _axpy(-trALLb, rho, LLLb) - _axpy(-trALb*3, b, LLLb) - _axpy(-trAb*3, Lb, LLLb) - - # _a_ = a - Lb/2 - 
self.L.matmul_data(t, _dense_wrap(rho), _dense_wrap(a)) - _axpy(-0.5*self.dt, Lb, a) - - # L_a_ = ba' - LLb/2 - self.L.matmul_data(t, _dense_wrap(b), _dense_wrap(La)) - _axpy(-0.5*self.dt, LLb, La) - - # LL_a_ = b(La)' - LLLb/2 - self.L.matmul_data(t, _dense_wrap(Lb), _dense_wrap(LLa)) - _axpy(-0.5*self.dt, LLLb, LLa) - - # _L0_b = b'(_a_) - c_op.matmul_data(t, _dense_wrap(a), _dense_wrap(L0b)) - trAa = self.expect(L0b) - _axpy(-trAp, a, L0b) - _axpy(-trAa, rho, L0b) - - # _L0_Lb = b'(b'(_a_))+b"(_a_,b) - c_op.matmul_data(t, _dense_wrap(L0b), _dense_wrap(L0Lb)) - trAL0b = self.expect(L0Lb) - _axpy(-trAp, L0b, L0Lb) - _axpy(-trAL0b, rho, L0Lb) - _axpy(-trAa, b, L0Lb) - _axpy(-trAb, a, L0Lb) - - # L_L0_b = b'(_a_'(b))+b"(_a_,b) - c_op.matmul_data(t, _dense_wrap(La), _dense_wrap(LL0b)) - trAL0b = self.expect(LL0b) - _axpy(-trAp, La, LL0b) - _axpy(-trAL0b, rho, LL0b) - _axpy(-trAa, b, LL0b) - _axpy(-trAb, a, LL0b) - - # _L0_ _a_ = _L0_a - _L0_Lb/2 + da/dt - self.L.matmul_data(t, _dense_wrap(a), _dense_wrap(L0a)) - self.L.matmul_data(t+self.dt, _dense_wrap(rho), _dense_wrap(L0a)) - _axpy(-0.5*self.dt, Lb, L0a) # _a_(t+dt) = a(t+dt)-0.5*Lb - _axpy(-1, a, L0a) - _axpy(-self.dt*0.5, L0Lb, L0a) - - - cdef void implicit(self, double t, np.ndarray[complex, ndim=1] dvec, - complex[::1] out, - np.ndarray[complex, ndim=1] guess) except *: - # np.ndarray to memoryview is OK but not the reverse - # scipy function only take np array, not memoryview - spout, check = sp.linalg.bicgstab(self.imp(t).data.as_scipy(), - dvec, x0=guess, - tol=self.tol, atol=1e-12) - cdef int i - copy(spout,out) - - -cdef class PcSSESolver(StochasticSolver): - """photocurrent for Schrodinger equation""" - cdef QobjEvo L - cdef object c_ops - cdef object cdc_ops - - def set_data(self, sso): - L = sso.LH - c_ops = sso.sops - self.l_vec = L.shape[0] - self.num_ops = len(c_ops) - self.L = L - self.c_ops = [] - self.cdc_ops = [] - for i, op in enumerate(c_ops): - self.c_ops.append(op[0]) - 
self.cdc_ops.append(op[1]) - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef void photocurrent(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - cdef QobjEvo c_op - cdef double rand - cdef int i, which = -1 - cdef complex[::1] expects = self.expect_buffer_1d[0,:] - cdef complex[::1] d2 = self.buffer_1d[0,:] - - copy(vec, out) - self.d1(t, vec, out) - rand = np.random.rand() - for i in range(self.num_ops): - c_op = self.cdc_ops[i] - expects[i] = c_op.expect_data(t, _dense_wrap(vec)) - if expects[i].real * dt >= 1e-15: - rand -= expects[i].real *dt - if rand < 0: - which = i - noise[i] = 1. - break - - if which >= 0: - self.collapse(t, which, expects[which].real, vec, d2) - _axpy(1, d2, out) - _axpy(-1, vec, out) - - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef void photocurrent_pc(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - cdef QobjEvo c_op - cdef double expect - cdef int i, which=0, num_coll=0, did_collapse=0 - cdef complex[::1] tmp = self.buffer_1d[0,:] - cdef complex[::1] expects = self.expect_buffer_1d[0,:] - cdef np.ndarray[int, ndim=1] colls - - # Collapses are computed first - for i in range(self.num_ops): - c_op = self.cdc_ops[i] - expects[i] = c_op.expect_data(t, _dense_wrap(vec)).real - if expects[i].real > 0: - did_collapse = np.random.poisson(expects[i].real * dt) - num_coll += did_collapse - if did_collapse: - which = i - noise[i] = did_collapse * 1. - else: - noise[i] = 0. - - if num_coll == 0: - pass - elif num_coll == 1: - # Do one collapse - self.collapse(t, which, expects[which].real, vec, out) - copy(out, vec) - elif num_coll and noise[which] == num_coll: - # Do many collapse of one sc_ops. - # Recompute the expectation value, but only to check for zero. 
- c_op = self.cdc_ops[which] - for i in range(num_coll): - expect = c_op.expect_data(t, _dense_wrap(vec)).real - if expect * dt >= 1e-15: - self.collapse(t, which, expect, vec, out) - copy(out,vec) - elif num_coll >= 2: - # 2 or more collapses of different operators - # Ineficient, should be rare - coll = [] - for i in range(self.num_ops): - coll += [i]*int(noise[i]) - np.random.shuffle(coll) - for i in coll: - c_op = self.cdc_ops[i] - expect = c_op.expect_data(t, _dense_wrap(vec)).real - if expect * dt >= 1e-15: - self.collapse(t, i, expect, vec, out) - copy(out,vec) - copy(vec,tmp) - copy(vec,out) - self.d1(t, vec, tmp) - self.d1(t+dt, tmp, out) - _scale(0.5, out) - _axpy(0.5, tmp, out) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void d1(self, double t, complex[::1] vec, complex[::1] out): - self.L.matmul_data(t, _dense_wrap(vec), _dense_wrap(out)) - cdef int i - cdef complex e - cdef QobjEvo c_op - cdef complex[::1] temp = self.func_buffer_1d[0,:] - for i in range(self.num_ops): - _zero(temp) - c_op = self.c_ops[i] - c_op.matmul_data(t, _dense_wrap(vec), _dense_wrap(temp)) - e = _dznrm2(temp) - _axpy(0.5 * e * e * self.dt, vec, out) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void d2(self, double t, complex[::1] vec, complex[:, ::1] out): - cdef int i - cdef QobjEvo c_op - cdef complex expect - for i in range(self.num_ops): - c_op = self.c_ops[i] - c_op.matmul_data(t, _dense_wrap(vec), _dense_wrap(out[i,:])) - expect = _dznrm2(out[i,:]) - if expect.real >= 1e-15: - _zscale(1/expect, out[i,:]) - else: - _zero(out[i,:]) - _axpy(-1, vec, out[i,:]) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void collapse(self, double t, int which, double expect, - complex[::1] vec, complex[::1] out): - cdef QobjEvo c_op - c_op = self.c_ops[which] - _zero(out) - c_op.matmul_data(t, _dense_wrap(vec), _dense_wrap(out)) - _zscale(1/expect, out) 
- - -cdef class PcSMESolver(StochasticSolver): - """photocurrent for master equation""" - cdef QobjEvo L - cdef object cdcr_cdcl_ops - cdef object cdcl_ops - cdef object clcdr_ops - cdef int N_root - - def set_data(self, sso): - L = sso.LH - c_ops = sso.sops - self.l_vec = L.shape[0] - self.num_ops = len(c_ops) - self.L = L - self.cdcr_cdcl_ops = [] - self.cdcl_ops = [] - self.clcdr_ops = [] - self.N_root = np.sqrt(self.l_vec) - for i, op in enumerate(c_ops): - self.cdcr_cdcl_ops.append(op[0]) - self.cdcl_ops.append(op[1]) - self.clcdr_ops.append(op[2]) - - cdef void _normalize_inplace(self, complex[::1] vec): - _normalize_rho(vec) - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef void photocurrent(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - cdef QobjEvo c_op - cdef double rand - cdef int i, which = -1 - cdef complex[::1] expects = self.expect_buffer_1d[0,:] - cdef complex[::1] d2 = self.buffer_1d[0,:] - - copy(vec, out) - self.d1(t, vec, out) - rand = np.random.rand() - for i in range(self.num_ops): - c_op = self.clcdr_ops[i] - expects[i] = c_op.expect_data(t, _dense_wrap(vec)) - if expects[i].real * dt >= 1e-15: - rand -= expects[i].real *dt - if rand < 0: - which = i - noise[i] = 1. 
- break - - if which >= 0: - self.collapse(t, which, expects[which].real, vec, d2) - _axpy(1, d2, out) - _axpy(-1, vec, out) - - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef void photocurrent_pc(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - cdef QobjEvo c_op - cdef int i, which, num_coll=0, did_collapse - cdef complex[::1] expects = self.expect_buffer_1d[0,:] - cdef complex[::1] tmp = self.buffer_1d[0,:] - cdef double expect - cdef np.ndarray[int, ndim=1] colls - - # Collapses are computed first - for i in range(self.num_ops): - c_op = self.clcdr_ops[i] - expects[i] = c_op.expect_data(t, _dense_wrap(vec)).real - if expects[i].real > 0: - did_collapse = np.random.poisson(expects[i].real* dt) - num_coll += did_collapse - if did_collapse: - which = i - noise[i] = did_collapse * 1. - else: - noise[i] = 0. - - if num_coll == 0: - pass - elif num_coll == 1: - # Do one collapse - self.collapse(t, which, expects[which].real, vec, out) - copy(out,vec) - elif noise[which] == num_coll: - # Do many collapse of one sc_ops. - # Recompute the expectation value, but only to check for zero. 
- c_op = self.clcdr_ops[which] - for i in range(num_coll): - expect = c_op.expect_data(t, _dense_wrap(vec)).real - if expect * dt >= 1e-15: - self.collapse(t, which, expect, vec, out) - copy(out,vec) - elif num_coll >= 2: - # 2 or more collapses of different operators - # Ineficient, should be rare - coll = [] - for i in range(self.num_ops): - coll += [i] * int(noise[i]) - np.random.shuffle(coll) - for i in coll: - c_op = self.clcdr_ops[i] - expect = c_op.expect_data(t, _dense_wrap(vec)).real - if expect * dt >= 1e-15: - self.collapse(t, i, expect, vec, out) - copy(out,vec) - - copy(vec,tmp) - copy(vec,out) - self.d1(t, vec, tmp) - self.d1(t+dt, tmp, out) - _scale(0.5, out) - _axpy(0.5, tmp, out) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef complex expect(self, complex[::1] rho): - cdef complex e = 0. - cdef int k - for k in range(self.N_root): - e += rho[k*(self.N_root+1)] - return e - - @cython.boundscheck(False) - cdef void d1(self, double t, complex[::1] rho, complex[::1] out): - cdef int i - cdef QobjEvo c_op - cdef complex[::1] crho = self.func_buffer_1d[0,:] - cdef complex expect - self.L.matmul_data(t, _dense_wrap(rho), _dense_wrap(out)) - for i in range(self.num_ops): - c_op = self.cdcr_cdcl_ops[i] - _zero(crho) - c_op.matmul_data(t, _dense_wrap(rho), _dense_wrap(crho)) - expect = self.expect(crho) - _axpy(0.5*expect* self.dt, rho, out) - _axpy(-0.5* self.dt, crho, out) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef void d2(self, double t, complex[::1] rho, complex[:, ::1] out): - cdef int i - cdef QobjEvo c_op - cdef complex expect - for i in range(self.num_ops): - c_op = self.clcdr_ops[i] - c_op.matmul_data(t, _dense_wrap(rho), _dense_wrap(out[i,:])) - expect = self.expect(out[i,:]) - if expect.real >= 1e-15: - _zscale((1.+0j)/expect, out[i,:]) - else: - _zero(out[i,:]) - _axpy(-1, rho, out[i,:]) - - @cython.boundscheck(False) - @cython.wraparound(False) - 
@cython.cdivision(True) - cdef void collapse(self, double t, int which, double expect, - complex[::1] vec, complex[::1] out): - cdef QobjEvo c_op - c_op = self.clcdr_ops[which] - _zero(out) - c_op.matmul_data(t, _dense_wrap(vec), _dense_wrap(out)) - _zscale(1./expect, out) - -cdef class PmSMESolver(StochasticSolver): - """positive map for master equation""" - cdef object L - cdef QobjEvo pp_ops - cdef QobjEvo preLH - cdef QobjEvo postLH - cdef object sops - cdef object preops - cdef object postops - cdef object preops2 - cdef object postops2 - cdef int N_root - - def set_data(self, sso): - c_ops = sso.sops - self.l_vec = sso.pp.shape[0] - self.num_ops = len(c_ops) - self.preLH = sso.preLH - self.postLH = sso.postLH - self.pp_ops = sso.pp - self.sops = [op for op in sso.sops] - self.preops = [op for op in sso.preops] - self.postops = [op for op in sso.postops] - self.preops2 = [op for op in sso.preops2] - self.postops2 = [op for op in sso.postops2] - self.N_root = np.sqrt(self.l_vec) - - cdef void _normalize_inplace(self, complex[::1] vec): - _normalize_rho(vec) - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef void rouchon(self, double t, double dt, double[:] noise, - complex[::1] vec, complex[::1] out): - cdef complex[::1] dy = self.expect_buffer_1d[0,:] - cdef complex[::1] temp = self.buffer_1d[0,:] - cdef complex[::1] temp2 = self.buffer_1d[1,:] - cdef int i, j, k - cdef QobjEvo c_op, c_opj - cdef complex ddw, tr - _zero(out) - _zero(temp) - self.preLH.matmul_data(t, _dense_wrap(vec), _dense_wrap(temp)) - for i in range(self.num_ops): - c_op = self.sops[i] - dy[i] = c_op.expect_data(t, _dense_wrap(vec)) + noise[i] - c_op = self.preops[i] - _zero(temp2) - c_op.matmul_data(t, _dense_wrap(vec), _dense_wrap(temp2)) - _axpy(dy[i], temp2, temp) - - k = 0 - for i in range(self.num_ops): - for j in range(i, self.num_ops): - c_op = self.preops2[k] - if i == j: - ddw = (dy[i]*dy[j] - dt) *0.5 - else: - ddw = (dy[i]*dy[j]) - - _zero(temp2) - 
c_op.matmul_data(t, _dense_wrap(vec), _dense_wrap(temp2)) - _axpy(ddw, temp2, temp) - k += 1 - - self.postLH.matmul_data(t, _dense_wrap(temp), _dense_wrap(out)) - for i in range(self.num_ops): - dy[i] = conj(dy[i]) - c_op = self.postops[i] - _zero(temp2) - c_op.matmul_data(t, _dense_wrap(temp), _dense_wrap(temp2)) - _axpy(dy[i], temp2, out) - - k = 0 - for i in range(self.num_ops): - for j in range(i, self.num_ops): - c_op = self.postops2[k] - if i == j: - ddw = (dy[i]*dy[j] - dt) *0.5 - else: - ddw = (dy[i]*dy[j]) - _zero(temp2) - c_op.matmul_data(t, _dense_wrap(temp), _dense_wrap(temp2)) - _axpy(ddw, temp2, out) - k += 1 - - self.pp_ops.matmul_data(t, _dense_wrap(vec), _dense_wrap(out)) - tr = self.expect(out) - _zscale(1./tr, out) - - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef complex expect(self, complex[::1] rho): - cdef complex e = 0. - cdef int k - for k in range(self.N_root): - e += rho[k*(self.N_root+1)] - return e - - -cdef class GenericSSolver(StochasticSolver): - """support for user defined system""" - cdef object d1_func, d2_func - - def set_data(self, sso): - self.l_vec = sso.rho0.shape[0] - self.num_ops = len(sso.sops) - self.d1_func = sso.d1 - self.d2_func = sso.d2 - - - cdef void d1(self, double t, complex[::1] rho, complex[::1] out): - cdef np.ndarray[complex, ndim=1] in_np - cdef np.ndarray[complex, ndim=1] out_np - in_np = np.zeros((self.l_vec, ), dtype=complex) - copy(rho, in_np) - out_np = self.d1_func(t, in_np) - _axpy(self.dt, out_np, out) # d1 is += and * dt - - @cython.boundscheck(False) - cdef void d2(self, double t, complex[::1] rho, complex[:, ::1] out): - cdef np.ndarray[complex, ndim=1] in_np - cdef np.ndarray[complex, ndim=2] out_np - cdef int i - in_np = np.zeros((self.l_vec, ), dtype=complex) - copy(rho, in_np) - out_np = self.d2_func(t, in_np) - for i in range(self.num_ops): - copy(out_np[i,:], out[i,:]) diff --git a/qutip/solve/nonmarkov/transfertensor.py 
b/qutip/solve/nonmarkov/transfertensor.py deleted file mode 100644 index 561b7554e1..0000000000 --- a/qutip/solve/nonmarkov/transfertensor.py +++ /dev/null @@ -1,254 +0,0 @@ -# -*- coding: utf-8 -*- -# @author: Arne L. Grimsmo -# @email1: arne.grimsmo@gmail.com -# @organization: University of Sherbrooke - -""" -This module contains an implementation of the non-Markovian transfer tensor -method (TTM), introduced in [1]. - -[1] Javier Cerrillo and Jianshu Cao, Phys. Rev. Lett 112, 110401 (2014) -""" - -import numpy as np - - -from qutip import spre, vector_to_operator, operator_to_vector -from qutip.core import data as _data -from ..solver import Result, SolverOptions - - -class TTMSolverOptions: - """Class of options for the Transfer Tensor Method solver. - - Attributes - ---------- - dynmaps : list of :class:`qutip.Qobj` - List of precomputed dynamical maps (superoperators), - or a callback function that returns the - superoperator at a given time. - - times : array_like - List of times :math:`t_n` at which to calculate :math:`\\rho(t_n)` - - learningtimes : array_like - List of times :math:`t_k` to use as learning times if argument - `dynmaps` is a callback function. - - thres : float - Threshold for halting. Halts if :math:`||T_{n}-T_{n-1}||` is below - treshold. - - options : :class:`qutip.solver.SolverOptions` - Generic solver options. - """ - - def __init__(self, dynmaps=None, times=[], learningtimes=[], - thres=0.0, options=None): - if options is None: - options = SolverOptions() - - self.dynmaps = dynmaps - self.times = times - self.learningtimes = learningtimes - self.thres = thres - self.store_states = options['store_states'] - - -def ttmsolve(dynmaps, rho0, times, e_ops=[], learningtimes=None, tensors=None, - **kwargs): - """ - Solve time-evolution using the Transfer Tensor Method, based on a set of - precomputed dynamical maps. 
- - Parameters - ---------- - dynmaps : list of :class:`qutip.Qobj` - List of precomputed dynamical maps (superoperators), - or a callback function that returns the - superoperator at a given time. - - rho0 : :class:`qutip.Qobj` - Initial density matrix or state vector (ket). - - times : array_like - list of times :math:`t_n` at which to compute :math:`\\rho(t_n)`. - Must be uniformily spaced. - - e_ops : list of :class:`qutip.Qobj` / callback function - single operator or list of operators for which to evaluate - expectation values. - - learningtimes : array_like - list of times :math:`t_k` for which we have knowledge of the dynamical - maps :math:`E(t_k)`. - - tensors : array_like - optional list of precomputed tensors :math:`T_k` - - kwargs : dictionary - Optional keyword arguments. See - :class:`qutip.nonmarkov.transfertensor.TTMSolverOptions`. - - Returns - ------- - output: :class:`qutip.solver.Result` - An instance of the class :class:`qutip.solver.Result`. - """ - - opt = TTMSolverOptions(dynmaps=dynmaps, times=times, - learningtimes=learningtimes, **kwargs) - - diff = None - - if rho0.isket: - rho0 = rho0.proj() - - output = Result() - e_sops_data = [] - - if callable(e_ops): - n_expt_op = 0 - expt_callback = True - - else: - try: - tmp = e_ops[:] - del tmp - - n_expt_op = len(e_ops) - expt_callback = False - - if n_expt_op == 0: - # fall back on storing states - opt.store_states = True - - for op in e_ops: - e_sops_data.append(spre(op).data) - if op.isherm and rho0.isherm: - output.expect.append(np.zeros(len(times))) - else: - output.expect.append(np.zeros(len(times), dtype=complex)) - except TypeError: - raise TypeError("Argument 'e_ops' should be a callable or" + - "list-like.") - - if tensors is None: - tensors, diff = _generatetensors(dynmaps, learningtimes, opt=opt) - - if rho0.isoper: - # vectorize density matrix - rho0vec = operator_to_vector(rho0) - else: - # rho0 might be a super in which case we should not vectorize - rho0vec = rho0 - - K = 
len(tensors) - states = [rho0vec] - for n in range(1, len(times)): - # Set current state - state = None - for j in range(1, min(K, n + 1)): - tmp = tensors[j] * states[n - j] - state = tmp if state is None else tmp + state - # Append state to all states - states.append(state) - for i, r in enumerate(states): - if opt.store_states or expt_callback: - if r.type == 'operator-ket': - states[i] = vector_to_operator(r) - else: - states[i] = r - if expt_callback: - # use callback method - e_ops(times[i], states[i]) - rdata = _data.column_stack(r.data) - for m in range(n_expt_op): - if output.expect[m].dtype == complex: - output.expect[m][i] = _data.expect_super(e_sops_data[m], rdata) - else: - output.expect[m][i] =\ - _data.expect_super(e_sops_data[m], rdata).real - - output.solver = "ttmsolve" - output.times = times - - output.ttmconvergence = diff - - if opt.store_states: - output.states = states - - return output - - -def _generatetensors(dynmaps, learningtimes=None, **kwargs): - r""" - Generate the tensors :math:`T_1,\dots,T_K` from the dynamical maps - :math:`E(t_k)`. - - A stationary process is assumed, i.e., :math:`T_{n,k} = T_{n-k}`. - - Parameters - ---------- - dynmaps : list of :class:`qutip.Qobj` - List of precomputed dynamical maps (superoperators) at the times - specified in `learningtimes`, or a callback function that returns the - superoperator at a given time. - - learningtimes : array_like - list of times :math:`t_k` to use if argument `dynmaps` is a callback - function. - - kwargs : dictionary - Optional keyword arguments. See - :class:`qutip.nonmarkov.transfertensor.TTMSolverOptions`. 
- - Returns - ------- - Tlist: list of :class:`qutip.Qobj.` - A list of transfer tensors :math:`T_1,\dots,T_K` - """ - - # Determine if dynmaps is callable or list-like - if callable(dynmaps): - if learningtimes is None: - raise TypeError("Argument 'learnintimes' required when 'dynmaps'" + - "is a callback function.") - - def dynmapfunc(n): - return dynmaps(learningtimes[n]) - - Kmax = len(learningtimes) - else: - try: - tmp = dynmaps[:] - del tmp - - def dynmapfunc(n): - return dynmaps[n] - - Kmax = len(dynmaps) - except TypeError: - raise TypeError("Argument 'dynmaps' should be a callable or" + - "list-like.") - - if "opt" not in kwargs: - opt = TTMSolverOptions(dynmaps=dynmaps, learningtimes=learningtimes, - **kwargs) - else: - opt = kwargs['opt'] - - Tlist = [] - diff = [0.0] - for n in range(Kmax): - T = dynmapfunc(n) - for m in range(1, n): - T -= Tlist[n - m] * dynmapfunc(m) - Tlist.append(T) - if n > 1: - diff.append((Tlist[-1] - Tlist[-2]).norm()) - if diff[-1] < opt.thres: - # Below threshold for truncation - print('breaking', (Tlist[-1] - Tlist[-2]).norm(), n) - break - return Tlist, diff diff --git a/qutip/solve/optionsclass.py b/qutip/solve/optionsclass.py deleted file mode 100644 index b82de81217..0000000000 --- a/qutip/solve/optionsclass.py +++ /dev/null @@ -1,263 +0,0 @@ - - -class AllOptions: - """ - Class that serve as the parent of all other options class. - """ - # Temporary patch to make changes in solver's options minimal. - # TODO: Follow up PR based on #1812 should replace this. - def __init__(self): - self._isDefault = True - self._children = [] - self._fullname = "qutip.settings" - self._defaultInstance = self - - def _all_children(self): - optcls = [] - for child in self._children: - optcls += child._all_children() - return optcls - - -alloptions = AllOptions() - - -def optionsclass(name, parent=alloptions): - """ - Use as a decorator to register the options class to `qutip.settings`. 
- The default will be in added to qutip.setting.[parent].name. - - The options class should contain an `options` dictionary containing the - option and their default value. Readonly settings can be defined in - `read_only_options` dict. - - ``` - >>> import qutip - >>> @qutip.optionsclass("myopt", parent=qutip.settings.solver) - >>> class MyOpt(): - >>> options = {"opt1": True} - >>> read_only_options = {"hidden": 1} - - >>> qutip.settings.solver.myopt['opt1'] = False - >>> qutip.settings.solver.myopt['hidden'] - 1 - - >>> qutip.settings.solver.myopt['hidden'] = 2 - KeyError: "'hidden': Read-only value" - - >>> print(qutip.settings.solver.myopt) - qutip.settings.solver.myopt: - opt1 : False - hidden : 1 - - print(MyOpt(opt1=2)) - qutip.settings.solver.myopt: - opt1 : 2 - hidden : 1 - ``` - - It add the methods: - __init__(file=None, *, option=None... ) - Allow to create from data in files or from defaults with attributes - overwritten by keywords. - __repr__(): - Make a clean print for all 'options' and 'read_only_options'. - __getitem__(), __setitem__(): - Pass through to 'self.options' and 'self.read_only_options'. - Option in 'self.read_only_options' can not be set. - save(file='qutiprc'): - Save the object in a file. 'qutiprc' file is loaded as default when - loading qutip. - load(file): - Overwrite with options previously saved. - Loaded options will have the same type as the default from one of: - (bool, int, float, complex, str, object) - reset(): - If used on an instance, reset to the default in qutip.settings. - If used from qutip.settings..., go back to qutip's defaults. - - """ - # The real work is in _QtOptionsMaker - if isinstance(name, str): - # Called as - # @QtOptionsClass(name) - # class Options: - return _QtOptionsMaker(name, parent) - else: - # Called as - # @QtOptionsClass - # class Options: - return _QtOptionsMaker(name.__name__, parent)(name) - - -class _QtOptionsMaker: - """ - Apply the `optionsclass` decorator. 
- """ - def __init__(self, name, parent): - self.name = name - self.parent = parent - - def __call__(self, cls): - if hasattr(cls, "_isDefault"): - # Already a QtOptionsClass - return - - if not hasattr(cls, "read_only_options"): - cls.read_only_options = {} - # type to used when loading from file. - cls._types = {key: type(val) for key, val in cls.options.items()} - # Name in settings and in files - cls._name = self.name - # Name when printing - cls._fullname = ".".join([self.parent._fullname, self.name]) - # Is this instance the default for the other. - cls._isDefault = False - # Children in the settings tree - cls._children = [] - # Build the default instance - # Do it before setting __init__ since it use this default - self._make_default(cls) - - cls.__init__ = _make_init(cls.options) - cls.__repr__ = _repr - cls.__getitem__ = _getitem - cls.__setitem__ = _setitem - cls.__contains__ = _contains - cls.reset = _reset - cls.save = _save - cls.load = _load - cls._all_children = _all_children - - return cls - - def _make_default(self, cls): - """ Create the default and add it to the parent. - """ - default = cls() - default.options = cls.options.copy() - default._isDefault = True - default._children = [] - # The parent has the child twice: attribute and in a list. 
- self.parent._defaultInstance._children.append(default) - setattr(self.parent._defaultInstance, self.name, default) - cls._defaultInstance = default - - -def _make_init(all_set): - attributes_kw = ",\n ".join(["{}=None".format(key) - for key in all_set]) - attributes_set = "".join([" if {0} is not None:\n" - " self.options['{0}'] = {0}\n".format(key) - for key in all_set]) - code = f""" -def __init__(self, file='', *, - {attributes_kw}, - _child_instance=False, - **kwargs): - self.options = self._defaultInstance.options.copy() -{attributes_set} - if file: - self.load(file) - if hasattr(self, "extra_options"): - other_kw = dict() - for kw in kwargs: - if kw in self.extra_options: - self.options[kw] = kwargs[kw] - else: - other_kw[kw] = kwargs[kw] - kwargs = other_kw - self._children = [] - for child in self._defaultInstance._children: - self._children.append(child.__class__(file, _child_instance=True, - **kwargs)) - setattr(self, child._name, self._children[-1]) - kwargs = self._children[-1].left_over_kwargs - if _child_instance: - self.left_over_kwargs = kwargs - elif kwargs: - raise KeyError("Unknown options: " + - " ,".join(str(key) for key in kwargs)) -""" - ns = {} - exec(code, globals(), ns) - return ns["__init__"] - - -def _repr(self, _recursive=True): - out = self._fullname + ":\n" - longest = max(len(key) for key in self.options) - if self.read_only_options: - longest_readonly = max(len(key) for key in self.read_only_options) - longest = max((longest, longest_readonly)) - for key, val in self.options.items(): - if isinstance(val, str): - out += "{:{width}} : '{}'\n".format(key, val, - width=longest) - else: - out += "{:{width}} : {}\n".format(key, val, - width=longest) - for key, val in self.read_only_options.items(): - out += "{:{width}} : {}\n".format(key, val, - width=longest) - out += "\n" - if _recursive: - out += "".join([child.__repr__(_recursive) - for child in self._children]) - return out - - -def _reset(self, _recursive=False): - """Reset 
instance to the default value or the default to Qutip's default""" - if self._isDefault: - self.options = self.__class__.options.copy() - if _recursive: - for child in self._children: - child.reset() - else: - self.options = self._defaultInstance.options.copy() - - -def _all_children(self): - optcls = [self] - for child in self._children: - optcls += child._all_children() - return optcls - - -def _save(self, file="qutiprc", _recursive=False): - """Save to desired file. 'qutiprc' if not specified""" - import qutip.configrc as qrc - if _recursive: - optcls = self._all_children() - else: - optcls = [self] - qrc.write_rc_object(file, optcls) - - -def _load(self, file="qutiprc", _recursive=False): - """Load from desired file. 'qutiprc' if not specified""" - import qutip.configrc as qrc - if _recursive: - optcls = self._all_children() - else: - optcls = [self] - qrc.load_rc_object(file, optcls) - - -def _contains(self, key): - return key in self.read_only_options or key in self.options - - -def _getitem(self, key): - # Let the dict catch the KeyError - if key in self.read_only_options: - return self.read_only_options[key] - return self.options[key] - - -def _setitem(self, key, value): - # Let the dict catch the KeyError - if key in self.read_only_options: - raise KeyError(f"'{key}': Read-only value") - self.options[key] = value diff --git a/qutip/solve/parallel.py b/qutip/solve/parallel.py deleted file mode 100644 index 36e35b8531..0000000000 --- a/qutip/solve/parallel.py +++ /dev/null @@ -1,241 +0,0 @@ -""" -This function provides functions for parallel execution of loops and function -mappings, using the builtin Python module multiprocessing. 
-""" -__all__ = ['parallel_map', 'serial_map'] - -from scipy import array -import multiprocessing -from functools import partial -import os -import sys -import signal -from qutip.settings import settings as qset -from qutip.ui.progressbar import BaseProgressBar, TextProgressBar - - -if sys.platform == 'darwin': - Pool = multiprocessing.get_context('fork').Pool -else: - Pool = multiprocessing.Pool - - -def _task_wrapper(args): - try: - return args[0](*args[1]) - except KeyboardInterrupt: - os.kill(args[2], signal.SIGINT) - sys.exit(1) - - -def _task_wrapper_with_args(args, user_args): - try: - return args[0](*args[1], **user_args) - except KeyboardInterrupt: - os.kill(args[2], signal.SIGINT) - sys.exit(1) - - -def parfor(func, *args, **kwargs): - """Executes a multi-variable function in parallel on the local machine. - - Parallel execution of a for-loop over function `func` for multiple input - arguments and keyword arguments. - - .. note:: - - From QuTiP 3.1, we recommend to use :func:`qutip.parallel.parallel_map` - instead of this function. - - Parameters - ---------- - func : function_type - A function to run in parallel on the local machine. The function 'func' - accepts a series of arguments that are passed to the function as - variables. In general, the function can have multiple input variables, - and these arguments must be passed in the same order as they are - defined in the function definition. In addition, the user can pass - multiple keyword arguments to the function. - - The following keyword argument is reserved: - - num_cpus : int - Number of CPU's to use. Default uses maximum number of CPU's. - Performance degrades if num_cpus is larger than the physical CPU - count of your machine. - - Returns - ------- - result : list - A ``list`` with length equal to number of input parameters - containing the output from `func`. 
- - """ - os.environ['QUTIP_IN_PARALLEL'] = 'TRUE' - kw = _default_kwargs() - if 'num_cpus' in kwargs.keys(): - kw['num_cpus'] = kwargs['num_cpus'] - del kwargs['num_cpus'] - if len(kwargs) != 0: - task_func = partial(_task_wrapper_with_args, user_args=kwargs) - else: - task_func = _task_wrapper - - # if kw['num_cpus'] > qset.num_cpus: - # print("Requested number of CPUs (%s) " % kw['num_cpus'] + - # "is larger than physical number (%s)." % qset.num_cpus) - # print("Reduce 'num_cpus' for greater performance.") - - pool = Pool(processes=kw['num_cpus']) - args = [list(arg) for arg in args] - var = [[args[j][i] for j in range(len(args))] - for i in range(len(list(args[0])))] - try: - map_args = ((func, v, os.getpid()) for v in var) - par_return = list(pool.map(task_func, map_args)) - - pool.terminate() - pool.join() - os.environ['QUTIP_IN_PARALLEL'] = 'FALSE' - if isinstance(par_return[0], tuple): - par_return = [elem for elem in par_return] - num_elems = len(par_return[0]) - dt = [type(ii) for ii in par_return[0]] - return [array([elem[ii] for elem in par_return], dtype=dt[ii]) - for ii in range(num_elems)] - else: - return par_return - - except KeyboardInterrupt: - os.environ['QUTIP_IN_PARALLEL'] = 'FALSE' - pool.terminate() - - -def serial_map(task, values, task_args=tuple(), task_kwargs={}, **kwargs): - """ - Serial mapping function with the same call signature as parallel_map, for - easy switching between serial and parallel execution. This - is functionally equivalent to:: - - result = [task(value, *task_args, **task_kwargs) for value in values] - - This function work as a drop-in replacement of - :func:`qutip.parallel.parallel_map`. - - Parameters - ---------- - task : a Python function - The function that is to be called for each value in ``task_vec``. - values : array / list - The list or array of values for which the ``task`` function is to be - evaluated. - task_args : list / dictionary - The optional additional argument to the ``task`` function. 
- task_kwargs : list / dictionary - The optional additional keyword argument to the ``task`` function. - progress_bar : ProgressBar - Progress bar class instance for showing progress. - - Returns - -------- - result : list - The result list contains the value of - ``task(value, *task_args, **task_kwargs)`` for each - value in ``values``. - - """ - try: - progress_bar = kwargs['progress_bar'] - if progress_bar is True: - progress_bar = TextProgressBar() - except: - progress_bar = BaseProgressBar() - - progress_bar.start(len(values)) - results = [] - for n, value in enumerate(values): - progress_bar.update(n) - result = task(value, *task_args, **task_kwargs) - results.append(result) - progress_bar.finished() - - return results - - -def parallel_map(task, values, task_args=tuple(), task_kwargs={}, **kwargs): - """ - Parallel execution of a mapping of `values` to the function `task`. This - is functionally equivalent to:: - - result = [task(value, *task_args, **task_kwargs) for value in values] - - Parameters - ---------- - task : a Python function - The function that is to be called for each value in ``task_vec``. - values : array / list - The list or array of values for which the ``task`` function is to be - evaluated. - task_args : list / dictionary - The optional additional argument to the ``task`` function. - task_kwargs : list / dictionary - The optional additional keyword argument to the ``task`` function. - progress_bar : ProgressBar - Progress bar class instance for showing progress. - - Returns - -------- - result : list - The result list contains the value of - ``task(value, *task_args, **task_kwargs)`` for - each value in ``values``. 
- - """ - os.environ['QUTIP_IN_PARALLEL'] = 'TRUE' - kw = _default_kwargs() - if 'num_cpus' in kwargs: - kw['num_cpus'] = kwargs['num_cpus'] - - try: - progress_bar = kwargs['progress_bar'] - if progress_bar is True: - progress_bar = TextProgressBar() - except: - progress_bar = BaseProgressBar() - - progress_bar.start(len(values)) - nfinished = [0] - - def _update_progress_bar(x): - nfinished[0] += 1 - progress_bar.update(nfinished[0]) - - try: - pool = Pool(processes=kw['num_cpus']) - - async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs, - _update_progress_bar) - for value in values] - - while not all([ar.ready() for ar in async_res]): - for ar in async_res: - ar.wait(timeout=0.1) - - pool.terminate() - pool.join() - - except KeyboardInterrupt as e: - os.environ['QUTIP_IN_PARALLEL'] = 'FALSE' - pool.terminate() - pool.join() - raise e - - progress_bar.finished() - os.environ['QUTIP_IN_PARALLEL'] = 'FALSE' - return [ar.get() for ar in async_res] - - -def _default_kwargs(): - - settings = {'num_cpus': multiprocessing.cpu_count()} - return settings diff --git a/qutip/solve/pdpsolve.py b/qutip/solve/pdpsolve.py deleted file mode 100644 index 1a1d2964d2..0000000000 --- a/qutip/solve/pdpsolve.py +++ /dev/null @@ -1,575 +0,0 @@ -# -*- coding: utf-8 -*- - -import numpy as np -from numpy.random import RandomState - -from .. import ( - Qobj, expect, spre, spost, stack_columns, unstack_columns, liouvillian, -) -from ..core import data as _data -from .solver import Result, SolverOptions -from .parallel import serial_map -from ..ui.progressbar import TextProgressBar -from ..settings import settings -debug = settings.debug - - -class StochasticSolverOptions: - """ - Class of options for stochastic (piecewse deterministic process) PDP - solvers such as :func:`qutip.pdpsolve.ssepdpsolve`, - :func:`qutip.pdpsolve.smepdpsolve`. - Options can be specified either as arguments to the constructor:: - - sso = StochasticSolverOptions(nsubsteps=100, ...) 
- - or by changing the class attributes after creation:: - - sso = StochasticSolverOptions() - sso.nsubsteps = 1000 - - The stochastic solvers :func:`qutip.pdpsolve.ssepdpsolve` and - :func:`qutip.pdpsolve.smepdpsolve` all take the same keyword arguments as - the constructor of these class, and internally they use these arguments to - construct an instance of this class, so it is rarely needed to explicitly - create an instance of this class. - - Attributes - ---------- - - H : :class:`qutip.Qobj` - System Hamiltonian. - - state0 : :class:`qutip.Qobj` - Initial state vector (ket) or density matrix. - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - c_ops : list of :class:`qutip.Qobj` - List of deterministic collapse operators. - - sc_ops : list of :class:`qutip.Qobj` - List of stochastic collapse operators. Each stochastic collapse - operator will give a deterministic and stochastic contribution - to the equation of motion according to how the d1 and d2 functions - are defined. - - e_ops : list of :class:`qutip.Qobj` - Single operator or list of operators for which to evaluate - expectation values. - - m_ops : list of :class:`qutip.Qobj` - List of operators representing the measurement operators. The expected - format is a nested list with one measurement operator for each - stochastic increament, for each stochastic collapse operator. - - args : dict / list - List of dictionary of additional problem-specific parameters. - Implicit methods can adjust tolerance via args = {'tol':value} - - ntraj : int - Number of trajectors. - - nsubsteps : int - Number of sub steps between each time-spep given in `times`. - - d1 : function - Function for calculating the operator-valued coefficient to the - deterministic increment dt. - - d2 : function - Function for calculating the operator-valued coefficient to the - stochastic increment(s) dW_n, where n is in [0, d2_len[. 
- - d2_len : int (default 1) - The number of stochastic increments in the process. - - dW_factors : array - Array of length d2_len, containing scaling factors for each - measurement operator in m_ops. - - rhs : function - Function for calculating the deterministic and stochastic contributions - to the right-hand side of the stochastic differential equation. This - only needs to be specified when implementing a custom SDE solver. - - generate_A_ops : function - Function that generates a list of pre-computed operators or super- - operators. These precomputed operators are used in some d1 and d2 - functions. - - generate_noise : function - Function for generate an array of pre-computed noise signal. - - homogeneous : bool (True) - Wheter or not the stochastic process is homogenous. Inhomogenous - processes are only supported for poisson distributions. - - solver : string - Name of the solver method to use for solving the stochastic - equations. Valid values are: - 1/2 order algorithms: 'euler-maruyama', 'fast-euler-maruyama', - 'pc-euler' is a predictor-corrector method which is more - stable than explicit methods, - 1 order algorithms: 'milstein', 'fast-milstein', 'platen', - 'milstein-imp' is semi-implicit Milstein method, - 3/2 order algorithms: 'taylor15', - 'taylor15-imp' is semi-implicit Taylor 1.5 method. - Implicit methods can adjust tolerance via args = {'tol':value}, - default is {'tol':1e-6} - - method : string ('homodyne', 'heterodyne', 'photocurrent') - The name of the type of measurement process that give rise to the - stochastic equation to solve. Specifying a method with this keyword - argument is a short-hand notation for using pre-defined d1 and d2 - functions for the corresponding stochastic processes. - - distribution : string ('normal', 'poisson') - The name of the distribution used for the stochastic increments. 
- - store_measurements : bool (default False) - Whether or not to store the measurement results in the - :class:`qutip.solver.Result` instance returned by the solver. - - noise : array - Vector specifying the noise. - - normalize : bool (default True) - Whether or not to normalize the wave function during the evolution. - - options : :class:`qutip.solver.SolverOptions` - Generic solver options. - - map_func: function - A map function or managing the calls to single-trajactory solvers. - - map_kwargs: dictionary - Optional keyword arguments to the map_func function function. - - progress_bar : :class:`qutip.ui.BaseProgressBar` - Optional progress bar class instance. - """ - def __init__(self, H=None, state0=None, times=None, c_ops=[], sc_ops=[], - e_ops=[], m_ops=None, args=None, ntraj=1, nsubsteps=1, - d1=None, d2=None, d2_len=1, dW_factors=None, rhs=None, - generate_A_ops=None, generate_noise=None, homogeneous=True, - solver=None, method=None, distribution='normal', - store_measurement=False, noise=None, normalize=True, - options=None, progress_bar=None, map_func=None, - map_kwargs=None): - - if options is None: - options = SolverOptions() - - if progress_bar is None: - progress_bar = TextProgressBar() - - self.H = H - self.d1 = d1 - self.d2 = d2 - self.d2_len = d2_len - self.dW_factors = dW_factors # if dW_factors else np.ones(d2_len) - self.state0 = state0 - self.times = times - self.c_ops = c_ops - self.sc_ops = sc_ops - self.e_ops = e_ops - self.m_ops = m_ops - - self.ntraj = ntraj - self.nsubsteps = nsubsteps - self.solver = solver - self.method = method - self.distribution = distribution - self.homogeneous = homogeneous - self.rhs = rhs - self.options = options - self.progress_bar = progress_bar - self.store_measurement = store_measurement - self.store_states = options['store_states'] - self.noise = noise - self.args = args - self.normalize = normalize - - self.generate_noise = generate_noise - self.generate_A_ops = generate_A_ops - - if self.ntraj > 1 and 
map_func: - self.map_func = map_func - else: - self.map_func = serial_map - - self.map_kwargs = map_kwargs if map_kwargs is not None else {} - - # Does any operator depend on time? - self.td = False - if not isinstance(H, Qobj): - self.td = True - for ops in c_ops: - if not isinstance(ops, Qobj): - self.td = True - for ops in sc_ops: - if not isinstance(ops, Qobj): - self.td = True - - -def main_ssepdpsolve(H, psi0, times, c_ops, e_ops, **kwargs): - """ - A stochastic (piecewse deterministic process) PDP solver for wavefunction - evolution. For most purposes, use :func:`qutip.mcsolve` instead for quantum - trajectory simulations. - - Parameters - ---------- - - H : :class:`qutip.Qobj` - System Hamiltonian. - - psi0 : :class:`qutip.Qobj` - Initial state vector (ket). - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - c_ops : list of :class:`qutip.Qobj` - Deterministic collapse operator which will contribute with a standard - Lindblad type of dissipation. - - e_ops : list of :class:`qutip.Qobj` / callback function single - single operator or list of operators for which to evaluate - expectation values. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. - - Returns - ------- - - output: :class:`qutip.solver.Result` - - An instance of the class :class:`qutip.solver.Result`. - """ - if isinstance(e_ops, dict): - e_ops_dict = e_ops - e_ops = [e for e in e_ops.values()] - else: - e_ops_dict = None - - sso = StochasticSolverOptions(H=H, state0=psi0, times=times, c_ops=c_ops, - e_ops=e_ops, **kwargs) - - res = _ssepdpsolve_generic(sso, sso.options, sso.progress_bar) - - if e_ops_dict: - res.expect = {e: res.expect[n] - for n, e in enumerate(e_ops_dict.keys())} - return res - - -def main_smepdpsolve(H, rho0, times, c_ops, e_ops, **kwargs): - """ - A stochastic (piecewse deterministic process) PDP solver for density matrix - evolution. 
- - Parameters - ---------- - - H : :class:`qutip.Qobj` - System Hamiltonian. - - rho0 : :class:`qutip.Qobj` - Initial density matrix. - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - c_ops : list of :class:`qutip.Qobj` - Deterministic collapse operator which will contribute with a standard - Lindblad type of dissipation. - - sc_ops : list of :class:`qutip.Qobj` - List of stochastic collapse operators. Each stochastic collapse - operator will give a deterministic and stochastic contribution - to the equation of motion according to how the d1 and d2 functions - are defined. - - e_ops : list of :class:`qutip.Qobj` / callback function single - single operator or list of operators for which to evaluate - expectation values. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. - - Returns - ------- - - output: :class:`qutip.solver.Result` - - An instance of the class :class:`qutip.solver.Result`. - """ - if isinstance(e_ops, dict): - e_ops_dict = e_ops - e_ops = [e for e in e_ops.values()] - else: - e_ops_dict = None - - sso = StochasticSolverOptions(H=H, state0=rho0, times=times, c_ops=c_ops, - e_ops=e_ops, **kwargs) - - res = _smepdpsolve_generic(sso, sso.options, sso.progress_bar) - - if e_ops_dict: - res.expect = {e: res.expect[n] - for n, e in enumerate(e_ops_dict.keys())} - return res - - -# ----------------------------------------------------------------------------- -# Generic parameterized stochastic SE PDP solver -# -def _ssepdpsolve_generic(sso, options, progress_bar): - """ - For internal use. See ssepdpsolve. 
- """ - N_store = len(sso.times) - N_substeps = sso.nsubsteps - dt = (sso.times[1] - sso.times[0]) / N_substeps - nt = sso.ntraj - - data = Result() - data.solver = "sepdpsolve" - data.times = sso.tlist - data.expect = np.zeros((len(sso.e_ops), N_store), dtype=complex) - data.ss = np.zeros((len(sso.e_ops), N_store), dtype=complex) - data.jump_times = [] - data.jump_op_idx = [] - - # effective hamiltonian for deterministic part - Heff = sso.H - for c in sso.c_ops: - Heff += -0.5j * c.dag() * c - - progress_bar.start(sso.ntraj) - for n in range(sso.ntraj): - progress_bar.update(n) - psi_t = _data.dense.fast_from_numpy(sso.state0.full().ravel()) - - states_list, jump_times, jump_op_idx = \ - _ssepdpsolve_single_trajectory(data, Heff, dt, sso.times, - N_store, N_substeps, - psi_t, sso.state0.dims, - sso.c_ops, sso.e_ops) - - data.states.append(states_list) - data.jump_times.append(jump_times) - data.jump_op_idx.append(jump_op_idx) - - progress_bar.finished() - - # average density matrices - if options['average_states'] and np.any(data.states): - data.states = [sum([data.states[m][n] for m in range(nt)]).unit() - for n in range(len(data.times))] - - # average - data.expect = data.expect / nt - - # standard error - if nt > 1: - data.se = (data.ss - nt * (data.expect ** 2)) / (nt * (nt - 1)) - else: - data.se = None - - # convert complex data to real if hermitian - data.expect = [np.real(data.expect[n, :]) - if e.isherm else data.expect[n, :] - for n, e in enumerate(sso.e_ops)] - - return data - - -def _ssepdpsolve_single_trajectory(data, Heff, dt, times, N_store, N_substeps, psi_t, dims, c_ops, e_ops): - """ - Internal function. See ssepdpsolve. 
- """ - states_list = [] - - phi_t = psi_t.copy() - - prng = RandomState() # todo: seed it - r_jump, r_op = prng.rand(2) - - jump_times = [] - jump_op_idx = [] - - for t_idx, t in enumerate(times): - - if e_ops: - for e_idx, e in enumerate(e_ops): - s = _data.expect(e, psi_t) - data.expect[e_idx, t_idx] += s - data.ss[e_idx, t_idx] += s ** 2 - else: - states_list.append(Qobj(psi_t.to_array(), dims=dims)) - - for j in range(N_substeps): - - if _data.norm.l2(phi_t) ** 2 < r_jump: - # jump occurs - p = np.array([ - _data.norm.l2(_data.matmul(c.data, psi_t)) ** 2 - for c in c_ops - ]) - p = np.cumsum(p / np.sum(p)) - n = np.where(p >= r_op)[0][0] - - # apply jump - psi_t = _data.matmul(c_ops[n].data, psi_t) - psi_t /= _data.norm.l2(psi_t) - phi_t = psi_t.copy() - - # store info about jump - jump_times.append(times[t_idx] + dt * j) - jump_op_idx.append(n) - - # get new random numbers for next jump - r_jump, r_op = prng.rand(2) - - # deterministic evolution wihtout correction for norm decay - dphi_t = (-1j*dt) * _data.matmul(Heff.data, phi_t) - - # deterministic evolution with correction for norm decay - dpsi_t = (-1j*dt) * _data.matmul(Heff.data, psi_t) - A = 0.5 * np.sum([ - _data.norm.l2(_data.matmul(c.data, psi_t)) ** 2 - for c in c_ops - ]) - dpsi_t = _data.add(dpsi_t, psi_t, scale=dt*A) - - # increment wavefunctions - phi_t = _data.add(phi_t, dphi_t) - psi_t = _data.add(psi_t, dpsi_t) - - # ensure that normalized wavefunction remains normalized - # this allows larger time step than otherwise would be possible - psi_t = _data.mul(psi_t, 1/_data.norm.l2(psi_t)) - - return states_list, jump_times, jump_op_idx - - -# ----------------------------------------------------------------------------- -# Generic parameterized stochastic ME PDP solver -# -def _smepdpsolve_generic(sso, options, progress_bar): - """ - For internal use. See smepdpsolve. 
- """ - N_store = len(sso.times) - N_substeps = sso.nsubsteps - dt = (sso.times[1] - sso.times[0]) / N_substeps - nt = sso.ntraj - - data = Result() - data.solver = "smepdpsolve" - data.times = sso.times - data.expect = np.zeros((len(sso.e_ops), N_store), dtype=complex) - data.jump_times = [] - data.jump_op_idx = [] - - # Liouvillian for the deterministic part. - # needs to be modified for TD systems - L = liouvillian(sso.H, sso.c_ops) - - progress_bar.start(sso.ntraj) - - for n in range(sso.ntraj): - progress_bar.update(n) - rho_t = _data.dense.fast_from_numpy(sso.rho0.full()) - rho_t = _data.column_stack_dense(rho_t) - - states_list, jump_times, jump_op_idx = \ - _smepdpsolve_single_trajectory(data, L, dt, sso.times, - N_store, N_substeps, - rho_t, sso.rho0.dims, - sso.c_ops, sso.e_ops) - - data.states.append(states_list) - data.jump_times.append(jump_times) - data.jump_op_idx.append(jump_op_idx) - - progress_bar.finished() - - # average density matrices - if options['average_states'] and np.any(data.states): - data.states = [sum([data.states[m][n] for m in range(nt)]).unit() - for n in range(len(data.times))] - - # average - data.expect = data.expect / sso.ntraj - - # standard error - if nt > 1: - data.se = (data.ss - nt * (data.expect ** 2)) / (nt * (nt - 1)) - else: - data.se = None - - return data - -def _smepdpsolve_single_trajectory(data, L, dt, times, N_store, N_substeps, rho_t, dims, c_ops, e_ops): - """ - Internal function. See smepdpsolve. 
- """ - states_list = [] - - rho_t = np.copy(rho_t) - sigma_t = np.copy(rho_t) - - prng = RandomState() # todo: seed it - r_jump, r_op = prng.rand(2) - - jump_times = [] - jump_op_idx = [] - - for t_idx, t in enumerate(times): - - if e_ops: - for e_idx, e in enumerate(e_ops): - data.expect[e_idx, t_idx] +=\ - _data.expect_super(e, rho_t) - else: - states_list.append(Qobj(unstack_columns(rho_t), dims=dims)) - - for j in range(N_substeps): - - if sigma_t.norm() < r_jump: - # jump occurs - p = np.array([expect(c.dag() * c, rho_t) for c in c_ops]) - p = np.cumsum(p / np.sum(p)) - n = np.where(p >= r_op)[0][0] - - # apply jump - rho_t = c_ops[n] * rho_t * c_ops[n].dag() - rho_t /= expect(c_ops[n].dag() * c_ops[n], rho_t) - sigma_t = np.copy(rho_t) - - # store info about jump - jump_times.append(times[t_idx] + dt * j) - jump_op_idx.append(n) - - # get new random numbers for next jump - r_jump, r_op = prng.rand(2) - - # deterministic evolution wihtout correction for norm decay - dsigma_t = _data.matmul(L.data, sigma_t) * dt - - # deterministic evolution with correction for norm decay - drho_t = _data.matmul(L.data, rho_t) * dt - - # increment density matrices - sigma_t = _data.add(sigma_t, dsigma_t) - rho_t = _data.add(rho_t, drho_t) - - return states_list, jump_times, jump_op_idx diff --git a/qutip/solve/solver.py b/qutip/solve/solver.py deleted file mode 100644 index 08a53fb64e..0000000000 --- a/qutip/solve/solver.py +++ /dev/null @@ -1,922 +0,0 @@ -from __future__ import print_function - -__all__ = ['SolverOptions', 'ExpectOps'] - -import os -import sys -import warnings -import datetime -import numpy as np -from collections import OrderedDict -from types import FunctionType, BuiltinFunctionType - -from .. 
import __version__, Qobj, QobjEvo -from .optionsclass import optionsclass -from ..core import data as _data - -solver_safe = {} - - -class SolverSystem(): - pass - - -class ExpectOps: - """ - Contain and compute expectation values - """ - def __init__(self, e_ops=[], super_=False): - # take care of expectation values, if any - self.isfunc = False - self.e_ops_dict = False - self.raw_e_ops = e_ops - self.e_ops_qoevo = [] - self.e_num = 0 - self.e_ops_isherm = [] - - if isinstance(e_ops, (Qobj, QobjEvo)): - e_ops = [e_ops] - elif isinstance(e_ops, dict): - self.e_ops_dict = e_ops - e_ops = [e for e in e_ops.values()] - - self.e_ops = e_ops - if isinstance(e_ops, list): - self.e_num = len(e_ops) - self.e_ops_isherm = [e.isherm for e in e_ops] - if not super_: - self.e_ops_qoevo = np.array([QobjEvo(e) for e in e_ops], - dtype=object) - else: - self.e_ops_qoevo = np.array([QobjEvo(spre(e)) for e in e_ops], - dtype=object) - elif callable(e_ops): - self.isfunc = True - self.e_num = 1 - - def init(self, tlist): - self.tlist = tlist - if self.isfunc: - self.raw_out = [] - else: - self.raw_out = np.zeros((self.e_num, len(tlist)), dtype=complex) - - def check_dims(self, dims): - if not self.isfunc: - for op in self.e_ops_qoevo: - if isinstance(op, QobjEvo) and op.dims[1] != dims[0]: - raise TypeError(f"e_ops dims ({op.dims}) are not " - f"compatible with the system's ({dims})") - - def copy(self): - out = ExpectOps.__new__(ExpectOps) - out.isfunc = self.isfunc - out.e_ops_dict = self.e_ops_dict - out.raw_e_ops = self.raw_e_ops - out.e_ops = self.e_ops - out.e_num = self.e_num - out.e_ops_isherm = self.e_ops_isherm - out.e_ops_qoevo = self.e_ops_qoevo - return out - - def step(self, iter_, state): - if self.isfunc: - self.raw_out.append(self.e_ops(t, state)) - else: - state = _data.dense.fast_from_numpy(state) - t = self.tlist[iter_] - for ii in range(self.e_num): - if isinstance(self.e_ops_qoevo[ii], QobjEvo): - self.raw_out[ii, iter_] = \ - 
self.e_ops_qoevo[ii].expect_data(t, state) - elif callable(self.e_ops_qoevo[ii]): - self.raw_out[ii, iter_] = \ - self.e_ops_qoevo[ii](t, state) - - def finish(self): - if self.isfunc: - result = self.raw_out - else: - result = [] - for ii in range(self.e_num): - if self.e_ops_isherm[ii]: - result.append(np.real(self.raw_out[ii, :])) - else: - result.append(self.raw_out[ii, :]) - if self.e_ops_dict: - result = {e: result[n] - for n, e in enumerate(self.e_ops_dict.keys())} - return result - - def __eq__(self, other): - if isinstance(other, ExpectOps): - other = other.raw_e_ops - return self.raw_e_ops == other - - def __ne__(self, other): - return not (self == other) - - def __bool__(self): - return bool(self.e_num) - - -@optionsclass("solver") -class SolverOptions: - """ - Class of options for evolution solvers such as :func:`qutip.mesolve` and - :func:`qutip.mcsolve`. Options can be specified either as arguments to the - constructor:: - - opts = SolverOptions(order=10, ...) - - or by changing the class attributes after creation:: - - opts = SolverOptions() - opts.order = 10 - - Returns options class to be used as options in evolution solvers. - - The default can be changed by:: - - qutip.settings.solver['order'] = 10 - - Supported options: - - atol : float {1e-8} - Absolute tolerance. - rtol : float {1e-6} - Relative tolerance. - method : str {'adams','bdf'} - Integration method. - order : int {12} - Order of integrator (<=12 'adams', <=5 'bdf') - nsteps : int {2500} - Max. number of internal steps/call. - first_step : float {0} - Size of initial step (0 = automatic). - min_step : float {0} - Minimum step size (0 = automatic). - max_step : float {0} - Maximum step size (0 = automatic) - tidy : bool {True,False} - Tidyup Hamiltonian and initial state by removing small terms. - average_states : bool {False} - Average states values over trajectories in stochastic solvers. 
- average_expect : bool {True} - Average expectation values over trajectories for stochastic solvers. - ntraj : int {500} - Number of trajectories in stochastic solvers. - store_final_state : bool {False, True} - Whether or not to store the final state of the evolution in the - result class. - store_states : bool {False, True} - Whether or not to store the state vectors or density matrices in the - result class, even if expectation values operators are given. If no - expectation are provided, then states are stored by default and this - option has no effect. - """ - options = { - # Absolute tolerance (default = 1e-8) - "atol": 1e-8, - # Relative tolerance (default = 1e-6) - "rtol": 1e-6, - # Integration method (default = 'adams', for stiff 'bdf') - "method": 'adams', - # Maximum order used by integrator (<=12 for 'adams', <=5 for 'bdf') - "order": 12, - # Max. number of internal steps/call - "nsteps": 1000, - # Size of initial step (0 = determined by solver) - "first_step": 0, - # Max step size (0 = determined by solver) - "max_step": 0, - # Minimal step size (0 = determined by solver) - "min_step": 0, - # Average expectation values over trajectories (default = True) - "average_expect": True, - # average expectation values - "average_states": False, - # tidyup Hamiltonian before calculation (default = True) - "tidy": True, - # Number of trajectories (default = 500) - "ntraj": 500, - "gui": False, - # store final state? - "store_final_state": False, - # store states even if expectation operators are given? - "store_states": False, - # average mcsolver density matricies assuming steady state evolution - "steady_state_average": False, - # Normalize output of solvers - # (turned off for batch unitary propagator mode) - "normalize_output": True - } - - -@optionsclass("mcsolve", SolverOptions) -class McOptions: - """ - Class of options for evolution solvers such as :func:`qutip.mesolve` and - :func:`qutip.mcsolve`. 
Options can be specified either as arguments to the - constructor:: - - opts = SolverOptions(norm_tol=1e-3, ...) - - or by changing the class attributes after creation:: - - opts = SolverOptions() - opts.['norm_tol'] = 1e-3 - - Returns options class to be used as options in evolution solvers. - - The default can be changed by:: - - qutip.settings.options.montecarlo['norm_tol'] = 1e-3 - - Options - ------- - - norm_tol : float {1e-4} - Tolerance used when finding wavefunction norm in mcsolve. - norm_t_tol : float {1e-6} - Tolerance used when finding wavefunction time in mcsolve. - norm_steps : int {5} - Max. number of steps used to find wavefunction norm to within norm_tol - in mcsolve. - mc_corr_eps : float {1e-10} - Arbitrarily small value for eliminating any divide-by-zero errors in - correlation calculations when using mcsolve. - """ - options = { - # Tolerance for wavefunction norm (mcsolve only) - "norm_tol": 1e-4, - # Tolerance for collapse time precision (mcsolve only) - "norm_t_tol": 1e-6, - # Max. number of steps taken to find wavefunction norm to within - # norm_tol (mcsolve only) - "norm_steps": 5, - # small value in mc solver for computing correlations - "mc_corr_eps": 1e-10, - } - - -class Result(): - """Class for storing simulation results from any of the dynamics solvers. - - Attributes - ---------- - - solver : str - Which solver was used [e.g., 'mesolve', 'mcsolve', 'brmesolve', ...] - times : list/array - Times at which simulation data was collected. - expect : list/array - Expectation values (if requested) for simulation. - states : array - State of the simulation (density matrix or ket) evaluated at ``times``. - num_expect : int - Number of expectation value operators in simulation. - num_collapse : int - Number of collapse operators in simualation. - ntraj : int/list - Number of trajectories (for stochastic solvers). A list indicates - that averaging of expectation values was done over a subset of total - number of trajectories. 
- col_times : list - Times at which state collpase occurred. Only for Monte Carlo solver. - col_which : list - Which collapse operator was responsible for each collapse in - ``col_times``. Only for Monte Carlo solver. - - """ - def __init__(self): - self.solver = None - self.times = None - self.states = [] - self.expect = [] - self.num_expect = 0 - self.num_collapse = 0 - self.ntraj = None - self.seeds = None - self.col_times = None - self.col_which = None - - def __str__(self): - s = "Result object " - if self.solver: - s += "with " + self.solver + " data.\n" - else: - s += "missing solver information.\n" - s += "-" * (len(s) - 1) + "\n" - if self.states is not None and len(self.states) > 0: - s += "states = True\n" - elif self.expect is not None and len(self.expect) > 0: - s += "expect = True\nnum_expect = " + str(self.num_expect) + ", " - else: - s += "states = True, expect = True\n" + \ - "num_expect = " + str(self.num_expect) + ", " - s += "num_collapse = " + str(self.num_collapse) - if self.solver == 'mcsolve': - s += ", ntraj = " + str(self.ntraj) - return s - - def __repr__(self): - return self.__str__() - - def __getstate__(self): - # defines what happens when Qobj object gets pickled - self.__dict__.update({'qutip_version': __version__[:5]}) - return self.__dict__ - - def __setstate__(self, state): - # defines what happens when loading a pickled Qobj - if 'qutip_version' in state.keys(): - del state['qutip_version'] - (self.__dict__).update(state) - - -# %%%%%%%%%%% remove ? -class SolverConfiguration(): - def __init__(self): - - self.cgen_num = 0 - - self.reset() - - def reset(self): - # General stuff - self.tlist = None # evaluations times - self.ntraj = None # number / list of trajectories - self.options = None # options for solvers - self.norm_tol = None # tolerance for wavefunction norm - self.norm_steps = None # max. 
number of steps to take in finding - # Initial state stuff - self.psi0 = None # initial state - self.psi0_dims = None # initial state dims - self.psi0_shape = None # initial state shape - - # flags for setting time-dependence, collapse ops, and number of times - # codegen has been run - self.cflag = 0 # Flag signaling collapse operators - self.tflag = 0 # Flag signaling time-dependent problem - - self.soft_reset() - - def soft_reset(self): - # Hamiltonian stuff - self.h_td_inds = [] # indicies of time-dependent Hamiltonian operators - self.h_tdterms = [] # List of td strs and funcs - self.h_data = None # List of sparse matrix data - self.h_ind = None # List of sparse matrix indices - self.h_ptr = None # List of sparse matrix ptrs - - # Expectation operator stuff - self.e_num = 0 # number of expect ops - self.e_ops_data = [] # expect op data - self.e_ops_ind = [] # expect op indices - self.e_ops_ptr = [] # expect op indptrs - self.e_ops_isherm = [] # expect op isherm - - # Collapse operator stuff - self.c_num = 0 # number of collapse ops - self.c_const_inds = [] # indicies of constant collapse operators - self.c_td_inds = [] # indicies of time-dependent collapse operators - self.c_ops_data = [] # collapse op data - self.c_ops_ind = [] # collapse op indices - self.c_ops_ptr = [] # collapse op indptrs - self.c_args = [] # store args for time-dependent collapse func. 
- - # Norm collapse operator stuff - self.n_ops_data = [] # norm collapse op data - self.n_ops_ind = [] # norm collapse op indices - self.n_ops_ptr = [] # norm collapse op indptrs - - # holds executable strings for time-dependent collapse evaluation - self.col_expect_code = None - self.col_spmv_code = None - - # hold stuff for function list based time dependence - self.h_td_inds = [] - self.h_td_data = [] - self.h_td_ind = [] - self.h_td_ptr = [] - self.h_funcs = None - self.h_func_args = None - self.c_funcs = None - self.c_func_args = None - - # time-dependent (TD) function stuff - self.tdfunc = None # Placeholder for TD RHS function. - self.tdname = None # Name of td .pyx file - self.colspmv = None # Placeholder for TD col-spmv function. - self.colexpect = None # Placeholder for TD col_expect function. - self.string = None # Holds string of variables passed to td solver - - -def _format_time(t, tt=None, ttt=None): - time_str = str(datetime.timedelta(seconds=t)) - if tt is not None and ttt is not None: - sect_percent = 100*t/tt - solve_percent = 100*t/ttt - time_str += " ({:03.2f}% section, {:03.2f}% total)".format( - sect_percent, solve_percent) - elif tt is not None: - sect_percent = 100*t/tt - time_str += " ({:03.2f}% section)".format(sect_percent) - - elif ttt is not None: - solve_percent = 100*t/ttt - time_str += " ({:03.2f}% total)".format(solve_percent) - - return time_str - - -class Stats: - """ - Statistical information on the solver performance - Statistics can be grouped into sections. 
- If no section names are given in the the contructor, then all statistics - will be added to one section 'main' - - Parameters - ---------- - section_names : list - list of keys that will be used as keys for the sections - These keys will also be used as names for the sections - The text in the output can be overidden by setting the header property - of the section - If no names are given then one section called 'main' is created - - Attributes - ---------- - sections : OrderedDict of _StatsSection - These are the sections that are created automatically on instantiation - or added using add_section - - header : string - Some text that will be used as the heading in the report - By default there is None - - total_time : float - Time in seconds for the solver to complete processing - Can be None, meaning that total timing percentages will be reported - """ - - def __init__(self, section_names=None): - self._def_section_name = 'main' - self.sections = OrderedDict() - self.total_time = None - self.header = None - if isinstance(section_names, list): - c = 0 - for name in section_names: - self.sections[name] = _StatsSection(name, self) - if c == 0: - self._def_section_name = name - c += 1 - - else: - self.sections[self._def_section_name] = \ - _StatsSection(self._def_section_name) - - def _get_section(self, section): - if section is None: - return self.sections[self._def_section_name] - elif isinstance(section, _StatsSection): - return section - else: - sect = self.sections.get(section, None) - if sect is None: - raise ValueError("Unknown section {}".format(section)) - else: - return sect - - def add_section(self, name): - """ - Add another section with the given name - - Parameters - ---------- - name : string - will be used as key for sections dict - will also be the header for the section - - Returns - ------- - section : :class:`_StatsSection` - The new section - """ - sect = _StatsSection(name, self) - self.sections[name] = sect - return sect - - def 
add_count(self, key, value, section=None): - """ - Add value to count. If key does not already exist in section then - it is created with this value. - If key already exists it is increased by the give value - value is expected to be an integer - - Parameters - ---------- - key : string - key for the section.counts dictionary - reusing a key will result in numerical addition of value - - value : int - Initial value of the count, or added to an existing count - - section : string or :class:`_StatsSection` - Section which to add the count to. - If None given, the default (first) section will be used - """ - - self._get_section(section).add_count(key, value) - - def add_timing(self, key, value, section=None): - """ - Add value to timing. If key does not already exist in section then - it is created with this value. - If key already exists it is increased by the give value - value is expected to be a float, and given in seconds. - - Parameters - ---------- - key : string - key for the section.timings dictionary - reusing a key will result in numerical addition of value - - value : int - Initial value of the timing, or added to an existing timing - - section: string or `class` : _StatsSection - Section which to add the timing to. - If None given, the default (first) section will be used - """ - self._get_section(section).add_timing(key, value) - - def add_message(self, key, value, section=None, sep=";"): - """ - Add value to message. If key does not already exist in section then - it is created with this value. 
- If key already exists the value is added to the message - The value will be converted to a string - - Parameters - ---------- - key : string - key for the section.messages dictionary - reusing a key will result in concatenation of value - - value : int - Initial value of the message, or added to an existing message - - sep : string - Message will be prefixed with this string when concatenating - - section: string or `class` : _StatsSection - Section which to add the message to. - If None given, the default (first) section will be used - """ - self._get_section(section).add_message(key, value, sep=sep) - - def set_total_time(self, value, section=None): - """ - Sets the total time for the complete solve or for a specific section - value is expected to be a float, and given in seconds - - Parameters - ---------- - value : float - Time in seconds to complete the solver section - - section : string or `class` : _StatsSection - Section which to set the total_time for - If None given, the total_time for complete solve is set - """ - if not isinstance(value, float): - try: - value = float(value) - except: - raise TypeError("value is expected to be a float") - - if section is None: - self.total_time = value - else: - sect = self._get_section(section) - sect.total_time = value - - def report(self, output=sys.stdout): - """ - Report the counts, timings and messages from the sections. - Sections are reported in the order that the names were supplied - in the constructor. - The counts, timings and messages are reported in the order that they - are added to the sections - The output can be written to anything that supports a write method, - e.g. 
a file or the console (default) - The output is intended to in markdown format - - Parameters - ---------- - output : stream - file or console stream - anything that support write - where - the output will be written - """ - - if not hasattr(output, 'write'): - raise TypeError("output must have a write method") - - if self.header: - output.write("{}\n{}\n".format(self.header, - ("="*len(self.header)))) - for name, sect in self.sections.items(): - sect.report(output) - - if self.total_time is not None: - output.write("\nSummary\n-------\n") - output.write("{}\t solver total time\n".format( - _format_time(self.total_time))) - - def clear(self): - """ - Clear counts, timings and messages from all sections - """ - for sect in self.sections.values(): - sect.clear() - self.total_time = None - - -class _StatsSection(object): - """ - Not intended to be directly instantiated - This is the type for the SolverStats.sections values - - The method parameter descriptions are the same as for those the parent - with the same method name - - Parameters - ---------- - name : string - key for the parent sections dictionary - will also be used as the header - - parent : `class` : SolverStats - The container for all the sections - - Attributes - ---------- - name : string - key for the parent sections dictionary - will also be used as the header - - parent : `class` : SolverStats - The container for all the sections - - header : string - Used as heading for section in report - - counts : OrderedDict - The integer type statistics for the stats section - - timings : OrderedDict - The timing type statistics for the stats section - Expected to contain float values representing values in seconds - - messages : OrderedDict - Text type output to be reported - - total_time : float - Total time for processing in the section - Can be None, meaning that section timing percentages will be reported - """ - def __init__(self, name, parent): - self.parent = parent - self.header = str(name) - 
self.name = name - self.counts = OrderedDict() - self.timings = OrderedDict() - self.messages = OrderedDict() - self.total_time = None - - def add_count(self, key, value): - """ - Add value to count. If key does not already exist in section then - it is created with this value. - If key already exists it is increased by the given value - value is expected to be an integer - """ - if not isinstance(value, int): - try: - value = int(value) - except: - raise TypeError("value is expected to be an integer") - - if key in self.counts: - self.counts[key] += value - else: - self.counts[key] = value - - def add_timing(self, key, value): - """ - Add value to timing. If key does not already exist in section then - it is created with this value. - If key already exists it is increased by the give value - value is expected to be a float, and given in seconds. - """ - if not isinstance(value, float): - try: - value = float(value) - except: - raise TypeError("value is expected to be a float") - - if key in self.timings: - self.timings[key] += value - else: - self.timings[key] = value - - def add_message(self, key, value, sep=";"): - """ - Add value to message. If key does not already exist in section then - it is created with this value. - If key already exists the value is added to the message - The value will be converted to a string - """ - value = str(value) - - if key in self.messages: - if sep is not None: - try: - value = sep + value - except: - TypeError("It is not possible to concatenate the value " - "with the given seperator") - self.messages[key] += value - else: - self.messages[key] = value - - def report(self, output=sys.stdout): - """ - Report the counts, timings and messages for this section. 
- Note the percentage of the section and solver total times will be - given if the parent and or section total_time is set - """ - if self.header: - output.write("\n{}\n{}\n".format(self.header, - ("-"*len(self.header)))) - - # TODO: Make the timings and counts ouput in a table format - # Generally make more pretty - - # Report timings - try: - ttt = self.parent.total_time - except: - ttt = None - - tt = self.total_time - - output.write("### Timings:\n") - for key, value in self.timings.items(): - l = " - {}\t{}\n".format(_format_time(value, tt, ttt), key) - output.write(l) - if tt is not None: - output.write(" - {}\t{} total time\n".format(_format_time(tt), - self.name)) - - # Report counts - output.write("### Counts:\n") - for key, value in self.counts.items(): - l = " - {}\t{}\n".format(value, key) - output.write(l) - - # Report messages - output.write("### Messages:\n") - for key, value in self.messages.items(): - l = " - {}:\t{}\n".format(key, value) - output.write(l) - - - def clear(self): - """ - Clear counts, timings and messages from this section - """ - self.counts.clear() - self.timings.clear() - self.messages.clear() - self.total_time = None - - -def _solver_safety_check(H, state=None, c_ops=[], e_ops=[], args={}): - # Input is std Qobj (Hamiltonian or Liouvillian) - if isinstance(H, Qobj): - Hdims = H.dims - Htype = H.type - _structure_check(Hdims, Htype, state) - # Input H is function - elif isinstance(H, (FunctionType, BuiltinFunctionType)): - Hdims = H(0,args).dims - Htype = H(0,args).type - _structure_check(Hdims, Htype, state) - # Input is td-list - elif isinstance(H, list): - if isinstance(H[0], Qobj): - Hdims = H[0].dims - Htype = H[0].type - elif isinstance(H[0], list): - Hdims = H[0][0].dims - Htype = H[0][0].type - elif isinstance(H[0], (FunctionType, BuiltinFunctionType)): - Hdims = H[0](0,args).dims - Htype = H[0](0,args).type - else: - raise Exception('Invalid td-list element.') - # Check all operators in list - for ii in range(len(H)): - 
if isinstance(H[ii], Qobj): - _temp_dims = H[ii].dims - _temp_type = H[ii].type - elif isinstance(H[ii], list): - _temp_dims = H[ii][0].dims - _temp_type = H[ii][0].type - elif isinstance(H[ii], (FunctionType, BuiltinFunctionType)): - _temp_dims = H[ii](0,args).dims - _temp_type = H[ii](0,args).type - else: - raise Exception('Invalid td-list element.') - _structure_check(_temp_dims,_temp_type,state) - - else: - raise Exception('Invalid time-dependent format.') - - - for ii in range(len(c_ops)): - do_tests = True - if isinstance(c_ops[ii], Qobj): - _temp_state = c_ops[ii] - elif isinstance(c_ops[ii], list): - if isinstance(c_ops[ii][0], Qobj): - _temp_state = c_ops[ii][0] - elif isinstance(c_ops[ii][0], tuple): - do_tests = False - for kk in range(len(c_ops[ii][0])): - _temp_state = c_ops[ii][0][kk] - _structure_check(Hdims, Htype, _temp_state) - else: - raise Exception('Invalid td-list element.') - if do_tests: - _structure_check(Hdims, Htype, _temp_state) - - if isinstance(e_ops, list): - for ii in range(len(e_ops)): - if isinstance(e_ops[ii], Qobj): - _temp_state = e_ops[ii] - elif isinstance(e_ops[ii], list): - _temp_state = e_ops[ii][0] - else: - raise Exception('Invalid td-list element.') - _structure_check(Hdims,Htype,_temp_state) - elif isinstance(e_ops, FunctionType): - pass - else: - raise Exception('Invalid e_ops specification.') - - -def _structure_check(Hdims, Htype, state): - if state is not None: - # Input state is a ket vector - if state.type == 'ket': - # Input is Hamiltonian - if Htype == 'oper': - if Hdims[1] != state.dims[0]: - raise Exception('Input operator and ket do not ' - 'share same structure.') - # Input is super and state is ket - elif Htype == 'super': - if Hdims[1][1] != state.dims[0]: - raise Exception('Input operator and ket do not ' - 'share same structure.') - else: - raise Exception('Invalid input operator.') - # Input state is a density matrix - elif state.type == 'oper': - # Input is Hamiltonian and state is density matrix - if 
Htype == 'oper': - if Hdims[1] != state.dims[0]: - raise Exception('Input operators do not ' - 'share same structure.') - # Input is super op. and state is density matrix - elif Htype == 'super': - if Hdims[1] != state.dims: - raise Exception('Input operators do not ' - 'share same structure.') - - -# -# create a global instance of the SolverConfiguration class -# -config = SolverConfiguration() - -# for backwards compatibility -# Odeoptions = Options -# Odedata = Result diff --git a/qutip/solve/stochastic.py b/qutip/solve/stochastic.py deleted file mode 100644 index 99829ae8ff..0000000000 --- a/qutip/solve/stochastic.py +++ /dev/null @@ -1,1405 +0,0 @@ -# -*- coding: utf-8 -*- - -import numpy as np -import scipy.sparse as sp -from .. import ( - Qobj, QobjEvo, isket, isoper, issuper, ket2dm, spre, spost, stack_columns, - unstack_columns, liouvillian, lindblad_dissipator, -) -from .pdpsolve import main_ssepdpsolve, main_smepdpsolve -from ._stochastic import ( - SSESolver, SMESolver, PcSSESolver, PcSMESolver, PmSMESolver, - GenericSSolver, Solvers -) -from .solver import Result, SolverOptions, _solver_safety_check -from .parallel import serial_map -from ..ui.progressbar import TextProgressBar -from qutip.core import data as _data - -__all__ = ['ssesolve', 'photocurrent_sesolve', 'smepdpsolve', - 'smesolve', 'photocurrent_mesolve', 'ssepdpsolve', - 'stochastic_solvers', 'general_stochastic'] - - -def stochastic_solvers(): - # This docstring contains several literal backslash characters inside LaTeX - # blocks, but it cannot be declared as a raw string because we also need to - # use a line continuation. At one point we need a restructured text - # "definition list", where the heading _must_ be entirely on one line, - # however it will violate our line-length reporting if we do that. - """ - This function is purely a reference point for documenting the available - stochastic solver methods, and takes no actions. 
- - Notes - ----- - Available solvers for :obj:`~ssesolve` and :obj:`~smesolve` - euler-maruyama - A simple generalization of the Euler method for ordinary - differential equations to stochastic differential equations. Only - solver which could take non-commuting ``sc_ops``. *not tested* - - - Order 0.5 - - Code: ``'euler-maruyama'``, ``'euler'`` or ``0.5`` - - milstein - An order 1.0 strong Taylor scheme. Better approximate numerical - solution to stochastic differential equations. See eq. (2.9) of - chapter 12.2 of [1]_. - - - Order strong 1.0 - - Code: ``'milstein'`` or ``1.0`` - - milstein-imp - An order 1.0 implicit strong Taylor scheme. Implicit Milstein - scheme for the numerical simulation of stiff stochastic - differential equations. - - - Order strong 1.0 - - Code: ``'milstein-imp'`` - - predictor-corrector - Generalization of the trapezoidal method to stochastic differential - equations. More stable than explicit methods. See eq. (5.4) of - chapter 15.5 of [1]_. - - - Order strong 0.5, weak 1.0 - - Codes to only correct the stochastic part (:math:`\\alpha=0`, - :math:`\\eta=1/2`): ``'pred-corr'``, ``'predictor-corrector'`` or - ``'pc-euler'`` - - Codes to correct both the stochastic and deterministic parts - (:math:`\\alpha=1/2`, :math:`\\eta=1/2`): ``'pc-euler-imp'``, - ``'pc-euler-2'`` or ``'pred-corr-2'`` - - platen - Explicit scheme, creates the Milstein using finite differences - instead of analytic derivatives. Also contains some higher order - terms, thus converges better than Milstein while staying strong - order 1.0. Does not require derivatives, therefore usable by - :func:`~general_stochastic`. See eq. (7.47) of chapter 7 of [2]_. - - - Order strong 1.0, weak 2.0 - - Code: ``'platen'``, ``'platen1'`` or ``'explicit1'`` - - rouchon - Scheme keeping the positivity of the density matrix - (:obj:`~smesolve` only). See eq. (4) with :math:`\\eta=1` of [3]_. - - - Order strong 1.0? 
- - Code: ``'rouchon'`` or ``'Rouchon'`` - - taylor1.5 - Order 1.5 strong Taylor scheme. Solver with more terms of the - Ito-Taylor expansion. Default solver for :obj:`~smesolve` and - :obj:`~ssesolve`. See eq. (4.6) of chapter 10.4 of [1]_. - - - Order strong 1.5 - - Code: ``'taylor1.5'``, ``'taylor15'``, ``1.5``, or ``None`` - - taylor1.5-imp - Order 1.5 implicit strong Taylor scheme. Implicit Taylor 1.5 - (:math:`\\alpha = 1/2`, :math:`\\beta` doesn't matter). See eq. - (2.18) of chapter 12.2 of [1]_. - - - Order strong 1.5 - - Code: ``'taylor1.5-imp'`` or ``'taylor15-imp'`` - - explicit1.5 - Explicit order 1.5 strong schemes. Reproduce the order 1.5 strong - Taylor scheme using finite difference instead of derivatives. - Slower than ``taylor15`` but usable by - :func:`~general_stochastic`. See eq. (2.13) of chapter 11.2 of - [1]_. - - - Order strong 1.5 - - Code: ``'explicit1.5'``, ``'explicit15'`` or ``'platen15'`` - - taylor2.0 - Order 2 strong Taylor scheme. Solver with more terms of the - Stratonovich expansion. See eq. (5.2) of chapter 10.5 of [1]_. - - - Order strong 2.0 - - Code: ``'taylor2.0'``, ``'taylor20'`` or ``2.0`` - - All solvers, except taylor2.0, are usable in both smesolve and ssesolve - and for both heterodyne and homodyne. taylor2.0 only works for 1 - stochastic operator independent of time with the homodyne method. - :func:`~general_stochastic` only accepts the derivative-free - solvers: ``'euler'``, ``'platen'`` and ``'explicit1.5'``. - - Available solvers for :obj:`~photocurrent_sesolve` and \ -:obj:`~photocurrent_mesolve` - Photocurrent use ordinary differential equations between - stochastic "jump/collapse". - - euler - Euler method for ordinary differential equations between jumps. - Only one jump per time interval. Default solver. See eqs. (4.19) - and (4.4) of chapter 4 of [4]_. - - - Order 1.0 - - Code: ``'euler'`` - - predictor–corrector - predictor–corrector method (PECE) for ordinary differential - equations. 
Uses the Poisson distribution to obtain the number of - jumps at each timestep. - - - Order 2.0 - - Code: ``'pred-corr'`` - - References - ---------- - .. [1] Peter E. Kloeden and Exkhard Platen, *Numerical Solution of - Stochastic Differential Equations*. - .. [2] H.-P. Breuer and F. Petruccione, *The Theory of Open Quantum - Systems*. - .. [3] Pierre Rouchon and Jason F. Ralpha, *Efficient Quantum Filtering for - Quantum Feedback Control*, `arXiv:1410.5345 [quant-ph] - `_, Phys. Rev. A 91, 012118, - (2015). - .. [4] Howard M. Wiseman, Gerard J. Milburn, *Quantum measurement and - control*. - """ - - -class StochasticSolverOptions: - """Class of options for stochastic solvers such as - :func:`qutip.stochastic.ssesolve`, :func:`qutip.stochastic.smesolve`, etc. - - The stochastic solvers :func:`qutip.stochastic.general_stochastic`, - :func:`qutip.stochastic.ssesolve`, :func:`qutip.stochastic.smesolve`, - :func:`qutip.stochastic.photocurrent_sesolve` and - :func:`qutip.stochastic.photocurrent_mesolve` - all take the same keyword arguments as - the constructor of these class, and internally they use these arguments to - construct an instance of this class, so it is rarely needed to explicitly - create an instance of this class. - - Within the attribute list, a ``time_dependent_object`` is either - - - :class:`~qutip.Qobj`: a constant term - - 2-element list of ``[Qobj, time_dependence]``: a time-dependent term - where the ``Qobj`` will be multiplied by the time-dependent scalar. - - For more details on all allowed time-dependent objects, see the - documentation for :class:`~qutip.QobjEvo`. - - Attributes - ---------- - H : time_dependent_object or list of time_dependent_object - System Hamiltonian in standard time-dependent list format. This is the - same as the argument that (e.g.) :func:`~qutip.mesolve` takes. - If this is a list of elements, they are summed. - - state0 : :class:`qutip.Qobj` - Initial state vector (ket) or density matrix. 
- - times : array_like of float - List of times for :math:`t`. Must be uniformly spaced. - - c_ops : list of time_dependent_object - List of deterministic collapse operators. Each element of the list is - a separate operator; unlike the Hamiltonian, there is no implicit - summation over the terms. - - sc_ops : list of time_dependent_object - List of stochastic collapse operators. Each stochastic collapse - operator will give a deterministic and stochastic contribution - to the equation of motion according to how the d1 and d2 functions - are defined. Each element of the list is a separate operator, like - ``c_ops``. - - e_ops : list of :class:`qutip.Qobj` - Single operator or list of operators for which to evaluate - expectation values. - - m_ops : list of :class:`qutip.Qobj` - List of operators representing the measurement operators. The expected - format is a nested list with one measurement operator for each - stochastic increament, for each stochastic collapse operator. - - args : dict - Dictionary of parameters for time dependent systems. - - tol : float - Tolerance of the solver for implicit methods. - - ntraj : int - Number of trajectors. - - nsubsteps : int - Number of sub steps between each time-spep given in `times`. - - dW_factors : array - Array of length len(sc_ops), containing scaling factors for each - measurement operator in m_ops. - - solver : string - Name of the solver method to use for solving the stochastic - equations. Valid values are: - - - order 1/2 algorithms: 'euler-maruyama', 'pc-euler', 'pc-euler-imp' - - order 1 algorithms: 'milstein', 'platen', 'milstein-imp', 'rouchon' - - order 3/2 algorithms: 'taylor1.5', 'taylor1.5-imp', 'explicit1.5' - - order 2 algorithms: 'taylor2.0' - - See the documentation of :func:`~qutip.stochastic.stochastic_solvers` - for a description of the solvers. Implicit methods can adjust - tolerance via the kw 'tol'. 
Default is {'tol': 1e-6} - - method : string ('homodyne', 'heterodyne') - The name of the type of measurement process that give rise to the - stochastic equation to solve. - - store_all_expect : bool (default False) - Whether or not to store the e_ops expect values for all paths. - - store_measurement : bool (default False) - Whether or not to store the measurement results in the - :class:`qutip.solver.Result` instance returned by the solver. - - noise : int, or 1D array of int, or 4D array of float - - int : seed of the noise - - 1D array : length = ntraj, seeds for each trajectories. - - 4D array : ``(ntraj, len(times), nsubsteps, len(sc_ops)*[1|2])``. - Vector for the noise, the len of the last dimensions is doubled for - solvers of order 1.5. This corresponds to results.noise. - - noiseDepth : int - Number of terms kept of the truncated series used to create the - noise used by taylor2.0 solver. - - normalize : bool - (default True for (photo)ssesolve, False for (photo)smesolve) - Whether or not to normalize the wave function during the evolution. - Normalizing density matrices introduce numerical errors. - - options : :class:`qutip.solver.SolverOptions` - Generic solver options. Only options['average_states'] and - options['store_states'] are used. - - map_func: function - A map function or managing the calls to single-trajactory solvers. - - map_kwargs: dictionary - Optional keyword arguments to the map_func function function. - - progress_bar : :class:`qutip.ui.BaseProgressBar` - Optional progress bar class instance. 
- """ - def __init__(self, me, H=None, c_ops=[], sc_ops=[], state0=None, - e_ops=[], m_ops=None, store_all_expect=False, - store_measurement=False, dW_factors=None, - solver=None, method="homodyne", normalize=None, - times=None, nsubsteps=1, ntraj=1, tol=None, - generate_noise=None, noise=None, - progress_bar=None, map_func=None, map_kwargs=None, - args={}, options=None, noiseDepth=20): - - if options is None: - options = SolverOptions() - - if progress_bar is None: - progress_bar = TextProgressBar() - - # System - # Cast to QobjEvo so the code has only one version for both the - # constant and time-dependent case. - self.me = me - - if H is not None: - msg = "The Hamiltonian format is not valid. " - try: - self.H = QobjEvo(H, args=args, tlist=times) - except Exception as e: - raise ValueError(msg + str(e)) from e - else: - self.H = H - - if sc_ops: - msg = ("The sc_ops format is not valid. Options are " - "[ Qobj / QobjEvo / [Qobj, coeff]]. ") - try: - self.sc_ops = [QobjEvo(op, args=args, tlist=times) - for op in sc_ops] - except Exception as e: - raise ValueError(msg + str(e)) from e - else: - self.sc_ops = sc_ops - - if c_ops: - msg = ("The c_ops format is not valid. Options are " - "[ Qobj / QobjEvo / [Qobj, coeff]]. 
") - try: - self.c_ops = [QobjEvo(op, args=args, tlist=times) - for op in c_ops] - except Exception as e: - raise ValueError(msg + str(e)) from e - else: - self.c_ops = c_ops - - self.state0 = state0 - self.rho0 = stack_columns(state0.full()).ravel() - - # Observation - - for e_op in e_ops: - if ( - isinstance(e_op, Qobj) - and self.H is not None - and e_op.dims[1] != self.H.dims[0] - ): - raise TypeError(f"e_ops dims ({e_op.dims}) are not compatible " - f"with the system's ({self.H.dims})") - self.e_ops = e_ops - self.m_ops = m_ops - self.store_measurement = store_measurement - self.store_all_expect = store_all_expect - self.store_states = options['store_states'] - self.dW_factors = dW_factors - - # Solver - self.solver = solver - self.method = method - if normalize is None and me: - self.normalize = 0 - elif normalize is None and not me: - self.normalize = 1 - elif normalize: - self.normalize = 1 - else: - self.normalize = 0 - - self.times = times - self.nsubsteps = nsubsteps - self.dt = (times[1] - times[0]) / self.nsubsteps - self.ntraj = ntraj - if tol is not None: - self.tol = tol - elif "tol" in args: - self.tol = args["tol"] - else: - self.tol = 1e-7 - - # Noise - if noise is not None: - if isinstance(noise, int): - # noise contain a seed - np.random.seed(noise) - noise = np.random.randint(0, 2**32, ntraj, dtype=np.uint32) - noise = np.array(noise) - if len(noise.shape) == 1: - if noise.shape[0] < ntraj: - raise ValueError("'noise' does not have enought seeds " + - "len(noise) >= ntraj") - # numpy seed must be between 0 and 2**32-1 - # 'u4': unsigned 32bit int - self.noise = noise.astype("u4") - self.noise_type = 0 - - elif len(noise.shape) == 4: - # taylor case not included - dw_len = (2 if method == "heterodyne" else 1) - dw_len_str = (" * 2" if method == "heterodyne" else "") - msg = "Incorrect shape for 'noise': " - if noise.shape[0] < ntraj: - raise ValueError(msg + "shape[0] >= ntraj") - if noise.shape[1] < len(times): - raise ValueError(msg + 
"shape[1] >= len(times)") - if noise.shape[2] < nsubsteps: - raise ValueError(msg + "shape[2] >= nsubsteps") - if noise.shape[3] < len(self.sc_ops) * dw_len: - raise ValueError(msg + "shape[3] >= len(self.sc_ops)" + - dw_len_str) - self.noise_type = 1 - self.noise = noise - - else: - self.noise = np.random.randint(0, 2**32, ntraj, dtype=np.uint32) - self.noise_type = 0 - - # Map - self.progress_bar = progress_bar - if self.ntraj > 1 and map_func: - self.map_func = map_func - else: - self.map_func = serial_map - self.map_kwargs = map_kwargs if map_kwargs is not None else {} - - # Other - self.options = options - self.args = args - self.set_solver() - self.p = noiseDepth - - def set_solver(self): - if self.solver in ['euler-maruyama', 'euler', 50, 0.5]: - self.solver_code = 50 - self.solver = 'euler-maruyama' - elif self.solver in ['platen', 'platen1', 'explicit1', 100]: - self.solver_code = 100 - self.solver = 'platen' - elif self.solver in ['pred-corr', 'predictor-corrector', - 'pc-euler', 101]: - self.solver_code = 101 - self.solver = 'pred-corr' - elif self.solver in ['milstein', 102, 1.0]: - self.solver_code = 102 - self.solver = 'milstein' - elif self.solver in ['milstein-imp', 103]: - self.solver_code = 103 - self.solver = 'milstein-imp' - elif self.solver in ['pred-corr-2', 'pc-euler-2', 'pc-euler-imp', 104]: - self.solver_code = 104 - self.solver = 'pred-corr-2' - elif self.solver in ['Rouchon', 'rouchon', 120]: - self.solver_code = 120 - self.solver = 'rouchon' - if not all((op.isconstant for op in self.sc_ops)): - raise ValueError("Rouchon only works with constant sc_ops") - elif self.solver in ['platen15', 'explicit1.5', 'explicit15', 150]: - self.solver_code = 150 - self.solver = 'explicit1.5' - elif self.solver in ['taylor15', 'taylor1.5', None, 1.5, 152]: - self.solver_code = 152 - self.solver = 'taylor1.5' - elif self.solver in ['taylor15-imp', 'taylor1.5-imp', 153]: - self.solver_code = 153 - self.solver = 'taylor1.5-imp' - elif self.solver in 
['taylor2.0', 'taylor20', 2.0, 202]: - self.solver_code = 202 - self.solver = 'taylor2.0' - if not len(self.sc_ops) == 1 or \ - not self.sc_ops[0].isconstant or \ - not self.method == "homodyne": - raise ValueError( - "Taylor2.0 only works with 1 constant sc_ops and for" - " homodyne method" - ) - else: - known = [ - None, 'euler-maruyama', 'platen', 'pc-euler', 'pc-euler-imp', - 'milstein', 'milstein-imp', 'rouchon', 'taylor1.5', - 'taylor1.5-imp', 'explicit1.5', 'taylor2.0', - ] - raise ValueError("The solver should be one of {!r}".format(known)) - - -class StochasticSolverOptionsPhoto(StochasticSolverOptions): - """ - Attributes - ---------- - - solver : string - Name of the solver method to use for solving the evolution - of the system.* - order 1 algorithms: 'euler' - order 2 algorithms: 'pred-corr' - In photocurrent evolution - """ - def set_solver(self): - if self.solver in [None, 'euler', 1, 60]: - self.solver_code = 60 - self.solver = 'euler' - elif self.solver in ['pred-corr', 'predictor-corrector', 110, 2]: - self.solver_code = 110 - self.solver = 'pred-corr' - else: - raise Exception("The solver should be one of " + - "[None, 'euler', 'predictor-corrector']") - - -def smesolve(H, rho0, times, c_ops=[], sc_ops=[], e_ops=[], - _safe_mode=True, args={}, **kwargs): - """ - Solve stochastic master equation. Dispatch to specific solvers - depending on the value of the `solver` keyword argument. - - Parameters - ---------- - - H : :class:`qutip.Qobj`, or time dependent system. - System Hamiltonian. - Can depend on time, see StochasticSolverOptions help for format. - - rho0 : :class:`qutip.Qobj` - Initial density matrix or state vector (ket). - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - c_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs. - Deterministic collapse operator which will contribute with a standard - Lindblad type of dissipation. - Can depend on time, see StochasticSolverOptions help for format. 
- - sc_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs. - List of stochastic collapse operators. Each stochastic collapse - operator will give a deterministic and stochastic contribution - to the eqaution of motion according to how the d1 and d2 functions - are defined. - Can depend on time, see StochasticSolverOptions help for format. - - e_ops : list of :class:`qutip.Qobj` - single operator or list of operators for which to evaluate - expectation values. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. - - Returns - ------- - - output: :class:`qutip.solver.Result` - - An instance of the class :class:`qutip.solver.Result`. - - """ - if "method" in kwargs and kwargs["method"] == "photocurrent": - print("stochastic solver with photocurrent method has been moved to " - "it's own function: photocurrent_mesolve") - return photocurrent_mesolve(H, rho0, times, c_ops=c_ops, sc_ops=sc_ops, - e_ops=e_ops, _safe_mode=_safe_mode, - args=args, **kwargs) - if isket(rho0): - rho0 = ket2dm(rho0) - - if isinstance(e_ops, dict): - e_ops_dict = e_ops - e_ops = [e for e in e_ops.values()] - else: - e_ops_dict = None - - sso = StochasticSolverOptions(True, H=H, state0=rho0, times=times, - c_ops=c_ops, sc_ops=sc_ops, e_ops=e_ops, - args=args, **kwargs) - - if _safe_mode: - _safety_checks(sso) - - if sso.solver_code == 120: - return _positive_map(sso, e_ops_dict) - - sso.LH = liouvillian(sso.H, c_ops=sso.sc_ops + sso.c_ops) * sso.dt - if sso.method == 'homodyne' or sso.method is None: - if sso.m_ops is None: - sso.m_ops = [op + op.dag() for op in sso.sc_ops] - sso.sops = [spre(op) + spost(op.dag()) for op in sso.sc_ops] - if not isinstance(sso.dW_factors, list): - sso.dW_factors = [1] * len(sso.m_ops) - elif len(sso.dW_factors) != len(sso.m_ops): - raise Exception("The len of dW_factors is not the same as m_ops") - - elif sso.method == 'heterodyne': - if sso.m_ops is None: - m_ops = [] - sso.sops = [] - for c in 
sso.sc_ops: - if sso.m_ops is None: - m_ops += [c + c.dag(), -1j * (c - c.dag())] - sso.sops += [(spre(c) + spost(c.dag())) / np.sqrt(2), - (spre(c) - spost(c.dag())) * -1j / np.sqrt(2)] - sso.m_ops = m_ops - if not isinstance(sso.dW_factors, list): - sso.dW_factors = [np.sqrt(2)] * len(sso.sops) - elif len(sso.dW_factors) == len(sso.m_ops): - pass - elif len(sso.dW_factors) == len(sso.sc_ops): - dW_factors = [] - for fact in sso.dW_factors: - dW_factors += [np.sqrt(2) * fact, np.sqrt(2) * fact] - sso.dW_factors = dW_factors - elif len(sso.dW_factors) != len(sso.m_ops): - raise Exception("The len of dW_factors is not the same as sc_ops") - - elif sso.method == "photocurrent": - raise NotImplementedError("Moved to 'photocurrent_mesolve'") - - else: - raise Exception("The method must be one of None, homodyne, heterodyne") - - sso.ce_ops = [QobjEvo(spre(op)) for op in sso.e_ops] - sso.cm_ops = [QobjEvo(spre(op)) for op in sso.m_ops] - - if sso.solver_code in [103, 153]: - sso.imp = (1 - sso.LH * 0.5).to(_data.CSR) - - sso.solver_obj = SMESolver - sso.solver_name = "smesolve_" + sso.solver - - res = _sesolve_generic(sso, sso.options, sso.progress_bar) - - if e_ops_dict: - res.expect = {e: res.expect[n] - for n, e in enumerate(e_ops_dict.keys())} - return res - - -def ssesolve(H, psi0, times, sc_ops=[], e_ops=[], - _safe_mode=True, args={}, **kwargs): - """ - Solve stochastic schrodinger equation. Dispatch to specific solvers - depending on the value of the `solver` keyword argument. - - Parameters - ---------- - - H : :class:`qutip.Qobj`, or time dependent system. - System Hamiltonian. - Can depend on time, see StochasticSolverOptions help for format. - - psi0 : :class:`qutip.Qobj` - State vector (ket). - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - sc_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs. - List of stochastic collapse operators. 
Each stochastic collapse - operator will give a deterministic and stochastic contribution - to the eqaution of motion according to how the d1 and d2 functions - are defined. - Can depend on time, see StochasticSolverOptions help for format. - - e_ops : list of :class:`qutip.Qobj` - single operator or list of operators for which to evaluate - expectation values. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. - - Returns - ------- - - output: :class:`qutip.solver.Result` - - An instance of the class :class:`qutip.solver.Result`. - """ - if "method" in kwargs and kwargs["method"] == "photocurrent": - print("stochastic solver with photocurrent method has been moved to " - "it's own function: photocurrent_sesolve") - return photocurrent_sesolve(H, psi0, times, c_ops=c_ops, - e_ops=e_ops, _safe_mode=_safe_mode, - args=args, **kwargs) - - if isinstance(e_ops, dict): - e_ops_dict = e_ops - e_ops = [e for e in e_ops.values()] - else: - e_ops_dict = None - - sso = StochasticSolverOptions(False, H=H, state0=psi0, times=times, - sc_ops=sc_ops, e_ops=e_ops, - args=args, **kwargs) - - if _safe_mode: - _safety_checks(sso) - - if sso.solver_code == 120: - raise Exception("rouchon only work with smesolve") - - if sso.method == 'homodyne' or sso.method is None: - if sso.m_ops is None: - sso.m_ops = [op + op.dag() for op in sso.sc_ops] - sso.sops = [[op, op + op.dag()] for op in sso.sc_ops] - if not isinstance(sso.dW_factors, list): - sso.dW_factors = [1] * len(sso.sops) - elif len(sso.dW_factors) != len(sso.sops): - raise Exception("The len of dW_factors is not the same as sc_ops") - - elif sso.method == 'heterodyne': - if sso.m_ops is None: - m_ops = [] - sso.sops = [] - for c in sso.sc_ops: - if sso.m_ops is None: - m_ops += [c + c.dag(), -1j * (c - c.dag())] - c1 = c / np.sqrt(2) - c2 = c * (-1j / np.sqrt(2)) - sso.sops += [[c1, c1 + c1.dag()], - [c2, c2 + c2.dag()]] - sso.m_ops = m_ops - if not 
isinstance(sso.dW_factors, list): - sso.dW_factors = [np.sqrt(2)] * len(sso.sops) - elif len(sso.dW_factors) == len(sso.sc_ops): - dW_factors = [] - for fact in sso.dW_factors: - dW_factors += [np.sqrt(2) * fact, np.sqrt(2) * fact] - sso.dW_factors = dW_factors - elif len(sso.dW_factors) != len(sso.sops): - raise Exception("The len of dW_factors is not the same as sc_ops") - - elif sso.method == "photocurrent": - NotImplementedError("Moved to 'photocurrent_sesolve'") - - else: - raise Exception("The method must be one of None, homodyne, heterodyne") - - sso.LH = sso.H * (-1j * sso.dt) - for ops in sso.sops: - sso.LH += (-0.5 * sso.dt) * ops[0].dag() @ ops[0] - - sso.ce_ops = [QobjEvo(op) for op in sso.e_ops] - sso.cm_ops = [QobjEvo(op) for op in sso.m_ops] - - sso.solver_obj = SSESolver - sso.solver_name = "ssesolve_" + sso.solver - - res = _sesolve_generic(sso, sso.options, sso.progress_bar) - - if e_ops_dict: - res.expect = {e: res.expect[n] - for n, e in enumerate(e_ops_dict.keys())} - - return res - - -def _positive_map(sso, e_ops_dict): - if sso.method == 'homodyne' or sso.method is None: - sops = sso.sc_ops - if sso.m_ops is None: - sso.m_ops = [op + op.dag() for op in sso.sc_ops] - if not isinstance(sso.dW_factors, list): - sso.dW_factors = [1] * len(sops) - elif len(sso.dW_factors) != len(sops): - raise Exception("The len of dW_factors is not the same as sc_ops") - - elif sso.method == 'heterodyne': - if sso.m_ops is None: - m_ops = [] - sops = [] - for c in sso.sc_ops: - if sso.m_ops is None: - m_ops += [c + c.dag(), -1j * (c - c.dag())] - sops += [c / np.sqrt(2), -1j / np.sqrt(2) * c] - sso.m_ops = m_ops - if not isinstance(sso.dW_factors, list): - sso.dW_factors = [np.sqrt(2)] * len(sops) - elif len(sso.dW_factors) == len(sso.sc_ops): - dW_factors = [] - for fact in sso.dW_factors: - dW_factors += [np.sqrt(2) * fact, np.sqrt(2) * fact] - sso.dW_factors = dW_factors - elif len(sso.dW_factors) != len(sops): - raise Exception("The len of dW_factors is not 
the same as sc_ops") - else: - raise Exception("The method must be one of homodyne or heterodyne") - - LH = 1 - (sso.H * 1j * sso.dt) - sso.pp = spre(sso.H) * 0 - sso.sops = [] - sso.preops = [] - sso.postops = [] - sso.preops2 = [] - sso.postops2 = [] - - def _prespostdag(op): - return spre(op) * spost(op.dag()) - - for op in sso.c_ops: - LH += op.dag() @ op * (-sso.dt * 0.5) - sso.pp += spre(op) * spost(op.dag()) * sso.dt - - for i, op in enumerate(sops): - LH += (-sso.dt * 0.5) * op.dag() @ op - sso.sops += [(spre(op) + spost(op.dag())) * sso.dt] - sso.preops += [spre(op)] - sso.postops += [spost(op.dag())] - for op2 in sops[i:]: - sso.preops2 += [spre(op * op2)] - sso.postops2 += [spost(op.dag() * op2.dag())] - - sso.ce_ops = [QobjEvo(spre(op)) for op in sso.e_ops] - sso.cm_ops = [QobjEvo(spre(op)) for op in sso.m_ops] - sso.preLH = spre(LH) - sso.postLH = spost(LH.dag()) - - sso.solver_obj = PmSMESolver - sso.solver_name = "smesolve_" + sso.solver - res = _sesolve_generic(sso, sso.options, sso.progress_bar) - - if e_ops_dict: - res.expect = {e: res.expect[n] - for n, e in enumerate(e_ops_dict.keys())} - - return res - - -def photocurrent_mesolve(H, rho0, times, c_ops=[], sc_ops=[], e_ops=[], - _safe_mode=True, args={}, **kwargs): - """ - Solve stochastic master equation using the photocurrent method. - - Parameters - ---------- - - H : :class:`qutip.Qobj`, or time dependent system. - System Hamiltonian. - Can depend on time, see StochasticSolverOptions help for format. - - rho0 : :class:`qutip.Qobj` - Initial density matrix or state vector (ket). - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - c_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs. - Deterministic collapse operator which will contribute with a standard - Lindblad type of dissipation. - Can depend on time, see StochasticSolverOptions help for format. - - sc_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs. 
- List of stochastic collapse operators. Each stochastic collapse - operator will give a deterministic and stochastic contribution - to the eqaution of motion according to how the d1 and d2 functions - are defined. - Can depend on time, see StochasticSolverOptions help for format. - - e_ops : list of :class:`qutip.Qobj` / callback function single - single operator or list of operators for which to evaluate - expectation values. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. - - Returns - ------- - - output: :class:`qutip.solver.Result` - - An instance of the class :class:`qutip.solver.Result`. - """ - if isket(rho0): - rho0 = ket2dm(rho0) - - if isinstance(e_ops, dict): - e_ops_dict = e_ops - e_ops = [e for e in e_ops.values()] - else: - e_ops_dict = None - - sso = StochasticSolverOptionsPhoto(True, H=H, state0=rho0, times=times, - c_ops=c_ops, sc_ops=sc_ops, e_ops=e_ops, - args=args, **kwargs) - - if _safe_mode: - _safety_checks(sso) - - if sso.m_ops is None: - sso.m_ops = [op * 0 for op in sso.sc_ops] - if not isinstance(sso.dW_factors, list): - sso.dW_factors = [1] * len(sso.sc_ops) - elif len(sso.dW_factors) != len(sso.sc_ops): - raise Exception("The len of dW_factors is not the same as sc_ops") - - sso.solver_obj = PcSMESolver - sso.solver_name = "photocurrent_mesolve" - sso.LH = liouvillian(sso.H, c_ops=sso.c_ops) * sso.dt - - def _prespostdag(op): - return spre(op) * spost(op.dag()) - - sso.sops = [[spre(op.dag() @ op) + spost(op.dag() @ op), - spre(op.dag() @ op), - spre(op) * spost(op.dag())] for op in sso.sc_ops] - sso.ce_ops = [QobjEvo(spre(op)) for op in sso.e_ops] - sso.cm_ops = [QobjEvo(spre(op)) for op in sso.m_ops] - - res = _sesolve_generic(sso, sso.options, sso.progress_bar) - res.num_collapse = [np.count_nonzero(noise) for noise in res.noise] - - if e_ops_dict: - res.expect = {e: res.expect[n] - for n, e in enumerate(e_ops_dict.keys())} - - return res - - -def 
photocurrent_sesolve(H, psi0, times, sc_ops=[], e_ops=[], - _safe_mode=True, args={}, **kwargs): - """ - Solve stochastic schrodinger equation using the photocurrent method. - - Parameters - ---------- - - H : :class:`qutip.Qobj`, or time dependent system. - System Hamiltonian. - Can depend on time, see StochasticSolverOptions help for format. - - psi0 : :class:`qutip.Qobj` - Initial state vector (ket). - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - sc_ops : list of :class:`qutip.Qobj`, or time dependent Qobjs. - List of stochastic collapse operators. Each stochastic collapse - operator will give a deterministic and stochastic contribution - to the eqaution of motion according to how the d1 and d2 functions - are defined. - Can depend on time, see StochasticSolverOptions help for format. - - e_ops : list of :class:`qutip.Qobj` / callback function single - single operator or list of operators for which to evaluate - expectation values. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. - - Returns - ------- - - output: :class:`qutip.solver.Result` - - An instance of the class :class:`qutip.solver.Result`. 
- """ - if isinstance(e_ops, dict): - e_ops_dict = e_ops - e_ops = list(e_ops.values()) - else: - e_ops_dict = None - - sso = StochasticSolverOptionsPhoto(False, H=H, state0=psi0, times=times, - sc_ops=sc_ops, e_ops=e_ops, - args=args, **kwargs) - - if _safe_mode: - _safety_checks(sso) - - if sso.m_ops is None: - sso.m_ops = [op * 0 for op in sso.sc_ops] - if not isinstance(sso.dW_factors, list): - sso.dW_factors = [1] * len(sso.sc_ops) - elif len(sso.dW_factors) != len(sso.sc_ops): - raise Exception("The len of dW_factors is not the same as sc_ops") - - sso.solver_obj = PcSSESolver - sso.solver_name = "photocurrent_sesolve" - sso.sops = [[op, op.dag() @ op] for op in sso.sc_ops] - sso.LH = sso.H * (-1j*sso.dt) - for ops in sso.sops: - sso.LH += ops[0].dag() @ ops[0] * (-0.5 * sso.dt) - sso.ce_ops = [QobjEvo(op) for op in sso.e_ops] - sso.cm_ops = [QobjEvo(op) for op in sso.m_ops] - - res = _sesolve_generic(sso, sso.options, sso.progress_bar) - res.num_collapse = [np.count_nonzero(noise) for noise in res.noise] - - if e_ops_dict: - res.expect = {e: res.expect[n] - for n, e in enumerate(e_ops_dict.keys())} - - return res - - -def general_stochastic(state0, times, d1, d2, e_ops=[], m_ops=[], - _safe_mode=True, len_d2=1, args={}, **kwargs): - """ - Solve stochastic general equation. Dispatch to specific solvers - depending on the value of the `solver` keyword argument. - - - Parameters - ---------- - - state0 : :class:`qutip.Qobj` - Initial state vector (ket) or density matrix as a vector. - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - d1 : function, callable class - Function representing the deterministic evolution of the system. - - def d1(time (double), state (as a np.array vector)): - return 1d np.array - - d2 : function, callable class - Function representing the stochastic evolution of the system. 
- - def d2(time (double), state (as a np.array vector)): - return 2d np.array (N_sc_ops, len(state0)) - - len_d2 : int - Number of output vector produced by d2 - - e_ops : list of :class:`qutip.Qobj` - single operator or list of operators for which to evaluate - expectation values. - Must be a superoperator if the state vector is a density matrix. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. - - Returns - ------- - - output: :class:`qutip.solver.Result` - An instance of the class :class:`qutip.solver.Result`. - """ - - if isinstance(e_ops, dict): - e_ops_dict = e_ops - e_ops = [e for e in e_ops.values()] - else: - e_ops_dict = None - - if "solver" not in kwargs: - kwargs["solver"] = 50 - - sso = StochasticSolverOptions(False, H=None, state0=state0, times=times, - e_ops=e_ops, args=args, **kwargs) - if sso.solver_code not in [50, 100, 150]: - raise ValueError("Only Euler, platen, platen15 can be " + - "used for the general stochastic solver.") - - sso.d1 = d1 - sso.d2 = d2 - if _safe_mode: - # This state0_vec is computed as stack_columns(state0.full()).ravel() - # in the sso init. 
- state0_vec = sso.rho0 - l_vec = state0_vec.shape[0] - try: - out_d1 = d1(0., sso.rho0) - except Exception as e: - raise RuntimeError("Safety check: d1(0., state0_vec) failed.:\n" + - str(e)) from e - try: - out_d2 = d2(0., sso.rho0) - except Exception as e: - raise RuntimeError("Safety check: d2(0., state0_vec) failed:\n" + - str(e)) from e - - msg_d1 = ("d1 must return an 1d numpy array with the same number " - "of elements as the initial state as a vector.") - if not isinstance(out_d1, np.ndarray): - raise TypeError(msg_d1) - if (out_d1.ndim != 1 - or out_d1.shape[0] != l_vec or len(out_d1.shape) != 1): - raise ValueError(msg_d1) - - msg_d2 = ("Safety check: d2 must return a 2d numpy array " - "with the shape (len_d2, len(state0_vec) ).") - if not isinstance(out_d2, np.ndarray): - raise TypeError(msg_d2) - if (out_d2.ndim != 2 - or out_d2.shape[1] != l_vec or out_d2.shape[0] != len_d2): - raise ValueError(msg_d2) - if out_d1.dtype != np.dtype('complex128') or \ - out_d2.dtype != np.dtype('complex128'): - raise ValueError("Safety check: d1 and d2 must return " + - "complex numpy array.") - msg_e_ops = ("Safety check: The shape of the e_ops " - "does not fit the intial state.") - for op in sso.e_ops: - shape_op = op.shape - if sso.me: - if shape_op[0]**2 != l_vec or shape_op[1]**2 != l_vec: - raise ValueError(msg_e_ops) - else: - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise ValueError(msg_e_ops + - " Expecting e_ops as superoperators.") - - sso.m_ops = [] - sso.cm_ops = [] - if sso.store_measurement: - if not m_ops: - raise ValueError("General stochastic needs explicit " + - "m_ops to store measurement.") - sso.m_ops = m_ops - sso.cm_ops = [QobjEvo(op) for op in sso.m_ops] - if sso.dW_factors is None: - sso.dW_factors = [1.] 
* len(sso.m_ops) - elif len(sso.dW_factors) == 1: - sso.dW_factors = sso.dW_factors * len(sso.m_ops) - elif len(sso.dW_factors) != len(sso.m_ops): - raise ValueError("The number of dW_factors must fit" + - " the number of m_ops.") - - if sso.dW_factors is None: - sso.dW_factors = [1.] * len_d2 - sso.sops = [None] * len_d2 - sso.ce_ops = [QobjEvo(op) for op in sso.e_ops] - - sso.solver_obj = GenericSSolver - sso.solver_name = "general_stochastic_solver_" + sso.solver - - ssolver = GenericSSolver() - # ssolver.set_data(sso) - ssolver.set_solver(sso) - - res = _sesolve_generic(sso, sso.options, sso.progress_bar) - - if e_ops_dict: - res.expect = {e: res.expect[n] - for n, e in enumerate(e_ops_dict.keys())} - - return res - - -def _safety_checks(sso): - l_vec = sso.rho0.shape[0] - if sso.H.issuper: - if not sso.me: - raise ValueError( - "Given a Liouvillian for a Schrödinger equation problem." - ) - shape_op = sso.H.shape - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise Exception("The size of the hamiltonian does " - "not fit the intial state") - else: - shape_op = sso.H.shape - if sso.me: - if shape_op[0]**2 != l_vec or shape_op[1]**2 != l_vec: - raise Exception("The size of the hamiltonian does " - "not fit the intial state") - else: - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise Exception("The size of the hamiltonian does " - "not fit the intial state") - - for op in sso.sc_ops: - if op.issuper: - if not sso.me: - raise ValueError( - "Given a Liouvillian for a Schrödinger equation problem." 
- ) - shape_op = op.shape - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise Exception("The size of the sc_ops does " - "not fit the intial state") - else: - shape_op = op.shape - if sso.me: - if shape_op[0]**2 != l_vec or shape_op[1]**2 != l_vec: - raise Exception("The size of the sc_ops does " - "not fit the intial state") - else: - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise Exception("The size of the sc_ops does " - "not fit the intial state") - - for op in sso.c_ops: - if op.issuper: - if not sso.me: - raise ValueError( - "Given a Liouvillian for a Schrödinger equation problem." - ) - shape_op = op.shape - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise Exception("The size of the c_ops does " - "not fit the intial state") - else: - shape_op = op.shape - if sso.me: - if shape_op[0]**2 != l_vec or shape_op[1]**2 != l_vec: - raise Exception("The size of the c_ops does " - "not fit the intial state") - else: - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise Exception("The size of the c_ops does " - "not fit the intial state") - - for op in sso.e_ops: - shape_op = op.shape - if sso.me: - if shape_op[0]**2 != l_vec or shape_op[1]**2 != l_vec: - raise Exception("The size of the e_ops does " - "not fit the intial state") - else: - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise Exception("The size of the e_ops does " - "not fit the intial state") - - if sso.m_ops is not None: - for op in sso.m_ops: - shape_op = op.shape - if sso.me: - if shape_op[0]**2 != l_vec or shape_op[1]**2 != l_vec: - raise Exception("The size of the m_ops does " - "not fit the intial state") - else: - if shape_op[0] != l_vec or shape_op[1] != l_vec: - raise Exception("The size of the m_ops does " - "not fit the intial state") - - -def _sesolve_generic(sso, options, progress_bar): - """ - Internal function. See smesolve. 
- """ - res = Result() - res.times = sso.times - res.expect = np.zeros((len(sso.e_ops), len(sso.times)), dtype=complex) - res.ss = np.zeros((len(sso.e_ops), len(sso.times)), dtype=complex) - res.measurement = [] - res.solver = sso.solver_name - res.ntraj = sso.ntraj - res.num_expect = len(sso.e_ops) - - nt = sso.ntraj - task = _single_trajectory - map_kwargs = {'progress_bar': sso.progress_bar} - map_kwargs.update(sso.map_kwargs) - task_args = (sso,) - task_kwargs = {} - - results = sso.map_func(task, list(range(sso.ntraj)), - task_args, task_kwargs, **map_kwargs) - noise = [] - for result in results: - states_list, dW, m, expect = result - res.states.append(states_list) - noise.append(dW) - res.measurement.append(m) - res.expect += expect - res.ss += expect * expect - res.noise = np.stack(noise) - - if sso.store_all_expect: - paths_expect = [] - for result in results: - paths_expect.append(result[3]) - res.runs_expect = np.stack(paths_expect) - - # average density matrices (vectorized maybe) - # ajgpitch 2019-10-25: np.any(res.states) seems to error - # I guess there may be a potential exception if there are no states? 
- # store individual trajectory states - res.traj_states = res.states - res.avg_states = None - if options['average_states'] and options['store_states']: - avg_states_list = [] - for n in range(len(res.times)): - tslot_states = [res.states[mm][n].data for mm in range(nt)] - if len(tslot_states) > 0: - state = Qobj(np.sum(tslot_states), - dims=res.states[0][n].dims).unit() - avg_states_list.append(state) - # store average states - res.states = res.avg_states = avg_states_list - - # average - res.expect = res.expect / nt - - # standard error - if nt > 1: - res.se = (res.ss - nt * (res.expect ** 2)) / (nt * (nt - 1)) - else: - res.se = None - - # convert complex data to real if hermitian - res.expect = [np.real(res.expect[n, :]) - if e.isherm else res.expect[n, :] - for n, e in enumerate(sso.e_ops)] - - return res - - -def _single_trajectory(i, sso): - # Only one step? - ssolver = sso.solver_obj() - ssolver.set_solver(sso) - result = ssolver.cy_sesolve_single_trajectory(i) - return result - - -# The code for ssepdpsolve have been moved to the file pdpsolve. -# The call is still in stochastic for consistance. -def ssepdpsolve(H, psi0, times, c_ops, e_ops, **kwargs): - """ - A stochastic (piecewse deterministic process) PDP solver for wavefunction - evolution. For most purposes, use :func:`qutip.mcsolve` instead for quantum - trajectory simulations. - - Parameters - ---------- - - H : :class:`qutip.Qobj` - System Hamiltonian. - - psi0 : :class:`qutip.Qobj` - Initial state vector (ket). - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - c_ops : list of :class:`qutip.Qobj` - Deterministic collapse operator which will contribute with a standard - Lindblad type of dissipation. - - e_ops : list of :class:`qutip.Qobj` / callback function single - single operator or list of operators for which to evaluate - expectation values. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. 
- - Returns - ------- - - output: :class:`qutip.solver.Result` - - An instance of the class :class:`qutip.solver.Result`. - - """ - return main_ssepdpsolve(H, psi0, times, c_ops, e_ops, **kwargs) - - -# The code for smepdpsolve have been moved to the file pdpsolve. -# The call is still in stochastic for consistance. -def smepdpsolve(H, rho0, times, c_ops, e_ops, **kwargs): - """ - A stochastic (piecewse deterministic process) PDP solver for density matrix - evolution. - - Parameters - ---------- - - H : :class:`qutip.Qobj` - System Hamiltonian. - - rho0 : :class:`qutip.Qobj` - Initial density matrix. - - times : *list* / *array* - List of times for :math:`t`. Must be uniformly spaced. - - c_ops : list of :class:`qutip.Qobj` - Deterministic collapse operator which will contribute with a standard - Lindblad type of dissipation. - - sc_ops : list of :class:`qutip.Qobj` - List of stochastic collapse operators. Each stochastic collapse - operator will give a deterministic and stochastic contribution - to the eqaution of motion according to how the d1 and d2 functions - are defined. - - e_ops : list of :class:`qutip.Qobj` / callback function single - single operator or list of operators for which to evaluate - expectation values. - - kwargs : *dictionary* - Optional keyword arguments. See - :class:`qutip.stochastic.StochasticSolverOptions`. - - Returns - ------- - - output: :class:`qutip.solver.Result` - - An instance of the class :class:`qutip.solver.Result`. 
- - """ - return main_smepdpsolve(H, rho0, times, c_ops, e_ops, **kwargs) diff --git a/qutip/solver/__init__.py b/qutip/solver/__init__.py index 99d84ccd88..6714cae70b 100644 --- a/qutip/solver/__init__.py +++ b/qutip/solver/__init__.py @@ -1,10 +1,11 @@ from .result import * from .options import * -import qutip.solver.integrator +import qutip.solver.integrator as integrator from .integrator import IntegratorException from .sesolve import * from .mesolve import * from .mcsolve import * +from .nm_mcsolve import * from .propagator import * from .scattering import * from .correlation import * @@ -16,3 +17,5 @@ from .brmesolve import * from .krylovsolve import * from .parallel import * +import qutip.solver.sode as sode +from .stochastic import * diff --git a/qutip/solver/correlation.py b/qutip/solver/correlation.py index b05514c519..fcbeff8e13 100644 --- a/qutip/solver/correlation.py +++ b/qutip/solver/correlation.py @@ -8,8 +8,8 @@ import scipy.fftpack from ..core import ( - qeye, Qobj, QobjEvo, liouvillian, spre, unstack_columns, stack_columns, - tensor, qzero, expect + Qobj, QobjEvo, liouvillian, spre, unstack_columns, stack_columns, + tensor, expect, qeye_like, isket ) from .mesolve import MESolver from .mcsolve import MCSolver @@ -17,7 +17,7 @@ from .heom.bofin_solvers import HEOMSolver from .steadystate import steadystate -from ..ui.progressbar import progess_bars +from ..ui.progressbar import progress_bars # ----------------------------------------------------------------------------- # PUBLIC API @@ -466,11 +466,12 @@ def correlation_3op(solver, state0, tlist, taulist, A=None, B=None, C=None): is returned instead. 
""" taulist = np.asarray(taulist) + if isket(state0): + state0 = state0.proj() - dims = state0.dims[0] - A = QobjEvo(qeye(dims) if A in [None, 1] else A) - B = QobjEvo(qeye(dims) if B in [None, 1] else B) - C = QobjEvo(qeye(dims) if C in [None, 1] else C) + A = QobjEvo(qeye_like(state0) if A in [None, 1] else A) + B = QobjEvo(qeye_like(state0) if B in [None, 1] else B) + C = QobjEvo(qeye_like(state0) if C in [None, 1] else C) if isinstance(solver, (MESolver, BRSolver)): out = _correlation_3op_dm(solver, state0, tlist, taulist, A, B, C) @@ -495,8 +496,9 @@ def _correlation_3op_dm(solver, state0, tlist, taulist, A, B, C): solver.options["normalize_output"] = False solver.options["progress_bar"] = False - progress_bar = progess_bars[old_opt['progress_bar']]() - progress_bar.start(len(taulist) + 1, **old_opt['progress_kwargs']) + progress_bar = progress_bars[old_opt['progress_bar']]( + len(taulist) + 1, **old_opt['progress_kwargs'] + ) rho_t = solver.run(state0, tlist).states corr_mat = np.zeros([np.size(tlist), np.size(taulist)], dtype=complex) progress_bar.update() diff --git a/qutip/solver/countstat.py b/qutip/solver/countstat.py index 58c1d135cf..f6a3a4341b 100644 --- a/qutip/solver/countstat.py +++ b/qutip/solver/countstat.py @@ -10,7 +10,7 @@ from itertools import product from ..core import ( sprepost, spre, qeye, tensor, expect, Qobj, - operator_to_vector, vector_to_operator + operator_to_vector, vector_to_operator, CoreOptions ) from ..core import data as _data from .steadystate import pseudo_inverse, steadystate @@ -76,14 +76,9 @@ def countstat_current(L, c_ops=None, rhoss=None, J_ops=None): def _solve(A, V): try: - if settings.has_mkl: - out = mkl_spsolve(A.tocsc(), V) - else: - A.sort_indices() - out = sp.linalg.splu(A, permc_spec='COLAMD').solve(V) - except Exception: - out = sp.linalg.lsqr(A, V)[0] - return out + return _data.solve(A, V) + except ValueError: + return _data.solve(A, V, "lstsq") def _noise_direct(L, wlist, rhoss, J_ops): @@ -94,14 +89,13 @@ 
def _noise_direct(L, wlist, rhoss, J_ops): current = np.zeros(N_j_ops) noise = np.zeros((N_j_ops, N_j_ops, len(wlist))) - tr_op = tensor([qeye(n) for n in L.dims[0][0]]) + tr_op = qeye(L.dims[0][0]) tr_op_vec = operator_to_vector(tr_op) Pop = _data.kron(rhoss_vec, tr_op_vec.data.transpose()) Iop = _data.identity(np.prod(L.dims[0][0])**2) Q = _data.sub(Iop, Pop) - Q_ops = [_data.matmul(Q, _data.matmul(op, rhoss_vec)).to_array() - for op in J_ops] + Q_ops = [_data.matmul(Q, _data.matmul(op, rhoss_vec)) for op in J_ops] for k, w in enumerate(wlist): if w != 0.0: @@ -110,11 +104,10 @@ def _noise_direct(L, wlist, rhoss, J_ops): # At zero frequency some solvers fail for small systems. # Adding a small finite frequency of order 1e-15 # helps prevent the solvers from throwing an exception. - L_temp = 1e-15j * spre(tr_op) + L + with CoreOptions(auto_tidyup=False): + L_temp = 1e-15j * spre(tr_op) + L - A = _data.to(_data.CSR, L_temp.data).as_scipy() - X_rho = [_data.dense.fast_from_numpy(_solve(A, op)) - for op in Q_ops] + X_rho = [_solve(L_temp.data, op) for op in Q_ops] for i, j in product(range(N_j_ops), repeat=2): if i == j: diff --git a/qutip/tests/solve/__init__.py b/qutip/solver/cy/__init__.py similarity index 100% rename from qutip/tests/solve/__init__.py rename to qutip/solver/cy/__init__.py diff --git a/qutip/solver/cy/nm_mcsolve.pxd b/qutip/solver/cy/nm_mcsolve.pxd new file mode 100644 index 0000000000..8f5df2ea55 --- /dev/null +++ b/qutip/solver/cy/nm_mcsolve.pxd @@ -0,0 +1,14 @@ +#cython: language_level=3 +from qutip.core.cy.coefficient cimport Coefficient + + +cdef class RateShiftCoefficient(Coefficient): + cdef: + Coefficient [:] coeffs + + cpdef double as_double(self, double t) except * + + +cdef class SqrtRealCoefficient(Coefficient): + cdef: + Coefficient base diff --git a/qutip/solver/cy/nm_mcsolve.pyx b/qutip/solver/cy/nm_mcsolve.pyx new file mode 100644 index 0000000000..41dff11a97 --- /dev/null +++ b/qutip/solver/cy/nm_mcsolve.pyx @@ -0,0 +1,116 @@ 
+#cython: language_level=3 + +import numpy as np + +cimport cython + +from qutip.core.cy.coefficient cimport Coefficient + +cdef extern from "<cmath>" namespace "std" nogil: + double sqrt(double x) + +cdef extern from "<complex>" namespace "std" nogil: + double real(double complex x) + + +cdef class RateShiftCoefficient(Coefficient): + """ + A coefficient representing the rate shift of a list of coefficients. + + The rate shift is ``2 * abs(min([0, coeff_1(t), coeff_2(t), ...]))``. + + Parameters + ---------- + coeffs : list of :class:`Coefficient` + The list of coefficients to determine the rate shift of. + """ + def __init__(self, list coeffs): + self.coeffs = np.array(coeffs, dtype=Coefficient) + + def __reduce__(self): + return (RateShiftCoefficient, (list(self.coeffs),)) + + def replace_arguments(self, _args=None, **kwargs): + """ + Replace the arguments (``args``) of a coefficient. + + Returns a new :obj:`Coefficient` if the coefficient has arguments, or + the original coefficient if it does not. Arguments to replace may be + supplied either in a dictionary as the first position argument, or + passed as keywords, or as a combination of the two. Arguments not + replaced retain their previous values. + + Parameters + ---------- + _args : dict + Dictionary of arguments to replace. + + **kwargs + Arguments to replace. + """ + return RateShiftCoefficient( + [coeff.replace_arguments(_args, **kwargs) for coeff in self.coeffs], + ) + + cdef complex _call(self, double t) except *: + """ Return the rate shift. """ + cdef int N = len(self.coeffs) + cdef int i + cdef double min_rate = 0 + cdef Coefficient coeff + + for i in range(N): + coeff = self.coeffs[i] + min_rate = min(min_rate, real(coeff._call(t))) + + return 2 * abs(min_rate) + + cpdef double as_double(self, double t) except *: + """ Return the rate shift as a float.
""" + return real(self._call(t)) + + cpdef Coefficient copy(self): + """Return a copy of the :obj:`Coefficient`.""" + return RateShiftCoefficient( + [coeff.copy() for coeff in self.coeffs], + ) + + +@cython.auto_pickle(True) +cdef class SqrtRealCoefficient(Coefficient): + """ + A coefficient representing the positive square root of the real part of + another coefficient. + """ + def __init__(self, Coefficient base): + self.base = base + + def replace_arguments(self, _args=None, **kwargs): + """ + Replace the arguments (``args``) of a coefficient. + + Returns a new :obj:`Coefficient` if the coefficient has arguments, or + the original coefficient if it does not. Arguments to replace may be + supplied either in a dictionary as the first position argument, or + passed as keywords, or as a combination of the two. Arguments not + replaced retain their previous values. + + Parameters + ---------- + _args : dict + Dictionary of arguments to replace. + + **kwargs + Arguments to replace. + """ + return SqrtRealCoefficient( + self.base.replace_arguments(_args, **kwargs) + ) + + cdef complex _call(self, double t) except *: + """Return the shifted rate.""" + return sqrt(real(self.base._call(t))) + + cpdef Coefficient copy(self): + """Return a copy of the :obj:`Coefficient`.""" + return SqrtRealCoefficient(self.base.copy()) diff --git a/qutip/solver/floquet.py b/qutip/solver/floquet.py index 7ce6cc4f11..61c139d349 100644 --- a/qutip/solver/floquet.py +++ b/qutip/solver/floquet.py @@ -15,7 +15,7 @@ from .integrator import Integrator from .result import Result from time import time -from ..ui.progressbar import progess_bars +from ..ui.progressbar import progress_bars class FloquetBasis: @@ -659,14 +659,13 @@ def fmmesolve( the expectation values for the times specified by `tlist`, and/or the state density matrices corresponding to the times. 
""" - if c_ops is None: + if c_ops is None and rho0.isket: return fsesolve( H, rho0, tlist, e_ops=e_ops, T=T, - w_th=w_th, args=args, options=options, ) @@ -918,8 +917,9 @@ def run(self, state0, tlist, *, floquet=False, args=None, e_ops=None): results.add(tlist[0], self._restore_state(_data0, copy=False)) stats["preparation time"] += time() - _time_start - progress_bar = progess_bars[self.options["progress_bar"]]() - progress_bar.start(len(tlist) - 1, **self.options["progress_kwargs"]) + progress_bar = progress_bars[self.options["progress_bar"]]( + len(tlist) - 1, **self.options["progress_kwargs"] + ) for t, state in self._integrator.run(tlist): progress_bar.update() results.add(t, self._restore_state(state, copy=False)) diff --git a/qutip/solver/heom/bofin_baths.py b/qutip/solver/heom/bofin_baths.py index 9dfec1a1d7..ff64dfc610 100644 --- a/qutip/solver/heom/bofin_baths.py +++ b/qutip/solver/heom/bofin_baths.py @@ -91,8 +91,11 @@ class BathExponent: Attributes ---------- + fermionic : bool + True if the type of the exponent is a Fermionic type (i.e. either + "+" or "-") and False otherwise. - All of the parameters are available as attributes. + All of the parameters are also available as attributes. 
""" types = enum.Enum("ExponentType", ["R", "I", "RI", "+", "-"]) @@ -120,15 +123,19 @@ def _check_sigma_bar_k_offset(self, type, offset): " specified for + and - bath exponents" ) + def _type_is_fermionic(self, type): + return type in (self.types["+"], self.types["-"]) + def __init__( - self, type, dim, Q, ck, vk, ck2=None, sigma_bar_k_offset=None, - tag=None, + self, type, dim, Q, ck, vk, ck2=None, + sigma_bar_k_offset=None, tag=None, ): if not isinstance(type, self.types): type = self.types[type] self._check_ck2(type, ck2) self._check_sigma_bar_k_offset(type, sigma_bar_k_offset) self.type = type + self.fermionic = self._type_is_fermionic(type) self.dim = dim self.Q = Q self.ck = ck @@ -145,6 +152,7 @@ def __repr__(self): f" Q.dims={dims!r}" f" ck={self.ck!r} vk={self.vk!r} ck2={self.ck2!r}" f" sigma_bar_k_offset={self.sigma_bar_k_offset!r}" + f" fermionic={self.fermionic!r}" f" tag={self.tag!r}>" ) diff --git a/qutip/solver/heom/bofin_solvers.py b/qutip/solver/heom/bofin_solvers.py index 99131ae990..d38553c902 100644 --- a/qutip/solver/heom/bofin_solvers.py +++ b/qutip/solver/heom/bofin_solvers.py @@ -119,6 +119,7 @@ def __init__(self, exponents, max_depth): self.labels = list(state_number_enumerate(self.dims, max_depth)) self._label_idx = {s: i for i, s in enumerate(self.labels)} + self.idx = self._label_idx.__getitem__ def idx(self, label): """ @@ -134,6 +135,13 @@ def idx(self, label): ------- int The index of the label within the list of ADO labels. + + Note + ---- + This implementation of the ``.idx(...)`` method is just for + reference and documentation. To avoid the cost of a Python + function call, it is replaced with + ``self._label_idx.__getitem__`` when the instance is created. """ return self._label_idx[label] @@ -504,13 +512,6 @@ def heomsolve( Maximum lenght of one internal step. When using pulses, it should be less than half the width of the thinnest pulse. 
- - - - - - - Returns ------- :class:`~HEOMResult` @@ -527,7 +528,7 @@ def heomsolve( if the results option ``ado_return`` was set to ``True``). Each element is an instance of :class:`HierarchyADOsState`. The state of a particular ADO may be extracted from - ``result.ado_states[i]`` by calling :meth:`.extract`. + ``result.ado_states[i]`` by calling :meth:`extract`. * ``expect``: a list containing the values of each ``e_ops`` at time ``t``. @@ -579,6 +580,12 @@ class HEOMSolver(Solver): ados : :obj:`HierarchyADOs` The description of the hierarchy constructed from the given bath and maximum depth. + + rhs : :obj:`QobjEvo` + The right-hand side (RHS) of the hierarchy evolution ODE. Internally + the system and bath coupling operators are converted to + :class:`qutip.data.CSR` instances during construction of the RHS, + so the operators in the ``rhs`` will all be sparse. """ name = "heomsolver" @@ -624,7 +631,7 @@ def __init__(self, H, bath, max_depth, *, options=None): self._sId = _data.identity(self._sup_shape, dtype="csr") # pre-calculate superoperators required by _grad_prev and _grad_next: - Qs = [exp.Q for exp in self.ados.exponents] + Qs = [exp.Q.to("csr") for exp in self.ados.exponents] self._spreQ = [spre(op).data for op in Qs] self._spostQ = [spost(op).data for op in Qs] self._s_pre_minus_post_Q = [ @@ -688,20 +695,6 @@ def _combine_bath_exponents(self, bath): exponents = [] for b in bath: exponents.extend(b.exponents) - all_bosonic = all( - exp.type in (exp.types.R, exp.types.I, exp.types.RI) - for exp in exponents - ) - all_fermionic = all( - exp.type in (exp.types["+"], exp.types["-"]) - for exp in exponents - ) - if not (all_bosonic or all_fermionic): - raise ValueError( - "Bath exponents are currently restricted to being either" - " all bosonic or all fermionic, but a mixture of bath" - " exponents was given." 
- ) if not all(exp.Q.dims == exponents[0].Q.dims for exp in exponents): raise ValueError( "All bath exponents must have system coupling operators" @@ -710,30 +703,19 @@ def _combine_bath_exponents(self, bath): ) return exponents - def _grad_n(self, L, he_n): + def _grad_n(self, he_n): """ Get the gradient for the hierarchy ADO at level n. """ vk = self.ados.vk vk_sum = sum(he_n[i] * vk[i] for i in range(len(vk))) - if L is not None: # time-independent case - op = _data.sub(L, _data.mul(self._sId, vk_sum)) - else: # time-dependent case - op = _data.mul(self._sId, -vk_sum) + op = _data.mul(self._sId, -vk_sum) return op def _grad_prev(self, he_n, k): """ Get the previous gradient. """ - if self.ados.exponents[k].type in ( - BathExponent.types.R, BathExponent.types.I, - BathExponent.types.RI - ): - return self._grad_prev_bosonic(he_n, k) - elif self.ados.exponents[k].type in ( - BathExponent.types["+"], BathExponent.types["-"] - ): + if self.ados.exponents[k].fermionic: return self._grad_prev_fermionic(he_n, k) else: - raise ValueError( - f"Mode {k} has unsupported type {self.ados.exponents[k].type}") + return self._grad_prev_bosonic(he_n, k) def _grad_prev_bosonic(self, he_n, k): if self.ados.exponents[k].type == BathExponent.types.R: @@ -765,11 +747,15 @@ def _grad_prev_bosonic(self, he_n, k): def _grad_prev_fermionic(self, he_n, k): ck = self.ados.ck + he_fermionic_n = [ + i * int(exp.fermionic) + for i, exp in zip(he_n, self.ados.exponents) + ] - n_excite = sum(he_n) + n_excite = sum(he_fermionic_n) sign1 = (-1) ** (n_excite + 1) - n_excite_before_m = sum(he_n[:k]) + n_excite_before_m = sum(he_fermionic_n[:k]) sign2 = (-1) ** (n_excite_before_m) sigma_bar_k = k + self.ados.sigma_bar_k_offset[k] @@ -799,28 +785,24 @@ def _grad_prev_fermionic(self, he_n, k): def _grad_next(self, he_n, k): """ Get the previous gradient. 
""" - if self.ados.exponents[k].type in ( - BathExponent.types.R, BathExponent.types.I, - BathExponent.types.RI - ): - return self._grad_next_bosonic(he_n, k) - elif self.ados.exponents[k].type in ( - BathExponent.types["+"], BathExponent.types["-"] - ): + if self.ados.exponents[k].fermionic: return self._grad_next_fermionic(he_n, k) else: - raise ValueError( - f"Mode {k} has unsupported type {self.ados.exponents[k].type}") + return self._grad_next_bosonic(he_n, k) def _grad_next_bosonic(self, he_n, k): op = _data.mul(self._s_pre_minus_post_Q[k], -1j) return op def _grad_next_fermionic(self, he_n, k): - n_excite = sum(he_n) + he_fermionic_n = [ + i * int(exp.fermionic) + for i, exp in zip(he_n, self.ados.exponents) + ] + n_excite = sum(he_fermionic_n) sign1 = (-1) ** (n_excite + 1) - n_excite_before_m = sum(he_n[:k]) + n_excite_before_m = sum(he_fermionic_n[:k]) sign2 = (-1) ** (n_excite_before_m) if self.ados.exponents[k].type == BathExponent.types["+"]: @@ -840,14 +822,14 @@ def _grad_next_fermionic(self, he_n, k): ) return op - def _rhs(self, L): + def _rhs(self): """ Make the RHS for the HEOM. """ ops = _GatherHEOMRHS( self.ados.idx, block=self._sup_shape, nhe=self._n_ados ) for he_n in self.ados.labels: - op = self._grad_n(L, he_n) + op = self._grad_n(he_n) ops.add_op(he_n, he_n, op) for k in range(len(self.ados.dims)): next_he = self.ados.next(he_n, k) @@ -863,12 +845,17 @@ def _rhs(self, L): def _calculate_rhs(self): """ Make the full RHS required by the solver. """ + rhs_mat = self._rhs() + rhs_dims = [ + self._sup_shape * self._n_ados, self._sup_shape * self._n_ados + ] + h_identity = _data.identity(self._n_ados, dtype="csr") + if self.L_sys.isconstant: - L0 = self.L_sys(0) - rhs_mat = self._rhs(L0.data) - rhs = QobjEvo(Qobj(rhs_mat, dims=[ - self._sup_shape * self._n_ados, self._sup_shape * self._n_ados - ])) + # For the constant case, we just add the Liouvillian to the + # diagonal blocks of the RHS matrix. 
+ rhs_mat += _data.kron(h_identity, self.L_sys(0).to("csr").data) + rhs = QobjEvo(Qobj(rhs_mat, dims=rhs_dims)) else: # In the time dependent case, we construct the parameters # for the ODE gradient function under the assumption that @@ -879,23 +866,23 @@ def _calculate_rhs(self): # This assumption holds because only _grad_n dependents on # the system Liouvillian (and not _grad_prev or _grad_next) and # the bath coupling operators are not time-dependent. - # - # By calling _rhs(None) we omit the Liouvillian completely from - # the RHS and then manually add back the Liouvillian afterwards. - rhs_mat = self._rhs(None) - rhs = QobjEvo(Qobj(rhs_mat)) - h_identity = _data.identity(self._n_ados, dtype="csr") + rhs = QobjEvo(Qobj(rhs_mat, dims=rhs_dims)) def _kron(x): - return Qobj(_data.kron(h_identity, x.data)).to("csr") + return Qobj( + _data.kron(h_identity, x.data), + dims=rhs_dims, + ).to("csr") + rhs += self.L_sys.linear_map(_kron) # The assertion that rhs_mat has data type CSR is just a sanity # check on the RHS creation. The base solver class will still # convert the RHS to the type required by the ODE integrator if - # required. + # needed. assert isinstance(rhs_mat, _csr.CSR) assert isinstance(rhs, QobjEvo) + assert rhs.dims == rhs_dims return rhs @@ -934,7 +921,8 @@ def steady_state( steady_ados : :class:`HierarchyADOsState` The steady state of the full ADO hierarchy. A particular ADO may be - extracted from the full state by calling :meth:`.extract`. + extracted from the full state by calling + :meth:`HEOMSolver.extract`. """ if not self.L_sys.isconstant: raise ValueError( @@ -1034,7 +1022,7 @@ def run(self, state0, tlist, *, args=None, e_ops=None): if the results option ``ado_return`` was set to ``True``). Each element is an instance of :class:`HierarchyADOsState`. The state of a particular ADO may be extracted from - ``result.ado_states[i]`` by calling :meth:`.extract`. + ``result.ado_states[i]`` by calling :meth:`extract`. 
* ``expect``: a list containing the values of each ``e_ops`` at time ``t``. @@ -1152,8 +1140,8 @@ def options(self): state_data_type: str, default="dense" Name of the data type of the state used during the ODE evolution. - Use an empty string to keep the input state type. Many integrator can - only work with `Dense`. + Use an empty string to keep the input state type. Many integrators + support only `Dense`. store_ados : bool, default=False Whether or not to store the HEOM ADOs. Only relevant when using @@ -1294,19 +1282,19 @@ class _GatherHEOMRHS: The number of ADOs in the hierarchy. """ def __init__(self, f_idx, block, nhe): - self._block = block - self._nhe = nhe + self._block_size = block + self._n_blocks = nhe self._f_idx = f_idx self._ops = [] def add_op(self, row_he, col_he, op): """ Add an block operator to the list. """ self._ops.append( - (self._f_idx(row_he), self._f_idx(col_he), _data.to["csr"](op)) + (self._f_idx(row_he), self._f_idx(col_he), op) ) def gather(self): - """ Create the HEOM liouvillian from a sorted list of smaller (fast) CSR + """ Create the HEOM liouvillian from a sorted list of smaller sparse matrices. .. note:: @@ -1323,48 +1311,13 @@ def gather(self): rhs : :obj:`Data` A combined matrix of shape ``(block * nhe, block * ne)``.
""" - block = self._block - nhe = self._nhe - ops = self._ops - shape = (block * nhe, block * nhe) - if not ops: - return _data.zeros(*shape, dtype="csr") - ops.sort() - nnz = sum(_csr.nnz(op) for _, _, op in ops) - indptr = np.zeros(shape[0] + 1, dtype=np.int32) - indices = np.zeros(nnz, dtype=np.int32) - data = np.zeros(nnz, dtype=np.complex128) - end = 0 - op_idx = 0 - op_len = len(ops) - - for row_idx in range(nhe): - prev_op_idx = op_idx - while op_idx < op_len: - if ops[op_idx][0] != row_idx: - break - op_idx += 1 - - row_ops = ops[prev_op_idx: op_idx] - rowpos = row_idx * block - for op_row in range(block): - for _, col_idx, op in row_ops: - op = op.as_scipy() # convert CSR to SciPy csr_matrix - colpos = col_idx * block - op_row_start = op.indptr[op_row] - op_row_end = op.indptr[op_row + 1] - op_row_len = op_row_end - op_row_start - if op_row_len == 0: - continue - indices[end: end + op_row_len] = ( - op.indices[op_row_start: op_row_end] + colpos - ) - data[end: end + op_row_len] = ( - op.data[op_row_start: op_row_end] - ) - end += op_row_len - indptr[rowpos + op_row + 1] = end - - return _csr.CSR( - (data, indices, indptr), shape=shape, copy=False, + self._ops.sort() + ops = np.array(self._ops, dtype=[ + ("row", _data.base.idxint_dtype), + ("col", _data.base.idxint_dtype), + ("op", _data.CSR), + ]) + return _csr._from_csr_blocks( + ops["row"], ops["col"], ops["op"], + self._n_blocks, self._block_size, ) diff --git a/qutip/solver/integrator/explicit_rk.pxd b/qutip/solver/integrator/explicit_rk.pxd index 46bbcf9712..b653850c88 100644 --- a/qutip/solver/integrator/explicit_rk.pxd +++ b/qutip/solver/integrator/explicit_rk.pxd @@ -39,24 +39,24 @@ cdef class Explicit_RungeKutta: cdef double [:, ::1] a cdef double [:, ::1] bi - cpdef integrate(Explicit_RungeKutta self, double t, bint step=*) + cpdef void integrate(Explicit_RungeKutta self, double t, bint step=*) except * - cpdef void set_initial_value(self, Data y0, double t) + cpdef void set_initial_value(self, 
Data y0, double t) except * - cdef int _step_in_err(self, double t, int max_step) + cdef int _step_in_err(self, double t, int max_step) except -1 - cdef double _compute_step(self, double dt) + cdef double _compute_step(self, double dt) except -1 - cdef double _error(self, Data y_new, double dt) + cdef double _error(self, Data y_new, double dt) except -1 - cdef void _prep_dense_out(self) + cdef void _prep_dense_out(self) except * cdef Data _interpolate_step(self, double t, Data out) cdef inline Data _accumulate(self, Data target, double[:] factors, double dt, int size) - cdef double _estimate_first_step(self, double t, Data y0) + cdef double _estimate_first_step(self, double t, Data y0) except -1 cdef double _get_timestep(self, double t) diff --git a/qutip/solver/integrator/explicit_rk.pyx b/qutip/solver/integrator/explicit_rk.pyx index 17220ea25a..33749a90b7 100644 --- a/qutip/solver/integrator/explicit_rk.pyx +++ b/qutip/solver/integrator/explicit_rk.pyx @@ -11,9 +11,14 @@ from qutip.core.data.tidyup import tidyup_csr from qutip.core.data.norm import frobenius_data from .verner7efficient import vern7_coeff from .verner9efficient import vern9_coeff +from cpython.exc cimport PyErr_CheckSignals cimport cython import numpy as np + +__all__ = ["Explicit_RungeKutta"] + + euler_coeff = { 'order': 1, 'a': np.array([[0.]], dtype=np.float64), @@ -194,7 +199,7 @@ cdef class Explicit_RungeKutta: self.b_factor_np = np.empty(self.rk_extra_step, dtype=np.float64) self.b_factor = self.b_factor_np - cpdef void set_initial_value(self, Data y0, double t): + cpdef void set_initial_value(self, Data y0, double t) except *: """ Set the initial state and time of the integration. """ @@ -218,7 +223,7 @@ cdef class Explicit_RungeKutta: else: self._dt_safe = self.first_step - cdef double _estimate_first_step(self, double t, Data y0): + cdef double _estimate_first_step(self, double t, Data y0) except -1: if not self.adaptative_step: return 0. 
@@ -261,7 +266,7 @@ cdef class Explicit_RungeKutta: dt = max(self.min_step, dt) return dt - cpdef integrate(Explicit_RungeKutta self, double t, bint step=False): + cpdef void integrate(Explicit_RungeKutta self, double t, bint step=False) except *: """ Do the integration to t. If ``step`` is True, it will make a maximum 1 step and may not reach @@ -296,6 +301,7 @@ cdef class Explicit_RungeKutta: self._t_prev = self._t_front self._norm_prev = self._norm_front nsteps_left -= self._step_in_err(t, nsteps_left) + PyErr_CheckSignals() if step: break @@ -312,7 +318,7 @@ cdef class Explicit_RungeKutta: self._t = self._t_front self._y = copy_to(self._y_front, self._y) - cdef int _step_in_err(self, double t, int max_step): + cdef int _step_in_err(self, double t, int max_step) except -1: """ Do compute one step, repeating until the error is within tolerance. """ @@ -334,7 +340,7 @@ cdef class Explicit_RungeKutta: break return nsteps - cdef double _compute_step(self, double dt): + cdef double _compute_step(self, double dt) except -1: """ Do compute one step with fixed ``dt``, return the error. Use (_t_prev, _y_prev) to create (_t_front, _y_front) @@ -364,7 +370,7 @@ cdef class Explicit_RungeKutta: return self._error(self._y_front, dt) - cdef double _error(self, Data y_new, double dt): + cdef double _error(self, Data y_new, double dt) except -1: """ Compute the normalized error. (error/tol) """ if not self.adaptative_step: return 0. @@ -374,7 +380,7 @@ cdef class Explicit_RungeKutta: return frobenius_data(self._y_temp) / (self.atol + max(self._norm_prev, self._norm_front) * self.rtol) - cdef void _prep_dense_out(self): + cdef void _prep_dense_out(self) except *: """ Compute derivative for the interpolation step. 
""" diff --git a/qutip/solver/mcsolve.py b/qutip/solver/mcsolve.py index 3a326ce405..9682d73437 100644 --- a/qutip/solver/mcsolve.py +++ b/qutip/solver/mcsolve.py @@ -1,13 +1,10 @@ __all__ = ['mcsolve', "MCSolver"] -import warnings - import numpy as np -from copy import copy -from ..core import QobjEvo, spre, spost, Qobj, unstack_columns, liouvillian +from ..core import QobjEvo, spre, spost, Qobj, unstack_columns from .multitraj import MultiTrajSolver -from .solver_base import Solver -from .result import McResult, Result +from .solver_base import Solver, Integrator +from .result import McResult, McTrajectoryResult, McResultImprovedSampling from .mesolve import mesolve, MESolver import qutip.core.data as _data from time import time @@ -56,7 +53,7 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, options : None / dict Dictionary of options for the solver. - - store_final_state : bool [False] + - store_final_state : bool, [False] Whether or not to store the final state of the evolution in the result class. - store_states : bool, NoneType, [None] @@ -69,7 +66,7 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, if not installed. Empty string or False will disable the bar. - progress_kwargs : dict, [{"chunk_size": 10}] kwargs to pass to the progress_bar. Qutip's bars use `chunk_size`. - - method : str {"adams", "bdf", "dop853", "vern9", etc.} ["adams"] + - method : str {"adams", "bdf", "dop853", "vern9", etc.}, ["adams"] Which differential equation integration method to use. - keep_runs_results : bool, [False] Whether to store results from all trajectories or just store the @@ -98,6 +95,9 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, - max_step : float, [0] Maximum lenght of one internal step. When using pulses, it should be less than half the width of the thinnest pulse. + - improved_sampling : Bool + Whether to use the improved sampling algorithm from Abdelhafez et al. 
+ PRA (2019) seeds : int, SeedSequence, list, [optional] Seed for the random number generator. It can be a single seed used to @@ -106,7 +106,7 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, seeds=prev_result.seeds - target_tol : {float, tuple, list}, optional + target_tol : float, tuple, list, [optional] Target tolerance of the evolution. The evolution will compute trajectories until the error on the expectation values is lower than this tolerance. The maximum number of trajectories employed is @@ -115,13 +115,13 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, relative tolerance, in that order. Lastly, it can be a list of pairs of (atol, rtol) for each e_ops. - timeout : float [optional] + timeout : float, [optional] Maximum time for the evolution in second. When reached, no more - trajectories will be computed. Overwrite the option of the same name. + trajectories will be computed. Returns ------- - results : :class:`qutip.solver.Result` + results : :class:`qutip.solver.McResult` Object storing all results from the simulation. Which results is saved depends on the presence of ``e_ops`` and the options used. ``collapse`` and ``photocurrent`` is available to Monte Carlo simulation results. @@ -151,8 +151,8 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, "ntraj must be an integer. " "A list of numbers is not longer supported." ) - mc = MCSolver(H, c_ops, options=options) + result = mc.run(state, tlist=tlist, ntraj=ntraj, e_ops=e_ops, seed=seeds, target_tol=target_tol, timeout=timeout) return result @@ -174,7 +174,8 @@ def __init__(self, integrator, c_ops, n_ops, options=None): self._is_set = False self.issuper = c_ops[0].issuper - def set_state(self, t, state0, generator): + def set_state(self, t, state0, generator, + no_jump=False, jump_prob_floor=0.0): """ Set the state of the ODE solver. 
@@ -188,10 +189,25 @@ def set_state(self, t, state0, generator): generator : numpy.random.generator Random number generator. + + no_jump: Bool + whether or not to sample the no-jump trajectory. + If so, the "random number" should be set to zero + + jump_prob_floor: float + if no_jump == False, this is set to the no-jump + probability. This setting ensures that we sample + a trajectory with jumps """ self.collapses = [] self._generator = generator - self.target_norm = self._generator.random() + if no_jump: + self.target_norm = 0.0 + else: + self.target_norm = ( + self._generator.random() * (1 - jump_prob_floor) + + jump_prob_floor + ) self._integrator.set_state(t, state0) self._is_set = True @@ -225,12 +241,12 @@ def reset(self, hard=False): def _prob_func(self, state): if self.issuper: - return _data.norm.trace(unstack_columns(state)) + return _data.trace_oper_ket(state).real return _data.norm.l2(state)**2 def _norm_func(self, state): if self.issuper: - return _data.norm.trace(unstack_columns(state)) + return _data.trace_oper_ket(state).real return _data.norm.l2(state) def _find_collapse_time(self, norm_old, norm, t_prev, t_final): @@ -301,6 +317,9 @@ def _do_collapse(self, collapse_time, state): else: state_new = _data.mul(state_new, 1 / new_norm) self.collapses.append((collapse_time, which)) + # this does not need to be modified for improved sampling: + # as noted in Abdelhafez PRA (2019), + # after a jump we reset to the full range [0, 1) self.target_norm = self._generator.random() self._integrator.set_state(collapse_time, state_new) @@ -339,17 +358,12 @@ class MCSolver(MultiTrajSolver): (see :class:`qutip.QobjEvo`'s documentation). They must be operators even if ``H`` is a superoperator. - options : SolverOptions, [optional] + options : dict, [optional] Options for the evolution. - - seed : int, SeedSequence, list, [optional] - Seed for the random number generator. 
It can be a single seed used to - spawn seeds for each trajectory or a list of seed, one for each - trajectory. Seeds are saved in the result and can be reused with:: - seeds=prev_result.seeds """ name = "mcsolve" - resultclass = McResult + trajectory_resultclass = McTrajectoryResult + mc_integrator_class = MCIntegrator solver_options = { "progress_bar": "text", "progress_kwargs": {"chunk_size": 10}, @@ -365,6 +379,7 @@ class MCSolver(MultiTrajSolver): "norm_steps": 5, "norm_t_tol": 1e-6, "norm_tol": 1e-4, + "improved_sampling": False, } def __init__(self, H, c_ops, *, options=None): @@ -392,6 +407,7 @@ def __init__(self, H, c_ops, *, options=None): rhs -= 0.5 * n_op self._num_collapse = len(self._c_ops) + self.options = options super().__init__(rhs, options=options) @@ -424,24 +440,74 @@ def _argument(self, args): for n_op in self._n_ops: n_op.arguments(args) - def _run_one_traj(self, seed, state, tlist, e_ops): + def _initialize_run_one_traj(self, seed, state, tlist, e_ops, + no_jump=False, jump_prob_floor=0.0): + result = self.trajectory_resultclass(e_ops, self.options) + generator = self._get_generator(seed) + self._integrator.set_state(tlist[0], state, generator, + no_jump=no_jump, + jump_prob_floor=jump_prob_floor) + result.add(tlist[0], self._restore_state(state, copy=False)) + return result + + def _run_one_traj(self, seed, state, tlist, e_ops, no_jump=False, + jump_prob_floor=0.0): """ Run one trajectory and return the result. """ - # The integrators is reused, but non-reentrant. They are are fine for - # multiprocessing, but will fail with multithreading. - # If a thread base parallel map is created, eahc trajectory should use - # a copy of the integrator. 
- result = Result(e_ops, {**self.options, "normalize_output": False}) - generator = self._get_generator(seed) - self._integrator.set_state(tlist[0], state, generator) - result.add(tlist[0], self._restore_state(state, copy=False)) - for t in tlist[1:]: - t, state = self._integrator.integrate(t, copy=False) - result.add(t, self._restore_state(state, copy=False)) + result = self._initialize_run_one_traj(seed, state, tlist, e_ops, + no_jump=no_jump, + jump_prob_floor=jump_prob_floor) + seed, result = self._integrate_one_traj(seed, tlist, result) result.collapse = self._integrator.collapses return seed, result + def run(self, state, tlist, ntraj=1, *, + args=None, e_ops=(), timeout=None, target_tol=None, seed=None): + """ + Do the evolution of the Quantum system. + See the overridden method for further details. The modification + here is to sample the no-jump trajectory first. Then, the no-jump + probability is used as a lower-bound for random numbers in future + monte carlo runs + """ + if not self.options["improved_sampling"]: + return super().run(state, tlist, ntraj=ntraj, args=args, + e_ops=e_ops, timeout=timeout, + target_tol=target_tol, seed=seed) + stats, seeds, result, map_func, map_kw, state0 = self._initialize_run( + state, + ntraj, + args=args, + e_ops=e_ops, + timeout=timeout, + target_tol=target_tol, + seed=seed, + ) + # first run the no-jump trajectory + start_time = time() + seed0, no_jump_result = self._run_one_traj(seeds[0], state0, tlist, + e_ops, no_jump=True) + _, state, _ = self._integrator.get_state(copy=False) + no_jump_prob = self._integrator._prob_func(state) + result.no_jump_prob = no_jump_prob + result.add((seed0, no_jump_result)) + result.stats['no jump run time'] = time() - start_time + + # run the remaining trajectories with the random number floor + # set to the no jump probability such that we only sample + # trajectories with jumps + start_time = time() + map_func( + self._run_one_traj, seeds[1:], + (state0, tlist, e_ops, False, 
no_jump_prob), + reduce_func=result.add, map_kw=map_kw, + progress_bar=self.options["progress_bar"], + progress_bar_kwargs=self.options["progress_kwargs"] + ) + result.stats['run time'] = time() - start_time + return result + def _get_integrator(self): _time_start = time() method = self.options["method"] @@ -452,12 +518,19 @@ def _get_integrator(self): else: raise ValueError("Integrator method not supported.") integrator_instance = integrator(self.rhs, self.options) - mc_integrator = MCIntegrator( + mc_integrator = self.mc_integrator_class( integrator_instance, self._c_ops, self._n_ops, self.options ) self._init_integrator_time = time() - _time_start return mc_integrator + @property + def resultclass(self): + if self.options["improved_sampling"]: + return McResultImprovedSampling + else: + return McResult + @property def options(self): """ @@ -516,6 +589,10 @@ def options(self): norm_steps: int Maximum number of tries to find the collapse. + + improved_sampling: Bool + Whether to use the improved sampling algorithm + of Abdelhafez et al. PRA (2019) """ return self._options diff --git a/qutip/solver/mesolve.py b/qutip/solver/mesolve.py index ed865dcabd..5792038af1 100644 --- a/qutip/solver/mesolve.py +++ b/qutip/solver/mesolve.py @@ -170,7 +170,7 @@ class MESolver(SESolver): of Liouvillian superoperators. None is equivalent to an empty list. options : dict, optional - Options for the solver, see :obj:`SESolver.options` and + Options for the solver, see :obj:`MESolver.options` and `Integrator <./classes.html#classes-ode>`_ for a list of all options. attributes diff --git a/qutip/solver/multitraj.py b/qutip/solver/multitraj.py index 204178d116..e422413afc 100644 --- a/qutip/solver/multitraj.py +++ b/qutip/solver/multitraj.py @@ -1,12 +1,10 @@ -from .. 
import Qobj, QobjEvo from .result import Result, MultiTrajResult from .parallel import _get_map from time import time from .solver_base import Solver import numpy as np -from copy import copy -__all__ = ["MultiTrajSolver", "TrajectorySolver"] +__all__ = ["MultiTrajSolver"] class MultiTrajSolver(Solver): @@ -30,6 +28,7 @@ class MultiTrajSolver(Solver): """ name = "generic multi trajectory" resultclass = MultiTrajResult + trajectory_resultclass = Result _avail_integrators = {} # Class of option used by the solver @@ -103,6 +102,28 @@ def step(self, t, *, args=None, copy=True): _, state = self._integrator.integrate(t, copy=False) return self._restore_state(state, copy=copy) + def _initialize_run(self, state, ntraj=1, args=None, e_ops=(), + timeout=None, target_tol=None, seed=None): + start_time = time() + self._argument(args) + stats = self._initialize_stats() + seeds = self._read_seed(seed, ntraj) + + result = self.resultclass( + e_ops, self.options, solver=self.name, stats=stats + ) + result.add_end_condition(ntraj, target_tol) + + map_func = _get_map[self.options['map']] + map_kw = { + 'timeout': timeout, + 'job_timeout': self.options['job_timeout'], + 'num_cpus': self.options['num_cpus'], + } + state0 = self._prepare_state(state) + stats['preparation time'] += time() - start_time + return stats, seeds, result, map_func, map_kw, state0 + def run(self, state, tlist, ntraj=1, *, args=None, e_ops=(), timeout=None, target_tol=None, seed=None): """ @@ -110,8 +131,8 @@ def run(self, state, tlist, ntraj=1, *, For a ``state`` at time ``tlist[0]`` do the evolution as directed by ``rhs`` and for each time in ``tlist`` store the state and/or - expectation values in a :cls:`Result`. The evolution method and stored - results are determined by ``options``. + expectation values in a :class:`Result`. The evolution method and + stored results are determined by ``options``. 
Parameters ---------- @@ -154,8 +175,8 @@ def run(self, state, tlist, ntraj=1, *, seed : {int, SeedSequence, list} optional Seed or list of seeds for each trajectories. - Return - ------ + Returns + ------- results : :class:`qutip.solver.MultiTrajResult` Results of the evolution. States and/or expect will be saved. You can control the saved data in the options. @@ -164,25 +185,15 @@ def run(self, state, tlist, ntraj=1, *, The simulation will end when the first end condition is reached between ``ntraj``, ``timeout`` and ``target_tol``. """ - start_time = time() - self._argument(args) - stats = self._initialize_stats() - seeds = self._read_seed(seed, ntraj) - - result = self.resultclass( - e_ops, self.options, solver=self.name, stats=stats + stats, seeds, result, map_func, map_kw, state0 = self._initialize_run( + state, + ntraj, + args=args, + e_ops=e_ops, + timeout=timeout, + target_tol=target_tol, + seed=seed, ) - result.add_end_condition(ntraj, target_tol) - - map_func = _get_map[self.options['map']] - map_kw = { - 'timeout': timeout, - 'job_timeout': self.options['job_timeout'], - 'num_cpus': self.options['num_cpus'], - } - state0 = self._prepare_state(state) - stats['preparation time'] += time() - start_time - start_time = time() map_func( self._run_one_traj, seeds, @@ -194,16 +205,23 @@ def run(self, state, tlist, ntraj=1, *, result.stats['run time'] = time() - start_time return result + def _initialize_run_one_traj(self, seed, state, tlist, e_ops): + result = self.trajectory_resultclass(e_ops, self.options) + generator = self._get_generator(seed) + self._integrator.set_state(tlist[0], state, generator) + result.add(tlist[0], self._restore_state(state, copy=False)) + return result + def _run_one_traj(self, seed, state, tlist, e_ops): """ Run one trajectory and return the result. 
""" - result = Result(e_ops, self.options) - generator = self._get_generator(seed) - self._integrator.set_state(tlist[0], state, generator) - result.add(tlist[0], self._restore_state(state, copy=False)) + result = self._initialize_run_one_traj(seed, state, tlist, e_ops) + return self._integrate_one_traj(seed, tlist, result) + + def _integrate_one_traj(self, seed, tlist, result): for t in tlist[1:]: - t, state = self._integrator.step(t, copy=False) + t, state = self._integrator.integrate(t, copy=False) result.add(t, self._restore_state(state, copy=False)) return seed, result diff --git a/qutip/solver/nm_mcsolve.py b/qutip/solver/nm_mcsolve.py new file mode 100644 index 0000000000..f97298b390 --- /dev/null +++ b/qutip/solver/nm_mcsolve.py @@ -0,0 +1,521 @@ +__all__ = ['nm_mcsolve', 'NonMarkovianMCSolver'] + +import functools +import numbers + +import numpy as np +import scipy + +from .multitraj import MultiTrajSolver +from .mcsolve import MCSolver, MCIntegrator +from .mesolve import MESolver, mesolve +from .result import NmmcResult, NmmcTrajectoryResult +from .cy.nm_mcsolve import RateShiftCoefficient, SqrtRealCoefficient +from ..core.coefficient import ConstantCoefficient +from ..core import ( + CoreOptions, Qobj, QobjEvo, isket, ket2dm, qeye, coefficient, +) + + +# The algorithm implemented here is based on the influence martingale approach +# described in +# Nat Commun 13, 4140 (2022) +# https://doi.org/10.1038/s41467-022-31533-8 +# https://arxiv.org/abs/2102.10355 +# and +# https://arxiv.org/abs/2209.08958 + + +def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, + args=None, options=None, seeds=None, target_tol=None, + timeout=None): + """ + Monte-Carlo evolution corresponding to a Lindblad equation with "rates" + that may be negative. Usage of this function is analogous to ``mcsolve``, + but the ``c_ops`` parameter is replaced by an ``ops_and_rates`` parameter + to allow for negative rates. 
Options for the underlying ODE solver are + given by the Options class. + + Parameters + ---------- + H : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`, ``list``, callable. + System Hamiltonian as a Qobj, QobjEvo. It can also be any input type + that QobjEvo accepts (see :class:`qutip.QobjEvo`'s documentation). + ``H`` can also be a superoperator (liouvillian) if some collapse + operators are to be treated deterministically. + + state : :class:`qutip.Qobj` + Initial state vector. + + tlist : array_like + Times at which results are recorded. + + ops_and_rates : list + A ``list`` of tuples ``(L, Gamma)``, where the Lindblad operator ``L`` + is a :class:`qutip.Qobj` and ``Gamma`` represents the corresponding + rate, which is allowed to be negative. The Lindblad operators must be + operators even if ``H`` is a superoperator. If none are given, the + solver will defer to ``sesolve`` or ``mesolve``. Each rate ``Gamma`` + may be just a number (in the case of a constant rate) or, otherwise, + specified using any format accepted by :func:`qutip.coefficient`. + + e_ops : list, [optional] + A ``list`` of operator as Qobj, QobjEvo or callable with signature of + (t, state: Qobj) for calculating expectation values. When no ``e_ops`` + are given, the solver will default to save the states. + + ntraj : int + Maximum number of trajectories to run. Can be cut short if a time limit + is passed with the ``timeout`` keyword or if the target tolerance is + reached, see ``target_tol``. + + args : None / dict + Arguments for time-dependent Hamiltonian and collapse operator terms. + + options : None / dict + Dictionary of options for the solver. + + - store_final_state : bool, [False] + Whether or not to store the final state of the evolution in the + result class. + - store_states : bool, NoneType, [None] + Whether or not to store the state density matrices. + On ``None`` the states will be saved if no expectation operators are + given. 
+ - progress_bar : str {'text', 'enhanced', 'tqdm', ''}, ['text'] + How to present the solver progress. + 'tqdm' uses the python module of the same name and raise an error + if not installed. Empty string or False will disable the bar. + - progress_kwargs : dict, [{"chunk_size": 10}] + kwargs to pass to the progress_bar. Qutip's bars use ``chunk_size``. + - method : str {"adams", "bdf", "dop853", "vern9", etc.}, ["adams"] + Which differential equation integration method to use. + - keep_runs_results : bool, [False] + Whether to store results from all trajectories or just store the + averages. + - map : str {"serial", "parallel", "loky"}, ["serial"] + How to run the trajectories. "parallel" uses concurrent module to run + in parallel while "loky" use the module of the same name to do so. + - job_timeout : NoneType, int, [None] + Maximum time to compute one trajectory. + - num_cpus : NoneType, int, [None] + Number of cpus to use when running in parallel. ``None`` detect the + number of available cpus. + - norm_t_tol, norm_tol, norm_steps : float, float, int, [1e-6, 1e-4, 5] + Parameters used to find the collapse location. ``norm_t_tol`` and + ``norm_tol`` are the tolerance in time and norm respectively. + An error will be raised if the collapse could not be found within + ``norm_steps`` tries. + - mc_corr_eps : float, [1e-10] + Small number used to detect non-physical collapse caused by numerical + imprecision. + - atol, rtol : float, [1e-8, 1e-6] + Absolute and relative tolerance of the ODE integrator. + - nsteps : int [2500] + Maximum number of (internally defined) steps allowed in one ``tlist`` + step. + - max_step : float, [0] + Maximum length of one internal step. When using pulses, it should be + less than half the width of the thinnest pulse. + - completeness_rtol, completeness_atol : float, float, [1e-5, 1e-8] + Parameters used in determining whether the given Lindblad operators + satisfy a certain completeness relation. 
If they do not, an + additional Lindblad operator is added automatically (with zero rate). + - martingale_quad_limit : float or int, [100] + An upper bound on the number of subintervals used in the adaptive + integration of the martingale. + + seeds : int, SeedSequence, list, [optional] + Seed for the random number generator. It can be a single seed used to + spawn seeds for each trajectory or a list of seeds, one for each + trajectory. Seeds are saved in the result and they can be reused with:: + + seeds=prev_result.seeds + + target_tol : float, tuple, list, [optional] + Target tolerance of the evolution. The evolution will compute + trajectories until the error on the expectation values is lower than + this tolerance. The maximum number of trajectories employed is + given by ``ntraj``. The error is computed using jackknife resampling. + ``target_tol`` can be an absolute tolerance or a pair of absolute and + relative tolerance, in that order. Lastly, it can be a list of pairs of + (atol, rtol) for each e_ops. + + timeout : float, [optional] + Maximum time for the evolution in seconds. When reached, no more + trajectories will be computed. + + Returns + ------- + results : :class:`qutip.solver.NmmcResult` + Object storing all results from the simulation. Compared to a result + returned by ``mcsolve``, this result contains the additional field + ``trace`` (and ``runs_trace`` if ``store_final_state`` is set). Note + that the states on the individual trajectories are not normalized. This + field contains the average of their trace, which will converge to one + in the limit of sufficiently many trajectories. 
+ """ + H = QobjEvo(H, args=args, tlist=tlist) + + if len(ops_and_rates) == 0: + if options is None: + options = {} + options = { + key: options[key] + for key in options + if key in MESolver.solver_options + } + return mesolve( + H, state, tlist, e_ops=e_ops, args=args, options=options, + ) + + ops_and_rates = [ + _parse_op_and_rate(op, rate, tlist=tlist, args=args or {}) + for op, rate in ops_and_rates + ] + + nmmc = NonMarkovianMCSolver(H, ops_and_rates, options=options) + result = nmmc.run(state, tlist=tlist, ntraj=ntraj, e_ops=e_ops, + seed=seeds, target_tol=target_tol, timeout=timeout) + return result + + +def _parse_op_and_rate(op, rate, **kw): + """ Sanity check the op and convert rates to coefficients. """ + if not isinstance(op, Qobj): + raise ValueError("NonMarkovianMCSolver ops must be of type Qobj") + if isinstance(rate, numbers.Number): + rate = ConstantCoefficient(rate) + else: + rate = coefficient(rate, **kw) + return op, rate + + +class InfluenceMartingale: + def __init__(self, nm_solver, a_parameter, quad_limit): + self._nm_solver = nm_solver + self._quad_limit = quad_limit + self._a_parameter = a_parameter + self.reset() + + def reset(self): + self._t_prev = None + self._continuous_martingale_at_t_prev = None + self._precomputed_continuous_martingale = {} + self._discrete_martingale = None + + def initialize(self, t0, cache='clear'): + # `cache` may be 'clear', 'keep' or a new list of times for which + # to pre-compute the continuous contribution to the martingale + self._t_prev = t0 + self._continuous_martingale_at_t_prev = 1 + self._discrete_martingale = 1 + + if np.array_equal(cache, 'clear'): + self._precomputed_continuous_martingale = {} + return + if np.array_equal(cache, 'keep'): + return + + self._precomputed_continuous_martingale = {} + mu_c0 = 1 + for t1 in cache: + mu_c1 = mu_c0 * self._compute_continuous_martingale(t0, t1) + self._precomputed_continuous_martingale[t1] = mu_c1 + t0, mu_c0 = t1, mu_c1 + + def add_collapse(self, 
collapse_time, collapse_channel): + if self._t_prev is None: + raise RuntimeError("The `start` method must called first.") + + rate = self._nm_solver.rate(collapse_time, collapse_channel) + shift = self._nm_solver.rate_shift(collapse_time) + factor = rate / (rate + shift) + self._discrete_martingale *= factor + + def value(self, t): + if self._t_prev is None: + raise RuntimeError("The `start` method must called first.") + + # find value of continuous martingale at given time + if t in self._precomputed_continuous_martingale: + mu_c = self._precomputed_continuous_martingale[t] + else: + mu_c = ( + self._continuous_martingale_at_t_prev * + self._compute_continuous_martingale(self._t_prev, t) + ) + self._t_prev = t + self._continuous_martingale_at_t_prev = mu_c + + return self._discrete_martingale * mu_c + + def _compute_continuous_martingale(self, t1, t2): + if t1 == t2: + return 1 + + integral, _, *info = scipy.integrate.quad( + self._nm_solver.rate_shift, t1, t2, + limit=self._quad_limit, + full_output=True, + ) + if len(info) > 1: + raise ValueError( + f"Failed to integrate the continuous martingale: {info[1]}" + ) + return np.exp(self._a_parameter * integral) + + +class NmMCIntegrator(MCIntegrator): + def __init__(self, *args, **kwargs): + self._martingale = kwargs.pop("__martingale") + super().__init__(*args, **kwargs) + + def _do_collapse(self, *args, **kwargs): + # _do_collapse might not append a new collapse, so we need to check + # whether one was added before calculating the martingales. 
+ num_collapse_old = len(self.collapses) + super()._do_collapse(*args, **kwargs) + if len(self.collapses) > num_collapse_old: + collapse_time, collapse_channel = self.collapses[-1] + self._martingale.add_collapse(collapse_time, collapse_channel) + + def set_state(self, t, *args, **kwargs): + super().set_state(t, *args, **kwargs) + self._martingale.initialize(t, cache='keep') + + +class NonMarkovianMCSolver(MCSolver): + """ + Monte Carlo Solver for Lindblad equations with "rates" that may be + negative. The ``c_ops`` parameter of :class:`qutip.MCSolver` is replaced by + an ``ops_and_rates`` parameter to allow for negative rates. Options for the + underlying ODE solver are given by the Options class. + + Parameters + ---------- + H : :class:`qutip.Qobj`, :class:`qutip.QobjEvo`, ``list``, callable. + System Hamiltonian as a Qobj, QobjEvo. It can also be any input type + that QobjEvo accepts (see :class:`qutip.QobjEvo` documentation). + ``H`` can also be a superoperator (liouvillian) if some collapse + operators are to be treated deterministically. + + ops_and_rates : list + A ``list`` of tuples ``(L, Gamma)``, where the Lindblad operator ``L`` + is a :class:`qutip.Qobj` and ``Gamma`` represents the corresponding + rate, which is allowed to be negative. The Lindblad operators must be + operators even if ``H`` is a superoperator. Each rate ``Gamma`` may be + just a number (in the case of a constant rate) or, otherwise, specified + using any format accepted by :func:`qutip.coefficient`. + + args : None / dict + Arguments for time-dependent Hamiltonian and collapse operator terms. + + options : SolverOptions, [optional] + Options for the evolution. + + seed : int, SeedSequence, list, [optional] + Seed for the random number generator. It can be a single seed used to + spawn seeds for each trajectory or a list of seed, one for each + trajectory. 
Seeds are saved in the result and can be reused with:: + + seeds=prev_result.seeds + """ + name = "nm_mcsolve" + resultclass = NmmcResult + solver_options = { + **MCSolver.solver_options, + "completeness_rtol": 1e-5, + "completeness_atol": 1e-8, + "martingale_quad_limit": 100, + } + + # both classes will be partially initialized in constructor + trajectory_resultclass = NmmcTrajectoryResult + mc_integrator_class = NmMCIntegrator + + def __init__( + self, H, ops_and_rates, *_args, args=None, options=None, **kwargs, + ): + self.options = options + + ops_and_rates = [ + _parse_op_and_rate(op, rate, args=args or {}) + for op, rate in ops_and_rates + ] + a_parameter, L = self._check_completeness(ops_and_rates) + if L is not None: + ops_and_rates.append((L, ConstantCoefficient(0))) + + self.ops = [op for op, _ in ops_and_rates] + self._martingale = InfluenceMartingale( + self, a_parameter, self.options["martingale_quad_limit"] + ) + + # Many coefficients. These should not be publicly exposed + # and will all need to be updated in _arguments(): + self._rates = [rate for _, rate in ops_and_rates] + self._rate_shift = RateShiftCoefficient(self._rates) + self._sqrt_shifted_rates = [ + SqrtRealCoefficient(rate + self._rate_shift) + for rate in self._rates + ] + + c_ops = [ + QobjEvo([op, sqrt_shifted_rate]) + for op, sqrt_shifted_rate + in zip(self.ops, self._sqrt_shifted_rates) + ] + self.trajectory_resultclass = functools.partial( + NmmcTrajectoryResult, __nm_solver=self, + ) + self.mc_integrator_class = functools.partial( + NmMCIntegrator, __martingale=self._martingale, + ) + super().__init__(H, c_ops, *_args, options=options, **kwargs) + + def _check_completeness(self, ops_and_rates): + """ + Checks whether ``sum(Li.dag() * Li)`` is proportional to the identity + operator. If not, creates an extra Lindblad operator so that it is. + + Returns the proportionality factor a, and the extra Lindblad operator + (or None if no extra Lindblad operator is necessary). 
+ """ + op = sum((L.dag() * L) for L, _ in ops_and_rates) + + a_candidate = op.tr() / op.shape[0] + with CoreOptions(rtol=self.options["completeness_rtol"], + atol=self.options["completeness_atol"]): + if op == a_candidate * qeye(op.dims[0]): + return np.real(a_candidate), None + + a = max(op.eigenenergies()) + L = (a * qeye(op.dims[0]) - op).sqrtm() # new Lindblad operator + return a, L + + def current_martingale(self): + """ + Returns the value of the influence martingale along the current + trajectory. The value of the martingale is the product of the + continuous and the discrete contribution. The current time and the + collapses that have happened are read out from the internal integrator. + """ + t, *_ = self._integrator.get_state(copy=False) + return self._martingale.value(t) + + def _argument(self, args): + self._rates = [rate.replace_arguments(args) for rate in self._rates] + self._rate_shift = self._rate_shift.replace_arguments(args) + self._sqrt_shifted_rates = [ + rate.replace_arguments(args) for rate in self._sqrt_shifted_rates + ] + super()._argument(args) + + def rate_shift(self, t): + """ + Return the rate shift at time ``t``. + + The rate shift is ``2 * abs(min([0, rate_1(t), rate_2(t), ...]))``. + + Parameters + ---------- + t : float + The time at which to calculate the rate shift. + + Returns + ------- + rate_shift : float + The rate shift amount. + """ + return self._rate_shift.as_double(t) + + def rate(self, t, i): + """ + Return the i'th unshifted rate at time ``t``. + + Parameters + ---------- + t : float + The time at which to calculate the rate. + i : int + Which rate to calculate. + + Returns + ------- + rate : float + The value of rate ``i`` at time ``t``. + """ + return np.real(self._rates[i](t)) + + def sqrt_shifted_rate(self, t, i): + """ + Return the square root of the i'th shifted rate at time ``t``. + + Parameters + ---------- + t : float + The time at wich to calculate the shifted rate. + i : int + Which shifted rate to calculate. 
+ + Returns + ------- + rate : float + The square root of the shifted value of rate ``i`` at time ``t``. + """ + return np.real(self._sqrt_shifted_rates[i](t)) + + # MCSolver (and NonMarkovianMCSolver) offer two interfaces, i.e., two ways + # of interacting with them: either call `start` first and then manually + # integrate a single trajectory with subsequent calls to `step`, or call + # `run` to integrate a large number of trajectories, saving the results in + # an `NmmcResult`. + # We are responsible for (a) keeping our `_martingale` object in the + # correct state throughout and (b) multiplying all state density matrices + # with the martingale before passing them on to the user. + # + # Regarding (a), we firstly assume that start, step and run are only + # accessed by a single thread. start and step thus cannot be called while + # run is being executed. Secondly, we reset the martingale object at the + # beginning and end of run, requiring the user to call start again after + # calling run before calling step. Internal state of the martingale + # object accumulated by using one interface can thus not influence + # computations with the other interface. + # Note that the start/step-interface allows updating the `args` dictionary + # at each step. This action does not mess up the martingale state since we + # do not precompute any martingale values in this interface. In the + # run-interface we do precompute the values of the continuous part of the + # martingale, but the `args` dictionary cannot be changed in the middle of + # the run. + # + # Regarding (b), in the start/step-interface we just include the martingale + # in the step method. In order to include the martingale in the + # run-interface, we use a custom trajectory-resultclass that grabs the + # martingale value from the NonMarkovianMCSolver whenever a state is added. 
+ + def start(self, state, t0, seed=None): + self._martingale.initialize(t0, cache='clear') + return super().start(state, t0, seed=seed) + + # The returned state will be a density matrix with trace=mu the martingale + def step(self, t, *, args=None, copy=True): + state = super().step(t, args=args, copy=copy) + if isket(state): + state = ket2dm(state) + return state * self.current_martingale() + + def run(self, state, tlist, *args, **kwargs): + # update `args` dictionary before precomputing martingale + if 'args' in kwargs: + self._argument(kwargs.pop('args')) + + self._martingale.initialize(tlist[0], cache=tlist) + result = super().run(state, tlist, *args, **kwargs) + self._martingale.reset() + + return result + + start.__doc__ = MultiTrajSolver.start.__doc__ + step.__doc__ = MultiTrajSolver.step.__doc__ + run.__doc__ = MultiTrajSolver.run.__doc__ diff --git a/qutip/solver/nonmarkov/__init__.py b/qutip/solver/nonmarkov/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/qutip/solver/nonmarkov/transfertensor.py b/qutip/solver/nonmarkov/transfertensor.py new file mode 100644 index 0000000000..fd4f1721c7 --- /dev/null +++ b/qutip/solver/nonmarkov/transfertensor.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- +# @author: Arne L. Grimsmo +# @email1: arne.grimsmo@gmail.com +# @organization: University of Sherbrooke + +""" +This module contains an implementation of the non-Markovian transfer tensor +method (TTM), introduced in [1]. + +[1] Javier Cerrillo and Jianshu Cao, Phys. Rev. Lett 112, 110401 (2014) +""" + +import numpy as np +import time +from qutip import spre, vector_to_operator, operator_to_vector, Result + + +def ttmsolve(dynmaps, state0, times, e_ops=[], num_learning=0, options=None): + """ + Expand time-evolution using the Transfer Tensor Method [1]_, based on a set + of precomputed dynamical maps. 
+
+    Parameters
+    ----------
+    dynmaps : list of :class:`qutip.Qobj`, callable
+        List of precomputed dynamical maps (superoperators) for the first times
+        of ``times`` or a callback function that returns the superoperator at a
+        given time.
+
+    state0 : :class:`qutip.Qobj`
+        Initial density matrix or state vector (ket).
+
+    times : array_like
+        List of times :math:`t_n` at which to compute results.
+        Must be uniformly spaced.
+
+    e_ops : :class:`qutip.qobj`, callable, or list.
+        Single operator or list of operators for which to evaluate
+        expectation values or callable or list of callable.
+        Callable signature must be, `f(t: float, state: Qobj)`.
+        See :func:`expect` for more detail of operator expectation.
+
+    num_learning : int
+        Number of times used to construct the dynmaps operators when
+        ``dynmaps`` is a callable.
+
+    options : dictionary
+        Dictionary of options for the solver.
+
+        - store_final_state : bool
+          Whether or not to store the final state of the evolution in the
+          result class.
+        - store_states : bool, None
+          Whether or not to store the state vectors or density matrices.
+          On `None` the states will be saved if no expectation operators are
+          given.
+        - normalize_output : bool
+          Normalize output state to hide ODE numerical errors.
+        - threshold : float
+          Threshold for halting. Halts if :math:`||T_{n}-T_{n-1}||` is below
+          threshold.
+
+    Returns
+    -------
+    output: :class:`qutip.solver.Result`
+        An instance of the class :class:`qutip.solver.Result`.
+
+    .. [1] Javier Cerrillo and Jianshu Cao, Phys. Rev.
Lett 112, 110401 (2014) + """ + + opt = { + "store_final_state": False, + "store_states": None, + "normalize_output": "ket", + "threshold": 0.0, + "num_learning": 0, + } + if options: + opt.update(options) + + if not np.allclose(np.diff(times), times[1] - times[0]): + raise ValueError("The time should be uniformily distributed.") + + if callable(dynmaps): + if num_learning <= 0: + raise ValueError( + "When dynmaps is a callable, options['num_learning'] must be " + "the number of dynamical maps to compute." + ) + dynmaps = [dynmaps(t) for t in times[:num_learning]] + + if ( + not dynmaps + or not dynmaps[0].issuper + or not all(dmap.dims == dynmaps[0].dims for dmap in dynmaps) + ): + raise ValueError("`dynmaps` entries must be super operators.") + + start = time.time() + tensors, diff = _generatetensors(dynmaps, opt["threshold"]) + end = time.time() + + stats = { + "preparation time": end - start, + "run time": 0.0, + "ttmconvergence": diff, + "num_tensor": len(tensors), + } + if state0.isket: + state0 = state0.proj() + + if state0.isoper: + # vectorize density matrix + rho0vec = operator_to_vector(state0) + restore = vector_to_operator + else: + # state0 might be a super in which case we should not vectorize + rho0vec = state0 + restore = lambda state: state + + K = len(tensors) + start = time.time() + results = Result(e_ops, opt, solver="ttmsolve", stats=stats) + states = [rho0vec] + results.add(times[0], state0) + + for n in range(1, len(times)): + # Set current state + state = 0 + for j in range(1, min(K, n + 1)): + tmp = tensors[j] @ states[n - j] + state = tmp + state + # Append state to all states + states.append(state) + results.add(times[n], restore(state)) + end = time.time() + stats["run time"] = end - start + + return results + + +def _generatetensors(dynmaps, threshold): + r""" + Generate the tensors :math:`T_1,\dots,T_K` from the dynamical maps + :math:`E(t_k)`. + + A stationary process is assumed, i.e., :math:`T_{n,k} = T_{n-k}`. 
+ + Parameters + ---------- + dynmaps : list of :class:`qutip.Qobj` + List of precomputed dynamical maps (superoperators) at the times + specified in `learningtimes`. + + threshold : float + Threshold for halting. Halts if :math:`||T_{n}-T_{n-1}||` is below + treshold. + + Returns + ------- + Tensors, diffs: list of :class:`qutip.Qobj.` + A list of transfer tensors :math:`T_1,\dots,T_K` + """ + Tensors = [] + diff = [0.0] + for n in range(len(dynmaps)): + T = dynmaps[n] + for m in range(1, n): + T -= Tensors[n - m] @ dynmaps[m] + Tensors.append(T) + if n > 1: + diff.append((Tensors[-1] - Tensors[-2]).norm()) + if diff[-1] < threshold: + # Below threshold for truncation + break + return Tensors, diff diff --git a/qutip/solver/parallel.py b/qutip/solver/parallel.py index 4ceeb0fc44..83bf727a0a 100644 --- a/qutip/solver/parallel.py +++ b/qutip/solver/parallel.py @@ -1,6 +1,7 @@ """ This module provides functions for parallel execution of loops and function -mappings, using the builtin Python module multiprocessing or the loky parallel execution library. +mappings, using the builtin Python module multiprocessing or the loky parallel +execution library. """ __all__ = ['parallel_map', 'serial_map', 'loky_pmap'] @@ -10,7 +11,7 @@ import time import threading import concurrent.futures -from qutip.ui.progressbar import progess_bars +from qutip.ui.progressbar import progress_bars from qutip.settings import available_cpu_count if sys.platform == 'darwin': @@ -70,7 +71,9 @@ def serial_map(task, values, task_args=None, task_kwargs=None, The optional additional keyword argument to the ``task`` function. reduce_func : func (optional) If provided, it will be called with the output of each tasks instead of - storing a them in a list. + storing a them in a list. It should return None or a number. + When returning a number, it represent the estimation of the number of + task left. On a return <= 0, the map will end early. 
progress_bar : string Progress bar options's string for showing progress. progress_bar_kwargs : dict @@ -94,8 +97,10 @@ def serial_map(task, values, task_args=None, task_kwargs=None, if task_kwargs is None: task_kwargs = {} map_kw = _read_map_kw(map_kw) - progress_bar = progess_bars[progress_bar]() - progress_bar.start(len(values), **progress_bar_kwargs) + remaining_ntraj = None + progress_bar = progress_bars[progress_bar]( + len(values), **progress_bar_kwargs + ) end_time = map_kw['timeout'] + time.time() results = None if reduce_func is None: @@ -104,7 +109,7 @@ def serial_map(task, values, task_args=None, task_kwargs=None, for n, value in enumerate(values): if time.time() > end_time: break - progress_bar.update(n) + progress_bar.update() try: result = task(value, *task_args, **task_kwargs) except Exception as err: @@ -114,9 +119,11 @@ def serial_map(task, values, task_args=None, task_kwargs=None, errors[n] = err else: if reduce_func is not None: - reduce_func(result) + remaining_ntraj = reduce_func(result) else: results[n] = result + if remaining_ntraj is not None and remaining_ntraj <= 0: + end_time = 0 progress_bar.finished() if errors: @@ -148,7 +155,9 @@ def parallel_map(task, values, task_args=None, task_kwargs=None, reduce_func : func (optional) If provided, it will be called with the output of each tasks instead of storing a them in a list. Note that the order in which results are - passed to ``reduce_func`` is not defined. + passed to ``reduce_func`` is not defined. It should return None or a + number. When returning a number, it represent the estimation of the + number of task left. On a return <= 0, the map will end early. progress_bar : string Progress bar options's string for showing progress. 
progress_bar_kwargs : dict @@ -177,10 +186,12 @@ def parallel_map(task, values, task_args=None, task_kwargs=None, end_time = map_kw['timeout'] + time.time() job_time = map_kw['job_timeout'] - progress_bar = progess_bars[progress_bar]() - progress_bar.start(len(values), **progress_bar_kwargs) + progress_bar = progress_bars[progress_bar]( + len(values), **progress_bar_kwargs + ) errors = {} + finished = [] if reduce_func is not None: results = None result_func = lambda i, value: reduce_func(value) @@ -194,7 +205,9 @@ def _done_callback(future): result = future.result() except Exception as e: errors[future._i] = e - result_func(future._i, result) + remaining_ntraj = result_func(future._i, result) + if remaining_ntraj is not None and remaining_ntraj <= 0: + finished.append(True) progress_bar.update() if sys.version_info >= (3, 7): @@ -224,7 +237,11 @@ def _done_callback(future): timeout=timeout, return_when=concurrent.futures.FIRST_COMPLETED, ) - if time.time() >= end_time or (errors and map_kw['fail_fast']): + if ( + time.time() >= end_time + or (errors and map_kw['fail_fast']) + or finished + ): # no time left, exit the loop break while len(waiting) < map_kw['num_cpus'] and i < len(values): @@ -282,7 +299,9 @@ def loky_pmap(task, values, task_args=None, task_kwargs=None, The optional additional keyword argument to the ``task`` function. reduce_func : func (optional) If provided, it will be called with the output of each tasks instead of - storing a them in a list. + storing a them in a list. It should return None or a number. When + returning a number, it represent the estimation of the number of task + left. On a return <= 0, the map will end early. progress_bar : string Progress bar options's string for showing progress. 
progress_bar_kwargs : dict @@ -311,13 +330,15 @@ def loky_pmap(task, values, task_args=None, task_kwargs=None, os.environ['QUTIP_IN_PARALLEL'] = 'TRUE' from loky import get_reusable_executor, TimeoutError - progress_bar = progess_bars[progress_bar]() - progress_bar.start(len(values), **progress_bar_kwargs) + progress_bar = progress_bars[progress_bar]( + len(values), **progress_bar_kwargs + ) executor = get_reusable_executor(max_workers=map_kw['num_cpus']) end_time = map_kw['timeout'] + time.time() job_time = map_kw['job_timeout'] results = None + remaining_ntraj = None errors = {} if reduce_func is None: results = [None] * len(values) @@ -337,10 +358,12 @@ def loky_pmap(task, values, task_args=None, task_kwargs=None, errors[n] = err else: if reduce_func is not None: - reduce_func(result) + remaining_ntraj = reduce_func(result) else: results[n] = result progress_bar.update() + if remaining_ntraj is not None and remaining_ntraj <= 0: + break except KeyboardInterrupt as e: [job.cancel() for job in jobs] diff --git a/qutip/solver/propagator.py b/qutip/solver/propagator.py index 179c0dcef0..e85ad5c5a0 100644 --- a/qutip/solver/propagator.py +++ b/qutip/solver/propagator.py @@ -3,7 +3,7 @@ import numbers import numpy as np -from .. import Qobj, qeye, unstack_columns, QobjEvo +from .. 
import Qobj, qeye, qeye_like, unstack_columns, QobjEvo, liouvillian from ..core import data as _data from .mesolve import mesolve, MESolver from .sesolve import sesolve, SESolver @@ -61,12 +61,15 @@ def propagator(H, t, c_ops=(), args=None, options=None, **kwargs): if not isinstance(H, (Qobj, QobjEvo)): H = QobjEvo(H, args=args, **kwargs) - if H.issuper or c_ops: - out = mesolve(H, qeye(H.dims), tlist, c_ops=c_ops, - args=args, options=options).states + if c_ops: + H = liouvillian(H, c_ops) + + U0 = qeye_like(H) + + if H.issuper: + out = mesolve(H, U0, tlist, args=args, options=options).states else: - out = sesolve(H, qeye(H.dims[0]), tlist, - args=args, options=options).states + out = sesolve(H, U0, tlist, args=args, options=options).states if list_output: return out @@ -212,7 +215,7 @@ def __call__(self, t, t_start=0, **args): self.args = args self.solver._argument(args) self.times = [0] - self.props = [qeye(self.props[0].dims[0])] + self.props = [qeye_like(self.props[0])] self.solver.start(self.props[0], self.times[0]) if t_start: @@ -256,7 +259,7 @@ def _compute(self, t, idx): U = self.solver.step(t) else: # Evolving backward in time is not supported by all integrator. 
- self.solver.start(qeye(self.props[0].dims[0]), t) + self.solver.start(qeye_like(self.props[0]), t) Uinv = self.solver.step(self.times[idx]) U = self._inv(Uinv) return U diff --git a/qutip/solver/result.py b/qutip/solver/result.py index a87ad9206c..7544d09761 100644 --- a/qutip/solver/result.py +++ b/qutip/solver/result.py @@ -1,8 +1,9 @@ """ Class for solve function results""" import numpy as np -from ..core import Qobj, QobjEvo, expect, qzero +from ..core import Qobj, QobjEvo, expect, isket, ket2dm, qzero, qzero_like -__all__ = ["Result", "MultiTrajResult", "McResult"] +__all__ = ["Result", "MultiTrajResult", "McResult", "NmmcResult", + "McTrajectoryResult", "McResultImprovedSampling"] class _QobjExpectEop: @@ -485,12 +486,11 @@ def _add_first_traj(self, trajectory): self.times = trajectory.times if trajectory.states: - state = trajectory.states[0] - self._sum_states = [qzero(state.dims[0]) + self._sum_states = [qzero_like(self._to_dm(state)) for state in trajectory.states] if trajectory.final_state: state = trajectory.final_state - self._sum_final_states = qzero(state.dims[0]) + self._sum_final_states = qzero_like(self._to_dm(state)) self._sum_expect = [ np.zeros_like(expect) for expect in trajectory.expect @@ -568,6 +568,11 @@ def _fixed_end(self): self.stats['end_condition'] = 'ntraj reached' return ntraj_left + def _average_computer(self): + avg = np.array(self._sum_expect) / self.num_trajectories + avg2 = np.array(self._sum2_expect) / self.num_trajectories + return avg, avg2 + def _target_tolerance_end(self): """ Compute the error on the expectation values using jackknife resampling. 
@@ -576,14 +581,13 @@ def _target_tolerance_end(self): """ if self.num_trajectories <= 1: return np.inf - avg = np.array(self._sum_expect) / self.num_trajectories - avg2 = np.array(self._sum2_expect) / self.num_trajectories + avg, avg2 = self._average_computer() target = np.array([ atol + rtol * mean for mean, (atol, rtol) in zip(avg, self._target_tols) ]) - target_ntraj = np.max((avg2 - abs(avg)**2) / target**2 + 1) + target_ntraj = np.max((avg2 - abs(avg) ** 2) / target**2 + 1) self._estimated_ntraj = min(target_ntraj, self._target_ntraj) if (self._estimated_ntraj - self.num_trajectories) <= 0: @@ -866,9 +870,19 @@ def __add__(self, other): return new +class McTrajectoryResult(Result): + """ + Result class used by the :class:`qutip.MCSolver` for single trajectories. + """ + + def __init__(self, e_ops, options, *args, **kwargs): + super().__init__(e_ops, {**options, "normalize_output": False}, + *args, **kwargs) + + class McResult(MultiTrajResult): """ - Base class for storing solver results. + Class for storing Monte-Carlo solver results. Parameters ---------- @@ -970,3 +984,273 @@ def runs_photocurrent(self): for i in range(self.num_c_ops) ]) return measurements + + +class McResultImprovedSampling(McResult, MultiTrajResult): + """ + See docstring for McResult and MultiTrajResult for all relevant documentation. + This class computes expectation values and sums of states, etc + using the improved sampling algorithm, which samples the no-jump trajectory + first and then only samples jump trajectories afterwards. 
+ """ + def __init__(self, e_ops, options, **kw): + MultiTrajResult.__init__(self, e_ops=e_ops, options=options, **kw) + self._sum_expect_no_jump = None + self._sum_expect_jump = None + self._sum2_expect_no_jump = None + self._sum2_expect_jump = None + + self._sum_states_no_jump = None + self._sum_states_jump = None + self._sum_final_states_no_jump = None + self._sum_final_states_jump = None + + self.no_jump_prob = None + + def _reduce_states(self, trajectory): + if self.num_trajectories == 1: + self._sum_states_no_jump = [ + accu + self._to_dm(state) + for accu, state + in zip(self._sum_states_no_jump, trajectory.states) + ] + else: + self._sum_states_jump = [ + accu + self._to_dm(state) + for accu, state + in zip(self._sum_states_jump, trajectory.states) + ] + + def _reduce_final_state(self, trajectory): + dm_final_state = self._to_dm(trajectory.final_state) + if self.num_trajectories == 1: + self._sum_final_states_no_jump += dm_final_state + else: + self._sum_final_states_jump += dm_final_state + + def _average_computer(self): + avg = np.array(self._sum_expect_jump) / (self.num_trajectories - 1) + avg2 = np.array(self._sum2_expect_jump) / (self.num_trajectories - 1) + return avg, avg2 + + def _add_first_traj(self, trajectory): + super()._add_first_traj(trajectory) + if trajectory.states: + del self._sum_states + self._sum_states_no_jump = [qzero_like(self._to_dm(state)) + for state in trajectory.states] + self._sum_states_jump = [qzero_like(self._to_dm(state)) + for state in trajectory.states] + if trajectory.final_state: + state = trajectory.final_state + del self._sum_final_states + self._sum_final_states_no_jump = qzero_like(self._to_dm(state)) + self._sum_final_states_jump = qzero_like(self._to_dm(state)) + self._sum_expect_jump = [ + np.zeros_like(expect) for expect in trajectory.expect + ] + self._sum2_expect_jump = [ + np.zeros_like(expect) for expect in trajectory.expect + ] + self._sum_expect_no_jump = [ + np.zeros_like(expect) for expect in 
trajectory.expect + ] + self._sum2_expect_no_jump = [ + np.zeros_like(expect) for expect in trajectory.expect + ] + self._sum_expect_jump = [np.zeros_like(expect) + for expect in trajectory.expect] + self._sum2_expect_jump = [np.zeros_like(expect) + for expect in trajectory.expect] + del self._sum_expect + del self._sum2_expect + + def _reduce_expect(self, trajectory): + """ + Compute the average of the expectation values appropriately + weighting the jump and no-jump trajectories + """ + for i, k in enumerate(self._raw_ops): + expect_traj = trajectory.expect[i] + p = self.no_jump_prob + if self.num_trajectories == 1: + self._sum_expect_no_jump[i] += expect_traj * p + self._sum2_expect_no_jump[i] += expect_traj**2 * p + # no jump trajectory will always be the first one, no need + # to worry about including jump trajectories + avg = self._sum_expect_no_jump[i] + avg2 = self._sum2_expect_no_jump[i] + else: + self._sum_expect_jump[i] += expect_traj * (1 - p) + self._sum2_expect_jump[i] += expect_traj**2 * (1 - p) + avg = (self._sum_expect_no_jump[i] + + self._sum_expect_jump[i] + / (self.num_trajectories - 1)) + avg2 = (self._sum2_expect_no_jump[i] + + self._sum2_expect_jump[i] + / (self.num_trajectories - 1)) + + self.average_e_data[k] = list(avg) + + # mean(expect**2) - mean(expect)**2 can something be very small + # negative (-1e-15) which raise an error for float sqrt. + self.std_e_data[k] = list(np.sqrt(np.abs(avg2 - np.abs(avg**2)))) + + if self.runs_e_data: + self.runs_e_data[k].append(trajectory.e_data[k]) + + @property + def average_states(self): + """ + States averages as density matrices. + """ + if self._sum_states_no_jump is None: + return None + p = self.no_jump_prob + return [p * final_no_jump + + (1 - p) * final_jump / (self.num_trajectories - 1) + for final_no_jump, final_jump in + zip(self._sum_states_no_jump, self._sum_states_jump)] + + @property + def average_final_state(self): + """ + Last states of each trajectory averaged into a density matrix. 
+ """ + if self._sum_final_states_no_jump is None: + return None + p = self.no_jump_prob + return ( + p * self._sum_final_states_no_jump + + (1 - p) * self._sum_final_states_jump + / (self.num_trajectories - 1) + ) + + def __add__(self, other): + raise NotImplemented + + @property + def photocurrent(self): + """ + Average photocurrent or measurement of the evolution. + """ + cols = [[] for _ in range(self.num_c_ops)] + tlist = self.times + for collapses in self.collapse: + for t, which in collapses: + cols[which].append(t) + mesurement = [ + (1 - self.no_jump_prob) / (self.num_trajectories - 1) * + np.histogram(cols[i], tlist)[0] / np.diff(tlist) + for i in range(self.num_c_ops) + ] + return mesurement + + +class NmmcTrajectoryResult(McTrajectoryResult): + """ + Result class used by the :class:`qutip.NonMarkovianMCSolver` for single + trajectories. Additionally stores the trace of the state along the + trajectory. + """ + + def __init__(self, e_ops, options, *args, **kwargs): + self._nm_solver = kwargs.pop("__nm_solver") + super().__init__(e_ops, options, *args, **kwargs) + self.trace = [] + + # This gets called during the Monte-Carlo simulation of the associated + # completely positive master equation. To obtain the state of the actual + # system, we simply multiply the provided state with the current martingale + # before storing it / computing expectation values. + def add(self, t, state): + if isket(state): + state = ket2dm(state) + mu = self._nm_solver.current_martingale() + super().add(t, state * mu) + self.trace.append(mu) + + add.__doc__ = Result.add.__doc__ + + +class NmmcResult(McResult): + """ + Class for storing the results of the non-Markovian Monte-Carlo solver. + + Parameters + ---------- + e_ops : :obj:`~Qobj`, :obj:`~QobjEvo`, function or list or dict of these + The ``e_ops`` parameter defines the set of values to record at + each time step ``t``. 
If an element is a :obj:`~Qobj` or + :obj:`~QobjEvo` the value recorded is the expectation value of that + operator given the state at ``t``. If the element is a function, ``f``, + the value recorded is ``f(t, state)``. + + The values are recorded in the ``.expect`` attribute of this result + object. ``.expect`` is a list, where each item contains the values + of the corresponding ``e_op``. + + options : :obj:`~SolverResultsOptions` + The options for this result class. + + solver : str or None + The name of the solver generating these results. + + stats : dict + The stats generated by the solver while producing these results. Note + that the solver may update the stats directly while producing results. + Must include a value for "num_collapse". + + kw : dict + Additional parameters specific to a result sub-class. + + Properties + ---------- + average_trace : list + The average trace (i.e., averaged over all trajectories) at each time. + + std_trace : list + The standard deviation of the trace at each time. + + runs_trace : list of lists + For each recorded trajectory, the trace at each time. + Only present if ``keep_runs_results`` is set in the options. 
+ """ + + def _post_init(self): + super()._post_init() + + self._sum_trace = None + self._sum2_trace = None + self.average_trace = [] + self.std_trace = [] + self.runs_trace = [] + + self.add_processor(self._add_trace) + + def _add_first_traj(self, trajectory): + super()._add_first_traj(trajectory) + self._sum_trace = np.zeros_like(trajectory.times) + self._sum2_trace = np.zeros_like(trajectory.times) + + def _add_trace(self, trajectory): + new_trace = np.array(trajectory.trace) + self._sum_trace += new_trace + self._sum2_trace += np.abs(new_trace)**2 + + avg = self._sum_trace / self.num_trajectories + avg2 = self._sum2_trace / self.num_trajectories + + self.average_trace = avg + self.std_trace = np.sqrt(np.abs(avg2 - np.abs(avg)**2)) + + if self.options['keep_runs_results']: + self.runs_trace.append(trajectory.trace) + + @property + def trace(self): + """ + Refers to ``average_trace`` or ``runs_trace``, depending on whether + ``keep_runs_results`` is set in the options. + """ + return self.runs_trace or self.average_trace diff --git a/qutip/solver/sode/__init__.py b/qutip/solver/sode/__init__.py new file mode 100644 index 0000000000..5f9e2560fb --- /dev/null +++ b/qutip/solver/sode/__init__.py @@ -0,0 +1,4 @@ +from .ssystem import * +from .sode import * +from .itotaylor import * +from .rouchon import * diff --git a/qutip/solver/sode/_noise.py b/qutip/solver/sode/_noise.py new file mode 100644 index 0000000000..ab5993c8b9 --- /dev/null +++ b/qutip/solver/sode/_noise.py @@ -0,0 +1,57 @@ +import numpy as np + +__all__ = [] + + +class _Noise: + """ + Weiner process generator used for tests. + """ + + def __init__(self, T, dt, num=1): + N = int(np.round(T / dt)) + self.T = T + self.dt = dt + self.num = num + self.noise = np.random.randn(N, num) * dt**0.5 + + def dw(self, dt): + """ + Ito integral I(i). + """ + N = int(np.round(dt / self.dt)) + return self.noise.reshape(-1, N, self.num).sum(axis=1) + + def dz(self, dt): + """ + Ito integral I(0, i). 
+ """ + N = int(np.round(dt / self.dt)) + return ( + np.einsum( + "ijk,j->ik", + self.noise.reshape(-1, N, self.num), + np.arange(N - 0.5, 0, -1), + ) + * self.dt + ) + + def dW(self, dt): + """ + Noise used for Ito-Taylor integrators of order up to 1.5. + """ + N = int(np.round(dt / self.dt)) + noise = self.noise.copy() + if noise.shape[0] % N: + noise = noise[: -(noise.shape[0] % N)] + out = np.empty((noise.shape[0] // N, 2, self.num), dtype=float) + out[:, 0, :] = noise.reshape(-1, N, self.num).sum(axis=1) + out[:, 1, :] = ( + np.einsum( + "ijk,j->ik", + self.noise.reshape(-1, N, self.num), + np.arange(N - 0.5, 0, -1), + ) + * self.dt + ) + return out diff --git a/qutip/solver/sode/_sode.pyx b/qutip/solver/sode/_sode.pyx new file mode 100644 index 0000000000..d528228d87 --- /dev/null +++ b/qutip/solver/sode/_sode.pyx @@ -0,0 +1,509 @@ +#cython: language_level=3 + +from qutip.core import data as _data +from qutip.core.cy.qobjevo cimport QobjEvo +from qutip.core.data cimport Data, Dense, imul_dense, iadd_dense +from collections import defaultdict +cimport cython +from qutip.solver.sode.ssystem cimport _StochasticSystem +import numpy as np + + +cdef class Euler: + cdef _StochasticSystem system + + def __init__(self, _StochasticSystem system): + self.system = system + + @cython.wraparound(False) + def run( + self, double t, Data state, double dt, + double[:, :, ::1] dW, int num_step + ): + cdef int i + for i in range(num_step): + state = self.step(t + i * dt, state, dt, dW[i, :, :]) + return state + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef Data step(self, double t, Data state, double dt, double[:, :] dW): + """ + Integration scheme: + Basic Euler order 0.5 + dV = d1 dt + d2_i dW_i + Numerical Solution of Stochastic Differential Equations + By Peter E. 
Kloeden, Eckhard Platen + """ + cdef int i + cdef _StochasticSystem system = self.system + + cdef Data a = system.drift(t, state) + b = system.diffusion(t, state) + cdef Data new_state = _data.add(state, a, dt) + for i in range(system.num_collapse): + new_state = _data.add(new_state, b[i], dW[0, i]) + return new_state + + +cdef class Platen(Euler): + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef Data step(self, double t, Data state, double dt, double[:, :] dW): + """ + Platen rhs function for both master eq and schrodinger eq. + dV = -iH* (V+Vt)/2 * dt + (d1(V)+d1(Vt))/2 * dt + + (2*d2_i(V)+d2_i(V+)+d2_i(V-))/4 * dW_i + + (d2_i(V+)-d2_i(V-))/4 * (dW_i**2 -dt) * dt**(-.5) + + Vt = V -iH*V*dt + d1*dt + d2_i*dW_i + V+/- = V -iH*V*dt + d1*dt +/- d2_i*dt**.5 + The Theory of Open Quantum Systems + Chapter 7 Eq. (7.47), H.-P Breuer, F. Petruccione + """ + cdef _StochasticSystem system = self.system + cdef int i, j, num_ops = system.num_collapse + cdef double sqrt_dt = np.sqrt(dt) + cdef double sqrt_dt_inv = 0.25 / sqrt_dt + cdef double dw, dw2, dw2p, dw2m + + cdef Data d1 = _data.add(state, system.drift(t, state), dt) + cdef list d2 = system.diffusion(t, state) + cdef Data Vt, out + cdef list Vp, Vm + + out = _data.mul(d1, 0.5) + Vt = d1.copy() + Vp = [] + Vm = [] + for i in range(num_ops): + Vp.append(_data.add(d1, d2[i], sqrt_dt)) + Vm.append(_data.add(d1, d2[i], -sqrt_dt)) + Vt = _data.add(Vt, d2[i], dW[0, i]) + + d1 = system.drift(t, Vt) + out = _data.add(out, d1, 0.5 * dt) + out = _data.add(out, state, 0.5) + for i in range(num_ops): + d2p = system.diffusion(t, Vp[i]) + d2m = system.diffusion(t, Vm[i]) + dw = dW[0, i] * 0.25 + out = _data.add(out, d2[i], 2 * dw) + + for j in range(num_ops): + if i == j: + dw2 = sqrt_dt_inv * (dW[0, i] * dW[0, j] - dt) + dw2p = dw2 + dw + dw2m = -dw2 + dw + else: + dw2p = sqrt_dt_inv * dW[0, i] * dW[0, j] + dw2m = -dw2p + out = _data.add(out, d2p[j], dw2p) + out = _data.add(out, d2m[j], dw2m) 
+ + return out + + +cdef class Explicit15(Euler): + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef Data step(self, double t, Data state, double dt, double[:, :] dW): + """ + Chapter 11.2 Eq. (2.13) + Numerical Solution of Stochastic Differential Equations + By Peter E. Kloeden, Eckhard Platen + """ + cdef _StochasticSystem system = self.system + cdef int i, j, k, num_ops = system.num_collapse + cdef double sqrt_dt = np.sqrt(dt) + cdef double sqrt_dt_inv = 1./sqrt_dt + cdef double ddz, ddw, ddd + cdef double[::1] dz, dw, dwp, dwm + + dw = np.empty(num_ops) + dz = np.empty(num_ops) + dwp = np.zeros(num_ops) + dwm = np.zeros(num_ops) + for i in range(num_ops): + dw[i] = dW[0, i] + dz[i] = 0.5 *(dW[0, i] + 1./np.sqrt(3) * dW[1, i]) + + d1 = system.drift(t, state) + d2 = system.diffusion(t, state) + dd2 = system.diffusion(t + dt, state) + # Euler part + out = _data.add(state, d1, dt) + for i in range(num_ops): + out = _data.add(out, d2[i], dw[i]) + + V = _data.add(state, d1, dt/num_ops) + + v2p = [] + v2m = [] + for i in range(num_ops): + v2p.append(_data.add(V, d2[i], sqrt_dt)) + v2m.append(_data.add(V, d2[i], -sqrt_dt)) + + p2p = [] + p2m = [] + for i in range(num_ops): + d2p = system.diffusion(t, v2p[i]) + d2m = system.diffusion(t, v2m[i]) + ddw = (dw[i] * dw[i] - dt) * 0.25 * sqrt_dt_inv # 1.0 + out = _data.add(out, d2p[i], ddw) + out = _data.add(out, d2m[i], -ddw) + temp_p2p = [] + temp_p2m = [] + for j in range(num_ops): + temp_p2p.append(_data.add(v2p[i], d2p[j], sqrt_dt)) + temp_p2m.append(_data.add(v2p[i], d2p[j], -sqrt_dt)) + p2p.append(temp_p2p) + p2m.append(temp_p2m) + + out = _data.add(out, d1, -0.5*(num_ops) * dt) + + for i in range(num_ops): + ddz = dz[i] * 0.5 / sqrt_dt # 1.5 + ddd = 0.25 * (dw[i] * dw[i] / 3 - dt) * dw[i] / dt # 1.5 + for j in range(num_ops): + dwp[j] = 0 + dwm[j] = 0 + + d1p = system.drift(t + dt/num_ops, v2p[i]) + d1m = system.drift(t + dt/num_ops, v2m[i]) + + d2p = system.diffusion(t, v2p[i]) 
+ d2m = system.diffusion(t, v2m[i]) + d2pp = system.diffusion(t, p2p[i][i]) + d2mm = system.diffusion(t, p2m[i][i]) + + out = _data.add(out, d1p, (0.25 + ddz) * dt) + out = _data.add(out, d1m, (0.25 - ddz) * dt) + + out = _data.add(out, dd2[i], dw[i] - dz[i]) + out = _data.add(out, d2[i], dz[i] - dw[i]) + + out = _data.add(out, d2pp[i], ddd) + out = _data.add(out, d2mm[i], -ddd) + dwp[i] += -ddd + dwm[i] += ddd + + for j in range(num_ops): + ddw = 0.5 * (dw[j] - dz[j]) # O(1.5) + dwp[j] += ddw + dwm[j] += ddw + out = _data.add(out, d2[j], -2*ddw) + + if j > i: + ddw = 0.5 * (dw[i] * dw[j]) / sqrt_dt # O(1.0) + dwp[j] += ddw + dwm[j] += -ddw + + ddw = 0.25 * (dw[j] * dw[j] - dt) * dw[i] / dt # O(1.5) + d2pp = system.diffusion(t, p2p[j][i]) + d2mm = system.diffusion(t, p2m[j][i]) + out = _data.add(out, d2pp[j], ddw) + out = _data.add(out, d2mm[j], -ddw) + dwp[j] += -ddw + dwm[j] += ddw + + for k in range(j+1, num_ops): + ddw = 0.5 * dw[i] * dw[j] * dw[k] / dt # O(1.5) + out = _data.add(out, d2pp[k], ddw) + out = _data.add(out, d2mm[k], -ddw) + dwp[k] += -ddw + dwm[k] += ddw + + if j < i: + ddw = 0.25 * (dw[j] * dw[j] - dt) * dw[i] / dt # O(1.5) + d2pp = system.diffusion(t, p2p[j][i]) + d2mm = system.diffusion(t, p2m[j][i]) + + out = _data.add(out, d2pp[j], ddw) + out = _data.add(out, d2mm[j], -ddw) + dwp[j] += -ddw + dwm[j] += ddw + + for j in range(num_ops): + out = _data.add(out, d2p[j], dwp[j]) + out = _data.add(out, d2m[j], dwm[j]) + + return out + + +cdef class Milstein: + cdef _StochasticSystem system + + def __init__(self, _StochasticSystem system): + self.system = system + + @cython.wraparound(False) + def run(self, double t, Data state, double dt, double[:, :, ::1] dW, int ntraj): + cdef int i + if type(state) != _data.Dense: + state = _data.to(_data.Dense, state) + cdef Dense out = _data.zeros_like(state) + state = state.copy() + + for i in range(ntraj): + self.step(t + i * dt, state, dt, dW[i, :, :], out) + state, out = out, state + return state + + 
@cython.boundscheck(False) + @cython.wraparound(False) + cdef Data step(self, double t, Dense state, double dt, double[:, :] dW, Dense out): + """ + Chapter 10.3 Eq. (3.12) + Numerical Solution of Stochastic Differential Equations + By Peter E. Kloeden, Eckhard Platen + + dV = -iH*V*dt + d1*dt + d2_i*dW_i + + 0.5*d2_i' d2_j*(dW_i*dw_j -dt*delta_ij) + """ + cdef _StochasticSystem system = self.system + cdef int i, j, num_ops = system.num_collapse + cdef double dw + + system.set_state(t, state) + + imul_dense(out, 0.) + iadd_dense(out, state, 1) + iadd_dense(out, system.a(), dt) + + for i in range(num_ops): + iadd_dense(out, system.bi(i), dW[0, i]) + + for i in range(num_ops): + for j in range(i, num_ops): + if i == j: + dw = (dW[0, i] * dW[0, j] - dt) * 0.5 + else: + dw = dW[0, i] * dW[0, j] + iadd_dense(out, system.Libj(i, j), dw) + + +cdef class PredCorr: + cdef Dense euler + cdef double alpha, eta + cdef _StochasticSystem system + + def __init__(self, _StochasticSystem system, double alpha=0., double eta=0.5): + self.system = system + self.alpha = alpha + self.eta = eta + + @cython.wraparound(False) + def run(self, double t, Data state, double dt, double[:, :, ::1] dW, int ntraj): + cdef int i + if type(state) != _data.Dense: + state = _data.to(_data.Dense, state) + cdef Dense out = _data.zeros_like(state) + self.euler = _data.zeros_like(state) + state = state.copy() + + for i in range(ntraj): + self.step(t + i * dt, state, dt, dW[i, :, :], out) + state, out = out, state + return state + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef Data step(self, double t, Dense state, double dt, double[:, :] dW, Dense out): + """ + Chapter 15.5 Eq. (5.4) + Numerical Solution of Stochastic Differential Equations + By Peter E. 
Kloeden, Eckhard Platen + """ + cdef _StochasticSystem system = self.system + cdef int i, j, k, num_ops = system.num_collapse + cdef double eta=self.eta, alpha=self.alpha + cdef Dense euler = self.euler + + system.set_state(t, state) + + imul_dense(out, 0.) + iadd_dense(out, state, 1) + iadd_dense(out, system.a(), dt * (1-alpha)) + + imul_dense(euler, 0.) + iadd_dense(euler, state, 1) + iadd_dense(euler, system.a(), dt) + + for i in range(num_ops): + iadd_dense(euler, system.bi(i), dW[0, i]) + iadd_dense(out, system.bi(i), dW[0, i] * eta) + iadd_dense(out, system.Libj(i, i), dt * (alpha-1) * 0.5) + + system.set_state(t+dt, euler) + for i in range(num_ops): + iadd_dense(out, system.bi(i), dW[0, i] * (1-eta)) + + if alpha: + iadd_dense(out, system.a(), dt*alpha) + for i in range(num_ops): + iadd_dense(out, system.Libj(i, i), -dt * alpha * 0.5) + + return out + + +cdef class Taylor15(Milstein): + @cython.boundscheck(False) + @cython.wraparound(False) + cdef Data step(self, double t, Dense state, double dt, double[:, :] dW, Dense out): + """ + Chapter 10.4 Eq. (4.6), + Numerical Solution of Stochastic Differential Equations + By Peter E. Kloeden, Eckhard Platen + """ + cdef _StochasticSystem system = self.system + system.set_state(t, state) + cdef int i, j, k, num_ops = system.num_collapse + cdef double[:] dz, dw + + num_ops = system.num_collapse + dw = dW[0, :] + dz = 0.5 * (dW[0, :] + dW[1, :] / np.sqrt(3)) * dt + + imul_dense(out, 0.) + iadd_dense(out, state, 1) + iadd_dense(out, system.a(), dt) + iadd_dense(out, system.L0a(), 0.5 * dt * dt) + + for i in range(num_ops): + iadd_dense(out, system.bi(i), dw[i]) + iadd_dense(out, system.Libj(i, i), 0.5 * (dw[i] * dw[i] - dt)) + iadd_dense(out, system.Lia(i), dz[i]) + iadd_dense(out, system.L0bi(i), dw[i] * dt - dz[i]) + iadd_dense(out, system.LiLjbk(i, i, i), + 0.5 * ((1/3.) 
* dw[i] * dw[i] - dt) * dw[i]) + + for j in range(i+1, num_ops): + iadd_dense(out, system.Libj(i, j), dw[i] * dw[j]) + iadd_dense(out, system.LiLjbk(i, j, j), 0.5 * (dw[j] * dw[j] -dt) * dw[i]) + iadd_dense(out, system.LiLjbk(i, i, j), 0.5 * (dw[i] * dw[i] -dt) * dw[j]) + for k in range(j+1, num_ops): + iadd_dense(out, system.LiLjbk(i, j, k), dw[i]*dw[j]*dw[k]) + + return out + + +cdef class Milstein_imp: + cdef _StochasticSystem system + cdef bint use_inv + cdef QobjEvo implicit + cdef Data inv + cdef double prev_dt + cdef dict imp_opt + + def __init__(self, _StochasticSystem system, imp_method=None, imp_options={}): + self.system = system + self.prev_dt = 0 + if imp_method == "inv": + if not self.system.L.isconstant: + raise TypeError("The 'inv' integration method requires that the system Hamiltonian or Liouvillian be constant.") + self.use_inv = True + self.imp_opt = {} + else: + self.use_inv = False + self.imp_opt = {"method": imp_method, "options": imp_options} + + + @cython.wraparound(False) + def run(self, double t, Data state, double dt, double[:, :, ::1] dW, int ntraj): + cdef int i + if type(state) != _data.Dense: + state = _data.to(_data.Dense, state) + cdef Dense tmp = _data.zeros_like(state) + + if dt != self.prev_dt: + self.implicit = 1 - self.system.L * (dt / 2) + if self.use_inv: + self.inv = _data.inv(self.implicit._call(0)) + + for i in range(ntraj): + state = self.step(t + i * dt, state, dt, dW[i, :, :], tmp) + return state + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef Data step(self, double t, Dense state, double dt, double[:, :] dW, Dense target): + """ + Chapter 12.2 Eq. (2.11) + Numerical Solution of Stochastic Differential Equations + By Peter E. Kloeden, Eckhard Platen + """ + cdef _StochasticSystem system = self.system + cdef int i, j, num_ops = system.num_collapse + cdef double dw + + system.set_state(t, state) + + imul_dense(target, 0.) 
+ iadd_dense(target, state, 1) + iadd_dense(target, system.a(), dt * 0.5) + + for i in range(num_ops): + iadd_dense(target, system.bi(i), dW[0, i]) + + for i in range(num_ops): + for j in range(i, num_ops): + if i == j: + dw = (dW[0, i] * dW[0, j] - dt) * 0.5 + else: + dw = dW[0, i] * dW[0, j] + iadd_dense(target, system.Libj(i, j), dw) + + if self.use_inv: + out = _data.matmul(self.inv, target) + else: + out = _data.solve(self.implicit._call(t+dt), target, **self.imp_opt) + + return out + + +cdef class Taylor15_imp(Milstein_imp): + @cython.boundscheck(False) + @cython.wraparound(False) + cdef Data step(self, double t, Dense state, double dt, double[:, :] dW, Dense target): + """ + Chapter 12.2 Eq. (2.18), + Numerical Solution of Stochastic Differential Equations + By Peter E. Kloeden, Eckhard Platen + """ + cdef _StochasticSystem system = self.system + system.set_state(t, state) + cdef int i, j, k, num_ops = system.num_collapse + cdef double[:] dz, dw + + num_ops = system.num_collapse + dw = dW[0, :] + dz = 0.5 * (dW[0, :] + dW[1, :] / np.sqrt(3)) * dt + + imul_dense(target, 0.) + iadd_dense(target, state, 1) + iadd_dense(target, system.a(), dt * 0.5) + + for i in range(num_ops): + iadd_dense(target, system.bi(i), dw[i]) + iadd_dense(target, system.Libj(i, i), 0.5 * (dw[i] * dw[i] - dt)) + iadd_dense(target, system.Lia(i), dz[i] - dw[i] * dt * 0.5) + iadd_dense(target, system.L0bi(i), dw[i] * dt - dz[i]) + iadd_dense(target, system.LiLjbk(i, i, i), + 0.5 * ((1/3.) 
* dw[i] * dw[i] - dt) * dw[i]) + + for j in range(i+1, num_ops): + iadd_dense(target, system.Libj(i, j), dw[i] * dw[j]) + iadd_dense(target, system.LiLjbk(i, j, j), 0.5 * (dw[j] * dw[j] -dt) * dw[i]) + iadd_dense(target, system.LiLjbk(i, i, j), 0.5 * (dw[i] * dw[i] -dt) * dw[j]) + for k in range(j+1, num_ops): + iadd_dense(target, system.LiLjbk(i, j, k), dw[i]*dw[j]*dw[k]) + + if self.use_inv: + out = _data.matmul(self.inv, target) + else: + out = _data.solve(self.implicit._call(t+dt), target, **self.imp_opt) + + return out diff --git a/qutip/solver/sode/itotaylor.py b/qutip/solver/sode/itotaylor.py new file mode 100644 index 0000000000..040f9e1ec8 --- /dev/null +++ b/qutip/solver/sode/itotaylor.py @@ -0,0 +1,160 @@ +from . import _sode +from .sode import _Explicit_Simple_Integrator, _Implicit_Simple_Integrator +from ..stochastic import StochasticSolver, SMESolver + + +__all__ = [ + "EulerSODE", "Milstein_SODE", "Taylor1_5_SODE", "Explicit1_5_SODE", + "Implicit_Milstein_SODE", "Implicit_Taylor1_5_SODE" +] + + +class EulerSODE(_Explicit_Simple_Integrator): + """ + A simple generalization of the Euler method for ordinary + differential equations to stochastic differential equations. Only + solver which could take non-commuting ``sc_ops``. + + - Order: 0.5 + """ + stepper = _sode.Euler + N_dw = 1 + + +class Milstein_SODE(_Explicit_Simple_Integrator): + """ + An order 1.0 strong Taylor scheme. Better approximate numerical + solution to stochastic differential equations. See eq. (3.12) of + chapter 10.3 of Peter E. Kloeden and Exkhard Platen, + *Numerical Solution of Stochastic Differential Equations*.. + + - Order strong 1.0 + """ + stepper = _sode.Milstein + N_dw = 1 + + +class Taylor1_5_SODE(_Explicit_Simple_Integrator): + """ + Order 1.5 strong Taylor scheme. Solver with more terms of the + Ito-Taylor expansion. See eq. (4.6) of chapter 10.4 of Peter E. Kloeden and + Exkhard Platen, *Numerical Solution of Stochastic Differential Equations*. 
+ + - Order strong 1.5 + """ + integrator_options = { + "dt": 0.001, + "tol": 1e-10, + "derr_dt": 1e-6, + } + stepper = _sode.Taylor15 + N_dw = 2 + + @property + def options(self): + """ + Supported options by Order 1.5 strong Taylor Stochastic Integrators: + + dt : float, default=0.001 + Internal time step. + + tol : float, default=1e-10 + Relative tolerance. + + derr_dt : float, default=1e-6 + Finite time difference used to compute the derrivative of the + hamiltonian and ``sc_ops``. + """ + return self._options + + @options.setter + def options(self, new_options): + _Explicit_Simple_Integrator.options.fset(self, new_options) + + +class Explicit1_5_SODE(_Explicit_Simple_Integrator): + """ + Explicit order 1.5 strong schemes. Reproduce the order 1.5 strong + Taylor scheme using finite difference instead of derivatives. + Slower than ``taylor15`` but usable when derrivatives cannot be + analytically obtained. + See eq. (2.13) of chapter 11.2 of Peter E. Kloeden and Exkhard Platen, + *Numerical Solution of Stochastic Differential Equations.* + + - Order: strong 1.5 + """ + stepper = _sode.Explicit15 + N_dw = 2 + + +class Implicit_Milstein_SODE(_Implicit_Simple_Integrator): + """ + An order 1.0 implicit strong Taylor scheme. Implicit Milstein + scheme for the numerical simulation of stiff stochastic + differential equations. Eq. (2.11) with alpha=0.5 of + chapter 12.2 of Peter E. Kloeden and Exkhard Platen, + *Numerical Solution of Stochastic Differential Equations*. + + - Order strong 1.0 + """ + stepper = _sode.Milstein_imp + N_dw = 1 + + +class Implicit_Taylor1_5_SODE(_Implicit_Simple_Integrator): + """ + Order 1.5 implicit strong Taylor scheme. Solver with more terms of the + Ito-Taylor expansion. Eq. (2.18) with ``alpha=0.5`` of chapter 12.2 of + Peter E. Kloeden and Exkhard Platen, + *Numerical Solution of Stochastic Differential Equations*. 
+ + - Order strong 1.5 + """ + integrator_options = { + "dt": 0.001, + "tol": 1e-10, + "solve_method": None, + "solve_options": {}, + "derr_dt": 1e-6 + } + stepper = _sode.Taylor15_imp + N_dw = 2 + + @property + def options(self): + """ + Supported options by Implicit Order 1.5 strong Taylor Stochastic + Integrators: + + dt : float, default=0.001 + Internal time step. + + tol : float, default=1e-10 + Tolerance for the time steps. + + solve_method : str, default=None + Method used for solving the ``Ax=b`` of the implicit step. + Accept methods supported by :func:`qutip.core.data.solve`. + When the system is constant, the inverse of the matrix ``A`` can be + used by entering ``inv``. + + solve_options : dict, default={} + Options to pass to the call to :func:`qutip.core.data.solve`. + + derr_dt : float, default=1e-6 + Finite time difference used to compute the derivative of the + hamiltonian and ``sc_ops``. + """ + return self._options + + @options.setter + def options(self, new_options): + _Implicit_Simple_Integrator.options.fset(self, new_options) + + +StochasticSolver.add_integrator(EulerSODE, "euler") +StochasticSolver.add_integrator(Explicit1_5_SODE, "explicit1.5") +SMESolver.add_integrator(Taylor1_5_SODE, "taylor1.5") +SMESolver.add_integrator(Milstein_SODE, "milstein") +SMESolver.add_integrator(Implicit_Milstein_SODE, "milstein_imp") +SMESolver.add_integrator(Implicit_Taylor1_5_SODE, "taylor1.5_imp") diff --git a/qutip/solver/sode/rouchon.py b/qutip/solver/sode/rouchon.py new file mode 100644 index 0000000000..30c48fd0cb --- /dev/null +++ b/qutip/solver/sode/rouchon.py @@ -0,0 +1,136 @@ +import numpy as np +from qutip import unstack_columns, stack_columns +from qutip.core import data as _data +from ..stochastic import StochasticSolver +from .sode import SIntegrator +from ..integrator.integrator import Integrator + + +__all__ = ["RouchonSODE"] + + +class RouchonSODE(SIntegrator): + """ + Stochastic integration method keeping the positivity of the density
matrix. + See eq. (4) Pierre Rouchon and Jason F. Ralph, + *Efficient Quantum Filtering for Quantum Feedback Control*, + `arXiv:1410.5345 [quant-ph] `_, + Phys. Rev. A 91, 012118, (2015). + + - Order: strong 1 + + .. note:: + + This method should be used with very small ``dt``. Unlike other + methods that will return unphysical states (negative eigenvalues, NaNs) + when the time step is too large, this method will return a state that + seems normal. + """ + integrator_options = { + "dt": 0.0001, + "tol": 1e-7, + } + + def __init__(self, rhs, options): + self._options = self.integrator_options.copy() + self.options = options + + self.H = rhs.H + if self.H.issuper: + raise TypeError("The rouchon stochastic integration method can't" + " use a premade Liouvillian.") + self._issuper = rhs.issuper + + dtype = type(self.H(0).data) + self.c_ops = rhs.c_ops + self.sc_ops = rhs.sc_ops + self.cpcds = [op + op.dag() for op in self.sc_ops] + for op in self.cpcds: + op.compress() + self.M = ( + - 1j * self.H + - sum(op.dag() @ op for op in self.c_ops) * 0.5 + - sum(op.dag() @ op for op in self.sc_ops) * 0.5 + ) + self.M.compress() + + self.num_collapses = len(self.sc_ops) + self.scc = [ + [self.sc_ops[i] @ self.sc_ops[j] for i in range(j+1)] + for j in range(self.num_collapses) + ] + + self.id = _data.identity[dtype](self.H.shape[0]) + + def integrate(self, t, copy=True): + delta_t = (t - self.t) + if delta_t < 0: + raise ValueError("Stochastic integration need increasing times") + elif delta_t == 0: + return self.t, self.state, np.zeros(self.N_dw) + + dt = self.options["dt"] + N, extra = np.divmod(delta_t, dt) + N = int(N) + if extra > self.options["tol"]: + # Not a whole number of steps.
+ N += 1 + dt = delta_t / N + dW = self.generator.normal( + 0, + np.sqrt(dt), + size=(N, self.num_collapses) + ) + + if self._issuper: + self.state = unstack_columns(self.state) + for dw in dW: + self.state = self._step(self.t, self.state, dt, dw) + self.t += dt + if self._issuper: + self.state = stack_columns(self.state) + + return self.t, self.state, np.sum(dW, axis=0) + + def _step(self, t, state, dt, dW): + dy = [ + op.expect_data(t, state) * dt + dw + for op, dw in zip(self.cpcds, dW) + ] + M = _data.add(self.id, self.M._call(t), dt) + for i in range(self.num_collapses): + M = _data.add(M, self.sc_ops[i]._call(t), dy[i]) + M = _data.add(M, self.scc[i][i]._call(t), (dy[i]**2-dt)/2) + for j in range(i): + M = _data.add(M, self.scc[i][j]._call(t), dy[i]*dy[j]) + out = _data.matmul(M, state) + if self._issuper: + Mdag = M.adjoint() + out = _data.matmul(out, Mdag) + for cop in self.c_ops: + op = cop._call(t) + out += op @ state @ op.adjoint() * dt + out = out / _data.trace(out) + else: + out = out / _data.norm.l2(out) + return out + + @property + def options(self): + """ + Supported options by Rouchon Stochastic Integrators: + + dt : float, default=0.001 + Internal time step. + + tol : float, default=1e-7 + Relative tolerance. + """ + return self._options + + @options.setter + def options(self, new_options): + Integrator.options.fset(self, new_options) + + +StochasticSolver.add_integrator(RouchonSODE, "rouchon") diff --git a/qutip/solver/sode/sode.py b/qutip/solver/sode/sode.py new file mode 100644 index 0000000000..94b04409a5 --- /dev/null +++ b/qutip/solver/sode/sode.py @@ -0,0 +1,281 @@ +import numpy as np +from . import _sode +from ..integrator.integrator import Integrator +from ..stochastic import StochasticSolver, SMESolver + + +__all__ = ["SIntegrator", "PlatenSODE", "PredCorr_SODE"] + + +class SIntegrator(Integrator): + """ + A wrapper around stochastic ODE solvers. 
+ + Parameters + ---------- + system: qutip.StochasticSystem + Quantum system in which states evolve. + + options: dict + Options for the integrator. + + Class Attributes + ---------------- + name : str + The name of the integrator. + + supports_blackbox : bool + If True, then the integrator calls only ``system.matmul``, + ``system.matmul_data``, ``system.expect``, ``system.expect_data`` and + ``isconstant``, ``isoper`` or ``issuper``. This allows the solver using + the integrator to modify the system in creative ways. In particular, + the solver may modify the system depending on *both* the time ``t`` + *and* the current ``state`` the system is being applied to. + + If the integrator calls any other methods, set to False. + + supports_time_dependent : bool + If True, then the integrator supports time dependent systems. If False, + ``supports_blackbox`` should usually be ``False`` too. + + integrator_options : dict + A dictionary of options used by the integrator and their default + values. Once initiated, ``self.options`` will be a dict with the same + keys, not the full options object passed to the solver. Options' keys + included here will be supported by the :cls:SolverOdeOptions. + """ + + def set_state(self, t, state0, generator): + """ + Set the state of the SODE solver. + + Parameters + ---------- + t : float + Initial time + + state0 : qutip.Data + Initial state. + + generator : numpy.random.generator + Random number generator. + """ + self.t = t + self.state = state0 + self.generator = generator + + def get_state(self, copy=True): + return self.t, self.state, self.generator + + def integrate(self, t, copy=True): + """ + Evolve to t. + + Before calling `integrate` for the first time, the initial state should + be set with `set_state`. + + Parameters + ---------- + t : float + Time to integrate to, should be larger than the previous time. + + copy : bool [True] + Whether to return a copy of the state or the state itself. 
+ + Returns + ------- + (t, state, noise) : (float, qutip.Data, np.ndarray) + The state of the solver at ``t``. + """ + raise NotImplementedError + + def mcstep(self, t, copy=True): + raise NotImplementedError + + +class _Explicit_Simple_Integrator(SIntegrator): + """ + Stochastic evolution solver + """ + + integrator_options = { + "dt": 0.001, + "tol": 1e-10, + } + stepper = None + N_dw = 0 + + def __init__(self, rhs, options): + self._options = self.integrator_options.copy() + self.options = options + self.system = rhs(self.options) + self.step_func = self.stepper(self.system).run + + def integrate(self, t, copy=True): + delta_t = t - self.t + if delta_t < 0: + raise ValueError("Stochastic integration time") + elif delta_t == 0: + return self.t, self.state, np.zeros(self.N_dw) + + dt = self.options["dt"] + N, extra = np.divmod(delta_t, dt) + N = int(N) + if extra > self.options["tol"]: + # Not a whole number of steps. + N += 1 + dt = delta_t / N + dW = self.generator.normal( + 0, np.sqrt(dt), size=(N, self.N_dw, self.system.num_collapse) + ) + + self.state = self.step_func(self.t, self.state, dt, dW, N) + self.t += dt * N + + return self.t, self.state, np.sum(dW[:, 0, :], axis=0) + + @property + def options(self): + """ + Supported options by Explicit Stochastic Integrators: + + dt : float, default=0.001 + Internal time step. + + tol : float, default=1e-10 + Tolerance for the time steps. 
+ """ + return self._options + + @options.setter + def options(self, new_options): + Integrator.options.fset(self, new_options) + + +class _Implicit_Simple_Integrator(_Explicit_Simple_Integrator): + """ + Stochastic evolution solver + """ + + integrator_options = { + "dt": 0.001, + "tol": 1e-10, + "solve_method": None, + "solve_options": {}, + } + stepper = None + N_dw = 0 + + def __init__(self, rhs, options): + self._options = self.integrator_options.copy() + self.options = options + self.system = rhs(self.options) + self.step_func = self.stepper( + self.system, + self.options["solve_method"], + self.options["solve_options"], + ).run + + @property + def options(self): + """ + Supported options by Implicit Stochastic Integrators: + + dt : float, default=0.001 + Internal time step. + + tol : float, default=1e-10 + Tolerance for the time steps. + + solve_method : str, default=None + Method used for solver the ``Ax=b`` of the implicit step. + Accept methods supported by :func:`qutip.core.data.solve`. + When the system is constant, the inverse of the matrix ``A`` can be + used by entering ``inv``. + + solve_options : dict, default={} + Options to pass to the call to :func:`qutip.core.data.solve`. + """ + return self._options + + @options.setter + def options(self, new_options): + Integrator.options.fset(self, new_options) + + +class PlatenSODE(_Explicit_Simple_Integrator): + """ + Explicit scheme, creates the Milstein using finite differences + instead of analytic derivatives. Also contains some higher order + terms, thus converges better than Milstein while staying strong + order 1.0. Does not require derivatives. See eq. (7.47) of chapter 7 of + H.-P. Breuer and F. Petruccione, *The Theory of Open Quantum Systems*. + + - Order: strong 1, weak 2 + """ + + stepper = _sode.Platen + N_dw = 1 + + +class PredCorr_SODE(_Explicit_Simple_Integrator): + """ + Generalization of the trapezoidal method to stochastic differential + equations. More stable than explicit methods. 
See eq. (5.4) of + chapter 15.5 of Peter E. Kloeden and Exkhard Platen, + *Numerical Solution of Stochastic Differential Equations*. + + - Order strong 0.5, weak 1.0 + - Codes to only correct the stochastic part (:math:`\\alpha=0`, + :math:`\\eta=1/2`): ``'pred-corr'``, ``'predictor-corrector'`` or + ``'pc-euler'`` + - Codes to correct both the stochastic and deterministic parts + (:math:`\\alpha=1/2`, :math:`\\eta=1/2`): ``'pc-euler-imp'``, + ``'pc-euler-2'`` or ``'pred-corr-2'`` + """ + + integrator_options = { + "dt": 0.001, + "tol": 1e-10, + "alpha": 0.0, + "eta": 0.5, + } + stepper = _sode.PredCorr + N_dw = 1 + + def __init__(self, rhs, options): + self._options = self.integrator_options.copy() + self.options = options + self.system = rhs(self.options) + self.step_func = self.stepper( + self.system, self.options["alpha"], self.options["eta"] + ).run + + @property + def options(self): + """ + Supported options by Explicit Stochastic Integrators: + + dt : float, default=0.001 + Internal time step. + + tol : float, default=1e-10 + Tolerance for the time steps. + + alpha : float, default=0. + Implicit factor to the drift. + eff_drift ~= drift(t) * (1-alpha) + drift(t+dt) * alpha + + eta : float, default=0.5 + Implicit factor to the diffusion. 
+ eff_diffusion ~= diffusion(t) * (1-eta) + diffusion(t+dt) * eta + """ + return self._options + + @options.setter + def options(self, new_options): + Integrator.options.fset(self, new_options) + + +StochasticSolver.add_integrator(PlatenSODE, "platen") +SMESolver.add_integrator(PredCorr_SODE, "pred_corr") diff --git a/qutip/solver/sode/ssystem.pxd b/qutip/solver/sode/ssystem.pxd new file mode 100644 index 0000000000..a09212c1db --- /dev/null +++ b/qutip/solver/sode/ssystem.pxd @@ -0,0 +1,24 @@ +#cython: language_level=3 +from qutip.core.data cimport Data, Dense +from qutip.core.cy.qobjevo cimport QobjEvo + +cdef class _StochasticSystem: + cdef readonly list c_ops + cdef readonly QobjEvo L + cdef readonly int num_collapse + cdef Data state + cdef double t + + cpdef Data drift(self, t, Data state) + + cpdef list diffusion(self, t, Data state) + + cpdef void set_state(self, double t, Dense state) except * + + cpdef Data a(self) + cpdef Data bi(self, int i) + cpdef Data Libj(self, int i, int j) + cpdef Data Lia(self, int i) + cpdef Data L0bi(self, int i) + cpdef Data LiLjbk(self, int i, int j, int k) + cpdef Data L0a(self) diff --git a/qutip/solver/sode/ssystem.pyx b/qutip/solver/sode/ssystem.pyx new file mode 100644 index 0000000000..be15d4634a --- /dev/null +++ b/qutip/solver/sode/ssystem.pyx @@ -0,0 +1,565 @@ +#cython: language_level=3 +""" +Class to represent a stochastic differential equation system. 
+""" + +from qutip.core import data as _data +from qutip.core.cy.qobjevo cimport QobjEvo +from qutip.core.data cimport Data, dense, Dense, imul_dense, iadd_dense +from qutip.core.data.trace cimport trace_oper_ket_dense +cimport cython +import numpy as np +from qutip.core import spre, spost, liouvillian + +__all__ = [ + "StochasticOpenSystem", "StochasticClosedSystem" +] + +@cython.boundscheck(False) +@cython.initializedcheck(False) +cdef Dense _dense_wrap(double complex [::1] x): + return dense.wrap(&x[0], x.shape[0], 1) + + +cdef class _StochasticSystem: + """ + RHS for stochastic differential equations. + + Contain the deterministic drift term and the diffusion term[s] through + the ``drift`` and ``diffusion`` methods. + + Derrivatives corresponding to the terms in the ito-tyalor expansion are + available through a different interface: + ``set_state``, ``a``, ``bi``, ``Libj`` etc. + + A different interface is used since each term is not independant, but + which terms is needed change according to the integration method. + Whereas the raw drift and diffusion are independant. + """ + def __init__(self): + raise NotImplementedError + + cpdef Data drift(self, t, Data state): + """ + Compute the drift term for the ``state`` at time ``t``. + """ + raise NotImplementedError + + cpdef list diffusion(self, t, Data state): + """ + Compute the diffusion terms for the ``state`` at time ``t``. + """ + raise NotImplementedError + + cpdef void set_state(self, double t, Dense state) except *: + """ + Initialize the set of derrivatives. + """ + raise NotImplementedError + + cpdef Data a(self): + """ + Drift term + """ + raise NotImplementedError + + cpdef Data bi(self, int i): + """ + Diffusion term for the ``i``th operator. 
+ """ + raise NotImplementedError + + cpdef Data Libj(self, int i, int j): + """ + bi_n * d bj / dx_n + """ + raise NotImplementedError + + cpdef Data Lia(self, int i): + """ + bi_n * d a / dx_n + """ + raise NotImplementedError + + cpdef Data L0bi(self, int i): + """ + dbi/dt + + a_n * d bi / dx_n + + sum_k bk_n bk_m *0.5 d**2 (bi) / (dx_n dx_m) + """ + raise NotImplementedError + + cpdef Data LiLjbk(self, int i, int j, int k): + """ + bi_n * d/dx_n ( bj_m * d bk / dx_m) + """ + raise NotImplementedError + + cpdef Data L0a(self): + """ + da/dt + + a_n * d a / dx_n + + sum_k bk_n bk_m *0.5 d**2 (a) / (dx_n dx_m) + """ + raise NotImplementedError + + +cdef class StochasticClosedSystem(_StochasticSystem): + """ + RHS for closed quantum stochastic system (ssesolve) + + drift = -1H * psi + + sum_i (-c_i.dag * c_i / 2 + c_i * e_i / 2 - e_i**2 / 8) * psi + + e_i = + + diffusion = (c_i - e_i / 2) * psi + """ + cdef readonly list cpcd_ops + + def __init__(self, H, sc_ops): + self.L = -1j * H + self.c_ops = sc_ops + self.cpcd_ops = [op + op.dag() for op in sc_ops] + + self.num_collapse = len(self.c_ops) + for c_op in self.c_ops: + self.L += -0.5 * c_op.dag() * c_op + + cpdef Data drift(self, t, Data state): + cdef int i + cdef QobjEvo c_op + cdef Data temp, out + + out = self.L.matmul_data(t, state) + for i in range(self.num_collapse): + c_op = self.cpcd_ops[i] + e = c_op.expect_data(t, state) + c_op = self.c_ops[i] + temp = c_op.matmul_data(t, state) + out = _data.add(out, state, -0.125 * e * e) + out = _data.add(out, temp, 0.5 * e) + return out + + cpdef list diffusion(self, t, Data state): + cdef int i + cdef QobjEvo c_op + out = [] + for i in range(self.num_collapse): + c_op = self.c_ops[i] + _out = c_op.matmul_data(t, state) + c_op = self.cpcd_ops[i] + expect = c_op.expect_data(t, state) + out.append(_data.add(_out, state, -0.5 * expect)) + return out + + def __reduce__(self): + return ( + StochasticClosedSystem.restore, + (self.L, self.c_ops, self.cpcd_ops) + ) + + 
@classmethod + def restore(cls, L, c_ops, cpcd_ops): + cdef StochasticClosedSystem out = cls.__new__(cls) + out.L = L + out.c_ops = c_ops + out.cpcd_ops = cpcd_ops + out.num_collapse = len(c_ops) + return out + + +cdef class StochasticOpenSystem(_StochasticSystem): + """ + RHS for open quantum stochastic system (smesolve) + + drift = liouvillian(H, sc_ops + c_ops)(rho) + + diffusion = c_i @ rho + rho @ c_i/dag - tr(c_i @ rho) rho + """ + cdef int state_size, N_root + cdef double dt + cdef int _is_set + cdef bint _a_set, _b_set, _Lb_set, _L0b_set, _La_set, _LLb_set, _L0a_set + + cdef Dense _a, temp, _L0a + cdef complex[::1] expect_Cv + cdef complex[:, ::1] expect_Cb, _b, _La, _L0b + cdef complex[:, :, ::1] _Lb + cdef complex[:, :, :, ::1] _LLb + + def __init__(self, H, sc_ops, c_ops=(), derr_dt=1e-6): + if H.issuper: + self.L = H + liouvillian(None, sc_ops) + else: + self.L = liouvillian(H, sc_ops) + if c_ops: + self.L = self.L + liouvillian(None, c_ops) + + self.c_ops = [spre(op) + spost(op.dag()) for op in sc_ops] + self.num_collapse = len(self.c_ops) + self.state_size = self.L.shape[1] + self._is_set = 0 + self.N_root = int(self.state_size**0.5) + self.dt = derr_dt + + cpdef Data drift(self, t, Data state): + return self.L.matmul_data(t, state) + + cpdef list diffusion(self, t, Data state): + cdef int i + cdef QobjEvo c_op + cdef complex expect + cdef out = [] + for i in range(self.num_collapse): + c_op = self.c_ops[i] + vec = c_op.matmul_data(t, state) + expect = _data.trace_oper_ket(vec) + out.append(_data.add(vec, state, -expect)) + return out + + cpdef void set_state(self, double t, Dense state) except *: + cdef n, l + self.t = t + if not state.fortran: + state = state.reorder(fortran=1) + self.state = state + self._a_set = False + self._b_set = False + self._Lb_set = False + self._L0b_set = False + self._La_set = False + self._LLb_set = False + self._L0a_set = False + + if not self._is_set: + n = self.num_collapse + l = self.state_size + self._is_set = 1 + 
self._a = dense.zeros(self.state_size, 1) + self.temp = dense.zeros(self.state_size, 1) + self._L0a = dense.zeros(self.state_size, 1) + self.expect_Cv = np.zeros(n, dtype=complex) + self.expect_Cb = np.zeros((n, n), dtype=complex) + self._b = np.zeros((n, l), dtype=complex) + self._L0b = np.zeros((n, l), dtype=complex) + self._Lb = np.zeros((n, n, l), dtype=complex) + self._LLb = np.zeros((n, n, n, l), dtype=complex) + self._La = np.zeros((n, l), dtype=complex) + + cpdef Data a(self): + if not self._is_set: + raise RuntimeError( + "Derrivatives set for ito taylor expansion need " + "to receive the state with `set_state`." + ) + if not self._a_set: + self._compute_a() + return self._a + + cdef void _compute_a(StochasticOpenSystem self) except *: + if not self._is_set: + raise RuntimeError( + "Derrivatives set for ito taylor expansion need " + "to receive the state with `set_state`." + ) + imul_dense(self._a, 0) + self.L.matmul_data(self.t, self.state, self._a) + self._a_set = True + + cpdef Data bi(self, int i): + if not self._is_set: + raise RuntimeError( + "Derrivatives set for ito taylor expansion need " + "to receive the state with `set_state`." + ) + if not self._b_set: + self._compute_b() + return _dense_wrap(self._b[i, :]) + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef void _compute_b(self) except *: + if not self._is_set: + raise RuntimeError( + "Derrivatives set for ito taylor expansion need " + "to receive the state with `set_state`." 
+ ) + cdef int i + cdef QobjEvo c_op + cdef Dense b_vec, state=self.state + for i in range(self.num_collapse): + c_op = self.c_ops[i] + b_vec = _dense_wrap(self._b[i, :]) + imul_dense(b_vec, 0) + c_op.matmul_data(self.t, state, b_vec) + self.expect_Cv[i] = trace_oper_ket_dense(b_vec) + iadd_dense(b_vec, state, -self.expect_Cv[i]) + self._b_set = True + + cpdef Data Libj(self, int i, int j): + if not self._is_set: + raise RuntimeError( + "Derrivatives set for ito taylor expansion need " + "to receive the state with `set_state`." + ) + if not self._Lb_set: + self._compute_Lb() + # We only support commutative diffusion + if i > j: + j, i = i, j + return _dense_wrap(self._Lb[i, j, :]) + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef void _compute_Lb(self) except *: + cdef int i, j + cdef QobjEvo c_op + cdef Dense b_vec, Lb_vec, state=self.state + cdef complex expect + if not self._b_set: + self._compute_b() + + for i in range(self.num_collapse): + c_op = self.c_ops[i] + for j in range(i, self.num_collapse): + b_vec = _dense_wrap(self._b[j, :]) + Lb_vec = _dense_wrap(self._Lb[i, j, :]) + imul_dense(Lb_vec, 0) + c_op.matmul_data(self.t, b_vec, Lb_vec) + self.expect_Cb[i,j] = trace_oper_ket_dense(Lb_vec) + iadd_dense(Lb_vec, b_vec, -self.expect_Cv[i]) + iadd_dense(Lb_vec, state, -self.expect_Cb[i,j]) + self._Lb_set = True + + cpdef Data Lia(self, int i): + if not self._La_set: + self._compute_La() + return _dense_wrap(self._La[i, :]) + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef void _compute_La(self) except *: + cdef int i + cdef QobjEvo c_op + cdef Dense b_vec, La_vec + if not self._b_set: + self._compute_b() + + for i in range(self.num_collapse): + b_vec = _dense_wrap(self._b[i, :]) + La_vec = _dense_wrap(self._La[i, :]) + imul_dense(La_vec, 0.) 
+ self.L.matmul_data(self.t, b_vec, La_vec) + self._La_set = True + + cpdef Data L0bi(self, int i): + # L0bi = abi' + dbi/dt + Sum_j bjbjbi"/2 + if not self._L0b_set: + self._compute_L0b() + return _dense_wrap(self._L0b[i, :]) + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef void _compute_L0b(self) except *: + cdef int i, j + cdef QobjEvo c_op + cdef Dense b_vec, L0b_vec + if not self._Lb_set: + self._compute_Lb() + if not self._a_set: + self._compute_a() + + for i in range(self.num_collapse): + c_op = self.c_ops[i] + L0b_vec = _dense_wrap(self._L0b[i, :]) + b_vec = _dense_wrap(self._b[i, :]) + imul_dense(L0b_vec, 0.) + + # db/dt + if not c_op.isconstant: + c_op.matmul_data(self.t + self.dt, self.state, L0b_vec) + expect = trace_oper_ket_dense(L0b_vec) + iadd_dense(L0b_vec, self.state, -expect) + iadd_dense(L0b_vec, b_vec, -1) + imul_dense(L0b_vec, 1/self.dt) + + # ab' + imul_dense(self.temp, 0) + c_op.matmul_data(self.t, self._a, self.temp) + expect = trace_oper_ket_dense(self.temp) + iadd_dense(L0b_vec, self.temp, 1) + iadd_dense(L0b_vec, self._a, -self.expect_Cv[i]) + iadd_dense(L0b_vec, self.state, -expect) + + # bbb" : expect_Cb[i,j] only defined for j>=i + for j in range(i): + b_vec = _dense_wrap(self._b[j, :]) + iadd_dense(L0b_vec, b_vec, -self.expect_Cb[j,i]) + for j in range(i, self.num_collapse): + b_vec = _dense_wrap(self._b[j, :]) + iadd_dense(L0b_vec, b_vec, -self.expect_Cb[i,j]) + self._L0b_set = True + + cpdef Data LiLjbk(self, int i, int j, int k): + # LiLjbk = bi(bj'bk'+bjbk"), i<=j<=k + if not self._LLb_set: + self._compute_LLb() + # Only commutative noise supported + # Definied for i <= j <= k + # Simple bubble sort to order the terms + if i>j: i, j = j, i + if j>k: + j, k = k, j + if i>j: i, j = j, i + + return _dense_wrap(self._LLb[i, j, k, :]) + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef void _compute_LLb(self) except *: + # LiLjbk = bi(bj'bk'+bjbk"), i<=j<=k + # sc_ops must commute (LiLjbk = LjLibk = 
LkLjbi) + cdef int i, j, k + cdef QobjEvo c_op + cdef Dense bj_vec, bk_vec, LLb_vec, Lb_vec + if not self._Lb_set: + self._compute_Lb() + + for i in range(self.num_collapse): + for j in range(i, self.num_collapse): + for k in range(j, self.num_collapse): + c_op = self.c_ops[i] + LLb_vec = _dense_wrap(self._LLb[i, j, k, :]) + Lb_vec = _dense_wrap(self._Lb[j, k, :]) + bj_vec = _dense_wrap(self._b[j, :]) + bk_vec = _dense_wrap(self._b[k, :]) + imul_dense(LLb_vec, 0.) + + c_op.matmul_data(self.t, Lb_vec, LLb_vec) + expect = trace_oper_ket_dense(LLb_vec) + + iadd_dense(LLb_vec, Lb_vec, -self.expect_Cv[i]) + iadd_dense(LLb_vec, self.state, -expect) + iadd_dense(LLb_vec, bj_vec, -self.expect_Cb[i,k]) + iadd_dense(LLb_vec, bk_vec, -self.expect_Cb[i,j]) + + self._LLb_set = True + + cpdef Data L0a(self): + # L0a = a'a + da/dt + bba"/2 (a" = 0) + if not self._L0a_set: + self._compute_L0a() + return self._L0a + + cdef void _compute_L0a(self) except *: + # L0a = a'a + da/dt + bba"/2 (a" = 0) + imul_dense(self._L0a, 0.) + if not self.L.isconstant: + self.L.matmul_data(self.t + self.dt, self.state, self._L0a) + iadd_dense(self._L0a, self._a, -1) + imul_dense(self._L0a, 1/self.dt) + self.L.matmul_data(self.t, self._a, self._L0a) + self._L0a_set = True + + def __reduce__(self): + return ( + StochasticOpenSystem.restore, + (self.L, self.c_ops, self.dt) + ) + + @classmethod + def restore(cls, L, c_ops, derr_dt): + cdef StochasticOpenSystem out = cls.__new__(cls) + out.L = L + out.c_ops = c_ops + out.num_collapse = len(c_ops) + out.state_size = out.L.shape[1] + out._is_set = 0 + out.N_root = int(out.state_size**0.5) + out.dt = derr_dt + return out + + +cdef class SimpleStochasticSystem(_StochasticSystem): + """ + Simple system that can be solver analytically. + Used in tests. 
+ + drift = -iH @ vec + + diffusion = c_i @ vec + + """ + cdef double dt + + def __init__(self, H, c_ops): + self.L = -1j * H + self.c_ops = c_ops + + self.num_collapse = len(self.c_ops) + self.dt = 1e-6 + + cpdef Data drift(self, t, Data state): + return self.L.matmul_data(t, state) + + cpdef list diffusion(self, t, Data state): + cdef int i + cdef out = [] + for i in range(self.num_collapse): + out.append(self.c_ops[i].matmul_data(t, state)) + return out + + cpdef void set_state(self, double t, Dense state) except *: + self.t = t + self.state = state + + cpdef Data a(self): + return self.L.matmul_data(self.t, self.state) + + cpdef Data bi(self, int i): + return self.c_ops[i].matmul_data(self.t, self.state) + + cpdef Data Libj(self, int i, int j): + bj = self.c_ops[i].matmul_data(self.t, self.state) + return self.c_ops[j].matmul_data(self.t, bj) + + cpdef Data Lia(self, int i): + bi = self.c_ops[i].matmul_data(self.t, self.state) + return self.L.matmul_data(self.t, bi) + + cpdef Data L0bi(self, int i): + # L0bi = abi' + dbi/dt + Sum_j bjbjbi"/2 + a = self.L.matmul_data(self.t, self.state) + abi = self.c_ops[i].matmul_data(self.t, a) + b = self.c_ops[i].matmul_data(self.t, self.state) + bdt = self.c_ops[i].matmul_data(self.t + self.dt, self.state) + return abi + (bdt - b) / self.dt + + cpdef Data LiLjbk(self, int i, int j, int k): + bk = self.c_ops[k].matmul_data(self.t, self.state) + Ljbk = self.c_ops[j].matmul_data(self.t, bk) + return self.c_ops[i].matmul_data(self.t, Ljbk) + + cpdef Data L0a(self): + # L0a = a'a + da/dt + bba"/2 (a" = 0) + a = self.L.matmul_data(self.t, self.state) + aa = self.L.matmul_data(self.t, a) + adt = self.L.matmul_data(self.t + self.dt, self.state) + return aa + (adt - a) / self.dt + + def analytic(self, t, W): + """ + Analytic solution, H and all c_ops must commute. 
+ Support time dependance of order 2 (a + b*t + c*t**2) + """ + def _intergal(f, T): + return (f(0) + 4 * f(T/2) + f(T)) / 6 + + out = _intergal(self.L, t) * t + for i in range(self.num_collapse): + out += _intergal(self.c_ops[i], t) * W[i] + out -= 0.5 * _intergal( + lambda t: self.c_ops[i](t) @ self.c_ops[i](t), t + ) * t + return out.expm().data diff --git a/qutip/solver/solver_base.py b/qutip/solver/solver_base.py index 953bfab490..028eb448f3 100644 --- a/qutip/solver/solver_base.py +++ b/qutip/solver/solver_base.py @@ -5,7 +5,7 @@ from ..core import stack_columns, unstack_columns from .result import Result from .integrator import Integrator -from ..ui.progressbar import progess_bars +from ..ui.progressbar import progress_bars from time import time @@ -110,8 +110,8 @@ def run(self, state0, tlist, *, args=None, e_ops=None): For a ``state0`` at time ``tlist[0]`` do the evolution as directed by ``rhs`` and for each time in ``tlist`` store the state and/or - expectation values in a :cls:`Result`. The evolution method and stored - results are determined by ``options``. + expectation values in a :class:`Result`. The evolution method and + stored results are determined by ``options``. 
Parameters ---------- @@ -150,8 +150,9 @@ def run(self, state0, tlist, *, args=None, e_ops=None): results.add(tlist[0], self._restore_state(_data0, copy=False)) stats['preparation time'] += time() - _time_start - progress_bar = progess_bars[self.options['progress_bar']]() - progress_bar.start(len(tlist)-1, **self.options['progress_kwargs']) + progress_bar = progress_bars[self.options['progress_bar']]( + len(tlist)-1, **self.options['progress_kwargs'] + ) for t, state in self._integrator.run(tlist): progress_bar.update() results.add(t, self._restore_state(state, copy=False)) diff --git a/qutip/solver/steadystate.py b/qutip/solver/steadystate.py index f576c9f2bb..41fe9e4b63 100644 --- a/qutip/solver/steadystate.py +++ b/qutip/solver/steadystate.py @@ -1,4 +1,4 @@ -from qutip import liouvillian, lindblad_dissipator, Qobj, qeye, qzero +from qutip import liouvillian, lindblad_dissipator, Qobj, qzero_like, qeye_like from qutip import vector_to_operator, operator_to_vector from qutip import settings import qutip.core.data as _data @@ -215,16 +215,16 @@ def _steadystate_direct(A, weight, **kw): if isinstance(L, _data.CSR): L, b = _permute_wbm(L, b) else: - warn("Only sparse matrice can be permuted.", RuntimeWarning) + warn("Only CSR matrice can be permuted.", RuntimeWarning) use_rcm = False if kw.pop("use_rcm", False): if isinstance(L, _data.CSR): L, b, perm = _permute_rcm(L, b) use_rcm = True else: - warn("Only sparse matrice can be permuted.", RuntimeWarning) + warn("Only CSR matrice can be permuted.", RuntimeWarning) if kw.pop("use_precond", False): - if isinstance(L, _data.CSR): + if isinstance(L, (_data.CSR, _data.Dia)): kw["M"] = _compute_precond(L, kw) else: warn("Only sparse solver use preconditioners.", RuntimeWarning) @@ -271,16 +271,16 @@ def _steadystate_power(A, **kw): if isinstance(L, _data.CSR): L, y = _permute_wbm(L, y) else: - warn("Only sparse matrice can be permuted.", RuntimeWarning) + warn("Only CSR matrice can be permuted.", RuntimeWarning) use_rcm = 
False if kw.pop("use_rcm", False): if isinstance(L, _data.CSR): L, y, perm = _permute_rcm(L, y) use_rcm = True else: - warn("Only sparse matrice can be permuted.", RuntimeWarning) + warn("Only CSR matrice can be permuted.", RuntimeWarning) if kw.pop("use_precond", False): - if isinstance(L, _data.CSR): + if isinstance(L, (_data.CSR, _data.Dia)): kw["M"] = _compute_precond(L, kw) else: warn("Only sparse solver use preconditioners.", RuntimeWarning) @@ -377,8 +377,8 @@ def steadystate_floquet(H_0, c_ops, Op_t, w_d=1.0, n_it=3, sparse=False, # L_p and L_m correspond to the positive and negative # frequency terms respectively. # They are independent in the model, so we keep both names. - Id = qeye(L_0.dims[0], dtype=type(L_0.data)) - S = T = qzero(L_0.dims[0], dtype=type(L_0.data)) + Id = qeye_like(L_0) + S = T = qzero_like(L_0) if isinstance(H_0.data, _data.CSR) and not sparse: L_0 = L_0.to("Dense") @@ -469,7 +469,7 @@ def pseudo_inverse(L, rhoss=None, w=None, method='splu', *, use_rcm=False, method = "splu" if sparse else "pinv" sparse_solvers = ["splu", "mkl_spsolve", "spilu"] dense_solvers = ["solve", "lstsq", "pinv"] - if isinstance(L.data, _data.CSR) and method in dense_solvers: + if isinstance(L.data, (_data.CSR, _data.Dia)) and method in dense_solvers: L = L.to("dense") elif isinstance(L.data, _data.Dense) and method in sparse_solvers: L = L.to("csr") @@ -478,17 +478,17 @@ def pseudo_inverse(L, rhoss=None, w=None, method='splu', *, use_rcm=False, dtype = type(L.data) rhoss_vec = operator_to_vector(rhoss) - tr_op = qeye(L.dims[0][0]) + tr_op = qeye_like(rhoss) tr_op_vec = operator_to_vector(tr_op) P = _data.kron(rhoss_vec.data, tr_op_vec.data.transpose(), dtype=dtype) - I = _data.csr.identity(N * N) + I = _data.identity_like(P) Q = _data.sub(I, P) if w in [None, 0.0]: L += 1e-15j else: - L += 1.0j*w + L += 1.0j * w use_rcm = use_rcm and isinstance(L.data, _data.CSR) @@ -504,8 +504,9 @@ def pseudo_inverse(L, rhoss=None, w=None, method='splu', *, use_rcm=False, LI 
= _data.Dense(scipy.linalg.pinv(A.to_array()), copy=False) LIQ = _data.matmul(LI, Q) elif method == "spilu": - if not isinstance(A, _data.CSR): - raise TypeError("'spilu' method can only be used with sparse data") + if not isinstance(A, (_data.CSR, _data.Dia)): + warn("'spilu' method can only be used with sparse data.") + A = _data.to(_data.CSR, A) ILU = scipy.sparse.linalg.spilu(A.as_scipy().tocsc(), **kwargs) LIQ = _data.Dense(ILU.solve(Q.to_array())) else: diff --git a/qutip/solver/stochastic.py b/qutip/solver/stochastic.py new file mode 100644 index 0000000000..d5d0504a85 --- /dev/null +++ b/qutip/solver/stochastic.py @@ -0,0 +1,756 @@ +__all__ = ["smesolve", "SMESolver", "ssesolve", "SSESolver"] + +from .sode.ssystem import * +from .result import MultiTrajResult, Result, ExpectOp +from .multitraj import MultiTrajSolver +from .. import Qobj, QobjEvo, liouvillian, lindblad_dissipator +import numpy as np +from collections.abc import Iterable +from functools import partial + + +class StochasticTrajResult(Result): + def _post_init(self, m_ops=(), dw_factor=(), heterodyne=False): + super()._post_init() + self.W = [] + self.m_ops = [] + self.m_expect = [] + self.dW_factor = dw_factor + self.heterodyne = heterodyne + for op in m_ops: + f = self._e_op_func(op) + self.W.append([0.0]) + self.m_expect.append([]) + self.m_ops.append(ExpectOp(op, f, self.m_expect[-1].append)) + self.add_processor(self.m_ops[-1]._store) + + def add(self, t, state, noise): + super().add(t, state) + if noise is not None and self.options["store_measurement"]: + for i, dW in enumerate(noise): + self.W[i].append(self.W[i][-1] + dW) + + @property + def wiener_process(self): + """ + Wiener processes for each stochastic collapse operators. + + The output shape is + (len(sc_ops), len(tlist)) + for homodyne detection, and + (len(sc_ops), 2, len(tlist)) + for heterodyne detection. 
+ """ + W = np.array(self.W) + if self.heterodyne: + W = W.reshape(-1, 2, W.shape[1]) + return W + + @property + def dW(self): + """ + Wiener increment for each stochastic collapse operators. + + The output shape is + (len(sc_ops), len(tlist)-1) + for homodyne detection, and + (len(sc_ops), 2, len(tlist)-1) + for heterodyne detection. + """ + dw = np.diff(self.W, axis=1) + if self.heterodyne: + dw = dw.reshape(-1, 2, dw.shape[1]) + return dw + + @property + def measurement(self): + """ + Measurements for each stochastic collapse operators. + + The output shape is + (len(sc_ops), len(tlist)-1) + for homodyne detection, and + (len(sc_ops), 2, len(tlist)-1) + for heterodyne detection. + """ + dts = np.diff(self.times) + m_expect = np.array(self.m_expect)[:, 1:] + noise = np.einsum( + "i,ij,j->ij", self.dW_factor, np.diff(self.W, axis=1), (1 / dts) + ) + if self.heterodyne: + m_expect = m_expect.reshape(-1, 2, m_expect.shape[1]) + noise = noise.reshape(-1, 2, noise.shape[1]) + return m_expect + noise + + +class StochasticResult(MultiTrajResult): + def _post_init(self): + super()._post_init() + + store_measurement = self.options["store_measurement"] + keep_runs = self.options["keep_runs_results"] + + if not keep_runs and store_measurement: + self.add_processor( + partial(self._reduce_attr, attr="wiener_process") + ) + self._wiener_process = [] + self.add_processor(partial(self._reduce_attr, attr="dW")) + self._dW = [] + + if not keep_runs and store_measurement: + self.add_processor(partial(self._reduce_attr, attr="measurement")) + self._measurement = [] + + def _reduce_attr(self, trajectory, attr): + """ + Add a result attribute to a list when the trajectories are not stored. + """ + getattr(self, "_" + attr).append(getattr(trajectory, attr)) + + def _trajectories_attr(self, attr): + """ + Get the result associated to the attr, whether the trajectories are + saved or not. 
+ """ + if hasattr(self, "_" + attr): + return getattr(self, "_" + attr) + elif self.options["keep_runs_results"]: + return np.array([ + getattr(traj, attr) for traj in self.trajectories + ]) + return None + + @property + def measurement(self): + """ + Measurements for each trajectories and stochastic collapse operators. + + The output shape is + (ntraj, len(sc_ops), len(tlist)-1) + for homodyne detection, and + (ntraj, len(sc_ops), 2, len(tlist)-1) + for heterodyne detection. + """ + return self._trajectories_attr("measurement") + + @property + def dW(self): + """ + Wiener increment for each trajectories and stochastic collapse + operators. + + The output shape is + (ntraj, len(sc_ops), len(tlist)-1) + for homodyne detection, and + (ntraj, len(sc_ops), 2, len(tlist)-1) + for heterodyne detection. + """ + return self._trajectories_attr("dW") + + @property + def wiener_process(self): + """ + Wiener processes for each trajectories and stochastic collapse + operators. + + The output shape is + (ntraj, len(sc_ops), len(tlist)-1) + for homodyne detection, and + (ntraj, len(sc_ops), 2, len(tlist)-1) + for heterodyne detection. + """ + return self._trajectories_attr("wiener_process") + + +class _StochasticRHS: + """ + In between object to store the stochastic system. + + It store the Hamiltonian (not Liouvillian when possible), and sc_ops. + dims and flags are provided to be usable the the base ``Solver`` class. + + We don't want to use the cython rhs (``StochasticOpenSystem``, etc.) since + the rouchon integrator need the part but does not use the usual drift and + diffusion computation. 
+ """ + + def __init__(self, issuper, H, sc_ops, c_ops, heterodyne): + + if not isinstance(H, (Qobj, QobjEvo)) or not H.isoper: + raise TypeError("The Hamiltonian must be am operator") + self.H = QobjEvo(H) + + if isinstance(sc_ops, (Qobj, QobjEvo)): + sc_ops = [sc_ops] + self.sc_ops = [QobjEvo(c_op) for c_op in sc_ops] + + if isinstance(c_ops, (Qobj, QobjEvo)): + c_ops = [c_ops] + self.c_ops = [QobjEvo(c_op) for c_op in c_ops] + + if any(not c_op.isoper for c_op in c_ops): + raise TypeError("c_ops must be operators") + + if any(not c_op.isoper for c_op in sc_ops): + raise TypeError("sc_ops must be operators") + + self.issuper = issuper + self.heterodyne = heterodyne + + if heterodyne: + sc_ops = [] + for c_op in self.sc_ops: + sc_ops.append(c_op / np.sqrt(2)) + sc_ops.append(c_op * (-1j / np.sqrt(2))) + self.sc_ops = sc_ops + + if self.issuper and not self.H.issuper: + self.dims = [self.H.dims, self.H.dims] + else: + self.dims = self.H.dims + + def __call__(self, options): + if self.issuper: + return StochasticOpenSystem( + self.H, self.sc_ops, self.c_ops, options.get("derr_dt", 1e-6) + ) + else: + return StochasticClosedSystem(self.H, self.sc_ops) + + +def smesolve( + H, rho0, tlist, c_ops=(), sc_ops=(), heterodyne=False, *, + e_ops=(), args={}, ntraj=500, options=None, + seeds=None, target_tol=None, timeout=None, +): + """ + Solve stochastic master equation. + + Parameters + ---------- + H : :class:`Qobj`, :class:`QobjEvo`, :class:`QobjEvo` compatible format. + System Hamiltonian as a Qobj or QobjEvo for time-dependent + Hamiltonians. List of [:class:`Qobj`, :class:`Coefficient`] or callable + that can be made into :class:`QobjEvo` are also accepted. + + rho0 : :class:`qutip.Qobj` + Initial density matrix or state vector (ket). + + tlist : *list* / *array* + List of times for :math:`t`. + + c_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format) + Deterministic collapse operator which will contribute with a standard + Lindblad type of dissipation. 
+ + sc_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format) + List of stochastic collapse operators. + + e_ops : : :class:`qutip.qobj`, callable, or list. + Single operator or list of operators for which to evaluate + expectation values or callable or list of callable. + Callable signature must be, `f(t: float, state: Qobj)`. + See :func:`expect` for more detail of operator expectation. + + args : None / *dictionary* + Dictionary of parameters for time-dependent Hamiltonians and + collapse operators. + + ntraj : int [500] + Number of trajectories to compute. + + heterodyne : bool [False] + Whether to use heterodyne or homodyne detection. + + seeds : int, SeedSequence, list, [optional] + Seed for the random number generator. It can be a single seed used to + spawn seeds for each trajectory or a list of seeds, one for each + trajectory. Seeds are saved in the result and they can be reused with:: + + seeds=prev_result.seeds + + When using a parallel map, the trajectories can be re-ordered. + + target_tol : {float, tuple, list}, optional + Target tolerance of the evolution. The evolution will compute + trajectories until the error on the expectation values is lower than + this tolerance. The maximum number of trajectories employed is + given by ``ntraj``. The error is computed using jackknife resampling. + ``target_tol`` can be an absolute tolerance or a pair of absolute and + relative tolerance, in that order. Lastly, it can be a list of pairs of + ``(atol, rtol)`` for each e_ops. + + timeout : float [optional] + Maximum time for the evolution in second. When reached, no more + trajectories will be computed. Overwrite the option of the same name. + + options : None / dict + Dictionary of options for the solver. + + - store_final_state : bool, [False] + Whether or not to store the final state of the evolution in the + result class. + - store_states : bool, None, [None] + Whether or not to store the state vectors or density matrices. 
+        On `None` the states will be saved if no expectation operators are
+        given.
+    - store_measurement: bool, [False]
+        Whether to store the measurement and wiener process for each
+        trajectory.
+    - keep_runs_results : bool, [False]
+        Whether to store results from all trajectories or just store the
+        averages.
+    - normalize_output : bool, [False]
+        Normalize output state to hide ODE numerical errors.
+    - progress_bar : str {'text', 'enhanced', 'tqdm', ''}, ["text"]
+        How to present the solver progress.
+        'tqdm' uses the python module of the same name and raises an error
+        if not installed. Empty string or False will disable the bar.
+    - progress_kwargs : dict, [{"chunk_size": 10}]
+        kwargs to pass to the progress_bar. Qutip's bars use `chunk_size`.
+    - method : str, ["rouchon"]
+        Which stochastic differential equation integration method to use.
+        Main ones are {"euler", "rouchon", "platen", "taylor1.5_imp"}
+    - map : str {"serial", "parallel", "loky"}, ["serial"]
+        How to run the trajectories. "parallel" uses the concurrent module
+        to run in parallel while "loky" uses the module of the same name to
+        do so.
+    - job_timeout : NoneType, int, [None]
+        Maximum time to compute one trajectory.
+    - num_cpus : NoneType, int, [None]
+        Number of cpus to use when running in parallel. ``None`` detects the
+        number of available cpus.
+    - dt : float [0.001 ~ 0.0001]
+        The finite step length for the stochastic integration method.
+        Default changes depending on the integrator.
+
+    Other options could be supported depending on the integration method,
+    see `SIntegrator <./classes.html#classes-sode>`_.
+
+    Returns
+    -------
+
+    output: :class:`qutip.solver.Result`
+
+        An instance of the class :class:`qutip.solver.Result`.
+ """ + H = QobjEvo(H, args=args, tlist=tlist) + c_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in c_ops] + sc_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in sc_ops] + sol = SMESolver( + H, sc_ops, c_ops=c_ops, options=options, heterodyne=heterodyne + ) + return sol.run( + rho0, tlist, ntraj, e_ops=e_ops, + seed=seeds, target_tol=target_tol, timeout=timeout, + ) + + +def ssesolve( + H, psi0, tlist, sc_ops=(), heterodyne=False, *, + e_ops=(), args={}, ntraj=500, options=None, + seeds=None, target_tol=None, timeout=None, +): + """ + Solve stochastic Schrodinger equation. + + Parameters + ---------- + H : :class:`Qobj`, :class:`QobjEvo`, :class:`QobjEvo` compatible format. + System Hamiltonian as a Qobj or QobjEvo for time-dependent + Hamiltonians. List of [:class:`Qobj`, :class:`Coefficient`] or callable + that can be made into :class:`QobjEvo` are also accepted. + + psi0 : :class:`qutip.Qobj` + Initial state vector (ket). + + tlist : *list* / *array* + List of times for :math:`t`. + + sc_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format) + List of stochastic collapse operators. + + e_ops : :class:`qutip.qobj`, callable, or list. + Single operator or list of operators for which to evaluate + expectation values or callable or list of callable. + Callable signature must be, `f(t: float, state: Qobj)`. + See :func:`expect` for more detail of operator expectation. + + args : None / *dictionary* + Dictionary of parameters for time-dependent Hamiltonians and + collapse operators. + + ntraj : int [500] + Number of trajectories to compute. + + heterodyne : bool [False] + Whether to use heterodyne or homodyne detection. + + seeds : int, SeedSequence, list, [optional] + Seed for the random number generator. It can be a single seed used to + spawn seeds for each trajectory or a list of seeds, one for each + trajectory. 
Seeds are saved in the result and they can be reused with:: + + seeds=prev_result.seeds + + target_tol : {float, tuple, list}, optional + Target tolerance of the evolution. The evolution will compute + trajectories until the error on the expectation values is lower than + this tolerance. The maximum number of trajectories employed is + given by ``ntraj``. The error is computed using jackknife resampling. + ``target_tol`` can be an absolute tolerance or a pair of absolute and + relative tolerance, in that order. Lastly, it can be a list of pairs of + (atol, rtol) for each e_ops. + + timeout : float [optional] + Maximum time for the evolution in second. When reached, no more + trajectories will be computed. Overwrite the option of the same name. + + options : None / dict + Dictionary of options for the solver. + + - store_final_state : bool, [False] + Whether or not to store the final state of the evolution in the + result class. + - store_states : bool, None, [None] + Whether or not to store the state vectors or density matrices. + On `None` the states will be saved if no expectation operators are + given. + - store_measurement: bool, [False] + Whether to store the measurement and wiener process, or brownian + noise for each trajectories. + - keep_runs_results : bool, [False] + Whether to store results from all trajectories or just store the + averages. + - normalize_output : bool, [False] + Normalize output state to hide ODE numerical errors. + - progress_bar : str {'text', 'enhanced', 'tqdm', ''}, ["text"] + How to present the solver progress. + 'tqdm' uses the python module of the same name and raise an error + if not installed. Empty string or False will disable the bar. + - progress_kwargs : dict, [{"chunk_size": 10}] + kwargs to pass to the progress_bar. Qutip's bars use `chunk_size`. + - method : str, ["rouchon"] + Which stochastic differential equation integration method to use. 
+        Main ones are {"euler", "rouchon", "platen", "taylor1.5_imp"}
+    - map : str {"serial", "parallel", "loky"}, ["serial"]
+        How to run the trajectories. "parallel" uses the concurrent module
+        to run in parallel while "loky" uses the module of the same name to
+        do so.
+    - job_timeout : NoneType, int, [None]
+        Maximum time to compute one trajectory.
+    - num_cpus : NoneType, int, [None]
+        Number of cpus to use when running in parallel. ``None`` detects the
+        number of available cpus.
+    - dt : float [0.001 ~ 0.0001]
+        The finite step length for the stochastic integration method.
+        Default changes depending on the integrator.
+
+    Other options could be supported depending on the integration method,
+    see `SIntegrator <./classes.html#classes-sode>`_.
+
+    Returns
+    -------
+
+    output: :class:`qutip.solver.Result`
+        An instance of the class :class:`qutip.solver.Result`.
+    """
+    H = QobjEvo(H, args=args, tlist=tlist)
+    sc_ops = [QobjEvo(c_op, args=args, tlist=tlist) for c_op in sc_ops]
+    sol = SSESolver(H, sc_ops, options=options, heterodyne=heterodyne)
+    return sol.run(
+        psi0, tlist, ntraj, e_ops=e_ops,
+        seed=seeds, target_tol=target_tol, timeout=timeout,
+    )
+
+
+class StochasticSolver(MultiTrajSolver):
+    """
+    Generic stochastic solver.
+ """ + + name = "StochasticSolver" + resultclass = StochasticResult + _avail_integrators = {} + system = None + solver_options = { + "progress_bar": "text", + "progress_kwargs": {"chunk_size": 10}, + "store_final_state": False, + "store_states": None, + "store_measurement": False, + "keep_runs_results": False, + "normalize_output": False, + "method": "taylor1.5", + "map": "serial", + "job_timeout": None, + "num_cpus": None, + "bitgenerator": None, + } + + def __init__(self, H, sc_ops, heterodyne, *, c_ops=(), options=None): + self.options = options + self._heterodyne = heterodyne + if self.name == "ssesolve" and c_ops: + raise ValueError("c_ops are not supported by ssesolve.") + + rhs = _StochasticRHS(self._open, H, sc_ops, c_ops, heterodyne) + super().__init__(rhs, options=options) + + if heterodyne: + self._m_ops = [] + for op in sc_ops: + self._m_ops += [op + op.dag(), -1j * (op - op.dag())] + self._dW_factors = np.ones(len(sc_ops) * 2) * 2**0.5 + else: + self._m_ops = [op + op.dag() for op in sc_ops] + self._dW_factors = np.ones(len(sc_ops)) + + @property + def heterodyne(self): + return self._heterodyne + + @property + def m_ops(self): + return self._m_ops + + @m_ops.setter + def m_ops(self, new_m_ops): + """ + Measurements operators. + + Default are: + + m_ops = sc_ops + sc_ops.dag() + + for homodyne detection, and + + m_ops = sc_ops + sc_ops.dag(), -1j*(sc_ops - sc_ops.dag()) + + for heterodyne detection. + + Measurements opput is computed as: + + expect(m_ops_i, state(t)) + dW_i / dt * dW_factors + + Where ``dW`` follows a gaussian distribution with norm 0 and derivation + of ``dt**0.5``. ``dt`` is the time difference between step in the + ``tlist``. + + ``m_ops`` can be overwritten, but the number of operators must be + constant. + """ + if len(new_m_ops) != len(self.m_ops): + if self.heterodyne: + raise ValueError( + f"2 `m_ops` per `sc_ops`, {len(self.rhs.sc_ops)} operators" + " are expected for heterodyne measurement." 
+ ) + else: + raise ValueError( + f"{len(self.rhs.sc_ops)} measurements " + "operators are expected." + ) + if not all( + isinstance(op, Qobj) and op.dims == self.rhs.sc_ops[0].dims + for op in new_m_ops + ): + raise ValueError( + "m_ops must be Qobj with the same dimensions" + " as the Hamiltonian" + ) + self._m_ops = new_m_ops + + @property + def dW_factors(self): + return self._dW_factors + + @dW_factors.setter + def dW_factors(self, new_dW_factors): + """ + Scaling of the noise on the measurements. + Default are ``1`` for homodyne and ``sqrt(1/2)`` for heterodyne. + ``dW_factors`` must be a list of the same length as ``m_ops``. + """ + if len(new_dW_factors) != len(self._dW_factors): + if self.heterodyne: + raise ValueError( + f"2 `dW_factors` per `sc_ops`, {len(self.rhs.sc_ops)} " + "values are expected for heterodyne measurement." + ) + else: + raise ValueError( + f"{len(self.rhs.sc_ops)} dW_factors are expected." + ) + self._dW_factors = new_dW_factors + + def _run_one_traj(self, seed, state, tlist, e_ops): + """ + Run one trajectory and return the result. 
+ """ + result = StochasticTrajResult( + e_ops, + self.options, + m_ops=self.m_ops, + dw_factor=self.dW_factors, + heterodyne=self.heterodyne, + ) + generator = self._get_generator(seed) + self._integrator.set_state(tlist[0], state, generator) + state_t = self._restore_state(state, copy=False) + result.add(tlist[0], state_t, None) + for t in tlist[1:]: + t, state, noise = self._integrator.integrate(t, copy=False) + state_t = self._restore_state(state, copy=False) + result.add(t, state_t, noise) + return seed, result + + @classmethod + def avail_integrators(cls): + if cls is StochasticSolver: + return cls._avail_integrators.copy() + return { + **StochasticSolver.avail_integrators(), + **cls._avail_integrators, + } + + @property + def options(self): + """ + Options for stochastic solver: + + store_final_state: bool, default=False + Whether or not to store the final state of the evolution in the + result class. + + store_states: bool, default=None + Whether or not to store the state vectors or density matrices. + On `None` the states will be saved if no expectation operators are + given. + + store_measurement: bool, [False] + Whether to store the measurement for each trajectories. + Storing measurements will also store the wiener process, or + brownian noise for each trajectories. + + progress_bar: str {'text', 'enhanced', 'tqdm', ''}, default="text" + How to present the solver progress. 'tqdm' uses the python module + of the same name and raise an error if not installed. Empty string + or False will disable the bar. + + progress_kwargs: dict, default={"chunk_size":10} + Arguments to pass to the progress_bar. Qutip's bars use + ``chunk_size``. + + keep_runs_results: bool + Whether to store results from all trajectories or just store the + averages. + + method: str, default="rouchon" + Which ODE integrator methods are supported. + + map: str {"serial", "parallel", "loky"}, default="serial" + How to run the trajectories. 
"parallel" uses concurent module to + run in parallel while "loky" use the module of the same name to do + so. + + job_timeout: None, int, default=None + Maximum time to compute one trajectory. + + num_cpus: None, int, default=None + Number of cpus to use when running in parallel. ``None`` detect the + number of available cpus. + + bitgenerator: {None, "MT19937", "PCG64DXSM", ...}, default=None + Which of numpy.random's bitgenerator to use. With ``None``, your + numpy version's default is used. + """ + return self._options + + @options.setter + def options(self, new_options): + MultiTrajSolver.options.fset(self, new_options) + + +class SMESolver(StochasticSolver): + r""" + Stochastic Master Equation Solver. + + Parameters + ---------- + H : :class:`Qobj`, :class:`QobjEvo`, :class:`QobjEvo` compatible format. + System Hamiltonian as a Qobj or QobjEvo for time-dependent + Hamiltonians. List of [:class:`Qobj`, :class:`Coefficient`] or callable + that can be made into :class:`QobjEvo` are also accepted. + + sc_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format) + List of stochastic collapse operators. + + heterodyne : bool, [False] + Whether to use heterodyne or homodyne detection. + + options : dict, [optional] + Options for the solver, see :obj:`SMESolver.options` and + `SIntegrator <./classes.html#classes-sode>`_ for a list of all options. + """ + name = "smesolve" + _avail_integrators = {} + _open = True + solver_options = { + "progress_bar": "text", + "progress_kwargs": {"chunk_size": 10}, + "store_final_state": False, + "store_states": None, + "store_measurement": False, + "keep_runs_results": False, + "normalize_output": False, + "method": "taylor1.5", + "map": "serial", + "job_timeout": None, + "num_cpus": None, + "bitgenerator": None, + } + + +class SSESolver(StochasticSolver): + r""" + Stochastic Schrodinger Equation Solver. + + Parameters + ---------- + H : :class:`Qobj`, :class:`QobjEvo`, :class:`QobjEvo` compatible format. 
+ System Hamiltonian as a Qobj or QobjEvo for time-dependent + Hamiltonians. List of [:class:`Qobj`, :class:`Coefficient`] or callable + that can be made into :class:`QobjEvo` are also accepted. + + c_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format) + Deterministic collapse operator which will contribute with a standard + Lindblad type of dissipation. + + sc_ops : list of (:class:`QobjEvo`, :class:`QobjEvo` compatible format) + List of stochastic collapse operators. + + heterodyne : bool, [False] + Whether to use heterodyne or homodyne detection. + + options : dict, [optional] + Options for the solver, see :obj:`SSESolver.options` and + `SIntegrator <./classes.html#classes-sode>`_ for a list of all options. + """ + name = "ssesolve" + _avail_integrators = {} + _open = False + solver_options = { + "progress_bar": "text", + "progress_kwargs": {"chunk_size": 10}, + "store_final_state": False, + "store_states": None, + "store_measurement": False, + "keep_runs_results": False, + "normalize_output": False, + "method": "platen", + "map": "serial", + "job_timeout": None, + "num_cpus": None, + "bitgenerator": None, + } diff --git a/qutip/tests/conftest.py b/qutip/tests/conftest.py index 49d4add02a..2ac625ee47 100644 --- a/qutip/tests/conftest.py +++ b/qutip/tests/conftest.py @@ -28,6 +28,7 @@ def _skip_cython_tests_if_unavailable(item): # importorskip rather than mark.skipif because this way we get pytest's # version-handling semantics. pytest.importorskip('Cython', minversion='0.14') + pytest.importorskip('filelock') @pytest.hookimpl(trylast=True) diff --git a/qutip/tests/core/data/conftest.py b/qutip/tests/core/data/conftest.py index 01d05c01e8..b4348eaa46 100644 --- a/qutip/tests/core/data/conftest.py +++ b/qutip/tests/core/data/conftest.py @@ -28,6 +28,33 @@ def shuffle_indices_scipy_csr(matrix): return out +def random_scipy_dia(shape, density, sort=False): + """ + Generate a random scipy dia matrix with the given shape, density. 
+ """ + num_diag = int(density * (shape[0] + shape[1] - 1)) or 1 + offsets = [] + data = [] + diags = np.random.choice( + np.arange(-shape[0] + 1, shape[1]), + num_diag, + replace=False + ) + for diag in diags: + offsets.append(diag) + num_elements = min( + shape[0], shape[1], shape[0] + diag, shape[1] - diag + ) + data.append( + np.random.rand(num_elements) + 1j*np.random.rand(num_elements) + ) + if sort: + order = np.argsort(offsets) + offsets = [offsets[i] for i in order] + data = [data[i] for i in order] + return scipy.sparse.diags(data, offsets, shape=shape).todia() + + def random_scipy_csr(shape, density, sorted_): """ Generate a random scipy CSR matrix with the given shape, nnz density, and @@ -64,3 +91,8 @@ def random_csr(shape, density, sorted_): def random_dense(shape, fortran): """Generate a random qutip Dense matrix of the given shape.""" return qutip.core.data.Dense(random_numpy_dense(shape, fortran)) + + +def random_diag(shape, density, sort=False): + """Generate a random qutip Dia matrix of the given shape and density""" + return qutip.core.data.Dia(random_scipy_dia(shape, density, sort)) diff --git a/qutip/tests/core/data/test_convert.py b/qutip/tests/core/data/test_convert.py index a7be4e6de9..917003dced 100644 --- a/qutip/tests/core/data/test_convert.py +++ b/qutip/tests/core/data/test_convert.py @@ -2,6 +2,7 @@ import pytest from scipy import sparse from qutip import data +from .test_mathematics import UnaryOpMixin def test_init_empty_data(): @@ -14,11 +15,16 @@ def test_init_empty_data(): @pytest.mark.parametrize(['base', 'dtype'], [ pytest.param(data.dense.zeros(2, 2), data.Dense, id='data.Dense'), pytest.param(data.csr.zeros(2, 2), data.CSR, id='data.CSR'), + pytest.param(data.dia.zeros(2, 2), data.Dia, id='data.Dia'), pytest.param(np.zeros((10, 10), dtype=np.complex128), data.Dense, id='array'), pytest.param(sparse.eye(10, dtype=np.complex128, format='csr'), data.CSR, id='sparse'), + pytest.param(sparse.eye(10, dtype=np.complex128, 
format='dia'), data.Dia, + id='diag'), pytest.param(np.zeros((10, 10), dtype=np.int32), data.Dense, id='array'), + pytest.param(sparse.eye(10, dtype=float, format='dia'), data.Dia, + id='diag'), pytest.param(sparse.eye(10, dtype=float, format='csr'), data.CSR, id='sparse'), ]) @@ -35,6 +41,9 @@ def test_create(base, dtype): pytest.param('csr', data.csr.zeros(2, 2), id='from CSR str'), pytest.param('CSR', data.csr.zeros(2, 2), id='from CSR STR'), pytest.param(data.CSR, data.csr.zeros(2, 2), id='from CSR type'), + pytest.param('Dia', data.dia.zeros(2, 2), id='from Dia STR'), + pytest.param('dia', data.dia.zeros(2, 2), id='from Dia str'), + pytest.param(data.Dia, data.dia.zeros(2, 2), id='from Dia type'), ]) @pytest.mark.parametrize(['to_', 'dtype'], [ pytest.param('dense', data.Dense, id='to Dense str'), @@ -43,6 +52,9 @@ def test_create(base, dtype): pytest.param('csr', data.CSR, id='to CSR str'), pytest.param('CSR', data.CSR, id='to CSR STR'), pytest.param(data.CSR, data.CSR, id='to CSR type'), + pytest.param('Dia', data.Dia, id='to Dia STR'), + pytest.param('dia', data.Dia, id='to Dia str'), + pytest.param(data.Dia, data.Dia, id='to Dia type'), ]) def test_converters(from_, base, to_, dtype): converter = data.to[to_, from_] @@ -76,3 +88,17 @@ def test_parse_error(input, error, msg): with pytest.raises(error) as exc: data.to.parse(input) assert str(exc.value) == msg + + +class TestConvert(UnaryOpMixin): + def op_numpy(self, mat): + return mat + + specialisations = [ + pytest.param(data.dense.from_csr, data.CSR, data.Dense), + pytest.param(data.dense.from_dia, data.Dia, data.Dense), + pytest.param(data.csr.from_dense, data.Dense, data.CSR), + pytest.param(data.csr.from_dia, data.Dia, data.CSR), + pytest.param(data.dia.from_dense, data.Dense, data.Dia), + pytest.param(data.dia.from_csr, data.CSR, data.Dia), + ] diff --git a/qutip/tests/core/data/test_csr.py b/qutip/tests/core/data/test_csr.py index 351edff98c..834c9febdb 100644 --- a/qutip/tests/core/data/test_csr.py 
+++ b/qutip/tests/core/data/test_csr.py @@ -343,6 +343,109 @@ def test_one_element_error(self, shape, position, value): " out of bound: ") +class TestFromCSRBlocks: + class _blocks: + def __init__(self, rows, cols, ops, n_blocks=2, block_size=2): + self.rows = np.array(rows, dtype=data.base.idxint_dtype) + self.cols = np.array(cols, dtype=data.base.idxint_dtype) + if isinstance(ops, int): + ops = [csr.zeros(block_size, block_size)] * ops + self.ops = np.array(ops, dtype=object) + self.n_blocks = n_blocks + self.block_size = block_size + + def from_csr_blocks(self): + return csr._from_csr_blocks( + self.rows, self.cols, self.ops, + self.n_blocks, self.block_size, + ) + + @pytest.mark.parametrize(['blocks'], [ + pytest.param(_blocks((0, 1), (0,), 1), id='rows neq ops'), + pytest.param(_blocks((0,), (0, 1), 1), id='cols neq ops'), + ]) + def test_input_length_error(self, blocks): + with pytest.raises(ValueError) as exc: + blocks.from_csr_blocks() + assert str(exc.value) == ( + "The arrays block_rows, block_cols and block_ops should have" + " the same length." + ) + + def test_op_shape_error(self): + blocks = self._blocks( + (0, 1), (0, 1), + (csr.zeros(2, 2), csr.zeros(3, 3)), + ) + with pytest.raises(ValueError) as exc: + blocks.from_csr_blocks() + assert str(exc.value) == ( + "Block operators (block_ops) do not have the correct shape." + ) + + @pytest.mark.parametrize(['blocks'], [ + pytest.param(_blocks((1, 0), (0, 1), 2), id='rows not ordered'), + pytest.param(_blocks((1, 1), (1, 0), 2), id='cols not ordered'), + pytest.param(_blocks((1, 0), (1, 0), 2), id='non-unique block'), + ]) + def test_op_ordering_error(self, blocks): + with pytest.raises(ValueError) as exc: + blocks.from_csr_blocks() + assert str(exc.value) == ( + "The arrays block_rows and block_cols must be sorted" + " by (row, column)." 
+ ) + + @pytest.mark.parametrize(['blocks'], [ + pytest.param(_blocks((), (), ()), id='no ops'), + pytest.param(_blocks((0, 1), (1, 0), 2), id='zero ops'), + ]) + def test_zeros_output_fast_paths(self, blocks): + out = blocks.from_csr_blocks() + assert out == csr.zeros(2 * 2, 2 * 2) + assert csr.nnz(out) == 0 + + def test_construct_identity_with_empty(self): + # users are not expected to be exposed to + # csr.empty directly, but it is good to + # avoid segfaults, so we test passing + # csr.empty(..) blocks here explicitly + blocks = self._blocks( + [0, 0, 1, 1], [0, 1, 0, 1], [ + csr.identity(2), csr.empty(2, 2, 0), + csr.empty(2, 2, 0), csr.identity(2), + ], + ) + out = blocks.from_csr_blocks() + assert out == csr.identity(4) + assert csr.nnz(out) == 4 + + def test_construct_identity_with_zeros(self): + blocks = self._blocks( + [0, 0, 1, 1], [0, 1, 0, 1], [ + csr.identity(2), csr.zeros(2, 2), + csr.zeros(2, 2), csr.identity(2), + ], + ) + out = blocks.from_csr_blocks() + assert out == csr.identity(4) + assert csr.nnz(out) == 4 + + def test_construct_kron(self): + A = np.array([[1, 2], [3, 4]]) + B = np.array([[0.3, 0.35], [0.4, 0.45]]) + B_op = data.to("csr", data.Dense(B)) + blocks = self._blocks( + [0, 0, 1, 1], [0, 1, 0, 1], [ + A[0, 0] * B_op, A[0, 1] * B_op, + A[1, 0] * B_op, A[1, 1] * B_op, + ] + ) + out = blocks.from_csr_blocks() + assert out == data.kron(data.Dense(A), data.Dense(B)) + assert csr.nnz(out) == 16 + + def test_tidyup(): small = qeye(1) * 1e-5 with CoreOptions(auto_tidyup_atol=1e-3): diff --git a/qutip/tests/core/data/test_dense.py b/qutip/tests/core/data/test_dense.py index 655595d1a4..d66a7a07c4 100644 --- a/qutip/tests/core/data/test_dense.py +++ b/qutip/tests/core/data/test_dense.py @@ -2,7 +2,7 @@ import pytest from qutip.core import data -from qutip.core.data import dense +from qutip.core.data import dense, csr from . 
import conftest @@ -286,3 +286,22 @@ def test_one_element_error(self, shape, position, value): base = data.one_element_dense(shape, position, value) assert str(exc.value).startswith("Position of the elements" " out of bound: ") + + +def test_OrderEfficiencyWarning(): + N = 5 + M = csr.identity(N) + C_ordered = dense.zeros(N, 1, fortran=False) + fortran_ordered = dense.zeros(N, 1, fortran=True) + with pytest.warns(dense.OrderEfficiencyWarning): + data.matmul_csr_dense_dense(M, C_ordered, out=fortran_ordered) + + +@pytest.mark.parametrize("fortran", [True, False]) +@pytest.mark.parametrize("func", + [data.zeros_like_dense, data.identity_like_dense] +) +def test_like_keep_order(func, fortran): + old = dense.zeros(3, 3, fortran=fortran) + new = func(old) + assert new.fortran == old.fortran diff --git a/qutip/tests/core/data/test_dia.py b/qutip/tests/core/data/test_dia.py new file mode 100644 index 0000000000..4cd77ff317 --- /dev/null +++ b/qutip/tests/core/data/test_dia.py @@ -0,0 +1,351 @@ +import numpy as np +import scipy.sparse +import pytest + +from qutip.core import data, qeye, CoreOptions +from qutip.core.data import dia, Dense, Dia + +from . import conftest + +# We only choose a small subset of dtypes to test so it isn't crazy. +_dtype_complex = ['complex128'] +_dtype_float = ['float64'] +_dtype_int = ['int32', 'int64'] +_dtype_uint = ['uint32'] + + +# Set up some fixtures for automatic parametrisation. 
+ +@pytest.fixture(params=[ + pytest.param((1, 5), id='ket'), + pytest.param((5, 1), id='bra'), + pytest.param((5, 5), id='square'), + pytest.param((2, 4), id='wide'), + pytest.param((4, 2), id='tall'), +]) +def shape(request): return request.param + +@pytest.fixture(params=[ + pytest.param(0, id='empty'), + pytest.param(1, id='full'), + pytest.param(0.2, id='sparse'), +]) +def density(request): return request.param + + +@pytest.fixture(scope='function') +def scipy_dia(shape, density): + return conftest.random_scipy_dia(shape, density) + + +def _valid_scipy(): + """Arbitrary valid scipy Dia""" + return conftest.random_scipy_dia((10, 10), 0.5) + + +def _valid_arg(): + """ + Arbitrary valid 3-tuple which is a valid `arg` parameter for __init__. + """ + sci = _valid_scipy() + return (sci.data, sci.offsets) + + +@pytest.fixture(scope='function') +def data_diag(shape, density): + return conftest.random_diag(shape, density) + + +class TestClassMethods: + def test_init_from_scipy(self, scipy_dia): + """Test that __init__ can accept a scipy dia matrix.""" + out = dia.Dia(scipy_dia) + assert out.shape == scipy_dia.shape + assert (out.as_scipy() - scipy_dia).nnz == 0 + + def test_init_from_tuple(self, scipy_dia): + """ + Test that __init__ does not throw when passed a 3-tuple. Also tests + the as_scipy() method succeeds. + """ + arg = (scipy_dia.data, scipy_dia.offsets) + out = dia.Dia(arg, shape=scipy_dia.shape) + assert out.shape == scipy_dia.shape + assert (out.as_scipy() - scipy_dia).nnz == 0 + + @pytest.mark.parametrize('d_type', ( + _dtype_complex + _dtype_float + _dtype_int + _dtype_uint + )) + @pytest.mark.parametrize('o_type', _dtype_int + _dtype_uint) + def test_init_from_tuple_allowed_dtypes(self, d_type, o_type): + """ + Test that initialisation can use a variety of dtypes and converts into + the correct type. 
+ """ + sci = _valid_scipy() + data = sci.data.real.astype(d_type, casting='unsafe') + offsets = sci.offsets.astype(o_type, casting='unsafe') + scipy_dia = scipy.sparse.dia_matrix((data, offsets), shape=sci.shape) + out = dia.Dia((data, offsets), shape=sci.shape) + out_scipy = out.as_scipy() + assert out.shape == scipy_dia.shape + assert out_scipy.data.dtype == np.complex128 + assert (out_scipy - scipy_dia).nnz == 0 + + @pytest.mark.parametrize(['arg', 'kwargs', 'error'], [ + pytest.param((), {}, ValueError, id="arg 0 tuple"), + pytest.param((None,), {}, ValueError, id="arg 1 tuple"), + pytest.param((None,)*2, {}, TypeError, id="arg None tuple"), + pytest.param((None,)*3, {}, ValueError, id="arg 3 tuple"), + pytest.param(_valid_scipy(), {'shape': ()}, ValueError, + id="scipy-shape 0 tuple"), + pytest.param(_valid_scipy(), {'shape': (1,)}, ValueError, + id="scipy-shape 1 tuple"), + pytest.param(_valid_scipy(), {'shape': (None, None)}, ValueError, + id="scipy-shape None tuple"), + pytest.param(_valid_scipy(), {'shape': [2, 2]}, ValueError, + id="scipy-shape list"), + pytest.param(_valid_scipy(), {'shape': (1, 2, 3)}, ValueError, + id="scipy-shape 3 tuple"), + pytest.param(_valid_arg(), {'shape': ()}, ValueError, + id="arg-shape 0 tuple"), + pytest.param(_valid_arg(), {'shape': (1,)}, ValueError, + id="arg-shape 1 tuple"), + pytest.param(_valid_arg(), {'shape': (None, None)}, ValueError, + id="arg-shape None tuple"), + pytest.param(_valid_arg(), {'shape': [2, 2]}, TypeError, + id="arg-shape list"), + pytest.param(_valid_arg(), {'shape': (1, 2, 3)}, ValueError, + id="arg-shape 3 tuple"), + pytest.param(_valid_arg(), {'shape': (-1, -1)}, ValueError, + id="arg-negative shape"), + ]) + def test_init_from_wrong_input(self, arg, kwargs, error): + """ + Test that the __init__ method raises a suitable error when passed + incorrectly formatted inputs. 
+
+        This test also serves as a *partial* check that Dia safely handles
+        deallocation in the presence of exceptions in its __init__ method.  If
+        the tests segfault, it's quite likely that the memory management isn't
+        being done correctly in the hand-off between us setting up our data
+        buffers and marking that numpy actually owns the data.
+        """
+        with pytest.raises(error):
+            dia.Dia(arg, **kwargs)
+
+    def test_copy_returns_a_correct_copy(self, data_diag):
+        """
+        Test that the copy() method produces an actual copy, and that the
+        result represents the same matrix.
+        """
+        original = data_diag
+        copy = data_diag.copy()
+        assert original is not copy
+        assert np.all(original.to_array() == copy.to_array())
+
+    def test_as_scipy_returns_a_view(self, data_diag):
+        """
+        Test that modifying the views in the result of as_scipy() also modifies
+        the underlying data structures.  This is important for allowing minor
+        data modifications from within Python-space.
+        """
+        unmodified_copy = data_diag.copy()
+        data_diag.as_scipy().data += 1
+        modified_copy = data_diag.copy()
+        assert np.any(data_diag.to_array() != unmodified_copy.to_array())
+        assert np.all(data_diag.to_array() == modified_copy.to_array())
+
+    def test_as_scipy_caches_result(self, data_diag):
+        """
+        Test that the as_scipy() method always returns the same view, even if
+        called multiple times.
+        """
+        assert data_diag.as_scipy() is data_diag.as_scipy()
+
+    def test_as_scipy_of_dia_from_scipy_is_different(self, scipy_dia):
+        """
+        Test that we produce a new scipy matrix, regardless of how we have
+        initialised the type.
+        """
+        assert dia.Dia(scipy_dia).as_scipy() is not scipy_dia
+
+    def test_as_scipy_of_copy_is_different(self, data_diag):
+        """
+        Test that as_scipy() does not return the same array, or the same views
+        if it's not the same input matrix.  We don't want two Dia matrices to
+        be linked.
+ """ + original = data_diag.as_scipy() + copy = data_diag.copy().as_scipy() + assert original is not copy + assert not np.may_share_memory(original.data, copy.data) + assert not np.may_share_memory(original.offsets, copy.offsets) + + def test_as_scipy_is_correct_result(self, scipy_dia): + """ + Test that as_scipy is actually giving the matrix we expect for a given + input. + """ + data_diag = dia.Dia(scipy_dia) + assert isinstance(data_diag.as_scipy(), scipy.sparse.dia_matrix) + assert (data_diag.as_scipy() - scipy_dia).nnz == 0 + + def test_as_scipy_of_uninitialised_is_empty(self, shape): + ndiag = 0 + base = dia.empty(shape[0], shape[1], ndiag) + sci = base.as_scipy() + assert len(sci.data) == 0 + assert len(sci.offsets) == 0 + + def test_to_array_is_correct_result(self, data_diag): + test_array = data_diag.to_array() + assert isinstance(test_array, np.ndarray) + # It's not enough to be accurate within a tolerance here - there's no + # mathematics, so they should be _identical_. + assert np.all(test_array == data_diag.as_scipy().toarray()) + + +class TestFactoryMethods: + def test_empty(self, shape, density): + ndiag = int(shape[0] * shape[1] * density) or 1 + base = dia.empty(shape[0], shape[1], ndiag) + sci = base.as_scipy(full=True) + assert isinstance(base, dia.Dia) + assert isinstance(sci, scipy.sparse.dia_matrix) + assert base.shape == shape + assert sci.data.shape == (ndiag, shape[1]) + assert sci.offsets.shape == (ndiag,) + + def test_zeros(self, shape): + base = dia.zeros(shape[0], shape[1]) + sci = base.as_scipy() + assert isinstance(base, dia.Dia) + assert base.shape == shape + assert sci.nnz == 0 + assert np.all(base.to_array() == 0) + + @pytest.mark.parametrize('dimension', [1, 5, 100]) + @pytest.mark.parametrize( + 'scale', + [None, 2, -0.1, 1.5, 1.5+1j], + ids=['none', 'int', 'negative', 'float', 'complex'] + ) + def test_identity(self, dimension, scale): + # scale=None is testing that the default value returns the identity. 
+ base = (dia.identity(dimension) if scale is None + else dia.identity(dimension, scale)) + sci = base.as_scipy() + scipy_test = scipy.sparse.eye(dimension, + dtype=np.complex128, format='dia') + if scale is not None: + scipy_test *= scale + assert isinstance(base, dia.Dia) + assert base.shape == (dimension, dimension) + assert sci.nnz == dimension + assert (sci - scipy_test).nnz == 0 + + + @pytest.mark.parametrize(['diagonals', 'offsets', 'shape'], [ + pytest.param([2j, 3, 5, 9], None, None, id='main diagonal'), + pytest.param([1], None, None, id='1x1'), + pytest.param([[0.2j, 0.3]], None, None, id='main diagonal list'), + pytest.param([0.2j, 0.3], 2, None, id='superdiagonal'), + pytest.param([0.2j, 0.3], -2, None, id='subdiagonal'), + pytest.param([[0.2, 0.3, 0.4], [0.1, 0.9]], [-2, 3], None, + id='two diagonals'), + pytest.param([1, 2, 3], 0, (3, 5), id='main wide'), + pytest.param([1, 2, 3], 0, (5, 3), id='main tall'), + pytest.param([[1, 2, 3], [4, 5]], [-1, -2], (4, 8), id='two wide sub'), + pytest.param([[1, 2, 3, 4], [4, 5, 4j, 1j]], [1, 2], (4, 8), + id='two wide super'), + pytest.param([[1, 2, 3], [4, 5]], [1, 2], (8, 4), id='two tall super'), + pytest.param([[1, 2, 3, 4], [4, 5, 4j, 1j]], [-1, -2], (8, 4), + id='two tall sub'), + pytest.param([[1, 2, 3], [4, 5, 6], [1, 2]], [1, -1, -2], (4, 4), + id='out of order'), + pytest.param([[1, 2, 3], [4, 5, 6], [1, 2]], [1, 1, -2], (4, 4), + id='sum duplicates'), + ]) + def test_diags(self, diagonals, offsets, shape): + base = dia.diags(diagonals, offsets, shape) + # Build numpy version test. 
+ if not isinstance(diagonals[0], list): + diagonals = [diagonals] + offsets = np.atleast_1d(offsets if offsets is not None else [0]) + if shape is None: + size = len(diagonals[0]) + abs(offsets[0]) + shape = (size, size) + test = np.zeros(shape, dtype=np.complex128) + for diagonal, offset in zip(diagonals, offsets): + test[np.where(np.eye(*shape, k=offset) == 1)] += diagonal + assert isinstance(base, dia.Dia) + assert base.shape == shape + np.testing.assert_allclose(base.to_array(), test, rtol=1e-10) + + + @pytest.mark.parametrize(['shape', 'position', 'value'], [ + pytest.param((1, 1), (0, 0), None, id='minimal'), + pytest.param((10, 10), (5, 5), 1.j, id='on diagonal'), + pytest.param((10, 10), (1, 5), 1., id='upper'), + pytest.param((10, 10), (5, 1), 2., id='lower'), + pytest.param((10, 1), (5, 0), None, id='column'), + pytest.param((1, 10), (0, 5), -5.j, id='row'), + pytest.param((10, 2), (5, 1), 1+2j, id='tall'), + pytest.param((2, 10), (1, 5), 10, id='wide'), + ]) + def test_one_element(self, shape, position, value): + test = np.zeros(shape, dtype=np.complex128) + if value is None: + base = data.one_element_dia(shape, position) + test[position] = 1.0+0.0j + else: + base = data.one_element_dia(shape, position, value) + test[position] = value + assert isinstance(base, data.Dia) + assert base.shape == shape + assert np.allclose(base.to_array(), test, atol=1e-10) + + @pytest.mark.parametrize(['shape', 'position', 'value'], [ + pytest.param((0, 0), (0, 0), None, id='zero shape'), + pytest.param((10, -2), (5, 0), 1.j, id='neg shape'), + pytest.param((10, 10), (10, 5), 1., id='outside'), + pytest.param((10, 10), (5, -1), 2., id='outside neg'), + ]) + def test_one_element_error(self, shape, position, value): + with pytest.raises(ValueError) as exc: + base = data.one_element_dia(shape, position, value) + assert str(exc.value).startswith("Position of the elements" + " out of bound: ") + + +def test_tidyup(data_diag): + before = data_diag.to_array() + sp_before = 
data_diag.as_scipy().toarray() + largest = max(np.abs(before.real).max(), np.abs(before.imag).max()) + min_r = np.abs(before.real[np.abs(before.real) > 0]).min() + min_i = np.abs(before.imag[np.abs(before.imag) > 0]).min() + smallest = min(min_r, min_i) + print(largest, smallest) + if largest == smallest: + return + tol = (largest + smallest) / 2 + tidy = data.tidyup_dia(data_diag, tol, False) + # Inplace=False, does not modify the original + np.testing.assert_array_equal(data_diag.to_array(), before) + np.testing.assert_array_equal(data_diag.as_scipy().toarray(), sp_before) + # Is tidyup + assert not np.allclose(tidy.to_array(), before) + assert not np.allclose(tidy.as_scipy().toarray(), sp_before) + + data.tidyup_dia(data_diag, tol, True) + assert not np.allclose(data_diag.to_array(), before) + assert not np.allclose(data_diag.as_scipy().toarray(), sp_before) + + +def test_autotidyup(): + small = qeye(1) * 1e-5 + with CoreOptions(auto_tidyup_atol=1e-3): + assert (small + small).tr() == 0 + with CoreOptions(auto_tidyup_atol=1e-3, auto_tidyup=False): + assert (small + small).tr() == 2e-5 diff --git a/qutip/tests/core/data/test_dispatch.py b/qutip/tests/core/data/test_dispatch.py new file mode 100644 index 0000000000..c697564c73 --- /dev/null +++ b/qutip/tests/core/data/test_dispatch.py @@ -0,0 +1,180 @@ +import pytest +import itertools +import qutip +from qutip.core.data.dispatch import Dispatcher, _constructed_specialisation +import qutip.core.data as _data + + +class pseudo_dipatched: + def __init__(self, types, output): + if output: + self.output = types[-1] + self.inputs = types[:-1] + else: + self.output = False + self.inputs = types + + def __call__(self, *args, **kwargs): + assert len(args) == len(self.inputs) + assert not kwargs + for got, expected in zip(args, self.inputs): + assert isinstance(got, expected) + + if not self.output: + return + elif self.output is _data.Data: + return _data.zeros(1,1) + else: + return _data.zeros[self.output](1,1) + + +def 
_test_name(arg): + if isinstance(arg, bool): + return str(arg) + return ", ".join(spec.__name__ for spec in arg) + + +@pytest.mark.parametrize(['specialisation', "output"], [ + ((_data.Dense,), True), + ((_data.Data,), True), + ((_data.Dense,), False), + ((_data.Data,), False), + ((_data.Dense, _data.Dense), True), + ((_data.Dense, _data.CSR), True), + ((_data.Data, _data.Data), True), + ((_data.Data, _data.Dense), True), + ((_data.Dense, _data.Data), True), + ((_data.Dense, _data.Dense), False), + ((_data.Dense, _data.CSR), False), + ((_data.Data, _data.Data), False), + ((_data.Data, _data.Dense), False), + ((_data.Dense, _data.Data), False), + ((_data.Dense, _data.Dense, _data.Dense), True), + ((_data.Dense, _data.CSR, _data.CSR), True), + ((_data.Data, _data.Data, _data.Data), True), + ((_data.Data, _data.Dense, _data.Dense), True), + ((_data.Dense, _data.Data, _data.Data), True), +], ids=_test_name) +def test_build_full(specialisation, output): + """ + Test that the dispatched function can always be called with any input type. 
+ """ + def f(a=None, b=None, c=None, /): + """ + Doc + """ + + + n_input = len(specialisation) - output + dispatched = Dispatcher(f, ("a", "b", "c")[:n_input], output) + dispatched.add_specialisations( + [specialisation + (pseudo_dipatched(specialisation, output),)] + ) + + for in_types in itertools.product(_data.to.dtypes, repeat=n_input): + ins = tuple(_data.zeros[dtype](1, 1) for dtype in in_types) + + if not output: + out = dispatched(*ins) + out = dispatched[in_types](*ins) + + else: + out = dispatched(*ins) + assert out is not None + + out = dispatched[in_types](*ins) + assert out is not None + + if output: + for out_dtype in _data.to.dtypes: + + out = dispatched[in_types + (out_dtype,)](*ins) + if output: + assert isinstance(out, out_dtype) + + out = dispatched(*ins, dtype=out_dtype) + if output: + assert isinstance(out, out_dtype) + + +def test_Data_low_priority_one_dispatch(): + class func(): + __name__ = "dummy name" + def __call__(self, a, /): + return _data.zeros[_data.Dense](1, 1) + + f_dense = func() + f_data = func() + + dispatched = Dispatcher(f_dense, ("a",), False) + dispatched.add_specialisations([ + (_data.Dense, f_dense), (_data.Data, f_data)] + ) + + assert dispatched[_data.Dense] is f_dense + assert dispatched[_data.CSR] is f_data + + dispatched = Dispatcher(f_dense, (), True) + dispatched.add_specialisations([ + (_data.Dense, f_dense), (_data.Data, f_data)] + ) + + assert dispatched[_data.Dense] is f_dense + assert isinstance(dispatched[_data.CSR], _constructed_specialisation) + + +def test_Data_low_priority_two_dispatch(): + class func(): + __name__ = "" + def __init__(self): + self.count = 0 + + def __call__(self, a=None, b=None, /): + self.count += 1 + return _data.zeros[_data.Dense](1, 1) + + f_dense = func() + f_mixed = func() + f_data = func() + + dispatched = Dispatcher(f_dense, ("a", "b"), False) + dispatched.add_specialisations([ + (_data.Dense, _data.Dense, f_dense), + (_data.Dense, _data.Data, f_mixed), + (_data.Data, _data.Data, 
f_data), + ]) + + assert dispatched[_data.Dense, _data.Dense] is f_dense + assert dispatched[_data.Dense, _data.CSR] is f_mixed + assert dispatched[_data.CSR, _data.Dense] is f_data + assert dispatched[_data.CSR, _data.CSR] is f_data + + dispatched = Dispatcher(f_dense, ("a",), True) + dispatched.add_specialisations([ + (_data.Dense, _data.Dense, f_dense), + (_data.Dense, _data.Data, f_mixed), + (_data.Data, _data.Data, f_data), + ]) + + assert dispatched[_data.Dense, _data.Dense] is f_dense + assert dispatched[_data.CSR] is f_data + assert dispatched[_data.Dense] is f_dense + assert isinstance( + dispatched[_data.Dense, _data.CSR], _constructed_specialisation + ) + + assert f_mixed.count == 0 + dispatched[_data.Dense, _data.CSR](_data.zeros[_data.Dense](1, 1)) + assert f_mixed.count == 1 + + assert isinstance( + dispatched[_data.CSR, _data.Dense], _constructed_specialisation + ) + assert isinstance( + dispatched[_data.CSR, _data.CSR], _constructed_specialisation + ) + + assert f_data.count == 0 + dispatched[_data.CSR, _data.Dense](_data.zeros[_data.CSR](1, 1)) + dispatched[_data.CSR, _data.CSR](_data.zeros[_data.CSR](1, 1)) + assert f_data.count == 1 diff --git a/qutip/tests/core/data/test_expect.py b/qutip/tests/core/data/test_expect.py index 256a703a89..c43da02def 100644 --- a/qutip/tests/core/data/test_expect.py +++ b/qutip/tests/core/data/test_expect.py @@ -5,7 +5,7 @@ import pytest import numpy as np from qutip import data -from qutip.core.data import CSR, Dense +from qutip.core.data import CSR, Dense, Dia from itertools import product @@ -39,6 +39,9 @@ def op_numpy(self, op, state): pytest.param(data.expect_csr, CSR, CSR, complex), pytest.param(data.expect_dense, Dense, Dense, complex), pytest.param(data.expect_csr_dense, CSR, Dense, complex), + pytest.param(data.expect_dia, Dia, Dia, complex), + pytest.param(data.expect_dia_dense, Dia, Dense, complex), + pytest.param(data.expect_data, Dense, CSR, complex), ] @@ -64,4 +67,7 @@ def op_numpy(self, op, state): 
pytest.param(data.expect_super_dense, Dense, Dense, complex), pytest.param(data.expect_super_csr, CSR, CSR, complex), pytest.param(data.expect_super_csr_dense, CSR, Dense, complex), + pytest.param(data.expect_super_dia, Dia, Dia, complex), + pytest.param(data.expect_super_dia_dense, Dia, Dense, complex), + pytest.param(data.expect_super_data, CSR, Dense, complex), ] diff --git a/qutip/tests/core/data/test_linalg.py b/qutip/tests/core/data/test_linalg.py index 01681d4e23..56a1bb2b6b 100644 --- a/qutip/tests/core/data/test_linalg.py +++ b/qutip/tests/core/data/test_linalg.py @@ -3,9 +3,15 @@ import scipy import pytest import qutip +import warnings from qutip.core import data as _data -from qutip.core.data import Data, Dense, CSR +from qutip.core.data import Data, Dense, CSR, Dia + + +skip_no_mkl = pytest.mark.skipif( + not settings.has_mkl, reason="mkl not available" +) class TestSolve(): @@ -23,19 +29,23 @@ def _gen_ket(self, N, dtype): ("splu", {"csc": True}), ("gmres", {"atol": 1e-8}), ("lsqr", {}), - pytest.param( - "mkl_spsolve", {}, - marks=pytest.mark.skipif(not settings.has_mkl, reason="mkl not available") - ), + ("solve", {}), + ("lstsq", {}), + pytest.param("mkl_spsolve", {}, marks=skip_no_mkl), ], - ids=["spsolve", "splu", "gmres", "lsqr", "mkl_spsolve"] + ids=[ + "spsolve", "splu", "gmres", "lsqr", "solve", "lstsq", "mkl_spsolve" + ] ) - def test_mathematically_correct_CSR(self, method, opt): + @pytest.mark.parametrize('dtype', [CSR, Dia]) + def test_mathematically_correct_sparse(self, method, opt, dtype): """ Test that the binary operation is mathematically correct for all the known type specialisations. 
        """
-        A = self._gen_op(10, CSR)
+        if dtype is Dia and method == "mkl_spsolve":
+            pytest.skip("mkl is not supported for dia matrix")
+        A = self._gen_op(10, dtype)
         b = self._gen_ket(10, Dense)
         expected = self.op_numpy(A.to_array(), b.to_array())
         test = _data.solve_csr_dense(A, b, method, opt)
@@ -65,6 +75,14 @@ def test_mathematically_correct_Dense(self, method, opt):
                                    atol=1e-7, rtol=1e-7)
 
+    def test_singular(self):
+        A = qutip.num(2).data
+        b = qutip.basis(2, 1).data
+        with pytest.raises(ValueError) as err:
+            test1 = _data.solve(A, b)
+        assert "singular" in str(err.value).lower()
+
+
     def test_incorrect_shape_non_square(self):
         A = qutip.Qobj(np.random.rand(5, 10)).data
         b = qutip.Qobj(np.random.rand(10, 1)).data
@@ -104,8 +122,14 @@ def test_mathematically_correct_svd(self, shape):
         only_S = _data.svd(matrix, False)
 
         assert sum(test_S > 1e-10) == 6
-        np.testing.assert_allclose(test_U.to_array(), u, atol=1e-7, rtol=1e-7)
-        np.testing.assert_allclose(test_V.to_array(), v, atol=1e-7, rtol=1e-7)
+        # columns are defined up to a sign
+        np.testing.assert_allclose(
+            np.abs(test_U.to_array()), np.abs(u), atol=1e-7, rtol=1e-7
+        )
+        # rows are defined up to a sign
+        np.testing.assert_allclose(
+            np.abs(test_V.to_array()), np.abs(v), atol=1e-7, rtol=1e-7
+        )
         np.testing.assert_allclose(test_S, s, atol=1e-7, rtol=1e-7)
         np.testing.assert_allclose(only_S, s, atol=1e-7, rtol=1e-7)
diff --git a/qutip/tests/core/data/test_mathematics.py b/qutip/tests/core/data/test_mathematics.py
index ceeecda9a7..8d30aa9763 100644
--- a/qutip/tests/core/data/test_mathematics.py
+++ b/qutip/tests/core/data/test_mathematics.py
@@ -4,7 +4,7 @@
 import pytest
 
 from qutip.core import data
-from qutip.core.data import Data, Dense, CSR
+from qutip.core.data import Data, Dense, CSR, Dia
 
 from . import conftest
 
@@ -159,16 +159,37 @@ def factory(fortran):
     ]
 
 
+def cases_diag(shape):
+    """
+    Return a list of generators of the different special cases for Dia
+    matrices of a given shape.
+ """ + def factory(density, sort=False): + return lambda: conftest.random_diag(shape, density, sort) + + def zero_factory(): + return lambda: data.dia.zeros(shape[0], shape[1]) + + return [ + pytest.param(factory(0.001), id="sparse"), + pytest.param(factory(0.8, True), id="filled,sorted"), + pytest.param(factory(0.8, False), id="filled,unsorted"), + pytest.param(zero_factory(), id="zero"), + ] + + # Factory methods for generating the cases, mapping type to the function. # _ALL_CASES is for getting all the special cases to test, _RANDOM is for # getting just a single case from each. _ALL_CASES = { CSR: cases_csr, + Dia: cases_diag, Dense: cases_dense, } _RANDOM = { CSR: lambda shape: [lambda: conftest.random_csr(shape, 0.5, True)], Dense: lambda shape: [lambda: conftest.random_dense(shape, False)], + Dia: lambda shape: [lambda: conftest.random_diag(shape, 0.5)], } @@ -532,6 +553,7 @@ def op_numpy(self, left, right, scale): specialisations = [ pytest.param(data.add_csr, CSR, CSR, CSR), pytest.param(data.add_dense, Dense, Dense, Dense), + pytest.param(data.add_dia, Dia, Dia, Dia), ] # `add` has an additional scalar parameter, because the operation is @@ -568,6 +590,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.adjoint_csr, CSR, CSR), pytest.param(data.adjoint_dense, Dense, Dense), + pytest.param(data.adjoint_dia, Dia, Dia), ] @@ -578,6 +601,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.conj_csr, CSR, CSR), pytest.param(data.conj_dense, Dense, Dense), + pytest.param(data.conj_dia, Dia, Dia), ] @@ -614,7 +638,10 @@ def op_numpy(self, left, right, scalar_is_ket=False): specialisations = [ pytest.param(data.inner_csr, CSR, CSR, complex), + pytest.param(data.inner_dia, Dia, Dia, complex), pytest.param(data.inner_dense, Dense, Dense, complex), + pytest.param(data.inner_data, Dense, Dense, complex), + pytest.param(data.inner_data, CSR, CSR, complex), ] def generate_scalar_is_ket(self, metafunc): @@ -684,7 +711,9 @@ def 
op_numpy(self, left, mid, right, scalar_is_ket=False): specialisations = [ pytest.param(data.inner_op_csr, CSR, CSR, CSR, complex), + pytest.param(data.inner_op_dia, Dia, Dia, Dia, complex), pytest.param(data.inner_op_dense, Dense, Dense, Dense, complex), + pytest.param(data.inner_op_data, Dense, CSR, Dense, complex), ] def generate_scalar_is_ket(self, metafunc): @@ -732,6 +761,20 @@ def op_numpy(self, left, right): specialisations = [ pytest.param(data.kron_csr, CSR, CSR, CSR), pytest.param(data.kron_dense, Dense, Dense, Dense), + pytest.param(data.kron_dia, Dia, Dia, Dia), + ] + + +class TestKronT(BinaryOpMixin): + def op_numpy(self, left, right): + return np.kron(left.T, right) + + # Keep the dimension low because kron can get very expensive. + shapes = shapes_binary_unrestricted(dim=5) + bad_shapes = shapes_binary_bad_unrestricted(dim=5) + specialisations = [ + pytest.param(data.kron_transpose_data, CSR, CSR, CSR), + pytest.param(data.kron_transpose_dense, Dense, Dense, Dense), ] @@ -745,6 +788,9 @@ def op_numpy(self, left, right): pytest.param(data.matmul_csr, CSR, CSR, CSR), pytest.param(data.matmul_csr_dense_dense, CSR, Dense, Dense), pytest.param(data.matmul_dense, Dense, Dense, Dense), + pytest.param(data.matmul_dia, Dia, Dia, Dia), + pytest.param(data.matmul_dia_dense_dense, Dia, Dense, Dense), + pytest.param(data.matmul_dense_dia_dense, Dense, Dia, Dense), ] @@ -757,6 +803,7 @@ def op_numpy(self, left, right): specialisations = [ pytest.param(data.multiply_csr, CSR, CSR, CSR), pytest.param(data.multiply_dense, Dense, Dense, Dense), + pytest.param(data.multiply_dia, Dia, Dia, Dia), ] @@ -767,6 +814,7 @@ def op_numpy(self, matrix, scalar): specialisations = [ pytest.param(data.mul_csr, CSR, CSR), pytest.param(data.mul_dense, Dense, Dense), + pytest.param(data.mul_dia, Dia, Dia), ] @@ -777,6 +825,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.neg_csr, CSR, CSR), pytest.param(data.neg_dense, Dense, Dense), + pytest.param(data.neg_dia, 
Dia, Dia),
     ]
 
 
@@ -789,6 +838,7 @@ def op_numpy(self, left, right):
     specialisations = [
         pytest.param(data.sub_csr, CSR, CSR, CSR),
         pytest.param(data.sub_dense, Dense, Dense, Dense),
+        pytest.param(data.sub_dia, Dia, Dia, Dia),
     ]
 
 
@@ -801,6 +851,30 @@ def op_numpy(self, matrix):
     specialisations = [
         pytest.param(data.trace_csr, CSR, complex),
         pytest.param(data.trace_dense, Dense, complex),
+        pytest.param(data.trace_dia, Dia, complex),
+    ]
+
+
+class TestTrace_oper_ket(UnaryOpMixin):
+    def op_numpy(self, matrix):
+        N = int(matrix.shape[0] ** 0.5)
+        return np.sum(np.diag(matrix.reshape((N, N))))
+
+    shapes = [
+        (pytest.param((100, 1), id="oper-ket"),),
+    ]
+    bad_shapes = [
+        (pytest.param((1, 100), id="bra"),),
+        (pytest.param((99, 1), id="ket"),),
+        (pytest.param((99, 99), id="ket"),),
+        (pytest.param((2, 99), id="nonsquare"),),
+    ]
+    specialisations = [
+        pytest.param(data.trace_oper_ket_csr, CSR, complex),
+        pytest.param(data.trace_oper_ket_dense, Dense, complex),
+        pytest.param(data.trace_oper_ket_dia, Dia, complex),
+        pytest.param(data.trace_oper_ket_data, CSR, complex),
+        pytest.param(data.trace_oper_ket_data, Dense, complex),
     ]
 
 
@@ -813,6 +887,7 @@ def op_numpy(self, matrix, n):
     specialisations = [
         pytest.param(data.pow_csr, CSR, CSR),
         pytest.param(data.pow_dense, Dense, Dense),
+        pytest.param(data.pow_dia, Dia, Dia),
     ]
 
     @pytest.mark.parametrize("n", [0, 1, 10], ids=["n_0", "n_1", "n_10"])
@@ -835,6 +910,8 @@ def test_incorrect_shape_raises(self, op, data_m):
             op(data_m(), 10)
 
 
+# Scipy complains when creating a full dia matrix.
+@pytest.mark.filterwarnings("ignore:Constructing a DIA matrix") class TestExpm(UnaryOpMixin): def op_numpy(self, matrix): return scipy.linalg.expm(matrix) @@ -845,6 +922,7 @@ def op_numpy(self, matrix): pytest.param(data.expm_csr, CSR, CSR), pytest.param(data.expm_csr_dense, CSR, Dense), pytest.param(data.expm_dense, Dense, Dense), + pytest.param(data.expm_dia, Dia, Dia), ] @@ -866,6 +944,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.transpose_csr, CSR, CSR), pytest.param(data.transpose_dense, Dense, Dense), + pytest.param(data.transpose_dia, Dia, Dia), ] @@ -888,6 +967,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.project_csr, CSR, CSR), + pytest.param(data.project_dia, Dia, Dia), pytest.param(data.project_dense, Dense, Dense), ] @@ -930,3 +1010,26 @@ def op_numpy(self, matrix): pytest.param(_inv_csr, CSR, CSR), pytest.param(_inv_dense, Dense, Dense), ] + + +class TestZeros_like(UnaryOpMixin): + def op_numpy(self, matrix): + return np.zeros_like(matrix) + + specialisations = [ + pytest.param(data.zeros_like_data, CSR, CSR), + pytest.param(data.zeros_like_dense, Dense, Dense), + ] + + +class TestIdentity_like(UnaryOpMixin): + def op_numpy(self, matrix): + return np.eye(matrix.shape[0]) + + shapes = shapes_square() + bad_shapes = shapes_not_square() + + specialisations = [ + pytest.param(data.identity_like_data, CSR, CSR), + pytest.param(data.identity_like_dense, Dense, Dense), + ] diff --git a/qutip/tests/core/data/test_norm.py b/qutip/tests/core/data/test_norm.py index d285bb3ebf..cfc8ffbb4e 100644 --- a/qutip/tests/core/data/test_norm.py +++ b/qutip/tests/core/data/test_norm.py @@ -3,7 +3,7 @@ import scipy.linalg import pytest from qutip import data -from qutip.core.data import CSR, Dense +from qutip.core.data import CSR, Dense, Dia import numbers @@ -13,6 +13,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.norm.one_csr, CSR, numbers.Number), + pytest.param(data.norm.one_dia, Dia, 
numbers.Number), pytest.param(data.norm.one_dense, Dense, numbers.Number), ] @@ -23,6 +24,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.norm.frobenius_csr, CSR, numbers.Number), + pytest.param(data.norm.frobenius_dia, Dia, numbers.Number), pytest.param(data.norm.frobenius_dense, Dense, numbers.Number), ] @@ -35,6 +37,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.norm.max_csr, CSR, numbers.Number), + pytest.param(data.norm.max_dia, Dia, numbers.Number), pytest.param(data.norm.max_dense, Dense, numbers.Number), ] @@ -55,6 +58,7 @@ def op_numpy(self, matrix): ] specialisations = [ pytest.param(data.norm.l2_csr, CSR, numbers.Number), + pytest.param(data.norm.l2_dia, Dia, numbers.Number), pytest.param(data.norm.l2_dense, Dense, numbers.Number), ] diff --git a/qutip/tests/core/data/test_properties.py b/qutip/tests/core/data/test_properties.py index de844baec7..88697ee152 100644 --- a/qutip/tests/core/data/test_properties.py +++ b/qutip/tests/core/data/test_properties.py @@ -2,9 +2,9 @@ import pytest from qutip import data as _data +from qutip import CoreOptions - -@pytest.fixture(params=[_data.CSR, _data.Dense], ids=["CSR", "Dense"]) +@pytest.fixture(params=[_data.CSR, _data.Dense, _data.Dia], ids=["CSR", "Dense", "Dia"]) def datatype(request): return request.param @@ -73,9 +73,11 @@ def test_compare_implicit_zero_structure(self, datatype): zeros. """ - base = _data.to( - datatype, _data.create(np.array([[1, self.tol * 1e-3j], [0, 1]])) - ) + with CoreOptions(auto_tidyup=False): + base = _data.to( + datatype, + _data.create(np.array([[1, self.tol * 1e-3j], [0, 1]])) + ) # If this first line fails, the zero has been stored explicitly and so # the test is invalid. 
assert np.count_nonzero(base.to_array()) == 3 @@ -127,7 +129,8 @@ def test_compare_implicit_zero_random(self, datatype, density): base[np.random.rand(n, n) > density] = 0 np.fill_diagonal(base, self.tol * 1000) nnz = np.count_nonzero(base) - base = _data.to(datatype, _data.create(base)) + with CoreOptions(auto_tidyup=False): + base = _data.to(datatype, _data.create(base)) assert np.count_nonzero(base.to_array()) == nnz assert _data.isherm(base, tol=self.tol) assert _data.isherm(base.transpose(), tol=self.tol) @@ -143,7 +146,8 @@ def test_compare_implicit_zero_random(self, datatype, density): base[np.random.rand(n, n) > density] = 0 np.fill_diagonal(base, self.tol * 1000) nnz = np.count_nonzero(base) - base = _data.to(datatype, _data.create(base)) + with CoreOptions(auto_tidyup=False): + base = _data.to(datatype, _data.create(base)) assert np.count_nonzero(base.to_array()) == nnz assert not _data.isherm(base, tol=self.tol) assert not _data.isherm(base.transpose(), tol=self.tol) diff --git a/qutip/tests/core/data/test_ptrace.py b/qutip/tests/core/data/test_ptrace.py index 84fd41015d..74d25e8986 100644 --- a/qutip/tests/core/data/test_ptrace.py +++ b/qutip/tests/core/data/test_ptrace.py @@ -3,7 +3,7 @@ import scipy.linalg import pytest from qutip import data -from qutip.core.data import CSR, Dense +from qutip.core.data import CSR, Dense, Dia class TestPtrace(testing.UnaryOpMixin): @@ -38,6 +38,7 @@ def op_numpy(self, matrix, dims, sel): pytest.param(data.ptrace_csr, CSR, CSR), pytest.param(data.ptrace_csr_dense, CSR, Dense), pytest.param(data.ptrace_dense, Dense, Dense), + pytest.param(data.ptrace_dia, Dia, Dia), ] @pytest.mark.parametrize( diff --git a/qutip/tests/core/data/test_reshape.py b/qutip/tests/core/data/test_reshape.py index e86b327c8f..5f23b06b83 100644 --- a/qutip/tests/core/data/test_reshape.py +++ b/qutip/tests/core/data/test_reshape.py @@ -2,7 +2,7 @@ import pytest import numpy as np from qutip import data -from qutip.core.data import CSR, Dense +from 
qutip.core.data import CSR, Dense, Dia class TestSplitColumns(UnaryOpMixin): @@ -11,10 +11,12 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.split_columns_csr, CSR, list), + pytest.param(data.split_columns_dia, Dia, list), pytest.param(data.split_columns_dense, Dense, list), ] +@pytest.mark.filterwarnings("ignore:Constructing a DIA matrix") class TestColumnStack(UnaryOpMixin): def op_numpy(self, matrix): out_shape = (matrix.shape[0]*matrix.shape[1], 1) @@ -22,6 +24,7 @@ def op_numpy(self, matrix): specialisations = [ pytest.param(data.column_stack_csr, CSR, CSR), + pytest.param(data.column_stack_dia, Dia, Dia), pytest.param(data.column_stack_dense, Dense, Dense), ] @@ -43,6 +46,7 @@ def op_numpy(self, matrix, rows): specialisations = [ pytest.param(data.column_unstack_csr, CSR, CSR), + pytest.param(data.column_unstack_dia, Dia, Dia), pytest.param(data.column_unstack_dense, Dense, Dense), ] @@ -92,6 +96,7 @@ def op_numpy(self, matrix, rows, columns): specialisations = [ pytest.param(data.reshape_dense, Dense, Dense), + pytest.param(data.reshape_dia, Dia, Dia), pytest.param(data.reshape_csr, CSR, CSR), ] diff --git a/qutip/tests/core/test_brtools.py b/qutip/tests/core/test_brtools.py index 527ca89778..117b5b81bd 100644 --- a/qutip/tests/core/test_brtools.py +++ b/qutip/tests/core/test_brtools.py @@ -147,7 +147,7 @@ def test_br_term_linbblad_comp(func): diag = H.eigenenergies() skew = np.einsum('i,j->ji', np.ones(N), diag) - diag * np.ones((N, N)) spectrum = (skew > 0) * 1. 
- computation = func(A_op.data, spectrum, skew, 2).to_array() + computation = func(A_op.data, spectrum, skew, 1.5).to_array() lindblad = qutip.lindblad_dissipator(a).full() np.testing.assert_allclose(computation, lindblad, rtol=1e-14, atol=1e-14) @@ -333,4 +333,4 @@ def test_bloch_redfield_tensor_spectral_callable(): fock_basis=False ) assert isinstance(R_eigs, qutip.Qobj) - assert isinstance(evecs, qutip.Qobj) \ No newline at end of file + assert isinstance(evecs, qutip.Qobj) diff --git a/qutip/tests/core/test_coefficient.py b/qutip/tests/core/test_coefficient.py index 02287c5f58..bb5204c611 100644 --- a/qutip/tests/core/test_coefficient.py +++ b/qutip/tests/core/test_coefficient.py @@ -4,11 +4,12 @@ import numpy as np import scipy.interpolate as interp from functools import partial -from qutip.core.coefficient import (coefficient, norm, conj, +from qutip.core.coefficient import (coefficient, norm, conj, const, CompilationOptions, Coefficient, - clean_compiled_coefficient - ) -from qutip.core.options import CoreOptions + clean_compiled_coefficient, + WARN_MISSING_MODULE, + ) + # Ensure the latest version is tested clean_compiled_coefficient(True) @@ -94,6 +95,8 @@ def coeff_generator(style, func): if style == "steparraylog": return coefficient(base(tlistlog, **args), tlist=tlistlog, order=0) + if style == "const": + return const(2.0) @pytest.mark.parametrize(['base', 'kwargs', 'tol'], [ @@ -164,7 +167,8 @@ def test_CoeffCallArguments(base, tol): pytest.param("arraylog", id="logarray"), pytest.param("string", id="string"), pytest.param("steparray", id="steparray"), - pytest.param("steparraylog", id="steparraylog") + pytest.param("steparraylog", id="steparraylog"), + pytest.param("const", id="constant"), ]) @pytest.mark.parametrize(['transform', 'expected'], [ pytest.param(norm, lambda val: np.abs(val)**2, id="norm"), @@ -175,13 +179,19 @@ def test_CoeffUnitaryTransform(style, transform, expected): _assert_eq_over_interval(transform(coeff), lambda t: 
expected(coeff(t))) +def test_ConstantCoefficient(): + coeff = const(5.1) + _assert_eq_over_interval(coeff, lambda t: 5.1) + + @pytest.mark.parametrize(['style_left'], [ pytest.param("func", id="func"), pytest.param("array", id="array"), pytest.param("arraylog", id="logarray"), pytest.param("string", id="string"), pytest.param("steparray", id="steparray"), - pytest.param("steparraylog", id="steparraylog") + pytest.param("steparraylog", id="steparraylog"), + pytest.param("const", id="constant"), ]) @pytest.mark.parametrize(['style_right'], [ pytest.param("func", id="func"), @@ -189,7 +199,8 @@ def test_CoeffUnitaryTransform(style, transform, expected): pytest.param("arraylog", id="logarray"), pytest.param("string", id="string"), pytest.param("steparray", id="steparray"), - pytest.param("steparraylog", id="steparraylog") + pytest.param("steparraylog", id="steparraylog"), + pytest.param("const", id="constant"), ]) @pytest.mark.parametrize(['oper'], [ pytest.param(lambda a, b: a+b, id="sum"), @@ -230,6 +241,14 @@ def test_CoeffOptions(): assert not isinstance(coeff1, coeff2.__class__) +def test_warn_no_cython(): + option = CompilationOptions(use_cython=False) + WARN_MISSING_MODULE[0] = 1 + with pytest.warns( + UserWarning, match="`cython` and `filelock` are required" + ): + coefficient("t", compile_opt=option) + @pytest.mark.requires_cython @pytest.mark.parametrize(['codestring', 'args', 'reference'], [ pytest.param("cos(2*t)*cos(t*w1) + sin(w1*w2/2*t)*sin(t*w2)" @@ -275,7 +294,7 @@ def test_advance_use(): from qutip.core.data cimport CSR from qutip.core.data.expect cimport expect_csr """) - csr = qutip.num(3).data + csr = qutip.num(3, dtype="CSR").data coeff = coefficient("expect_csr(op, op)", args={"op": csr}, args_ctypes={"op": "CSR"}, @@ -301,7 +320,8 @@ def _mul(coeff): pytest.param("arraylog", id="logarray"), pytest.param("string", id="string"), pytest.param("steparray", id="steparray"), - pytest.param("steparraylog", id="steparraylog") + 
pytest.param("steparraylog", id="steparraylog"), + pytest.param("const", id="constant"), ]) @pytest.mark.parametrize(['transform'], [ pytest.param(_pass, id="single"), @@ -323,7 +343,8 @@ def test_Coeffpickle(style, transform): pytest.param("arraylog", id="logarray"), pytest.param("string", id="string"), pytest.param("steparray", id="steparray"), - pytest.param("steparraylog", id="steparraylog") + pytest.param("steparraylog", id="steparraylog"), + pytest.param("const", id="constant"), ]) @pytest.mark.parametrize(['transform'], [ pytest.param(_pass, id="single"), @@ -372,6 +393,10 @@ def test_CoeffFromScipy(): from_scipy = coefficient(interp.make_interp_spline(tlist, y, k=3)) _assert_eq_over_interval(coeff, from_scipy, rtol=1e-8, inside=True) + coeff = coefficient(y, tlist=tlist, order=3, boundary_conditions="natural") + from_scipy = coefficient(interp.make_interp_spline(tlist, y, k=3, bc_type="natural")) + _assert_eq_over_interval(coeff, from_scipy, rtol=1e-8, inside=True) + @pytest.mark.parametrize('map_func', [ pytest.param(qutip.solver.parallel.parallel_map, id='parallel_map'), diff --git a/qutip/tests/core/test_gates.py b/qutip/tests/core/test_gates.py index 7b3795b50d..00f47ffedd 100644 --- a/qutip/tests/core/test_gates.py +++ b/qutip/tests/core/test_gates.py @@ -35,7 +35,7 @@ def gate(N=None, controls=None, target=None): def _make_controled(op): - out = qutip.tensor(qutip.fock_dm(2, 0), qutip.qeye(op.dims[0])) + out = qutip.tensor(qutip.fock_dm(2, 0), qutip.qeye_like(op)) out += qutip.tensor(qutip.fock_dm(2, 1), op) return out diff --git a/qutip/tests/core/test_operators.py b/qutip/tests/core/test_operators.py index 2b883b2871..5a887280cf 100644 --- a/qutip/tests/core/test_operators.py +++ b/qutip/tests/core/test_operators.py @@ -251,6 +251,8 @@ def _id_func(val): (qutip.spin_Jp, (1,)), (qutip.destroy, (5,)), (qutip.create, (5,)), + (qutip.fdestroy, (5, 0)), + (qutip.fcreate, (5, 0)), (qutip.qzero, (5,)), (qutip.qeye, (5,)), (qutip.position, (5,)), @@ -273,6 
+275,14 @@ def test_operator_type(func, args, alias, dtype): for obj in object: assert isinstance(obj.data, dtype) + with qutip.CoreOptions(default_dtype=alias): + object = func(*args) + if isinstance(object, qutip.Qobj): + assert isinstance(object.data, dtype) + else: + for obj in object: + assert isinstance(obj.data, dtype) + @pytest.mark.parametrize('dims', [8, 15, [2] * 4]) def test_qft(dims): @@ -285,3 +295,73 @@ def test_qft(dims): fft = np.fft.fft(qft[:,i]) fft /= np.sum(fft) np.testing.assert_allclose(fft, target, atol=1e-16 * N) + + +@pytest.mark.parametrize('N', [1, 3, 5, 8]) +@pytest.mark.parametrize('M', [1, 3, 5, 8]) +def test_swap(N, M): + ket1 = qutip.rand_ket(N) + ket2 = qutip.rand_ket(M) + + assert qutip.swap(N, M) @ (ket1 & ket2) == (ket2 & ket1) + + +@pytest.mark.parametrize(["dims", "superrep"], [ + pytest.param([2], None, id="simple"), + pytest.param([2, 3], None, id="tensor"), + pytest.param([[2], [2]], None, id="super"), + pytest.param([[2], [2]], "chi", id="chi"), +]) +@pytest.mark.parametrize('dtype', ["CSR", "Dense"]) +def test_qeye_like(dims, superrep, dtype): + op = qutip.rand_herm(dims, dtype=dtype) + op.superrep = superrep + new = qutip.qeye_like(op) + expected = qutip.qeye(dims, dtype=dtype) + expected.superrep = superrep + assert new == expected + + opevo = qutip.QobjEvo(op) + new = qutip.qeye_like(op) + assert new == expected + + +@pytest.mark.parametrize(["dims", "superrep"], [ + pytest.param([2], None, id="simple"), + pytest.param([2, 3], None, id="tensor"), + pytest.param([[2], [2]], None, id="super"), + pytest.param([[2], [2]], "chi", id="chi"), +]) +@pytest.mark.parametrize('dtype', ["CSR", "Dense"]) +def test_qzero_like(dims, superrep, dtype): + op = qutip.rand_herm(dims, dtype=dtype) + op.superrep = superrep + new = qutip.qzero_like(op) + expected = qutip.qzero(dims, dtype=dtype) + expected.superrep = superrep + assert new == expected + + opevo = qutip.QobjEvo(op) + new = qutip.qzero_like(op) + assert new == expected + + 
+@pytest.mark.parametrize('n_sites', [2, 3, 4, 5]) +def test_fcreate_fdestroy(n_sites): + identity = qutip.identity([2] * n_sites) + zero_tensor = qutip.qzero([2] * n_sites) + for site_0 in range(n_sites): + c_0 = qutip.fcreate(n_sites, site_0) + d_0 = qutip.fdestroy(n_sites, site_0) + for site_1 in range(n_sites): + c_1 = qutip.fcreate(n_sites, site_1) + d_1 = qutip.fdestroy(n_sites, site_1) + assert qutip.commutator(c_0, c_1, 'anti') == zero_tensor + assert qutip.commutator(d_0, d_1, 'anti') == zero_tensor + if site_0 == site_1: + assert qutip.commutator(c_0, d_1, 'anti') == identity + assert qutip.commutator(c_1, d_0, 'anti') == identity + else: + assert qutip.commutator(c_0, d_1, 'anti') == zero_tensor + assert qutip.commutator(c_1, d_0, 'anti') == zero_tensor + assert qutip.commutator(identity, c_0) == zero_tensor diff --git a/qutip/tests/core/test_qobj.py b/qutip/tests/core/test_qobj.py index 85e091b608..5badd1e88e 100644 --- a/qutip/tests/core/test_qobj.py +++ b/qutip/tests/core/test_qobj.py @@ -317,11 +317,10 @@ def test_QobjMultiplication(): # Allowed mul operations (scalar) @pytest.mark.parametrize("scalar", - [2+2j, np.array(2+2j), np.array([2+2j])], + [2+2j, np.array(2+2j)], ids=[ "python_number", "scalar_like_array_shape_0", - "scalar_like_array_shape_1", ]) def test_QobjMulValidScalar(scalar): "Tests multiplication of Qobj times scalar." @@ -359,11 +358,10 @@ def test_QobjMulNotValidScalar(not_scalar): # Allowed division operations (scalar) @pytest.mark.parametrize("scalar", - [2+2j, np.array(2+2j), np.array([2+2j])], + [2+2j, np.array(2+2j)], ids=[ "python_number", "scalar_like_array_shape_0", - "scalar_like_array_shape_1", ]) def test_QobjDivisionValidScalar(scalar): "Tests multiplication of Qobj times scalar." 
@@ -1226,3 +1224,44 @@ def test_groundstate(): with pytest.warns(UserWarning) as warning: qutip.qeye(5).groundstate() assert "degenerate" in warning[0].message.args[0] + + +@pytest.mark.filterwarnings( + "ignore::scipy.sparse.SparseEfficiencyWarning" +) +def test_data_as(): + qobj = qutip.qeye(2, dtype="CSR") + + assert scipy.sparse.isspmatrix_csr(qobj.data_as("csr_matrix")) + assert scipy.sparse.isspmatrix_csr(qobj.data_as(copy=False)) + with pytest.raises(ValueError) as err: + qobj.data_as("ndarray") + assert "csr_matrix" in str(err.value) + + qobj.data_as(copy=False)[0, 0] = 0 + qobj.data_as(copy=True)[0, 1] = 2 + assert qobj == qutip.num(2, dtype="CSR") + + qobj = qutip.qeye(2, dtype="Dense") + + assert isinstance(qobj.data_as("ndarray"), np.ndarray) + assert isinstance(qobj.data_as(copy=False), np.ndarray) + + qobj.data_as(copy=False)[0, 0] = 0 + qobj.data_as(copy=True)[0, 1] = 2 + assert qobj == qutip.num(2, dtype="Dense") + with pytest.raises(ValueError) as err: + qobj.data_as("csr_matrix") + assert "ndarray" in str(err.value) + + qobj = qutip.qeye(2, dtype="Dia") + + assert scipy.sparse.isspmatrix_dia(qobj.data_as("dia_matrix")) + assert scipy.sparse.isspmatrix_dia(qobj.data_as(copy=False)) + + qobj.data_as(copy=False).data[:, 0] = 0 + qobj.data_as(copy=True).data[:, 0] = 2 + assert qobj == qutip.num(2, dtype="Dia") + with pytest.raises(ValueError) as err: + qobj.data_as("ndarray") + assert "dia_matrix" in str(err.value) diff --git a/qutip/tests/core/test_qobjevo.py b/qutip/tests/core/test_qobjevo.py index 2e9d4a07c3..77fe8fba93 100644 --- a/qutip/tests/core/test_qobjevo.py +++ b/qutip/tests/core/test_qobjevo.py @@ -2,7 +2,8 @@ import pytest from qutip import (Qobj, QobjEvo, coefficient, qeye, sigmax, sigmaz, - rand_stochastic, rand_herm, rand_ket, liouvillian) + rand_stochastic, rand_herm, rand_ket, liouvillian, + basis, spre, spost, to_choi) import numpy as np from numpy.testing import assert_allclose @@ -46,6 +47,7 @@ def __call__(self, t, args={}): def 
__getitem__(self, which): return getattr(self, which)() + N = 3 args = {'w1': 1, "w2": 2} TESTTIMES = np.linspace(0.001, 1.0, 10) @@ -130,6 +132,46 @@ def test_call(pseudo_qevo, coeff_type): assert not qevo.issuper _assert_qobjevo_equivalent(pseudo_qevo, qevo) + +# Test the QobjEvo.__repr__() +def test_QobjEvo_repr(): + # case_n: cases with Objects of QobjEvo with unique __repr__ + # expected_repr_n: are the Expected result from the __repr__ + + case_1 = repr(QobjEvo([qeye(3), lambda t: t])) + expected_repr_1 = 'QobjEvo: dims = [[3], [3]], shape = (3, 3), ' + expected_repr_1 += 'type = oper, superrep = None, ' + expected_repr_1 += 'isconstant = False, num_elements = 1' + assert case_1 == expected_repr_1 + + case_2 = repr(QobjEvo(qeye(2))) + expected_repr_2 = 'QobjEvo: dims = [[2], [2]], shape = (2, 2), ' + expected_repr_2 += 'type = oper, superrep = None, ' + expected_repr_2 += 'isconstant = True, num_elements = 1' + assert case_2 == expected_repr_2 + + case_3 = repr(QobjEvo(basis(5, 2))) + expected_repr_3 = 'QobjEvo: dims = [[5], [1]], shape = (5, 1), ' + expected_repr_3 += 'type = ket, superrep = None, ' + expected_repr_3 += 'isconstant = True, num_elements = 1' + assert case_3 == expected_repr_3 + + X = sigmax() + S = spre(X) * spost(X.dag()) + case_4 = repr(QobjEvo(to_choi(S))) + expected_repr_4 = 'QobjEvo: dims = [[[2], [2]], [[2], [2]]], ' + expected_repr_4 += 'shape = (4, 4), type = super, superrep = choi, ' + expected_repr_4 += 'isconstant = True, num_elements = 1' + assert case_4 == expected_repr_4 + + case_5 = repr(QobjEvo([[qeye(4), lambda t: t], + [qeye(4), lambda t: t]], compress=False)) + expected_repr_5 = 'QobjEvo: dims = [[4], [4]], shape = (4, 4), ' + expected_repr_5 += 'type = oper, superrep = None, ' + expected_repr_5 += 'isconstant = False, num_elements = 2' + assert case_5 == expected_repr_5 + + @pytest.mark.parametrize('coeff_type', ['func_coeff', 'string', 'array', 'logarray']) def test_product_coeff(pseudo_qevo, coeff_type): @@ -149,6 +191,7 
@@ def test_copy(all_qevo): _assert_qobjevo_equivalent(copy, qevo) assert copy is not qevo + @pytest.mark.parametrize('bin_op', [ pytest.param(lambda a, b: a + b, id="add"), pytest.param(lambda a, b: a - b, id="sub"), @@ -202,6 +245,7 @@ def test_binopt_qobj(all_qevo, bin_op): as_qobj = bin_op(qobj, obj(t)) _assert_qobj_almost_eq(as_qevo, as_qobj) + @pytest.mark.parametrize('bin_op', [ pytest.param(lambda a, b: a + b, id="add"), pytest.param(lambda a, b: a - b, id="sub"), @@ -222,6 +266,7 @@ def test_binopt_scalar(all_qevo, bin_op): as_qobj = bin_op(scalar, obj(t)) _assert_qobj_almost_eq(as_qevo, as_qobj) + def binop_coeff(all_qevo): obj = all_qevo coeff = coeffient("t") @@ -229,6 +274,7 @@ def binop_coeff(all_qevo): for t in TESTTIMES: _assert_qobj_almost_eq(created(t), obj(t) * t) + @pytest.mark.parametrize('unary_op', [ pytest.param(lambda a: a.conj(), id="conj"), pytest.param(lambda a: a.dag(), id="dag"), @@ -243,6 +289,7 @@ def test_unary(all_qevo, unary_op): as_qobj = unary_op(obj(t)) _assert_qobj_almost_eq(as_qevo, as_qobj) + @pytest.mark.parametrize('args_coeff_type', ['func_coeff', 'string', 'func_call']) def test_args(pseudo_qevo, args_coeff_type): @@ -267,6 +314,7 @@ def test_args(pseudo_qevo, args_coeff_type): for t in TESTTIMES: _assert_qobj_almost_eq(obj(t), pseudo_qevo(t, args)) + def test_copy_side_effects(all_qevo): t = 0.2 qevo = all_qevo @@ -280,6 +328,7 @@ def test_copy_side_effects(all_qevo): after = qevo(t) _assert_qobj_almost_eq(before, after) + @pytest.mark.parametrize('coeff_type', ['func_coeff', 'string', 'array', 'logarray'] ) @@ -292,6 +341,7 @@ def test_tidyup(all_qevo): # check that the Qobj are cleaned assert_allclose(obj(t).full(), 0) + def test_QobjEvo_pickle(all_qevo): "QobjEvo pickle" # used in parallel_map @@ -302,6 +352,15 @@ def test_QobjEvo_pickle(all_qevo): _assert_qobjevo_equivalent(recreated, obj) +def test_QobjEvo_restore(all_qevo): + "QobjEvo pickle" + # used in parallel_map + obj = all_qevo + state = obj._getstate() + 
recreated = QobjEvo._restore(**state) + _assert_qobjevo_equivalent(recreated, obj) + + def test_mul_vec(all_qevo): "QobjEvo matmul ket" vec = Qobj(np.arange(N)*.5+.5j) @@ -327,6 +386,7 @@ def test_matmul(all_qevo): assert_allclose((Qo1 @ mat).full(), op.matmul(t, matCSR).full(), atol=1e-14) + def test_expect_psi(all_qevo): "QobjEvo expect psi" vec = _data.dense.fast_from_numpy(np.arange(N)*.5 + .5j) @@ -337,6 +397,7 @@ def test_expect_psi(all_qevo): assert_allclose(_data.expect(Qo1.data, vec), op.expect(t, qobj), atol=1e-14) + def test_expect_rho(all_qevo): "QobjEvo expect rho" vec = _data.dense.fast_from_numpy(np.random.rand(N*N) + 1 @@ -349,6 +410,7 @@ def test_expect_rho(all_qevo): assert abs(_data.expect_super(Qo1.data, vec) - op.expect(t, qobj)) < 1e-14 + @pytest.mark.parametrize('dtype', [pytest.param(dtype, id=dtype.__name__) for dtype in _data.to.dtypes]) @@ -439,3 +501,15 @@ def test_QobjEvo_isherm_flag_knowcase(): assert QobjEvo([sigmax(), "1j"])(0)._isherm is None assert QobjEvo([[sigmax(), "t"], [sigmaz(), "1"]])(0)._isherm is True assert QobjEvo([[sigmax(), "t"], [sigmaz(), "1j"]])(0)._isherm is None + + +@pytest.mark.parametrize( + "coeff_type", + ['func_coeff', 'string', 'array', 'logarray'] +) +def test_QobjEvo_to_list(coeff_type, pseudo_qevo): + qevo = QobjEvo(*pseudo_qevo[coeff_type]) + as_list = qevo.to_list() + assert len(as_list) == 2 + restored = QobjEvo(as_list) + _assert_qobjevo_equivalent(qevo, restored) diff --git a/qutip/tests/core/test_states.py b/qutip/tests/core/test_states.py index e790d06dd8..be17941dec 100644 --- a/qutip/tests/core/test_states.py +++ b/qutip/tests/core/test_states.py @@ -308,3 +308,11 @@ def test_state_type(func, args, alias, dtype): else: for obj in object: assert isinstance(obj.data, dtype) + + with qutip.CoreOptions(default_dtype=alias): + object = func(*args) + if isinstance(object, qutip.Qobj): + assert isinstance(object.data, dtype) + else: + for obj in object: + assert isinstance(obj.data, dtype) diff --git 
a/qutip/tests/core/test_superop_reps.py b/qutip/tests/core/test_superop_reps.py index 31c538d118..b1e1f1b89e 100644 --- a/qutip/tests/core/test_superop_reps.py +++ b/qutip/tests/core/test_superop_reps.py @@ -14,7 +14,7 @@ Qobj, basis, identity, sigmax, sigmay, qeye, create, rand_super, rand_super_bcsz, rand_dm, tensor, super_tensor, kraus_to_choi, to_super, to_choi, to_kraus, to_chi, to_stinespring, operator_to_vector, - vector_to_operator, sprepost, destroy + vector_to_operator, sprepost, destroy, CoreOptions ) from qutip.core.gates import swap @@ -171,8 +171,9 @@ def test_random_iscptp(self, superoperator): Superoperator: Randomly generated superoperators are correctly reported as CPTP and HP. """ - assert superoperator.iscptp - assert superoperator.ishp + with CoreOptions(atol=1e-9): + assert superoperator.iscptp + assert superoperator.ishp @pytest.mark.parametrize(['qobj', 'hp', 'cp', 'tp'], [ pytest.param(sprepost(destroy(2), create(2)), True, True, False), diff --git a/qutip/tests/piqs/__init__.py b/qutip/tests/piqs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/qutip/tests/solve/test_piqs.py b/qutip/tests/piqs/test_piqs.py similarity index 99% rename from qutip/tests/solve/test_piqs.py rename to qutip/tests/piqs/test_piqs.py index a069a2905b..54e17b91bd 100644 --- a/qutip/tests/solve/test_piqs.py +++ b/qutip/tests/piqs/test_piqs.py @@ -3,8 +3,6 @@ """ import numpy as np from numpy.testing import ( - assert_, - run_module_suite, assert_raises, assert_array_equal, assert_array_almost_equal, @@ -13,7 +11,7 @@ ) from scipy.sparse import block_diag from qutip import Qobj, entropy_vn, sigmax, sigmaz -from qutip.solve._piqs import ( +from qutip.piqs._piqs import ( get_blocks, j_min, j_vals, @@ -23,8 +21,8 @@ get_index, jmm1_dictionary, ) -from qutip.solve._piqs import Dicke as _Dicke -from qutip.solve.piqs import * +from qutip.piqs._piqs import Dicke as _Dicke +from qutip.piqs.piqs import * import sys import unittest @@ -130,14 +128,14 
@@ def test_dicke_blocks(self): true_matrix = (excited(N) + dicke(N, 0.5, 0.5)).unit() test_blocks = dicke_blocks(true_matrix) test_matrix = Qobj(block_diag(test_blocks)) - assert_(test_matrix == true_matrix) + assert (test_matrix == true_matrix) # test 2 # all elements in block-diagonal matrix N = 4 true_matrix = Qobj(block_matrix(N)).unit() test_blocks = dicke_blocks(true_matrix) test_matrix = Qobj(block_diag(test_blocks)) - assert_(test_matrix == true_matrix) + assert (test_matrix == true_matrix) def test_dicke_blocks_full(self): """ @@ -148,7 +146,7 @@ def test_dicke_blocks_full(self): test_matrix = Qobj(block_diag(test_blocks)) true_expanded = np.zeros((8, 8)) true_expanded[0, 0] = 1.0 - assert_(test_matrix == Qobj(true_expanded)) + assert (test_matrix == Qobj(true_expanded)) def test_dicke_function_trace(self): """ @@ -567,10 +565,10 @@ def test_j_min_(self): odd = [1, 3, 5, 7] for i in even: - assert_(j_min(i) == 0) + assert (j_min(i) == 0) for i in odd: - assert_(j_min(i) == 0.5) + assert (j_min(i) == 0.5) def test_energy_degeneracy(self): """ @@ -1530,7 +1528,3 @@ def test_pisolve(self): no_hamiltonian_system = Dicke(4, emission=0.1) result = no_hamiltonian_system.pisolve(diag_initial_state, tlist) assert_equal(True, len(result.states) > 0) - - -if __name__ == "__main__": - run_module_suite() diff --git a/qutip/tests/solve/test_stochastic_me.py b/qutip/tests/solve/test_stochastic_me.py deleted file mode 100644 index 27ac7c821a..0000000000 --- a/qutip/tests/solve/test_stochastic_me.py +++ /dev/null @@ -1,309 +0,0 @@ -import pytest -import numpy as np -from numpy.testing import assert_, run_module_suite - -from qutip import ( - smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, - destroy, coherent, qeye, fock_dm, general_stochastic, num, -) -from qutip.solve.parallel import parallel_map -from qutip.core import data as _data -def f(t, args): - return args["a"] * t - - -@pytest.mark.slow -def test_smesolve_homodyne_methods(): - 
"Stochastic: smesolve: homodyne methods with single jump operator" - - def arccoth(x): - return 0.5*np.log((1.+x)/(x-1.)) - - th = 0.1 # Interaction parameter - alpha = np.cos(th) - beta = np.sin(th) - gamma = 1. - - N = 30 # number of Fock states - Id = qeye(N) - a = destroy(N) - s = 0.5*((alpha+beta)*a + (alpha-beta)*a.dag()) - x = (a + a.dag()) * 2**-0.5 - H = Id - c_op = [gamma**0.5 * a] - sc_op = [s] - e_op = [x, x*x] - rho0 = fock_dm(N,0) # initial vacuum state - - T = 3. # final time - # number of time steps for which we save the expectation values - N_store = 121 - Nsub = 10 - tlist = np.linspace(0, T, N_store) - ddt = (tlist[1]-tlist[0]) - - #### Analytic solution - y0 = 0.5 - A = (gamma**2 + alpha**2 * (beta**2 + 4*gamma) - 2*alpha*beta*gamma)**0.5 - B = arccoth((-4*alpha**2*y0 + alpha*beta - gamma)/A) - y_an = (alpha*beta - gamma + A / np.tanh(0.5*A*tlist - B))/(4*alpha**2) - - - list_methods_tol = [['euler-maruyama', 2e-2], - ['pc-euler', 2e-3], - ['pc-euler-2', 2e-3], - ['platen', 1e-3], - ['milstein', 1e-3], - ['milstein-imp', 1e-3], - ['rouchon', 1e-3], - ['taylor1.5', 1e-4], - ['taylor1.5-imp', 1e-4], - ['explicit1.5', 1e-4], - ['taylor2.0', 1e-4]] - for n_method in list_methods_tol: - # Comparisons of error between sol and sol3 depend on the stochastic - # noise, thus the seed, fixing the seed remove random fails. 
- np.random.seed(1) - sol = smesolve(H, rho0, tlist, c_op, sc_op, e_op, - nsubsteps=Nsub, method='homodyne', solver = n_method[0]) - sol2 = smesolve(H, rho0, tlist, c_op, sc_op, e_op, store_measurement=0, - nsubsteps=Nsub, method='homodyne', solver = n_method[0], - noise = sol.noise) - sol3 = smesolve(H, rho0, tlist, c_op, sc_op, e_op, - nsubsteps=Nsub*5, method='homodyne', - solver = n_method[0], tol=1e-8) - err = 1/T * np.sum(np.abs(y_an - \ - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt - err3 = 1/T * np.sum(np.abs(y_an - \ - (sol3.expect[1]-sol3.expect[0]*sol3.expect[0].conj())))*ddt - print(n_method[0], ': deviation =', err, ', tol =', n_method[1]) - assert_(err < n_method[1]) - # 5* more substep should decrease the error - assert_(err3 < err) - # just to check that noise is not affected by smesolve - assert_(np.all(sol.noise == sol2.noise)) - assert_(np.all(sol.expect[0] == sol2.expect[0])) - - sol = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2, - nsubsteps=Nsub, method='homodyne', solver='euler', - store_measurement=1) - sol2 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2, - nsubsteps=Nsub, method='homodyne', solver='euler', - store_measurement=0) - sol3 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=11, ntraj=2, - nsubsteps=Nsub, method='homodyne', solver='euler') - # sol and sol2 have the same seed, sol3 differ. - assert_(np.all(sol.noise == sol2.noise)) - assert_(np.all(sol.noise != sol3.noise)) - assert_(not np.all(sol.measurement[0] == 0.+0j)) - assert_(np.all(sol2.measurement[0] == 0.+0j)) - sol = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=np.array([1,2]), - ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler') - sol2 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=np.array([2,1]), - ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler') - # sol and sol2 have the seed of traj 1 and 2 reversed. 
- assert_(np.all(sol.noise[0,:,:,:] == sol2.noise[1,:,:,:])) - assert_(np.all(sol.noise[1,:,:,:] == sol2.noise[0,:,:,:])) - - -def test_smesolve_photocurrent(): - "Stochastic: photocurrent_mesolve" - tol = 0.01 - - N = 4 - gamma = 0.25 - ntraj = 20 - nsubsteps = 100 - a = destroy(N) - - H = [[a.dag() * a,f]] - psi0 = coherent(N, 0.5) - sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] - - times = np.linspace(0, 1.0, 21) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2}) - res = photocurrent_mesolve(H, psi0, times, [], sc_ops, e_ops, args={"a":2}, - ntraj=ntraj, nsubsteps=nsubsteps, store_measurement=True, - map_func=parallel_map) - - assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol - for idx in range(len(e_ops))])) - assert_(len(res.measurement) == ntraj) - assert_(all([m.shape == (len(times), len(sc_ops)) - for m in res.measurement])) - - -def test_smesolve_homodyne(): - "Stochastic: smesolve: homodyne, time-dependent H" - tol = 0.01 - - N = 4 - gamma = 0.25 - ntraj = 20 - nsubsteps = 100 - a = destroy(N) - - H = [[a.dag() * a,f]] - psi0 = coherent(N, 0.5) - sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] - - times = np.linspace(0, 1.0, 21) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2}) - list_methods_tol = ['euler-maruyama', - 'pc-euler', - 'pc-euler-2', - 'platen', - 'milstein', - 'milstein-imp', - 'rouchon', - 'taylor15', - 'taylor15-imp', - 'explicit15'] - for solver in list_methods_tol: - res = smesolve(H, psi0, times, [], sc_ops, e_ops, - ntraj=ntraj, nsubsteps=nsubsteps, args={"a":2}, - method='homodyne', store_measurement=True, - solver=solver, map_func=parallel_map) - assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol - for idx in range(len(e_ops))])) - assert_(len(res.measurement) == ntraj) - assert_(all([m.shape == (len(times), len(sc_ops)) - for m in 
res.measurement])) - - -@pytest.mark.slow -def test_smesolve_heterodyne(): - "Stochastic: smesolve: heterodyne, time-dependent H" - tol = 0.01 - - N = 4 - gamma = 0.25 - ntraj = 20 - nsubsteps = 100 - a = destroy(N) - - H = [[a.dag() * a, f]] - psi0 = coherent(N, 0.5) - sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] - - times = np.linspace(0, 1.0, 21) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2}) - list_methods_tol = ['euler-maruyama', - 'pc-euler', - 'pc-euler-2', - 'platen', - 'milstein', - 'milstein-imp', - 'rouchon', - 'taylor15', - 'taylor15-imp', - 'explicit15'] - for solver in list_methods_tol: - res = smesolve(H, psi0, times, [], sc_ops, e_ops, - ntraj=ntraj, nsubsteps=nsubsteps, args={"a":2}, - method='heterodyne', store_measurement=True, - solver=solver, map_func=parallel_map) - assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol - for idx in range(len(e_ops))])) - assert_(len(res.measurement) == ntraj) - assert_(all([m.shape == (len(times), len(sc_ops), 2) - for m in res.measurement])) - - -@pytest.mark.slow -def test_general_stochastic(): - "Stochastic: general_stochastic" - "Reproduce smesolve homodyne" - tol = 0.025 - N = 4 - gamma = 0.25 - ntraj = 20 - nsubsteps = 50 - a = destroy(N) - - H = [[a.dag() * a,f]] - psi0 = coherent(N, 0.5) - sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] - - L = liouvillian(QobjEvo([[a.dag() * a,f]], args={"a":2}), c_ops = sc_ops) - sc_opsM = [QobjEvo(spre(op) + spost(op.dag())) for op in sc_ops] - e_opsM = [spre(op) for op in e_ops] - - def d1(t, vec): - return L.matmul_data(t, _data.Dense(vec)).to_array().ravel() - - def d2(t, vec): - out = [] - vec_d = _data.Dense(vec) - for op in sc_opsM: - scale = -op.expect_data(t,vec_d) - out.append( - _data.add(op.matmul_data(t,vec_d), - vec_d, scale) - ) - return np.stack([o.to_array().ravel() for o in out]) - 
- times = np.linspace(0, 0.5, 13) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2}) - list_methods_tol = ['euler-maruyama', - 'platen', - 'explicit15'] - for solver in list_methods_tol: - res = general_stochastic(psi0.proj(), times, d1, d2, len_d2=2, - e_ops=e_opsM, normalize=False, ntraj=ntraj, - nsubsteps=nsubsteps, solver=solver) - assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol - for idx in range(len(e_ops))])) - assert_(len(res.measurement) == ntraj) - - -def f_dargs(a, args): - return args["expect_op_3"] - 1 - - -@pytest.mark.xfail(reason="not working yet") -def test_ssesolve_feedback(): - "Stochastic: ssesolve: time-dependent H with feedback" - tol = 0.01 - N = 4 - ntraj = 10 - nsubsteps = 100 - a = destroy(N) - - H = [num(N)] - psi0 = coherent(N, 2.5) - sc_ops = [[a + a.dag(), f_dargs]] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag()), qeye(N)] - - times = np.linspace(0, 10, 101) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, - args={"expect_op_3":qeye(N)}) - res = smesolve(H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, noise=1, - ntraj=ntraj, nsubsteps=nsubsteps, method='homodyne', - map_func=parallel_map, args={"expect_op_3":qeye(N)}) - - assert res.expect == pytest.approx(res_ref.expect, abs=tol) - - -def test_smesolve_bad_e_ops(): - "Stochastic: ssesolve: time-dependent H with feedback" - tol = 0.01 - N = 4 - ntraj = 10 - nsubsteps = 100 - a = destroy(N) - - H = [num(N)] - psi0 = coherent(N, 2.5) - sc_ops = [a + a.dag()] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag()), qeye(N+1)] - - times = np.linspace(0, 10, 101) - with pytest.raises(TypeError) as exc: - res = smesolve(H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, noise=1, - ntraj=ntraj, nsubsteps=nsubsteps, method='homodyne', - map_func=parallel_map) diff --git a/qutip/tests/solve/test_stochastic_se.py b/qutip/tests/solve/test_stochastic_se.py deleted file mode 100644 index 8a40f2d756..0000000000 --- a/qutip/tests/solve/test_stochastic_se.py +++ 
/dev/null @@ -1,230 +0,0 @@ -import pytest -import numpy as np -from numpy.testing import assert_ - -from qutip import ( - ssesolve, destroy, coherent, mesolve, fock, qeye, - photocurrent_sesolve, num, -) -from qutip.solve.parallel import parallel_map - -def f(t, args): - return args["a"] * t - - -@pytest.mark.slow -def test_ssesolve_homodyne_methods(): - "Stochastic: ssesolve: homodyne methods with single jump operator" - - def arccoth(x): - return 0.5*np.log((1.+x)/(x-1.)) - - th = 0.1 # Interaction parameter - alpha = np.cos(th) - beta = np.sin(th) - gamma = 1. - - N = 30 # number of Fock states - Id = qeye(N) - a = destroy(N) - s = 0.5*((alpha+beta)*a + (alpha-beta)*a.dag()) - x = (a + a.dag()) * 2**-0.5 - H = Id + gamma * a * a.dag() - sc_op = [s] - e_op = [x, x*x] - rho0 = fock(N,0) # initial vacuum state - - T = 6. # final time - # number of time steps for which we save the expectation values - N_store = 200 - Nsub = 10 - tlist = np.linspace(0, T, N_store) - ddt = (tlist[1]-tlist[0]) - - #### No analytic solution for ssesolve, taylor15 with 500 substep - sol = ssesolve(H, rho0, tlist, sc_op, e_op, - nsubsteps=1000, method='homodyne', solver='taylor1.5') - y_an = (sol.expect[1]-sol.expect[0]*sol.expect[0].conj()) - - - list_methods_tol = [['euler-maruyama', 3e-2], - ['pc-euler', 5e-3], - ['pc-euler-2', 5e-3], - ['platen', 5e-3], - ['milstein', 5e-3], - ['milstein-imp', 5e-3], - ['taylor1.5', 5e-4], - ['taylor1.5-imp', 5e-4], - ['explicit1.5', 5e-4], - ['taylor2.0', 5e-4]] - for n_method in list_methods_tol: - # Comparisons of error between sol and sol3 depend on the stochastic - # noise, thus the seed, fixing the seed remove random fails. 
- np.random.seed(1) - sol = ssesolve(H, rho0, tlist, sc_op, e_op, - nsubsteps=Nsub, method='homodyne', solver=n_method[0]) - sol2 = ssesolve(H, rho0, tlist, sc_op, e_op, store_measurement=0, - nsubsteps=Nsub, method='homodyne', solver=n_method[0], - noise = sol.noise) - sol3 = ssesolve(H, rho0, tlist, sc_op, e_op, - nsubsteps=Nsub*10, method='homodyne', - solver=n_method[0], tol=1e-8) - err = 1/T * np.sum(np.abs(y_an - \ - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt - err3 = 1/T * np.sum(np.abs(y_an - \ - (sol3.expect[1]-sol3.expect[0]*sol3.expect[0].conj())))*ddt - assert err < n_method[1] - # 5* more substep should decrease the error - assert err3 < err - # just to check that noise is not affected by ssesolve - assert np.all(sol.noise == sol2.noise) - assert np.all(sol.expect[0] == sol2.expect[0]) - - sol = ssesolve(H, rho0, tlist[:2], sc_op, e_op, noise=10, ntraj=2, - nsubsteps=Nsub, method='homodyne', solver='euler', - store_measurement=1) - sol2 = ssesolve(H, rho0, tlist[:2], sc_op, e_op, noise=10, ntraj=2, - nsubsteps=Nsub, method='homodyne', solver='euler', - store_measurement=0) - sol3 = ssesolve(H, rho0, tlist[:2], sc_op, e_op, noise=11, ntraj=2, - nsubsteps=Nsub, method='homodyne', solver='euler') - # sol and sol2 have the same seed, sol3 differ. - assert np.all(sol.noise == sol2.noise) - assert np.all(sol.noise != sol3.noise) - assert not np.all(sol.measurement[0] == 0.+0j) - assert np.all(sol2.measurement[0] == 0.+0j) - sol = ssesolve(H, rho0, tlist[:2], sc_op, e_op, noise=np.array([1,2]), - ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler') - sol2 = ssesolve(H, rho0, tlist[:2], sc_op, e_op, noise=np.array([2,1]), - ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler') - # sol and sol2 have the seed of traj 1 and 2 reversed. 
- assert np.all(sol.noise[0,:,:,:] == sol2.noise[1,:,:,:]) - assert np.all(sol.noise[1,:,:,:] == sol2.noise[0,:,:,:]) - - -def test_ssesolve_photocurrent(): - "Stochastic: photocurrent_sesolve" - tol = 0.01 - - N = 4 - gamma = 0.25 - ntraj = 25 - nsubsteps = 100 - a = destroy(N) - - H = [[a.dag() * a,f]] - psi0 = coherent(N, 0.5) - sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a*0.5] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] - - times = np.linspace(0, 2.5, 50) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2}) - res = photocurrent_sesolve(H, psi0, times, sc_ops, e_ops, ntraj=ntraj, - nsubsteps=nsubsteps, store_measurement=True, - map_func=parallel_map, args={"a":2}) - - np.testing.assert_allclose(res.expect, res_ref.expect, atol=tol) - assert len(res.measurement) == ntraj - assert all([m.shape == (len(times), len(sc_ops)) for m in res.measurement]) - - -def test_ssesolve_homodyne(): - "Stochastic: ssesolve: homodyne, time-dependent H" - tol = 0.01 - - N = 4 - gamma = 0.25 - ntraj = 25 - nsubsteps = 100 - a = destroy(N) - - H = [[a.dag() * a,f]] - psi0 = coherent(N, 0.5) - sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a*0.5] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] - - times = np.linspace(0, 2.5, 50) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2}) - res = ssesolve(H, psi0, times, sc_ops, e_ops, - ntraj=ntraj, nsubsteps=nsubsteps, - method='homodyne', store_measurement=True, - map_func=parallel_map, args={"a":2}) - - np.testing.assert_allclose(res.expect, res_ref.expect, atol=tol) - assert len(res.measurement) == ntraj - assert all(m.shape == (len(times), len(sc_ops)) for m in res.measurement) - - -@pytest.mark.slow -def test_ssesolve_heterodyne(): - "Stochastic: ssesolve: heterodyne, time-dependent H" - tol = 0.01 - - N = 4 - gamma = 0.25 - ntraj = 25 - nsubsteps = 100 - a = destroy(N) - - H = [[a.dag() * a,f]] - psi0 = coherent(N, 0.5) - sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a*0.5] - e_ops = 
[a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] - - times = np.linspace(0, 2.5, 50) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2}) - res = ssesolve(H, psi0, times, sc_ops, e_ops, - ntraj=ntraj, nsubsteps=nsubsteps, - method='heterodyne', store_measurement=True, - map_func=parallel_map, args={"a":2}) - - np.testing.assert_allclose(res.expect, res_ref.expect, atol=tol) - assert len(res.measurement) == ntraj - assert all(m.shape == (len(times), len(sc_ops), 2) - for m in res.measurement) - - -def f_dargs(t, args): - return args["expect_op_3"] - 1 - - -@pytest.mark.xfail(reason="not yet working") -def test_ssesolve_feedback(): - "Stochastic: ssesolve: time-dependent H with feedback" - tol = 0.01 - N = 4 - ntraj = 10 - nsubsteps = 100 - a = destroy(N) - - H = [num(N)] - psi0 = coherent(N, 2.5) - sc_ops = [[a + a.dag(), f_dargs]] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag()), qeye(N)] - - times = np.linspace(0, 10, 101) - res_ref = mesolve(H, psi0, times, sc_ops, e_ops, - args={"expect_op_3": qeye(N)}) - res = ssesolve(H, psi0, times, sc_ops, e_ops, solver=None, noise=1, - ntraj=ntraj, nsubsteps=nsubsteps, method='homodyne', - map_func=parallel_map, args={"expect_op_3": qeye(N)}) - np.testing.assert_allclose(res.expect, res_ref.expect, atol=tol) - - -def test_ssesolve_bad_e_ops(): - tol = 0.01 - N = 4 - ntraj = 10 - nsubsteps = 100 - a = destroy(N) - b = destroy(N-1) - - H = [num(N)] - psi0 = coherent(N, 2.5) - sc_ops = [a + a.dag()] - e_ops = [a.dag() * a, a + a.dag(), (-1j)*(b - b.dag()), qeye(N+1)] - times = np.linspace(0, 10, 101) - with pytest.raises(TypeError) as exc: - res = ssesolve(H, psi0, times, sc_ops, e_ops, solver=None, noise=1, - ntraj=ntraj, nsubsteps=nsubsteps, method='homodyne', - map_func=parallel_map) diff --git a/qutip/tests/solver/cy/test_nm_mcsolve.py b/qutip/tests/solver/cy/test_nm_mcsolve.py new file mode 100644 index 0000000000..d37514d6de --- /dev/null +++ b/qutip/tests/solver/cy/test_nm_mcsolve.py @@ -0,0 +1,161 @@ 
+import numbers +import pickle + +import numpy as np +import pytest + +from qutip import coefficient +from qutip.core.coefficient import ConstantCoefficient +from qutip.solver.cy.nm_mcsolve import ( + RateShiftCoefficient, SqrtRealCoefficient, +) + + +def assert_functions_equal(f, g, tlist, rtol=1e-12, atol=1e-12): + """ Assert that to functions of t are equal at a list of specified times. + """ + assert len(tlist) > 0 + np.testing.assert_allclose( + [f(t) for t in tlist], + [g(t) for t in tlist], + rtol=rtol, atol=atol, + ) + + +class RateSet: + """ A list of coefficients and a tlist of times to test at. """ + def __init__(self, coeffs, tlist): + self.coeffs = coeffs + self.tlist = tlist + + +def rate_set(coeffs, *, tlist=np.linspace(0, 1, 20), args=None, **kw): + id = kw.pop("id") + args = args or {} + coeffs = [ + ConstantCoefficient(c) if isinstance(c, numbers.Number) + else coefficient(c, args=args) + for c in coeffs + ] + return pytest.param(RateSet(coeffs, tlist), id=id) + + +@pytest.fixture(params=[ + rate_set([], id="no_rates"), + rate_set([0], id="single_zero_rate"), + rate_set([1], id="single_positive_rate"), + rate_set([-1], id="single_negative_rate"), + rate_set([0, 0, 0], id="multiple_zero_rates"), + rate_set([0, 1], id="zero_and_positive_rate"), + rate_set([0, -1], id="zero_and_negative_rate"), + rate_set([1, -1], id="positive_and_negative_rate"), + rate_set( + [lambda t: np.sin(t)], + tlist=np.linspace(0, 2 * np.pi, 20), + id="sin_rate", + ), + rate_set( + [lambda t: np.sin(t), -0.5], + tlist=np.linspace(0, 2 * np.pi, 20), + id="sin_and_negative_rate", + ) +]) +def rates(request): + return request.param + + +def sin_t(t): + """ Pickle-able and coefficient-able sin(t). 
""" + return np.sin(t) + + +class TestRateShiftCoefficient: + + @staticmethod + def assert_f_equals_rate_shift(f, coeffs, tlist, **kw): + def g(t): + return 2 * np.abs(min( + [0] + [np.real(c(t)) for c in coeffs] + )) + assert_functions_equal(f, g, tlist, **kw) + + def test_call(self, rates): + rs = RateShiftCoefficient(rates.coeffs) + self.assert_f_equals_rate_shift(rs, rates.coeffs, rates.tlist) + + def test_as_double(self, rates): + rs = RateShiftCoefficient(rates.coeffs) + self.assert_f_equals_rate_shift( + rs.as_double, rates.coeffs, rates.tlist, + ) + assert all(isinstance(rs.as_double(t), float) for t in rates.tlist) + + def test_copy(self, rates): + rs = RateShiftCoefficient(rates.coeffs) + rs = rs.copy() + self.assert_f_equals_rate_shift(rs, rates.coeffs, rates.tlist) + + def test_replace_arguments(self): + coeff = coefficient(lambda t, w: np.sin(w * t), args={"w": 1.0}) + tlist = np.linspace(0, 2 * np.pi, 100) + rs = RateShiftCoefficient([coeff]) + + for w in [0, 1, 2, 3]: + rs2 = rs.replace_arguments(w=w) + self.assert_f_equals_rate_shift( + rs2, [coeff.replace_arguments(w=w)], tlist, + ) + + def test_reduce(self): + coeff = coefficient(sin_t) + tlist = np.linspace(0, 2 * np.pi, 20) + rs = RateShiftCoefficient([coeff]) + + data = pickle.dumps(rs, protocol=-1) + rs = pickle.loads(data) + self.assert_f_equals_rate_shift(rs, [coeff], tlist) + + +class TestSqrtRealCoefficient: + + @staticmethod + def assert_f_equals_sqrt_real(f, coeff, tlist, **kw): + def g(t): + return np.sqrt(np.real(coeff(t))) + assert_functions_equal(f, g, tlist, **kw) + + def test_call(self): + coeff = coefficient(lambda t: np.abs(np.sin(t))) + tlist = np.linspace(0, 2 * np.pi, 20) + sr = SqrtRealCoefficient(coeff) + self.assert_f_equals_sqrt_real(sr, coeff, tlist) + + def test_copy(self): + coeff = coefficient(lambda t: np.abs(np.sin(t))) + tlist = np.linspace(0, 2 * np.pi, 20) + sr = SqrtRealCoefficient(coeff) + sr = sr.copy() + self.assert_f_equals_sqrt_real(sr, coeff, tlist) + + def 
test_replace_arguments(self): + coeff = coefficient( + lambda t, w: np.abs(np.sin(w * t)), + args={"w": 1.0}, + ) + tlist = np.linspace(0, 2 * np.pi, 100) + sr = SqrtRealCoefficient(coeff) + + for w in [0, 1, 2, 3]: + sr2 = sr.replace_arguments(w=w) + self.assert_f_equals_sqrt_real( + sr2, coeff.replace_arguments(w=w), tlist, + ) + + def test_reduce(self): + coeff = coefficient(sin_t) + tlist = np.linspace(0, np.pi, 10) + sr = SqrtRealCoefficient(coeff) + + data = pickle.dumps(sr, protocol=-1) + sr = pickle.loads(data) + self.assert_f_equals_sqrt_real(sr, coeff, tlist) diff --git a/qutip/tests/solver/heom/test_bofin_baths.py b/qutip/tests/solver/heom/test_bofin_baths.py index 6900a30933..c6c5c80fc1 100644 --- a/qutip/tests/solver/heom/test_bofin_baths.py +++ b/qutip/tests/solver/heom/test_bofin_baths.py @@ -30,6 +30,7 @@ def check_exponent( ): """ Check the attributes of a BathExponent. """ assert exp.type is BathExponent.types[type] + assert exp.fermionic == (type in ["+", "-"]) assert exp.dim == dim assert exp.Q == Q assert exp.ck == pytest.approx(ck) @@ -98,7 +99,7 @@ def test_repr(self): assert repr(exp1) == ( "" + " sigma_bar_k_offset=None fermionic=False tag=None>" ) exp2 = BathExponent( "+", None, Q=None, ck=1.0, vk=2.0, sigma_bar_k_offset=-1, @@ -107,7 +108,7 @@ def test_repr(self): assert repr(exp2) == ( "" + " sigma_bar_k_offset=-1 fermionic=True tag='bath1'>" ) diff --git a/qutip/tests/solver/heom/test_bofin_solvers.py b/qutip/tests/solver/heom/test_bofin_solvers.py index deb3510152..ad6e5f2297 100644 --- a/qutip/tests/solver/heom/test_bofin_solvers.py +++ b/qutip/tests/solver/heom/test_bofin_solvers.py @@ -8,7 +8,7 @@ from scipy.integrate import quad from qutip import ( - basis, destroy, expect, liouvillian, sigmax, sigmaz, + basis, destroy, expect, liouvillian, qeye, sigmax, sigmaz, tensor, Qobj, QobjEvo ) from qutip.core import data as _data @@ -52,7 +52,7 @@ def assert_raises_steady_state_time_dependent(hsolver): a time-dependent Hamiltonian raises 
the appropriate exception. """ with pytest.raises(ValueError) as err: - hsolver.steady_state() + hsolver.steady_state() assert str(err.value) == ( "A steady state cannot be determined for a time-dependent" " system" @@ -204,7 +204,7 @@ def mk_ados(self, bath_dims, max_depth): def mk_rho_and_soln(self, ados, rho_dims): n_ados = len(ados.labels) - ado_soln = np.random.rand(n_ados, *[np.product(d) for d in rho_dims]) + ado_soln = np.random.rand(n_ados, *[np.prod(d) for d in rho_dims]) rho = Qobj(ado_soln[0, :], dims=rho_dims) return rho, ado_soln @@ -229,7 +229,8 @@ def test_extract(self): class DrudeLorentzPureDephasingModel: - """ Analytic Drude-Lorentz pure-dephasing model for testing the HEOM solver. + """ Analytic Drude-Lorentz pure-dephasing model for testing the HEOM + solver. """ def __init__(self, lam, gamma, T, Nk): self.lam = lam @@ -266,7 +267,8 @@ def _integrand(omega, t): ] def bath_coefficients(self): - """ Correlation function expansion coefficients for the Drude-Lorentz bath. + """ Correlation function expansion coefficients for the Drude-Lorentz + bath. """ lam, gamma, T = self.lam, self.gamma, self.T Nk = self.Nk @@ -284,7 +286,8 @@ def bath_coefficients(self): class UnderdampedPureDephasingModel: - """ Analytic Drude-Lorentz pure-dephasing model for testing the HEOM solver. + """ Analytic Drude-Lorentz pure-dephasing model for testing the HEOM + solver. """ def __init__(self, lam, gamma, w0, T, Nk): self.lam = lam @@ -324,34 +327,94 @@ def _integrand(omega, t): ] +class BosonicMode: + """ A description of a bosonic mode for inclusion in a + DiscreteLevelCurrentModel. 
+ """ + def __init__(self, N, Lambda, Omega, gamma_b): + self.N = N + self.Lambda = Lambda + self.Omega = Omega + self.gamma_b = gamma_b + + def bath_coefficients(self): + ck_real = [0.5 * self.Lambda**2, 0.5 * self.Lambda**2] + vk_real = [0.5 * 1.0j * self.Lambda**2, -0.5 * 1.0j * self.Lambda**2] + + ck_imag = [ + -1.0j * self.Omega + self.gamma_b / 2, + 1.0j * self.Omega + self.gamma_b / 2, + ] + vk_imag = [ + -1.0j * self.Omega + self.gamma_b / 2, + 1.0j * self.Omega + self.gamma_b / 2, + ] + return ck_real, ck_imag, vk_real, vk_imag + + class DiscreteLevelCurrentModel: """ Analytic discrete level current model for testing the HEOM solver - with a fermionic bath. + with a fermionic bath (and optionally a bosonic mode). """ - def __init__(self, gamma, W, T, lmax): + def __init__(self, gamma, W, T, lmax, theta=2., e1=1., bosonic_mode=None): + # single fermion + self.e1 = e1 # energy + + # parameters for the fermionic leads self.gamma = gamma self.W = W self.T = T self.lmax = lmax # Pade cut-off self.beta = 1. / T + self.theta = theta # bias - # single fermion - self.e1 = 1. - d1 = destroy(2) - self.H = self.e1 * d1.dag() * d1 - self.Q = d1 + # bosonic_mode + self.bosonic_mode = bosonic_mode - # bias - self.theta = 2. + # Construct Hamiltonian and coupling operator + if self.bosonic_mode is None: + d1 = destroy(2) + self.H = self.e1 * d1.dag() @ d1 + self.Q = d1 + self._sys_occupation_op = d1.dag() @ d1 + else: + d1 = destroy(2) & qeye(self.bosonic_mode.N) + a = qeye(2) & destroy(self.bosonic_mode.N) + self.H = ( + self.e1 * d1.dag() @ d1 + + self.bosonic_mode.Omega * a.dag() @ a + + self.bosonic_mode.Lambda * (a + a.dag()) @ d1.dag() @ d1 + ) + if self.bosonic_mode.gamma_b != 0: + # apply phenomenological damping: + self.H = liouvillian( + self.H, [np.sqrt(bosonic_mode.gamma_b) * a], + ) + self.Q = d1 + self._sys_occupation_op = d1.dag() @ d1 - def rho(self): - """ Initial state. 
""" - return 0.5 * Qobj(np.ones((2, 2))) + def rho(self, rho_fermion=None): + """ Return initial system density matrix given the density matrix for + the single Fermionic mode. + """ + if rho_fermion is None: + rho_fermion = 0.5 * Qobj(np.ones((2, 2))) + elif rho_fermion.isket: + rho_fermion = rho_fermion.proj() + if self.bosonic_mode is None: + rho = rho_fermion + else: + bm0 = basis(self.bosonic_mode.N, 0) + rho = rho_fermion & (bm0 @ bm0.dag()) + return rho + + def sys_occupation(self, state): + return expect(state, self._sys_occupation_op) - def state_current(self, ado_state): + def state_current(self, ado_state, tags=None): level_1_aux = [ (ado_state.extract(label), ado_state.exps(label)[0]) - for label in ado_state.filter(level=1) + for label in ado_state.filter(level=1, tags=tags) ] def exp_sign(exp): @@ -369,6 +432,12 @@ def exp_op(exp): ) def analytic_current(self): + if self.bosonic_mode is not None: + raise RuntimeError( + "Analytic calculation of the current is not implemented in the" + " case where a bosonic mode is present." + ) + Gamma, W, beta, e1 = self.gamma, self.W, self.beta, self.e1 mu_l = self.theta / 2. mu_r = - self.theta / 2. 
@@ -541,27 +610,32 @@ def test_create_fermionic(self): assert hsolver.ados.exponents == exponents * 3 assert hsolver.ados.max_depth == 2 - def test_create_bath_errors(self): + def test_create_mixed_bosonic_and_fermionic(self): Q = sigmaz() H = sigmax() - mixed_types = [ + exponents = [ BathExponent("+", 2, Q=Q, ck=1.1, vk=2.1, sigma_bar_k_offset=1), BathExponent("-", 2, Q=Q, ck=1.2, vk=2.2, sigma_bar_k_offset=-1), BathExponent("R", 2, Q=Q, ck=1.2, vk=2.2), ] + bath = Bath(exponents) + + hsolver = HEOMSolver(H, bath, 2) + assert hsolver.ados.exponents == exponents + assert hsolver.ados.max_depth == 2 + + hsolver = HEOMSolver(H, [bath] * 3, 2) + assert hsolver.ados.exponents == exponents * 3 + assert hsolver.ados.max_depth == 2 + + def test_create_bath_errors(self): + Q = sigmaz() + H = sigmax() mixed_q_dims = [ BathExponent("I", 2, Q=tensor(Q, Q), ck=1.2, vk=2.2), BathExponent("R", 2, Q=Q, ck=1.2, vk=2.2), ] - with pytest.raises(ValueError) as err: - HEOMSolver(H, Bath(mixed_types), 2) - assert str(err.value) == ( - "Bath exponents are currently restricted to being either all" - " bosonic or all fermionic, but a mixture of bath exponents was" - " given." 
- ) - with pytest.raises(ValueError) as err: HEOMSolver(H, Bath(mixed_q_dims), 2) assert str(err.value) == ( @@ -667,7 +741,6 @@ def test_pure_dephasing_model_bosonic_bath( else: assert_raises_steady_state_time_dependent(hsolver) - @pytest.mark.parametrize(['terminator'], [ pytest.param(True, id="terminator"), pytest.param(False, id="noterminator"), @@ -775,7 +848,6 @@ def test_discrete_level_model_fermionic_bath(self, evo, liouvillianize): else: assert_raises_steady_state_time_dependent(hsolver) - @pytest.mark.parametrize(['bath_cls', 'analytic_current'], [ pytest.param(LorentzianBath, 0.001101, id="matsubara"), pytest.param(LorentzianPadeBath, 0.000813, id="pade"), @@ -813,6 +885,164 @@ def test_discrete_level_model_lorentzian_baths( # analytic_current = dlm.analytic_current() np.testing.assert_allclose(analytic_current, current, rtol=1e-3) + @pytest.mark.parametrize(['evo'], [ + pytest.param("qobj"), + pytest.param("qobjevo_const"), + pytest.param("qobjevo_timedep"), + ]) + @pytest.mark.parametrize(['liouvillianize'], [ + pytest.param(False, id="hamiltonian"), + pytest.param(True, id="liouvillian"), + ]) + def test_discrete_level_model_fermionic_bath_with_decoupled_bosonic_bath( + self, evo, liouvillianize + ): + dlm = DiscreteLevelCurrentModel( + gamma=0.01, W=1, T=0.025851991, lmax=10, + ) + H_sys = hamiltonian_to_sys(dlm.H, evo, liouvillianize) + ck_plus, vk_plus, ck_minus, vk_minus = dlm.bath_coefficients() + + options = { + "store_states": True, + "store_ados": True, + "nsteps": 15_000, + "rtol": 1e-7, + "atol": 1e-7, + } + fermionic_bath = FermionicBath( + dlm.Q, ck_plus, vk_plus, ck_minus, vk_minus, tag="fermionic", + ) + # very weak bosonic coupling which should not affect the dynamics of + # the interaction between the system and the fermionic bath: + eps = [1e-10] * 5 + bosonic_Q = sigmax() + bosonic_bath = BosonicBath( + bosonic_Q, eps, eps, eps, eps, combine=False, + ) + # for a single impurity we converge with max_depth = 2 + # we specify the 
bosonic bath first to ensure that the test checks + # that the sums inside HEOMSolver grad-next/prev work when the bosonic + # mode is before the fermionic ones + hsolver = HEOMSolver( + H_sys, [bosonic_bath, fermionic_bath], 2, options=options, + ) + + tlist = [0, 600] + result = hsolver.run(dlm.rho(), tlist) + current = dlm.state_current(result.ado_states[-1], tags=["fermionic"]) + analytic_current = dlm.analytic_current() + np.testing.assert_allclose(analytic_current, current, rtol=1e-3) + + if evo != "qobjevo_timedep": + rho_final, ado_state = hsolver.steady_state() + current = dlm.state_current(ado_state) + analytic_current = dlm.analytic_current() + np.testing.assert_allclose(analytic_current, current, rtol=1e-3) + else: + assert_raises_steady_state_time_dependent(hsolver) + + @pytest.mark.parametrize(['evo'], [ + pytest.param("qobj"), + pytest.param("qobjevo_const"), + pytest.param("qobjevo_timedep"), + ]) + @pytest.mark.parametrize(['liouvillianize'], [ + pytest.param(False, id="hamiltonian"), + pytest.param(True, id="liouvillian"), + ]) + def test_discrete_level_model_fermionic_bath_with_coupled_bosonic_bath( + self, evo, liouvillianize + ): + dlm = DiscreteLevelCurrentModel( + gamma=0.01, W=1, T=0.5, lmax=1, e1=0.3, theta=0.5, + ) + bosonic_mode = BosonicMode( + N=4, Omega=0.2, Lambda=0.1, gamma_b=0.1, + ) + + dlm_ref = DiscreteLevelCurrentModel( + gamma=0.01, W=1, T=0.5, lmax=1, e1=0.3, theta=0.5, + bosonic_mode=bosonic_mode, + ) + + options = { + "store_states": True, + "store_ados": True, + "nsteps": 15_000, + "rtol": 1e-7, + "atol": 1e-7, + } + + # First we construct a solver with the boson modelled as part of the + # system and only a single Fermionic bath. 
This will provide the + # reference result for the test: + fermionic_bath_ref = FermionicBath( + dlm_ref.Q, *dlm_ref.bath_coefficients(), tag="fermionic", + ) + + hsolver_ref = HEOMSolver( + dlm_ref.H, [fermionic_bath_ref], 2, options=options, + ) + + # Then we construct a solver for the same system, but with the + # bosonic mode as a bath This is the result we would like to check: + H_sys = hamiltonian_to_sys(dlm.H, evo, liouvillianize) + + fermionic_bath = FermionicBath( + dlm.Q, *dlm.bath_coefficients(), tag="fermionic", + ) + + bosonic_bath = BosonicBath( + dlm.Q.dag() @ dlm.Q, *bosonic_mode.bath_coefficients(), + combine=True, tag="bosonic", + ) + + hsolver = HEOMSolver( + H_sys, [bosonic_bath, fermionic_bath], 4, options=options, + ) + + # Calculate currents and occupations: + tlist = np.linspace(0, 1000, 300) + psi0 = basis(2, 0) + + result_ref = hsolver_ref.run(dlm_ref.rho(psi0), tlist) + current_ref = [ + dlm_ref.state_current(ado_state, tags=["fermionic"]) + for ado_state in result_ref.ado_states + ] + sys_occupation_ref = dlm_ref.sys_occupation( + result_ref.states + ) + + result = hsolver.run(dlm.rho(psi0), tlist) + current = [ + dlm.state_current(ado_state, tags=["fermionic"]) + for ado_state in result.ado_states + ] + sys_occupation = dlm.sys_occupation(result.states) + + np.testing.assert_allclose(current_ref, current, rtol=1e-3) + np.testing.assert_allclose( + sys_occupation_ref, sys_occupation, rtol=1e-3, + ) + + if evo != "qobjevo_timedep": + rho_final_ref, ado_state_ref = hsolver_ref.steady_state() + current_ss_ref = dlm_ref.state_current(ado_state_ref) + sys_occupation_ss_ref = dlm_ref.sys_occupation(rho_final_ref) + + rho_final, ado_state = hsolver.steady_state() + current_ss = dlm.state_current(ado_state) + sys_occupation_ss = dlm.sys_occupation(rho_final) + + np.testing.assert_allclose(current_ss_ref, current_ss, rtol=1e-3) + np.testing.assert_allclose( + sys_occupation_ss_ref, sys_occupation_ss, rtol=1e-3, + ) + else: + 
assert_raises_steady_state_time_dependent(hsolver) + @pytest.mark.parametrize(['ado_format'], [ pytest.param("hierarchy-ados-state", id="hierarchy-ados-state"), pytest.param("numpy", id="numpy"), @@ -897,6 +1127,7 @@ def test_solving_with_step(self): assert states[-1] == ado_state.extract(0) + class TestHeomsolveFunction: @pytest.mark.parametrize(['evo'], [ pytest.param("qobj", id="qobj"), @@ -921,8 +1152,8 @@ def test_heomsolve_with_pure_dephasing_model( options = {"nsteps": 15000, "store_states": True} e_ops = { - "11": basis(2,0) * basis(2,0).dag(), - "22": basis(2,1) * basis(2,1).dag(), + "11": basis(2, 0) * basis(2, 0).dag(), + "22": basis(2, 1) * basis(2, 1).dag(), } tlist = np.linspace(0, 10, 21) @@ -952,7 +1183,9 @@ class TestHSolverDL: pytest.param("qobjevo_const", True, id="qobjevo-const-combined"), pytest.param("listevo_const", True, id="listevo-const-combined"), pytest.param("qobjevo_timedep", True, id="qobjevo-timedep-combined"), - pytest.param("qobjevo_timedep", False, id="qobjevo-timedep-uncombined"), + pytest.param( + "qobjevo_timedep", False, id="qobjevo-timedep-uncombined", + ), ]) @pytest.mark.parametrize(['liouvillianize'], [ pytest.param(False, id="hamiltonian"), @@ -1056,7 +1289,7 @@ def mk_ados(self, bath_dims, max_depth): def mk_rho_and_soln(self, ados, rho_dims): n_ados = len(ados.labels) - ado_soln = np.random.rand(n_ados, *[np.product(d) for d in rho_dims]) + ado_soln = np.random.rand(n_ados, *[np.prod(d) for d in rho_dims]) rho = Qobj(ado_soln[0, :], dims=rho_dims) return rho, ado_soln diff --git a/qutip/tests/solver/test_floquet.py b/qutip/tests/solver/test_floquet.py index be90e6fab2..679a3cba80 100644 --- a/qutip/tests/solver/test_floquet.py +++ b/qutip/tests/solver/test_floquet.py @@ -331,3 +331,11 @@ def noise_spectrum(omega): * (array_ana_E1[idx] - delta / 2)) assert (min(abs(Xs - Xpm_m1)) < 1e-4) idx += 1 + + +def test_fsesolve_fallback(): + H = [sigmaz(), lambda t: np.sin(t * 2 * np.pi)] + psi0 = rand_ket(2) + ffstate = 
fmmesolve(H, psi0, [0, 1], T=1.).final_state + fstate = sesolve(H, psi0, [0, 1]).final_state + assert (ffstate - fstate).norm() < 1e-5 diff --git a/qutip/tests/solver/test_integrator.py b/qutip/tests/solver/test_integrator.py index 8779ea76a2..e87f334ba6 100644 --- a/qutip/tests/solver/test_integrator.py +++ b/qutip/tests/solver/test_integrator.py @@ -8,6 +8,10 @@ from numpy.testing import assert_allclose import pytest +# Deactivate warning for test without cython +from qutip.core.coefficient import WARN_MISSING_MODULE +WARN_MISSING_MODULE[0] = 0 + class TestIntegratorCte(): _analytical_se = lambda _, t: np.cos(t * np.pi) diff --git a/qutip/tests/solver/test_mcsolve.py b/qutip/tests/solver/test_mcsolve.py index be5b5b3bad..7a14089a29 100644 --- a/qutip/tests/solver/test_mcsolve.py +++ b/qutip/tests/solver/test_mcsolve.py @@ -5,6 +5,7 @@ from qutip.solver.mcsolve import mcsolve, MCSolver from qutip.solver.solver_base import Solver + def _return_constant(t, args): return args['constant'] @@ -51,8 +52,11 @@ def _assert_expect(self, result, expected, tol): for test, expected_part in zip(result.expect, expected): np.testing.assert_allclose(test, expected_part, rtol=tol) - def test_states_and_expect(self, hamiltonian, args, c_ops, expected, tol): - options = {"store_states": True, "map": "serial"} + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_states_and_expect(self, hamiltonian, args, c_ops, expected, tol, + improved_sampling): + options = {"store_states": True, "map": "serial", + "improved_sampling": improved_sampling} result = mcsolve(hamiltonian, self.state, self.times, args=args, c_ops=c_ops, e_ops=self.e_ops, ntraj=self.ntraj, options=options, target_tol=0.05) @@ -73,7 +77,7 @@ def pytest_generate_tests(self, metafunc): (self.h, "Qobj"), ([self.h], "list"), (qutip.QobjEvo([self.h, [self.h, _return_constant]], - args= {'constant': 0}), "QobjEvo"), + args={'constant': 0}), "QobjEvo"), (callable_qobj(self.h), "callable"), ] cases = 
[pytest.param(hamiltonian, {}, [], [expect], tol, id=id) @@ -88,17 +92,23 @@ def pytest_generate_tests(self, metafunc): # runtimes shorter. The known-good cases are still tested in the other # test cases, this is just testing the single-output behaviour. - def test_states_only(self, hamiltonian, args, c_ops, expected, tol): - options = {"store_states": True, "map": "serial"} + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_states_only(self, hamiltonian, args, c_ops, expected, tol, + improved_sampling): + options = {"store_states": True, "map": "serial", + "improved_sampling": improved_sampling} result = mcsolve(hamiltonian, self.state, self.times, args=args, c_ops=c_ops, e_ops=[], ntraj=self.ntraj, options=options) self._assert_states(result, expected, tol) - def test_expect_only(self, hamiltonian, args, c_ops, expected, tol): + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_expect_only(self, hamiltonian, args, c_ops, expected, tol, + improved_sampling): + options = {'map': 'serial', "improved_sampling": improved_sampling} result = mcsolve(hamiltonian, self.state, self.times, args=args, c_ops=c_ops, e_ops=self.e_ops, ntraj=self.ntraj, - options={'map': 'serial'}) + options=options) self._assert_expect(result, expected, tol) @@ -168,8 +178,9 @@ def test_stored_collapse_operators_and_times(): assert all(col in [0, 1] for col in result.col_which[0]) -@pytest.mark.parametrize('keep_runs_results', [True, False]) -def test_states_outputs(keep_runs_results): +@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("keep_runs_results", [True, False]) +def test_states_outputs(keep_runs_results, improved_sampling): # We're just testing the output value, so it's important whether certain # things are complex or real, but not what the magnitudes of constants are. 
focks = 5 @@ -183,7 +194,8 @@ def test_states_outputs(keep_runs_results): c_ops = [a, sm] data = mcsolve(H, state, times, c_ops, ntraj=ntraj, options={"keep_runs_results": keep_runs_results, - 'map': 'serial'}) + 'map': 'serial', + "improved_sampling": improved_sampling}) assert len(data.average_states) == len(times) assert isinstance(data.average_states[0], qutip.Qobj) @@ -197,7 +209,7 @@ def test_states_outputs(keep_runs_results): assert isinstance(data.photocurrent[0][1], float) assert isinstance(data.photocurrent[1][1], float) assert (np.array(data.runs_photocurrent).shape - == (ntraj, len(c_ops), len(times)-1)) + == (ntraj, len(c_ops), len(times)-1)) if keep_runs_results: assert len(data.runs_states) == ntraj @@ -224,8 +236,9 @@ def test_states_outputs(keep_runs_results): assert data.stats['end_condition'] == "ntraj reached" -@pytest.mark.parametrize('keep_runs_results', [True, False]) -def test_expectation_outputs(keep_runs_results): +@pytest.mark.parametrize("improved_sampling", [True, False]) +@pytest.mark.parametrize("keep_runs_results", [True, False]) +def test_expectation_outputs(keep_runs_results, improved_sampling): # We're just testing the output value, so it's important whether certain # things are complex or real, but not what the magnitudes of constants are. 
focks = 5 @@ -240,7 +253,8 @@ def test_expectation_outputs(keep_runs_results): e_ops = [a.dag()*a, sm.dag()*sm, a] data = mcsolve(H, state, times, c_ops, e_ops, ntraj=ntraj, options={"keep_runs_results": keep_runs_results, - 'map': 'serial'}) + 'map': 'serial', + "improved_sampling": improved_sampling}) assert isinstance(data.average_expect[0][1], float) assert isinstance(data.average_expect[1][1], float) assert isinstance(data.average_expect[2][1], complex) @@ -283,9 +297,11 @@ class TestSeeds: np.sqrt(2*dampings[2]) * qutip.tensor(qutip.qeye(sizes[:2]), a[2]), ] - def test_seeds_can_be_reused(self): + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_seeds_can_be_reused(self, improved_sampling): args = (self.H, self.state, self.times) - kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj} + kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj, + "options": {"improved_sampling": improved_sampling}} first = mcsolve(*args, **kwargs) second = mcsolve(*args, seeds=first.seeds, **kwargs) for first_t, second_t in zip(first.col_times, second.col_times): @@ -293,9 +309,11 @@ def test_seeds_can_be_reused(self): for first_w, second_w in zip(first.col_which, second.col_which): np.testing.assert_equal(first_w, second_w) - def test_seeds_are_not_reused_by_default(self): + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_seeds_are_not_reused_by_default(self, improved_sampling): args = (self.H, self.state, self.times) - kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj} + kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj, + "options": {"improved_sampling": improved_sampling}} first = mcsolve(*args, **kwargs) second = mcsolve(*args, **kwargs) assert not all(np.array_equal(first_t, second_t) @@ -305,25 +323,32 @@ def test_seeds_are_not_reused_by_default(self): for first_w, second_w in zip(first.col_which, second.col_which)) - @pytest.mark.parametrize('seed', [1, np.random.SeedSequence(2)]) - def test_seed_type(self, seed): + 
@pytest.mark.parametrize("seed", [1, np.random.SeedSequence(2)]) + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_seed_type(self, seed, improved_sampling): args = (self.H, self.state, self.times) - kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj} + kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj, + "options": {"improved_sampling": improved_sampling}} first = mcsolve(*args, seeds=copy(seed), **kwargs) second = mcsolve(*args, seeds=copy(seed), **kwargs) for f_seed, s_seed in zip(first.seeds, second.seeds): assert f_seed.state == s_seed.state - def test_bad_seed(self): + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_bad_seed(self, improved_sampling): args = (self.H, self.state, self.times) - kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj} + kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj, + "options": {"improved_sampling": improved_sampling}} with pytest.raises(ValueError): first = mcsolve(*args, seeds=[1], **kwargs) - def test_generator(self): + @pytest.mark.parametrize("improved_sampling", [True, False]) + def test_generator(self, improved_sampling): args = (self.H, self.state, self.times) kwargs = {'c_ops': self.c_ops, 'ntraj': self.ntraj} - first = mcsolve(*args, seeds=1, options={'bitgenerator': 'MT19937'}, + first = mcsolve(*args, seeds=1, + options={'bitgenerator': 'MT19937', + "improved_sampling": improved_sampling}, **kwargs) second = mcsolve(*args, seeds=1, **kwargs) for f_seed, s_seed in zip(first.seeds, second.seeds): @@ -341,14 +366,15 @@ def test_stepping(self): H = qutip.num(size) mcsolver = MCSolver(H, a, options={'map': 'serial'}) mcsolver.start(qutip.basis(size, size-1), 0, seed=5) - state_1 = mcsolver.step(1, args={'alpha':1}) + state_1 = mcsolver.step(1, args={'alpha': 1}) mcsolver.start(qutip.basis(size, size-1), 0, seed=5) - state_2 = mcsolver.step(1, args={'alpha':1}) + state_2 = mcsolver.step(1, args={'alpha': 1}) assert state_1 == state_2 -def test_timeout(): 
+@pytest.mark.parametrize("improved_sampling", [True, False]) +def test_timeout(improved_sampling): size = 10 ntraj = 1000 a = qutip.destroy(size) @@ -360,11 +386,14 @@ def test_timeout(): c_ops = np.sqrt(coupling * (n_th + 1)) * a e_ops = [qutip.num(size)] res = mcsolve(H, state, times, c_ops, e_ops, ntraj=ntraj, - options={'map': 'serial'}, timeout=1e-6) + options={'map': 'serial', + "improved_sampling": improved_sampling}, + timeout=1e-6) assert res.stats['end_condition'] == 'timeout' -def test_super_H(): +@pytest.mark.parametrize("improved_sampling", [True, False]) +def test_super_H(improved_sampling): size = 10 ntraj = 1000 a = qutip.destroy(size) @@ -379,13 +408,15 @@ def test_super_H(): mc_expected = mcsolve(H, state, times, c_ops, e_ops, ntraj=ntraj, target_tol=0.1, options={'map': 'serial'}) mc = mcsolve(qutip.liouvillian(H), state, times, c_ops, e_ops, ntraj=ntraj, - target_tol=0.1, options={'map': 'serial'}) + target_tol=0.1, + options={'map': 'serial', + "improved_sampling": improved_sampling}) np.testing.assert_allclose(mc_expected.expect[0], mc.expect[0], atol=0.5) def test_MCSolver_run(): size = 10 - a = qutip.QobjEvo([qutip.destroy(size), 'coupling'], args={'coupling':0}) + a = qutip.QobjEvo([qutip.destroy(size), 'coupling'], args={'coupling': 0}) H = qutip.num(size) solver = MCSolver(H, a) solver.options = {'store_final_state': True} @@ -400,12 +431,12 @@ def test_MCSolver_run(): e_ops=[qutip.qeye(size)], args={'coupling': 1}, ntraj=1000, target_tol=0.1 ) - assert res.num_trajectories == 1001 + assert 1 < res.num_trajectories < 1001 def test_MCSolver_stepping(): size = 10 - a = qutip.QobjEvo([qutip.destroy(size), 'coupling'], args={'coupling':0}) + a = qutip.QobjEvo([qutip.destroy(size), 'coupling'], args={'coupling': 0}) H = qutip.num(size) solver = MCSolver(H, a) solver.start(qutip.basis(size, size-1), 0, seed=0) diff --git a/qutip/tests/solver/test_mesolve.py b/qutip/tests/solver/test_mesolve.py index 6f6bbab6c7..7a72a02f64 100644 --- 
a/qutip/tests/solver/test_mesolve.py +++ b/qutip/tests/solver/test_mesolve.py @@ -7,6 +7,11 @@ import pickle import pytest +# Deactivate warning for test without cython +from qutip.core.coefficient import WARN_MISSING_MODULE +WARN_MISSING_MODULE[0] = 0 + + all_ode_method = [ method for method, integrator in MESolver.avail_integrators().items() if integrator.support_time_dependant diff --git a/qutip/tests/solver/test_nm_mcsolve.py b/qutip/tests/solver/test_nm_mcsolve.py new file mode 100644 index 0000000000..e47b638e4b --- /dev/null +++ b/qutip/tests/solver/test_nm_mcsolve.py @@ -0,0 +1,647 @@ +from copy import copy + +import numpy as np +import pytest + +import qutip +from qutip.solver.nm_mcsolve import nm_mcsolve, NonMarkovianMCSolver + + +def test_agreement_with_mesolve_for_negative_rates(): + """ + A rough test that nm_mcsolve agress with mesolve in the + presence of negative rates. + """ + times = np.linspace(0, 0.25, 51) + psi0 = qutip.basis(2, 1) + a0 = qutip.destroy(2) + H = a0.dag() * a0 + e_ops = [ + a0.dag() * a0, + a0 * a0.dag(), + ] + + # Rate functions + kappa = 1.0 / 0.129 + nth = 0.063 + args = { + "kappa": kappa, + "nth": nth, + } + gamma1 = "kappa * nth" + gamma2 = "kappa * (nth+1) + 12 * exp(-2*t**3) * (-sin(15*t)**2)" + + # nm_mcsolve integration + ops_and_rates = [ + [a0.dag(), gamma1], + [a0, gamma2], + ] + mc_result = nm_mcsolve( + H, psi0, times, ops_and_rates, + args=args, e_ops=e_ops, ntraj=2000, + options={"rtol": 1e-8}, + seeds=0, + ) + + # mesolve integration for comparison + d_ops = [ + [qutip.lindblad_dissipator(a0.dag(), a0.dag()), gamma1], + [qutip.lindblad_dissipator(a0, a0), gamma2], + ] + me_result = qutip.mesolve( + H, psi0, times, d_ops, + args=args, e_ops=e_ops, + ) + + np.testing.assert_allclose(mc_result.trace, [1.] 
* len(times), rtol=0.25) + np.testing.assert_allclose( + me_result.expect[0], mc_result.expect[0], rtol=0.25, + ) + np.testing.assert_allclose( + me_result.expect[1], mc_result.expect[1], rtol=0.25, + ) + + +def test_completeness_relation(): + """ + NonMarkovianMCSolver guarantees that the operators in solver.ops + satisfy the completeness relation ``sum(Li.dag() * Li) = a*I`` where a is a + constant and I the identity. + """ + # some arbitrary H + H = qutip.sigmaz() + ground_state = qutip.basis(2, 1) + # test using all combinations of the following operators + from itertools import combinations + all_ops_and_rates = [ + (qutip.sigmap(), 1), + (qutip.sigmam(), 1), + (qutip.sigmaz(), 1), + (1j * qutip.qeye(2), 1), + ] + # empty ops_and_rates not allowed + for n in range(1, len(all_ops_and_rates) + 1): + for ops_and_rates in combinations(all_ops_and_rates, n): + solver = NonMarkovianMCSolver(H, ops_and_rates) + op = sum((L.dag() * L) for L in solver.ops) + a_candidate = qutip.expect(op, ground_state) + assert op == a_candidate * qutip.qeye(op.dims[0]) + + +def test_solver_pickleable(): + """ + NonMarkovianMCSolver objects must be pickleable for multiprocessing. 
+ """ + import pickle + # arbitrary Hamiltonian and Lindblad operator + H = qutip.sigmaz() + L = qutip.sigmam() + # try various types of coefficient functions + rates = [ + 0, + _return_constant, + "sin(t)", + ] + args = [ + None, + {'constant': 1}, + None, + ] + for rate, arg in zip(rates, args): + solver = NonMarkovianMCSolver(H, [(L, rate)], args=arg) + jar = pickle.dumps(solver) + + loaded_solver = pickle.loads(jar) + assert len(solver.ops) == len(loaded_solver.ops) + for i in range(len(solver.ops)): + assert solver.ops[i] == loaded_solver.ops[i] + _assert_functions_equal(lambda t: solver.rate(t, i), + lambda t: loaded_solver.rate(t, i)) + _assert_functions_equal(solver.rate_shift, loaded_solver.rate_shift) + + +def _assert_functions_equal(f1, f2): + times = np.linspace(0, 1) + values1 = [f1(t) for t in times] + values2 = [f2(t) for t in times] + np.testing.assert_allclose(values1, values2) + + +def _return_constant(t, args): + return args['constant'] + + +def _return_decay(t, args): + return args['constant'] * np.exp(-args['rate'] * t) + + +class callable_qobj: + def __init__(self, oper, coeff=None): + self.oper = oper + self.coeff = coeff + + def __call__(self, t, args): + if self.coeff is not None: + return self.oper * self.coeff(t, args) + return self.oper + + +@pytest.mark.usefixtures("in_temporary_directory") +class StatesAndExpectOutputCase: + """ + Mixin class to test the states and expectation values from nm_mcsolve. 
+ """ + size = 10 + h = qutip.num(size) + state = qutip.basis(size, size-1) + times = np.linspace(0, 1, 101) + e_ops = [qutip.num(size)] + ntraj = 2000 + + def _assert_states(self, result, expected, tol): + assert hasattr(result, 'states') + assert len(result.states) == len(self.times) + assert len(self.e_ops) == len(expected) + for test_operator, expected_part in zip(self.e_ops, expected): + test = qutip.expect(test_operator, result.states) + np.testing.assert_allclose(test, expected_part, rtol=tol) + + def _assert_expect(self, result, expected, tol): + assert hasattr(result, 'expect') + assert len(result.expect) == len(self.e_ops) + assert len(self.e_ops) == len(expected) + for test, expected_part in zip(result.expect, expected): + np.testing.assert_allclose(test, expected_part, rtol=tol) + + def test_states_and_expect( + self, hamiltonian, args, ops_and_rates, expected, tol + ): + options = {"store_states": True, "map": "serial"} + result = nm_mcsolve( + hamiltonian, self.state, self.times, args=args, + ops_and_rates=ops_and_rates, + e_ops=self.e_ops, ntraj=self.ntraj, options=options, + target_tol=0.05, + ) + self._assert_expect(result, expected, tol) + self._assert_states(result, expected, tol) + + +class TestNoCollapse(StatesAndExpectOutputCase): + """ + Test that nm_mcsolve correctly solves the system when there is a constant + Hamiltonian and no collapses. 
+ """ + + def pytest_generate_tests(self, metafunc): + tol = 1e-8 + expect = ( + qutip.expect(self.e_ops[0], self.state) + * np.ones_like(self.times) + ) + hamiltonian_types = [ + (self.h, "Qobj"), + ([self.h], "list"), + (qutip.QobjEvo( + [self.h, [self.h, _return_constant]], + args={'constant': 0}), "QobjEvo"), + (callable_qobj(self.h), "callable"), + ] + cases = [ + pytest.param(hamiltonian, {}, [], [expect], tol, id=id) + for hamiltonian, id in hamiltonian_types + ] + metafunc.parametrize([ + 'hamiltonian', 'args', 'ops_and_rates', 'expected', 'tol', + ], cases) + + # Previously the "states_only" and "expect_only" tests were mixed in to + # every other test case. We move them out into the simplest set so that + # their behaviour remains tested, but isn't repeated as often to keep test + # runtimes shorter. The known-good cases are still tested in the other + # test cases, this is just testing the single-output behaviour. + + def test_states_only( + self, hamiltonian, args, ops_and_rates, expected, tol + ): + options = {"store_states": True, "map": "serial"} + result = nm_mcsolve( + hamiltonian, self.state, self.times, args=args, + ops_and_rates=ops_and_rates, + e_ops=[], ntraj=self.ntraj, options=options, + ) + self._assert_states(result, expected, tol) + + def test_expect_only( + self, hamiltonian, args, ops_and_rates, expected, tol + ): + result = nm_mcsolve( + hamiltonian, self.state, self.times, args=args, + ops_and_rates=ops_and_rates, + e_ops=self.e_ops, ntraj=self.ntraj, options={'map': 'serial'}, + ) + self._assert_expect(result, expected, tol) + + +class TestConstantCollapse(StatesAndExpectOutputCase): + """ + Test that nm_mcsolve correctly solves the system when the + collapse rates are constant. 
+ """ + + def pytest_generate_tests(self, metafunc): + tol = 0.25 + rate = 0.2 + expect = ( + qutip.expect(self.e_ops[0], self.state) + * np.exp(-rate * self.times) + ) + op = qutip.destroy(self.size) + op_and_rate_types = [ + ([op, rate], {}, "constant"), + ([op, '1 * {}'.format(rate)], {}, "string"), + ([op, lambda t: rate], {}, "function"), + ([op, lambda t, w: rate], {"w": 1.0}, "function_with_args"), + ] + cases = [ + pytest.param(self.h, args, [op_and_rate], [expect], tol, id=id) + for op_and_rate, args, id in op_and_rate_types + ] + metafunc.parametrize([ + 'hamiltonian', 'args', 'ops_and_rates', 'expected', 'tol', + ], cases) + + +class TestTimeDependentCollapse(StatesAndExpectOutputCase): + """ + Test that nm_mcsolve correctly solves the system when the + collapse rates are time-dependent. + """ + + def pytest_generate_tests(self, metafunc): + tol = 0.25 + coupling = 0.2 + expect = ( + qutip.expect(self.e_ops[0], self.state) + * np.exp(-coupling * (1 - np.exp(-self.times))) + ) + op = qutip.destroy(self.size) + rate_args = {'constant': coupling, 'rate': 0.5} + rate_string = 'sqrt({} * exp(-t))'.format(coupling) + op_and_rate_types = [ + ([op, rate_string], {}, "string"), + ([op, _return_decay], rate_args, "function"), + ] + cases = [ + pytest.param(self.h, args, [op_and_rate], [expect], tol, id=id) + for op_and_rate, args, id in op_and_rate_types + ] + metafunc.parametrize([ + 'hamiltonian', 'args', 'ops_and_rates', 'expected', 'tol', + ], cases) + + +def test_stored_collapse_operators_and_times(): + """ + Test that the output contains information on which collapses happened and + at what times, and make sure that this information makes sense. 
+ """ + size = 10 + a = qutip.destroy(size) + H = qutip.num(size) + state = qutip.basis(size, size-1) + times = np.linspace(0, 10, 100) + ops_and_rates = [ + (a, 1.0), + (a, 1.0), + ] + result = nm_mcsolve( + H, state, times, ops_and_rates, ntraj=3, + options={"map": "serial"}, + ) + assert len(result.col_times[0]) > 0 + assert len(result.col_which) == len(result.col_times) + assert all(col in [0, 1] for col in result.col_which[0]) + + +@pytest.mark.parametrize('keep_runs_results', [True, False]) +def test_states_outputs(keep_runs_results): + # We're just testing the output value, so it's important whether certain + # things are complex or real, but not what the magnitudes of constants are. + focks = 5 + ntraj = 5 + a = qutip.tensor(qutip.destroy(focks), qutip.qeye(2)) + sm = qutip.tensor(qutip.qeye(focks), qutip.sigmam()) + H = 1j*a.dag()*sm + a + H = H + H.dag() + state = qutip.basis([focks, 2], [0, 1]) + times = np.linspace(0, 10, 21) + ops_and_rates = [ + (a, 1.0), + (sm, 1.0), + ] + # nm_mcsolve adds one more operator to complete the operator set + # which results in the len(ops_and_rates) + 1 below: + total_ops = len(ops_and_rates) + 1 + data = nm_mcsolve( + H, state, times, ops_and_rates, ntraj=ntraj, + options={ + "keep_runs_results": keep_runs_results, + "map": "serial", + }, + ) + + assert len(data.average_states) == len(times) + assert isinstance(data.average_states[0], qutip.Qobj) + assert data.average_states[0].norm() == pytest.approx(1.) + assert data.average_states[0].isoper + + assert isinstance(data.average_final_state, qutip.Qobj) + assert data.average_final_state.norm() == pytest.approx(1.) 
+ assert data.average_final_state.isoper + + assert isinstance(data.photocurrent[0][1], float) + assert isinstance(data.photocurrent[1][1], float) + assert ( + np.array(data.runs_photocurrent).shape + == (ntraj, total_ops, len(times)-1) + ) + + if keep_runs_results: + assert len(data.runs_states) == ntraj + assert len(data.runs_states[0]) == len(times) + assert isinstance(data.runs_states[0][0], qutip.Qobj) + assert data.runs_states[0][0].norm() == pytest.approx(1.) + assert data.runs_states[0][0].isoper + + assert len(data.runs_final_states) == ntraj + assert isinstance(data.runs_final_states[0], qutip.Qobj) + assert data.runs_final_states[0].norm() == pytest.approx(1.) + assert data.runs_final_states[0].isoper + + assert isinstance(data.steady_state(), qutip.Qobj) + assert data.steady_state().norm() == pytest.approx(1.) + assert data.steady_state().isoper + + np.testing.assert_allclose(times, data.times) + assert data.num_trajectories == ntraj + assert len(data.e_ops) == 0 + assert data.stats["num_collapse"] == total_ops + assert len(data.col_times) == ntraj + assert np.max(np.hstack(data.col_which)) <= total_ops + assert data.stats['end_condition'] == "ntraj reached" + + +@pytest.mark.parametrize('keep_runs_results', [True, False]) +def test_expectation_outputs(keep_runs_results): + # We're just testing the output value, so it's important whether certain + # things are complex or real, but not what the magnitudes of constants are. 
+ focks = 5 + ntraj = 5 + a = qutip.tensor(qutip.destroy(focks), qutip.qeye(2)) + sm = qutip.tensor(qutip.qeye(focks), qutip.sigmam()) + H = 1j*a.dag()*sm + a + H = H + H.dag() + state = qutip.basis([focks, 2], [0, 1]) + times = np.linspace(0, 10, 5) + ops_and_rates = [ + (a, 1.0), + (sm, 1.0), + ] + # nm_mcsolve adds one more operator to complete the operator set + # which results in the len(ops_and_rates) + 1 below: + total_ops = len(ops_and_rates) + 1 + e_ops = [a.dag()*a, sm.dag()*sm, a] + data = nm_mcsolve( + H, state, times, ops_and_rates, e_ops, ntraj=ntraj, + options={ + "keep_runs_results": keep_runs_results, + "map": "serial", + }, + ) + assert isinstance(data.average_expect[0][1], float) + assert isinstance(data.average_expect[1][1], float) + assert isinstance(data.average_expect[2][1], complex) + assert isinstance(data.std_expect[0][1], float) + assert isinstance(data.std_expect[1][1], float) + assert isinstance(data.std_expect[2][1], float) + if keep_runs_results: + assert len(data.runs_expect) == len(e_ops) + assert len(data.runs_expect[0]) == ntraj + assert isinstance(data.runs_expect[0][0][1], float) + assert isinstance(data.runs_expect[1][0][1], float) + assert isinstance(data.runs_expect[2][0][1], complex) + assert isinstance(data.photocurrent[0][0], float) + assert isinstance(data.photocurrent[1][0], float) + assert (np.array(data.runs_photocurrent).shape + == (ntraj, total_ops, len(times)-1)) + np.testing.assert_allclose(times, data.times) + assert data.num_trajectories == ntraj + assert len(data.e_ops) == len(e_ops) + assert data.stats["num_collapse"] == total_ops + assert len(data.col_times) == ntraj + assert np.max(np.hstack(data.col_which)) <= total_ops + assert data.stats['end_condition'] == "ntraj reached" + + +class TestSeeds: + sizes = [6, 6, 6] + dampings = [0.1, 0.4, 0.1] + ntraj = 25 # Big enough to ensure there are differences without being slow + a = [qutip.destroy(size) for size in sizes] + H = 1j * (qutip.tensor(a[0], a[1].dag(), 
a[2].dag()) + - qutip.tensor(a[0].dag(), a[1], a[2])) + state = qutip.tensor(qutip.coherent(sizes[0], np.sqrt(2)), + qutip.basis(sizes[1:], [0, 0])) + times = np.linspace(0, 10, 2) + ops_and_rates = [ + (qutip.tensor(a[0], qutip.qeye(sizes[1:])), 2 * dampings[0]), + ( + qutip.tensor(qutip.qeye(sizes[0]), a[1], qutip.qeye(sizes[2])), + 2 * dampings[1], + ), + (qutip.tensor(qutip.qeye(sizes[:2]), a[2]), 2 * dampings[2]), + ] + + def test_seeds_can_be_reused(self): + args = (self.H, self.state, self.times) + kwargs = {'ops_and_rates': self.ops_and_rates, 'ntraj': self.ntraj} + first = nm_mcsolve(*args, **kwargs) + second = nm_mcsolve(*args, seeds=first.seeds, **kwargs) + for first_t, second_t in zip(first.col_times, second.col_times): + np.testing.assert_equal(first_t, second_t) + for first_w, second_w in zip(first.col_which, second.col_which): + np.testing.assert_equal(first_w, second_w) + + def test_seeds_are_not_reused_by_default(self): + args = (self.H, self.state, self.times) + kwargs = {'ops_and_rates': self.ops_and_rates, 'ntraj': self.ntraj} + first = nm_mcsolve(*args, **kwargs) + second = nm_mcsolve(*args, **kwargs) + assert not all(np.array_equal(first_t, second_t) + for first_t, second_t in zip(first.col_times, + second.col_times)) + assert not all(np.array_equal(first_w, second_w) + for first_w, second_w in zip(first.col_which, + second.col_which)) + + @pytest.mark.parametrize('seed', [1, np.random.SeedSequence(2)]) + def test_seed_type(self, seed): + args = (self.H, self.state, self.times) + kwargs = {'ops_and_rates': self.ops_and_rates, 'ntraj': self.ntraj} + first = nm_mcsolve(*args, seeds=copy(seed), **kwargs) + second = nm_mcsolve(*args, seeds=copy(seed), **kwargs) + for f_seed, s_seed in zip(first.seeds, second.seeds): + assert f_seed.state == s_seed.state + + def test_bad_seed(self): + args = (self.H, self.state, self.times) + kwargs = {'ops_and_rates': self.ops_and_rates, 'ntraj': self.ntraj} + with pytest.raises(ValueError): + nm_mcsolve(*args, 
seeds=[1], **kwargs) + + def test_generator(self): + args = (self.H, self.state, self.times) + kwargs = {'ops_and_rates': self.ops_and_rates, 'ntraj': self.ntraj} + first = nm_mcsolve( + *args, seeds=1, options={'bitgenerator': 'MT19937'}, + **kwargs, + ) + second = nm_mcsolve(*args, seeds=1, **kwargs) + for f_seed, s_seed in zip(first.seeds, second.seeds): + assert f_seed.state == s_seed.state + assert not all(np.array_equal(first_t, second_t) + for first_t, second_t in zip(first.col_times, + second.col_times)) + assert not all(np.array_equal(first_w, second_w) + for first_w, second_w in zip(first.col_which, + second.col_which)) + + def test_stepping(self): + size = 10 + a = qutip.destroy(size) + H = qutip.num(size) + ops_and_rates = [(a, 'alpha')] + mcsolver = NonMarkovianMCSolver( + H, ops_and_rates, args={'alpha': 0}, options={'map': 'serial'}, + ) + mcsolver.start(qutip.basis(size, size-1), 0, seed=5) + state_1 = mcsolver.step(1, args={'alpha': 1}) + + mcsolver.start(qutip.basis(size, size-1), 0, seed=5) + state_2 = mcsolver.step(1, args={'alpha': 1}) + assert state_1 == state_2 + + +def test_timeout(): + size = 10 + ntraj = 1000 + a = qutip.destroy(size) + H = qutip.num(size) + state = qutip.basis(size, size-1) + times = np.linspace(0, 1.0, 100) + coupling = 0.5 + n_th = 0.05 + ops_and_rates = [ + (a, np.sqrt(coupling * (n_th + 1))) + ] + e_ops = [qutip.num(size)] + res = nm_mcsolve( + H, state, times, ops_and_rates, e_ops, ntraj=ntraj, + options={'map': 'serial'}, timeout=1e-6, + ) + assert res.stats['end_condition'] == 'timeout' + + +def test_super_H(): + size = 10 + ntraj = 1000 + a = qutip.destroy(size) + H = qutip.num(size) + state = qutip.basis(size, size-1) + times = np.linspace(0, 1.0, 100) + # Arbitrary coupling and bath temperature. 
+ coupling = 0.5 + n_th = 0.05 + ops_and_rates = [ + (a, np.sqrt(coupling * (n_th + 1))) + ] + e_ops = [qutip.num(size)] + mc_expected = nm_mcsolve( + H, state, times, ops_and_rates, e_ops, ntraj=ntraj, + target_tol=0.1, options={'map': 'serial'}, + ) + mc = nm_mcsolve( + qutip.liouvillian(H), state, times, ops_and_rates, e_ops, ntraj=ntraj, + target_tol=0.1, options={'map': 'serial'}, + ) + np.testing.assert_allclose(mc_expected.expect[0], mc.expect[0], atol=0.5) + + +def test_NonMarkovianMCSolver_run(): + size = 10 + ops_and_rates = [ + (qutip.destroy(size), 'coupling') + ] + args = {'coupling': 0} + H = qutip.num(size) + solver = NonMarkovianMCSolver(H, ops_and_rates, args=args) + solver.options = {'store_final_state': True} + res = solver.run(qutip.basis(size, size-1), np.linspace(0, 5.0, 11), + e_ops=[qutip.qeye(size)], args={'coupling': 1}) + assert res.final_state is not None + assert len(res.collapse[0]) != 0 + assert res.num_trajectories == 1 + np.testing.assert_allclose(res.expect[0], np.ones(11)) + res += solver.run( + qutip.basis(size, size-1), np.linspace(0, 5.0, 11), + e_ops=[qutip.qeye(size)], args={'coupling': 1}, + ntraj=1000, target_tol=0.1 + ) + assert 1 < res.num_trajectories < 1001 + + +def test_NonMarkovianMCSolver_stepping(): + size = 10 + ops_and_rates = [ + (qutip.destroy(size), 'coupling') + ] + args = {'coupling': 0} + H = qutip.num(size) + solver = NonMarkovianMCSolver(H, ops_and_rates, args=args) + solver.start(qutip.basis(size, size-1), 0, seed=0) + state = solver.step(1) + assert qutip.expect(qutip.qeye(size), state) == pytest.approx(1) + assert qutip.expect(qutip.num(size), state) == pytest.approx(size - 1) + assert state.isoper + assert solver.rate_shift(1) == 0 + assert solver.rate(1, 0) == 0 + assert solver.sqrt_shifted_rate(1, 0) == 0 + state = solver.step(5, args={'coupling': 5}) + assert qutip.expect(qutip.qeye(size), state) == pytest.approx(1) + assert qutip.expect(qutip.num(size), state) <= size - 1 + assert state.isoper + 
assert solver.rate_shift(5) == 0 + assert solver.rate(5, 0) == 5 + assert solver.sqrt_shifted_rate(5, 0) == np.sqrt(5) + + +# Defined in module-scope so it's pickleable. +def _dynamic(t, args): + return 0 if args["collapse"] else 1 + + +@pytest.mark.xfail(reason="current limitation of NonMarkovianMCSolver") +def test_dynamic_arguments(): + """Test dynamically updated arguments are usable.""" + size = 5 + a = qutip.destroy(size) + H = qutip.num(size) + times = np.linspace(0, 1, 11) + state = qutip.basis(size, 2) + + ops_and_rates = [[a, _dynamic], [a.dag(), _dynamic]] + mc = nm_mcsolve( + H, state, times, ops_and_rates, ntraj=25, args={"collapse": []}, + ) + assert all(len(collapses) <= 1 for collapses in mc.col_which) diff --git a/qutip/tests/solver/test_parallel.py b/qutip/tests/solver/test_parallel.py index 416369175e..bb53496edb 100644 --- a/qutip/tests/solver/test_parallel.py +++ b/qutip/tests/solver/test_parallel.py @@ -119,3 +119,23 @@ def test_map_store_error(map): assert result == n else: assert result is None + + +@pytest.mark.parametrize('map', [ + pytest.param(parallel_map, id='parallel_map'), + pytest.param(loky_pmap, id='loky_pmap'), + pytest.param(serial_map, id='serial_map'), +]) +def test_map_early_end(map): + if map is loky_pmap: + loky = pytest.importorskip("loky") + + results = [] + + def reduce_func(result): + results.append(result) + return 5 - len(results) + + map(_func1, range(100), reduce_func=reduce_func) + + assert len(results) < 100 diff --git a/qutip/tests/solver/test_propagator.py b/qutip/tests/solver/test_propagator.py index 2d36d9f977..5119fa0d6b 100644 --- a/qutip/tests/solver/test_propagator.py +++ b/qutip/tests/solver/test_propagator.py @@ -1,6 +1,7 @@ import numpy as np from qutip import (destroy, propagator, Propagator, propagator_steadystate, - steadystate, tensor, qeye, basis, QobjEvo, sesolve) + steadystate, tensor, qeye, basis, QobjEvo, sesolve, + liouvillian) import qutip import pytest from qutip.solver.brmesolve import 
BRSolver @@ -87,6 +88,13 @@ def testPropHDims(): assert U.dims == H.dims +def testPropHSuper(): + "Propagator: preserve super_oper dims" + L = liouvillian(qeye(2) & qeye(2), [destroy(2) & destroy(2)]) + U = propagator(L, 1) + assert U.dims == L.dims + + def testPropEvo(): a = destroy(5) H = a.dag()*a diff --git a/qutip/tests/solve/test_qubit_evolution.py b/qutip/tests/solver/test_qubit_evolution.py similarity index 100% rename from qutip/tests/solve/test_qubit_evolution.py rename to qutip/tests/solver/test_qubit_evolution.py diff --git a/qutip/tests/solver/test_results.py b/qutip/tests/solver/test_results.py index 3aa46982c9..a4830460ef 100644 --- a/qutip/tests/solver/test_results.py +++ b/qutip/tests/solver/test_results.py @@ -170,6 +170,8 @@ def e_op_num(t, state): class TestMultiTrajResult: def _fill_trajectories(self, multiresult, N, ntraj, collapse=False, noise=0, dm=False): + # Fix the seed to avoid failing due to bad luck + np.random.seed(1) for _ in range(ntraj): result = Result(multiresult._raw_ops, multiresult.options) result.collapse = [] diff --git a/qutip/tests/solver/test_scattering.py b/qutip/tests/solver/test_scattering.py index 0ad7257ec4..3fa41c4a53 100644 --- a/qutip/tests/solver/test_scattering.py +++ b/qutip/tests/solver/test_scattering.py @@ -7,7 +7,6 @@ # Contact: benbartlett@stanford.edu import numpy as np -from numpy.testing import assert_, run_module_suite from qutip import create, destroy, basis from qutip.solver.scattering import * diff --git a/qutip/tests/solver/test_sesolve.py b/qutip/tests/solver/test_sesolve.py index 5ca744f973..12e0e23754 100644 --- a/qutip/tests/solver/test_sesolve.py +++ b/qutip/tests/solver/test_sesolve.py @@ -6,6 +6,11 @@ from qutip.solver.krylovsolve import krylovsolve from qutip.solver.solver_base import Solver +# Deactivate warning for test without cython +from qutip.core.coefficient import WARN_MISSING_MODULE +WARN_MISSING_MODULE[0] = 0 + + all_ode_method = [ method for method, integrator in 
SESolver.avail_integrators().items() if integrator.support_time_dependant diff --git a/qutip/tests/solver/test_sode_method.py b/qutip/tests/solver/test_sode_method.py new file mode 100644 index 0000000000..075dbe9a13 --- /dev/null +++ b/qutip/tests/solver/test_sode_method.py @@ -0,0 +1,180 @@ +import numpy as np +from itertools import product +from qutip.core import data as _data +from qutip import (qeye, destroy, QobjEvo, rand_ket, rand_herm, create, Qobj, + operator_to_vector, fock_dm) +import qutip.solver.sode._sode as _sode +import pytest +from qutip.solver.sode.ssystem import ( + SimpleStochasticSystem, StochasticOpenSystem, StochasticClosedSystem +) +from qutip.solver.sode._noise import _Noise +from qutip.solver.stochastic import SMESolver, _StochasticRHS + + +def get_error_order(system, state, method, plot=False, **kw): + stepper = getattr(_sode, method)(system, **kw) + num_runs = 10 + ts = [ + 0.000001, 0.000002, 0.000005, 0.00001, 0.00002, 0.00005, + 0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, + ] + # state = rand_ket(system.dims[0]).data + err = np.zeros(len(ts), dtype=float) + for _ in range(num_runs): + noise = _Noise(0.1, 0.000001, system.num_collapse) + for i, t in enumerate(ts): + out = stepper.run(0, state.copy(), t, noise.dW(t), 1) + target = system.analytic(t, noise.dw(t)[0]) @ state + err[i] += _data.norm.l2(out - target) + + err /= num_runs + if plot: + import matplotlib.pyplot as plt + plt.loglog(ts, err) + return np.polyfit(np.log(ts), np.log(err + 1e-20), 1)[0] + + +def _make_oper(kind, N): + a = destroy(N) + if kind == "qeye": + out = qeye(N) * np.random.rand() + elif kind == "create": + out = a.dag() * np.random.rand() + elif kind == "destroy": + out = a * np.random.rand() + elif kind == "destroy td": + out = [a, lambda t: 1 + t/2] + elif kind == "destroy2": + out = a**2 + elif kind == "herm": + out = rand_herm(N) + elif kind == "herm td": + out = [rand_herm(N), lambda t: -1 + t/2 + t**2] + elif kind == "random": + 
out = Qobj(np.random.randn(N, N) + 1j * np.random.rand(N, N)) + return QobjEvo(out) + + +@pytest.mark.parametrize(["method", "order", "kw"], [ + pytest.param("Euler", 0.5, {}, id="Euler"), + pytest.param("Milstein", 1.0, {}, id="Milstein"), + pytest.param("Milstein_imp", 1.0, {}, id="Milstein implicit"), + pytest.param("Milstein_imp", 1.0, {"imp_method": "inv"}, + id="Milstein implicit inv"), + pytest.param("Platen", 1.0, {}, id="Platen"), + pytest.param("PredCorr", 1.0, {}, id="PredCorr"), + pytest.param("PredCorr", 1.0, {"alpha": 0.5}, id="PredCorr_0.5"), + pytest.param("Taylor15", 1.5, {}, id="Taylor15"), + pytest.param("Explicit15", 1.5, {}, id="Explicit15"), + pytest.param("Taylor15_imp", 1.5, {}, id="Taylor15 implicit"), + pytest.param("Taylor15_imp", 1.5, {"imp_method": "inv"}, + id="Taylor15 implicit inv"), +]) +@pytest.mark.parametrize(['H', 'sc_ops'], [ + pytest.param("qeye", ["destroy"], id='simple'), + pytest.param("destroy", ["destroy"], id='destroy'), + pytest.param("destroy", ["destroy td"], id='sc_ops td'), + pytest.param("herm td", ["qeye"], id='H td'), + pytest.param("qeye", ["qeye", "destroy", "destroy2"], id='3 sc_ops'), +]) +def test_methods(H, sc_ops, method, order, kw): + if kw == {"imp_method": "inv"} and ("td" in H or "td" in sc_ops[0]): + pytest.skip("inverse method only available for constant cases.") + N = 5 + H = _make_oper(H, N) + sc_ops = [_make_oper(op, N) for op in sc_ops] + system = SimpleStochasticSystem(H, sc_ops) + state = rand_ket(N).data + error_order = get_error_order(system, state, method, **kw) + # The first error term of the method is dt**0.5 greater than the solver + # order. 
+ assert (order + 0.35) < error_order + + +def get_error_order_integrator(integrator, ref_integrator, state, plot=False): + ts = [ + 0.000001, 0.000002, 0.000005, 0.00001, 0.00002, 0.00005, + 0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, + ] + # state = rand_ket(system.dims[0]).data + err = np.zeros(len(ts), dtype=float) + for i, t in enumerate(ts): + integrator.options["dt"] = 0.1 + ref_integrator.options["dt"] = 0.1 + integrator.set_state(0., state, np.random.default_rng(0)) + ref_integrator.set_state(0., state, np.random.default_rng(0)) + out = integrator.integrate(t)[1] + target = ref_integrator.integrate(t)[1] + err[i] = _data.norm.l2(out - target) + + if plot: + import matplotlib.pyplot as plt + plt.loglog(ts, err) + if np.all(err < 1e-12): + # Exact match + return np.inf + return np.polyfit(np.log(ts), np.log(err + 1e-20), 1)[0] + + +@pytest.mark.parametrize(["method", "order"], [ + pytest.param("euler", 0.5, id="Euler"), + pytest.param("milstein", 1.0, id="Milstein"), + pytest.param("milstein_imp", 1.0, id="Milstein implicit"), + pytest.param("platen", 1.0, id="Platen"), + pytest.param("pred_corr", 1.0, id="PredCorr"), + pytest.param("rouchon", 1.0, id="rouchon"), + pytest.param("explicit1.5", 1.5, id="Explicit15"), + pytest.param("taylor1.5_imp", 1.5, id="Taylor15 implicit"), +]) +@pytest.mark.parametrize(['H', 'c_ops', 'sc_ops'], [ + pytest.param("qeye", [], ["destroy"], id='simple'), + pytest.param("qeye", ["destroy"], ["destroy"], id='simple + collapse'), + pytest.param("herm", ["destroy", "destroy2"], [], id='2 c_ops'), + pytest.param("herm", [], ["destroy", "destroy2"], id='2 sc_ops'), + pytest.param("herm", ["create", "destroy"], ["destroy", "destroy2"], + id='many terms'), + pytest.param("herm", [], ["random"], id='random'), + pytest.param("herm", ["random"], ["random"], id='complex'), + pytest.param("herm td", ["random"], ["destroy"], id='H td'), + pytest.param("herm", ["random"], ["destroy td"], id='sc_ops td'), +]) +def 
test_open_integrator(method, order, H, c_ops, sc_ops): + N = 5 + H = _make_oper(H, N) + c_ops = [_make_oper(op, N) for op in c_ops] + sc_ops = [_make_oper(op, N) for op in sc_ops] + + rhs = _StochasticRHS(StochasticOpenSystem, H, sc_ops, c_ops, False) + ref_sode = SMESolver.avail_integrators()["taylor1.5"](rhs, {"dt": 0.01}) + sode = SMESolver.avail_integrators()[method](rhs, {"dt": 0.01}) + state = operator_to_vector(fock_dm(5, 3, dtype="Dense")).data + + error_order = get_error_order_integrator(sode, ref_sode, state) + assert (order + 0.35) < error_order + + +@pytest.mark.parametrize(["method", "order"], [ + pytest.param("euler", 0.5, id="Euler"), + pytest.param("platen", 1.0, id="Platen"), + pytest.param("rouchon", 1.0, id="Rouchon"), +]) +@pytest.mark.parametrize(['H', 'sc_ops'], [ + pytest.param("qeye", ["destroy"], id='simple'), + pytest.param("herm", ["destroy", "destroy2"], id='2 sc_ops'), + pytest.param("herm", ["random"], id='random'), + pytest.param("herm td", ["destroy"], id='H td'), + pytest.param("herm", ["destroy td"], id='sc_ops td'), +]) +def test_closed_integrator(method, order, H, sc_ops): + N = 5 + H = _make_oper(H, N) + sc_ops = [_make_oper(op, N) for op in sc_ops] + + rhs = _StochasticRHS(StochasticClosedSystem, H, sc_ops, (), False) + ref_sode = SMESolver.avail_integrators()["explicit1.5"](rhs, {"dt": 0.01}) + sode = SMESolver.avail_integrators()[method](rhs, {"dt": 0.01}) + state = operator_to_vector(fock_dm(5, 3, dtype="Dense")).data + + error_order = get_error_order_integrator(sode, ref_sode, state) + assert (order + 0.35) < error_order diff --git a/qutip/tests/solver/test_stochastic.py b/qutip/tests/solver/test_stochastic.py new file mode 100644 index 0000000000..f6843c997e --- /dev/null +++ b/qutip/tests/solver/test_stochastic.py @@ -0,0 +1,318 @@ +import pytest +import numpy as np +from qutip import ( + mesolve, liouvillian, QobjEvo, spre, spost, + destroy, coherent, qeye, fock_dm, num, basis +) +from qutip.solver.stochastic import 
smesolve, ssesolve, SMESolver, SSESolver +from qutip.core import data as _data + + +def f(t, w): + return w * t + +def _make_system(N, system): + gamma = 0.25 + a = destroy(N) + + if system == "simple": + H = a.dag() * a + sc_ops = [np.sqrt(gamma) * a] + + elif system == "2 c_ops": + H = QobjEvo([a.dag() * a]) + sc_ops = [np.sqrt(gamma) * a, gamma * a * a] + + elif system == "H td": + H = [[a.dag() * a, f]] + sc_ops = [np.sqrt(gamma) * QobjEvo(a)] + + elif system == "complex": + H = [a.dag() * a + a.dag() + a] + sc_ops = [np.sqrt(gamma) * a, gamma * a * a] + + elif system == "c_ops td": + H = [a.dag() * a] + sc_ops = [[np.sqrt(gamma) * a, f]] + + return H, sc_ops + + +@pytest.mark.parametrize("system", [ + "simple", "2 c_ops", "H td", "complex", "c_ops td", +]) +@pytest.mark.parametrize("heterodyne", [True, False]) +def test_smesolve(heterodyne, system): + tol = 0.05 + N = 4 + ntraj = 20 + + H, sc_ops = _make_system(N, system) + c_ops = [destroy(N)] + psi0 = coherent(N, 0.5) + a = destroy(N) + e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] + + times = np.linspace(0, 0.1, 21) + res_ref = mesolve(H, psi0, times, c_ops + sc_ops, e_ops, args={"w": 2}) + + options = { + "store_measurement": False, + "map": "serial", + } + + res = smesolve( + H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, c_ops=c_ops, + ntraj=ntraj, args={"w": 2}, options=options, heterodyne=heterodyne, + seeds=1, + ) + + for idx in range(len(e_ops)): + np.testing.assert_allclose( + res.expect[idx], res_ref.expect[idx], rtol=tol, atol=tol + ) + + +@pytest.mark.parametrize("heterodyne", [True, False]) +@pytest.mark.parametrize("method", SMESolver.avail_integrators().keys()) +def test_smesolve_methods(method, heterodyne): + tol = 0.05 + N = 4 + ntraj = 20 + system = "simple" + + H, sc_ops = _make_system(N, system) + c_ops = [destroy(N)] + psi0 = coherent(N, 0.5) + a = destroy(N) + e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] + + times = np.linspace(0, 0.1, 21) + res_ref = mesolve(H, psi0, 
times, c_ops + sc_ops, e_ops, args={"w": 2}) + + options = { + "store_measurement": True, + "map": "parallel", + "method": method, + } + + res = smesolve( + H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, c_ops=c_ops, + ntraj=ntraj, args={"w": 2}, options=options, heterodyne=heterodyne, + seeds=list(range(ntraj)), + ) + + for idx in range(len(e_ops)): + np.testing.assert_allclose( + res.expect[idx], res_ref.expect[idx], rtol=tol, atol=tol + ) + + assert len(res.measurement) == ntraj + + if heterodyne: + assert all([ + dw.shape == (len(sc_ops), 2, len(times)-1) + for dw in res.dW + ]) + assert all([ + w.shape == (len(sc_ops), 2, len(times)) + for w in res.wiener_process + ]) + assert all([ + m.shape == (len(sc_ops), 2, len(times)-1) + for m in res.measurement + ]) + else: + assert all([ + dw.shape == (len(sc_ops), len(times)-1) + for dw in res.dW + ]) + assert all([ + w.shape == (len(sc_ops), len(times)) + for w in res.wiener_process + ]) + assert all([ + m.shape == (len(sc_ops), len(times)-1) + for m in res.measurement + ]) + + +@pytest.mark.parametrize("system", [ + "simple", "2 c_ops", "H td", "complex", "c_ops td", +]) +@pytest.mark.parametrize("heterodyne", [True, False]) +def test_ssesolve(heterodyne, system): + tol = 0.1 + N = 4 + ntraj = 20 + + H, sc_ops = _make_system(N, system) + psi0 = coherent(N, 0.5) + a = destroy(N) + e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] + + times = np.linspace(0, 0.1, 21) + res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"w": 2}) + + options = { + "map": "serial", + } + + res = ssesolve( + H, psi0, times, sc_ops, e_ops=e_ops, + ntraj=ntraj, args={"w": 2}, options=options, heterodyne=heterodyne, + seeds=list(range(ntraj)), + ) + + for idx in range(len(e_ops)): + np.testing.assert_allclose( + res.expect[idx], res_ref.expect[idx], rtol=tol, atol=tol + ) + + assert res.measurement is None + assert res.wiener_process is None + assert res.dW is None + + +@pytest.mark.parametrize("heterodyne", [True, False]) 
+@pytest.mark.parametrize("method", SSESolver.avail_integrators().keys()) +def test_ssesolve_method(method, heterodyne): + "Stochastic: ssesolve: check all integrator methods" + tol = 0.1 + N = 4 + ntraj = 20 + system = "simple" + + H, sc_ops = _make_system(N, system) + psi0 = coherent(N, 0.5) + a = destroy(N) + e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] + + times = np.linspace(0, 0.1, 21) + res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"w": 2}) + + options = { + "store_measurement": True, + "map": "parallel", + "method": method, + "keep_runs_results": True, + } + + res = ssesolve( + H, psi0, times, sc_ops, e_ops=e_ops, + ntraj=ntraj, args={"w": 2}, options=options, heterodyne=heterodyne, + seeds=1, + ) + + for idx in range(len(e_ops)): + np.testing.assert_allclose( + res.average_expect[idx], res_ref.expect[idx], rtol=tol, atol=tol + ) + + assert len(res.measurement) == ntraj + + if heterodyne: + assert all([ + dw.shape == (len(sc_ops), 2, len(times)-1) + for dw in res.dW + ]) + assert all([ + w.shape == (len(sc_ops), 2, len(times)) + for w in res.wiener_process + ]) + assert all([ + m.shape == (len(sc_ops), 2, len(times)-1) + for m in res.measurement + ]) + else: + assert all([ + dw.shape == (len(sc_ops), len(times)-1) + for dw in res.dW + ]) + assert all([ + w.shape == (len(sc_ops), len(times)) + for w in res.wiener_process + ]) + assert all([ + m.shape == (len(sc_ops), len(times)-1) + for m in res.measurement + ]) + + +def test_reuse_seeds(): + tol = 0.05 + N = 4 + ntraj = 5 + + H, sc_ops = _make_system(N, "simple") + c_ops = [destroy(N)] + psi0 = coherent(N, 0.5) + a = destroy(N) + e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())] + + times = np.linspace(0, 0.1, 2) + + options = { + "store_final_state": True, + "map": "serial", + "keep_runs_results": True, + "store_measurement": True, + } + + res = smesolve( + H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, c_ops=c_ops, + ntraj=ntraj, args={"w": 2}, options=options, + ) + + res2 =
smesolve( + H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, c_ops=c_ops, + ntraj=ntraj, args={"w": 2}, options=options, + seeds=res.seeds, + ) + + np.testing.assert_allclose( + res.wiener_process, res2.wiener_process, atol=1e-14 + ) + + np.testing.assert_allclose(res.expect, res2.expect, atol=1e-14) + + for out1, out2 in zip(res.final_state, res2.final_state): + assert out1 == out2 + + +@pytest.mark.parametrize("heterodyne", [True, False]) +def test_m_ops(heterodyne): + N = 10 + ntraj = 1 + + H = num(N) + sc_ops = [destroy(N), qeye(N)] + psi0 = basis(N, N-1) + m_ops = [num(N), qeye(N)] + if heterodyne: + m_ops = m_ops * 2 + + times = np.linspace(0, 1.0, 51) + + options = {"store_measurement": True,} + + solver = SMESolver(H, sc_ops, heterodyne=heterodyne, options=options) + solver.m_ops = m_ops + solver.dW_factors = [0.] * len(m_ops) + + res = solver.run(psi0, times, e_ops=m_ops) + # With dW_factors=0, measurements are computed as expectation values. + if heterodyne: + np.testing.assert_allclose(res.expect[0][1:], res.measurement[0][0][0]) + np.testing.assert_allclose(res.expect[1][1:], res.measurement[0][0][1]) + else: + np.testing.assert_allclose(res.expect[0][1:], res.measurement[0][0]) + np.testing.assert_allclose(res.expect[1][1:], res.measurement[0][1]) + + solver.dW_factors = [1.] * len(m_ops) + # With dW_factors=1, measurements include the simulated Wiener noise.
+ res = solver.run(psi0, times, e_ops=m_ops) + std = 1/times[1]**0.5 + noise = res.expect[0][1:] - res.measurement[0][0] + assert np.mean(noise) == pytest.approx(0., abs=std/50**0.5 * 4) + assert np.std(noise) == pytest.approx(std, abs=std/50**0.5 * 4) diff --git a/qutip/tests/solver/test_stochastic_system.py b/qutip/tests/solver/test_stochastic_system.py new file mode 100644 index 0000000000..4fa27a618b --- /dev/null +++ b/qutip/tests/solver/test_stochastic_system.py @@ -0,0 +1,179 @@ +import numpy as np +from qutip import ( + qeye, num, destroy, create, QobjEvo, Qobj, + basis, rand_herm, fock_dm, liouvillian, operator_to_vector +) +from qutip.solver.sode.ssystem import * +from qutip.solver.sode.ssystem import SimpleStochasticSystem, StochasticClosedSystem +import qutip.core.data as _data +import pytest +from itertools import product + + +def L0(system, f): + def _func(t, rho, dt=1e-6): + n = rho.shape[0] + f0 = f(t, rho) + + out = (f(t+dt, rho) - f0) / dt + + jac = np.zeros((n, n), dtype=complex) + for i in range(n): + dxi = basis(n, i).data + jac[:, i] = (f(t, rho + dt * dxi) - f0).to_array().flatten() / dt + out = out + _data.Dense(jac) @ system.drift(t, rho) + + for i, j in product(range(n), repeat=2): + dxi = basis(n, i).data + dxj = basis(n, j).data + sec = f(t, (rho + dxi * dt + dxj * dt)) + sec = sec - f(t, (rho + dxj * dt)) + sec = sec - f(t, (rho + dxi * dt)) + sec = sec + f0 + sec = sec / dt / dt * 0.5 + for k in range(system.num_collapse): + out = out + ( + sec + * _data.inner(dxi, system.diffusion(t, rho)[k]) + * _data.inner(dxj, system.diffusion(t, rho)[k]) + ) + return out + return _func + + +def L(system, ii, f): + def _func(t, rho, dt=1e-6): + n = rho.shape[0] + jac = np.zeros((n, n), dtype=complex) + f0 = f(t, rho) + for i in range(n): + dxi = basis(n, i).data + jac[:, i] = (f(t, (rho + dt * dxi)) - f0).to_array().flatten() + return _data.Dense(jac) @ system.diffusion(t, rho)[ii] / dt + return _func + + +def LL(system, ii, jj, f): + # Can be 
implemented as 2 calls of ``L``, but would use 2 ``dt`` which + cannot be different. + def _func(t, rho, dt=1e-6): + f0 = f(t, rho) + bi = system.diffusion(t, rho)[ii] + bj = system.diffusion(t, rho)[jj] + out = rho *0. + n = rho.shape[0] + + for i, j in product(range(n), repeat=2): + dxi = basis(n, i, dtype="Dense").data + dxj = basis(n, j, dtype="Dense").data + sec = f(t, (rho + dxi * dt + dxj * dt)) + sec = sec - f(t, (rho + dxj * dt)) + sec = sec - f(t, (rho + dxi * dt)) + sec = sec + f0 + sec = sec / dt / dt + + out = out + ( + sec * _data.inner(dxi, bi) * _data.inner(dxj, bj) + ) + df = (f(t, (rho + dxj * dt)) - f0) / dt + db = ( + system.diffusion(t, (rho + dxi * dt))[jj] + - system.diffusion(t, rho)[jj] + ) / dt + + out = out + ( + df * _data.inner(dxi, bi) * _data.inner(dxj, db) + ) + + return out + return _func + + +def _check_equivalence(f, target, args): + """ + Check that the error is proportional to `dt`. + """ + dts = np.logspace(-4, -1, 7) + errors_dt = np.array([ + _data.norm.l2(f(*args, dt=dt) - target) + for dt in dts + ]) + if np.all(errors_dt < 1e-6): + return + + power = np.polyfit(np.log(dts), np.log(errors_dt + 1e-16), 1)[0] + # Sometimes the dt term is cancelled and the dt**2 term is dominant + assert power > 0.9 + + +def _run_derr_check(solver, state): + """ + Compare each derivative to its finite-difference equivalent.
+ """ + t = 0 + N = solver.num_collapse + a = solver.drift + solver.set_state(t, state) + + assert _data.norm.l2(solver.drift(t, state) - solver.a()) < 1e-6 + for i in range(N): + b = lambda *args: solver.diffusion(*args)[i] + assert b(t, state) == solver.bi(i) + for j in range(N): + _check_equivalence( + L(solver, j, b), solver.Libj(j, i), (t, state) + ) + + _check_equivalence(L0(solver, a), solver.L0a(), (t, state)) + + for i in range(N): + b = lambda *args: solver.diffusion(*args)[i] + _check_equivalence(L0(solver, b), solver.L0bi(i), (t, state)) + _check_equivalence(L(solver, i, a), solver.Lia(i), (t, state)) + + for j in range(i, N): + for k in range(j, N): + _check_equivalence( + LL(solver, k, j, b), solver.LiLjbk(k, j, i), (t, state) + ) + + +def _make_oper(kind, N): + if kind == "qeye": + out = qeye(N) + elif kind == "destroy": + out = destroy(N) + elif kind == "destroy2": + out = destroy(N)**2 + elif kind == "tridiag": + out = destroy(N) + num(N) + create(N) + elif kind == "td": + out = [num(N), [destroy(N) + create(N), lambda t: 1 + t]] + elif kind == "rand": + out = rand_herm(N) + return QobjEvo(out) + + +@pytest.mark.parametrize(['H', 'sc_ops'], [ + pytest.param("qeye", ["destroy"], id='simple'), + pytest.param("tridiag", ["destroy"], id='simple'), + pytest.param("qeye", ["destroy", "destroy2"], id='2 c_ops'), + pytest.param("td", ["destroy"], id='H td'), + pytest.param("qeye", ["td"], id='c_ops td'), + pytest.param("rand", ["rand"], id='random'), +]) +@pytest.mark.parametrize('heterodyne', [False, True]) +def test_open_system_derr(H, sc_ops, heterodyne): + N = 5 + H = _make_oper(H, N) + sc_ops = [_make_oper(op, N) for op in sc_ops] + if heterodyne: + new_sc_ops = [] + for c_op in sc_ops: + new_sc_ops.append(c_op / np.sqrt(2)) + new_sc_ops.append(c_op * (-1j / np.sqrt(2))) + sc_ops = new_sc_ops + + system = StochasticOpenSystem(H, sc_ops) + state = operator_to_vector(fock_dm(N, N-2, dtype="Dense")).data + _run_derr_check(system, state) diff --git 
a/qutip/tests/solver/test_transfertensor.py b/qutip/tests/solver/test_transfertensor.py new file mode 100644 index 0000000000..775c1f5383 --- /dev/null +++ b/qutip/tests/solver/test_transfertensor.py @@ -0,0 +1,52 @@ +import pytest +import qutip +import numpy as np +from qutip.solver.nonmarkov.transfertensor import ttmsolve + + +@pytest.mark.parametrize("call", [True, False]) +def test_ttmsolve_jc_model(call): + """ + Checks the output of ttmsolve using an example from Jaynes-Cumming model, + which can also be found in the qutip-notebooks repository. + """ + # Define Hamiltonian and states + N, kappa, g = 3, 1.0, 10 + a = qutip.tensor(qutip.qeye(2), qutip.destroy(N)) + sm = qutip.tensor(qutip.sigmam(), qutip.qeye(N)) + sz = qutip.tensor(qutip.sigmaz(), qutip.qeye(N)) + H = g * (a.dag() * sm + a * sm.dag()) + c_ops = [np.sqrt(kappa) * a] + # identity superoperator + Id = qutip.tensor(qutip.qeye(2), qutip.qeye(N)) + E0 = qutip.sprepost(Id, Id) + # partial trace superoperator + ptracesuper = qutip.tensor_contract(E0, (1, N)) + # initial states + rho0a = qutip.ket2dm(qutip.basis(2, 0)) + psi0c = qutip.basis(N, 0) + rho0c = qutip.ket2dm(psi0c) + rho0 = qutip.tensor(rho0a, rho0c) + superrho0cav = qutip.sprepost( + qutip.tensor(qutip.qeye(2), psi0c), qutip.tensor(qutip.qeye(2), psi0c.dag()) + ) + + # calculate exact solution using mesolve + times = np.arange(0, 5.0, 0.1) + exactsol = qutip.mesolve(H, rho0, times, c_ops, [sz]) + + if not call: + learning_times = np.arange(0, 2.0, 0.1) + Et_list = qutip.mesolve(H, E0, learning_times, c_ops, []).states + learning_maps = [ptracesuper @ Et @ superrho0cav for Et in Et_list] + else: + prop = qutip.Propagator(qutip.liouvillian(H, c_ops)) + def learning_maps(t): + return ptracesuper @ prop(t) @ superrho0cav + + # solve using transfer method + ttmsol = ttmsolve(learning_maps, rho0a, times, + e_ops=[qutip.sigmaz()], num_learning=21) + + # check that ttm result and exact solution are close in the learning times + assert 
np.allclose(ttmsol.expect[0], exactsol.expect[0], atol=1e-5) diff --git a/qutip/tests/test_animation.py b/qutip/tests/test_animation.py new file mode 100644 index 0000000000..676e7473a4 --- /dev/null +++ b/qutip/tests/test_animation.py @@ -0,0 +1,136 @@ +import pytest +import qutip +import numpy as np +import matplotlib as mpl +import matplotlib.pyplot as plt +from scipy.special import sph_harm + +def test_result_state(): + H = qutip.rand_dm(2) + tlist = np.linspace(0, 3*np.pi, 2) + results = qutip.mesolve(H, H, tlist, [], []) + + fig, ani = qutip.anim_fock_distribution(results) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_result_state_ValueError(): + H = qutip.rand_dm(2) + tlist = np.linspace(0, 3*np.pi, 2) + results = qutip.mesolve(H, H, tlist, [], [], + options={"store_states": False}) + + text = 'Nothing to visualize. You might have forgotten ' +\ + 'to set options={"store_states": True}.' + with pytest.raises(ValueError) as exc_info: + fig, ani = qutip.anim_fock_distribution(results) + assert str(exc_info.value) == text + + +def test_anim_wigner_sphere(): + psi = qutip.rand_ket(5) + wigner = qutip.wigner_transform(psi, 2, False, 50, ["x"]) + + fig, ani = qutip.anim_wigner_sphere([wigner]*2) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_anim_hinton(): + rho = qutip.rand_dm(5) + rhos = [rho]*2 + + fig, ani = qutip.anim_hinton(rhos) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_anim_sphereplot(): + theta = np.linspace(0, np.pi, 90) + phi = np.linspace(0, 2 * np.pi, 60) + phi_mesh, theta_mesh = np.meshgrid(phi, theta) + values = sph_harm(-1, 2, phi_mesh, theta_mesh).T + fig, ani = qutip.anim_sphereplot([values]*2, theta, phi) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, 
mpl.animation.ArtistAnimation) + + +def test_anim_matrix_histogram(): + rho = qutip.rand_dm(5) + rhos = [rho]*2 + + fig, ani = qutip.anim_matrix_histogram(rhos) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_anim_fock_distribution(): + rho = qutip.rand_dm(5) + rhos = [rho]*2 + + fig, ani = qutip.anim_fock_distribution(rhos) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_anim_wigner(): + rho = qutip.rand_dm(5) + rhos = [rho]*2 + + fig, ani = qutip.anim_wigner(rhos) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +@pytest.mark.filterwarnings( + "ignore:The input coordinates to pcolor:UserWarning" +) +def test_anim_spin_distribution(): + j = 5 + psi = qutip.spin_state(j, -j) + psi = qutip.spin_coherent(j, np.random.rand() * np.pi, + np.random.rand() * 2 * np.pi) + theta = np.linspace(0, np.pi, 50) + phi = np.linspace(0, 2 * np.pi, 50) + Q, THETA, PHI = qutip.spin_q_function(psi, theta, phi) + + fig, ani = qutip.anim_spin_distribution([Q]*2, THETA, PHI) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_anim_qubism(): + state = qutip.ket("01") + + fig, ani = qutip.anim_qubism([state]*2) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_anim_schmidt(): + state = qutip.ket("01") + + fig, ani = qutip.anim_schmidt([state]*2) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) diff --git a/qutip/tests/test_bloch.py b/qutip/tests/test_bloch.py index f736090c2e..33e8fb2fae 100644 --- a/qutip/tests/test_bloch.py +++ b/qutip/tests/test_bloch.py @@ -9,14 +9,11 @@ try: import matplotlib.pyplot as plt from 
matplotlib.testing.decorators import check_figures_equal + import IPython + check_pngs_equal = check_figures_equal(extensions=["png"]) except ImportError: - def check_figures_equal(*args, **kw): - def _error(*args, **kw): - raise RuntimeError("matplotlib is not installed") plt = None - - -check_pngs_equal = check_figures_equal(extensions=["png"]) + check_pngs_equal = pytest.mark.skip(reason="matplotlib not installed") class RefBloch(Bloch): @@ -474,6 +471,8 @@ def test_vector_errors_color_length(self, vectors, colors): def test_repr_svg(): + pytest.importorskip("matplotlib") + pytest.importorskip("ipython") svg = Bloch()._repr_svg_() assert isinstance(svg, str) assert svg.startswith("= -bound) - assert np.all(result.final_amps <= bound) - - def test_unitarity_via_dump(self): - """ - Test that unitarity is maintained at all times throughout the - optimisation of the controls. - """ - kwargs = {'num_tslots': 1000, 'evo_time': 4, 'fid_err_targ': 1e-9, - 'dyn_params': {'dumping': 'FULL'}} - system = _merge_kwargs(hadamard, kwargs) - result = _optimize_pulse(system) - dynamics = result.optimizer.dynamics - assert dynamics.dump is not None, "Dynamics dump not created" - # Use the dump to check unitarity of all propagators and evo_ops - dynamics.unitarity_tol = 1e-13 # 1e-14 for eigh but 1e-13 for eig. - for item, description in [('prop', 'propagators'), - ('fwd_evo', 'forward evolution operators'), - ('onto_evo', 'onto evolution operators')]: - non_unitary = sum(not dynamics._is_unitary(x) - for dump in dynamics.dump.evo_dumps - for x in getattr(dump, item)) - assert non_unitary == 0, "Found non-unitary " + description + "." 
- - def test_crab(self, propagation): - tol = 1e-5 - evo_time = 10 - result = cpo.opt_pulse_crab_unitary( - hadamard.system, hadamard.controls, - hadamard.initial, hadamard.target, - num_tslots=12, evo_time=evo_time, fid_err_targ=tol, - **propagation, - alg_params={'crab_pulse_params': {'randomize_coeffs': False, - 'randomize_freqs': False}}, - init_coeff_scaling=0.5, - guess_pulse_type='GAUSSIAN', - guess_pulse_params={'variance': evo_time * 0.1}, - guess_pulse_scaling=1.0, - guess_pulse_offset=1.0, - amp_lbound=None, - amp_ubound=None, - ramping_pulse_type='GAUSSIAN_EDGE', - ramping_pulse_params={'decay_time': evo_time * 0.01}, - gen_stats=True) - error = " ".join(["Infidelity: {:7.4e}".format(result.fid_err), - "reason:", result.termination_reason]) - assert result.goal_achieved, error - assert abs(result.fid_err) < tol - assert abs(result.final_amps[0, 0]) < tol, "Lead-in amplitude nonzero." - - -# The full object-orientated interface to the optimiser is rather complex. To -# attempt to simplify the test of the configuration loading, we break it down -# into steps here. 
- -def _load_configuration(path): - configuration = qutip.control.optimconfig.OptimConfig() - configuration.param_fname = path.name - configuration.param_fpath = str(path) - configuration.pulse_type = "ZERO" - qutip.control.loadparams.load_parameters(str(path), config=configuration) - return configuration - - -def _load_dynamics(path, system, configuration, stats): - dynamics = qutip.control.dynamics.DynamicsUnitary(configuration) - dynamics.drift_dyn_gen = system.system - dynamics.ctrl_dyn_gen = system.controls - dynamics.initial = system.initial - dynamics.target = system.target - qutip.control.loadparams.load_parameters(str(path), dynamics=dynamics) - dynamics.init_timeslots() - dynamics.stats = stats - return dynamics - - -def _load_pulse_generator(path, configuration, dynamics): - pulse_generator = qutip.control.pulsegen.create_pulse_gen( - pulse_type=configuration.pulse_type, - dyn=dynamics) - qutip.control.loadparams.load_parameters(str(path), - pulsegen=pulse_generator) - return pulse_generator - - -def _load_termination_conditions(path): - conditions = qutip.control.termcond.TerminationConditions() - qutip.control.loadparams.load_parameters(str(path), term_conds=conditions) - return conditions - - -def _load_optimizer(path, configuration, dynamics, pulse_generator, - termination_conditions, stats): - method = configuration.optim_method - if method is None: - raise qutip.control.errors.UsageError( - "Optimization algorithm must be specified using the 'optim_method'" - " parameter.") - known = {'BFGS': 'OptimizerBFGS', 'FMIN_L_BFGS_B': 'OptimizerLBFGSB'} - constructor = getattr(qutip.control.optimizer, - known.get(method, 'Optimizer')) - optimizer = constructor(configuration, dynamics) - optimizer.method = method - qutip.control.loadparams.load_parameters(str(path), optim=optimizer) - optimizer.config = configuration - optimizer.dynamics = dynamics - optimizer.pulse_generator = pulse_generator - optimizer.termination_conditions = termination_conditions - 
optimizer.stats = stats - return optimizer - - -class TestFileIO: - def test_load_parameters_from_file(self): - system = hadamard - path = pathlib.Path(__file__).parent / "Hadamard_params.ini" - stats = qutip.control.stats.Stats() - configuration = _load_configuration(path) - dynamics = _load_dynamics(path, system, configuration, stats) - pulse_generator = _load_pulse_generator(path, configuration, dynamics) - termination_conditions = _load_termination_conditions(path) - optimizer = _load_optimizer(path, - configuration, - dynamics, - pulse_generator, - termination_conditions, - stats) - init_amps = np.array([optimizer.pulse_generator.gen_pulse() - for _ in system.controls]).T - optimizer.dynamics.initialize_controls(init_amps) - result = optimizer.run_optimization() - - kwargs = {'num_tslots': 6, 'evo_time': 6, 'fid_err_targ': 1e-10, - 'init_pulse_type': 'LIN', 'dyn_type': 'UNIT', - 'amp_lbound': -1, 'amp_ubound': 1, - 'gen_stats': True} - target = _optimize_pulse(system._replace(kwargs=kwargs)) - np.testing.assert_allclose(result.final_amps, target.final_amps, - atol=1e-5) - - @pytest.mark.usefixtures("in_temporary_directory") - def test_dumping_to_files(self): - N_OPTIMDUMP_FILES = 10 - N_DYNDUMP_FILES = 49 - dumping = {'dumping': 'FULL', 'dump_to_file': True} - kwargs = {'num_tslots': 1_000, 'evo_time': 4, 'fid_err_targ': 1e-9, - 'optim_params': {'dump_dir': 'optim', **dumping}, - 'dyn_params': {'dump_dir': 'dyn', **dumping}} - system = _merge_kwargs(hadamard, kwargs) - result = _optimize_pulse(system) - - # Check dumps were generated and have the right number of files. - assert result.optimizer.dump is not None - assert result.optimizer.dynamics.dump is not None - assert (len(os.listdir(result.optimizer.dump.dump_dir)) - == N_OPTIMDUMP_FILES) - assert (len(os.listdir(result.optimizer.dynamics.dump.dump_dir)) - == N_DYNDUMP_FILES) - - # Dump all to specific file stream. 
- for dump, type_ in [(result.optimizer.dump, 'optimizer'), - (result.optimizer.dynamics.dump, 'dynamics')]: - with tempfile.NamedTemporaryFile() as file: - dump.writeout(file) - assert os.stat(file.name).st_size > 0,\ - " ".join(["Empty", type_, "file."]) - - -def _count_waves(system): - optimizer = cpo.create_pulse_optimizer(system.system, system.controls, - system.initial, system.target, - **system.kwargs) - pulse = optimizer.pulse_generator.gen_pulse() - zero_crossings = pulse[0:-2]*pulse[1:-1] < 0 - return (sum(zero_crossings) + 1) // 2 - - -@pytest.mark.parametrize('pulse_type', - [pytest.param(x, id=x.lower()) - for x in ['SINE', 'SQUARE', 'TRIANGLE', 'SAW']]) -class TestPeriodicControlFunction: - num_tslots = 1_000 - evo_time = 10 - - @pytest.mark.parametrize('n_waves', [1, 5, 10, 100]) - def test_number_of_waves(self, pulse_type, n_waves): - kwargs = {'num_tslots': self.num_tslots, 'evo_time': self.evo_time, - 'init_pulse_type': pulse_type, - 'init_pulse_params': {'num_waves': n_waves}, - 'gen_stats': False} - system = _merge_kwargs(hadamard, kwargs) - assert _count_waves(system) == n_waves - - @pytest.mark.parametrize('frequency', [0.1, 1, 10, 20]) - def test_frequency(self, pulse_type, frequency): - kwargs = {'num_tslots': self.num_tslots, 'evo_time': self.evo_time, - 'init_pulse_type': pulse_type, - 'init_pulse_params': {'freq': frequency}, - 'fid_err_targ': 1e-5, - 'gen_stats': False} - system = _merge_kwargs(hadamard, kwargs) - assert _count_waves(system) == self.evo_time*frequency - - -class TestTimeDependence: - """ - Test that systems where the system Hamiltonian is time-dependent behave as - expected under the optimiser. - """ - def test_drift(self): - """ - Test that introducing time dependence to the system does change the - result of the optimisation. 
- """ - num_tslots = 20 - system = _merge_kwargs(hadamard, {'num_tslots': num_tslots, - 'evo_time': 10}) - result_fixed = _optimize_pulse(system) - system_flat = system._replace(system=[system.system]*num_tslots) - result_flat = _optimize_pulse(system_flat) - step = [0.0]*(num_tslots//2) + [1.0]*(num_tslots//2) - system_step = system._replace(system=[x*system.system for x in step]) - result_step = _optimize_pulse(system_step) - np.testing.assert_allclose(result_fixed.final_amps, - result_flat.final_amps, - rtol=1e-9) - assert np.any((result_flat.final_amps-result_step.final_amps) > 1e-3),\ - "Flat and step drights result in the same control pulses." - - def test_controls_all_time_slots_equal_to_no_time_dependence(self): - """ - Test that simply duplicating the system in each time slot (i.e. no - actual time dependence has no effect on the final result. - """ - num_tslots = 20 - system = _merge_kwargs(hadamard, {'num_tslots': num_tslots, - 'evo_time': 10, - 'fid_err_targ': 1e-10}) - result_single = _optimize_pulse(system) - system_vary = system._replace(controls=[[_sx]]*num_tslots) - result_vary = _optimize_pulse(system_vary) - np.testing.assert_allclose(result_single.final_amps, - result_vary.final_amps, - atol=1e-9) - - def test_controls_identity_operators_ignored(self): - """ - Test that moments in time where the control parameters are simply the - identity are just ignored by the optimiser (since they'll never be able - to do anything. 
- """ - num_tslots = 20 - controls = [[_sx] if k % 3 else [_si] for k in range(num_tslots)] - system = _merge_kwargs(hadamard, {'num_tslots': num_tslots, - 'evo_time': 10}) - system = system._replace(controls=controls) - result = _optimize_pulse(system) - for k in range(0, num_tslots, 3): - np.testing.assert_allclose(result.initial_amps[k], - result.final_amps[k], - rtol=1e-9) diff --git a/qutip/tests/test_ipynbtools.py b/qutip/tests/test_ipynbtools.py new file mode 100644 index 0000000000..2b1c76559a --- /dev/null +++ b/qutip/tests/test_ipynbtools.py @@ -0,0 +1,24 @@ +from qutip.ipynbtools import version_table +import pytest + + +@pytest.mark.parametrize('verbose', [False, True]) +def test_version_table(verbose): + html_data = version_table(verbose=verbose).data + assert "Software" in html_data + assert "Version" in html_data + assert "QuTiP" in html_data + assert "Numpy" in html_data + assert "SciPy" in html_data + assert "matplotlib" in html_data + assert "IPython" in html_data + if verbose: + assert "Installation path" in html_data + if pytest.importorskip("getpass") is not None: + assert "User" in html_data + + +@pytest.mark.skipif(not pytest.importorskip("Cython"), reason="cython not installed") +def test_version_table_with_cython(): + html_data = version_table().data + assert "Cython" in html_data diff --git a/qutip/tests/test_openmp.py b/qutip/tests/test_openmp.py index 05804ca5e9..592ab54692 100644 --- a/qutip/tests/test_openmp.py +++ b/qutip/tests/test_openmp.py @@ -1,5 +1,5 @@ import numpy as np -from numpy.testing import assert_equal, assert_, run_module_suite +from numpy.testing import assert_equal import unittest from qutip import * from qutip.settings import settings as qset @@ -18,7 +18,7 @@ def test_openmp_spmv(): out_openmp = np.zeros_like(vec) _spmvpy(L.data, L.indices, L.indptr, vec, 1, out) _spmvpy_openmp(L.data, L.indices, L.indptr, vec, 1, out_openmp, 2) - assert_(np.allclose(out, out_openmp, 1e-15)) + assert (np.allclose(out, out_openmp, 
1e-15)) # @unittest.skipIf(qset.has_openmp == False, 'OPENMP not available.') @unittest.skipIf(True, 'OPENMP disabled.') @@ -61,8 +61,8 @@ def test_openmp_mesolve(): out = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm], options=opts) opts = SolverOptions(use_openmp=True) out_omp = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm], options=opts) - assert_(np.allclose(out.expect[0],out_omp.expect[0])) - assert_(np.allclose(out.expect[1],out_omp.expect[1])) + assert (np.allclose(out.expect[0],out_omp.expect[0])) + assert (np.allclose(out.expect[1],out_omp.expect[1])) # @unittest.skipIf(qset.has_openmp == False, 'OPENMP not available.') @@ -107,5 +107,5 @@ def test_openmp_mesolve_td(): out_omp = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm], options=opts) opts = SolverOptions(use_openmp=False) out = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm], options=opts) - assert_(np.allclose(out.expect[0],out_omp.expect[0])) - assert_(np.allclose(out.expect[1],out_omp.expect[1])) + assert (np.allclose(out.expect[0],out_omp.expect[0])) + assert (np.allclose(out.expect[1],out_omp.expect[1])) diff --git a/qutip/tests/test_orbital.py b/qutip/tests/test_orbital.py deleted file mode 100644 index e5a1829766..0000000000 --- a/qutip/tests/test_orbital.py +++ /dev/null @@ -1,55 +0,0 @@ -import pytest -from scipy.special import sph_harm -from qutip.orbital import orbital -import qutip -import numpy as np - - -def test_orbital_single_ket(): - """ Checks output for a single ket as input""" - # setup mesh for theta and phi - theta_list = np.linspace(0, np.pi, num=50) - phi_list = np.linspace(0, 2 * np.pi, num=100) - for theta, phi in zip(theta_list, phi_list): - # set l and m - for l in range(0, 5): - for m in range(-l, l + 1): - q = qutip.basis(2 * l + 1, l + m) - # check that outputs are the same, - # note that theta and phi are interchanged for scipy - assert sph_harm(m, l, phi, theta) == orbital(theta, phi, q) - - -def 
test_orbital_multiple_ket(): - """ Checks if the combination of multiple kets works """ - theta_list = np.linspace(0, np.pi, num=50) - phi_list = np.linspace(0, 2 * np.pi, num=100) - l, m = 5, 2 - q1 = qutip.basis(2 * l + 1, l + m) - l, m = 3, -1 - q2 = qutip.basis(2 * l + 1, l + m) - for theta, phi in zip(theta_list, phi_list): - exp = sph_harm(2, 5, phi, theta) + sph_harm(-1, 3, phi, theta) - assert orbital(theta, phi, q1, q2) == exp - - -def test_orbital_explicit(): - """ Checks explicit configurations of orbital functions""" - theta_list = np.linspace(0, np.pi, num=50) - phi_list = np.linspace(0, 2 * np.pi, num=100) - # Constant function - l, m = 0, 0 - q = qutip.basis(2 * l + 1, l + m) - assert orbital(0, 0, q) == 0.5 * np.sqrt(1 / np.pi) - # cosine function - l, m = 1, 0 - q = qutip.basis(2 * l + 1, l + m) - assert np.allclose(orbital(theta_list, 0, q), - 0.5 * np.sqrt(3 / np.pi) * np.cos(theta_list)) - # cosine with phase - l, m = 1, 1 - q = qutip.basis(2 * l + 1, l + m) - phi_mesh, theta_mesh = np.meshgrid(phi_list, theta_list) - assert np.allclose(orbital(theta_list, phi_list, q), - -0.5 * np.sqrt(3 / (2 * np.pi)) * np.sin( - theta_mesh) * np.exp(1j * phi_mesh)) diff --git a/qutip/tests/test_partial_transpose.py b/qutip/tests/test_partial_transpose.py index 02d3fec328..c9aa1c10ed 100644 --- a/qutip/tests/test_partial_transpose.py +++ b/qutip/tests/test_partial_transpose.py @@ -3,7 +3,6 @@ """ import numpy as np -from numpy.testing import assert_, run_module_suite from qutip import Qobj, partial_transpose, tensor, rand_dm from qutip.partial_transpose import _partial_transpose_reference @@ -16,7 +15,7 @@ def test_partial_transpose_bipartite(): # no transpose rho_pt = partial_transpose(rho, [0, 0]) - assert_(np.abs(np.max(rho_pt.full() - rho.full())) < 1e-12) + assert (np.abs(np.max(rho_pt.full() - rho.full())) < 1e-12) # partial transpose subsystem 1 rho_pt = partial_transpose(rho, [1, 0]) @@ -24,7 +23,7 @@ def test_partial_transpose_bipartite(): [4, 5, 12, 
13], [2, 3, 10, 11], [6, 7, 14, 15]]) - assert_(np.abs(np.max(rho_pt.full() - rho_pt_expected)) < 1e-12) + assert (np.abs(np.max(rho_pt.full() - rho_pt_expected)) < 1e-12) # partial transpose subsystem 2 rho_pt = partial_transpose(rho, [0, 1]) @@ -32,11 +31,11 @@ def test_partial_transpose_bipartite(): [1, 5, 3, 7], [8, 12, 10, 14], [9, 13, 11, 15]]) - assert_(np.abs(np.max(rho_pt.full() - rho_pt_expected)) < 1e-12) + assert (np.abs(np.max(rho_pt.full() - rho_pt_expected)) < 1e-12) # full transpose rho_pt = partial_transpose(rho, [1, 1]) - assert_(np.abs(np.max(rho_pt.full() - rho.trans().full())) < 1e-12) + assert (np.abs(np.max(rho_pt.full() - rho.trans().full())) < 1e-12) def test_partial_transpose_comparison(): @@ -72,7 +71,3 @@ def test_partial_transpose_randomized(): rho_pt2 = partial_transpose(rho, mask, method="sparse") np.abs(np.max(rho_pt2.full() - rho_pt_ref.full())) < 1e-12 - - -if __name__ == "__main__": - run_module_suite() diff --git a/qutip/tests/test_progressbar.py b/qutip/tests/test_progressbar.py new file mode 100644 index 0000000000..d1fa275f17 --- /dev/null +++ b/qutip/tests/test_progressbar.py @@ -0,0 +1,44 @@ +from qutip.ui.progressbar import progress_bars +import pytest +import time + + +bars = ["base", "text", "Enhanced"] + +try: + import tqdm + bars.append("tqdm") +except ImportError: + bars.append( + pytest.param("tqdm", marks=pytest.mark.skip("module not installed")) + ) + +try: + import IPython + bars.append("html") +except ImportError: + bars.append( + pytest.param("html", marks=pytest.mark.skip("module not installed")) + ) + + +@pytest.mark.parametrize("pbar", bars) +def test_progressbar(pbar): + N = 5 + bar = progress_bars[pbar](N) + assert bar.total_time() < 0 + for _ in range(N): + time.sleep(0.25) + bar.update() + bar.finished() + assert bar.total_time() > 0 + + +@pytest.mark.parametrize("pbar", bars[1:]) +def test_progressbar_has_print(pbar, capsys): + N = 2 + bar = progress_bars[pbar](N) + bar.update() + bar.finished() + out, 
err = capsys.readouterr() + assert out + err != "" diff --git a/qutip/tests/test_qpt.py b/qutip/tests/test_qpt.py index c2d092e207..aada73a324 100644 --- a/qutip/tests/test_qpt.py +++ b/qutip/tests/test_qpt.py @@ -1,5 +1,4 @@ import numpy as np -from numpy.testing import assert_, run_module_suite import scipy.linalg as la from qutip import spre, spost, qeye, sigmax, sigmay, sigmaz, qpt @@ -19,7 +18,7 @@ def test_qpt_snot(): chi2 = np.zeros((2 ** (2 * N), 2 ** (2 * N)), dtype=complex) chi2[1, 1] = chi2[1, 3] = chi2[3, 1] = chi2[3, 3] = 0.5 - assert_(la.norm(chi2 - chi1) < 1e-8) + assert (la.norm(chi2 - chi1) < 1e-8) def test_qpt_cnot(): @@ -44,7 +43,4 @@ def test_qpt_cnot(): chi2[12, 12] = chi2[13, 13] = 0.25 chi2[13, 12] = chi2[12, 13] = -0.25 - assert_(la.norm(chi2 - chi1) < 1e-8) - -if __name__ == "__main__": - run_module_suite() + assert (la.norm(chi2 - chi1) < 1e-8) diff --git a/qutip/tests/test_random.py b/qutip/tests/test_random.py index fcbf1b4280..13f9554bbf 100644 --- a/qutip/tests/test_random.py +++ b/qutip/tests/test_random.py @@ -4,7 +4,7 @@ import scipy.linalg as la import pytest -from qutip import qeye, num, to_kraus, kraus_to_choi, CoreOptions +from qutip import qeye, num, to_kraus, kraus_to_choi, CoreOptions, Qobj from qutip import data as _data from qutip.random_objects import ( rand_herm, @@ -285,3 +285,27 @@ def test_kraus_map(dimensions, dtype): _assert_metadata(kmap[0], dimensions, dtype) with CoreOptions(atol=1e-9): assert kraus_to_choi(kmap).iscptp + + +dtype_names = list(_data.to._str2type.keys()) + list(_data.to.dtypes) +dtype_types = list(_data.to._str2type.values()) + list(_data.to.dtypes) +@pytest.mark.parametrize(['alias', 'dtype'], zip(dtype_names, dtype_types), + ids=[str(dtype) for dtype in dtype_names]) +@pytest.mark.parametrize('func', [ + rand_herm, + rand_unitary, + rand_dm, + rand_ket, + rand_stochastic, + rand_super, + rand_super_bcsz, + rand_kraus_map, +]) +def test_random_dtype(func, alias, dtype): + with 
CoreOptions(default_dtype=alias): + object = func(2) + if isinstance(object, Qobj): + assert isinstance(object.data, dtype) + else: + for obj in object: + assert isinstance(obj.data, dtype) diff --git a/qutip/tests/test_subsys_apply.py b/qutip/tests/test_subsys_apply.py index ed85466625..bd915d1d9c 100644 --- a/qutip/tests/test_subsys_apply.py +++ b/qutip/tests/test_subsys_apply.py @@ -1,5 +1,4 @@ from numpy.linalg import norm -from numpy.testing import assert_, run_module_suite from qutip import ( Qobj, tensor, vector_to_operator, operator_to_vector, kraus_to_super, @@ -28,18 +27,12 @@ def test_SimpleSingleApply(self): reference=True) naive_diff = (analytic_result - naive_result).full() naive_diff_norm = norm(naive_diff) - assert_(naive_diff_norm < tol, - msg="SimpleSingle: naive_diff_norm {} " - "is beyond tolerance {}".format( - naive_diff_norm, tol)) + assert naive_diff_norm < tol efficient_result = subsystem_apply(rho_3, single_op, [True]) efficient_diff = (efficient_result - analytic_result).full() efficient_diff_norm = norm(efficient_diff) - assert_(efficient_diff_norm < tol, - msg="SimpleSingle: efficient_diff_norm {} " - "is beyond tolerance {}".format( - efficient_diff_norm, tol)) + assert efficient_diff_norm < tol def test_SimpleSuperApply(self): """ @@ -54,18 +47,12 @@ def test_SimpleSuperApply(self): reference=True) naive_diff = (analytic_result - naive_result).full() naive_diff_norm = norm(naive_diff) - assert_(naive_diff_norm < tol, - msg="SimpleSuper: naive_diff_norm {} " - "is beyond tolerance {}".format( - naive_diff_norm, tol)) + assert naive_diff_norm < tol efficient_result = subsystem_apply(rho_3, superop, [True]) efficient_diff = (efficient_result - analytic_result).full() efficient_diff_norm = norm(efficient_diff) - assert_(efficient_diff_norm < tol, - msg="SimpleSuper: efficient_diff_norm {} " - "is beyond tolerance {}".format( - efficient_diff_norm, tol)) + assert efficient_diff_norm < tol def test_ComplexSingleApply(self): """ @@ -86,19 
+73,13 @@ def test_ComplexSingleApply(self): reference=True) naive_diff = (analytic_result - naive_result).full() naive_diff_norm = norm(naive_diff) - assert_(naive_diff_norm < tol, - msg="ComplexSingle: naive_diff_norm {} " - "is beyond tolerance {}".format( - naive_diff_norm, tol)) + assert naive_diff_norm < tol efficient_result = subsystem_apply(rho_input, single_op, [False, True, False, True, False]) efficient_diff = (efficient_result - analytic_result).full() efficient_diff_norm = norm(efficient_diff) - assert_(efficient_diff_norm < tol, - msg="ComplexSingle: efficient_diff_norm {} " - "is beyond tolerance {}".format( - efficient_diff_norm, tol)) + assert efficient_diff_norm < tol def test_ComplexSuperApply(self): """ @@ -126,16 +107,10 @@ def test_ComplexSuperApply(self): reference=True) naive_diff = (analytic_result - naive_result).full() naive_diff_norm = norm(naive_diff) - assert_(naive_diff_norm < tol, - msg="ComplexSuper: naive_diff_norm {} " - "is beyond tolerance {}".format( - naive_diff_norm, tol)) + assert naive_diff_norm < tol efficient_result = subsystem_apply(rho_input, superop, [False, True, False, True, False]) efficient_diff = (efficient_result - analytic_result).full() efficient_diff_norm = norm(efficient_diff) - assert_(efficient_diff_norm < tol, - msg="ComplexSuper: efficient_diff_norm {} " - "is beyond tolerance {}".format( - efficient_diff_norm, tol)) + assert efficient_diff_norm < tol diff --git a/qutip/tests/test_three_level.py b/qutip/tests/test_three_level.py deleted file mode 100644 index 6aee63c071..0000000000 --- a/qutip/tests/test_three_level.py +++ /dev/null @@ -1,27 +0,0 @@ -import numpy as np -from numpy.testing import assert_, assert_equal, run_module_suite -from qutip import basis -from qutip.three_level_atom import * - - -three_states = three_level_basis() -three_check = np.empty((3,), dtype=object) -three_check[:] = [basis(3, 0), basis(3, 1), basis(3, 2)] -three_ops = three_level_ops() - - -def testThreeStates(): - 
"Three-level atom: States" - assert_equal(np.all(three_states == three_check), True) - - -def testThreeOps(): - "Three-level atom: Operators" - assert_equal((three_ops[0]*three_states[0]).full(), three_check[0].full()) - assert_equal((three_ops[1]*three_states[1]).full(), three_check[1].full()) - assert_equal((three_ops[2]*three_states[2]).full(), three_check[2].full()) - assert_equal((three_ops[3]*three_states[1]).full(), three_check[0].full()) - assert_equal((three_ops[4]*three_states[1]).full(), three_check[2].full()) - -if __name__ == "__main__": - run_module_suite() diff --git a/qutip/tests/test_transfertensor.py b/qutip/tests/test_transfertensor.py deleted file mode 100644 index c736ef04ac..0000000000 --- a/qutip/tests/test_transfertensor.py +++ /dev/null @@ -1,45 +0,0 @@ -import pytest -import qutip as qt -import numpy as np -from qutip.solve.nonmarkov.transfertensor import ttmsolve - - -def test_ttmsolve_jc_model(): - """ - Checks the output of ttmsolve using an example from Jaynes-Cumming model, - which can also be found in the qutip-notebooks repository. 
- """ - # Define Hamiltonian and states - N, kappa, g = 3, 1.0, 10 - a = qt.tensor(qt.qeye(2), qt.destroy(N)) - sm = qt.tensor(qt.sigmam(), qt.qeye(N)) - sz = qt.tensor(qt.sigmaz(), qt.qeye(N)) - H = g * (a.dag() * sm + a * sm.dag()) - c_ops = [np.sqrt(kappa) * a] - # identity superoperator - Id = qt.tensor(qt.qeye(2), qt.qeye(N)) - E0 = qt.sprepost(Id, Id) - # partial trace superoperator - ptracesuper = qt.tensor_contract(E0, (1, N)) - # initial states - rho0a = qt.ket2dm(qt.basis(2, 0)) - psi0c = qt.basis(N, 0) - rho0c = qt.ket2dm(psi0c) - rho0 = qt.tensor(rho0a, rho0c) - superrho0cav = qt.sprepost(qt.tensor(qt.qeye(2), psi0c), - qt.tensor(qt.qeye(2), psi0c.dag())) - - # calculate exact solution using mesolve - times = np.arange(0, 5.0, 0.1) - exactsol = qt.mesolve(H, rho0, times, c_ops, []) - exact_z = qt.expect(sz, exactsol.states) - - # solve using transfer method - learning_times = np.arange(0, 2.0, 0.1) - Et_list = qt.mesolve(H, E0, learning_times, c_ops, []).states - learning_maps = [ptracesuper * (Et * superrho0cav) for Et in Et_list] - ttmsol = ttmsolve(learning_maps, rho0a, times) - ttm_z = qt.expect(qt.sigmaz(), ttmsol.states) - - # check that ttm result and exact solution are close in the learning times - assert np.allclose(ttm_z, exact_z, atol=1e-5) diff --git a/qutip/tests/test_visualization.py b/qutip/tests/test_visualization.py new file mode 100644 index 0000000000..b4c2a7e365 --- /dev/null +++ b/qutip/tests/test_visualization.py @@ -0,0 +1,640 @@ +import pytest +import qutip +import numpy as np +import matplotlib as mpl +import matplotlib.pyplot as plt +from scipy.special import sph_harm + + +def test_cyclic(): + qutip.settings.colorblind_safe = True + rho = qutip.rand_dm(5) + + fig, ax = qutip.hinton(rho, color_style='phase') + plt.close() + + qutip.settings.colorblind_safe = False + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_diverging(): + qutip.settings.colorblind_safe = True + rho = 
qutip.rand_dm(5) + + fig, ax = qutip.hinton(rho) + plt.close() + + qutip.settings.colorblind_safe = False + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_sequential(): + qutip.settings.colorblind_safe = True + theta = np.linspace(0, np.pi, 90) + phi = np.linspace(0, 2 * np.pi, 60) + phi_mesh, theta_mesh = np.meshgrid(phi, theta) + values = sph_harm(-1, 2, phi_mesh, theta_mesh).T + fig, ax = qutip.sphereplot(values, theta, phi) + plt.close() + + qutip.settings.colorblind_safe = False + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +@pytest.mark.parametrize('f, a, projection', [ + (True, True, '2d'), + (True, True, '3d'), + (True, False, '2d'), + (True, False, '3d'), + (False, True, '2d'), + (False, True, '3d'), + (False, False, '2d'), + (False, False, '3d'), +]) +def test_is_fig_and_ax(f, a, projection): + rho = qutip.rand_dm(5) + + fig = plt.figure() + ax = None + if a: + if projection == '2d': + ax = fig.add_subplot(111) + else: + ax = fig.add_subplot(111, projection='3d') + if not f: + fig = None + + fig, ax = qutip.plot_wigner(rho, projection=projection, + fig=fig, ax=ax) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_set_ticklabels(): + rho = qutip.rand_dm(5) + text = "got 1 ticklabels but needed 5" + + with pytest.raises(Exception) as exc_info: + fig, ax = qutip.hinton(rho, x_basis=[1]) + assert str(exc_info.value) == text + + +def test_equal_shape(): + rhos = [qutip.rand_dm(5)]*2 + [qutip.rand_dm(4)] + text = "All inputs should have the same shape." 
+ + with pytest.raises(Exception) as exc_info: + fig, ax = qutip.hinton(rhos) + assert str(exc_info.value) == text + + +@pytest.mark.parametrize('args', [ + ({'reflections': True}), + ({'cmap': mpl.cm.cividis}), + ({'colorbar': False}), +]) +def test_plot_wigner_sphere(args): + psi = qutip.rand_ket(5) + wigner = qutip.wigner_transform(psi, 2, False, 50, ["x"]) + + fig, ax = qutip.plot_wigner_sphere(wigner, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_plot_wigner_sphere_anim(): + psi = qutip.rand_ket(5) + wigner = qutip.wigner_transform(psi, 2, False, 50, ["x"]) + + fig, ani = qutip.plot_wigner_sphere([wigner]*2) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def to_oper_bra(oper): + return qutip.operator_to_vector(oper).dag() + + +def to_oper(oper): + return oper + + +@pytest.mark.parametrize('transform, args', [ + (to_oper, {}), + (qutip.operator_to_vector, {}), + (to_oper_bra, {}), + (qutip.spre, {}), + (to_oper, {'x_basis': [0, 1, 2, 3]}), + (to_oper, {'y_basis': [0, 1, 2, 3]}), + (to_oper, {'color_style': 'threshold'}), + (to_oper, {'color_style': 'phase'}), + (to_oper, {'colorbar': False}), +]) +def test_hinton(transform, args): + rho = transform(qutip.rand_dm(4)) + + fig, ax = qutip.hinton(rho, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_hinton1(): + fig, ax = qutip.hinton(np.zeros((3, 3))) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_hinton_anim(): + rho = qutip.rand_dm(5) + rhos = [rho]*2 + + fig, ani = qutip.hinton(rhos) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_hinton_ValueError0(): + text = "Input quantum object must be an operator or superoperator." 
+ rho = qutip.basis(2, 0) + + with pytest.raises(ValueError) as exc_info: + fig, ax = qutip.hinton(rho) + assert str(exc_info.value) == text + + +@pytest.mark.parametrize('transform, args, error_message', [ + (to_oper, {'color_style': 'color_style'}, + "Unknown color style color_style for Hinton diagrams."), + (qutip.spre, {}, + "Hinton plots of superoperators are currently only supported for qubits.") +]) +def test_hinton_ValueError1(transform, args, error_message): + rho = transform(qutip.rand_dm(5)) + + with pytest.raises(ValueError) as exc_info: + fig, ax = qutip.hinton(rho, **args) + assert str(exc_info.value) == error_message + + +@pytest.mark.parametrize('args', [ + ({'cmap': mpl.cm.cividis}), + ({'colorbar': False}), +]) +def test_sphereplot(args): + theta = np.linspace(0, np.pi, 90) + phi = np.linspace(0, 2 * np.pi, 60) + phi_mesh, theta_mesh = np.meshgrid(phi, theta) + values = sph_harm(-1, 2, phi_mesh, theta_mesh).T + fig, ax = qutip.sphereplot(values, theta, phi, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_sphereplot_anim(): + theta = np.linspace(0, np.pi, 90) + phi = np.linspace(0, 2 * np.pi, 60) + phi_mesh, theta_mesh = np.meshgrid(phi, theta) + values = sph_harm(-1, 2, phi_mesh, theta_mesh).T + fig, ani = qutip.sphereplot([values]*2, theta, phi) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +@pytest.mark.parametrize('response', [ + ('normal'), + ('error') +]) +def test_update_yaxis(response): + if response == 'normal': + fig, ax = qutip.matrix_histogram(np.zeros((3, 3))) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + else: + text = "got 1 ylabels but needed 5" + + with pytest.raises(ValueError) as exc_info: + fig, ax = qutip.matrix_histogram(qutip.rand_dm(5), + y_basis=[1]) + + assert str(exc_info.value) == text + + 
+@pytest.mark.parametrize('response', [ + ('normal'), + ('error') +]) +def test_update_xaxis(response): + if response == 'normal': + fig, ax = qutip.matrix_histogram(np.zeros((3, 3))) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + else: + text = "got 1 xlabels but needed 5" + + with pytest.raises(ValueError) as exc_info: + fig, ax = qutip.matrix_histogram(qutip.rand_dm(5), + x_basis=[1]) + assert str(exc_info.value) == text + + +def test_get_matrix_components(): + text = "got an unexpected argument, error for bar_style" + + with pytest.raises(ValueError) as exc_info: + fig, ax = qutip.matrix_histogram(qutip.rand_dm(5), + bar_style='error') + assert str(exc_info.value) == text + + +@pytest.mark.parametrize('args', [ + ({'options': {'stick': True, 'azim': 45}}), + ({'options': {'stick': True, 'azim': 135}}), + ({'options': {'stick': True, 'azim': 225}}), + ({'options': {'stick': True, 'azim': 315}}), +]) +def test_stick_to_planes(args): + rho = qutip.rand_dm(5) + + fig, ax = qutip.matrix_histogram(rho, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +@pytest.mark.parametrize('args', [ + ({}), + ({'options': {'zticks': [1]}}), + ({'x_basis': [1, 2, 3, 4, 5]}), + ({'y_basis': [1, 2, 3, 4, 5]}), + ({'limits': [0, 1]}), + ({'color_limits': [0, 1]}), + ({'color_style': 'phase'}), + ({'options': {'threshold': 0.1}}), + ({'color_style': 'real', 'colorbar': True}), + ({'color_style': 'img', 'colorbar': True}), + ({'color_style': 'abs', 'colorbar': True}), + ({'color_style': 'phase', 'colorbar': True}), + ({'color_limits': [0, 1], 'color_style': 'phase', 'colorbar': True}) +]) +def test_matrix_histogram(args): + rho = qutip.rand_dm(5) + + fig, ax = qutip.matrix_histogram(rho, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_matrix_histogram_zeros(): + rho = qutip.Qobj([[0, 0], [0, 0]]) + + 
fig, ax = qutip.matrix_histogram(rho) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_matrix_histogram_anim(): + rho = qutip.rand_dm(5) + rhos = [rho]*2 + + fig, ani = qutip.matrix_histogram(rhos) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +@pytest.mark.parametrize('args, expected', [ + ({'options': 'error'}, ("options must be a dictionary")), + ({'options': {'e1': '1', 'e2': '2'}}, + ("invalid key(s) found in options: e1, e2", + "invalid key(s) found in options: e2, e1")), +]) +def test_matrix_histogram_ValueError(args, expected): + + with pytest.raises(ValueError) as exc_info: + fig, ax = qutip.matrix_histogram(qutip.rand_dm(5), + **args) + assert str(exc_info.value) in expected + + +@pytest.mark.parametrize('args', [ + ({'h_labels': ['H0', 'H0+Hint']}), + ({'energy_levels': [-2, 0, 2]}), +]) +def test_plot_energy_levels(args): + H0 = qutip.tensor(qutip.sigmaz(), qutip.identity(2)) + \ + qutip.tensor(qutip.identity(2), qutip.sigmaz()) + Hint = 0.1 * qutip.tensor(qutip.sigmax(), qutip.sigmax()) + + fig, ax = qutip.plot_energy_levels([H0, Hint], **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_plot_energy_levels_ValueError(): + with pytest.raises(ValueError) as exc_info: + fig, ax = qutip.plot_energy_levels(1) + assert str(exc_info.value) == "H_list must be a list of Qobj instances" + + +@pytest.mark.parametrize('rho_type, args', [ + ('oper', {}), + ('ket', {}), + ('oper', {'fock_numbers': [0, 1, 2, 3]}), + ('oper', {'unit_y_range': False}), +]) +def test_plot_fock_distribution(rho_type, args): + if rho_type == 'oper': + rho = qutip.rand_dm(4) + else: + rho = qutip.basis(2, 0) + + fig, ax = qutip.plot_fock_distribution(rho, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def 
test_plot_fock_distribution_anim(): + rho = qutip.rand_dm(5) + rhos = [rho]*2 + + fig, ani = qutip.plot_fock_distribution(rhos) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +@pytest.mark.parametrize('rho_type, args', [ + ('oper', {}), + ('ket', {}), + ('oper', {'xvec': np.linspace(-1, 1, 100)}), + ('oper', {'yvec': np.linspace(-1, 1, 100)}), + ('oper', {'method': 'fft'}), + ('oper', {'projection': '3d'}), + ('oper', {'colorbar': True}) +]) +def test_plot_wigner(rho_type, args): + if rho_type == 'oper': + rho = qutip.rand_dm(4) + else: + rho = qutip.basis(2, 0) + + fig, ax = qutip.plot_wigner(rho, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_plot_wigner_anim(): + rho = qutip.rand_dm(5) + rhos = [rho]*2 + + fig, ani = qutip.plot_wigner(rhos) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_plot_wigner_ValueError(): + text = "Unexpected value of projection keyword argument" + with pytest.raises(ValueError) as exc_info: + rho = qutip.rand_dm(4) + + fig, ax = qutip.plot_wigner(rho, projection=1) + assert str(exc_info.value) == text + + +@pytest.mark.parametrize('n_of_results, n_of_e_ops, one_axes, args', [ + (1, 3, False, {}), + (1, 3, False, {'ylabels': [1, 2, 3]}), + (1, 1, True, {}), + (2, 3, False, {}), +]) +def test_plot_expectation_values(n_of_results, n_of_e_ops, one_axes, args): + H = qutip.sigmaz() + 0.3 * qutip.sigmay() + e_ops = [qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()] + times = np.linspace(0, 10, 100) + psi0 = (qutip.basis(2, 0) + qutip.basis(2, 1)).unit() + result = qutip.mesolve(H, psi0, times, [], e_ops[:n_of_e_ops]) + + if n_of_results == 1: + results = result + else: + results = [result, result] + + if one_axes: + fig = plt.figure() + axes = fig.add_subplot(111) + else: + fig = None + axes = None + + fig, axes = 
qutip.plot_expectation_values(results, **args, + fig=fig, axes=axes) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(axes, np.ndarray) + + +@pytest.mark.filterwarnings( + "ignore:The input coordinates to pcolor:UserWarning" +) +@pytest.mark.parametrize('color, args', [ + ('sequential', {}), + ('diverging', {}), + ('sequential', {'projection': '3d'}), + ('sequential', {'colorbar': True}) +]) +def test_plot_spin_distribution(color, args): + j = 5 + psi = qutip.spin_coherent(j, np.random.rand() * np.pi, + np.random.rand() * 2 * np.pi) + theta = np.linspace(0, np.pi, 50) + phi = np.linspace(0, 2 * np.pi, 50) + Q, THETA, PHI = qutip.spin_q_function(psi, theta, phi) + if color == 'diverging': + Q *= -1e12 + Q[0, 0] = -1e13 + + fig, ax = qutip.plot_spin_distribution(Q, THETA, PHI, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +@pytest.mark.filterwarnings( + "ignore:The input coordinates to pcolor:UserWarning" +) +def test_plot_spin_distribution_anim(): + j = 5 + psi = qutip.spin_coherent(j, np.random.rand() * np.pi, + np.random.rand() * 2 * np.pi) + theta = np.linspace(0, np.pi, 50) + phi = np.linspace(0, 2 * np.pi, 50) + Q, THETA, PHI = qutip.spin_q_function(psi, theta, phi) + + fig, ani = qutip.plot_spin_distribution([Q]*2, THETA, PHI) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_plot_spin_distribution_ValueError(): + text = "Unexpected value of projection keyword argument" + j = 5 + psi = qutip.spin_coherent(j, np.random.rand() * np.pi, + np.random.rand() * 2 * np.pi) + theta = np.linspace(0, np.pi, 50) + phi = np.linspace(0, 2 * np.pi, 50) + Q, THETA, PHI = qutip.spin_q_function(psi, theta, phi) + + with pytest.raises(ValueError) as exc_info: + fig, ax = qutip.plot_spin_distribution(Q, THETA, PHI, projection=1) + assert str(exc_info.value) == text + + +@pytest.mark.parametrize('args', [ + 
({}), + ({'theme': 'dark'}), +]) +def test_complex_array_to_rgb(args): + Y = qutip.complex_array_to_rgb(np.zeros((3, 3)), **args) + plt.close() + + assert isinstance(Y, np.ndarray) + + +@pytest.mark.parametrize('dims, args', [ + (2, {}), + (3, {}), + (2, {'how': 'pairs'}), + (2, {'how': 'pairs_skewed'}), + (2, {'how': 'before_after'}), + (2, {'legend_iteration': 'all'}), + (2, {'legend_iteration': 'grid_iteration'}), + (2, {'legend_iteration': 1, 'how': 'before_after'}), + (2, {'legend_iteration': 1, 'how': 'pairs'}), +]) +def test_plot_qubism(dims, args): + if dims == 2: + state = qutip.ket("01") + else: + state = qutip.ket("010") + + fig, ax = qutip.plot_qubism(state, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_plot_qubism_anim(): + state = qutip.ket("01") + + fig, ani = qutip.plot_qubism([state]*2) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +@pytest.mark.parametrize('ket, args, expected', [ + (False, {}, "Qubism works only for pure states, i.e. kets."), + (True, {'how': 'error'}, "No such 'how'."), + (True, {'legend_iteration': 'error'}, "No such option for " + + "legend_iteration keyword argument. " + + "Use 'all', 'grid_iteration' or an integer."), +]) +def test_plot_qubism_Error(ket, args, expected): + if ket: + state = qutip.ket("01") + else: + state = qutip.bra("01") + + with pytest.raises(Exception) as exc_info: + fig, ax = qutip.plot_qubism(state, **args) + assert str(exc_info.value) == expected + + +def test_plot_qubism_dimension(): + text = "For 'pairs_skewed' pairs of dimensions need to be the same." 
+ + ket = qutip.basis(3) & qutip.basis(2) + + with pytest.raises(Exception) as exc_info: + qutip.plot_qubism(ket, how='pairs_skewed') + assert str(exc_info.value) == text + + +@pytest.mark.parametrize('args', [ + ({'splitting': None}), + ({'labels_iteration': 1}), +]) +def test_plot_schmidt(args): + state = qutip.ket("01") + + fig, ax = qutip.plot_schmidt(state, **args) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ax, mpl.axes.Axes) + + +def test_plot_schmidt_anim(): + state = qutip.ket("01") + + fig, ani = qutip.plot_schmidt([state]*2) + plt.close() + + assert isinstance(fig, mpl.figure.Figure) + assert isinstance(ani, mpl.animation.ArtistAnimation) + + +def test_plot_schmidt_Error(): + state = qutip.bra("01") + text = "Schmidt plot works only for pure states, i.e. kets." + + with pytest.raises(Exception) as exc_info: + fig, ax = qutip.plot_schmidt(state) + assert str(exc_info.value) == text diff --git a/qutip/tests/test_wigner.py b/qutip/tests/test_wigner.py index 78c5714845..7087ca18d8 100644 --- a/qutip/tests/test_wigner.py +++ b/qutip/tests/test_wigner.py @@ -3,8 +3,7 @@ import itertools from scipy.special import laguerre from numpy.random import rand -from numpy.testing import assert_, run_module_suite, assert_equal, \ - assert_almost_equal, assert_allclose +from numpy.testing import assert_equal, assert_almost_equal, assert_allclose import qutip from qutip.core.states import coherent, fock, ket, bell_state @@ -226,7 +225,7 @@ def test_against_naive_implementation(self, xs, ys, g, size): naive = np.empty(alphas.shape, dtype=np.float64) for i, alpha in enumerate(alphas.flat): coh = qutip.coherent(size, alpha, method='analytic').full() - naive.flat[i] = (coh.conj().T @ state_np @ coh).real + naive.flat[i] = (coh.conj().T @ state_np @ coh).real[0, 0] naive *= (0.5*g)**2 / np.pi np.testing.assert_allclose(naive, qutip.qfunc(state, xs, ys, g)) np.testing.assert_allclose(naive, qutip.QFunc(xs, ys, g)(state)) @@ -264,7 +263,7 @@ 
def test_wigner_bell1_su2parity(): wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray) - assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11) + assert (np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11) @pytest.mark.slow @@ -283,7 +282,7 @@ def test_wigner_bell4_su2parity(): wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray) - assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11) + assert (np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11) @pytest.mark.slow @@ -302,17 +301,17 @@ def test_wigner_bell4_fullparity(): wigner_analyt[t, p] = -0.30901699 wigner_theo = wigner_transform(psi, 0.5, True, steps, slicearray) - assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-4) + assert (np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-4) def test_parity(): """wigner: testing the parity function. """ j = 0.5 - assert_(_parity(2, j)[0, 0] - (1 - np.sqrt(3)) / 2. < 1e-11) - assert_(_parity(2, j)[0, 1] < 1e-11) - assert_(_parity(2, j)[1, 1] - (1 + np.sqrt(3)) / 2. < 1e-11) - assert_(_parity(2, j)[1, 0] < 1e-11) + assert (_parity(2, j)[0, 0] - (1 - np.sqrt(3)) / 2. < 1e-11) + assert (_parity(2, j)[0, 1] < 1e-11) + assert (_parity(2, j)[1, 1] - (1 + np.sqrt(3)) / 2. 
< 1e-11) + assert (_parity(2, j)[1, 0] < 1e-11) @pytest.mark.slow @@ -334,7 +333,7 @@ def test_wigner_pure_su2(): wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray) - assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11) + assert (np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11) @pytest.mark.slow @@ -378,7 +377,7 @@ def test_wigner_ghz_su2parity(): wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray) - assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11) + assert (np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11) @pytest.mark.slow @@ -398,10 +397,10 @@ def test_angle_slicing(): wigner3 = wigner_transform(psi3, j, False, steps, ['l', 'x']) wigner4 = wigner_transform(psi4, j, False, steps, ['l', 'y']) - assert_(np.sum(np.abs(wigner2 - wigner1)) < 1e-11) - assert_(np.sum(np.abs(wigner3 - wigner2)) < 1e-11) - assert_(np.sum(np.abs(wigner4 - wigner3)) < 1e-11) - assert_(np.sum(np.abs(wigner4 - wigner1)) < 1e-11) + assert (np.sum(np.abs(wigner2 - wigner1)) < 1e-11) + assert (np.sum(np.abs(wigner3 - wigner2)) < 1e-11) + assert (np.sum(np.abs(wigner4 - wigner3)) < 1e-11) + assert (np.sum(np.abs(wigner4 - wigner1)) < 1e-11) def test_wigner_coherent(): @@ -426,13 +425,13 @@ def test_wigner_coherent(): W_analytic = 2 / np.pi * np.exp(-2 * abs(a - beta) ** 2) # check difference - assert_(np.sum(abs(W_qutip - W_analytic) ** 2) < 1e-4) - assert_(np.sum(abs(W_qutip_cl - W_analytic) ** 2) < 1e-4) + assert (np.sum(abs(W_qutip - W_analytic) ** 2) < 1e-4) + assert (np.sum(abs(W_qutip_cl - W_analytic) ** 2) < 1e-4) # check normalization - assert_(np.sum(W_qutip) * dx * dy - 1.0 < 1e-8) - assert_(np.sum(W_qutip_cl) * dx * dy - 1.0 < 1e-8) - assert_(np.sum(W_analytic) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip_cl) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_analytic) * dx * dy - 1.0 < 1e-8) def test_wigner_fock(): @@ -462,15 +461,15 @@ def test_wigner_fock(): np.exp(-2 * abs(a) ** 
2) * np.polyval(laguerre(n), 4 * abs(a) ** 2) # check difference - assert_(np.sum(abs(W_qutip - W_analytic)) < 1e-4) - assert_(np.sum(abs(W_qutip_cl - W_analytic)) < 1e-4) - assert_(np.sum(abs(W_qutip_sparse - W_analytic)) < 1e-4) + assert (np.sum(abs(W_qutip - W_analytic)) < 1e-4) + assert (np.sum(abs(W_qutip_cl - W_analytic)) < 1e-4) + assert (np.sum(abs(W_qutip_sparse - W_analytic)) < 1e-4) # check normalization - assert_(np.sum(W_qutip) * dx * dy - 1.0 < 1e-8) - assert_(np.sum(W_qutip_cl) * dx * dy - 1.0 < 1e-8) - assert_(np.sum(W_qutip_sparse) * dx * dy - 1.0 < 1e-8) - assert_(np.sum(W_analytic) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip_cl) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip_sparse) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_analytic) * dx * dy - 1.0 < 1e-8) def test_wigner_compare_methods_dm(): @@ -498,11 +497,11 @@ def test_wigner_compare_methods_dm(): W_qutip2 = wigner(rho, xvec, yvec, g=2, method='laguerre') # check difference - assert_(np.sum(abs(W_qutip1 - W_qutip1)) < 1e-4) + assert (np.sum(abs(W_qutip1 - W_qutip1)) < 1e-4) # check normalization - assert_(np.sum(W_qutip1) * dx * dy - 1.0 < 1e-8) - assert_(np.sum(W_qutip2) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip1) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip2) * dx * dy - 1.0 < 1e-8) def test_wigner_compare_methods_ket(): @@ -530,11 +529,11 @@ def test_wigner_compare_methods_ket(): W_qutip2 = wigner(psi, xvec, yvec, g=2, sparse=True) # check difference - assert_(np.sum(abs(W_qutip1 - W_qutip2)) < 1e-4) + assert (np.sum(abs(W_qutip1 - W_qutip2)) < 1e-4) # check normalization - assert_(np.sum(W_qutip1) * dx * dy - 1.0 < 1e-8) - assert_(np.sum(W_qutip2) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip1) * dx * dy - 1.0 < 1e-8) + assert (np.sum(W_qutip2) * dx * dy - 1.0 < 1e-8) def test_wigner_fft_comparse_ket(): @@ -688,6 +687,3 @@ def test_spin_wigner_overlap(spin, pure, n=5): W_overlap = np.trapz( np.trapz(W_state * W * 
np.sin(THETA), theta), phi).real assert_almost_equal(W_overlap, state_overlap, decimal=4) - -if __name__ == "__main__": - run_module_suite() diff --git a/qutip/three_level_atom.py b/qutip/three_level_atom.py deleted file mode 100644 index eee506ef56..0000000000 --- a/qutip/three_level_atom.py +++ /dev/null @@ -1,74 +0,0 @@ -r''' -This module provides functions that are useful for simulating the -three level atom with QuTiP. A three level atom (qutrit) has three states, -which are linked by dipole transitions so that 1 <-> 2 <-> 3. -Depending on there relative energies they are in the ladder, lambda or -vee configuration. The structure of the relevant operators is the same -for any of the three configurations:: - - Ladder: Lambda: Vee: - |two> |three> - -------|three> ------- ------- - | / \ |one> / - | / \ ------- / - | / \ \ / - -------|two> / \ \ / - | / \ \ / - | / \ \ / - | / -------- \ / - -------|one> ------- |three> ------- - |one> |two> - -References ----------- -The naming of qutip operators follows the convention in [1]_ . - -.. [1] Shore, B. W., "The Theory of Coherent Atomic Excitation", - Wiley, 1990. - -Notes ------ -Contributed by Markus Baden, Oct. 07, 2011 - -''' - -__all__ = ['three_level_basis', 'three_level_ops'] - -import numpy as np -from . import qutrit_basis - - -def three_level_basis(): - ''' Basis states for a three level atom. - - Returns - ------- - states : np.array - :obj:`numpy.ndarray` of three level atom basis vectors. - ''' - # A three level atom has the same representation as a qutrit, i.e. - # three states - return qutrit_basis() - - -def three_level_ops(): - """ - Operators for a three level system (qutrit) - - Returns - ------- - ops : np.array - :obj:`numpy.ndarray` of three level operators. - """ - out = np.empty((5,), dtype=object) - one, two, three = qutrit_basis() - # Note that the three level operators are different - # from the qutrit operators. 
A three level atom only - # has transitions 1 <-> 2 <-> 3, so we define the - # operators seperately from the qutrit code - out[0] = one * one.dag() - out[1] = two * two.dag() - out[2] = three * three.dag() - out[3] = one * two.dag() - out[4] = three * two.dag() - return out diff --git a/qutip/tomography.py b/qutip/tomography.py index a663ee30b4..ebff6a0c68 100644 --- a/qutip/tomography.py +++ b/qutip/tomography.py @@ -3,7 +3,8 @@ from numpy import hstack, real, imag import scipy.linalg as la from . import tensor, spre, spost, stack_columns, unstack_columns -from .visualization import matrix_histogram, matrix_histogram_complex +from .visualization import matrix_histogram +import itertools try: import matplotlib.pyplot as plt @@ -11,7 +12,7 @@ pass -def _index_permutations(size_list, perm=[]): +def _index_permutations(size_list): """ Generate a list with all index permutations. @@ -19,22 +20,14 @@ def _index_permutations(size_list, perm=[]): ---------- size_list : list A list that contains the sizes for each composite system. - perm : list - A list of permutations Returns ------- perm_idx : list List containing index permutations. 
- """ - if len(size_list) == 0: - yield perm - else: - for n in range(size_list[0]): - for ip in _index_permutations(size_list[1:], perm + [n]): - yield ip + return itertools.product(*[range(N) for N in size_list]) def qpt_plot(chi, lbls_list, title=None, fig=None, axes=None): @@ -78,10 +71,11 @@ def qpt_plot(chi, lbls_list, title=None, fig=None, axes=None): xlabels.append("".join([lbls_list[k][inds[k]] for k in range(len(lbls_list))])) - matrix_histogram(real(chi), xlabels, xlabels, - title=r"real($\chi$)", limits=[-1, 1], ax=axes[0]) - matrix_histogram(imag(chi), xlabels, xlabels, - title=r"imag($\chi$)", limits=[-1, 1], ax=axes[1]) + matrix_histogram(real(chi), xlabels, xlabels, limits=[-1, 1], ax=axes[0]) + axes[0].set_title(r"real($\chi$)") + + matrix_histogram(imag(chi), xlabels, xlabels, limits=[-1, 1], ax=axes[1]) + axes[1].set_title(r"imag($\chi$)") if title and fig: fig.suptitle(title) @@ -139,8 +133,10 @@ def qpt_plot_combined(chi, lbls_list, title=None, if not title: title = r"$\chi$" - matrix_histogram_complex(chi, xlabels, xlabels, title=title, ax=ax, - threshold=threshold) + matrix_histogram(chi, xlabels, xlabels, bar_style='abs', + color_style='phase', + options={'threshold': threshold}, ax=ax) + ax.set_title(title) return fig, ax diff --git a/qutip/ui/progressbar.py b/qutip/ui/progressbar.py index c5a6829ef5..1999d0bf8f 100644 --- a/qutip/ui/progressbar.py +++ b/qutip/ui/progressbar.py @@ -1,10 +1,11 @@ __all__ = ['BaseProgressBar', 'TextProgressBar', 'EnhancedTextProgressBar', 'TqdmProgressBar', - 'progess_bars'] + 'HTMLProgressBar', 'progress_bars'] import time import datetime import sys +from qutip import settings class BaseProgressBar(object): @@ -16,16 +17,16 @@ class BaseProgressBar(object): n_vec = linspace(0, 10, 100) pbar = TextProgressBar(len(n_vec)) for n in n_vec: - pbar.update(n) + pbar.update() compute_with_n(n) pbar.finished() """ - def __init__(self, iterations=0, chunk_size=10): - pass + def __init__(self, iterations=0, 
chunk_size=10, **kwargs): + self._start(iterations, chunk_size) - def start(self, iterations, chunk_size=10, **kwargs): + def _start(self, iterations, chunk_size=10, **kwargs): self.N = float(iterations) self.n = 0 self.p_chunk_size = chunk_size @@ -33,7 +34,7 @@ def start(self, iterations, chunk_size=10, **kwargs): self.t_start = time.time() self.t_done = self.t_start - 1 - def update(self, n=None): + def update(self): pass def total_time(self): @@ -63,14 +64,10 @@ class TextProgressBar(BaseProgressBar): A simple text-based progress bar. """ - def __init__(self, iterations=0, chunk_size=10): - pass - # super(TextProgressBar, self).start(iterations, chunk_size) - - def start(self, iterations, chunk_size=10, **kwargs): - super(TextProgressBar, self).start(iterations, chunk_size) + def __init__(self, iterations=0, chunk_size=10, **kwargs): + super()._start(iterations, chunk_size) - def update(self, n=None): + def update(self): self.n += 1 n = self.n p = (n / self.N) * 100.0 @@ -91,16 +88,12 @@ class EnhancedTextProgressBar(BaseProgressBar): An enhanced text-based progress bar. 
""" - def __init__(self, iterations=0, chunk_size=10): - pass - # super(EnhancedTextProgressBar, self).start(iterations, chunk_size) - - def start(self, iterations, chunk_size=10, **kwargs): - super(EnhancedTextProgressBar, self).start(iterations, chunk_size) + def __init__(self, iterations=0, chunk_size=10, **kwargs): + super()._start(iterations, chunk_size) self.fill_char = '*' self.width = 25 - def update(self, n=None): + def update(self): self.n += 1 n = self.n percent_done = int(round(n / self.N * 100.0)) @@ -128,16 +121,13 @@ class TqdmProgressBar(BaseProgressBar): A progress bar using tqdm module """ - def __init__(self, iterations=0, chunk_size=10): + def __init__(self, iterations=0, chunk_size=10, **kwargs): from tqdm.auto import tqdm - self.tqdm = tqdm - - def start(self, iterations, **kwargs): - self.pbar = self.tqdm(total=iterations, **kwargs) + self.pbar = tqdm(total=iterations, **kwargs) self.t_start = time.time() self.t_done = self.t_start - 1 - def update(self, n=None): + def update(self): self.pbar.update() def finished(self): @@ -145,7 +135,61 @@ def finished(self): self.t_done = time.time() -progess_bars = { +class HTMLProgressBar(BaseProgressBar): + """ + A simple HTML progress bar for using in IPython notebooks. Based on + IPython ProgressBar demo notebook: + https://github.com/ipython/ipython/tree/master/examples/notebooks + + Example usage: + + n_vec = linspace(0, 10, 100) + pbar = HTMLProgressBar(len(n_vec)) + for n in n_vec: + pbar.update() + compute_with_n(n) + """ + + def __init__(self, iterations=0, chunk_size=1.0, **kwargs): + from IPython.display import HTML, Javascript, display + import uuid + + self.display = display + self.Javascript = Javascript + self.divid = str(uuid.uuid4()) + self.textid = str(uuid.uuid4()) + self.pb = HTML( + '
\n ' + f'
 
\n' + '
\n' + f'

\n' + ) + self.display(self.pb) + super()._start(iterations, chunk_size) + + def update(self): + self.n += 1 + n = self.n + p = (n / self.N) * 100.0 + if p >= self.p_chunk: + lbl = ("Elapsed time: %s. " % self.time_elapsed() + + "Est. remaining time: %s." % self.time_remaining_est(p)) + js_code = ("$('div#%s').width('%i%%');" % (self.divid, p) + + "$('p#%s').text('%s');" % (self.textid, lbl)) + self.display(self.Javascript(js_code)) + self.p_chunk += self.p_chunk_size + + def finished(self): + self.t_done = time.time() + lbl = "Elapsed time: %s" % self.time_elapsed() + js_code = ("$('div#%s').width('%i%%');" % (self.divid, 100.0) + + "$('p#%s').text('%s');" % (self.textid, lbl)) + self.display(self.Javascript(js_code)) + + +progress_bars = { "Enhanced": EnhancedTextProgressBar, "enhanced": EnhancedTextProgressBar, "Text": TextProgressBar, @@ -153,6 +197,9 @@ def finished(self): True: TextProgressBar, "Tqdm": TqdmProgressBar, "tqdm": TqdmProgressBar, + "Html": HTMLProgressBar, + "html": HTMLProgressBar, + "base": BaseProgressBar, "": BaseProgressBar, False: BaseProgressBar, None: BaseProgressBar, diff --git a/qutip/visualization.py b/qutip/visualization.py index 27fe62c60a..32cb6e401c 100644 --- a/qutip/visualization.py +++ b/qutip/visualization.py @@ -3,14 +3,11 @@ visualizations of quantum states and processes. 
""" -__all__ = ['hinton', 'sphereplot', 'energy_level_diagram', - 'plot_energy_levels', 'fock_distribution', - 'plot_fock_distribution', 'wigner_fock_distribution', - 'plot_wigner_fock_distribution', 'plot_wigner', - 'plot_expectation_values', 'plot_spin_distribution_2d', - 'plot_spin_distribution_3d', 'plot_qubism', 'plot_schmidt', - 'complex_array_to_rgb', 'matrix_histogram', - 'matrix_histogram_complex', 'sphereplot', 'plot_wigner_sphere'] +__all__ = ['plot_wigner_sphere', 'hinton', 'sphereplot', + 'matrix_histogram', 'plot_energy_levels', 'plot_fock_distribution', + 'plot_wigner', 'plot_expectation_values', + 'plot_spin_distribution', 'complex_array_to_rgb', + 'plot_qubism', 'plot_schmidt'] import warnings import itertools as it @@ -30,6 +27,7 @@ try: import matplotlib.pyplot as plt import matplotlib as mpl + import matplotlib.animation as animation from matplotlib import cm from mpl_toolkits.mplot3d import Axes3D @@ -46,80 +44,188 @@ def _axes3D(*args, **kwargs): pass -def plot_wigner_sphere(fig, ax, wigner, reflections): +def _cyclic_cmap(): + if settings.colorblind_safe: + return cm.twilight + else: + return complex_phase_cmap() + + +def _diverging_cmap(): + if settings.colorblind_safe: + return cm.seismic + else: + return cm.RdBu + + +def _sequential_cmap(): + if settings.colorblind_safe: + return cm.cividis + else: + return cm.jet + + +def _is_fig_and_ax(fig, ax, projection='2d'): + if fig is None: + if ax is None: + fig = plt.figure() + if projection == '2d': + ax = fig.add_subplot(1, 1, 1) + else: + ax = _axes3D(fig) + else: + fig = ax.get_figure() + else: + if ax is None: + if projection == '2d': + ax = fig.add_subplot(1, 1, 1) + else: + ax = _axes3D(fig) + + return fig, ax + + +def _set_ticklabels(ax, ticklabels, ticks, axis, fontsize=14): + if len(ticks) != len(ticklabels): + raise ValueError( + f"got {len(ticklabels)} ticklabels but needed {len(ticks)}" + ) + if axis == 'x': + ax.set_xticks(ticks) + ax.set_xticklabels(ticklabels, fontsize=fontsize) + 
elif axis == 'y': + ax.set_yticks(ticks) + ax.set_yticklabels(ticklabels, fontsize=fontsize) + else: + raise ValueError( + "axis must be either 'x' or 'y'" + ) + + +def _equal_shape(matrices): + first_shape = matrices[0].shape + + text = "All inputs should have the same shape." + if not all(matrix.shape == first_shape for matrix in matrices): + raise ValueError(text) + + +def plot_wigner_sphere(wigner, reflections=False, *, cmap=None, + colorbar=True, fig=None, ax=None): """Plots a coloured Bloch sphere. Parameters ---------- - fig : :obj:`matplotlib.figure.Figure` - An instance of :obj:`~matplotlib.figure.Figure`. - ax : :obj:`matplotlib.axes.Axes` - An axes instance in the given figure. - wigner : list of float + wigner : a wigner transformation The wigner transformation at `steps` different theta and phi. - reflections : bool + + reflections : bool, default=False If the reflections of the sphere should be plotted as well. + cmap : a matplotlib colormap instance, optional + Color map to use when plotting. + + colorbar : bool, default=True + Whether (True) or not (False) a colorbar should be attached. + + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The ax context in which the plot will be drawn. + + Returns + ------- + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. + Notes ----- Special thanks to Russell P Rundle for writing this function. 
""" + + fig, ax = _is_fig_and_ax(fig, ax, projection='3d') + + if not isinstance(wigner, list): + wigners = [wigner] + else: + wigners = wigner + + _equal_shape(wigners) + + wigner_max = np.real(np.amax(np.abs(wigners[0]))) + for wigner in wigners: + wigner_max = max(np.real(np.amax(np.abs(wigner))), wigner_max) + + norm = mpl.colors.Normalize(-wigner_max, wigner_max) + + if cmap is None: + cmap = _diverging_cmap() + + artist_list = list() + for wigner in wigners: + steps = len(wigner) + theta = np.linspace(0, np.pi, steps) + phi = np.linspace(0, 2 * np.pi, steps) + x = np.outer(np.sin(theta), np.cos(phi)) + y = np.outer(np.sin(theta), np.sin(phi)) + z = np.outer(np.cos(theta), np.ones(steps)) + wigner = np.real(wigner) + + artist = list() + # Plot coloured Bloch sphere: + artist.append(ax.plot_surface(x, y, z, facecolors=cmap(norm(wigner)), + rcount=steps, ccount=steps, linewidth=0, + zorder=0.5, antialiased=None)) + + if reflections: + side_color = cmap(norm(wigner[0:steps, 0:steps])) + + # Plot bottom reflection: + artist.append(ax.plot_surface(x[0:steps, 0:steps], + y[0:steps, 0:steps], + -1.5*np.ones((steps, steps)), + facecolors=side_color, + rcount=steps/2, ccount=steps/2, + linewidth=0, zorder=0.5, + antialiased=False)) + + # Plot side reflection: + artist.append(ax.plot_surface(-1.5*np.ones((steps, steps)), + y[0:steps, 0:steps], + z[0:steps, 0:steps], + facecolors=side_color, + rcount=steps/2, ccount=steps/2, + linewidth=0, zorder=0.5, + antialiased=False)) + + # Plot back reflection: + artist.append(ax.plot_surface(x[0:steps, 0:steps], + 1.5*np.ones((steps, steps)), + z[0:steps, 0:steps], + facecolors=side_color, + rcount=steps/2, ccount=steps/2, + linewidth=0, zorder=0.5, + antialiased=False)) + artist_list.append(artist) + + if len(wigners) == 1: + output = ax + else: + output = animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) + ax.set_xlabel("x") ax.set_ylabel("y") ax.set_zlabel("z") - steps = len(wigner) - - 
theta = np.linspace(0, np.pi, steps) - phi = np.linspace(0, 2 * np.pi, steps) - x = np.outer(np.sin(theta), np.cos(phi)) - y = np.outer(np.sin(theta), np.sin(phi)) - z = np.outer(np.cos(theta), np.ones(steps)) - wigner = np.real(wigner) - wigner_max = np.real(np.amax(np.abs(wigner))) - - wigner_c1 = cm.seismic_r((wigner + wigner_max) / (2 * wigner_max)) - - # Plot coloured Bloch sphere: - ax.plot_surface(x, y, z, facecolors=wigner_c1, vmin=-wigner_max, - vmax=wigner_max, rcount=steps, ccount=steps, linewidth=0, - zorder=0.5, antialiased=None) - - if reflections: - wigner_c2 = cm.seismic_r((wigner[0:steps, 0:steps]+wigner_max) / - (2*wigner_max)) # bottom - wigner_c3 = cm.seismic_r((wigner[0:steps, 0:steps]+wigner_max) / - (2*wigner_max)) # side - wigner_c4 = cm.seismic_r((wigner[0:steps, 0:steps]+wigner_max) / - (2*wigner_max)) # back - - # Plot bottom reflection: - ax.plot_surface(x[0:steps, 0:steps], y[0:steps, 0:steps], - -1.5*np.ones((steps, steps)), facecolors=wigner_c2, - vmin=-wigner_max, vmax=wigner_max, rcount=steps/2, - ccount=steps/2, linewidth=0, zorder=0.5, - antialiased=False) - - # Plot side reflection: - ax.plot_surface(-1.5*np.ones((steps, steps)), y[0:steps, 0:steps], - z[0:steps, 0:steps], facecolors=wigner_c3, - vmin=-wigner_max, vmax=wigner_max, rcount=steps/2, - ccount=steps/2, linewidth=0, zorder=0.5, - antialiased=False) - - # Plot back reflection: - ax.plot_surface(x[0:steps, 0:steps], 1.5*np.ones((steps, steps)), - z[0:steps, 0:steps], facecolors=wigner_c4, - vmin=-wigner_max, vmax=wigner_max, rcount=steps/2, - ccount=steps/2, linewidth=0, zorder=0.5, - antialiased=False) - # Create colourbar: - m = cm.ScalarMappable(cmap=cm.seismic_r) - m.set_array([-wigner_max, wigner_max]) - plt.colorbar(m, shrink=0.5, aspect=10) + if colorbar: + cax, kw = mpl.colorbar.make_axes(ax, shrink=0.75, pad=.1) + mpl.colorbar.ColorbarBase(cax, norm=norm, cmap=cmap) - plt.show() + return fig, output # Adopted from the SciPy Cookbook. 
@@ -137,8 +243,7 @@ def _blob(x, y, w, w_max, area, color_fn, ax=None): else: handle = plt - handle.fill(xcorners, ycorners, - color=color_fn(w)) + return handle.fill(xcorners, ycorners, color=color_fn(w)) def _cb_labels(left_dims): @@ -171,35 +276,28 @@ def _cb_labels(left_dims): # Adopted from the SciPy Cookbook. -def hinton(rho, xlabels=None, ylabels=None, title=None, ax=None, cmap=None, - label_top=True, color_style="scaled"): - """Draws a Hinton diagram for visualizing a density matrix or superoperator. +def hinton(rho, x_basis=None, y_basis=None, color_style="scaled", + label_top=True, *, cmap=None, colorbar=True, fig=None, ax=None): + """Draws a Hinton diagram to visualize a density matrix or superoperator. Parameters ---------- rho : qobj Input density matrix or superoperator. - xlabels : list of strings or False - list of x labels + .. note:: - ylabels : list of strings or False - list of y labels + Hinton plots of superoperators are currently only + supported for qubits. - title : string - title of the plot (optional) + x_basis : list of strings, optional + list of x ticklabels to represent x basis of the input. - ax : a matplotlib axes instance - The axes context in which the plot will be drawn. - - cmap : a matplotlib colormap instance - Color map to use when plotting. + y_basis : list of strings, optional + list of y ticklabels to represent y basis of the input. - label_top : bool - If True, x-axis labels will be placed on top, otherwise - they will appear below the plot. + color_style : string, default="scaled" - color_style : string Determines how colors are assigned to each square: - If set to ``"scaled"`` (default), each color is chosen by @@ -212,11 +310,27 @@ def hinton(rho, xlabels=None, ylabels=None, title=None, ax=None, cmap=None, - If set to ``"phase"``, each color is chosen according to the angle of the corresponding matrix element. 
+ label_top : bool, default=True + If True, x ticklabels will be placed on top, otherwise + they will appear below the plot. + + cmap : a matplotlib colormap instance, optional + Color map to use when plotting. + + colorbar : bool, default=True + Whether (True) or not (False) a colorbar should be attached. + + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The ax context in which the plot will be drawn. + Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. Raises ------ @@ -240,81 +354,82 @@ def hinton(rho, xlabels=None, ylabels=None, title=None, ax=None, cmap=None, >>> fig.show() """ - # Apply default colormaps. - # TODO: abstract this away into something that makes default - # colormaps. - cmap = ( - (cm.Greys_r if settings.colorblind_safe else cm.RdBu) - if cmap is None else cmap - ) - - # Extract plotting data W from the input. - if isinstance(rho, Qobj): - if rho.isoper: - W = rho.full() - - # Create default labels if none are given. - if xlabels is None or ylabels is None: + fig, ax = _is_fig_and_ax(fig, ax) + + if not isinstance(rho, list): + rhos = [rho] + else: + rhos = rho + + _equal_shape(rhos) + + Ws = list() + w_max = 0 + for rho in rhos: + # Extract plotting data W from the input. + if isinstance(rho, Qobj): + if rho.isoper or rho.isoperket or rho.isoperbra: + if rho.isoperket: + rho = vector_to_operator(rho) + elif rho.isoperbra: + rho = vector_to_operator(rho.dag()) + W = rho.full() + # Create default labels if none are given. 
labels = _cb_labels(rho.dims[0]) - xlabels = xlabels if xlabels is not None else list(labels[0]) - ylabels = ylabels if ylabels is not None else list(labels[1]) - - elif rho.isoperket: - W = vector_to_operator(rho).full() - elif rho.isoperbra: - W = vector_to_operator(rho.dag()).full() - elif rho.issuper: - if not isqubitdims(rho.dims): - raise ValueError("Hinton plots of superoperators are " - "currently only supported for qubits.") - # Convert to a superoperator in the Pauli basis, - # so that all the elements are real. - sqobj = _to_superpauli(rho) - nq = int(log2(sqobj.shape[0]) / 2) - W = sqobj.full().T - # Create default labels, too. - if (xlabels is None) or (ylabels is None): + if x_basis is None: + x_basis = list(labels[0]) + if y_basis is None: + y_basis = list(labels[1]) + + elif rho.issuper: + if not isqubitdims(rho.dims): + raise ValueError("Hinton plots of superoperators are " + "currently only supported for qubits.") + # Convert to a superoperator in the Pauli basis, + # so that all the elements are real. + sqobj = _to_superpauli(rho) + nq = int(log2(sqobj.shape[0]) / 2) + W = sqobj.full().T + # Create default labels, too. labels = list(map("".join, it.product("IXYZ", repeat=nq))) - xlabels = xlabels if xlabels is not None else labels - ylabels = ylabels if ylabels is not None else labels - + if x_basis is None: + x_basis = labels + if y_basis is None: + y_basis = labels + + else: + raise ValueError( + "Input quantum object must be " + "an operator or superoperator.") else: - raise ValueError( - "Input quantum object must be an operator or superoperator." 
- ) + W = rho + Ws.append(W) - else: - W = rho + height, width = W.shape - if ax is None: - fig, ax = plt.subplots(1, 1, figsize=(8, 6)) - else: - fig = None - - if not (xlabels or ylabels): - ax.axis('off') - if title: - ax.set_title(title) - - ax.axis('equal') - ax.set_frame_on(False) - - height, width = W.shape - - w_max = 1.25 * max(abs(np.array(W)).flatten()) - if w_max <= 0.0: - w_max = 1.0 + w_max = max(1.25 * max(abs(np.array(W)).flatten()), w_max) + if w_max <= 0.0: + w_max = 1.0 # Set color_fn here. if color_style == "scaled": + if cmap is None: + cmap = _diverging_cmap() + def color_fn(w): w = np.abs(w) * np.sign(np.real(w)) return cmap(int((w + w_max) * 256 / (2 * w_max))) elif color_style == "threshold": + if cmap is None: + cmap = _diverging_cmap() + def color_fn(w): w = np.real(w) return cmap(255 if w > 0 else 0) elif color_style == "phase": + if cmap is None: + cmap = _cyclic_cmap() + def color_fn(w): return cmap(int(255 * (np.angle(w) / 2 / np.pi + 0.5))) else: @@ -322,106 +437,141 @@ def color_fn(w): "Unknown color style {} for Hinton diagrams.".format(color_style) ) + artist_list = list() ax.fill(array([0, width, width, 0]), array([0, 0, height, height]), color=cmap(128)) - for x in range(width): - for y in range(height): - _x = x + 1 - _y = y + 1 - _blob( - _x - 0.5, height - _y + 0.5, W[y, x], w_max, - min(1, abs(W[y, x]) / w_max), color_fn=color_fn, ax=ax) + for W in Ws: + artist = list() + for x in range(width): + for y in range(height): + _x = x + 1 + _y = y + 1 + artist += _blob(_x - 0.5, height - _y + 0.5, W[y, x], + w_max, min(1, abs(W[y, x]) / w_max), + color_fn=color_fn, ax=ax) + artist_list.append(artist) + + if len(rhos) == 1: + output = ax + else: + output = animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) - # color axis - vmax = np.pi if color_style == "phase" else abs(W).max() - norm = mpl.colors.Normalize(-vmax, vmax) - cax, kw = mpl.colorbar.make_axes(ax, shrink=0.75, pad=.1) - 
mpl.colorbar.ColorbarBase(cax, norm=norm, cmap=cmap) + # axis + if not (x_basis or y_basis): + ax.axis('off') + ax.axis('equal') + ax.set_frame_on(False) - xtics = 0.5 + np.arange(width) # x axis - ax.xaxis.set_major_locator(plt.FixedLocator(xtics)) - if xlabels: - nxlabels = len(xlabels) - if nxlabels != len(xtics): - raise ValueError(f"got {nxlabels} xlabels but needed {len(xtics)}") - ax.set_xticklabels(xlabels) - if label_top: - ax.xaxis.tick_top() - ax.tick_params(axis='x', labelsize=14) + xticks = 0.5 + np.arange(width) + if x_basis: + _set_ticklabels(ax, x_basis, xticks, 'x') + if label_top: + ax.xaxis.tick_top() # y axis - ytics = 0.5 + np.arange(height) - ax.yaxis.set_major_locator(plt.FixedLocator(ytics)) - if ylabels: - nylabels = len(ylabels) - if nylabels != len(ytics): - raise ValueError(f"got {nylabels} ylabels but needed {len(ytics)}") - ax.set_yticklabels(list(reversed(ylabels))) - ax.tick_params(axis='y', labelsize=14) + yticks = 0.5 + np.arange(height) + if y_basis: + _set_ticklabels(ax, list(reversed(y_basis)), yticks, 'y') - return fig, ax + if colorbar: + vmax = np.pi if color_style == "phase" else w_max + norm = mpl.colors.Normalize(-vmax, vmax) + cax, kw = mpl.colorbar.make_axes(ax, shrink=0.75, pad=.1) + mpl.colorbar.ColorbarBase(cax, norm=norm, cmap=cmap) + return fig, output -def sphereplot(theta, phi, values, fig=None, ax=None, save=False): + +def sphereplot(values, theta, phi, *, + cmap=None, colorbar=True, fig=None, ax=None): """Plots a matrix of values on a sphere Parameters ---------- + values : array + Data set to be plotted + theta : float - Angle with respect to z-axis + Angle with respect to z-axis. Its range is between 0 and pi phi : float - Angle in x-y plane + Angle in x-y plane. Its range is between 0 and 2*pi - values : array - Data set to be plotted + cmap : a matplotlib colormap instance, optional + Color map to use when plotting. 
+ + colorbar : bool, default=True + Whether (True) or not (False) a colorbar should be attached. - fig : a matplotlib Figure instance + fig : a matplotlib Figure instance, optional The Figure canvas in which the plot will be drawn. - ax : a matplotlib axes instance + ax : a matplotlib axes instance, optional The axes context in which the plot will be drawn. - save : bool {False , True} - Whether to save the figure or not - Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. """ - if fig is None or ax is None: - fig = plt.figure() - ax = _axes3D(fig) + fig, ax = _is_fig_and_ax(fig, ax, projection='3d') + + if not isinstance(values, list): + V = [values] + else: + V = values + + _equal_shape(V) + + r_and_ph = list() + min_ph = pi + max_ph = -pi + for values in V: + r = array(abs(values)) + ph = angle(values) + min_ph = min(min_ph, ph.min()) + max_ph = max(max_ph, ph.max()) + r_and_ph.append((r, ph)) + + # normalize color range based on phase angles in list ph + norm = mpl.colors.Normalize(min_ph, max_ph) + + if cmap is None: + cmap = _sequential_cmap() + + # plot with facecolors set to cm.jet colormap normalized to nrm thetam, phim = np.meshgrid(theta, phi) xx = sin(thetam) * cos(phim) yy = sin(thetam) * sin(phim) zz = cos(thetam) - r = array(abs(values)) - ph = angle(values) - # normalize color range based on phase angles in list ph - nrm = mpl.colors.Normalize(ph.min(), ph.max()) + artist_list = list() + for r, ph in r_and_ph: + artist = [ax.plot_surface(r * xx, r * yy, r * zz, rstride=1, cstride=1, + facecolors=cmap(norm(ph)), linewidth=0,)] + artist_list.append(artist) + + if len(V) == 1: + output = ax + else: + output = animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) - # plot with facecolors set to cm.jet colormap normalized to 
nrm - ax.plot_surface(r * xx, r * yy, r * zz, rstride=1, cstride=1, - facecolors=cm.jet(nrm(ph)), linewidth=0) - # create new axes on plot for colorbar and shrink it a bit. - # pad shifts location of bar with repsect to the main plot - cax, kw = mpl.colorbar.make_axes(ax, shrink=.66, pad=.02) + if colorbar: + # create new axes on plot for colorbar and shrink it a bit. + # pad shifts location of bar with repsect to the main plot + cax, kw = mpl.colorbar.make_axes(ax, shrink=.66, pad=.05) - # create new colorbar in axes cax with cm jet and normalized to nrm like - # our facecolors - cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cm.jet, norm=nrm) - # add our colorbar label - cb1.set_label('Angle') + # create new colorbar in axes cax with cmap and normalized to nrm like + # our facecolors + cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm) + # add our colorbar label + cb1.set_label('Angle') - if save: - plt.savefig("sphereplot.png") - - return fig, ax + return fig, output def _remove_margins(axis): @@ -440,22 +590,9 @@ def _get_coord_info_new(renderer): axis._get_coord_info = _get_coord_info_new -def _truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): - """ - truncates portion of a colormap and returns the new one - """ - if isinstance(cmap, str): - cmap = plt.get_cmap(cmap) - new_cmap = mpl.colors.LinearSegmentedColormap.from_list( - 'trunc({n},{a:.2f},{b:.2f})'.format( - n=cmap.name, a=minval, b=maxval), - cmap(np.linspace(minval, maxval, n))) - return new_cmap - - def _stick_to_planes(stick, azim, ax, M, spacing): """adjusts xlim and ylim in way that bars will - Stick to xz and yz planes + stick to xz and yz planes """ if stick is True: azim = azim % 360 @@ -477,8 +614,8 @@ def _update_yaxis(spacing, M, ax, ylabels): """ updates the y-axis """ - ytics = [x + (1 - (spacing / 2)) for x in range(M.shape[1])] - ax.axes.w_yaxis.set_major_locator(plt.FixedLocator(ytics)) + ytics = [y + (1 - (spacing / 2)) for y in range(M.shape[1])] + 
ax.yaxis.set_major_locator(plt.FixedLocator(ytics)) if ylabels: nylabels = len(ylabels) if nylabels != len(ytics): @@ -495,8 +632,8 @@ def _update_xaxis(spacing, M, ax, xlabels): """ updates the x-axis """ - xtics = [x + (1 - (spacing / 2)) for x in range(M.shape[1])] - ax.axes.w_xaxis.set_major_locator(plt.FixedLocator(xtics)) + xtics = [x + (1 - (spacing / 2)) for x in range(M.shape[0])] + ax.xaxis.set_major_locator(plt.FixedLocator(xtics)) if xlabels: nxlabels = len(xlabels) if nxlabels != len(xtics): @@ -513,14 +650,30 @@ def _update_zaxis(ax, z_min, z_max, zticks): """ updates the z-axis """ - ax.axes.w_zaxis.set_major_locator(plt.IndexLocator(1, 0.5)) + ax.zaxis.set_major_locator(plt.IndexLocator(1, 0.5)) if isinstance(zticks, list): ax.set_zticks(zticks) ax.set_zlim3d([min(z_min, 0), z_max]) -def matrix_histogram(M, xlabels=None, ylabels=None, title=None, limits=None, - colorbar=True, fig=None, ax=None, options=None): +def _get_matrix_components(option, M, argument): + if option == 'real': + return np.real(M.flatten()) + elif option == 'img': + return np.imag(M.flatten()) + elif option == 'abs': + return np.abs(M.flatten()) + elif option == 'phase': + return angle(M.flatten()) + else: + raise ValueError("got an unexpected argument, " + f"{option} for {argument}") + + +def matrix_histogram(M, x_basis=None, y_basis=None, limits=None, + bar_style='real', color_limits=None, color_style='real', + options=None, *, cmap=None, colorbar=True, + fig=None, ax=None): """ Draw a histogram for the matrix M, with the given x and y labels and title. 
@@ -529,93 +682,107 @@ def matrix_histogram(M, xlabels=None, ylabels=None, title=None, limits=None, M : Matrix of Qobj The matrix to visualize - xlabels : list of strings - list of x labels + x_basis : list of strings, optional + list of x ticklabels - ylabels : list of strings - list of y labels + y_basis : list of strings, optional + list of y ticklabels - title : string - title of the plot (optional) + limits : list/array with two float numbers, optional + The z-axis limits [min, max] - limits : list/array with two float numbers - The z-axis limits [min, max] (optional) + bar_style : string, default="real" - ax : a matplotlib axes instance - The axes context in which the plot will be drawn. + - If set to ``"real"`` (default), each bar is plotted + as the real part of the corresponding matrix element + - If set to ``"img"``, each bar is plotted + as the imaginary part of the corresponding matrix element + - If set to ``"abs"``, each bar is plotted + as the absolute value of the corresponding matrix element + - If set to ``"phase"`` (default), each bar is plotted + as the angle of the corresponding matrix element - colorbar : bool (default: True) + color_limits : list/array with two float numbers, optional + The limits of colorbar [min, max] + + color_style : string, default="real" + Determines how colors are assigned to each square: + + - If set to ``"real"`` (default), each color is chosen + according to the real part of the corresponding matrix element. + - If set to ``"img"``, each color is chosen according to + the imaginary part of the corresponding matrix element. + - If set to ``"abs"``, each color is chosen according to + the absolute value of the corresponding matrix element. + - If set to ``"phase"``, each color is chosen according to + the angle of the corresponding matrix element. + + cmap : a matplotlib colormap instance, optional + Color map to use when plotting. 
+ + colorbar : bool, default=True show colorbar - options : dict + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. + + ax : a matplotlib axes instance, optional + The axes context in which the plot will be drawn. + + options : dict, optional A dictionary containing extra options for the plot. The names (keys) and values of the options are described below: - 'zticks' : list of numbers + 'zticks' : list of numbers, optional A list of z-axis tick locations. - 'cmap' : string (default: 'jet') - The name of the color map to use. - - 'cmap_min' : float (default: 0.0) - The lower bound to truncate the color map at. - A value in range 0 - 1. The default, 0, leaves the lower - bound of the map unchanged. - - 'cmap_max' : float (default: 1.0) - The upper bound to truncate the color map at. - A value in range 0 - 1. The default, 1, leaves the upper - bound of the map unchanged. - - 'bars_spacing' : float (default: 0.1) + 'bars_spacing' : float, default=0.1 spacing between bars. - 'bars_alpha' : float (default: 1.) + 'bars_alpha' : float, default=1. transparency of bars, should be in range 0 - 1 - 'bars_lw' : float (default: 0.5) + 'bars_lw' : float, default=0.5 linewidth of bars' edges. - 'bars_edgecolor' : color (default: 'k') + 'bars_edgecolor' : color, default='k' The colors of the bars' edges. Examples: 'k', (0.1, 0.2, 0.5) or '#0f0f0f80'. - 'shade' : bool (default: True) + 'shade' : bool, default=True Whether to shade the dark sides of the bars (True) or not (False). The shading is relative to plot's source of light. - 'azim' : float + 'azim' : float, default=-35 The azimuthal viewing angle. - 'elev' : float + 'elev' : float, default=35 The elevation viewing angle. 
- 'proj_type' : string (default: 'ortho' if ax is not passed) - The type of projection ('ortho' or 'persp') - - 'stick' : bool (default: False) + 'stick' : bool, default=False Changes xlim and ylim in such a way that bars next to XZ and YZ planes will stick to those planes. This option has no effect if ``ax`` is passed as a parameter. - 'cbar_pad' : float (default: 0.04) + 'cbar_pad' : float, default=0.04 The fraction of the original axes between the colorbar and the new image axes. (i.e. the padding between the 3D figure and the colorbar). - 'cbar_to_z' : bool (default: False) + 'cbar_to_z' : bool, default=False Whether to set the color of maximum and minimum z-values to the maximum and minimum colors in the colorbar (True) or not (False). - 'figsize' : tuple of two numbers - The size of the figure. + 'threshold': float, optional + Threshold for when bars of smaller height should be transparent. If + not set, all bars are colored according to the color map. - Returns : + Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. 
Raises ------ @@ -625,17 +792,16 @@ def matrix_histogram(M, xlabels=None, ylabels=None, title=None, limits=None, """ # default options - default_opts = {'figsize': None, 'cmap': 'jet', 'cmap_min': 0., - 'cmap_max': 1., 'zticks': None, 'bars_spacing': 0.2, + default_opts = {'zticks': None, 'bars_spacing': 0.2, 'bars_alpha': 1., 'bars_lw': 0.5, 'bars_edgecolor': 'k', - 'shade': False, 'azim': -35, 'elev': 35, - 'proj_type': 'ortho', 'stick': False, - 'cbar_pad': 0.04, 'cbar_to_z': False} + 'shade': True, 'azim': -35, 'elev': 35, 'stick': False, + 'cbar_pad': 0.04, 'cbar_to_z': False, 'threshold': None} # update default_opts from input options if options is None: - pass - elif isinstance(options, dict): + options = dict() + + if isinstance(options, dict): # check if keys in options dict are valid if set(options) - set(default_opts): raise ValueError("invalid key(s) found in options: " @@ -643,217 +809,155 @@ def matrix_histogram(M, xlabels=None, ylabels=None, title=None, limits=None, else: # updating default options default_opts.update(options) + options = default_opts else: raise ValueError("options must be a dictionary") - if isinstance(M, Qobj): - # extract matrix data from Qobj - M = M.full() - - n = np.size(M) - xpos, ypos = np.meshgrid(range(M.shape[0]), range(M.shape[1])) - xpos = xpos.T.flatten() + 0.5 - ypos = ypos.T.flatten() + 0.5 - zpos = np.zeros(n) - dx = dy = (1 - default_opts['bars_spacing']) * np.ones(n) - dz = np.real(M.flatten()) - - if isinstance(limits, list) and len(limits) == 2: - z_min = limits[0] - z_max = limits[1] - else: - z_min = min(dz) - z_max = max(dz) - if z_min == z_max: - z_min -= 0.1 - z_max += 0.1 - - if default_opts['cbar_to_z']: - norm = mpl.colors.Normalize(min(dz), max(dz)) - else: - norm = mpl.colors.Normalize(z_min, z_max) - cmap = _truncate_colormap(default_opts['cmap'], - default_opts['cmap_min'], - default_opts['cmap_max']) - colors = cmap(norm(dz)) - - if ax is None: - fig = plt.figure(figsize=default_opts['figsize']) - 
ax = _axes3D(fig, - azim=default_opts['azim'] % 360, - elev=default_opts['elev'] % 360) - ax.set_proj_type(default_opts['proj_type']) - - ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color=colors, - edgecolors=default_opts['bars_edgecolor'], - linewidths=default_opts['bars_lw'], - alpha=default_opts['bars_alpha'], - shade=default_opts['shade']) - # remove vertical lines on xz and yz plane - ax.yaxis._axinfo["grid"]['linewidth'] = 0 - ax.xaxis._axinfo["grid"]['linewidth'] = 0 - - if title: - ax.set_title(title) - - # x axis - _update_xaxis(default_opts['bars_spacing'], M, ax, xlabels) - - # y axis - _update_yaxis(default_opts['bars_spacing'], M, ax, ylabels) - - # z axis - _update_zaxis(ax, z_min, z_max, default_opts['zticks']) - - # stick to xz and yz plane - _stick_to_planes(default_opts['stick'], - default_opts['azim'], ax, M, - default_opts['bars_spacing']) - - # color axis - if colorbar: - cax, kw = mpl.colorbar.make_axes(ax, shrink=.75, - pad=default_opts['cbar_pad']) - mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm) + fig, ax = _is_fig_and_ax(fig, ax, projection='3d') - # removing margins - _remove_margins(ax.xaxis) - _remove_margins(ax.yaxis) - _remove_margins(ax.zaxis) + if not isinstance(M, list): + Ms = [M] + else: + Ms = M + + _equal_shape(Ms) + + for i in range(len(Ms)): + M = Ms[i] + if isinstance(M, Qobj): + if x_basis is None: + x_basis = list(_cb_labels([M.shape[0]])[0]) + if y_basis is None: + y_basis = list(_cb_labels([M.shape[1]])[1]) + # extract matrix data from Qobj + M = M.full() + + bar_M = _get_matrix_components(bar_style, M, 'bar_style') + + if isinstance(limits, list) and \ + len(limits) == 2: + z_min = limits[0] + z_max = limits[1] + else: + z_min = min(bar_M) if i == 0 else min(min(bar_M), z_min) + z_max = max(bar_M) if i == 0 else max(max(bar_M), z_max) - return fig, ax + if z_min == z_max: + z_min -= 0.1 + z_max += 0.1 + color_M = _get_matrix_components(color_style, M, 'color_style') -def matrix_histogram_complex(M, xlabels=None, 
ylabels=None, - title=None, limits=None, phase_limits=None, - colorbar=True, fig=None, ax=None, - threshold=None): - """ - Draw a histogram for the amplitudes of matrix M, using the argument - of each element for coloring the bars, with the given x and y labels - and title. + if isinstance(color_limits, list) and \ + len(color_limits) == 2: + c_min = color_limits[0] + c_max = color_limits[1] + else: + if color_style == 'phase': + c_min = -pi + c_max = pi + else: + c_min = min(color_M) if i == 0 else min(min(color_M), c_min) + c_max = min(color_M) if i == 0 else max(max(color_M), c_max) - Parameters - ---------- - M : Matrix of Qobj - The matrix to visualize + if c_min == c_max: + c_min -= 0.1 + c_max += 0.1 - xlabels : list of strings - list of x labels + norm = mpl.colors.Normalize(c_min, c_max) - ylabels : list of strings - list of y labels + if cmap is None: + # change later + if color_style == 'phase': + cmap = _cyclic_cmap() + else: + cmap = _sequential_cmap() - title : string - title of the plot (optional) + artist_list = list() + for M in Ms: - limits : list/array with two float numbers - The z-axis limits [min, max] (optional) + if isinstance(M, Qobj): + M = M.full() - phase_limits : list/array with two float numbers - The phase-axis (colorbar) limits [min, max] (optional) + bar_M = _get_matrix_components(bar_style, M, 'bar_style') + color_M = _get_matrix_components(color_style, M, 'color_style') - ax : a matplotlib axes instance - The axes context in which the plot will be drawn. + n = np.size(M) + xpos, ypos = np.meshgrid(range(M.shape[0]), range(M.shape[1])) + xpos = xpos.T.flatten() + 0.5 + ypos = ypos.T.flatten() + 0.5 + zpos = np.zeros(n) + dx = dy = (1 - options['bars_spacing']) * np.ones(n) + colors = cmap(norm(color_M)) - threshold: float (None) - Threshold for when bars of smaller height should be transparent. If - not set, all bars are colored according to the color map. 
+ colors[:, 3] = options['bars_alpha'] - Returns - ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + if options['threshold'] is not None: + colors[:, 3] *= 1 * (bar_M >= options['threshold']) - Raises - ------ - ValueError - Input argument is not valid. + idx, = np.where(bar_M < options['threshold']) + bar_M[idx] = 0 - """ + artist = ax.bar3d(xpos, ypos, zpos, dx, dy, bar_M, color=colors, + edgecolors=options['bars_edgecolor'], + linewidths=options['bars_lw'], + shade=options['shade']) + artist_list.append([artist]) - if isinstance(M, Qobj): - # extract matrix data from Qobj - M = M.full() - - n = np.size(M) - xpos, ypos = np.meshgrid(range(M.shape[0]), range(M.shape[1])) - xpos = xpos.T.flatten() - 0.5 - ypos = ypos.T.flatten() - 0.5 - zpos = np.zeros(n) - dx = dy = 0.8 * np.ones(n) - Mvec = M.flatten() - dz = abs(Mvec) - - # make small numbers real, to avoid random colors - idx, = np.where(abs(Mvec) < 0.001) - Mvec[idx] = abs(Mvec[idx]) - - if phase_limits: # check that limits is a list type - phase_min = phase_limits[0] - phase_max = phase_limits[1] + if len(Ms) == 1: + output = ax else: - phase_min = -pi - phase_max = pi - - norm = mpl.colors.Normalize(phase_min, phase_max) - cmap = complex_phase_cmap() - - colors = cmap(norm(angle(Mvec))) - if threshold is not None: - colors[:, 3] = 1 * (dz > threshold) + output = animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) - if ax is None: - fig = plt.figure() - ax = _axes3D(fig, azim=-35, elev=35) - - ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color=colors) - - if title: - ax.set_title(title) + # remove vertical lines on xz and yz plane + ax.yaxis._axinfo["grid"]['linewidth'] = 0 + ax.xaxis._axinfo["grid"]['linewidth'] = 0 # x axis - xtics = -0.5 + np.arange(M.shape[0]) - ax.axes.w_xaxis.set_major_locator(plt.FixedLocator(xtics)) - if xlabels: - nxlabels = len(xlabels) - if nxlabels != len(xtics): - raise ValueError(f"got 
{nxlabels} xlabels but needed {len(xtics)}") - ax.set_xticklabels(xlabels) - ax.tick_params(axis='x', labelsize=12) + _update_xaxis(options['bars_spacing'], M, ax, x_basis) # y axis - ytics = -0.5 + np.arange(M.shape[1]) - ax.axes.w_yaxis.set_major_locator(plt.FixedLocator(ytics)) - if ylabels: - nylabels = len(ylabels) - if nylabels != len(ytics): - raise ValueError(f"got {nylabels} ylabels but needed {len(ytics)}") - ax.set_yticklabels(ylabels) - ax.tick_params(axis='y', labelsize=12) + _update_yaxis(options['bars_spacing'], M, ax, y_basis) # z axis - if limits and isinstance(limits, list): - ax.set_zlim3d(limits) - else: - ax.set_zlim3d([0, 1]) # use min/max - # ax.set_zlabel('abs') + _update_zaxis(ax, z_min, z_max, options['zticks']) + + # stick to xz and yz plane + _stick_to_planes(options['stick'], + options['azim'], ax, M, + options['bars_spacing']) + ax.view_init(azim=options['azim'], elev=options['elev']) + + # removing margins + _remove_margins(ax.xaxis) + _remove_margins(ax.yaxis) + _remove_margins(ax.zaxis) # color axis if colorbar: - cax, kw = mpl.colorbar.make_axes(ax, shrink=.75, pad=.0) + cax, kw = mpl.colorbar.make_axes(ax, shrink=.75, + pad=options['cbar_pad']) cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm) - cb.set_ticks([-pi, -pi / 2, 0, pi / 2, pi]) - cb.set_ticklabels( - (r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$')) - cb.set_label('arg') - return fig, ax + if color_style == 'real': + cb.set_label('real') + elif color_style == 'img': + cb.set_label('imaginary') + elif color_style == 'abs': + cb.set_label('absolute') + else: + cb.set_label('arg') + if color_limits is None: + cb.set_ticks([-pi, -pi / 2, 0, pi / 2, pi]) + cb.set_ticklabels( + (r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$')) + + return fig, output -def plot_energy_levels(H_list, N=0, labels=None, show_ylabels=False, - figsize=(8, 12), fig=None, ax=None): +def plot_energy_levels(H_list, h_labels=None, energy_levels=None, N=0, *, + fig=None, ax=None): """ 
Plot the energy level diagrams for a list of Hamiltonians. Include up to N energy levels. For each element in H_list, the energy @@ -866,23 +970,20 @@ def plot_energy_levels(H_list, N=0, labels=None, show_ylabels=False, H_list : List of Qobj A list of Hamiltonians. - labels : List of string - A list of labels for each Hamiltonian + h_lables : List of string, optional + A list of xticklabels for each Hamiltonian - show_ylabels : Bool (default False) - Show y labels to the left of energy levels of the initial + energy_levels : List of string, optional + A list of yticklabels to the left of energy levels of the initial Hamiltonian. - N : int + N : int, default=0 The number of energy levels to plot - figsize : tuple (int,int) - The size of the figure (width, height). - - fig : a matplotlib Figure instance + fig : a matplotlib Figure instance, optional The Figure canvas in which the plot will be drawn. - ax : a matplotlib axes instance + ax : a matplotlib axes instance, optional The axes context in which the plot will be drawn. 
Returns @@ -902,15 +1003,12 @@ def plot_energy_levels(H_list, N=0, labels=None, show_ylabels=False, if not isinstance(H_list, list): raise ValueError("H_list must be a list of Qobj instances") - if not fig and not ax: - fig, ax = plt.subplots(1, 1, figsize=figsize) + fig, ax = _is_fig_and_ax(fig, ax) H = H_list[0] N = H.shape[0] if N == 0 else min(H.shape[0], N) - xticks = [] yticks = [] - x = 0 evals0 = H.eigenenergies(eigvals=N) for e_idx, e in enumerate(evals0[:N]): @@ -937,335 +1035,268 @@ def plot_energy_levels(H_list, N=0, labels=None, show_ylabels=False, ax.set_frame_on(False) - if show_ylabels: + if energy_levels: yticks = np.unique(np.around(yticks, 1)) - ax.set_yticks(yticks) + _set_ticklabels(ax, energy_levels, yticks, 'y') else: - ax.axes.get_yaxis().set_visible(False) + # show eigenenergies + yticks = np.unique(np.around(yticks, 1)) + ax.set_yticks(yticks) - if labels: + if h_labels: ax.get_xaxis().tick_bottom() - ax.set_xticks(xticks) - ax.set_xticklabels(labels, fontsize=16) + _set_ticklabels(ax, h_labels, xticks, 'x') else: - ax.axes.get_xaxis().set_visible(False) + # hide xtick + ax.tick_params(axis='x', which='both', + bottom=False, labelbottom=False) return fig, ax -def energy_level_diagram(H_list, N=0, labels=None, show_ylabels=False, - figsize=(8, 12), fig=None, ax=None): - warnings.warn("Deprecated: Use plot_energy_levels") - return plot_energy_levels(H_list, N=N, labels=labels, - show_ylabels=show_ylabels, - figsize=figsize, fig=fig, ax=ax) - - -def plot_fock_distribution(rho, offset=0, fig=None, ax=None, - figsize=(8, 6), title=None, unit_y_range=True): +def plot_fock_distribution(rho, fock_numbers=None, color="green", + unit_y_range=True, *, fig=None, ax=None): """ Plot the Fock distribution for a density matrix (or ket) that describes an oscillator mode. Parameters ---------- - rho : :class:`qutip.Qobj` + rho : `qutip.Qobj` The density matrix (or ket) of the state to visualize. 
- fig : a matplotlib Figure instance - The Figure canvas in which the plot will be drawn. + fock_numbers : list of strings, optional + list of x ticklabels to represent fock numbers - ax : a matplotlib axes instance - The axes context in which the plot will be drawn. + color : color or list of colors, default="green" + The colors of the bar faces. + + unit_y_range : bool, default=True + Set y-axis limits [0, 1] or not - title : string - An optional title for the figure. + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. - figsize : (width, height) - The size of the matplotlib figure (in inches) if it is to be created - (that is, if no 'fig' and 'ax' arguments are passed). + ax : a matplotlib axes instance, optional + The axes context in which the plot will be drawn. Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. 
""" - if not fig and not ax: - fig, ax = plt.subplots(1, 1, figsize=figsize) + fig, ax = _is_fig_and_ax(fig, ax) + + if not isinstance(rho, list): + rhos = [rho] + else: + rhos = rho + + _equal_shape(rhos) + + artist_list = list() + for rho in rhos: + if isket(rho): + rho = ket2dm(rho) + + N = rho.shape[0] + + artist = ax.bar(np.arange(N), np.real(rho.diag()), + color=color, alpha=0.6, width=0.8).patches + artist_list.append(artist) - if isket(rho): - rho = ket2dm(rho) + if len(rhos) == 1: + output = ax + else: + output = animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) - N = rho.shape[0] + if fock_numbers: + _set_ticklabels(ax, fock_numbers, np.arange(N), 'x', fontsize=12) - ax.bar(np.arange(offset, offset + N), np.real(rho.diag()), - color="green", alpha=0.6, width=0.8) if unit_y_range: ax.set_ylim(0, 1) - ax.set_xlim(-.5 + offset, N + offset) + ax.set_xlim(-.5, N) ax.set_xlabel('Fock number', fontsize=12) ax.set_ylabel('Occupation probability', fontsize=12) - if title: - ax.set_title(title) - - return fig, ax - + return fig, output -def fock_distribution(rho, offset=0, fig=None, ax=None, - figsize=(8, 6), title=None, unit_y_range=True): - warnings.warn("Deprecated: Use plot_fock_distribution") - return plot_fock_distribution(rho, offset=offset, fig=fig, ax=ax, - figsize=figsize, title=title, - unit_y_range=unit_y_range) - -def plot_wigner(rho, fig=None, ax=None, figsize=(6, 6), - cmap=None, alpha_max=7.5, colorbar=False, - method='clenshaw', projection='2d'): +def plot_wigner(rho, xvec=None, yvec=None, method='clenshaw', + projection='2d', *, cmap=None, colorbar=False, + fig=None, ax=None): """ Plot the the Wigner function for a density matrix (or ket) that describes an oscillator mode. Parameters ---------- - rho : :class:`qutip.Qobj` + rho : `qutip.Qobj` The density matrix (or ket) of the state to visualize. - fig : a matplotlib Figure instance - The Figure canvas in which the plot will be drawn. 
+ xvec : array_like, optional + x-coordinates at which to calculate the Wigner function. - ax : a matplotlib axes instance - The axes context in which the plot will be drawn. + yvec : array_like, optional + y-coordinates at which to calculate the Wigner function. Does not + apply to the 'fft' method. - figsize : (width, height) - The size of the matplotlib figure (in inches) if it is to be created - (that is, if no 'fig' and 'ax' arguments are passed). + method : string {'clenshaw', 'iterative', 'laguerre', 'fft'}, + default='clenshaw' + The method used for calculating the wigner function. See the + documentation for qutip.wigner for details. - cmap : a matplotlib cmap instance - The colormap. + projection: string {'2d', '3d'}, default='2d' + Specify whether the Wigner function is to be plotted as a + contour graph ('2d') or surface plot ('3d'). - alpha_max : float - The span of the x and y coordinates (both [-alpha_max, alpha_max]). + cmap : a matplotlib cmap instance, optional + The colormap. - colorbar : bool + colorbar : bool, default=False Whether (True) or not (False) a colorbar should be attached to the Wigner function graph. - method : string {'clenshaw', 'iterative', 'laguerre', 'fft'} - The method used for calculating the wigner function. See the - documentation for qutip.wigner for details. + fig : a matplotlib Figure instance, optional + The Figure canvas in which the plot will be drawn. - projection: string {'2d', '3d'} - Specify whether the Wigner function is to be plotted as a - contour graph ('2d') or surface plot ('3d'). + ax : a matplotlib axes instance, optional + The axes context in which the plot will be drawn. Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. 
""" - if not fig and not ax: - if projection == '2d': - fig, ax = plt.subplots(1, 1, figsize=figsize) - elif projection == '3d': - fig = plt.figure(figsize=figsize) - ax = fig.add_subplot(1, 1, 1, projection='3d') - else: - raise ValueError('Unexpected value of projection keyword argument') - - if isket(rho): - rho = ket2dm(rho) - - xvec = np.linspace(-alpha_max, alpha_max, 200) - W0 = wigner(rho, xvec, xvec, method=method) + if projection not in ('2d', '3d'): + raise ValueError('Unexpected value of projection keyword argument') - W, yvec = W0 if isinstance(W0, tuple) else (W0, xvec) + fig, ax = _is_fig_and_ax(fig, ax, projection) - wlim = abs(W).max() - - if cmap is None: - cmap = cm.get_cmap('RdBu') - - if projection == '2d': - cf = ax.contourf(xvec, yvec, W, 100, - norm=mpl.colors.Normalize(-wlim, wlim), cmap=cmap) - elif projection == '3d': - X, Y = np.meshgrid(xvec, xvec) - cf = ax.plot_surface(X, Y, W0, rstride=5, cstride=5, linewidth=0.5, - norm=mpl.colors.Normalize(-wlim, wlim), cmap=cmap) + if not isinstance(rho, list): + rhos = [rho] else: - raise ValueError('Unexpected value of projection keyword argument.') - - if xvec is not yvec: - ax.set_ylim(xvec.min(), xvec.max()) - - ax.set_xlabel(r'$\rm{Re}(\alpha)$', fontsize=12) - ax.set_ylabel(r'$\rm{Im}(\alpha)$', fontsize=12) - - if colorbar: - fig.colorbar(cf, ax=ax) - - ax.set_title("Wigner function", fontsize=12) - - return fig, ax + rhos = rho + _equal_shape(rhos) -def plot_wigner_fock_distribution(rho, fig=None, axes=None, figsize=(8, 4), - cmap=None, alpha_max=7.5, colorbar=False, - method='iterative', projection='2d'): - """ - Plot the Fock distribution and the Wigner function for a density matrix - (or ket) that describes an oscillator mode. 
+ wlim = 0 + Ws = list() + xvec = np.linspace(-7.5, 7.5, 200) if xvec is None else xvec + yvec = np.linspace(-7.5, 7.5, 200) if yvec is None else yvec + for rho in rhos: + if isket(rho): + rho = ket2dm(rho) - Parameters - ---------- - rho : :class:`qutip.Qobj` - The density matrix (or ket) of the state to visualize. + W0 = wigner(rho, xvec, yvec, method=method) - fig : a matplotlib Figure instance - The Figure canvas in which the plot will be drawn. - - axes : a list of two matplotlib axes instances - The axes context in which the plot will be drawn. - - figsize : (width, height) - The size of the matplotlib figure (in inches) if it is to be created - (that is, if no 'fig' and 'ax' arguments are passed). - - cmap : a matplotlib cmap instance - The colormap. - - alpha_max : float - The span of the x and y coordinates (both [-alpha_max, alpha_max]). - - colorbar : bool - Whether (True) or not (False) a colorbar should be attached to the - Wigner function graph. + W, yvec = W0 if isinstance(W0, tuple) else (W0, yvec) + Ws.append(W) - method : string {'iterative', 'laguerre', 'fft'} - The method used for calculating the wigner function. See the - documentation for qutip.wigner for details. + wlim = max(abs(W).max(), wlim) - projection: string {'2d', '3d'} - Specify whether the Wigner function is to be plotted as a - contour graph ('2d') or surface plot ('3d'). + norm = mpl.colors.Normalize(-wlim, wlim) - Returns - ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. 
- """ + if cmap is None: + cmap = _diverging_cmap() - if not fig and not axes: + artist_list = list() + for W in Ws: if projection == '2d': - fig, axes = plt.subplots(1, 2, figsize=figsize) - elif projection == '3d': - fig = plt.figure(figsize=figsize) - axes = [fig.add_subplot(1, 2, 1), - fig.add_subplot(1, 2, 2, projection='3d')] + cf = ax.contourf(xvec, yvec, W, 100, norm=norm, + cmap=cmap).collections else: - raise ValueError('Unexpected value of projection keyword argument') - - if isket(rho): - rho = ket2dm(rho) + X, Y = np.meshgrid(xvec, yvec) + cf = [ax.plot_surface(X, Y, W, rstride=5, cstride=5, linewidth=0.5, + norm=norm, cmap=cmap)] + artist_list.append(cf) - plot_fock_distribution(rho, fig=fig, ax=axes[0]) - plot_wigner(rho, fig=fig, ax=axes[1], figsize=figsize, cmap=cmap, - alpha_max=alpha_max, colorbar=colorbar, method=method, - projection=projection) + if len(rhos) == 1: + output = ax + else: + output = animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) - return fig, axes + ax.set_xlabel(r'$\rm{Re}(\alpha)$', fontsize=12) + ax.set_ylabel(r'$\rm{Im}(\alpha)$', fontsize=12) + if colorbar: + if projection == '2d': + shrink = 1 + else: + shrink = .75 + cax, kw = mpl.colorbar.make_axes(ax, shrink=shrink, pad=.1) + cbar = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm) -def wigner_fock_distribution(rho, fig=None, axes=None, figsize=(8, 4), - cmap=None, alpha_max=7.5, colorbar=False, - method='iterative'): - warnings.warn("Deprecated: Use plot_wigner_fock_distribution") - return plot_wigner_fock_distribution(rho, fig=fig, axes=axes, - figsize=figsize, cmap=cmap, - alpha_max=alpha_max, - colorbar=colorbar, - method=method) + return fig, output -def plot_expectation_values(results, ylabels=[], title=None, show_legend=False, - fig=None, axes=None, figsize=(8, 4)): +def plot_expectation_values(results, ylabels=None, *, + fig=None, axes=None): """ Visualize the results (expectation values) for an evolution solver. 
`results` is assumed to be an instance of Result, or a list of Result instances. - Parameters ---------- results : (list of) :class:`qutip.solver.Result` List of results objects returned by any of the QuTiP evolution solvers. - ylabels : list of strings + ylabels : list of strings, optional The y-axis labels. List should be of the same length as `results`. - title : string - The title of the figure. - - show_legend : bool - Whether or not to show the legend. - - fig : a matplotlib Figure instance + fig : a matplotlib Figure instance, optional The Figure canvas in which the plot will be drawn. - axes : a matplotlib axes instance + axes : (list of) axes instances, optional The axes context in which the plot will be drawn. - figsize : (width, height) - The size of the matplotlib figure (in inches) if it is to be created - (that is, if no 'fig' and 'ax' arguments are passed). - Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, axes : tuple + A tuple of the matplotlib figure and array of axes instances + used to produce the figure. 
""" if not isinstance(results, list): results = [results] n_e_ops = max([len(result.expect) for result in results]) - if not fig or not axes: - if not figsize: - figsize = (12, 3 * n_e_ops) - fig, axes = plt.subplots(n_e_ops, 1, sharex=True, - figsize=figsize, squeeze=False) + if axes is None: + if fig is None: + fig = plt.figure() + axes = np.array([fig.add_subplot(n_e_ops, 1, i+1) + for i in range(n_e_ops)]) - for r_idx, result in enumerate(results): - for e_idx, e in enumerate(result.expect): - axes[e_idx, 0].plot(result.times, e, - label="%s [%d]" % (result.solver, e_idx)) + # create np.ndarray if axes is one axes object or list + if not isinstance(axes, np.ndarray): + if not isinstance(axes, list): + axes = [axes] + axes = np.array(axes) - if title: - fig.suptitle(title) + for _, result in enumerate(results): + for e_idx, e in enumerate(result.expect): + axes[e_idx].plot(result.times, e, + label="%s [%d]" % (result.solver, e_idx)) - axes[n_e_ops - 1, 0].set_xlabel("time", fontsize=12) + axes[n_e_ops - 1].set_xlabel("time", fontsize=12) for n in range(n_e_ops): - if show_legend: - axes[n, 0].legend() if ylabels: - axes[n, 0].set_ylabel(ylabels[n], fontsize=12) + axes[n].set_ylabel(ylabels[n], fontsize=12) return fig, axes -def plot_spin_distribution_2d(P, THETA, PHI, - fig=None, ax=None, figsize=(8, 8)): +def plot_spin_distribution(P, THETA, PHI, projection='2d', *, + cmap=None, colorbar=False, fig=None, ax=None): """ - Plot a spin distribution function (given as meshgrid data) with a 2D - projection where the surface of the unit sphere is mapped on the unit disk. + Plots a spin distribution (given as meshgrid data). Parameters ---------- @@ -1273,109 +1304,97 @@ def plot_spin_distribution_2d(P, THETA, PHI, Distribution values as a meshgrid matrix. THETA : matrix - Meshgrid matrix for the theta coordinate. + Meshgrid matrix for the theta coordinate. Its range is between 0 and pi PHI : matrix - Meshgrid matrix for the phi coordinate. 
+ Meshgrid matrix for the phi coordinate. Its range is between 0 and 2*pi - fig : a matplotlib figure instance + projection: string {'2d', '3d'}, default='2d' + Specify whether the spin distribution function is to be plotted as a 2D + projection where the surface of the unit sphere is mapped on + the unit disk ('2d') or surface plot ('3d'). + + cmap : a matplotlib cmap instance, optional + The colormap. + + colorbar : bool, default=False + Whether (True) or not (False) a colorbar should be attached to the + Wigner function graph. + + fig : a matplotlib figure instance, optional The figure canvas on which the plot will be drawn. - ax : a matplotlib axis instance + ax : a matplotlib axis instance, optional The axis context in which the plot will be drawn. - figsize : (width, height) - The size of the matplotlib figure (in inches) if it is to be created - (that is, if no 'fig' and 'ax' arguments are passed). - Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. 
""" - if not fig or not ax: - if not figsize: - figsize = (8, 8) - fig, ax = plt.subplots(1, 1, figsize=figsize) - - Y = (THETA - pi / 2) / (pi / 2) - X = (pi - PHI) / pi * np.sqrt(cos(THETA - pi / 2)) - - if P.min() < -1e12: - cmap = cm.RdBu + if projection in ('2d', '3d'): + fig, ax = _is_fig_and_ax(fig, ax, projection) else: - cmap = cm.RdYlBu - - ax.pcolor(X, Y, P.real, cmap=cmap) - ax.set_xlabel(r'$\varphi$', fontsize=18) - ax.set_ylabel(r'$\theta$', fontsize=18) - - ax.set_xticks([-1, 0, 1]) - ax.set_xticklabels([r'$0$', r'$\pi$', r'$2\pi$'], fontsize=18) - ax.set_yticks([-1, 0, 1]) - ax.set_yticklabels([r'$\pi$', r'$\pi/2$', r'$0$'], fontsize=18) - - return fig, ax - - -def plot_spin_distribution_3d(P, THETA, PHI, - fig=None, ax=None, figsize=(8, 6)): - """Plots a matrix of values on a sphere - - Parameters - ---------- - P : matrix - Distribution values as a meshgrid matrix. + raise ValueError('Unexpected value of projection keyword argument') - THETA : matrix - Meshgrid matrix for the theta coordinate. - - PHI : matrix - Meshgrid matrix for the phi coordinate. - - fig : a matplotlib figure instance - The figure canvas on which the plot will be drawn. - - ax : a matplotlib axis instance - The axis context in which the plot will be drawn. - - figsize : (width, height) - The size of the matplotlib figure (in inches) if it is to be created - (that is, if no 'fig' and 'ax' arguments are passed). - - Returns - ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. 
+ if not isinstance(P, list): + Ps = [P] + else: + Ps = P - """ + _equal_shape(Ps) - if fig is None or ax is None: - fig = plt.figure(figsize=figsize) - ax = _axes3D(fig, azim=-35, elev=35) + min_P = Ps[0].min() + max_P = Ps[0].max() + for P in Ps: + min_P = min(min_P, P.min()) + max_P = max(max_P, P.max()) - xx = sin(THETA) * cos(PHI) - yy = sin(THETA) * sin(PHI) - zz = cos(THETA) + if cmap is None: + if min_P < -1e12: + cmap = _diverging_cmap() + norm = mpl.colors.Normalize(-max_P, max_P) + else: + cmap = _sequential_cmap() + norm = mpl.colors.Normalize(min_P, max_P) - if P.min() < -1e12: - cmap = cm.RdBu - norm = mpl.colors.Normalize(-P.max(), P.max()) + artist_list = list() + if projection == '2d': + Y = (THETA - pi / 2) / (pi / 2) + X = (pi - PHI) / pi * np.sqrt(cos(THETA - pi / 2)) + for P in Ps: + artist_list.append([ax.pcolor(X, Y, P.real, cmap=cmap)]) + ax.set_xlabel(r'$\varphi$', fontsize=18) + ax.set_ylabel(r'$\theta$', fontsize=18) + ax.axis('equal') + ax.set_xticks([-1, 0, 1]) + ax.set_xticklabels([r'$0$', r'$\pi$', r'$2\pi$'], fontsize=18) + ax.set_yticks([-1, 0, 1]) + ax.set_yticklabels([r'$\pi$', r'$\pi/2$', r'$0$'], fontsize=18) else: - cmap = cm.RdYlBu - norm = mpl.colors.Normalize(P.min(), P.max()) - - ax.plot_surface(xx, yy, zz, rstride=1, cstride=1, - facecolors=cmap(norm(P)), linewidth=0) + xx = sin(THETA) * cos(PHI) + yy = sin(THETA) * sin(PHI) + zz = cos(THETA) + for P in Ps: + artist = [ax.plot_surface(xx, yy, zz, rstride=1, cstride=1, + facecolors=cmap(norm(P)), linewidth=0)] + artist_list.append(artist) + ax.view_init(azim=-35, elev=35) + + if len(Ps) == 1: + output = ax + else: + output = animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) - cax, kw = mpl.colorbar.make_axes(ax, shrink=.66, pad=.02) - cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm) - cb1.set_label('magnitude') + if colorbar: + cax, _ = mpl.colorbar.make_axes(ax, shrink=.66, pad=.1) + cb1 = mpl.colorbar.ColorbarBase(cax, 
cmap=cmap, norm=norm) + cb1.set_label('magnitude') - return fig, ax + return fig, output # @@ -1397,10 +1416,10 @@ def complex_array_to_rgb(X, theme='light', rmax=None): X : array Array (of any dimension) of complex numbers. - theme : 'light' (default) or 'dark' + theme : 'light' or 'dark', default='light' Set coloring theme for mapping complex values into colors. - rmax : float + rmax : float, optional Maximal abs value for color normalization. If None (default), uses np.abs(X).max(). @@ -1552,55 +1571,54 @@ def _sequence_to_latex(seq, style='ket'): return latex.format("".join(map(str, seq))) -def plot_qubism(ket, theme='light', how='pairs', - grid_iteration=1, legend_iteration=0, - fig=None, ax=None, figsize=(6, 6)): +def plot_qubism(ket, theme='light', how='pairs', grid_iteration=1, + legend_iteration=0, *, fig=None, ax=None): """ Qubism plot for pure states of many qudits. Works best for spin chains, especially with even number of particles of the same dimension. Allows to see entanglement between first 2k particles and the rest. + .. note:: + + colorblind_safe does not apply because of its unique colormap + Parameters ---------- ket : Qobj Pure state for plotting. - theme : 'light' (default) or 'dark' + theme : 'light' or 'dark', default='light' Set coloring theme for mapping complex values into colors. See: complex_array_to_rgb. - how : 'pairs' (default), 'pairs_skewed' or 'before_after' + how : 'pairs', 'pairs_skewed' or 'before_after', default='pairs' Type of Qubism plotting. Options: - 'pairs' - typical coordinates, - 'pairs_skewed' - for ferromagnetic/antriferromagnetic plots, - 'before_after' - related to Schmidt plot (see also: plot_schmidt). - grid_iteration : int (default 1) + grid_iteration : int, default=1 Helper lines to be drawn on plot. Show tiles for 2*grid_iteration particles vs all others. 
- legend_iteration : int (default 0) or 'grid_iteration' or 'all' + legend_iteration : int or 'grid_iteration' or 'all', default=0 Show labels for first ``2*legend_iteration`` particles. Option 'grid_iteration' sets the same number of particles as for grid_iteration. Option 'all' makes label for all particles. Typically it should be 0, 1, 2 or perhaps 3. - fig : a matplotlib figure instance + fig : a matplotlib figure instance, optional The figure canvas on which the plot will be drawn. - ax : a matplotlib axis instance + ax : a matplotlib axis instance, optional The axis context in which the plot will be drawn. - figsize : (width, height) - The size of the matplotlib figure (in inches) if it is to be created - (that is, if no 'fig' and 'ax' arguments are passed). - Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. Notes ----- @@ -1615,46 +1633,65 @@ def plot_qubism(ket, theme='light', how='pairs', (2012), open access. """ - if not isket(ket): - raise Exception("Qubism works only for pure states, i.e. kets.") - # add for dm? (perhaps a separate function, plot_qubism_dm) + fig, ax = _is_fig_and_ax(fig, ax) - if not fig and not ax: - fig, ax = plt.subplots(1, 1, figsize=figsize) + if not isinstance(ket, list): + kets = [ket] + else: + kets = ket - dim_list = ket.dims[0] - n = len(dim_list) + _equal_shape(kets) + + artist_list = list() + for ket in kets: + if not isket(ket): + raise Exception("Qubism works only for pure states, i.e. kets.") + # add for dm? 
(perhaps a separate function, plot_qubism_dm) - # for odd number of particles - pixels are rectangular - if n % 2 == 1: - ket = tensor(ket, Qobj([1] * dim_list[-1])) dim_list = ket.dims[0] - n += 1 + n = len(dim_list) - ketdata = ket.full() + # for odd number of particles - pixels are rectangular + if n % 2 == 1: + ket = tensor(ket, Qobj([1] * dim_list[-1])) + dim_list = ket.dims[0] + n += 1 + + ketdata = ket.full() + + if how == 'pairs': + dim_list_y = dim_list[::2] + dim_list_x = dim_list[1::2] + elif how == 'pairs_skewed': + dim_list_y = dim_list[::2] + dim_list_x = dim_list[1::2] + if dim_list_x != dim_list_y: + raise Exception("For 'pairs_skewed' pairs " + + "of dimensions need to be the same.") + elif how == 'before_after': + dim_list_y = list(reversed(dim_list[:(n // 2)])) + dim_list_x = dim_list[(n // 2):] + else: + raise Exception("No such 'how'.") - if how == 'pairs': - dim_list_y = dim_list[::2] - dim_list_x = dim_list[1::2] - elif how == 'pairs_skewed': - dim_list_y = dim_list[::2] - dim_list_x = dim_list[1::2] - if dim_list_x != dim_list_y: - raise Exception("For 'pairs_skewed' pairs " + - "of dimensions need to be the same.") - elif how == 'before_after': - dim_list_y = list(reversed(dim_list[:(n // 2)])) - dim_list_x = dim_list[(n // 2):] - else: - raise Exception("No such 'how'.") + size_x = np.prod(dim_list_x) + size_y = np.prod(dim_list_y) + + qub = np.zeros([size_x, size_y], dtype=complex) + for i in range(ketdata.size): + qub[_to_qubism_index_pair(i, dim_list, how=how)] = ketdata[i, 0] + qub = qub.transpose() - size_x = np.prod(dim_list_x) - size_y = np.prod(dim_list_y) + artist = [ax.imshow(complex_array_to_rgb(qub, theme=theme), + interpolation="none", + extent=(0, size_x, 0, size_y))] + artist_list.append(artist) - qub = np.zeros([size_x, size_y], dtype=complex) - for i in range(ketdata.size): - qub[_to_qubism_index_pair(i, dim_list, how=how)] = ketdata[i, 0] - qub = qub.transpose() + if len(kets) == 1: + output = ax + else: + output = 
animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) quadrants_x = np.prod(dim_list_x[:grid_iteration]) quadrants_y = np.prod(dim_list_y[:grid_iteration]) @@ -1669,9 +1706,6 @@ def plot_qubism(ket, theme='light', how='pairs', theme2color_of_lines = {'light': '#000000', 'dark': '#FFFFFF'} ax.grid(True, color=theme2color_of_lines[theme]) - ax.imshow(complex_array_to_rgb(qub, theme=theme), - interpolation="none", - extent=(0, size_x, 0, size_y)) if legend_iteration == 'all': label_n = n // 2 @@ -1715,13 +1749,12 @@ def plot_qubism(ket, theme='light', how='pairs', size_y - (scale_y * y + shift_y), _sequence_to_latex(seq), **opts) - return fig, ax + return fig, output -def plot_schmidt(ket, splitting=None, - labels_iteration=(3, 2), - theme='light', - fig=None, ax=None, figsize=(6, 6)): + +def plot_schmidt(ket, theme='light', splitting=None, + labels_iteration=(3, 2), *, fig=None, ax=None): """ Plotting scheme related to Schmidt decomposition. Converts a state into a matrix (A_ij -> A_i^j), @@ -1729,63 +1762,85 @@ def plot_schmidt(ket, splitting=None, See also: plot_qubism with how='before_after' for a similar plot. + .. note:: + + colorblind_safe does not apply because of its unique colormap + Parameters ---------- ket : Qobj Pure state for plotting. - splitting : int - Plot for a number of first particles versus the rest. - If not given, it is (number of particles + 1) // 2. - - theme : 'light' (default) or 'dark' + theme : 'light' or 'dark', default='light' Set coloring theme for mapping complex values into colors. See: complex_array_to_rgb. - labels_iteration : int or pair of ints (default (3,2)) + splitting : int, optional + Plot for a number of first particles versus the rest. + If not given, it is (number of particles + 1) // 2. + + labels_iteration : int or pair of ints, default=(3,2) Number of particles to be shown as tick labels, for first (vertical) and last (horizontal) particles, respectively. 
- fig : a matplotlib figure instance + fig : a matplotlib figure instance, optional The figure canvas on which the plot will be drawn. - ax : a matplotlib axis instance + ax : a matplotlib axis instance, optional The axis context in which the plot will be drawn. - figsize : (width, height) - The size of the matplotlib figure (in inches) if it is to be created - (that is, if no 'fig' and 'ax' arguments are passed). - Returns ------- - fig, ax : tuple - A tuple of the matplotlib figure and axes instances used to produce - the figure. + fig, output : tuple + A tuple of the matplotlib figure and the axes instance or animation + instance used to produce the figure. """ - if not isket(ket): - raise Exception("Schmidt plot works only for pure states, i.e. kets.") - if not fig and not ax: - fig, ax = plt.subplots(1, 1, figsize=figsize) + fig, ax = _is_fig_and_ax(fig, ax) + + if not isinstance(ket, list): + kets = [ket] + else: + kets = ket - dim_list = ket.dims[0] + _equal_shape(kets) - if splitting is None: - splitting = (len(dim_list) + 1) // 2 + artist_list = list() + + for ket in kets: + if not isket(ket): + err = "Schmidt plot works only for pure states, i.e. kets." 
+ raise Exception(err) + + dim_list = ket.dims[0] - if isinstance(labels_iteration, int): - labels_iteration = labels_iteration, labels_iteration + if splitting is None: + splitting = (len(dim_list) + 1) // 2 - ketdata = ket.full() + if isinstance(labels_iteration, int): + labels_iteration = labels_iteration, labels_iteration - dim_list_y = dim_list[:splitting] - dim_list_x = dim_list[splitting:] + ketdata = ket.full() - size_x = np.prod(dim_list_x) - size_y = np.prod(dim_list_y) + dim_list_y = dim_list[:splitting] + dim_list_x = dim_list[splitting:] - ketdata = ketdata.reshape((size_y, size_x)) + size_x = np.prod(dim_list_x) + size_y = np.prod(dim_list_y) + + ketdata = ketdata.reshape((size_y, size_x)) + + artist = [ax.imshow(complex_array_to_rgb(ketdata, theme=theme), + interpolation="none", + extent=(0, size_x, 0, size_y))] + artist_list.append(artist) + + if len(kets) == 1: + output = ax + else: + output = animation.ArtistAnimation(fig, artist_list, interval=50, + blit=True, repeat_delay=1000) dim_list_small_x = dim_list_x[:labels_iteration[1]] dim_list_small_y = dim_list_y[:labels_iteration[0]] @@ -1812,8 +1867,4 @@ def plot_schmidt(ket, splitting=None, ax.set_xlabel("last particles") ax.set_ylabel("first particles") - ax.imshow(complex_array_to_rgb(ketdata, theme=theme), - interpolation="none", - extent=(0, size_x, 0, size_y)) - - return fig, ax + return fig, output diff --git a/setup.cfg b/setup.cfg index f03112d113..1eb1f11cd2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -56,6 +56,9 @@ tests = pytest-rerunfailures ipython = ipython +extras = + loky + tqdm ; This uses ConfigParser's string interpolation to include all the above ; dependencies into one single target, convenient for testing full builds. 
full = @@ -64,3 +67,4 @@ full = %(semidefinite)s %(tests)s %(ipython)s + %(extras)s diff --git a/setup.py b/setup.py index 80e80c4157..78312b5504 100755 --- a/setup.py +++ b/setup.py @@ -44,6 +44,7 @@ def process_options(): options = _determine_user_arguments(options) options = _determine_version(options) options = _determine_compilation_options(options) + options = _determine_cythonize_options(options) return options @@ -131,6 +132,17 @@ def _determine_compilation_options(options): return options +def _determine_cythonize_options(options): + options["annotate"] = False + if "--annotate" in sys.argv: + options["annotate"] = True + sys.argv.remove("--annotate") + if "-a" in sys.argv: + options["annotate"] = True + sys.argv.remove("-a") + return options + + def _determine_version(options): """ Adds the 'short_version', 'version' and 'release' options. @@ -202,6 +214,7 @@ def _extension_extra_sources(): extra_sources = { 'qutip.core.data.matmul': [ 'qutip/core/data/src/matmul_csr_vector.cpp', + 'qutip/core/data/src/matmul_diag_vector.cpp', ], } out = collections.defaultdict(list) @@ -269,7 +282,7 @@ def create_extension_modules(options): extra_compile_args=options['cflags'], extra_link_args=options['ldflags'], language='c++')) - return cythonize(out) + return cythonize(out, annotate=options["annotate"]) def print_epilogue():