diff --git a/.gitignore b/.gitignore index 5b72b9fc9f8..d416adce9cc 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,11 @@ doc/source/generated doc/source/odl_interface doc/source/odl*.rst +docs/_build/ +docs/generated +docs/source/odl_interface +docs/source/odl*.rst + ## Python diff --git a/.gitmodules b/.gitmodules index 8cf215efc0c..e69de29bb2d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,4 +0,0 @@ -[submodule "doc/sphinxext"] - path = doc/numpydoc - url = https://github.com/odlgroup/numpydoc - branch = v0.9.2-odl diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000000..f9fdab60b12 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,22 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version, and other tools you might need +build: + os: ubuntu-24.04 + tools: + python: "3.13" + jobs: + pre_build: + - python docs/source/generate_doc.py + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +python: + install: + - requirements: docs/requirements.txt \ No newline at end of file diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 25210c894a1..2e49d265ed6 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -4,18 +4,20 @@ We are grateful for contributions and would like to acknowledge all people who h ## Contributors in alphabetic order -* [Jonas Adler](https://github.com/adler-j). Package maintainer. Created much of the `Set`, `LinearSpace` and `Operator` structure including utilities. Has also contributed to most other submodules. +* [Jonas Adler](https://github.com/adler-j). Created much of the `Set`, `LinearSpace` and `Operator` structure including utilities. Has also contributed to most other submodules. * [Sebastian Banert](https://github.com/sbanert). Contributions to `odl.solvers`. * [Chong Chen](https://github.com/chongchenmath). Started work on the `odl.deform` package. * [Matthias J. Ehrhardt](https://github.com/mehrhardt). Several contributions to `odl.solvers`, in addition to general bug fixes and improvements. * [Barbara Gris](https://github.com/bgris). Added `examples/solvers/find_optimal_parameters`. * [Johan Karlsson](https://github.com/hilding79). Contributions to `odl.solvers`. -* [Holger Kohr](https://github.com/kohr-h). Package maintainer. Was part of the design of ODL and created several of the submodules, including `odl.discr`, `odl.trafos` and `odl.tomo`. Has contributed to most modules. +* [Holger Kohr](https://github.com/kohr-h). Was part of the design of ODL and created several of the submodules, including `odl.discr`, `odl.trafos` and `odl.tomo`. Has contributed to most modules. * [Gregory R. Lee](https://github.com/grlee77). Bugfixes. * [Julian Moosmann](https://github.com/moosmann). Significant work on the initial `odl.tomo` module. -* [Kati Niinimki](https://github.com/niinimaki). Implemented the `WaveletTransform`. +* [Kati Niinim�ki](https://github.com/niinimaki). Implemented the `WaveletTransform`. * [Willem Jan Palenstijn](https://github.com/wjp). Added adjoint of `RayTransform`. * [Axel Ringh](https://github.com/aringh). Created much of the `odl.solvers` module, in particular oversaw the addition of `Functional`. +* [Justus Sagemüller] (https://github.com/leftaroundabout). Package developer since 2023. Made ODL mutli-backend and array-API compatible. +* [Emilien Valat] (https://github.com/Emvlt). Package developer since 2024. Made ODL mutli-backend and array-API compatible. * [Olivier Verdier](https://github.com/olivierverdier). Typos. * [Gustav Zickert](https://github.com/zickert). Started the `odl.contrib.fom` package. -* [Ozan ktem](https://github.com/ozanoktem). Father of the project. Proposed the idea and made sure we had money to get it done! +* [Ozan �ktem](https://github.com/ozanoktem). Father of the project. Proposed the idea and made sure we had money to get it done! diff --git a/README.md b/README.md index a98e2bf6713..8283d20a19c 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ [![Anaconda-Server Badge](https://anaconda.org/conda-forge/odl/badges/version.svg)](https://anaconda.org/conda-forge/odl) [![license](https://img.shields.io/badge/license-MPL--2.0-orange.svg)](https://opensource.org/licenses/MPL-2.0) [![DOI](https://zenodo.org/badge/45596393.svg)](https://zenodo.org/badge/latestdoi/45596393) @@ -24,7 +23,7 @@ Installing ODL should be as easy as For more detailed instructions, check out the [Installation guide](https://odlgroup.github.io/odl/getting_started/installing.html). -ODL is compatible with Python 2/3 and all major platforms (GNU/Linux / Mac / Windows). +ODL is compatible with Python 3 and all major platforms (GNU/Linux / Mac / Windows). Resources ========= @@ -70,7 +69,7 @@ Mozilla Public License version 2.0 or later. See the [LICENSE](LICENSE) file. ODL developers -------------- -Development of ODL started in 2014 as part of the project "Low complexity image reconstruction in medical imaging” by Ozan Öktem ([@ozanoktem](https://github.com/ozanoktem)), Jonas Adler ([@adler-j](https://github.com/adler-j)) and Holger Kohr ([@kohr-h](https://github.com/kohr-h)). Several others have made significant contributions, see the [contributors](CONTRIBUTORS.md) list. +Development of ODL started in 2014 as part of the project "Low complexity image reconstruction in medical imaging” by Ozan Öktem ([@ozanoktem](https://github.com/ozanoktem)), Jonas Adler ([@adler-j](https://github.com/adler-j)) and Holger Kohr ([@kohr-h](https://github.com/kohr-h)). Since 2023, Justus Sagemüller ([@leftaroundabout](https://github.com/leftaroundabout)) and Emilien Valat ([@Emvlt](https://github.com/Emvlt)) have been ODL's main developers. Several others have made significant contributions, see the [contributors](CONTRIBUTORS.md) list. To contact the developers either open an issue on the issue tracker or send an email to odl@math.kth.se. diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index 9358ddf39c9..00000000000 --- a/doc/Makefile +++ /dev/null @@ -1,210 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build -SOURCEDIR = source - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) -GENERATE = cd $(SOURCEDIR) && python generate_doc.py && cd .. - -REPO_NAME = odl -PROJECT_NAME = ODL -PROJECT_URL = 'https\://github.com/odlgroup/odl' -PROJECT_ML_URL = 'https\://github.com/odlgroup/odl/issues' - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " coverage to run coverage check of the documentation (if enabled)" - @echo " gitwash to update the Gitwash standard intro into the Git workflow" - -clean: - rm -rf $(BUILDDIR)/* - rm -rf $(SOURCEDIR)/generated/* - rm -rf $(SOURCEDIR)/odl*.rst - -html: - $(GENERATE) - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/odl.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/odl.qhc" - -applehelp: - $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/odl" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/odl" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -coverage: - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." - -gitwash: - python ./gitwash_dumper.py source/dev ODL \ - --project-url=https\://github.com/odlgroup/odl \ - --repo-name=odl \ - --project-ml-url=odl@math.kth.se \ - --github-user=odlgroup diff --git a/doc/gitwash_dumper.py b/doc/gitwash_dumper.py deleted file mode 100755 index 1e56dbe704a..00000000000 --- a/doc/gitwash_dumper.py +++ /dev/null @@ -1,234 +0,0 @@ -''' Checkout gitwash repo into directory and do search replace on name ''' - -from __future__ import (absolute_import, division, print_function) - -import os -from os.path import join as pjoin -import shutil -import sys -import re -import glob -import fnmatch -import tempfile -from subprocess import call -from optparse import OptionParser - -verbose = False - - -def clone_repo(url, branch): - cwd = os.getcwd() - tmpdir = tempfile.mkdtemp() - try: - cmd = 'git clone %s %s' % (url, tmpdir) - call(cmd, shell=True) - os.chdir(tmpdir) - cmd = 'git checkout %s' % branch - call(cmd, shell=True) - except: - shutil.rmtree(tmpdir) - raise - finally: - os.chdir(cwd) - return tmpdir - - -def cp_files(in_path, globs, out_path): - try: - os.makedirs(out_path) - except OSError: - pass - out_fnames = [] - for in_glob in globs: - in_glob_path = pjoin(in_path, in_glob) - for in_fname in glob.glob(in_glob_path): - out_fname = in_fname.replace(in_path, out_path) - pth, _ = os.path.split(out_fname) - if not os.path.isdir(pth): - os.makedirs(pth) - shutil.copyfile(in_fname, out_fname) - out_fnames.append(out_fname) - return out_fnames - - -def filename_search_replace(sr_pairs, filename, backup=False): - ''' Search and replace for expressions in files - - ''' - with open(filename, 'rt') as in_fh: - in_txt = in_fh.read(-1) - out_txt = in_txt[:] - for in_exp, out_exp in sr_pairs: - in_exp = re.compile(in_exp) - out_txt = in_exp.sub(out_exp, out_txt) - if in_txt == out_txt: - return False - with open(filename, 'wt') as out_fh: - out_fh.write(out_txt) - if backup: - with open(filename + '.bak', 'wt') as bak_fh: - bak_fh.write(in_txt) - return True - - -def copy_replace(replace_pairs, - repo_path, - out_path, - cp_globs=('*',), - rep_globs=('*',), - renames=()): - out_fnames = cp_files(repo_path, cp_globs, out_path) - renames = [(re.compile(in_exp), out_exp) for in_exp, out_exp in renames] - fnames = [] - for rep_glob in rep_globs: - fnames += fnmatch.filter(out_fnames, rep_glob) - if verbose: - print('\n'.join(fnames)) - for fname in fnames: - filename_search_replace(replace_pairs, fname, False) - for in_exp, out_exp in renames: - new_fname, n = in_exp.subn(out_exp, fname) - if n: - os.rename(fname, new_fname) - break - - -def make_link_targets(proj_name, - user_name, - repo_name, - known_link_fname, - out_link_fname, - url=None, - ml_url=None): - """ Check and make link targets - - If url is None or ml_url is None, check if there are links present for - these in `known_link_fname`. If not, raise error. The check is: - - Look for a target `proj_name`. - Look for a target `proj_name` + ' mailing list' - - Also, look for a target `proj_name` + 'github'. If this exists, don't - write this target into the new file below. - - If we are writing any of the url, ml_url, or github address, then write - new file with these links, of form: - - .. _`proj_name` - .. _`proj_name`: url - .. _`proj_name` mailing list: url - """ - with open(known_link_fname, 'rt') as link_fh: - link_contents = link_fh.readlines() - have_url = url is not None - have_ml_url = ml_url is not None - have_gh_url = None - for line in link_contents: - if not have_url: - match = re.match(r'..\s+_`%s`:\s+' % proj_name, line) - if match: - have_url = True - if not have_ml_url: - match = re.match(r'..\s+_`%s mailing list`:\s+' % proj_name, line) - if match: - have_ml_url = True - if not have_gh_url: - match = re.match(r'..\s+_`%s github`:\s+' % proj_name, line) - if match: - have_gh_url = True - if not have_url or not have_ml_url: - raise RuntimeError('Need command line or known project ' - 'and / or mailing list URLs') - lines = [] - if url is not None: - lines.append('.. _`%s`: %s\n' % (proj_name, url)) - if not have_gh_url: - gh_url = 'http://github.com/%s/%s\n' % (user_name, repo_name) - lines.append('.. _`%s github`: %s\n' % (proj_name, gh_url)) - if ml_url is not None: - lines.append('.. _`%s mailing list`: %s\n' % (proj_name, ml_url)) - if len(lines) == 0: - # Nothing to do - return - # A neat little header line - lines = ['.. %s\n' % proj_name] + lines - with open(out_link_fname, 'wt') as out_links: - out_links.writelines(lines) - - -USAGE = ''' - -If not set with options, the repository name is the same as the - -If not set with options, the main github user is the same as the -repository name.''' - - -GITWASH_CENTRAL = 'git://github.com/matthew-brett/gitwash.git' -GITWASH_BRANCH = 'master' - - -def main(): - parser = OptionParser() - parser.set_usage(parser.get_usage().strip() + USAGE) - parser.add_option("--repo-name", dest="repo_name", - help="repository name - e.g. nitime", - metavar="REPO_NAME") - parser.add_option("--github-user", dest="main_gh_user", - help="github username for main repo - e.g fperez", - metavar="MAIN_GH_USER") - parser.add_option("--gitwash-url", dest="gitwash_url", - help="URL to gitwash repository - default %s" - % GITWASH_CENTRAL, - default=GITWASH_CENTRAL, - metavar="GITWASH_URL") - parser.add_option("--gitwash-branch", dest="gitwash_branch", - help="branch in gitwash repository - default %s" - % GITWASH_BRANCH, - default=GITWASH_BRANCH, - metavar="GITWASH_BRANCH") - parser.add_option("--source-suffix", dest="source_suffix", - help="suffix of ReST source files - default '.rst'", - default='.rst', - metavar="SOURCE_SUFFIX") - parser.add_option("--project-url", dest="project_url", - help="URL for project web pages", - default=None, - metavar="PROJECT_URL") - parser.add_option("--project-ml-url", dest="project_ml_url", - help="URL for project mailing list", - default=None, - metavar="PROJECT_ML_URL") - (options, args) = parser.parse_args() - if len(args) < 2: - parser.print_help() - sys.exit() - out_path, project_name = args - if options.repo_name is None: - options.repo_name = project_name - if options.main_gh_user is None: - options.main_gh_user = options.repo_name - repo_path = clone_repo(options.gitwash_url, options.gitwash_branch) - try: - copy_replace((('PROJECTNAME', project_name), - ('REPONAME', options.repo_name), - ('MAIN_GH_USER', options.main_gh_user)), - repo_path, - out_path, - cp_globs=(pjoin('gitwash', '*'),), - rep_globs=('*.rst',), - renames=(('\.rst$', options.source_suffix),)) - make_link_targets(project_name, - options.main_gh_user, - options.repo_name, - pjoin(out_path, 'gitwash', 'known_projects.inc'), - pjoin(out_path, 'gitwash', 'this_project.inc'), - options.project_url, - options.project_ml_url) - finally: - shutil.rmtree(repo_path) - - -if __name__ == '__main__': - main() diff --git a/doc/make.bat b/doc/make.bat deleted file mode 100644 index 610e044d9fc..00000000000 --- a/doc/make.bat +++ /dev/null @@ -1,263 +0,0 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source -set I18NSPHINXOPTS=%SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. texinfo to make Texinfo files - echo. gettext to make PO message catalogs - echo. changes to make an overview over all changed/added/deprecated items - echo. xml to make Docutils-native XML files - echo. pseudoxml to make pseudoxml-XML files for display purposes - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - echo. coverage to run coverage check of the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - - -REM Check if sphinx-build is available and fallback to Python version if any -%SPHINXBUILD% 2> nul -if errorlevel 9009 goto sphinx_python -goto sphinx_ok - -:sphinx_python - -set SPHINXBUILD=python -m sphinx.__init__ -%SPHINXBUILD% 2> nul -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -:sphinx_ok - - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\odl.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\odl.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "latexpdf" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - cd %BUILDDIR%/latex - make all-pdf - cd %~dp0 - echo. - echo.Build finished; the PDF files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "latexpdfja" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - cd %BUILDDIR%/latex - make all-pdf-ja - cd %~dp0 - echo. - echo.Build finished; the PDF files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. - goto end -) - -if "%1" == "texinfo" ( - %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. - goto end -) - -if "%1" == "gettext" ( - %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The message catalogs are in %BUILDDIR%/locale. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - if errorlevel 1 exit /b 1 - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - if errorlevel 1 exit /b 1 - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - if errorlevel 1 exit /b 1 - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -if "%1" == "coverage" ( - %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage - if errorlevel 1 exit /b 1 - echo. - echo.Testing of coverage in the sources finished, look at the ^ -results in %BUILDDIR%/coverage/python.txt. - goto end -) - -if "%1" == "xml" ( - %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The XML files are in %BUILDDIR%/xml. - goto end -) - -if "%1" == "pseudoxml" ( - %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. - goto end -) - -:end diff --git a/doc/numpydoc b/doc/numpydoc deleted file mode 160000 index 1f707329c30..00000000000 --- a/doc/numpydoc +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1f707329c304505936bf0970014e01c9ab4cad3f diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/doc/source/guide/numpy_guide.rst b/doc/source/guide/numpy_guide.rst deleted file mode 100644 index 6dce19fa383..00000000000 --- a/doc/source/guide/numpy_guide.rst +++ /dev/null @@ -1,173 +0,0 @@ -.. _numpy_in_depth: - -############################## -Using ODL with NumPy and SciPy -############################## - -`NumPy `_ is the ubiquitous library for array computations in Python, and is used by almost all major numerical packages. -It provides optimized `Array objects `_ that allow efficient storage of large arrays. -It also provides several optimized algorithms for many of the functions used in numerical programming, such as taking the cosine or adding two arrays. - -`SciPy `_ is a library built on top of NumPy providing more advanced algorithms such as linear solvers, statistics, signal and image processing etc. - -Many operations are more naturally performed using NumPy/SciPy than with ODL, and with that in mind ODL has been designed such that interfacing with them is as easy and fast as possible. - -Casting vectors to and from arrays -================================== -ODL vectors are stored in an abstract way, enabling storage on the CPU, GPU, or perhaps on a cluster on the other side of the world. -This allows algorithms to be written in a generalized and storage-agnostic manner. -Still, it is often convenient to be able to access the data and look at it, perhaps to initialize a vector, or to call an external function. - -To cast a NumPy array to an element of an ODL vector space, one can simply call the `LinearSpace.element` method in an appropriate space:: - - >>> r3 = odl.rn(3) - >>> arr = np.array([1, 2, 3]) - >>> x = r3.element(arr) - -If the data type and storage methods allow it, the element simply wraps the underlying array using a `view -`_:: - - >>> float_arr = np.array([1.0, 2.0, 3.0]) - >>> x = r3.element(float_arr) - >>> x.data is float_arr - True - -Casting ODL vector space elements to NumPy arrays can be done in two ways, either through the member function `Tensor.asarray`, or using `numpy.asarray`. -These are both optimized and return a view if possible:: - - >>> x.asarray() - array([ 1., 2., 3.]) - >>> np.asarray(x) - array([ 1., 2., 3.]) - -These methods work with any ODL object represented by an array. -For example, in discretizations, a two-dimensional array can be used:: - - >>> space = odl.uniform_discr([0, 0], [1, 1], shape=(3, 3)) - >>> arr = np.array([[1, 2, 3], - ... [4, 5, 6], - ... [7, 8, 9]]) - >>> x = space.element(arr) - >>> x.asarray() - array([[ 1., 2., 3.], - [ 4., 5., 6.], - [ 7., 8., 9.]]) - -Using ODL objects with NumPy functions -====================================== -A very convenient feature of ODL is its seamless interaction with NumPy functions. -For universal functions or `ufuncs `_, this is supported by several mechanisms as explained below. - -Evaluating a NumPy ufunc on an ODL object works as expected:: - - >>> r3 = odl.rn(3) - >>> x = r3.element([1, 2, 3]) - >>> np.negative(x) - rn(3).element([-1., -2., -3.]) - -It is also possible to use an ODL object as ``out`` parameter:: - - >>> out = r3.element() - >>> result = np.negative(x, out=out) # variant 1 - >>> out - rn(3).element([-1., -2., -3.]) - >>> result is out - True - >>> out = r3.element() - >>> result = x.ufuncs.negative(out=out) # variant 2 - >>> out - rn(3).element([-1., -2., -3.]) - >>> result is out - True - -.. note:: - Using ``out`` of type other than `numpy.ndarray` in NumPy ufuncs (variant 1 above) **only works with NumPy version 1.13 or higher**. - Variant 2 also works with older versions, but the interface may be removed in a future version of ODL. - - Before NumPy 1.13, the sequence of actions triggered by the call ``np.negative(x)`` would be like this: - - 1. Cast ``x`` to a NumPy array by ``x_arr = x.__array__()``. - 2. Run the ufunc on the array, ``res_arr = np.negative(x_arr)``. - 3. Re-wrap the result as ``res = x.__array_wrap__(res_arr)``. - 4. Return ``res``. - - This method has two major drawbacks, namely (1) users cannot override the ufunc that is being called, and (2) custom objects are not accepted as ``out`` parameters. - Therefore, a new ``__array_ufunc__`` mechanism was [introduced in NumPy 1.13](https://docs.scipy.org/doc/numpy/release.html#array-ufunc-added) that removes these limitations. - It is used whenever a NumPy ufunc is called on an object implementing this method, which then takes full control of the ufunc mechanism. - For details, check out the `NEP `_ describing the logic, or the `interface documentation `_. - See also `NumPy's general documentation on ufuncs `_ - - -For other functions that are not ufuncs, ODL vector space elements are usually accepted as input, but the output is typically of type `numpy.ndarray`, i.e., the result will not be not re-wrapped:: - - >>> np.convolve(x, x, mode='same') - array([ 4., 10., 12.]) - -In such a case, or if a space element has to be modified in-place using some NumPy function (or any function defined on arrays), we have the `writable_array` context manager that exposes a NumPy array which gets automatically assigned back to the ODL object:: - - >>> with odl.util.writable_array(x) as x_arr: - ... np.cumsum(x_arr, out=x_arr) - >>> x - rn(3).element([ 1., 3., 6.]) - -.. note:: - The re-assignment is a no-op if ``x`` has a NumPy array as its data container, hence the operation will be as fast as manipulating ``x`` directly. - The same syntax also works with other data containers, but in this case, copies to and from a NumPy array are usually necessary. - - -NumPy functions as Operators -============================ -To solve the above issue, it is often useful to write an `Operator` wrapping NumPy functions, thus allowing full access to the ODL ecosystem. -The convolution operation, written as ODL operator, could look like this:: - - >>> class MyConvolution(odl.Operator): - ... """Operator for convolving with a given kernel.""" - ... - ... def __init__(self, kernel): - ... """Initialize the convolution.""" - ... self.kernel = kernel - ... - ... # Initialize operator base class. - ... # This operator maps from the space of vector to the same space and is linear - ... super(MyConvolution, self).__init__( - ... domain=kernel.space, range=kernel.space, linear=True) - ... - ... def _call(self, x): - ... # The output of an Operator is automatically cast to an ODL object - ... return np.convolve(x, self.kernel, mode='same') - -This operator can then be called on its domain elements:: - - >>> kernel = odl.rn(3).element([1, 2, 1]) - >>> conv_op = MyConvolution(kernel) - >>> conv_op([1, 2, 3]) - rn(3).element([ 4., 8., 8.]) - -It can be also be used with any of the ODL operator functionalities such as multiplication with scalar, composition, etc:: - - >>> scaled_op = 2 * conv_op # scale output by 2 - >>> scaled_op([1, 2, 3]) - rn(3).element([ 8., 16., 16.]) - >>> y = odl.rn(3).element([1, 1, 1]) - >>> inner_product_op = odl.InnerProductOperator(y) - >>> # Create composition with inner product operator with [1, 1, 1]. - >>> # When called on a vector, the result should be the sum of the - >>> # convolved vector. - >>> composed_op = inner_product_op * conv_op - >>> composed_op([1, 2, 3]) - 20.0 - -For more information on ODL Operators, how to implement them and their features, see the guide on `operators_in_depth`. - -Using ODL with SciPy linear solvers -=================================== -SciPy includes `a series of very competent solvers `_ that may be useful in solving some linear problems. -If you have invested some effort into writing an ODL operator, or perhaps wish to use a pre-existing operator, then the function `as_scipy_operator` creates a Python object that can be used in SciPy's linear solvers. -Here is a simple example of solving Poisson's equation :math:`- \Delta u = f` on the interval :math:`[0, 1]`:: - - >>> space = odl.uniform_discr(0, 1, 5) - >>> op = -odl.Laplacian(space) - >>> f = space.element(lambda x: (x > 0.4) & (x < 0.6)) # indicator function on [0.4, 0.6] - >>> u, status = scipy.sparse.linalg.cg(odl.as_scipy_operator(op), f) - >>> u - array([ 0.02, 0.04, 0.06, 0.04, 0.02]) diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000000..d0c3cbf1020 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000000..dc1312ab09c --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000000..1413e04d5f9 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,15 @@ +# Core docs builder +sphinx>=7.0 + +# Theme +sphinx-rtd-theme +sphinxcontrib-napoleon + +# Extensions +sphinx-autodoc-typehints +myst-parser + +# Install the project itself so autodoc works +-e . + +pytest \ No newline at end of file diff --git a/doc/source/_static/custom.css b/docs/source/_static/custom.css similarity index 100% rename from doc/source/_static/custom.css rename to docs/source/_static/custom.css diff --git a/doc/source/_templates/autosummary/base.rst b/docs/source/_templates/autosummary/base.rst similarity index 100% rename from doc/source/_templates/autosummary/base.rst rename to docs/source/_templates/autosummary/base.rst diff --git a/doc/source/_templates/autosummary/class.rst b/docs/source/_templates/autosummary/class.rst similarity index 100% rename from doc/source/_templates/autosummary/class.rst rename to docs/source/_templates/autosummary/class.rst diff --git a/doc/source/_templates/autosummary/method.rst b/docs/source/_templates/autosummary/method.rst similarity index 100% rename from doc/source/_templates/autosummary/method.rst rename to docs/source/_templates/autosummary/method.rst diff --git a/doc/source/_templates/autosummary/module.rst b/docs/source/_templates/autosummary/module.rst similarity index 100% rename from doc/source/_templates/autosummary/module.rst rename to docs/source/_templates/autosummary/module.rst diff --git a/doc/source/conf.py b/docs/source/conf.py similarity index 90% rename from doc/source/conf.py rename to docs/source/conf.py index e46c36ae07b..b54ce2f0e04 100644 --- a/doc/source/conf.py +++ b/docs/source/conf.py @@ -13,7 +13,6 @@ import sys import sphinx -import sphinx_rtd_theme from packaging.version import parse as parse_version # --- General configuration --- # @@ -23,14 +22,29 @@ try: # Verify that we can import odl + sys.path.insert(0, os.path.abspath('..')) import odl except Exception as e: print('Failed importing odl, exiting', file=sys.stderr) print(e, file=sys.stderr) sys.exit(1) -# Add numpydoc path -sys.path.insert(0, os.path.abspath('../numpydoc')) +import pkgutil + +# -- Path setup: make sure your package is importable +sys.path.insert(0, os.path.abspath("..")) # adjust if docs/ isn't directly inside repo root + +# -- Define your package name +PACKAGE_NAME = "odl" # <-- change this + +# -- Detect available top-level modules (the ones actually installed) +installed_modules = {mod.name for mod in pkgutil.iter_modules()} + +# -- Automatically mock any import that is not in installed_modules or your package +autodoc_mock_imports = ['numpy'] + +# # Add numpydoc path +# sys.path.insert(0, os.path.abspath('../numpydoc')) # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -40,8 +54,7 @@ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.extlinks', - 'sphinx.ext.intersphinx', - 'numpydoc' + 'sphinx.ext.intersphinx' ] # Use newer 'imgmath' extension if possible if parse_version(sphinx.__version__) >= parse_version('1.4'): @@ -118,8 +131,8 @@ def setup(app): # General information about the project. project = u'odl' -copyright = u'2014-2020 The ODL Contributors' -author = u'Jonas Adler, Holger Kohr, Ozan Öktem' +copyright = u'2014-2025 The ODL Contributors' +author = u'Jonas Adler, Holger Kohr, Justus Sagemüller, Ozan Öktem, Emilien Valat' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -161,9 +174,6 @@ def setup(app): # a list of builtin themes. html_theme = 'sphinx_rtd_theme' -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'ODL' @@ -213,7 +223,7 @@ def setup(app): # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'odl.tex', u'ODL Documentation', - u'Jonas Adler, Holger Kohr, Ozan Öktem', 'manual'), + u'Jonas Adler, Holger Kohr, Justus Sagemüller, Ozan Öktem, Emilien Valat', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of diff --git a/doc/source/dev/dev.rst b/docs/source/dev/dev.rst similarity index 100% rename from doc/source/dev/dev.rst rename to docs/source/dev/dev.rst diff --git a/doc/source/dev/document.rst b/docs/source/dev/document.rst similarity index 100% rename from doc/source/dev/document.rst rename to docs/source/dev/document.rst diff --git a/doc/source/dev/extend.rst b/docs/source/dev/extend.rst similarity index 100% rename from doc/source/dev/extend.rst rename to docs/source/dev/extend.rst diff --git a/doc/source/dev/gitwash/branch-dropdown.png b/docs/source/dev/gitwash/branch-dropdown.png similarity index 100% rename from doc/source/dev/gitwash/branch-dropdown.png rename to docs/source/dev/gitwash/branch-dropdown.png diff --git a/doc/source/dev/gitwash/configure_git.rst b/docs/source/dev/gitwash/configure_git.rst similarity index 100% rename from doc/source/dev/gitwash/configure_git.rst rename to docs/source/dev/gitwash/configure_git.rst diff --git a/doc/source/dev/gitwash/development_workflow.rst b/docs/source/dev/gitwash/development_workflow.rst similarity index 100% rename from doc/source/dev/gitwash/development_workflow.rst rename to docs/source/dev/gitwash/development_workflow.rst diff --git a/doc/source/dev/gitwash/following_latest.rst b/docs/source/dev/gitwash/following_latest.rst similarity index 100% rename from doc/source/dev/gitwash/following_latest.rst rename to docs/source/dev/gitwash/following_latest.rst diff --git a/doc/source/dev/gitwash/fork_button.jpg b/docs/source/dev/gitwash/fork_button.jpg similarity index 100% rename from doc/source/dev/gitwash/fork_button.jpg rename to docs/source/dev/gitwash/fork_button.jpg diff --git a/doc/source/dev/gitwash/forking_hell.rst b/docs/source/dev/gitwash/forking_hell.rst similarity index 100% rename from doc/source/dev/gitwash/forking_hell.rst rename to docs/source/dev/gitwash/forking_hell.rst diff --git a/doc/source/dev/gitwash/git_development.rst b/docs/source/dev/gitwash/git_development.rst similarity index 100% rename from doc/source/dev/gitwash/git_development.rst rename to docs/source/dev/gitwash/git_development.rst diff --git a/doc/source/dev/gitwash/git_install.rst b/docs/source/dev/gitwash/git_install.rst similarity index 100% rename from doc/source/dev/gitwash/git_install.rst rename to docs/source/dev/gitwash/git_install.rst diff --git a/doc/source/dev/gitwash/git_intro.rst b/docs/source/dev/gitwash/git_intro.rst similarity index 100% rename from doc/source/dev/gitwash/git_intro.rst rename to docs/source/dev/gitwash/git_intro.rst diff --git a/doc/source/dev/gitwash/git_links.inc b/docs/source/dev/gitwash/git_links.inc similarity index 100% rename from doc/source/dev/gitwash/git_links.inc rename to docs/source/dev/gitwash/git_links.inc diff --git a/doc/source/dev/gitwash/git_resources.rst b/docs/source/dev/gitwash/git_resources.rst similarity index 100% rename from doc/source/dev/gitwash/git_resources.rst rename to docs/source/dev/gitwash/git_resources.rst diff --git a/doc/source/dev/gitwash/index.rst b/docs/source/dev/gitwash/index.rst similarity index 100% rename from doc/source/dev/gitwash/index.rst rename to docs/source/dev/gitwash/index.rst diff --git a/doc/source/dev/gitwash/known_projects.inc b/docs/source/dev/gitwash/known_projects.inc similarity index 100% rename from doc/source/dev/gitwash/known_projects.inc rename to docs/source/dev/gitwash/known_projects.inc diff --git a/doc/source/dev/gitwash/links.inc b/docs/source/dev/gitwash/links.inc similarity index 100% rename from doc/source/dev/gitwash/links.inc rename to docs/source/dev/gitwash/links.inc diff --git a/doc/source/dev/gitwash/maintainer_workflow.rst b/docs/source/dev/gitwash/maintainer_workflow.rst similarity index 100% rename from doc/source/dev/gitwash/maintainer_workflow.rst rename to docs/source/dev/gitwash/maintainer_workflow.rst diff --git a/doc/source/dev/gitwash/new-pull-request-button.png b/docs/source/dev/gitwash/new-pull-request-button.png similarity index 100% rename from doc/source/dev/gitwash/new-pull-request-button.png rename to docs/source/dev/gitwash/new-pull-request-button.png diff --git a/doc/source/dev/gitwash/patching.rst b/docs/source/dev/gitwash/patching.rst similarity index 100% rename from doc/source/dev/gitwash/patching.rst rename to docs/source/dev/gitwash/patching.rst diff --git a/doc/source/dev/gitwash/set_up_fork.rst b/docs/source/dev/gitwash/set_up_fork.rst similarity index 100% rename from doc/source/dev/gitwash/set_up_fork.rst rename to docs/source/dev/gitwash/set_up_fork.rst diff --git a/doc/source/dev/gitwash/this_project.inc b/docs/source/dev/gitwash/this_project.inc similarity index 100% rename from doc/source/dev/gitwash/this_project.inc rename to docs/source/dev/gitwash/this_project.inc diff --git a/doc/source/dev/release.rst b/docs/source/dev/release.rst similarity index 100% rename from doc/source/dev/release.rst rename to docs/source/dev/release.rst diff --git a/doc/source/dev/testing.rst b/docs/source/dev/testing.rst similarity index 100% rename from doc/source/dev/testing.rst rename to docs/source/dev/testing.rst diff --git a/doc/source/generate_doc.py b/docs/source/generate_doc.py similarity index 98% rename from doc/source/generate_doc.py rename to docs/source/generate_doc.py index 9ab9b65758e..5182eb8595f 100644 --- a/doc/source/generate_doc.py +++ b/docs/source/generate_doc.py @@ -142,7 +142,7 @@ def make_interface(): else: this_class_string = '' - with open(modname + '.rst', 'w') as text_file: + with open(f'docs/source/{modname}.rst', 'w') as text_file: text_file.write(string.format(shortname=shortmodname, name=modname, line=line, diff --git a/doc/source/getting_started/about_odl.rst b/docs/source/getting_started/about_odl.rst similarity index 100% rename from doc/source/getting_started/about_odl.rst rename to docs/source/getting_started/about_odl.rst diff --git a/doc/source/getting_started/code/getting_started_convolution.py b/docs/source/getting_started/code/getting_started_convolution.py similarity index 91% rename from doc/source/getting_started/code/getting_started_convolution.py rename to docs/source/getting_started/code/getting_started_convolution.py index fa0e4c78022..489e87406fc 100644 --- a/doc/source/getting_started/code/getting_started_convolution.py +++ b/docs/source/getting_started/code/getting_started_convolution.py @@ -36,13 +36,13 @@ def adjoint(self): space = odl.uniform_discr([-1, -1], [1, 1], [100, 100]) # Convolution kernel, a small centered rectangle. -kernel = odl.phantom.cuboid(space, [-0.05, -0.05], [0.05, 0.05]) +kernel = odl.core.phantom.cuboid(space, [-0.05, -0.05], [0.05, 0.05]) # Create convolution operator A = Convolution(kernel) # Create phantom (the "unknown" solution) -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Apply convolution to phantom to create data g = A(phantom) @@ -97,11 +97,11 @@ def adjoint(self): a = 0.001 # Create functionals for the l2 distance and l1 norm. -g_funcs = [odl.solvers.L2NormSquared(space).translated(g), - a * odl.solvers.L1Norm(grad.range)] +g_funcs = [odl.functional.L2NormSquared(space).translated(g), + a * odl.functional.L1Norm(grad.range)] # Functional of the bound constraint 0 <= f <= 1 -f = odl.solvers.IndicatorBox(space, 0, 1) +f = odl.functional.IndicatorBox(space, 0, 1) # Find scaling constants so that the solver converges. # See the douglas_rachford_pd documentation for more information. diff --git a/doc/source/getting_started/figures/getting_started_TV_douglas_rachford.png b/docs/source/getting_started/figures/getting_started_TV_douglas_rachford.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_TV_douglas_rachford.png rename to docs/source/getting_started/figures/getting_started_TV_douglas_rachford.png diff --git a/doc/source/getting_started/figures/getting_started_conjugate_gradient.png b/docs/source/getting_started/figures/getting_started_conjugate_gradient.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_conjugate_gradient.png rename to docs/source/getting_started/figures/getting_started_conjugate_gradient.png diff --git a/doc/source/getting_started/figures/getting_started_convolved.png b/docs/source/getting_started/figures/getting_started_convolved.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_convolved.png rename to docs/source/getting_started/figures/getting_started_convolved.png diff --git a/doc/source/getting_started/figures/getting_started_kernel.png b/docs/source/getting_started/figures/getting_started_kernel.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_kernel.png rename to docs/source/getting_started/figures/getting_started_kernel.png diff --git a/doc/source/getting_started/figures/getting_started_landweber.png b/docs/source/getting_started/figures/getting_started_landweber.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_landweber.png rename to docs/source/getting_started/figures/getting_started_landweber.png diff --git a/doc/source/getting_started/figures/getting_started_phantom.png b/docs/source/getting_started/figures/getting_started_phantom.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_phantom.png rename to docs/source/getting_started/figures/getting_started_phantom.png diff --git a/doc/source/getting_started/figures/getting_started_tikhonov_conjugate_gradient.png b/docs/source/getting_started/figures/getting_started_tikhonov_conjugate_gradient.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_tikhonov_conjugate_gradient.png rename to docs/source/getting_started/figures/getting_started_tikhonov_conjugate_gradient.png diff --git a/doc/source/getting_started/figures/getting_started_tikhonov_gradient_conjugate_gradient.png b/docs/source/getting_started/figures/getting_started_tikhonov_gradient_conjugate_gradient.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_tikhonov_gradient_conjugate_gradient.png rename to docs/source/getting_started/figures/getting_started_tikhonov_gradient_conjugate_gradient.png diff --git a/doc/source/getting_started/figures/getting_started_tikhonov_identity_conjugate_gradient.png b/docs/source/getting_started/figures/getting_started_tikhonov_identity_conjugate_gradient.png similarity index 100% rename from doc/source/getting_started/figures/getting_started_tikhonov_identity_conjugate_gradient.png rename to docs/source/getting_started/figures/getting_started_tikhonov_identity_conjugate_gradient.png diff --git a/doc/source/getting_started/first_steps.rst b/docs/source/getting_started/first_steps.rst similarity index 100% rename from doc/source/getting_started/first_steps.rst rename to docs/source/getting_started/first_steps.rst diff --git a/doc/source/getting_started/getting_started.rst b/docs/source/getting_started/getting_started.rst similarity index 100% rename from doc/source/getting_started/getting_started.rst rename to docs/source/getting_started/getting_started.rst diff --git a/doc/source/getting_started/installing.rst b/docs/source/getting_started/installing.rst similarity index 100% rename from doc/source/getting_started/installing.rst rename to docs/source/getting_started/installing.rst diff --git a/doc/source/getting_started/installing_conda.rst b/docs/source/getting_started/installing_conda.rst similarity index 100% rename from doc/source/getting_started/installing_conda.rst rename to docs/source/getting_started/installing_conda.rst diff --git a/doc/source/getting_started/installing_extensions.rst b/docs/source/getting_started/installing_extensions.rst similarity index 100% rename from doc/source/getting_started/installing_extensions.rst rename to docs/source/getting_started/installing_extensions.rst diff --git a/doc/source/getting_started/installing_pip.rst b/docs/source/getting_started/installing_pip.rst similarity index 100% rename from doc/source/getting_started/installing_pip.rst rename to docs/source/getting_started/installing_pip.rst diff --git a/doc/source/getting_started/installing_source.rst b/docs/source/getting_started/installing_source.rst similarity index 100% rename from doc/source/getting_started/installing_source.rst rename to docs/source/getting_started/installing_source.rst diff --git a/doc/source/guide/code/functional_indepth_example.py b/docs/source/guide/code/functional_indepth_example.py similarity index 98% rename from doc/source/guide/code/functional_indepth_example.py rename to docs/source/guide/code/functional_indepth_example.py index 427f4b1481a..ac4d9d0bd90 100644 --- a/doc/source/guide/code/functional_indepth_example.py +++ b/docs/source/guide/code/functional_indepth_example.py @@ -98,7 +98,7 @@ def _call(self, x): my_func = MyFunctional(space=space, y=linear_term) # Now we evaluate the functional in a random point -point = odl.util.testutils.noise_element(space) +point = odl.core.util.testutils.noise_element(space) print('Value of the functional in a random point: {}' ''.format(my_func(point))) diff --git a/doc/source/guide/faq.rst b/docs/source/guide/faq.rst similarity index 100% rename from doc/source/guide/faq.rst rename to docs/source/guide/faq.rst diff --git a/doc/source/guide/figures/circular_cone3d_sketch.svg b/docs/source/guide/figures/circular_cone3d_sketch.svg similarity index 100% rename from doc/source/guide/figures/circular_cone3d_sketch.svg rename to docs/source/guide/figures/circular_cone3d_sketch.svg diff --git a/doc/source/guide/figures/coord_sys_3d.svg b/docs/source/guide/figures/coord_sys_3d.svg similarity index 100% rename from doc/source/guide/figures/coord_sys_3d.svg rename to docs/source/guide/figures/coord_sys_3d.svg diff --git a/doc/source/guide/figures/parallel2d_geom.svg b/docs/source/guide/figures/parallel2d_geom.svg similarity index 100% rename from doc/source/guide/figures/parallel2d_geom.svg rename to docs/source/guide/figures/parallel2d_geom.svg diff --git a/doc/source/guide/figures/pdhg_data.png b/docs/source/guide/figures/pdhg_data.png similarity index 100% rename from doc/source/guide/figures/pdhg_data.png rename to docs/source/guide/figures/pdhg_data.png diff --git a/doc/source/guide/figures/pdhg_phantom.png b/docs/source/guide/figures/pdhg_phantom.png similarity index 100% rename from doc/source/guide/figures/pdhg_phantom.png rename to docs/source/guide/figures/pdhg_phantom.png diff --git a/doc/source/guide/figures/pdhg_result.png b/docs/source/guide/figures/pdhg_result.png similarity index 100% rename from doc/source/guide/figures/pdhg_result.png rename to docs/source/guide/figures/pdhg_result.png diff --git a/doc/source/guide/functional_guide.rst b/docs/source/guide/functional_guide.rst similarity index 100% rename from doc/source/guide/functional_guide.rst rename to docs/source/guide/functional_guide.rst diff --git a/doc/source/guide/geometry_guide.rst b/docs/source/guide/geometry_guide.rst similarity index 100% rename from doc/source/guide/geometry_guide.rst rename to docs/source/guide/geometry_guide.rst diff --git a/doc/source/guide/glossary.rst b/docs/source/guide/glossary.rst similarity index 100% rename from doc/source/guide/glossary.rst rename to docs/source/guide/glossary.rst diff --git a/doc/source/guide/guide.rst b/docs/source/guide/guide.rst similarity index 96% rename from doc/source/guide/guide.rst rename to docs/source/guide/guide.rst index 0fc15da59ed..bdbca6738a4 100644 --- a/doc/source/guide/guide.rst +++ b/docs/source/guide/guide.rst @@ -19,3 +19,4 @@ It is intended to familiarize you with important concepts that can be hard to in functional_guide proximal_lang_guide pdhg_guide + migrate1.0_guide diff --git a/doc/source/guide/linearspace_guide.rst b/docs/source/guide/linearspace_guide.rst similarity index 100% rename from doc/source/guide/linearspace_guide.rst rename to docs/source/guide/linearspace_guide.rst diff --git a/docs/source/guide/migrate1.0_guide.rst b/docs/source/guide/migrate1.0_guide.rst new file mode 100644 index 00000000000..6905ef0470d --- /dev/null +++ b/docs/source/guide/migrate1.0_guide.rst @@ -0,0 +1,77 @@ +.. _migrate_0.x_to_1.x: + +################################# +Migrating from ODL 0.x to ODL 1.x +################################# + +If you have a project built around ODL versions 0.6, 0.7, 0.8, or built the +development version from the master branch until 2025 ("1.0-dev"), then you may +need to make some changes to use your code together with the official 1.0 +release. This guide explains how. + +NumPy ufuncs +============ + +The most significant change in 1.0 is in the way pointwise / elementwise functions +are applied to ODL objects (e.g. `DiscretizedSpaceElement`). +ODL 0.x ultimately stored all data of such an object inside one or multiple NumPy +arrays, and was thus able to hook into NumPy's "ufunc" mechanism to allow code like:: + + >>> import odl # up to version 0.8.3 + >>> import numpy as np + >>> space = odl.uniform_discr(0, np.pi, 7, nodes_on_bdry=True) + >>> xs = space.element(lambda x: x) + >>> np.cos(xs) + uniform_discr(0.0, 3.1415927, 7, nodes_on_bdry=True).element( + [ 1. , 0.8660254, 0.5 , ..., -0.5 , -0.8660254, + -1. ] + ) + +If you run the same code with ODL 1.0, you will get an error message. The reason is +that ODL can now use other backends like PyTorch for storing the data, on which NumPy +ufuncs do not work. To offer a consistent way of performing pointwise operations on +such objects regardless of the backend, ODL now offers versions of these functions +in its own namespace: + + >>> # import odl from version 1.0 + >>> odl.cos(xs) + uniform_discr(0.0, 3.1415927, 7, nodes_on_bdry=True).element( + [ 1. , 0.8660254, 0.5 , ..., -0.5 , -0.8660254, + -1. ] + ) + + +Operator composition +==================== + +Operators are a central feature of ODL. +Typically, multiple primitive operators are composed to a whole pipeline. +ODL 0.x used Python's `*` for this purpose, which is intuitive from a +mathematical perspective particular for linear operators as composition +corresponds to matrix multiplication then. + +Unfortunately it conflicted with another use of `*`, which most array libraries +employ, namely pointwise multiplication (for matrices, this is the Hadamard +product). To avoid mistakes from the different interpretations, from ODL 1.0 on +the `@` symbol should instead be used for composing operators (this is also used +by NumPy and PyTorch for matrix multiplication). +This also applies to the various ways ODL overloads "composition"; for example, +to pre-compose an :math:`L^2` norm with a pointwise scaling, you could write:: + + >>> op = odl.functional.L2Norm(space) @ (1 + odl.sin(xs)) + >>> op + FunctionalRightVectorMult(L2Norm(uniform_discr(0.0, 3.1415927, 7, nodes_on_bdry=True)), uniform_discr(0.0, 3.1415927, 7, nodes_on_bdry=True).element( + [ 1. , 1.5 , 1.8660254, ..., 1.8660254, 1.5 , + 1. ] + )) + >>> op(space.one()) + 2.9360830109198384 + +In some cases, the old `*` syntax is still interpreted as composition when that +is unambiguous, but this is deprecated and should be replaced with `@`. +Only use `*` for multiplying odl objects pointwise, for example:: + + >>> odl.sqrt(xs) * odl.sqrt(xs) - xs + uniform_discr(0.0, 3.1415927, 7, nodes_on_bdry=True).element( + [ 0., 0., 0., ..., 0., 0., -0.] + ) diff --git a/docs/source/guide/numpy_guide.rst b/docs/source/guide/numpy_guide.rst new file mode 100644 index 00000000000..38e2c36f3c1 --- /dev/null +++ b/docs/source/guide/numpy_guide.rst @@ -0,0 +1,170 @@ +.. _numpy_in_depth: + +###################################### +Using ODL with NumPy, SciPy or PyTorch +###################################### + +`NumPy `_ is the traditional library for array computations in Python, and is still used by most major numerical packages. +It provides optimized `Array objects `_ that allow efficient storage of large arrays. +It also provides several optimized algorithms for many of the functions used in numerical programming, such as taking the cosine or adding two arrays. + +`SciPy `_ is a library built on top of NumPy providing more advanced algorithms such as linear solvers, statistics, signal and image processing etc. + +`PyTorch `_ is best known as a deep learning framework, but also useful as a general-purpose, GPU-accelerated array library. + +Many operations are more naturally performed using one of those libraries than with ODL, and with that in mind ODL has been designed such that interfacing with them is as easy and fast as possible. + +Casting vectors to and from arrays +================================== +ODL vectors are stored in an abstract way, enabling storage on the CPU, GPU, using different backends which can be switched using the `impl` argument when declaring the space. +This allows algorithms to be written in a generalized and storage-agnostic manner. +Still, it is often convenient to be able to access the raw data either for inspection or manipulation, perhaps to initialize a vector, or to call an external function. + +To cast a NumPy array to an element of an ODL vector space, one can simply call the `LinearSpace.element` method in an appropriate space:: + + >>> import odl + >>> import numpy as np + >>> r3 = odl.rn(3) + >>> arr = np.array([1, 2, 3]) + >>> x = r3.element(arr) + >>> x + rn(3).element([ 1., 2., 3. ]) + +`element` works not only for NumPy arrays, but also for raw arrays of any library supporting the DLPack standard. + + >>> import torch + >>> x_t = r3.element(torch.tensor([4, 5, 6])) + >>> x_t + rn(3).element([ 4., 5., 6. ]) + +This element will still internally be stored using NumPy: storage is determined by the space. + + >>> type(x_t.element) + + +To store in PyTorch instead, only the space declaration needs to be modified, by the `impl` argument (whose default is `'numpy'`). Again, it is then possible to generate elements from any source: + + >>> r3_t = odl.rn(3, impl='pytorch') + >>> type(r3_t.element(arr).data) + + +.. note:: + Relying on the automatic copying of the `LinearSpace.element` method is not necessarily a good idea: for one thing, DLPack support is still somewhat inconsistent in PyTorch as of 2025; for another, it circumvents the device-preserving policy of ODL (i.e. it will in general incur copying of data between different devices, which can take considerable time). + As a rule of thumb, you should only declare spaces and call `element` on them at the start of a computation. Inside of your algorithms' loops, you should use existing spaces and elements and modify them with ODL operators instead. + +The other way around, casting ODL vector space elements to NumPy arrays can be done through the member function `Tensor.asarray`. This returns a view if possible:: + + >>> x.asarray() + array([ 1., 2., 3.]) + +`Tensor.asarray` only yields a NumPy array if the space has `impl='numpy'`. +If for example `impl='pytorch'`, it gives a `torch.Tensor` instead. + + >>> r3_t.element(arr).asarray() + tensor([1., 2., 3.], dtype=torch.float64) + +.. note:: + For simple ℝⁿ spaces, instead of `asarray` one can also access the `data` attribute directly. That is not recommended for user code, though. + +These methods work with any ODL object represented by an array. +For example, in discretizations, a two-dimensional array can be used:: + + >>> space = odl.uniform_discr([0, 0], [1, 1], shape=(3, 3)) + >>> arr = np.array([[1, 2, 3], + ... [4, 5, 6], + ... [7, 8, 9]]) + >>> x = space.element(arr) + >>> x.asarray() + array([[ 1., 2., 3.], + [ 4., 5., 6.], + [ 7., 8., 9.]]) + +Using ODL objects with array-based functions +============================================ +Although ODL offers its own interface to formulate mathematical algorithms (which we recommend using), there are situations where one needs to manipulate objects on the raw array level. + +.. note:: + ODL versions 0.7 and 0.8 allowed directly applying NumPy ufuncs to ODL objects. + This is not allowed anymore in ODL 1.x, since the ufunc compatibility mechanism interfered with high-performance support for other backends. + +.. + TODO link to migration guide. + +Apart from unwrapping the contained arrays and `.element`-wrapping their modified versions again (see above), there is also the option to modify as space element in-place using some NumPy function (or any function defined on backend-specific arrays). For this purpose we have the `writable_array` context manager that exposes a raw array which gets automatically assigned back to the ODL object:: + + >>> x = odl.rn(3).element([1,2,3]) + >>> with odl.util.writable_array(x) as x_arr: + ... np.cumsum(x_arr, out=x_arr) + >>> x + rn(3).element([ 1., 3., 6.]) + +.. note:: + The re-assignment is a no-op if ``x`` has a single array as its data container, hence the operation will be as fast as manipulating ``x`` directly. + The same syntax also works with other data containers, but in this case, copies are usually necessary. + + +NumPy functions as Operators +============================ +It is often useful to write an `Operator` wrapping NumPy or other low-level functions, thus allowing full access to the ODL ecosystem. +The convolution operation, written as ODL operator, could look like this:: + + >>> class MyConvolution(odl.Operator): + ... """Operator for convolving with a given kernel.""" + ... + ... def __init__(self, kernel): + ... """Initialize the convolution.""" + ... self.kernel = kernel + ... + ... # Initialize operator base class. + ... # This operator maps from the space of vector to the same space and is linear + ... super(MyConvolution, self).__init__( + ... domain=kernel.space, range=kernel.space, linear=True) + ... + ... def _call(self, x): + ... # The output of an Operator is automatically cast to an ODL object + ... return self.range.element(np.convolve(x.asarray(), self.kernel.asarray(), mode='same')) + +This operator can then be called on its domain elements:: + + >>> kernel = odl.rn(3).element([1, 2, 1]) + >>> conv_op = MyConvolution(kernel) + >>> conv_op([1, 2, 3]) + rn(3).element([ 4., 8., 8.]) + +N.B. the input list `[1,2,3]` is automatically wrapped into `conv_op.domain.element` by the `Operator` base class before the low-level call; in production code it is recommended to do this explicitly for better control. + +Such operators can also be used with any of the ODL operator functionalities such as multiplication with scalar, composition, etc:: + + >>> scaled_op = 2 * conv_op # scale output by 2 + >>> scaled_op([1, 2, 3]) + rn(3).element([ 8., 16., 16.]) + >>> y = odl.rn(3).element([1, 1, 1]) + >>> inner_product_op = odl.InnerProductOperator(y) + >>> # Create composition with inner product operator with [1, 1, 1]. + >>> # When called on a vector, the result should be the sum of the + >>> # convolved vector. + >>> composed_op = inner_product_op @ conv_op + >>> composed_op([1, 2, 3]) + 20.0 + +For more information on ODL Operators, how to implement them and their features, see the guide on `operators_in_depth`. + +Using ODL with SciPy linear solvers +=================================== +SciPy includes `a series of very competent solvers `_ that may be useful in solving some linear problems. +If you have invested some effort into writing an ODL operator, or perhaps wish to use a pre-existing operator, then the function `as_scipy_operator` creates a Python object that can be used in SciPy's linear solvers. +Here is a simple example of solving Poisson's equation :math:`- \Delta u = f` on the interval :math:`[0, 1]`:: + + >>> space = odl.uniform_discr(0, 1, 5) + >>> op = -odl.Laplacian(space) + >>> f = space.element(lambda x: (x > 0.4) & (x < 0.6)) # indicator function on [0.4, 0.6] + >>> u, status = scipy.sparse.linalg.cg(odl.as_scipy_operator(op), f.asarray()) + >>> u + array([ 0.02, 0.04, 0.06, 0.04, 0.02]) + +Of course, this also could (and should!) be done with ODL's own version of the solver: + + >>> x = op.domain.element() + >>> odl.solvers.conjugate_gradient(op=op, x=x, rhs=f, niter=100) + >>> x + uniform_discr(0.0, 1.0, 5).element([ 0.02, 0.04, 0.06, 0.04, 0.02]) diff --git a/doc/source/guide/operator_guide.rst b/docs/source/guide/operator_guide.rst similarity index 100% rename from doc/source/guide/operator_guide.rst rename to docs/source/guide/operator_guide.rst diff --git a/doc/source/guide/pdhg_guide.rst b/docs/source/guide/pdhg_guide.rst similarity index 100% rename from doc/source/guide/pdhg_guide.rst rename to docs/source/guide/pdhg_guide.rst diff --git a/doc/source/guide/proximal_lang_guide.rst b/docs/source/guide/proximal_lang_guide.rst similarity index 100% rename from doc/source/guide/proximal_lang_guide.rst rename to docs/source/guide/proximal_lang_guide.rst diff --git a/doc/source/guide/vectorization_guide.rst b/docs/source/guide/vectorization_guide.rst similarity index 52% rename from doc/source/guide/vectorization_guide.rst rename to docs/source/guide/vectorization_guide.rst index 452db8e41a9..36c7d8d6e95 100644 --- a/doc/source/guide/vectorization_guide.rst +++ b/docs/source/guide/vectorization_guide.rst @@ -6,7 +6,7 @@ Vectorized functions This section is intended as a small guideline on how to write functions which work with the -vectorization machinery by Numpy which is used internally in ODL. +vectorization machinery by low-level libraries which are used internally in ODL. What is vectorization? @@ -14,45 +14,52 @@ What is vectorization? In general, :term:`vectorization` means that a function can be evaluated on a whole array of values at once instead of looping over individual entries. This is very important for performance in an -interpreted language like python, since loops are usually very slow compared to compiled languages. +interpreted language like Python, since loops are usually very slow compared to compiled languages. -Technically, vectorization in Numpy works through the `Universal functions (ufunc)`_ interface. It -is fast because all loops over data are implemented in C, and the resulting implementations are -exposed to Python for each function individually. +How to use NumPy's ufuncs +========================= -How to use Numpy's ufuncs? -========================== +Until recently, the most common means of vectorization were the *uniform functions* from the `NumPy `_ library:: -The easiest way to write fast custom mathematical functions in Python is to use the -`available ufuncs`_ and compose them to a new function:: - - def gaussian(x): + def gaussian(x: np.ndarray): # Negation, powers and scaling are vectorized, of course. - return np.exp(-x ** 2 / 2) + return np.exp(-x**2 / 2) - def step(x): + def step(x: np.ndarray): # np.where checks the condition in the first argument and # returns the second for `True`, otherwise the third. The # last two arguments can be arrays, too. # Note that also the comparison operation is vectorized. return np.where(x[0] <= 0, 0, 1) -This should cover a very large range of useful functions already (basic arithmetic is vectorized, -too!). An even larger list of `special functions`_ are available in the Scipy package. +This covers a very large range of useful functions already (basic arithmetic is vectorized, +too!). Unfortunately, it is not compatible with GPU-based storage. + +Other libraries offer a similar set of functions too, restricted to inputs from the same:: + + def gaussian_torch(x: torch.Tensor): + return torch.exp(-x**2 / 2) +The `Python Array API `_ is an attempt at unifying these functionalities, but it still requires selecting a *namespace* corresponding to a particular API-instantiation at the start:: -Usage in ODL -============ + def gaussian_arr_api(x): + xp = x.__array_namespace__() + return xp.exp(-x**2 / 2) -Python functions are in most cases used as input to a discretization process. For example, we may +Usage of raw-array functions in ODL +=================================== + +One use pointwise functions is as input to a discretization process. For example, we may want to discretize a two-dimensional Gaussian function:: >>> def gaussian2(x): - ... return np.exp(-(x[0] ** 2 + x[1] ** 2) / 2) + ... xp = x[0].__array_namespace__() + ... return np.exp(-(x[0]**2 + x[1]**2) / 2) on the rectangle [-5, 5] x [-5, 5] with 100 pixels in each -dimension. The code for this is simply :: +dimension. One way to do this is to pass the existing (raw-array based, +discretization-oblivious) function to the `DiscretizedSpace.element` method:: >>> # Note that the minimum and maxiumum coordinates are given as >>> # vectors, not one interval at a time. @@ -64,7 +71,10 @@ dimension. The code for this is simply :: What happens behind the scenes is that ``discr`` creates a :term:`discretization` object which has a built-in method ``element`` to turn continuous functions into discrete arrays by evaluating them at a set of grid points. In the example above, this grid is a uniform sampling of the rectangle -by 100 points per dimension. +by 100 points per dimension. :: + + >>> gaussian_discr.shape + (100, 100) To make this process fast, ``element`` assumes that the function is written in a way that not only supports vectorization, but also guarantees that the output has the correct shape. The function @@ -77,20 +87,28 @@ receives a :term:`meshgrid` tuple as input, in the above case consisting of two (1, 100) When inserted into the function, the final shape of the output is determined by Numpy's -`broadcasting rules`_. For the Gaussian function, Numpy will conclude that the output shape must +`broadcasting rules`_ (or generally the Array API). For the Gaussian function, Numpy will conclude that the output shape must be ``(100, 100)`` since the arrays in ``mesh`` are added after squaring. This size is the same as expected by the discretization. -If a function does not use all components of the input, ODL tries to broadcast the result to the shape of the discretized space:: +Pointwise functions on ODL objects +================================== - >>> def gaussian_const_x0(x): - ... return np.exp(-x[1] ** 2 / 2) # no x[0] -> broadcasting +A perhaps more elegant alternative to the above is to start by generating ODL objects +corresponding only to primitive quantities, and then carry out the interesting computations +on those objects. This offers more type safety, and avoids the need to worry about any +array-namespaces:: - >>> gaussian_const_x0(mesh).shape - (1, 100) - >>> discr.element(gaussian_const_x0).shape - (100, 100) + >>> r_sq = discr.element(lambda x: x[0]**2 + x[1]**2) + >>> gaussian_discr = odl.exp(-r_sq/2) + +In this case, `odl.exp` automatically resolves whichever array backend is +needed, as governed by the space:: + >>> discr = odl.uniform_discr([-5, -5], [5, 5], (100, 100), impl='pytorch') + >>> r_sq = discr.element(lambda x: x[0]**2 + x[1]**2) + >>> type(odl.exp(-r_sq/2).data) + Further reading =============== diff --git a/doc/source/index.rst b/docs/source/index.rst similarity index 100% rename from doc/source/index.rst rename to docs/source/index.rst diff --git a/doc/source/math/derivatives_guide.rst b/docs/source/math/derivatives_guide.rst similarity index 100% rename from doc/source/math/derivatives_guide.rst rename to docs/source/math/derivatives_guide.rst diff --git a/doc/source/math/discretization.rst b/docs/source/math/discretization.rst similarity index 100% rename from doc/source/math/discretization.rst rename to docs/source/math/discretization.rst diff --git a/doc/source/math/images/discr.png b/docs/source/math/images/discr.png similarity index 100% rename from doc/source/math/images/discr.png rename to docs/source/math/images/discr.png diff --git a/doc/source/math/images/resize_large.svg b/docs/source/math/images/resize_large.svg similarity index 100% rename from doc/source/math/images/resize_large.svg rename to docs/source/math/images/resize_large.svg diff --git a/doc/source/math/images/resize_small.svg b/docs/source/math/images/resize_small.svg similarity index 100% rename from doc/source/math/images/resize_small.svg rename to docs/source/math/images/resize_small.svg diff --git a/doc/source/math/linear_spaces.rst b/docs/source/math/linear_spaces.rst similarity index 100% rename from doc/source/math/linear_spaces.rst rename to docs/source/math/linear_spaces.rst diff --git a/doc/source/math/math.rst b/docs/source/math/math.rst similarity index 100% rename from doc/source/math/math.rst rename to docs/source/math/math.rst diff --git a/doc/source/math/resizing_ops.rst b/docs/source/math/resizing_ops.rst similarity index 100% rename from doc/source/math/resizing_ops.rst rename to docs/source/math/resizing_ops.rst diff --git a/doc/source/math/solvers/nonsmooth/pdhg.rst b/docs/source/math/solvers/nonsmooth/pdhg.rst similarity index 100% rename from doc/source/math/solvers/nonsmooth/pdhg.rst rename to docs/source/math/solvers/nonsmooth/pdhg.rst diff --git a/doc/source/math/solvers/nonsmooth/proximal_operators.rst b/docs/source/math/solvers/nonsmooth/proximal_operators.rst similarity index 100% rename from doc/source/math/solvers/nonsmooth/proximal_operators.rst rename to docs/source/math/solvers/nonsmooth/proximal_operators.rst diff --git a/doc/source/math/solvers/solvers.rst b/docs/source/math/solvers/solvers.rst similarity index 100% rename from doc/source/math/solvers/solvers.rst rename to docs/source/math/solvers/solvers.rst diff --git a/doc/source/math/trafos/fourier_transform.rst b/docs/source/math/trafos/fourier_transform.rst similarity index 100% rename from doc/source/math/trafos/fourier_transform.rst rename to docs/source/math/trafos/fourier_transform.rst diff --git a/doc/source/math/trafos/index.rst b/docs/source/math/trafos/index.rst similarity index 100% rename from doc/source/math/trafos/index.rst rename to docs/source/math/trafos/index.rst diff --git a/doc/source/refs.rst b/docs/source/refs.rst similarity index 100% rename from doc/source/refs.rst rename to docs/source/refs.rst diff --git a/doc/source/release_notes.rst b/docs/source/release_notes.rst similarity index 100% rename from doc/source/release_notes.rst rename to docs/source/release_notes.rst diff --git a/examples/deform/linearized_fixed_displacement.py b/examples/deform/linearized_fixed_displacement.py index af32996ff04..137eb8fc25e 100644 --- a/examples/deform/linearized_fixed_displacement.py +++ b/examples/deform/linearized_fixed_displacement.py @@ -28,7 +28,7 @@ templ_space = odl.uniform_discr([-1, -1], [1, 1], (100, 100)) # The template is a rectangle of size 1.0 x 0.5 -template = odl.phantom.cuboid(templ_space, [-0.5, -0.25], [0.5, 0.25]) +template = odl.core.phantom.cuboid(templ_space, [-0.5, -0.25], [0.5, 0.25]) # Create a product space for displacement field disp_field_space = templ_space.tangent_bundle diff --git a/examples/deform/linearized_fixed_template.py b/examples/deform/linearized_fixed_template.py index eb75f7362d1..c56163d1b08 100644 --- a/examples/deform/linearized_fixed_template.py +++ b/examples/deform/linearized_fixed_template.py @@ -27,7 +27,7 @@ templ_space = odl.uniform_discr([-1, -1], [1, 1], (100, 100)) # The template is a rectangle of size 1.0 x 0.5 -template = odl.phantom.cuboid(templ_space, [-0.5, -0.25], [0.5, 0.25]) +template = odl.core.phantom.cuboid(templ_space, [-0.5, -0.25], [0.5, 0.25]) # Create a product space for displacement field disp_field_space = templ_space.tangent_bundle diff --git a/examples/diagnostics/diagonstics_space.py b/examples/diagnostics/diagonstics_space.py index 38351bed813..2ed7e800378 100644 --- a/examples/diagnostics/diagonstics_space.py +++ b/examples/diagnostics/diagonstics_space.py @@ -5,22 +5,22 @@ print('\n\n TESTING FOR Lp SPACE \n\n') discr = odl.uniform_discr(0, 1, 10) -odl.diagnostics.SpaceTest(discr).run_tests() +odl.core.diagnostics.SpaceTest(discr).run_tests() print('\n\n TESTING FOR rn SPACE \n\n') spc = odl.rn(10) -odl.diagnostics.SpaceTest(spc).run_tests() +odl.core.diagnostics.SpaceTest(spc).run_tests() print('\n\n TESTING FOR cn SPACE \n\n') spc = odl.cn(10) -odl.diagnostics.SpaceTest(spc).run_tests() +odl.core.diagnostics.SpaceTest(spc).run_tests() -if 'cuda' in odl.space.entry_points.tensor_space_impl_names(): +if 'cuda' in odl.core.space.entry_points.tensor_space_impl_names(): print('\n\n TESTING FOR CUDA rn SPACE \n\n') spc = odl.rn(10, impl='cuda') - odl.diagnostics.SpaceTest(spc, tol=0.0001).run_tests() + odl.core.diagnostics.SpaceTest(spc, tol=0.0001).run_tests() diff --git a/examples/operator/convolution_operator.py b/examples/operator/convolution_operator.py index bf344a183e4..030ba6d7bce 100644 --- a/examples/operator/convolution_operator.py +++ b/examples/operator/convolution_operator.py @@ -14,7 +14,7 @@ def __init__(self, kernel): """Initialize a convolution operator with a known kernel.""" # Store the kernel - self.kernel = kernel + self.kernel = kernel.data # Initialize the Operator class by calling its __init__ method. # This sets properties such as domain and range and allows the other @@ -24,7 +24,7 @@ def __init__(self, kernel): def _call(self, x): """Implement calling the operator by calling scipy.""" - return scipy.signal.fftconvolve(self.kernel, x, mode='same') + return scipy.signal.fftconvolve(self.kernel, x.data, mode='same') @property def adjoint(self): @@ -42,13 +42,13 @@ def adjoint(self): space = odl.uniform_discr([-1, -1], [1, 1], [100, 100]) # Convolution kernel, a small centered rectangle -kernel = odl.phantom.cuboid(space, [-0.05, -0.05], [0.05, 0.05]) +kernel = odl.core.phantom.cuboid(space, [-0.05, -0.05], [0.05, 0.05]) # Create convolution operator A = Convolution(kernel) # Create phantom (the "unknown" solution) -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Apply convolution to phantom to create data g = A(phantom) diff --git a/examples/operator/pytorch_autograd.py b/examples/operator/pytorch_autograd.py new file mode 100644 index 00000000000..2c4c9897b1e --- /dev/null +++ b/examples/operator/pytorch_autograd.py @@ -0,0 +1,90 @@ +"""Differentiation of functions implemented with ODL operators, through the +backpropagation functionality offered by PyTorch.""" + +import odl +import numpy as np +import torch + + +class Convolution(odl.Operator): + """Operator calculating the convolution of a kernel with a function. + + See the convolution example for explanation. + + This operator is implemented directly in terms of PyTorch operations, + and is therefore differentiable without further ado. + """ + + def __init__(self, kernel, domain, range): + """Initialize a convolution operator with a known kernel.""" + + self.kernel = kernel + + super(Convolution, self).__init__( + domain=domain, range=range, linear=True) + + def _call(self, x): + return self.range.element( + torch.conv2d( input=x.data.unsqueeze(0) + , weight=self.kernel.unsqueeze(0).unsqueeze(0) + , stride=(1,1) + , padding="same" + ).squeeze(0) + ) + + @property + def adjoint(self): + return Convolution( torch.flip(self.kernel, dims=(0,1)) + , domain=self.range, range=self.domain ) + +class PointwiseSquare_PyTorch(odl.Operator): + def __init__(self, domain): + super().__init__(domain=domain, range=domain, linear=False) + + def _call(self, x): + return x*x + + +# Define the space on which the problem should be solved +# Here the square [-1, 1] x [-1, 1] discretized on a 100x100 grid +phantom_space = odl.uniform_discr([-1, -1], [1, 1], [100, 100], impl='pytorch', dtype=np.float32) +space = odl.PytorchTensorSpace([100,100], dtype=np.float32) + +# Convolution kernel, a Sobel-like edge detector in y direction +kernel = torch.tensor([[-1, 0, 1] + ,[-1, 0, 1] + ,[-1, 0, 1]], dtype=torch.float32) + +# Create composed operator +A = ( PointwiseSquare_PyTorch(domain=space) + * Convolution(kernel, domain=space, range=space) + ) + +# Create phantom, as example input +phantom = odl.core.phantom.shepp_logan(phantom_space, modified=True) + +torch_input = phantom.data.detach().clone() + +torch_input.requires_grad = True +odl_input = space.element_type(space, data=torch_input) + +# Apply convolution to phantom to create data +g = A(odl_input) +grad = space.element(torch.autograd.grad(torch.sum(g.data), torch_input)[0]) + +# Alternative version in raw PyTorch +# g_torch = torch.conv2d( input=torch_input.unsqueeze(0) +# , weight=kernel.unsqueeze(0).unsqueeze(0) +# , padding="same" +# ).squeeze(0) ** 2 + +# grad = space.element(torch.autograd.grad(torch.sum(g_torch), torch_input)[0]) + +def display(x, label, **kwargs): + phantom_space.element(x.data).show(label, **kwargs) + +# Display the results using the show method +display(odl_input, 'phantom') +display(g, 'convolved phantom') +display(grad, 'autograd', force_show=True) + diff --git a/examples/solvers/admm_tomography.py b/examples/solvers/admm_tomography.py index 54d86cf9bd7..98a6e5a32dc 100644 --- a/examples/solvers/admm_tomography.py +++ b/examples/solvers/admm_tomography.py @@ -21,7 +21,6 @@ See the documentation of the `admm_linearized` solver for further details. """ -import numpy as np import odl # --- Set up the forward operator (ray transform) --- # @@ -32,17 +31,17 @@ min_pt=[-20, -20], max_pt=[20, 20], shape=[300, 300], dtype='float32') # Make a parallel beam geometry with flat detector, using 360 angles -geometry = odl.tomo.parallel_beam_geometry(reco_space, num_angles=180) +geometry = odl.applications.tomo.parallel_beam_geometry(reco_space, num_angles=180) # Create the forward operator -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # --- Generate artificial data --- # # Create phantom and noisy projection data -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) data = ray_trafo(phantom) -data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(ray_trafo.range) * odl.mean(data) * 0.1 # --- Set up the inverse problem --- # @@ -53,12 +52,12 @@ L = odl.BroadcastOperator(ray_trafo, grad) # Data matching and regularization functionals -data_fit = odl.solvers.L2NormSquared(ray_trafo.range).translated(data) -reg_func = 0.015 * odl.solvers.L1Norm(grad.range) -g = odl.solvers.SeparableSum(data_fit, reg_func) +data_fit = odl.functional.L2NormSquared(ray_trafo.range).translated(data) +reg_func = 0.015 * odl.functional.L1Norm(grad.range) +g = odl.functional.SeparableSum(data_fit, reg_func) # We don't use the f functional, setting it to zero -f = odl.solvers.ZeroFunctional(L.domain) +f = odl.functional.ZeroFunctional(L.domain) # --- Select parameters and solve using ADMM --- # diff --git a/examples/solvers/adupdates_tomography.py b/examples/solvers/adupdates_tomography.py index 5b61df4e1c9..366469f8127 100644 --- a/examples/solvers/adupdates_tomography.py +++ b/examples/solvers/adupdates_tomography.py @@ -48,24 +48,24 @@ reco_space = odl.uniform_discr(min_pt=[-40.0, -40.0], max_pt=[40.0, 40.0], shape=[1024, 1024]) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create the forward operators. They correspond to a fully sampled parallel # beam geometry. -geometry = odl.tomo.parallel_beam_geometry(reco_space) +geometry = odl.applications.tomo.parallel_beam_geometry(reco_space) if SPLIT_METHOD == 'block': # Split the data into blocks: # 111 222 333 ns = geometry.angles.size // SPLIT_NUMBER - ray_trafos = [odl.tomo.RayTransform(reco_space, + ray_trafos = [odl.applications.tomo.RayTransform(reco_space, geometry[i * ns:(i + 1) * ns]) for i in range(SPLIT_NUMBER)] elif SPLIT_METHOD == 'interlaced': # Split the data into slices: # 123 123 123 - ray_trafos = [odl.tomo.RayTransform(reco_space, + ray_trafos = [odl.applications.tomo.RayTransform(reco_space, geometry[i::SPLIT_NUMBER]) for i in range(SPLIT_NUMBER)] else: @@ -74,7 +74,7 @@ # Create the artificial data. data_spaces = [op.range for op in ray_trafos] noisefree_data = [op(phantom) for op in ray_trafos] -data = [proj + 0.10 * np.ptp(proj) * odl.phantom.white_noise(proj.space) +data = [proj + 0.10 * np.ptp(proj) * odl.core.phantom.white_noise(proj.space) for proj in noisefree_data] # Functionals and operators for the total variation. This is the l1 norm of the @@ -103,20 +103,20 @@ reco_space, even_pts) * partial_der op2 = reco_space.cell_sides[dim] * odl.SamplingOperator( reco_space, odd_pts) * partial_der - tv_functionals += [odl.solvers.L1Norm(op1.range), - odl.solvers.L1Norm(op2.range)] + tv_functionals += [odl.functional.L1Norm(op1.range), + odl.functional.L1Norm(op2.range)] tv_operators += [op1, op2] tv_stepsizes += [0.5 / reco_shape[dim], 0.5 / reco_shape[dim]] # Functional and operator enforcing the nonnegativity of the image. -nonneg_functional = odl.solvers.IndicatorNonnegativity(reco_space) +nonneg_functional = odl.functional.IndicatorNonnegativity(reco_space) nonneg_operator = odl.IdentityOperator(reco_space) nonneg_stepsize = 1.0 # ... and the data fit functionals. The coefficient is a regularization # paratemeter, which determines the tradeoff between data fit and regularity. data_fit_functionals = [1.0 * - odl.solvers.L2NormSquared(ds).translated(rhs) + odl.functional.L2NormSquared(ds).translated(rhs) for (ds, rhs) in zip(data_spaces, data)] # In the stepsizes, we avoid the possible division by zero by adding a small # positive value. The matrix corresponding to the operator `op` has only diff --git a/examples/solvers/bregman_tv_tomography.py b/examples/solvers/bregman_tv_tomography.py index 2f63e0d1323..7615c004e0d 100644 --- a/examples/solvers/bregman_tv_tomography.py +++ b/examples/solvers/bregman_tv_tomography.py @@ -48,25 +48,25 @@ shape=[128, 128], dtype='float32') # Make a parallel beam geometry with flat detector, and create ray transform -geometry = odl.tomo.parallel_beam_geometry(reco_space, num_angles=100) -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +geometry = odl.applications.tomo.parallel_beam_geometry(reco_space, num_angles=100) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create phantom, forward project to create sinograms, and add 10% noise -discr_phantom = odl.phantom.shepp_logan(reco_space, modified=True) +discr_phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) noise_free_data = ray_trafo(discr_phantom) -noise = odl.phantom.white_noise(ray_trafo.range) +noise = odl.core.phantom.white_noise(ray_trafo.range) noise *= 0.10 / noise.norm() * noise_free_data.norm() data = noise_free_data + noise # Components for variational problem: l2-squared data matching and isotropic # TV-regularization -l2_norm = 0.5 * odl.solvers.L2NormSquared(ray_trafo.range).translated(data) +l2_norm = 0.5 * odl.functional.L2NormSquared(ray_trafo.range).translated(data) gradient = odl.Gradient(reco_space) reg_param = 0.3 -l12_norm = reg_param * odl.solvers.GroupL1Norm(gradient.range) +l12_norm = reg_param * odl.functional.GroupL1Norm(gradient.range) # Assemble functionals and operators for the optimization algorithm -f = odl.solvers.ZeroFunctional(reco_space) # No f functional used, set to zero +f = odl.functional.ZeroFunctional(reco_space) # No f functional used, set to zero g = [l2_norm, l12_norm] L = [ray_trafo, gradient] @@ -97,7 +97,7 @@ # Create the affine part of the Bregman functional constant = l12_norm(gradient(x)) - linear_part = reg_param * odl.solvers.QuadraticForm(vector=-p, + linear_part = reg_param * odl.functional.QuadraticForm(vector=-p, constant=constant) callback_inner = odl.solvers.CallbackPrintIteration(step=50) @@ -115,7 +115,7 @@ force_show=True) # Create an FBP-reconstruction to compare with -fbp_op = odl.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.4) +fbp_op = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.4) fbp_reco = fbp_op(data) fbp_reco.show(title='FBP Reconstruction') diff --git a/examples/solvers/conjugate_gradient_tomography.py b/examples/solvers/conjugate_gradient_tomography.py index f9f247fc74c..cf380737971 100644 --- a/examples/solvers/conjugate_gradient_tomography.py +++ b/examples/solvers/conjugate_gradient_tomography.py @@ -26,21 +26,21 @@ # Detector: uniformly sampled, n = 300, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 300) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Create the forward operator -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # --- Generate artificial data --- # # Create phantom -discr_phantom = odl.phantom.shepp_logan(reco_space, modified=True) +discr_phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create sinogram of forward projected phantom with noise data = ray_trafo(discr_phantom) -data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(ray_trafo.range) * odl.mean(data) * 0.1 # Optionally pass callback to the solver to display intermediate results callback = (odl.solvers.CallbackPrintIteration() & diff --git a/examples/solvers/deconvolution_1d.py b/examples/solvers/deconvolution_1d.py index b60d6d23d55..61776ae43fc 100644 --- a/examples/solvers/deconvolution_1d.py +++ b/examples/solvers/deconvolution_1d.py @@ -11,12 +11,12 @@ def __init__(self, kernel, adjkernel=None): self.kernel = kernel self.adjkernel = (adjkernel if adjkernel is not None else kernel.space.element(kernel[::-1].copy())) - self.norm = float(np.sum(np.abs(self.kernel))) + self.norm = float(odl.sum(odl.abs(self.kernel))) super(Convolution, self).__init__( domain=kernel.space, range=kernel.space, linear=True) def _call(self, x): - return scipy.signal.convolve(x, self.kernel, mode='same') + return scipy.signal.convolve(x.data, self.kernel.data, mode='same') @property def adjoint(self): @@ -43,18 +43,18 @@ def opnorm(self): # Display callback def callback(x): - plt.plot(conv(x)) + conv(x).show() # Test CGN plt.figure() -plt.plot(phantom) +phantom.show() odl.solvers.conjugate_gradient_normal(conv, discr_space.zero(), phantom, iterations, callback) # Landweber plt.figure() -plt.plot(phantom) +phantom.show() odl.solvers.landweber(conv, discr_space.zero(), phantom, iterations, omega, callback) diff --git a/examples/solvers/denoising_with_entropy_type_regularization.py b/examples/solvers/denoising_with_entropy_type_regularization.py index f8499cc64de..19fef04ec1b 100644 --- a/examples/solvers/denoising_with_entropy_type_regularization.py +++ b/examples/solvers/denoising_with_entropy_type_regularization.py @@ -13,13 +13,13 @@ """ import numpy as np -import scipy.misc +import skimage import odl # Read test image: # convert integer values to float, and rotate to get the image upright -image = np.rot90(scipy.misc.ascent()[::2, ::2], 3).astype('float') +image = np.rot90(skimage.data.camera()).astype('float') shape = image.shape # Rescale @@ -47,18 +47,18 @@ # Proximal operator related to the primal variable # Non-negativity constraint -f = odl.solvers.IndicatorNonnegativity(op.domain) +f = odl.functional.IndicatorNonnegativity(op.domain) # Functionals related to the dual variable # Kulback-Leibler data matching -kl_divergence = odl.solvers.KullbackLeibler(space, prior=noisy) +kl_divergence = odl.functional.KullbackLeibler(space, prior=noisy) # Isotropic TV-regularization: l1-norm of grad(x) -l1_norm = 0.1 * odl.solvers.L1Norm(gradient.range) +l1_norm = 0.1 * odl.functional.L1Norm(gradient.range) # Make separable sum of functionals, order must correspond to the operator K -g = odl.solvers.SeparableSum(kl_divergence, l1_norm) +g = odl.functional.SeparableSum(kl_divergence, l1_norm) # Optional: pass callback objects to solver callback = (odl.solvers.CallbackPrintIteration() & diff --git a/examples/solvers/douglas_rachford_pd_heron.py b/examples/solvers/douglas_rachford_pd_heron.py index 36a469c7831..db07445ede6 100644 --- a/examples/solvers/douglas_rachford_pd_heron.py +++ b/examples/solvers/douglas_rachford_pd_heron.py @@ -30,13 +30,13 @@ # The function f in the douglas rachford solver is not needed so we set it # to the zero function -f = odl.solvers.ZeroFunctional(space) +f = odl.functional.ZeroFunctional(space) # g is the distance function `d(x, Omega_i)`. Here, the l2 distance. -g = [odl.solvers.L2Norm(space)] * len(rectangles) +g = [odl.functional.L2Norm(space)] * len(rectangles) # l are the indicator functions on the rectangles. -l = [odl.solvers.IndicatorBox(space, minp, maxp) for minp, maxp in rectangles] +l = [odl.functional.IndicatorBox(space, minp, maxp) for minp, maxp in rectangles] # Select step size tau = 1.0 / len(rectangles) @@ -52,7 +52,9 @@ def print_objective(x): """Calculate the objective value and prints it.""" value = 0 for minp, maxp in rectangles: - x_proj = np.minimum(np.maximum(x, minp), maxp) + x_proj = odl.minimum( + odl.maximum(x, x.space.element(minp)), x.space.element(maxp) + ) value += (x - x_proj).norm() print('Point = [{:.4f}, {:.4f}], Value = {:.4f}'.format(x[0], x[1], value)) diff --git a/examples/solvers/douglas_rachford_pd_mri.py b/examples/solvers/douglas_rachford_pd_mri.py index adf3f5448c0..7a654aec04b 100644 --- a/examples/solvers/douglas_rachford_pd_mri.py +++ b/examples/solvers/douglas_rachford_pd_mri.py @@ -23,11 +23,11 @@ ft = odl.trafos.FourierTransform(space) sampling_points = np.random.rand(*ft.range.shape) < subsampling sampling_mask = ft.range.element(sampling_points) -mri_op = sampling_mask * ft +mri_op = sampling_mask @ ft # Create noisy MRI data -phantom = odl.phantom.shepp_logan(space, modified=True) -noisy_data = mri_op(phantom) + odl.phantom.white_noise(mri_op.range) * 0.1 +phantom = odl.core.phantom.shepp_logan(space, modified=True) +noisy_data = mri_op(phantom) + odl.core.phantom.white_noise(mri_op.range) * 0.1 phantom.show('Phantom') noisy_data.show('Noisy MRI Data') @@ -38,9 +38,9 @@ lin_ops = [mri_op, gradient] # Create functionals as needed -g = [odl.solvers.L2Norm(mri_op.range).translated(noisy_data), - lam * odl.solvers.L1Norm(gradient.range)] -f = odl.solvers.IndicatorBox(space, 0, 1) +g = [odl.functional.L2Norm(mri_op.range).translated(noisy_data), + lam * odl.functional.L1Norm(gradient.range)] +f = odl.functional.IndicatorBox(space, 0, 1) # Solve x = mri_op.domain.zero() @@ -48,7 +48,7 @@ odl.solvers.CallbackPrintIteration()) odl.solvers.douglas_rachford_pd(x, f, g, lin_ops, tau=2.0, sigma=[1.0, 0.1], - niter=500, callback=callback) + niter=100, callback=callback) x.show('Douglas-Rachford Result') ft.inverse(noisy_data).show('Fourier Inversion Result', force_show=True) diff --git a/examples/solvers/douglas_rachford_pd_tomography_tv.py b/examples/solvers/douglas_rachford_pd_tomography_tv.py index c5613d0f16f..c364e7f32ae 100644 --- a/examples/solvers/douglas_rachford_pd_tomography_tv.py +++ b/examples/solvers/douglas_rachford_pd_tomography_tv.py @@ -41,20 +41,22 @@ # Reconstruction space: discretized functions on the rectangle # [-20, 20]^2 with 512 samples per dimension. -space = odl.uniform_discr(min_pt=[-20, -20], max_pt=[20, 20], shape=[512, 512]) +space = odl.uniform_discr( + min_pt=[-20, -20], max_pt=[20, 20], shape=[512, 512], dtype='float32' + ) # Make a parallel beam geometry with flat detector # Angles: uniformly spaced, n = 22, min = 0, max = pi angle_partition = odl.uniform_partition(0, np.pi, 22) # Detector: uniformly sampled, n = 512, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 512) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(space, geometry) # Create sinogram -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) data = ray_trafo(phantom) # --- Create functionals for solving the optimization problem --- @@ -63,14 +65,14 @@ gradient = odl.Gradient(space) # Functional to enforce 0 <= x <= 1 -f = odl.solvers.IndicatorBox(space, 0, 1) +f = odl.functional.IndicatorBox(space, 0, 1) if data_matching == 'exact': # Functional to enforce Ax = g # Due to the splitting used in the douglas_rachford_pd solver, we only # create the functional for the indicator function on g here, the forward # model is handled separately. - indicator_zero = odl.solvers.IndicatorZero(ray_trafo.range) + indicator_zero = odl.functional.IndicatorZero(ray_trafo.range) indicator_data = indicator_zero.translated(data) elif data_matching == 'inexact': # Functional to enforce ||Ax - g||_2 < eps @@ -84,17 +86,17 @@ eps = 5.0 # Add noise to data - raw_noise = odl.phantom.white_noise(ray_trafo.range) + raw_noise = odl.core.phantom.white_noise(ray_trafo.range) data += raw_noise * eps / raw_noise.norm() # Create indicator - indicator_l2_ball = odl.solvers.IndicatorLpUnitBall(ray_trafo.range, 2) + indicator_l2_ball = odl.functional.IndicatorLpUnitBall(ray_trafo.range, 2) indicator_data = indicator_l2_ball.translated(data / eps) * (1 / eps) else: raise RuntimeError('unknown data_matching') # Functional for TV minimization -cross_norm = lam * odl.solvers.GroupL1Norm(gradient.range) +cross_norm = lam * odl.functional.GroupL1Norm(gradient.range) # --- Create functionals for solving the optimization problem --- @@ -112,10 +114,10 @@ x = ray_trafo.domain.zero() odl.solvers.douglas_rachford_pd(x, f, g, lin_ops, tau=0.1, sigma=[0.1, 0.02], lam=1.5, - niter=200, callback=callback) + niter=100, callback=callback) # Compare with filtered back-projection -fbp_recon = odl.tomo.fbp_op(ray_trafo)(data) +fbp_recon = odl.applications.tomo.fbp_op(ray_trafo)(data) fbp_recon.show('FBP Reconstruction') phantom.show('Phantom') data.show('Sinogram', force_show=True) diff --git a/examples/solvers/forward_backward_pd_denoising.py b/examples/solvers/forward_backward_pd_denoising.py index 707b8968a60..c2734f1382e 100755 --- a/examples/solvers/forward_backward_pd_denoising.py +++ b/examples/solvers/forward_backward_pd_denoising.py @@ -8,11 +8,11 @@ """ import numpy as np -import scipy.misc +import skimage import odl # Load image -image = np.rot90(scipy.misc.ascent()[::2, ::2], 3) +image = np.rot90(skimage.data.camera(), 3) # Reading the size n, m = image.shape @@ -22,7 +22,7 @@ # Create data, noise and noisy data data = space.element(image) -noise = odl.phantom.white_noise(space) * 10.0 +noise = odl.core.phantom.white_noise(space) * 10.0 noisy_data = data + noise data.show('Original Data') noisy_data.show('Noisy Nata') @@ -36,13 +36,13 @@ lin_ops = [gradient] # Create functionals for the 1-norm and the bound constrains. -g = [1e1 * odl.solvers.L1Norm(gradient.range)] -f = odl.solvers.IndicatorBox(space, 0, 255) +g = [1e1 * odl.functional.L1Norm(gradient.range)] +f = odl.functional.IndicatorBox(space, 0, 255) # This gradient encodes the differentiable term(s) of the goal functional, # which corresponds to the "forward" part of the method. In this example the # differentiable part is the squared 2-norm. -h = 0.5 * odl.solvers.L2NormSquared(space).translated(noisy_data) +h = 0.5 * odl.functional.L2NormSquared(space).translated(noisy_data) # Create initial guess for the solver. x = noisy_data.copy() @@ -53,6 +53,6 @@ # Call the solver. x is updated in-place with the consecutive iterates. odl.solvers.forward_backward_pd(x, f, g, lin_ops, h, tau=1.0, - sigma=[0.01], niter=1000, callback=callback) + sigma=[0.01], niter=100, callback=callback) x.show(title='Reconstruction', force_show=True) diff --git a/examples/solvers/functional_basic_example_solver.py b/examples/solvers/functional_basic_example_solver.py index 9a14dcd48fa..a1160eb837d 100644 --- a/examples/solvers/functional_basic_example_solver.py +++ b/examples/solvers/functional_basic_example_solver.py @@ -33,10 +33,10 @@ # The problem will be solved using the forward-backward primal-dual algorithm. # In this setting we let f = nonnegativity contraint, g = l1-norm, L = # the indentity operator, and h = the squared l2-norm. -f = odl.solvers.IndicatorNonnegativity(space) -g = lam * odl.solvers.L1Norm(space) +f = odl.functional.IndicatorNonnegativity(space) +g = lam * odl.functional.L1Norm(space) L = odl.IdentityOperator(space) -h = 1.0 / 2.0 * odl.solvers.L2NormSquared(space).translated(offset) +h = 1.0 / 2.0 * odl.functional.L2NormSquared(space).translated(offset) # Some solver parameters niter = 50 diff --git a/examples/solvers/invhartley_pytorch.py b/examples/solvers/invhartley_pytorch.py new file mode 100644 index 00000000000..c517baa2eac --- /dev/null +++ b/examples/solvers/invhartley_pytorch.py @@ -0,0 +1,91 @@ +"""Example of a deconvolution problem with different solvers (CPU).""" + +import numpy as np +import torch +import matplotlib.pyplot as plt +import scipy.signal +import odl + + +class Convolution(odl.Operator): + def __init__(self, kernel, domain, range, adjkernel=None): + self.kernel = kernel + self.adjkernel = torch.flip(kernel, dims=(0,)) if adjkernel is None else adjkernel + self.norm = float(torch.sum(torch.abs(self.kernel))) + super(Convolution, self).__init__( + domain=domain, range=range, linear=True) + + def _call(self, x): + return self.range.element( + torch.conv1d( input=x.data.unsqueeze(0) + , weight=self.kernel.unsqueeze(0).unsqueeze(0) + , stride=1 + , padding="same" + ).squeeze(0) + ) + + @property + def adjoint(self): + return Convolution( self.adjkernel + , domain=self.range, range=self.domain + , adjkernel = self.kernel + ) + + def opnorm(self): + return self.norm + + +resolution = 50 + +# Discretization +discr_space = odl.uniform_discr(-5, 5, resolution*10, impl='pytorch', dtype=np.float32) + +# Complicated functions to check performance +def mk_kernel(): + q = 1.172 + # Select main lobe and one side lobe on each side + r = np.ceil(3*np.pi/(2*q)) + # Quantised to resolution + nr = int(np.ceil(r*resolution)) + r = nr / resolution + x = torch.linspace(-r, r, nr*2 + 1) + return torch.exp(-x**2 * 2) * np.cos(x * q) +kernel = mk_kernel() + +phantom = discr_space.element(lambda x: np.ones_like(x) ** 2 * (x > -1) * (x < 1)) +# phantom = discr_space.element(lambda x: x ** 2 * np.sin(x) ** 2 * (x > 5)) + +# Create operator +conv = Convolution(kernel, domain=discr_space, range=discr_space) + +# Dampening parameter for landweber +iterations = 100 +omega = 1 / conv.opnorm() ** 2 + + + +def test_with_plot(conv, phantom, solver, **extra_args): + fig, axs = plt.subplots(2) + fig.suptitle("CGN") + def plot_fn(ax_id, fn, *plot_args, **plot_kwargs): + axs[ax_id].plot(fn, *plot_args, **plot_kwargs) + axs[0].set_title("x") + axs[1].set_title("k*x") + plot_fn(0, phantom) + plot_fn(1, conv(phantom)) + def plot_callback(x): + plot_fn(0, conv(x), '--') + plot_fn(1, conv(x), '--') + solver(conv, discr_space.zero(), phantom, iterations, callback=plot_callback, **extra_args) + +# Test CGN +test_with_plot(conv, phantom, odl.solvers.conjugate_gradient_normal) + +# # Landweber +# lw_fig, lw_axs = plt.subplots(1) +# lw_fig.suptitle("Landweber") +# lw_axs.plot(phantom) +# odl.solvers.landweber(conv, discr_space.zero(), phantom, +# iterations, omega, lambda x: lw_axs.plot(conv(x))) + +plt.show() diff --git a/examples/solvers/kaczmarz_tomography.py b/examples/solvers/kaczmarz_tomography.py index 345b346b17e..adcf1ab7f57 100644 --- a/examples/solvers/kaczmarz_tomography.py +++ b/examples/solvers/kaczmarz_tomography.py @@ -24,7 +24,7 @@ min_pt=[-20, -20], max_pt=[20, 20], shape=[128, 128], dtype='float32') # Make a parallel beam geometry with flat detector -geometry = odl.tomo.parallel_beam_geometry(space) +geometry = odl.applications.tomo.parallel_beam_geometry(space) # Here we split the geometry according to both angular subsets and # detector subsets. @@ -39,14 +39,14 @@ n = 20 ns = geometry.angles.size // n - ray_trafos = [odl.tomo.RayTransform(space, geometry[i * ns:(i + 1) * ns]) + ray_trafos = [odl.applications.tomo.RayTransform(space, geometry[i * ns:(i + 1) * ns]) for i in range(n)] elif split == 'interlaced': # Split the data into slices: # 123 123 123 n = 20 - ray_trafos = [odl.tomo.RayTransform(space, geometry[i::n]) + ray_trafos = [odl.applications.tomo.RayTransform(space, geometry[i::n]) for i in range(n)] # Create one large ray transform from components @@ -56,7 +56,7 @@ # Create phantom -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create sinogram of forward projected phantom with noise data = ray_trafo(phantom) diff --git a/examples/solvers/lbfgs_tomography.py b/examples/solvers/lbfgs_tomography.py index 0d04a52b8f1..2dcbb98568e 100644 --- a/examples/solvers/lbfgs_tomography.py +++ b/examples/solvers/lbfgs_tomography.py @@ -18,7 +18,7 @@ # Reconstruction space: discretized functions on the rectangle # [-20, 20]^2 with 200 samples per dimension. reco_space = odl.uniform_discr( - min_pt=[-20, -20], max_pt=[20, 20], shape=[200, 200]) + min_pt=[-20, -20], max_pt=[20, 20], shape=[200, 200], dtype='float32') # Make a parallel beam geometry with flat detector # Angles: uniformly spaced, n = 400, min = 0, max = pi @@ -26,26 +26,26 @@ # Detector: uniformly sampled, n = 400, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 400) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Create the forward operator -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # --- Generate artificial data --- # # Create phantom -discr_phantom = odl.phantom.shepp_logan(reco_space, modified=True) +discr_phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create sinogram of forward projected phantom with noise data = ray_trafo(discr_phantom) -data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(ray_trafo.range) * odl.mean(data) * 0.1 # --- Set up optimization problem and solve --- # # Create objective functional ||Ax - b||_2^2 as composition of l2 norm squared # and the residual operator. -obj_fun = odl.solvers.L2NormSquared(ray_trafo.range) * (ray_trafo - data) +obj_fun = odl.functional.L2NormSquared(ray_trafo.range) * (ray_trafo - data) # Create line search line_search = 1.0 diff --git a/examples/solvers/lbfgs_tomography_tv.py b/examples/solvers/lbfgs_tomography_tv.py index e929afe23a0..7556c06dd87 100644 --- a/examples/solvers/lbfgs_tomography_tv.py +++ b/examples/solvers/lbfgs_tomography_tv.py @@ -22,7 +22,7 @@ # Reconstruction space: discretized functions on the rectangle # [-20, 20]^2 with 200 samples per dimension. reco_space = odl.uniform_discr( - min_pt=[-20, -20], max_pt=[20, 20], shape=[200, 200]) + min_pt=[-20, -20], max_pt=[20, 20], shape=[200, 200], dtype='float32') # Make a parallel beam geometry with flat detector # Angles: uniformly spaced, n = 400, min = 0, max = pi @@ -30,34 +30,34 @@ # Detector: uniformly sampled, n = 400, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 400) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Create the forward operator -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # --- Generate artificial data --- # # Create phantom -discr_phantom = odl.phantom.shepp_logan(reco_space, modified=True) +discr_phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create sinogram of forward projected phantom with noise data = ray_trafo(discr_phantom) -data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(ray_trafo.range) * odl.mean(data) * 0.1 # --- Set up optimization problem and solve --- # # Create data term ||Ax - b||_2^2 as composition of the squared L2 norm and the # ray trafo translated by the data. -l2_norm = odl.solvers.L2NormSquared(ray_trafo.range) +l2_norm = odl.functional.L2NormSquared(ray_trafo.range) data_discrepancy = l2_norm * (ray_trafo - data) # Create regularizing functional || |grad(x)| ||_1 and smooth the functional # using the Moreau envelope. # The parameter sigma controls the strength of the regularization. gradient = odl.Gradient(reco_space) -l1_norm = odl.solvers.GroupL1Norm(gradient.range) -smoothed_l1 = odl.solvers.MoreauEnvelope(l1_norm, sigma=0.03) +l1_norm = odl.functional.GroupL1Norm(gradient.range) +smoothed_l1 = odl.functional.MoreauEnvelope(l1_norm, sigma=0.03) regularizer = smoothed_l1 * gradient # Create full objective functional diff --git a/examples/solvers/nuclear_norm_minimization.py b/examples/solvers/nuclear_norm_minimization.py index 310f90b866a..4b23dcbec29 100644 --- a/examples/solvers/nuclear_norm_minimization.py +++ b/examples/solvers/nuclear_norm_minimization.py @@ -33,8 +33,8 @@ # Create functionals for the data discrepancy (L2 squared) and for the # regularizer (nuclear norm). The nuclear norm is defined on the range of # the vectorial gradient, which is vector valued. -l2err = odl.solvers.L2NormSquared(pspace).translated(data) -nuc_norm = 0.02 * odl.solvers.NuclearNorm(pgradient.range) +l2err = odl.functional.L2NormSquared(pspace).translated(data) +nuc_norm = 0.02 * odl.functional.NuclearNorm(pgradient.range) # Assemble operators and functionals for the solver routine lin_ops = [odl.IdentityOperator(pspace), pgradient] @@ -43,7 +43,7 @@ # The solver we want to use also takes an additional functional f which can be # used to enforce bounds constraints and other prior information. Here we lack # prior information so we set it to zero. -f = odl.solvers.ZeroFunctional(pspace) +f = odl.functional.ZeroFunctional(pspace) # Create a callback that shows the current function value and also shows the # iterate graphically every 20:th step. @@ -57,6 +57,6 @@ x = data.copy() odl.solvers.douglas_rachford_pd(x, f, g, lin_ops, tau=1e-2, sigma=[1.0, 1e-3], - niter=2000, callback=callback) + niter=100, callback=callback) x.show('Reconstruction', force_show=True) diff --git a/examples/solvers/nuclear_norm_tomography.py b/examples/solvers/nuclear_norm_tomography.py index 767a5e86898..3d890df8f44 100644 --- a/examples/solvers/nuclear_norm_tomography.py +++ b/examples/solvers/nuclear_norm_tomography.py @@ -40,25 +40,25 @@ angle_partition = odl.uniform_partition(0, np.pi, 300) # Detector: uniformly sampled, n = 300, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 300) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Create the forward operator, and also the vectorial forward operator. -ray_trafo = odl.tomo.RayTransform(space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(space, geometry) forward_op = odl.DiagonalOperator(ray_trafo, 2) # Create phantom where the first component contains only part of the # information in the second component. # We do this by using a sub-set of the ellipses in the well known Shepp-Logan # phantom. -ellipses = odl.phantom.shepp_logan_ellipsoids(space.ndim, modified=True) +ellipses = odl.core.phantom.shepp_logan_ellipsoids(space.ndim, modified=True) phantom = forward_op.domain.element( - [odl.phantom.ellipsoid_phantom(space, ellipses[:2]), - odl.phantom.ellipsoid_phantom(space, ellipses)]) + [odl.core.phantom.ellipsoid_phantom(space, ellipses[:2]), + odl.core.phantom.ellipsoid_phantom(space, ellipses)]) phantom.show('phantom') # Create data where second channel is highly noisy (SNR = 1) data = forward_op(phantom) -data[1] += odl.phantom.white_noise(forward_op.range[1]) * np.mean(data[1]) +data[1] += odl.core.phantom.white_noise(forward_op.range[1]) * odl.mean(data[1]) data.show('data') # Set up gradient and vectorial gradient @@ -66,24 +66,24 @@ pgradient = odl.DiagonalOperator(gradient, 2) # Create data discrepancy functionals -l2err1 = odl.solvers.L2NormSquared(ray_trafo.range).translated(data[0]) -l2err2 = odl.solvers.L2NormSquared(ray_trafo.range).translated(data[1]) +l2err1 = odl.functional.L2NormSquared(ray_trafo.range).translated(data[0]) +l2err2 = odl.functional.L2NormSquared(ray_trafo.range).translated(data[1]) # Scale the error term of the second channel so it is more heavily regularized. # Note that we need to use SeparableSum, otherwise the solver would not be able # to compute the proximal. # The separable sum is defined by: l2err([x, y]) = l2err1(x) + 0.1 * l2err(y) -l2err = odl.solvers.SeparableSum(l2err1, 0.1 * l2err2) +l2err = odl.functional.SeparableSum(l2err1, 0.1 * l2err2) # Create nuclear norm -nuc_norm = odl.solvers.NuclearNorm(pgradient.range, +nuc_norm = odl.functional.NuclearNorm(pgradient.range, singular_vector_exp=1) # Assemble the functionals and operators for the solver lam = 0.1 lin_ops = [forward_op, pgradient] g = [l2err, lam * nuc_norm] -f = odl.solvers.IndicatorBox(forward_op.domain, 0, 1) +f = odl.functional.IndicatorBox(forward_op.domain, 0, 1) # Create callback that prints current iterate value and displays every 20th # iterate. diff --git a/examples/solvers/pdhg_deconvolve.py b/examples/solvers/pdhg_deconvolve.py index b65e6bcc9ae..b02e8bf8576 100644 --- a/examples/solvers/pdhg_deconvolve.py +++ b/examples/solvers/pdhg_deconvolve.py @@ -32,14 +32,14 @@ convolution = ft.inverse * gaussian * ft # Optional: Run diagnostics to assure the adjoint is properly implemented -# odl.diagnostics.OperatorTest(conv_op).run_tests() +# odl.core.diagnostics.OperatorTest(conv_op).run_tests() # Create phantom -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create the convolved version of the phantom data = convolution(phantom) -data += odl.phantom.white_noise(convolution.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(convolution.range) * odl.mean(data) * 0.1 data.show('Convolved Data') # Set up PDHG: @@ -51,23 +51,23 @@ op = odl.BroadcastOperator(convolution, gradient) # Create the functional for unconstrained primal variable -f = odl.solvers.ZeroFunctional(op.domain) +f = odl.functional.ZeroFunctional(op.domain) # l2-squared data matching -l2_norm_squared = odl.solvers.L2NormSquared(space).translated(data) +l2_norm_squared = odl.functional.L2NormSquared(space).translated(data) # Isotropic TV-regularization i.e. the l1-norm -l1_norm = 0.01 * odl.solvers.L1Norm(gradient.range) +l1_norm = 0.01 * odl.functional.L1Norm(gradient.range) # Make separable sum of functionals, order must be the same as in `op` -g = odl.solvers.SeparableSum(l2_norm_squared, l1_norm) +g = odl.functional.SeparableSum(l2_norm_squared, l1_norm) # --- Select solver parameters and solve using PDHG --- # # Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1 op_norm = 1.1 * odl.power_method_opnorm(op) -niter = 300 # Number of iterations +niter = 100 # Number of iterations tau = 10.0 / op_norm # Step size for the primal variable sigma = 0.1 / op_norm # Step size for the dual variables diff --git a/examples/solvers/pdhg_denoising.py b/examples/solvers/pdhg_denoising.py index ed2662d3cf9..9f0b0576084 100644 --- a/examples/solvers/pdhg_denoising.py +++ b/examples/solvers/pdhg_denoising.py @@ -11,12 +11,12 @@ """ import numpy as np -import scipy.misc +import skimage import odl # Read test image: use only every second pixel, convert integer to float, # and rotate to get the image upright -image = np.rot90(scipy.misc.ascent()[::2, ::2], 3).astype('float') +image = np.rot90(skimage.data.camera()).astype('float') shape = image.shape # Rescale max to 1 @@ -29,7 +29,7 @@ orig = space.element(image) # Add noise -image += 0.1 * odl.phantom.white_noise(orig.space) +orig += 0.1 * odl.core.phantom.white_noise(orig.space) # Data of noisy image noisy = space.element(image) @@ -43,23 +43,23 @@ # Set up the functionals # l2-squared data matching -l2_norm = odl.solvers.L2NormSquared(space).translated(noisy) +l2_norm = odl.functional.L2NormSquared(space).translated(noisy) # Isotropic TV-regularization: l1-norm of grad(x) -l1_norm = 0.15 * odl.solvers.L1Norm(gradient.range) +l1_norm = 0.15 * odl.functional.L1Norm(gradient.range) # Make separable sum of functionals, order must correspond to the operator K -g = odl.solvers.SeparableSum(l2_norm, l1_norm) +g = odl.functional.SeparableSum(l2_norm, l1_norm) # Non-negativity constraint -f = odl.solvers.IndicatorNonnegativity(op.domain) +f = odl.functional.IndicatorNonnegativity(op.domain) # --- Select solver parameters and solve using PDHG --- # # Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1 op_norm = 1.1 * odl.power_method_opnorm(op, xstart=noisy) -niter = 200 # Number of iterations +niter = 100 # Number of iterations tau = 1.0 / op_norm # Step size for the primal variable sigma = 1.0 / op_norm # Step size for the dual variable diff --git a/examples/solvers/pdhg_denoising_L1_HuberTV.py b/examples/solvers/pdhg_denoising_L1_HuberTV.py index 950df4052a5..5c3636df6d0 100644 --- a/examples/solvers/pdhg_denoising_L1_HuberTV.py +++ b/examples/solvers/pdhg_denoising_L1_HuberTV.py @@ -20,16 +20,16 @@ # Define ground truth, space and noisy data shape = [100, 100] space = odl.uniform_discr([0, 0], shape, shape) -orig = odl.phantom.smooth_cuboid(space) -d = odl.phantom.salt_pepper_noise(orig, fraction=0.2) +orig = odl.core.phantom.smooth_cuboid(space) +d = odl.core.phantom.salt_pepper_noise(orig, fraction=0.2) # Define objective functional op = odl.Gradient(space) # operator norm_op = np.sqrt(8) + 1e-2 # norm with forward differences is well-known lam = 2 # Regularization parameter const = 0.5 -f = const / lam * odl.solvers.L1Norm(space).translated(d) # data fit -g = const * odl.solvers.Huber(op.range, gamma=.01) # regularization +f = const / lam * odl.functional.L1Norm(space).translated(d) # data fit +g = const * odl.functional.Huber(op.range, gamma=.01) # regularization obj_fun = f + g * op # combined functional mu_g = 1 / g.grad_lipschitz # Strong convexity of "f*" diff --git a/examples/solvers/pdhg_denoising_L2_HuberTV.py b/examples/solvers/pdhg_denoising_L2_HuberTV.py index 541cdacc645..2bcbea378d0 100644 --- a/examples/solvers/pdhg_denoising_L2_HuberTV.py +++ b/examples/solvers/pdhg_denoising_L2_HuberTV.py @@ -26,24 +26,24 @@ """ import numpy as np -import scipy.misc +import skimage import odl import matplotlib.pyplot as plt # Define ground truth, space and noisy data -image = np.rot90(scipy.misc.ascent()[::2, ::2].astype('float'), 3) +image = np.rot90(skimage.data.camera().astype('float'), 3) shape = image.shape image /= image.max() space = odl.uniform_discr([0, 0], shape, shape) orig = space.element(image.copy()) -d = odl.phantom.white_noise(space, orig, 0.1) +d = odl.core.phantom.white_noise(space, orig, 0.1) # Define objective functional op = odl.Gradient(space) # operator norm_op = np.sqrt(8) + 1e-4 # norm with forward differences is well-known lam = 0.1 # Regularization parameter -f = 1 / (2 * lam) * odl.solvers.L2NormSquared(space).translated(d) # data fit -g = odl.solvers.Huber(op.range, gamma=.01) # regularization +f = 1 / (2 * lam) * odl.functional.L2NormSquared(space).translated(d) # data fit +g = odl.functional.Huber(op.range, gamma=.01) # regularization obj_fun = f + g * op # combined functional mu_g = 1 / lam # strong convexity of "g" mu_f = 1 / f.grad_lipschitz # strong convexity of "f*" diff --git a/examples/solvers/pdhg_denoising_ROF_algorithm_comparison.py b/examples/solvers/pdhg_denoising_ROF_algorithm_comparison.py index 66b50454149..54b9e0b3299 100644 --- a/examples/solvers/pdhg_denoising_ROF_algorithm_comparison.py +++ b/examples/solvers/pdhg_denoising_ROF_algorithm_comparison.py @@ -16,14 +16,14 @@ """ import numpy as np -import scipy.misc +import skimage import odl import matplotlib.pyplot as plt # --- define setting --- # # Read test image: use only every second pixel, convert integer to float -image = scipy.misc.ascent()[::2, ::2].astype('float') +image = skimage.data.camera().astype('float') shape = image.shape # Rescale max to 1 @@ -36,7 +36,7 @@ orig = space.element(image.copy()) # Add noise and convert to space element -noisy = orig + 0.1 * odl.phantom.white_noise(space) +noisy = orig + 0.1 * odl.core.phantom.white_noise(space) # Gradient operator gradient = odl.Gradient(space, method='forward') @@ -46,13 +46,13 @@ # l2-squared data matching factr = 0.5 / reg_param -l2_norm = factr * odl.solvers.L2NormSquared(space).translated(noisy) +l2_norm = factr * odl.functional.L2NormSquared(space).translated(noisy) # Isotropic TV-regularization: l1-norm of grad(x) -l1_norm = odl.solvers.GroupL1Norm(gradient.range, 2) +l1_norm = odl.functional.GroupL1Norm(gradient.range, 2) # characteristic function -char_fun = odl.solvers.IndicatorNonnegativity(space) +char_fun = odl.functional.IndicatorNonnegativity(space) # define objective obj = l2_norm + l1_norm * gradient + char_fun @@ -90,7 +90,7 @@ def reset(self): callback = (odl.solvers.CallbackPrintIteration() & CallbackStore()) # number of iterations -niter = 500 +niter = 100 # %% Run Algorithms @@ -100,7 +100,7 @@ def reset(self): op = odl.BroadcastOperator(odl.IdentityOperator(space), gradient) # Make separable sum of functionals, order must correspond to the operator K -g = odl.solvers.SeparableSum(l2_norm, l1_norm) +g = odl.functional.SeparableSum(l2_norm, l1_norm) # Non-negativity constraint f = char_fun @@ -131,7 +131,7 @@ def reset(self): g = l1_norm # Create new functional that combines data fit and characteritic function -f = odl.solvers.FunctionalQuadraticPerturb(char_fun, factr, -2 * factr * noisy) +f = odl.functional.FunctionalQuadraticPerturb(char_fun, factr, -2 * factr * noisy) # The operator norm of the gradient with forward differences is well-known op_norm = np.sqrt(8) + 1e-4 @@ -158,23 +158,23 @@ def reset(self): # show images plt.figure(0) ax1 = plt.subplot(231) -ax1.imshow(orig, clim=[0, 1], cmap='gray') +ax1.imshow(orig.data, clim=[0, 1], cmap='gray') ax1.title.set_text('Original Image') ax2 = plt.subplot(232) -ax2.imshow(noisy, clim=[0, 1], cmap='gray') +ax2.imshow(noisy.data, clim=[0, 1], cmap='gray') ax2.title.set_text('Noisy Image') ax3 = plt.subplot(234) -ax3.imshow(x_alg1, clim=[0, 1], cmap='gray') +ax3.imshow(x_alg1.data, clim=[0, 1], cmap='gray') ax3.title.set_text('Algo 1') ax4 = plt.subplot(235) -ax4.imshow(x_alg2, clim=[0, 1], cmap='gray') +ax4.imshow(x_alg2.data, clim=[0, 1], cmap='gray') ax4.title.set_text('Algo 2') ax5 = plt.subplot(236) -ax5.imshow(x_alg3, clim=[0, 1], cmap='gray') +ax5.imshow(x_alg3.data, clim=[0, 1], cmap='gray') ax5.title.set_text('Algo 3') # show function values diff --git a/examples/solvers/pdhg_denoising_complex.py b/examples/solvers/pdhg_denoising_complex.py index 7b2fdb3148f..833439825e2 100644 --- a/examples/solvers/pdhg_denoising_complex.py +++ b/examples/solvers/pdhg_denoising_complex.py @@ -11,12 +11,12 @@ """ import numpy as np -import scipy.misc +import skimage import odl # Read test image: use only every second pixel, convert integer to float, # and rotate to get the image upright -image = np.rot90(scipy.misc.ascent()[::1, ::1], 3).astype('float32') +image = np.rot90(skimage.data.camera(), 3).astype('float32') image = image + 1j * image.T shape = image.shape @@ -30,7 +30,7 @@ orig = space.element(image) # Add noise -noisy = image + 0.05 * odl.phantom.white_noise(orig.space) +noisy = orig + 0.05 * odl.core.phantom.white_noise(orig.space) # Gradient operator gradient = odl.Gradient(space) @@ -41,14 +41,14 @@ # Set up the functionals # l2-squared data matching -l2_norm = odl.solvers.L2NormSquared(space).translated(noisy) +l2_norm = odl.functional.L2NormSquared(space).translated(noisy) # Isotropic TV-regularization: l1-norm of grad(x) -l1_norm = 0.15 * odl.solvers.L1Norm(gradient.range) +l1_norm = 0.15 * odl.functional.L1Norm(gradient.range) # Make separable sum of functionals, order must correspond to the operator K -f = odl.solvers.ZeroFunctional(op.domain) -g = odl.solvers.SeparableSum(l2_norm, l1_norm) +f = odl.functional.ZeroFunctional(op.domain) +g = odl.functional.SeparableSum(l2_norm, l1_norm) # --- Select solver parameters and solve using Chambolle-Pock --- # diff --git a/examples/solvers/pdhg_denoising_tgv.py b/examples/solvers/pdhg_denoising_tgv.py index 6dc234d6c93..12dc0c5c4fb 100644 --- a/examples/solvers/pdhg_denoising_tgv.py +++ b/examples/solvers/pdhg_denoising_tgv.py @@ -44,12 +44,12 @@ # --- Generate artificial data --- # # Create phantom -phantom = odl.phantom.tgv_phantom(U) +phantom = odl.core.phantom.tgv_phantom(U) phantom.show(title='Phantom') # Create sinogram of forward projected phantom with noise data = A(phantom) -data += odl.phantom.white_noise(A.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(A.range) * odl.mean(data) * 0.1 data.show(title='Simulated Data') @@ -66,9 +66,9 @@ # TODO: As the weighted space is currently not supported in ODL we find a # workaround. # W = odl.ProductSpace(U, 3, weighting=[1, 1, 2]) -# sym_gradient = odl.operator.ProductSpaceOperator( +# sym_gradient = odl.core.operator.ProductSpaceOperator( # [[Dx, 0], [0, Dy], [0.5*Dy, 0.5*Dx]], range=W) -E = odl.operator.ProductSpaceOperator( +E = odl.core.operator.ProductSpaceOperator( [[Dx, 0], [0, Dy], [0.5 * Dy, 0.5 * Dx], [0.5 * Dy, 0.5 * Dx]]) W = E.range @@ -86,28 +86,28 @@ E * odl.ComponentProjection(domain, 1)) # Do not use the f functional, set it to zero. -f = odl.solvers.ZeroFunctional(domain) +f = odl.functional.ZeroFunctional(domain) # l2-squared data matching -l2_norm = odl.solvers.L2NormSquared(A.range).translated(data) +l2_norm = odl.functional.L2NormSquared(A.range).translated(data) # parameters alpha = 1e-1 beta = 1 # The l1-norms scaled by regularization paramters -l1_norm_1 = alpha * odl.solvers.L1Norm(V) -l1_norm_2 = alpha * beta * odl.solvers.L1Norm(W) +l1_norm_1 = alpha * odl.functional.L1Norm(V) +l1_norm_2 = alpha * beta * odl.functional.L1Norm(W) # Combine functionals, order must correspond to the operator K -g = odl.solvers.SeparableSum(l2_norm, l1_norm_1, l1_norm_2) +g = odl.functional.SeparableSum(l2_norm, l1_norm_1, l1_norm_2) # --- Select solver parameters and solve using PDHG --- # # Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1 op_norm = 1.1 * odl.power_method_opnorm(op) -niter = 400 # Number of iterations +niter = 100 # Number of iterations tau = 1.0 / op_norm # Step size for the primal variable sigma = 1.0 / op_norm # Step size for the dual variable diff --git a/examples/solvers/pdhg_tomography.py b/examples/solvers/pdhg_tomography.py index 71dfefef568..8fcf6d90161 100644 --- a/examples/solvers/pdhg_tomography.py +++ b/examples/solvers/pdhg_tomography.py @@ -26,19 +26,19 @@ angle_partition = odl.uniform_partition(0, np.pi, 360) # Detector: uniformly sampled, n = 512, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 512) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Create the forward operator -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # --- Generate artificial data --- # # Create phantom -discr_phantom = odl.phantom.shepp_logan(reco_space, modified=True) +discr_phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create sinogram of forward projected phantom with noise data = ray_trafo(discr_phantom) -data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(ray_trafo.range) * odl.mean(data) * 0.1 # --- Set up the inverse problem --- # @@ -49,25 +49,25 @@ op = odl.BroadcastOperator(ray_trafo, gradient) # Do not use the f functional, set it to zero. -f = odl.solvers.ZeroFunctional(op.domain) +f = odl.functional.ZeroFunctional(op.domain) # Create functionals for the dual variable # l2-squared data matching -l2_norm = odl.solvers.L2NormSquared(ray_trafo.range).translated(data) +l2_norm = odl.functional.L2NormSquared(ray_trafo.range).translated(data) # Isotropic TV-regularization i.e. the l1-norm -l1_norm = 0.015 * odl.solvers.L1Norm(gradient.range) +l1_norm = 0.015 * odl.functional.L1Norm(gradient.range) # Combine functionals, order must correspond to the operator K -g = odl.solvers.SeparableSum(l2_norm, l1_norm) +g = odl.functional.SeparableSum(l2_norm, l1_norm) # --- Select solver parameters and solve using PDHG --- # # Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1 op_norm = 1.1 * odl.power_method_opnorm(op) -niter = 200 # Number of iterations +niter = 100 # Number of iterations tau = 1.0 / op_norm # Step size for the primal variable sigma = 1.0 / op_norm # Step size for the dual variable diff --git a/examples/solvers/pdhg_tomography_tgv.py b/examples/solvers/pdhg_tomography_tgv.py index 82889986b5f..30207f13b2b 100644 --- a/examples/solvers/pdhg_tomography_tgv.py +++ b/examples/solvers/pdhg_tomography_tgv.py @@ -39,20 +39,20 @@ min_pt=[-20, -20], max_pt=[20, 20], shape=[100, 100], dtype='float32') # Make a parallel beam geometry with flat detector -geometry = odl.tomo.parallel_beam_geometry(U) +geometry = odl.applications.tomo.parallel_beam_geometry(U) # Create the forward operator -A = odl.tomo.RayTransform(U, geometry) +A = odl.applications.tomo.RayTransform(U, geometry) # --- Generate artificial data --- # # Create phantom -phantom = odl.phantom.tgv_phantom(U) +phantom = odl.core.phantom.tgv_phantom(U) phantom.show(title='Phantom') # Create sinogram of forward projected phantom with noise data = A(phantom) -data += odl.phantom.white_noise(A.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(A.range) * odl.mean(data) * 0.1 data.show(title='Simulated Data (Sinogram)') @@ -69,9 +69,9 @@ # TODO: As the weighted space is currently not supported in ODL we find a # workaround. # W = odl.ProductSpace(U, 3, weighting=[1, 1, 2]) -# sym_gradient = odl.operator.ProductSpaceOperator( +# sym_gradient = odl.core.operator.ProductSpaceOperator( # [[Dx, 0], [0, Dy], [0.5*Dy, 0.5*Dx]], range=W) -E = odl.operator.ProductSpaceOperator( +E = odl.core.operator.ProductSpaceOperator( [[Dx, 0], [0, Dy], [0.5 * Dy, 0.5 * Dx], [0.5 * Dy, 0.5 * Dx]]) W = E.range @@ -89,28 +89,28 @@ E * odl.ComponentProjection(domain, 1)) # Do not use the f functional, set it to zero. -f = odl.solvers.ZeroFunctional(domain) +f = odl.functional.ZeroFunctional(domain) # l2-squared data matching -l2_norm = odl.solvers.L2NormSquared(A.range).translated(data) +l2_norm = odl.functional.L2NormSquared(A.range).translated(data) # parameters alpha = 4e-1 beta = 1 # The l1-norms scaled by regularization paramters -l1_norm_1 = alpha * odl.solvers.L1Norm(V) -l1_norm_2 = alpha * beta * odl.solvers.L1Norm(W) +l1_norm_1 = alpha * odl.functional.L1Norm(V) +l1_norm_2 = alpha * beta * odl.functional.L1Norm(W) # Combine functionals, order must correspond to the operator K -g = odl.solvers.SeparableSum(l2_norm, l1_norm_1, l1_norm_2) +g = odl.functional.SeparableSum(l2_norm, l1_norm_1, l1_norm_2) # --- Select solver parameters and solve using PDHG --- # # Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1 op_norm = 1.1 * odl.power_method_opnorm(op) -niter = 300 # Number of iterations +niter = 100 # Number of iterations tau = 1.0 / op_norm # Step size for the primal variable sigma = 1.0 / op_norm # Step size for the dual variable diff --git a/examples/solvers/proximal_gradient_denoising.py b/examples/solvers/proximal_gradient_denoising.py index 668fe8f4920..24185661955 100644 --- a/examples/solvers/proximal_gradient_denoising.py +++ b/examples/solvers/proximal_gradient_denoising.py @@ -21,8 +21,8 @@ min_pt=[-20, -20], max_pt=[20, 20], shape=[300, 300]) # Create phantom -data = odl.phantom.shepp_logan(space, modified=True) -data = odl.phantom.salt_pepper_noise(data) +data = odl.core.phantom.shepp_logan(space, modified=True) +data = odl.core.phantom.salt_pepper_noise(data) # Create gradient operator grad = odl.Gradient(space) @@ -31,11 +31,11 @@ # --- Set up the inverse problem --- # # Create data discrepancy by translating the l1 norm -l1_norm = odl.solvers.L1Norm(space) +l1_norm = odl.functional.L1Norm(space) data_discrepancy = l1_norm.translated(data) # l2-squared norm of gradient -regularizer = 0.05 * odl.solvers.L2NormSquared(grad.range) * grad +regularizer = 0.05 * odl.functional.L2NormSquared(grad.range) * grad # --- Select solver parameters and solve using proximal gradient --- # diff --git a/examples/solvers/proximal_gradient_wavelet_tomography.py b/examples/solvers/proximal_gradient_wavelet_tomography.py index 06ce67210fe..f772d4794c8 100644 --- a/examples/solvers/proximal_gradient_wavelet_tomography.py +++ b/examples/solvers/proximal_gradient_wavelet_tomography.py @@ -27,21 +27,21 @@ angle_partition = odl.uniform_partition(0, np.pi, 300) # Detector: uniformly sampled, n = 300, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 300) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Create the forward operator, and also the vectorial forward operator. -ray_trafo = odl.tomo.RayTransform(space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(space, geometry) # --- Generate artificial data --- # # Create phantom -discr_phantom = odl.phantom.shepp_logan(space, modified=True) +discr_phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create sinogram of forward projected phantom with noise data = ray_trafo(discr_phantom) -data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1 +data += odl.core.phantom.white_noise(ray_trafo.range) * odl.mean(data) * 0.1 # --- Set up the inverse problem --- # @@ -59,10 +59,10 @@ Wtrafoinv = W.inverse * (1 / (np.power(1.7, scales))) # Create regularizer as l1 norm -regularizer = 0.0005 * odl.solvers.L1Norm(W.range) +regularizer = 0.0005 * odl.functional.L1Norm(W.range) # l2-squared norm of residual -l2_norm_sq = odl.solvers.L2NormSquared(ray_trafo.range).translated(data) +l2_norm_sq = odl.functional.L2NormSquared(ray_trafo.range).translated(data) # Compose from the right with ray transform and wavelet transform data_discrepancy = l2_norm_sq * ray_trafo * Wtrafoinv @@ -85,7 +85,7 @@ def callb(x): # Run the algorithm (FISTA) x = data_discrepancy.domain.zero() odl.solvers.accelerated_proximal_gradient( - x, f=regularizer, g=data_discrepancy, niter=400, gamma=gamma, + x, f=regularizer, g=data_discrepancy, niter=100, gamma=gamma, callback=callb) # Display images diff --git a/examples/solvers/proximal_lang_poisson.py b/examples/solvers/proximal_lang_poisson.py deleted file mode 100644 index 4218a69ee65..00000000000 --- a/examples/solvers/proximal_lang_poisson.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Poisson's problem using the ProxImaL solver. - -Solves the optimization problem - - min_x 10 ||laplacian(x) - g||_2^2 + || |grad(x)| ||_1 - -Where ``laplacian`` is the spatial Laplacian, ``grad`` the spatial -gradient and ``g`` is given noisy data. -""" - -import numpy as np -import odl -import proximal - -# Create space defined on a square from [0, 0] to [100, 100] with (100 x 100) -# points -space = odl.uniform_discr([0, 0], [100, 100], [100, 100]) - -# Create ODL operator for the Laplacian -laplacian = odl.Laplacian(space) - -# Create right hand side -phantom = odl.phantom.shepp_logan(space, modified=True) -phantom.show('original image') -rhs = laplacian(phantom) -rhs += odl.phantom.white_noise(space) * np.std(rhs) * 0.1 -rhs.show('rhs') - -# Convert laplacian to ProxImaL operator -proximal_lang_laplacian = odl.as_proximal_lang_operator(laplacian) - -# Convert to array -rhs_arr = rhs.asarray() - -# Set up optimization problem -x = proximal.Variable(space.shape) -funcs = [10 * proximal.sum_squares(proximal_lang_laplacian(x) - rhs_arr), - proximal.norm1(proximal.grad(x))] - -# Solve the problem using ProxImaL -prob = proximal.Problem(funcs) -prob.solve(verbose=True) - -# Convert back to odl and display result -result_odl = space.element(x.value) -result_odl.show('result from ProxImaL', force_show=True) diff --git a/examples/solvers/proximal_lang_tomography.py b/examples/solvers/proximal_lang_tomography.py deleted file mode 100644 index ab3ffdcc0ea..00000000000 --- a/examples/solvers/proximal_lang_tomography.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Tomography with TV regularization using the ProxImaL solver. - -Solves the optimization problem - - min_{0 <= x <= 1} ||A(x) - g||_2^2 + 0.2 || |grad(x)| ||_1 - -Where ``A`` is a parallel beam forward projector, ``grad`` the spatial -gradient and ``g`` is given noisy data. -""" - -import numpy as np -import odl -import proximal - - -# --- Set up the forward operator (ray transform) --- # - - -# Reconstruction space: discretized functions on the rectangle -# [-20, 20]^2 with 300 samples per dimension. -reco_space = odl.uniform_discr( - min_pt=[-20, -20], max_pt=[20, 20], shape=[300, 300], dtype='float32') - -# Make a parallel beam geometry with flat detector -# Angles: uniformly spaced, n = 360, min = 0, max = pi -angle_partition = odl.uniform_partition(0, np.pi, 360) -# Detector: uniformly sampled, n = 512, min = -30, max = 30 -detector_partition = odl.uniform_partition(-30, 30, 512) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) - -# Initialize the ray transform (forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) - -# Convert ray transform to proximal language operator -proximal_lang_ray_trafo = odl.as_proximal_lang_operator(ray_trafo) - -# Create sinogram of forward projected phantom with noise -phantom = odl.phantom.shepp_logan(reco_space, modified=True) -phantom.show('phantom') -data = ray_trafo(phantom) -data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.1 -data.show('noisy data') - -# Convert to array for ProxImaL -rhs_arr = data.asarray() - -# Set up optimization problem -# Note that proximal is not aware of the underlying space and only works with -# matrices. Hence the norm in proximal does not match the norm in the ODL space -# exactly. -x = proximal.Variable(reco_space.shape) -funcs = [proximal.sum_squares(proximal_lang_ray_trafo(x) - rhs_arr), - 0.2 * proximal.norm1(proximal.grad(x)), - proximal.nonneg(x), - proximal.nonneg(1 - x)] - -# Solve the problem using ProxImaL -prob = proximal.Problem(funcs) -prob.solve(verbose=True) - -# Convert back to odl and display result -result_odl = reco_space.element(x.value) -result_odl.show('ProxImaL result', force_show=True) diff --git a/examples/solvers/rosenbrock_minimization.py b/examples/solvers/rosenbrock_minimization.py index d15d3176791..9b057fa3cb7 100644 --- a/examples/solvers/rosenbrock_minimization.py +++ b/examples/solvers/rosenbrock_minimization.py @@ -27,7 +27,7 @@ space = odl.rn(2) # Create objective functional -f = odl.solvers.RosenbrockFunctional(space) +f = odl.functional.RosenbrockFunctional(space) # Define a line search method line_search = odl.solvers.BacktrackingLineSearch(f) diff --git a/examples/solvers/scipy_solvers.py b/examples/solvers/scipy_solvers.py index 6414ed63fd0..48d4dffd57f 100644 --- a/examples/solvers/scipy_solvers.py +++ b/examples/solvers/scipy_solvers.py @@ -14,14 +14,14 @@ # Create discrete space, a square from [-1, 1] x [-1, 1] with (11 x 11) points space = odl.uniform_discr([-1, -1], [1, 1], [11, 11]) -# Create odl operator for negative laplacian +# Create odl core.operator for negative laplacian laplacian = -odl.Laplacian(space) # Create right hand side, a gaussian around the point (0, 0) rhs = space.element(lambda x: np.exp(-(x[0]**2 + x[1]**2) / 0.1**2)) # Convert laplacian to scipy operator -scipy_laplacian = odl.operator.oputils.as_scipy_operator(laplacian) +scipy_laplacian = odl.core.operator.oputils.as_scipy_operator(laplacian) # Convert to array and flatten rhs_arr = rhs.asarray().ravel() diff --git a/examples/space/simple_r.py b/examples/space/simple_r.py deleted file mode 100644 index f35766424bc..00000000000 --- a/examples/space/simple_r.py +++ /dev/null @@ -1,53 +0,0 @@ -"""An example of a very simple space, the real numbers.""" - -import odl - - -class Reals(odl.set.LinearSpace): - """The real numbers.""" - - def __init__(self): - super(Reals, self).__init__(field=odl.RealNumbers()) - - def _inner(self, x1, x2): - return x1.__val__ * x2.__val__ - - def _lincomb(self, a, x1, b, x2, out): - out.__val__ = a * x1.__val__ + b * x2.__val__ - - def _multiply(self, x1, x2, out): - out.__val__ = x1.__val__ * x2.__val__ - - def __eq__(self, other): - return isinstance(other, Reals) - - def element(self, value=0): - return RealNumber(self, value) - - -class RealNumber(odl.set.space.LinearSpaceElement): - """Real vectors are floats.""" - - __val__ = None - - def __init__(self, space, v): - super(RealNumber, self).__init__(space) - self.__val__ = v - - def __float__(self): - return self.__val__.__float__() - - def __str__(self): - return str(self.__val__) - - -R = Reals() -x = R.element(5.0) -y = R.element(10.0) - -print(x) -print(y) -print(x + y) -print(x * y) -print(x - y) -print(3.14 * x) diff --git a/examples/space/simple_rn.py b/examples/space/simple_rn.py deleted file mode 100644 index f091ca02c34..00000000000 --- a/examples/space/simple_rn.py +++ /dev/null @@ -1,139 +0,0 @@ -"""An example of a very simple space, the space rn. - -Including some benchmarks with an optimized version. -""" - -import numpy as np -import odl -from odl.space.base_tensors import TensorSpace, Tensor -from odl.util.testutils import timer - - -class SimpleRn(TensorSpace): - """The real space R^n, non-optimized implmentation.""" - - def __init__(self, size): - super(SimpleRn, self).__init__(size, dtype=float) - - def zero(self): - return self.element(np.zeros(self.size)) - - def one(self): - return self.element(np.ones(self.size)) - - def _lincomb(self, a, x1, b, x2, out): - out.data[:] = a * x1.data + b * x2.data - - def _inner(self, x1, x2): - return float(np.vdot(x1.data, x2.data)) - - def _multiply(self, x1, x2, out): - out.data[:] = x1.data * x2.data - - def _divide(self, x1, x2, out): - out.data[:] = x1.data / x2.data - - def element(self, *args, **kwargs): - if not args and not kwargs: - return self.element(np.empty(self.size)) - if isinstance(args[0], np.ndarray): - if args[0].shape == (self.size,): - return RnVector(self, args[0]) - else: - raise ValueError('input array {} is of shape {}, expected ' - 'shape ({},).'.format(args[0], args[0].shape, - self.dim,)) - else: - return self.element(np.array( - *args, **kwargs).astype(np.float64, copy=AVOID_UNNECESSARY_COPY)) - return self.element(np.empty(self.dim, dtype=np.float64)) - - -class RnVector(Tensor): - def __init__(self, space, data): - super(RnVector, self).__init__(space) - self.data = data - - def __getitem__(self, index): - return self.data.__getitem__(index) - - def __setitem__(self, index, value): - return self.data.__setitem__(index, value) - - def asarray(self, *args): - return self.data(*args) - - -r5 = SimpleRn(5) -# odl.diagnostics.SpaceTest(r5).run_tests() - -# Do some tests to compare -n = 10 ** 7 -iterations = 10 - -# Perform some benchmarks with rn -opt_spc = odl.rn(n) -simple_spc = SimpleRn(n) - -x, y, z = np.random.rand(n), np.random.rand(n), np.random.rand(n) -ox, oy, oz = (opt_spc.element(x.copy()), opt_spc.element(y.copy()), - opt_spc.element(z.copy())) -sx, sy, sz = (simple_spc.element(x.copy()), simple_spc.element(y.copy()), - simple_spc.element(z.copy())) -if 'cuda' in odl.space.entry_points.tensor_space_impl_names(): - cu_spc = odl.rn(n, impl='cuda') - cx, cy, cz = (cu_spc.element(x.copy()), cu_spc.element(y.copy()), - cu_spc.element(z.copy())) - -print(" lincomb:") -with timer("SimpleRn"): - for _ in range(iterations): - simple_spc.lincomb(2.13, sx, 3.14, sy, out=sz) -print("result: {}".format(sz[1:5])) - -with timer("odl numpy"): - for _ in range(iterations): - opt_spc.lincomb(2.13, ox, 3.14, oy, out=oz) -print("result: {}".format(oz[1:5])) - -if 'cuda' in odl.space.entry_points.tensor_space_impl_names(): - with timer("odl cuda"): - for _ in range(iterations): - cu_spc.lincomb(2.13, cx, 3.14, cy, out=cz) - print("result: {}".format(cz[1:5])) - - -print("\n Norm:") -with timer("SimpleRn"): - for _ in range(iterations): - result = sz.norm() -print("result: {}".format(result)) - -with timer("odl numpy"): - for _ in range(iterations): - result = oz.norm() -print("result: {}".format(result)) - -if 'cuda' in odl.space.entry_points.tensor_space_impl_names(): - with timer("odl cuda"): - for _ in range(iterations): - result = cz.norm() - print("result: {}".format(result)) - - -print("\n Inner:") -with timer("SimpleRn"): - for _ in range(iterations): - result = sz.inner(sx) -print("result: {}".format(result)) - -with timer("odl numpy"): - for _ in range(iterations): - result = oz.inner(ox) -print("result: {}".format(result)) - -if 'cuda' in odl.space.entry_points.tensor_space_impl_names(): - with timer("odl cuda"): - for _ in range(iterations): - result = cz.inner(cx) - print("result: {}".format(result)) diff --git a/examples/space/vectorization.py b/examples/space/vectorization.py index 849bacf9b50..242654d4acd 100644 --- a/examples/space/vectorization.py +++ b/examples/space/vectorization.py @@ -5,13 +5,13 @@ import numpy as np import odl -from odl.discr.discr_utils import sampling_function +from odl.core.discr.discr_utils import sampling_function def performance_example(): # Simple function, already supports vectorization f_vec = sampling_function( - lambda x: x ** 2, domain=odl.IntervalProd(0, 1) + lambda x: x ** 2, domain=odl.IntervalProd(0, 1), out_dtype='float32' ) # Vectorized with NumPy's poor man's vectorization function diff --git a/examples/tomo/anisotropic_voxels.py b/examples/tomo/anisotropic_voxels.py index e6022042a10..9e634050226 100644 --- a/examples/tomo/anisotropic_voxels.py +++ b/examples/tomo/anisotropic_voxels.py @@ -18,13 +18,13 @@ angle_partition = odl.uniform_partition(0, np.pi, 180) # Detector: uniformly sampled, n = (500, 500), min = (-30, -30), max = (30, 30) detector_partition = odl.uniform_partition([-30, -30], [30, 30], [500, 500]) -geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition) # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/backends/astra_performance_cpu_parallel_2d_cg.py b/examples/tomo/backends/astra_performance_cpu_parallel_2d_cg.py index 46d85362db1..2863badd187 100644 --- a/examples/tomo/backends/astra_performance_cpu_parallel_2d_cg.py +++ b/examples/tomo/backends/astra_performance_cpu_parallel_2d_cg.py @@ -12,9 +12,9 @@ import astra import numpy as np import matplotlib.pyplot as plt -import scipy.misc +import skimage import odl -from odl.util.testutils import timer +from odl.core.util.testutils import timer # Common geometry parameters @@ -23,7 +23,7 @@ n_angles = 180 det_size = 362 niter = 20 -phantom = np.rot90(scipy.misc.ascent().astype('float'), -1) +phantom = np.rot90(skimage.data.camera().astype('float'), -1) # --- ASTRA --- @@ -70,13 +70,13 @@ # --- ODL --- # Create reconstruction space -reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size) +reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size,dtype='float32') # Create geometry -geometry = odl.tomo.parallel_beam_geometry(reco_space, n_angles, det_size) +geometry = odl.applications.tomo.parallel_beam_geometry(reco_space, n_angles, det_size) # Create ray transform -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cpu') +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl='astra_cpu') # Create sinogram data = ray_trafo(phantom) diff --git a/examples/tomo/backends/astra_performance_cuda_cone_3d_cg.py b/examples/tomo/backends/astra_performance_cuda_cone_3d_cg.py index bf40a724d97..8b3cf908415 100644 --- a/examples/tomo/backends/astra_performance_cuda_cone_3d_cg.py +++ b/examples/tomo/backends/astra_performance_cuda_cone_3d_cg.py @@ -13,7 +13,7 @@ import numpy as np import matplotlib.pyplot as plt import odl -from odl.util.testutils import timer +from odl.core.util.testutils import timer # Common geometry parameters @@ -24,17 +24,17 @@ niter = 10 # Create reconstruction space -reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size) +reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size, dtype='float32') # Create geometry apart = odl.uniform_partition(0, 2 * np.pi, n_angles) dpart = odl.uniform_partition([-500, -500], [500, 500], [det_size, det_size]) -geometry = odl.tomo.ConeBeamGeometry(apart, dpart, +geometry = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_radius=500, det_radius=500) -phantom = odl.phantom.shepp_logan(reco_space, modified=True).asarray() +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True).asarray() # --- ASTRA --- @@ -42,7 +42,7 @@ astra_vol_geom = astra.create_vol_geom(*domain_size) det_row_count = geometry.det_partition.shape[1] det_col_count = geometry.det_partition.shape[0] -vec = odl.tomo.backends.astra_setup.astra_conebeam_3d_geom_to_vec(geometry) +vec = odl.applications.tomo.backends.astra_setup.astra_conebeam_3d_geom_to_vec(geometry) astra_proj_geom = astra.create_proj_geom('cone_vec', det_row_count, det_col_count, vec) @@ -87,7 +87,7 @@ # --- ODL --- # Create ray transform -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') # Create sinogram data = ray_trafo(phantom) diff --git a/examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py b/examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py index 4d110488c9b..c269b112874 100644 --- a/examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py +++ b/examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py @@ -12,9 +12,9 @@ import astra import numpy as np import matplotlib.pyplot as plt -import scipy.misc +import skimage import odl -from odl.util.testutils import timer +from odl.core.util.testutils import timer # Common geometry parameters @@ -23,7 +23,7 @@ n_angles = 180 det_size = 362 niter = 50 -phantom = np.rot90(scipy.misc.ascent().astype('float'), -1) +phantom = np.rot90(skimage.data.camera().astype('float'), -1) # --- ASTRA --- @@ -70,13 +70,13 @@ # --- ODL --- # Create reconstruction space -reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size) +reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size, dtype='float32') # Create geometry -geometry = odl.tomo.parallel_beam_geometry(reco_space, n_angles, det_size) +geometry = odl.applications.tomo.parallel_beam_geometry(reco_space, n_angles, det_size) # Create ray transform -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') # Create sinogram data = ray_trafo(phantom) diff --git a/examples/tomo/checks/check_axes_cone2d_bp.py b/examples/tomo/checks/check_axes_cone2d_bp.py index 32ea63b03c9..15928359ac6 100644 --- a/examples/tomo/checks/check_axes_cone2d_bp.py +++ b/examples/tomo/checks/check_axes_cone2d_bp.py @@ -24,18 +24,18 @@ img_min_pt = -img_max_pt reco_space = odl.uniform_discr(img_min_pt + shift, img_max_pt + shift, img_shape, dtype='float32') -phantom = odl.phantom.indicate_proj_axis(reco_space) +phantom = odl.core.phantom.indicate_proj_axis(reco_space) assert np.allclose(reco_space.cell_sides, 1) # Make fan beam geometry with 360 angles src_radius = 500 det_radius = 1000 -geometry = odl.tomo.cone_beam_geometry(reco_space, src_radius, det_radius, +geometry = odl.applications.tomo.cone_beam_geometry(reco_space, src_radius, det_radius, num_angles=360) # Test back-projection -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) backproj = ray_trafo.adjoint(proj_data) backproj.show('Back-projection') diff --git a/examples/tomo/checks/check_axes_cone2d_fp.py b/examples/tomo/checks/check_axes_cone2d_fp.py index 5b33b83fb69..36217527770 100644 --- a/examples/tomo/checks/check_axes_cone2d_fp.py +++ b/examples/tomo/checks/check_axes_cone2d_fp.py @@ -29,7 +29,7 @@ img_min_pt = -img_max_pt reco_space = odl.uniform_discr(img_min_pt + shift, img_max_pt + shift, img_shape, dtype='float32') -phantom = odl.phantom.indicate_proj_axis(reco_space) +phantom = odl.core.phantom.indicate_proj_axis(reco_space) assert np.allclose(reco_space.cell_sides, 1) @@ -50,17 +50,17 @@ assert np.allclose(detector_partition.cell_sides, 1) # Sum manually using Numpy -sum_along_x = np.sum(phantom, axis=0) -sum_along_y = np.sum(phantom, axis=1) +sum_along_x = odl.sum(phantom, axis=0) +sum_along_y = odl.sum(phantom, axis=1) -geometry = odl.tomo.FanBeamGeometry(angle_partition, detector_partition, +geometry = odl.applications.tomo.FanBeamGeometry(angle_partition, detector_partition, src_radius, det_radius) # Check initial configuration assert np.allclose(geometry.det_axis_init, [1, 0]) assert np.allclose(geometry.src_to_det_init, [0, 1]) # Create projections -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) diff --git a/examples/tomo/checks/check_axes_cone3d_bp.py b/examples/tomo/checks/check_axes_cone3d_bp.py index 1b5d35cf3ad..466266ddf1f 100644 --- a/examples/tomo/checks/check_axes_cone3d_bp.py +++ b/examples/tomo/checks/check_axes_cone3d_bp.py @@ -26,7 +26,7 @@ vol_min_pt = -vol_max_pt reco_space = odl.uniform_discr(vol_min_pt + shift, vol_max_pt + shift, vol_shape, dtype='float32') -phantom = odl.phantom.indicate_proj_axis(reco_space) +phantom = odl.core.phantom.indicate_proj_axis(reco_space) assert np.allclose(reco_space.cell_sides, 1) @@ -49,12 +49,12 @@ # %% Test case 1: Axis = [0, 0, 1] -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius, det_radius, axis=[0, 0, 1]) # Create projections and back-projection -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) backproj = ray_trafo.adjoint(proj_data) backproj.show('Backprojection, Axis = [0, 0, 1], Middle Z Slice', @@ -66,12 +66,12 @@ # %% Test case 2: Axis = [0, 1, 0] -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius, det_radius, axis=[0, 1, 0]) # Create projections and back-projection -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) backproj = ray_trafo.adjoint(proj_data) backproj.show('Backprojection, Axis = [0, 1, 0], Middle Y Slice', @@ -83,12 +83,12 @@ # %% Test case 3: Axis = [1, 0, 0] -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius, det_radius, axis=[1, 0, 0]) # Create projections and back-projection -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) backproj = ray_trafo.adjoint(proj_data) backproj.show('Backprojection, Axis = [1, 0, 0], Almost Max X Slice', diff --git a/examples/tomo/checks/check_axes_cone3d_fp.py b/examples/tomo/checks/check_axes_cone3d_fp.py index cd52450cba2..778aa67c3d1 100644 --- a/examples/tomo/checks/check_axes_cone3d_fp.py +++ b/examples/tomo/checks/check_axes_cone3d_fp.py @@ -29,7 +29,7 @@ vol_min_pt = -vol_max_pt reco_space = odl.uniform_discr(vol_min_pt + shift, vol_max_pt + shift, vol_shape, dtype='float32') -phantom = odl.phantom.indicate_proj_axis(reco_space) +phantom = odl.core.phantom.indicate_proj_axis(reco_space) assert np.allclose(reco_space.cell_sides, 1) @@ -50,15 +50,15 @@ assert np.allclose(detector_partition.cell_sides, 1) # Sum manually using Numpy -sum_along_x = np.sum(phantom, axis=0) -sum_along_y = np.sum(phantom, axis=1) -sum_along_z = np.sum(phantom, axis=2) +sum_along_x = odl.sum(phantom, axis=0) +sum_along_y = odl.sum(phantom, axis=1) +sum_along_z = odl.sum(phantom, axis=2) # %% Test case 1: axis = [0, 0, 1] -- setup -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius, det_radius, axis=[0, 0, 1]) # Check initial configuration @@ -67,7 +67,7 @@ assert np.allclose(geometry.src_to_det_init, [0, 1, 0]) # Create projections -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) @@ -78,7 +78,7 @@ # axis = [0, 0, 1], 0 degrees proj_data.show(indices=[0, None, None], title='Projection at 0 Degrees, Axis [0, 0, 1], u = x, v = z') -sum_along_y.show('Sum Along Y Axis') +# sum_along_y.show('Sum Along Y Axis') # Check axes in geometry axes_sum_y = geometry.det_axes(np.deg2rad(0)) assert np.allclose(axes_sum_y[0], [1, 0, 0]) @@ -92,7 +92,7 @@ # axis = [0, 0, 1], 90 degrees proj_data.show(indices=[1, None, None], title='Projection at 90 Degrees, Axis [0, 0, 1], u = y, v = z') -sum_along_x.show('Sum Along X Axis') +# sum_along_x.show('Sum Along X Axis') # Check axes in geometry axes_sum_x = geometry.det_axes(np.deg2rad(90)) assert np.allclose(axes_sum_x[0], [0, 1, 0]) @@ -102,7 +102,7 @@ # %% Test case 2: axis = [0, 1, 0] -- setup -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius, det_radius, axis=[0, 1, 0]) # Check initial configuration @@ -111,7 +111,7 @@ assert np.allclose(geometry.src_to_det_init, [0, 0, -1]) # Create projections -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) @@ -122,7 +122,7 @@ # axis = [0, 1, 0], 0 degrees proj_data.show(indices=[0, None, None], title='Projection at 0 Degrees, Axis [0, 1, 0], u = x, v = y') -sum_along_z.show('Sum along z axis') +# sum_along_z.show('Sum along z axis') # Check geometry axes axes_sum_z = geometry.det_axes(np.deg2rad(0)) assert np.allclose(axes_sum_z[0], [1, 0, 0]) @@ -151,7 +151,7 @@ # %% Test case 3: axis = [1, 0, 0] -- setup -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius, det_radius, axis=[1, 0, 0]) # Check initial configuration @@ -160,7 +160,7 @@ assert np.allclose(geometry.src_to_det_init, [0, 1, 0]) # Create projections -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) diff --git a/examples/tomo/checks/check_axes_parallel2d_bp.py b/examples/tomo/checks/check_axes_parallel2d_bp.py index 9e7bb9356d8..6a5dc093f8d 100644 --- a/examples/tomo/checks/check_axes_parallel2d_bp.py +++ b/examples/tomo/checks/check_axes_parallel2d_bp.py @@ -24,15 +24,15 @@ img_min_pt = -img_max_pt reco_space = odl.uniform_discr(img_min_pt + shift, img_max_pt + shift, img_shape, dtype='float32') -phantom = odl.phantom.indicate_proj_axis(reco_space) +phantom = odl.core.phantom.indicate_proj_axis(reco_space) assert np.allclose(reco_space.cell_sides, 1) # Make parallel beam geometry with 360 angles -geometry = odl.tomo.parallel_beam_geometry(reco_space, num_angles=360) +geometry = odl.applications.tomo.parallel_beam_geometry(reco_space, num_angles=360) # Test back-projection -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) backproj = ray_trafo.adjoint(proj_data) backproj.show('Back-projection') diff --git a/examples/tomo/checks/check_axes_parallel2d_fp.py b/examples/tomo/checks/check_axes_parallel2d_fp.py index 2844afb0e7a..552e8d4b8dc 100644 --- a/examples/tomo/checks/check_axes_parallel2d_fp.py +++ b/examples/tomo/checks/check_axes_parallel2d_fp.py @@ -27,7 +27,7 @@ img_min_pt = -img_max_pt reco_space = odl.uniform_discr(img_min_pt + shift, img_max_pt + shift, img_shape, dtype='float32') -phantom = odl.phantom.indicate_proj_axis(reco_space) +phantom = odl.core.phantom.indicate_proj_axis(reco_space) assert np.allclose(reco_space.cell_sides, 1) @@ -45,20 +45,20 @@ assert np.allclose(detector_partition.cell_sides, 1) # Sum manually using Numpy -sum_along_x = np.sum(phantom, axis=0) -sum_along_y = np.sum(phantom, axis=1) +sum_along_x = odl.sum(phantom, axis=0) +sum_along_y = odl.sum(phantom, axis=1) # %% Test forward projection along y axis -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Check initial configuration assert np.allclose(geometry.det_axis_init, [1, 0]) assert np.allclose(geometry.det_pos_init, [0, 1]) # Create projections -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) # Axis in this image is x. This corresponds to 0 degrees. diff --git a/examples/tomo/checks/check_axes_parallel3d_bp.py b/examples/tomo/checks/check_axes_parallel3d_bp.py index 1267689d768..ee6ffc38496 100644 --- a/examples/tomo/checks/check_axes_parallel3d_bp.py +++ b/examples/tomo/checks/check_axes_parallel3d_bp.py @@ -26,7 +26,7 @@ vol_min_pt = -vol_max_pt reco_space = odl.uniform_discr(vol_min_pt + shift, vol_max_pt + shift, vol_shape, dtype='float32') -phantom = odl.phantom.indicate_proj_axis(reco_space) +phantom = odl.core.phantom.indicate_proj_axis(reco_space) assert np.allclose(reco_space.cell_sides, 1) @@ -46,11 +46,11 @@ # %% Test case 1: axis = [0, 0, 1] -geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, +geometry = odl.applications.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, axis=[0, 0, 1]) # Create projections and back-projection -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) backproj = ray_trafo.adjoint(proj_data) backproj.show('Backprojection, Axis = [0, 0, 1], Middle Z Slice', @@ -62,11 +62,11 @@ # %% Test case 2: axis = [0, 1, 0] -geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, +geometry = odl.applications.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, axis=[0, 1, 0]) # Create projections and back-projection -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) backproj = ray_trafo.adjoint(proj_data) backproj.show('Backprojection, Axis = [0, 1, 0], Middle Y Slice', @@ -78,11 +78,11 @@ # %% Test case 3: axis = [1, 0, 0] -geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, +geometry = odl.applications.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, axis=[1, 0, 0]) # Create projections and back-projection -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) backproj = ray_trafo.adjoint(proj_data) backproj.show('Backprojection, Axis = [1, 0, 0], Almost Max X Slice', diff --git a/examples/tomo/checks/check_axes_parallel3d_fp.py b/examples/tomo/checks/check_axes_parallel3d_fp.py index 9776f50e5a6..a44d0c1dd24 100644 --- a/examples/tomo/checks/check_axes_parallel3d_fp.py +++ b/examples/tomo/checks/check_axes_parallel3d_fp.py @@ -25,7 +25,7 @@ vol_min_pt = -vol_max_pt reco_space = odl.uniform_discr(vol_min_pt + shift, vol_max_pt + shift, vol_shape, dtype='float32') -phantom = odl.phantom.indicate_proj_axis(reco_space) +phantom = odl.core.phantom.indicate_proj_axis(reco_space) assert np.allclose(reco_space.cell_sides, 1) @@ -43,15 +43,15 @@ assert np.allclose(detector_partition.cell_sides, 1) # Sum manually using Numpy -sum_along_x = np.sum(phantom, axis=0) -sum_along_y = np.sum(phantom, axis=1) -sum_along_z = np.sum(phantom, axis=2) +sum_along_x = odl.sum(phantom, axis=0) +sum_along_y = odl.sum(phantom, axis=1) +sum_along_z = odl.sum(phantom, axis=2) # %% Test case 1: axis = [0, 0, 1] -- setup -geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, +geometry = odl.applications.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, axis=[0, 0, 1]) # Check initial configuration assert np.allclose(geometry.det_axes_init[0], [1, 0, 0]) @@ -59,7 +59,7 @@ assert np.allclose(geometry.det_pos_init, [0, 1, 0]) # Create projections -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) @@ -70,7 +70,7 @@ # axis = [0, 0, 1], 0 degrees proj_data.show(indices=[0, None, None], title='Projection at 0 Degrees, Axis [0, 0, 1], u = x, v = z') -sum_along_y.show('Sum Along Y Axis') +# sum_along_y.show('Sum Along Y Axis') # Check axes in geometry axes_sum_y = geometry.det_axes(np.deg2rad(0)) assert np.allclose(axes_sum_y[0], [1, 0, 0]) @@ -84,7 +84,7 @@ # axis = [0, 0, 1], 90 degrees proj_data.show(indices=[1, None, None], title='Projection at 90 Degrees, Axis [0, 0, 1], u = y, v = z') -sum_along_x.show('Sum Along X Axis') +# sum_along_x.show('Sum Along X Axis') # Check axes in geometry axes_sum_x = geometry.det_axes(np.deg2rad(90)) assert np.allclose(axes_sum_x[0], [0, 1, 0]) @@ -94,7 +94,7 @@ # %% Test case 2: axis = [0, 1, 0] -- setup -geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, +geometry = odl.applications.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, axis=[0, 1, 0]) # Check initial configuration assert np.allclose(geometry.det_axes_init[0], [1, 0, 0]) @@ -102,7 +102,7 @@ assert np.allclose(geometry.det_pos_init, [0, 0, -1]) # Create projections -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) @@ -113,7 +113,7 @@ # axis = [0, 1, 0], 0 degrees proj_data.show(indices=[0, None, None], title='Projection at 0 Degrees, Axis [0, 1, 0], u = x, v = y') -sum_along_z.show('Sum Along Z Axis') +# sum_along_z.show('Sum Along Z Axis') # Check geometry axes axes_sum_z = geometry.det_axes(np.deg2rad(0)) assert np.allclose(axes_sum_z[0], [1, 0, 0]) @@ -142,7 +142,7 @@ # %% Test case 3: axis = [1, 0, 0] -- setup -geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, +geometry = odl.applications.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition, axis=[1, 0, 0]) # Check initial configuration assert np.allclose(geometry.det_axes_init[0], [0, 0, -1]) @@ -150,7 +150,7 @@ assert np.allclose(geometry.det_pos_init, [0, 1, 0]) # Create projections -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl=impl) proj_data = ray_trafo(phantom) diff --git a/examples/tomo/filtered_backprojection_cone_2d.py b/examples/tomo/filtered_backprojection_cone_2d.py index 5ee53307a58..7bbba64f5c1 100644 --- a/examples/tomo/filtered_backprojection_cone_2d.py +++ b/examples/tomo/filtered_backprojection_cone_2d.py @@ -25,7 +25,7 @@ # Detector: uniformly sampled, n = 512, min = -60, max = 60 detector_partition = odl.uniform_partition(-60, 60, 512) # Geometry with large fan angle -geometry = odl.tomo.FanBeamGeometry( +geometry = odl.applications.tomo.FanBeamGeometry( angle_partition, detector_partition, src_radius=40, det_radius=40) @@ -33,19 +33,19 @@ # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create FBP operator using utility function # We select a Hann filter, and only use the lowest 80% of frequencies to avoid # high frequency noise. -fbp = odl.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.8) +fbp = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.8) # --- Show some examples --- # # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/filtered_backprojection_cone_2d_short_scan.py b/examples/tomo/filtered_backprojection_cone_2d_short_scan.py index 8464ff7b8bb..8956329eaed 100644 --- a/examples/tomo/filtered_backprojection_cone_2d_short_scan.py +++ b/examples/tomo/filtered_backprojection_cone_2d_short_scan.py @@ -29,7 +29,7 @@ # Detector: uniformly sampled, n = 512, min = -40, max = 40 detector_partition = odl.uniform_partition(-40, 40, 512) # Geometry with large fan angle -geometry = odl.tomo.FanBeamGeometry( +geometry = odl.applications.tomo.FanBeamGeometry( angle_partition, detector_partition, src_radius=80, det_radius=40) @@ -37,15 +37,15 @@ # Ray transform (= forward projection). We use the ASTRA CUDA backend. -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') # Create FBP operator using utility function # We select a Hann filter, and only use the lowest 80% of frequencies to avoid # high frequency noise. -fbp = odl.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.8) +fbp = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.8) # Apply parker weighting in order to improve reconstruction -parker_weighting = odl.tomo.parker_weighting(ray_trafo) +parker_weighting = odl.applications.tomo.parker_weighting(ray_trafo) parker_weighting.show() parker_weighted_fbp = fbp * parker_weighting @@ -54,7 +54,7 @@ # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/filtered_backprojection_cone_3d.py b/examples/tomo/filtered_backprojection_cone_3d.py index c03c00ae088..ff881324552 100644 --- a/examples/tomo/filtered_backprojection_cone_3d.py +++ b/examples/tomo/filtered_backprojection_cone_3d.py @@ -25,7 +25,7 @@ # Detector: uniformly sampled, n = (512, 512), min = (-40, -40), max = (40, 40) detector_partition = odl.uniform_partition([-40, -40], [40, 40], [512, 512]) # Geometry with large cone and fan angle and tilted axis. -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius=40, det_radius=40, axis=[1, 1, 1]) @@ -34,12 +34,12 @@ # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create FBP operator using utility function # We select a Shepp-Logan filter, and only use the lowest 80% of frequencies to # avoid high frequency noise. -fbp = odl.tomo.fbp_op(ray_trafo, +fbp = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Shepp-Logan', frequency_scaling=0.8) @@ -47,7 +47,7 @@ # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/filtered_backprojection_cone_3d_short_scan.py b/examples/tomo/filtered_backprojection_cone_3d_short_scan.py index 984275390fd..4559e3ae425 100644 --- a/examples/tomo/filtered_backprojection_cone_3d_short_scan.py +++ b/examples/tomo/filtered_backprojection_cone_3d_short_scan.py @@ -32,7 +32,7 @@ # Detector: uniformly sampled, n = (512, 512), min = (-60, -60), max = (60, 60) detector_partition = odl.uniform_partition([-60, -60], [60, 60], [512, 512]) # Geometry with large cone and fan angle and tilted axis. -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius=80, det_radius=40) @@ -40,16 +40,16 @@ # Ray transform (= forward projection). We use the ASTRA CUDA backend. -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') # Create FBP operator using utility function # We select a Shepp-Logan filter, and only use the lowest 80% of frequencies to # avoid high frequency noise. -fbp = odl.tomo.fbp_op(ray_trafo, +fbp = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Shepp-Logan', frequency_scaling=0.8) # Apply parker weighting in order to improve reconstruction -parker_weighting = odl.tomo.parker_weighting(ray_trafo) +parker_weighting = odl.applications.tomo.parker_weighting(ray_trafo) parker_weighted_fbp = fbp * parker_weighting @@ -57,7 +57,7 @@ # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/filtered_backprojection_cone_circular_2d.py b/examples/tomo/filtered_backprojection_cone_circular_2d.py new file mode 100644 index 00000000000..f6207efb55e --- /dev/null +++ b/examples/tomo/filtered_backprojection_cone_circular_2d.py @@ -0,0 +1,60 @@ +""" +Example using a filtered back-projection (FBP) in fan beam using `fbp_op`. + +Note that the FBP is only approximate in this geometry, but still gives a +decent reconstruction that can be used as an initial guess in more complicated +methods. +""" + +import numpy as np +import odl + + +# --- Set up geometry of the problem --- # + + +# Reconstruction space: discretized functions on the cube +# [-20, 20]^2 with 300 samples per dimension. +reco_space = odl.uniform_discr( + min_pt=[-20, -20], max_pt=[20, 20], shape=[300, 300], + dtype='float32') + +# Make a circular cone beam geometry with flat detector +# Angles: uniformly spaced, n = 360, min = 0, max = 2 * pi +angle_partition = odl.uniform_partition(0, 2 * np.pi, 360) +# Detector: uniformly sampled, n = 512, min = -60, max = 60 +detector_partition = odl.uniform_partition(-60, 60, 512) +# Geometry with large fan angle +geometry = odl.applications.tomo.FanBeamGeometry( + angle_partition, detector_partition, src_radius=40, det_radius=40, det_curvature_radius=80) + + +# --- Create Filtered Back-projection (FBP) operator --- # + + +# Ray transform (= forward projection). +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) + +# Create FBP operator using utility function +# We select a Hann filter, and only use the lowest 80% of frequencies to avoid +# high frequency noise. +fbp = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.8) + + +# --- Show some examples --- # + + +# Create a discrete Shepp-Logan phantom (modified version) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) + +# Create projection data by calling the ray transform on the phantom +proj_data = ray_trafo(phantom) + +# Calculate filtered back-projection of data +fbp_reconstruction = fbp(proj_data) + +# Shows a slice of the phantom, projections, and reconstruction +phantom.show(title='Phantom') +proj_data.show(title='Projection Data (Sinogram)') +fbp_reconstruction.show(title='Filtered Back-projection') +(phantom - fbp_reconstruction).show(title='Error', force_show=True) diff --git a/examples/tomo/filtered_backprojection_helical_3d.py b/examples/tomo/filtered_backprojection_helical_3d.py index 503f2c558ea..589f229e7d6 100644 --- a/examples/tomo/filtered_backprojection_helical_3d.py +++ b/examples/tomo/filtered_backprojection_helical_3d.py @@ -23,7 +23,7 @@ dtype='float32') # Create helical geometry -geometry = odl.tomo.helical_geometry(space, +geometry = odl.applications.tomo.helical_geometry(space, src_radius=100, det_radius=100, num_turns=7.5, num_angles=1000) @@ -31,22 +31,22 @@ # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(space, geometry) # Unwindowed fbp # We select a Hamming filter, and only use the lowest 80% of frequencies to # avoid high frequency noise. -fbp = odl.tomo.fbp_op(ray_trafo, filter_type='Hamming', frequency_scaling=0.8) +fbp = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Hamming', frequency_scaling=0.8) # Create Tam-Danielson window to improve result -windowed_fbp = fbp * odl.tomo.tam_danielson_window(ray_trafo) +windowed_fbp = fbp * odl.applications.tomo.tam_danielson_window(ray_trafo) # --- Show some examples --- # # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/filtered_backprojection_parallel_2d.py b/examples/tomo/filtered_backprojection_parallel_2d.py index 2447a91a68b..0861d5b5dc5 100644 --- a/examples/tomo/filtered_backprojection_parallel_2d.py +++ b/examples/tomo/filtered_backprojection_parallel_2d.py @@ -31,14 +31,14 @@ detector_partition = odl.uniform_partition(-30, 30, 500) # Make a parallel beam geometry with flat detector -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # --- Create Filtered Back-projection (FBP) operator --- # # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Fourier transform in detector direction fourier = odl.trafos.FourierTransform(ray_trafo.range, axes=[1]) @@ -58,7 +58,7 @@ # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/filtered_backprojection_parallel_2d_complex.py b/examples/tomo/filtered_backprojection_parallel_2d_complex.py index 5d443fdc525..e2e5a906c69 100644 --- a/examples/tomo/filtered_backprojection_parallel_2d_complex.py +++ b/examples/tomo/filtered_backprojection_parallel_2d_complex.py @@ -27,25 +27,25 @@ detector_partition = odl.uniform_partition(-30, 30, 500) # Make a parallel beam geometry with flat detector -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # --- Create Filtered Back-projection (FBP) operator --- # # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') # Create filtered back-projection operator -fbp = odl.tomo.fbp_op(ray_trafo) +fbp = odl.applications.tomo.fbp_op(ray_trafo) # --- Show some examples --- # # Create a discrete Shepp-Logan phantom (modified version) -phantom = (odl.phantom.shepp_logan(reco_space, modified=True) + - 1j * odl.phantom.cuboid(reco_space)) +phantom = (odl.core.phantom.shepp_logan(reco_space, modified=True) + + 1j * odl.core.phantom.cuboid(reco_space)) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/filtered_backprojection_parallel_3d.py b/examples/tomo/filtered_backprojection_parallel_3d.py index 19bf4d9aadb..4d0acbd3f46 100644 --- a/examples/tomo/filtered_backprojection_parallel_3d.py +++ b/examples/tomo/filtered_backprojection_parallel_3d.py @@ -22,7 +22,7 @@ # Detector: uniformly sampled, n = (512, 512), min = (-40, -40), max = (40, 40) detector_partition = odl.uniform_partition([-40, -40], [40, 40], [512, 512]) # Geometry with tilted axis. -geometry = odl.tomo.Parallel3dAxisGeometry( +geometry = odl.applications.tomo.Parallel3dAxisGeometry( angle_partition, detector_partition, axis=[1, 1, 1]) @@ -30,19 +30,19 @@ # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create FBP operator using utility function # We select a Hann filter, and only use the lowest 80% of frequencies to # avoid high frequency noise. -fbp = odl.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.8) +fbp = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.8) # --- Show some examples --- # # Create a Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/ray_trafo_cone_2d.py b/examples/tomo/ray_trafo_cone_2d.py index f2819659144..7a0797da227 100644 --- a/examples/tomo/ray_trafo_cone_2d.py +++ b/examples/tomo/ray_trafo_cone_2d.py @@ -13,14 +13,14 @@ angle_partition = odl.uniform_partition(0, 2 * np.pi, 360) # Detector: uniformly sampled, n = 512, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 512) -geometry = odl.tomo.FanBeamGeometry(angle_partition, detector_partition, +geometry = odl.applications.tomo.FanBeamGeometry(angle_partition, detector_partition, src_radius=1000, det_radius=100) # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/ray_trafo_cone_3d.py b/examples/tomo/ray_trafo_cone_3d.py index 1010f8f5bb7..bede49545eb 100644 --- a/examples/tomo/ray_trafo_cone_3d.py +++ b/examples/tomo/ray_trafo_cone_3d.py @@ -14,15 +14,15 @@ angle_partition = odl.uniform_partition(0, 2 * np.pi, 360) # Detector: uniformly sampled, n = (512, 512), min = (-30, -30), max = (30, 30) detector_partition = odl.uniform_partition([-30, -30], [30, 30], [512, 512]) -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius=1000, det_radius=100, axis=[1, 0, 0]) # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, True) +phantom = odl.core.phantom.shepp_logan(reco_space, True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/ray_trafo_helical_cone_3d.py b/examples/tomo/ray_trafo_helical_cone_3d.py index 617ec733e4e..5514c5ab9bb 100644 --- a/examples/tomo/ray_trafo_helical_cone_3d.py +++ b/examples/tomo/ray_trafo_helical_cone_3d.py @@ -15,15 +15,15 @@ # Detector: uniformly sampled, n = (512, 64), min = (-50, -3), max = (50, 3) detector_partition = odl.uniform_partition([-50, -3], [50, 3], [512, 64]) # Spiral has a pitch of 5, we run 8 rounds (due to max angle = 8 * 2 * pi) -geometry = odl.tomo.ConeBeamGeometry( +geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius=100, det_radius=100, pitch=5.0) # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/ray_trafo_helical_cone_spherical_3d.py b/examples/tomo/ray_trafo_helical_cone_spherical_3d.py new file mode 100644 index 00000000000..1b521d9910b --- /dev/null +++ b/examples/tomo/ray_trafo_helical_cone_spherical_3d.py @@ -0,0 +1,41 @@ +"""Example using the ray transform with helical cone beam geometry.""" + +import numpy as np +import odl + +# Reconstruction space: discretized functions on the cube +# [-20, 20]^2 x [0, 40] with 300 samples per dimension. +reco_space = odl.uniform_discr( + min_pt=[-20, -20, 0], max_pt=[20, 20, 40], shape=[300, 300, 300], + dtype='float32') + +# Make a helical cone beam geometry with flat detector +# Angles: uniformly spaced, n = 2000, min = 0, max = 8 * 2 * pi +angle_partition = odl.uniform_partition(0, 8 * 2 * np.pi, 2000) +# Detector: uniformly sampled, n = (512, 64), min = (-50, -3), max = (50, 3) +detector_partition = odl.uniform_partition([-50, -3], [50, 3], [512, 64]) +# Spiral has a pitch of 5, we run 8 rounds (due to max angle = 8 * 2 * pi) +geometry = odl.applications.tomo.ConeBeamGeometry( + angle_partition, detector_partition, src_radius=100, det_radius=100, + pitch=5.0, det_curvature_radius=80) + +# Ray transform (= forward projection). +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) + +# Create a discrete Shepp-Logan phantom (modified version) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) + +# Create projection data by calling the ray transform on the phantom +proj_data = ray_trafo(phantom) + +# Back-projection can be done by simply calling the adjoint operator on the +# projection data (or any element in the projection space). +backproj = ray_trafo.adjoint(proj_data) + +# Shows a slice of the phantom, projections, and reconstruction +phantom.show(coords=[None, None, 20], title='Phantom, Middle Z Slice') +proj_data.show(coords=[2 * np.pi, None, None], + title='Projection After Exactly One Turn') +proj_data.show(coords=[None, None, 0], title='Sinogram, Middle Slice') +backproj.show(coords=[None, None, 20], title='Back-projection, Middle Z Slice', + force_show=True) diff --git a/examples/tomo/ray_trafo_parallel_2d.py b/examples/tomo/ray_trafo_parallel_2d.py index 52c6dcab638..6b147f5b7f8 100644 --- a/examples/tomo/ray_trafo_parallel_2d.py +++ b/examples/tomo/ray_trafo_parallel_2d.py @@ -13,13 +13,13 @@ angle_partition = odl.uniform_partition(0, np.pi, 180) # Detector: uniformly sampled, n = 512, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 512) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/ray_trafo_parallel_2d_complex.py b/examples/tomo/ray_trafo_parallel_2d_complex.py index 3ffbd9949bc..6ef947ce0cd 100644 --- a/examples/tomo/ray_trafo_parallel_2d_complex.py +++ b/examples/tomo/ray_trafo_parallel_2d_complex.py @@ -17,16 +17,16 @@ angle_partition = odl.uniform_partition(0, np.pi, 360) # Detector: uniformly sampled, n = 512, min = -30, max = 30 detector_partition = odl.uniform_partition(-30, 30, 512) -geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel2dGeometry(angle_partition, detector_partition) # Ray transform (= forward projection). The backend is set explicitly - # possible choices are 'astra_cpu', 'astra_cuda' and 'skimage'. -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create a discretized phantom that is a Shepp-Logan phantom in the real # part and a cuboid in the imaginary part -phantom = (odl.phantom.shepp_logan(reco_space, modified=True) + - 1j * odl.phantom.cuboid(reco_space)) +phantom = (odl.core.phantom.shepp_logan(reco_space, modified=True) + + 1j * odl.core.phantom.cuboid(reco_space)) # Create projection data by calling the ray transform on the phantom. # This is equivalent to evaluating the ray transform on the real and diff --git a/examples/tomo/ray_trafo_parallel_3d.py b/examples/tomo/ray_trafo_parallel_3d.py index 7c0cc9232eb..fb0820915e3 100644 --- a/examples/tomo/ray_trafo_parallel_3d.py +++ b/examples/tomo/ray_trafo_parallel_3d.py @@ -14,13 +14,13 @@ angle_partition = odl.uniform_partition(0, np.pi, 180) # Detector: uniformly sampled, n = (512, 512), min = (-30, -30), max = (30, 30) detector_partition = odl.uniform_partition([-30, -30], [30, 30], [512, 512]) -geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition) +geometry = odl.applications.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition) # Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry) # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) # Create projection data by calling the ray transform on the phantom proj_data = ray_trafo(phantom) diff --git a/examples/tomo/ray_trafo_parallel_3d_euler.py b/examples/tomo/ray_trafo_parallel_3d_euler.py deleted file mode 100644 index 013bed8d2dd..00000000000 --- a/examples/tomo/ray_trafo_parallel_3d_euler.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Example of using the ray transform in 3D parallel geometry with 2 angles. - -The `Parallel3dEulerGeometry` is defined in terms of 2 or 3 Euler angles -and is not aligned to a rotation axis. -""" - -import numpy as np -import odl - - -# Reconstruction space: discretized functions on the cube -# [-20, 20]^3 with 300 samples per dimension. -reco_space = odl.uniform_discr( - min_pt=[-20, -20, -20], max_pt=[20, 20, 20], shape=[300, 300, 300], - dtype='float32') - -# Make a parallel beam geometry with flat detector -# Angles: 20 x 20 Euler angles corresponding to an octant of the 3D unit sphere -angle_grid = odl.RectGrid(np.linspace(0, np.pi / 2, 20), - np.linspace(0, np.pi / 2, 20)) -angle_partition = odl.uniform_partition_fromgrid(angle_grid) - -# Detector: uniformly sampled, n = (500, 500), min = (-40, -40), max = (40, 40) -detector_partition = odl.uniform_partition([-40, -40], [40, 40], [500, 500]) -# Geometry with tilted axis. -geometry = odl.tomo.Parallel3dEulerGeometry(angle_partition, - detector_partition) - -# Ray transform (= forward projection). -ray_trafo = odl.tomo.RayTransform(reco_space, geometry) - -# Create a Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) - -# Create projection data by calling the ray transform on the phantom -proj_data = ray_trafo(phantom) - -# Calculate back-projection of the data -backproj = ray_trafo.adjoint(proj_data) - -# Show a slice of phantom, projections, and reconstruction -phantom.show(title='Phantom') -proj_data.show(title='Simulated Data: Sinogram for theta = 0 and v = 0', - coords=[None, 0, None, 0]) -proj_data.show(title='Simulated Data: Sinogram for phi = 0 and v = 0', - coords=[0, None, None, 0]) -proj_data.show(title='Simulated Data: "Cone Plot" for u = 0 and v = 0', - coords=[None, None, 0, 0]) -proj_data.show( - title='Simulated Data: Projection for phi = pi/4 and theta = pi/4', - coords=[np.pi / 4, np.pi / 4, None, None]) - -backproj.show(title='Back-projection, Slice z=0', force_show=True) diff --git a/examples/trafos/fourier_trafo.py b/examples/trafos/fourier_trafo.py index 123bab1313d..acafd110e5e 100644 --- a/examples/trafos/fourier_trafo.py +++ b/examples/trafos/fourier_trafo.py @@ -5,14 +5,14 @@ # Discretized space: discretized functions on the rectangle [-1, 1] x [-1, 1] # with 512 samples per dimension and complex data type (for full FT). -space = odl.uniform_discr([-1, -1], [1, 1], (512, 512), dtype='complex') +space = odl.uniform_discr([-1, -1], [1, 1], (512, 512), dtype='complex64') # Make the Fourier transform operator on this space. The range is calculated # automatically. The default backend is numpy.fft. ft_op = odl.trafos.FourierTransform(space) # Create a phantom and its Fourier transfrom and display them. -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) phantom.show(title='Shepp-Logan Phantom') phantom_ft = ft_op(phantom) phantom_ft.show(title='Full Fourier Transform') @@ -32,7 +32,7 @@ # its complex conjugate. This is faster and more memory efficient. real_space = space.real_space ft_op_halfc = odl.trafos.FourierTransform(real_space, halfcomplex=True) -phantom_real = odl.phantom.shepp_logan(real_space, modified=True) +phantom_real = odl.core.phantom.shepp_logan(real_space, modified=True) phantom_real.show(title='Shepp-Logan Phantom, Real Version') phantom_real_ft = ft_op_halfc(phantom_real) phantom_real_ft.show(title='Half-complex Fourier Transform') diff --git a/examples/trafos/fourier_trafo_pytorch.py b/examples/trafos/fourier_trafo_pytorch.py new file mode 100644 index 00000000000..aa75392e26e --- /dev/null +++ b/examples/trafos/fourier_trafo_pytorch.py @@ -0,0 +1,53 @@ +"""Simple example on the usage of the Fourier Transform.""" + +import odl + + +# Discretized space: discretized functions on the rectangle [-1, 1] x [-1, 1] +# with 512 samples per dimension and complex data type (for full FT). +space = odl.uniform_discr([-1, -1], [1, 1], (512, 512), dtype='complex', impl='pytorch') + +# Make the Fourier transform operator on this space. The range is calculated +# automatically. The default backend is numpy.fft. +ft_op = odl.trafos.FourierTransform(space) + +# Create a phantom and its Fourier transfrom and display them. +phantom = odl.core.phantom.shepp_logan(space, modified=True) +phantom.show(title='Shepp-Logan Phantom') +phantom_ft = ft_op(phantom) +phantom_ft.show(title='Full Fourier Transform', force_show=False) + +# Calculate the inverse transform. +phantom_ft_inv = ft_op.inverse(phantom_ft) +phantom_ft_inv.show(title='Full Fourier Transform Inverted') + +# Calculate the FT only along the first axis. +ft_op_axis0 = odl.trafos.FourierTransform(space, axes=0) +phantom_ft_axis0 = ft_op_axis0(phantom) +phantom_ft_axis0.show(title='Fourier transform Along Axis 0') + +# If a real space is used, the Fourier transform can be calculated in the +# "half-complex" mode. This means that along the last axis of the transform, +# only the negative half of the spectrum is stored since the other half is +# its complex conjugate. This is faster and more memory efficient. +real_space = space.real_space +ft_op_halfc = odl.trafos.FourierTransform(real_space, halfcomplex=True) +phantom_real = odl.core.phantom.shepp_logan(real_space, modified=True) +phantom_real.show(title='Shepp-Logan Phantom, Real Version') +phantom_real_ft = ft_op_halfc(phantom_real) +phantom_real_ft.show(title='Half-complex Fourier Transform') + +# If the space is real, the inverse also gives a real result. +phantom_real_ft_inv = ft_op_halfc.inverse(phantom_real_ft) +phantom_real_ft_inv.show(title='Half-complex Fourier Transform Inverted', + force_show=False) + +# The FT operator itself has no option of (zero-)padding, but it can be +# composed with a `ResizingOperator` which does exactly that. Note that the +# FT needs to be redefined on the enlarged space. +padding_op = odl.ResizingOperator(space, ran_shp=(768, 768)) +ft_op = odl.trafos.FourierTransform(padding_op.range) +padded_ft_op = ft_op * padding_op +print(f"{padded_ft_op.range.element().dtype=}") +phantom_ft_padded = padded_ft_op(phantom) +phantom_ft_padded.show('Padded FT of the Phantom', force_show=True) diff --git a/examples/trafos/wavelet_trafo.py b/examples/trafos/wavelet_trafo.py index 7fb4e3d2aa9..f5e75f9a5b4 100644 --- a/examples/trafos/wavelet_trafo.py +++ b/examples/trafos/wavelet_trafo.py @@ -11,7 +11,7 @@ wavelet_op = odl.trafos.WaveletTransform(space, wavelet='Haar', nlevels=2) # Create a phantom and its wavelet transfrom and display them. -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) phantom.show(title='Shepp-Logan Phantom') # Note that the wavelet transform is a vector in rn. diff --git a/examples/ufunc_ops/README.md b/examples/ufunc_ops/README.md deleted file mode 100644 index eebcc4a08e1..00000000000 --- a/examples/ufunc_ops/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Ufuncs examples - -These examples show how you can use the Universal Functions (UFuncs) in ODL. - -These allow you to create, e.g. the pointwise `sin` operator which can be used just like any other operator in ODL. - -## Basic usage examples - -Example | Purpose | Complexity -------- | ------- | ---------- -[`ufunc_basics.py`](ufunc_basics.py) | Create and call ufunc functionals | low -[`ufunc_composition.py`](ufunc_composition.py) | Compose ufuncs with other operators | low -[`ufunc_solvers.py`](ufunc_solvers.py) | Demonstrate how to use ufuncs in optimization | low \ No newline at end of file diff --git a/examples/ufunc_ops/ufunc_basics.py b/examples/ufunc_ops/ufunc_basics.py deleted file mode 100644 index cd2fb583579..00000000000 --- a/examples/ufunc_ops/ufunc_basics.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Basic examples of using the ufunc functionals in ODL.""" - -import odl - - -# Trigonometric functions can be computed, along with their gradients. - - -cos = odl.ufunc_ops.cos() -sin = odl.ufunc_ops.sin() - -# Compute cosine and its gradient - -print('cos(0)={}, cos.gradient(0.2)={}, -sin(0.2)={}'.format( - cos(0), cos.gradient(0.2), -sin(0.2))) - - -# Other functions include the square, exponential, etc -# Higher order derivatives are obtained via the gradient of the gradient, etc. - -square = odl.ufunc_ops.square() - -print('[x^2](3) = {}, [d/dx x^2](3) = {}, ' - '[d^2/dx^2 x^2](3) = {}, [d^3/dx^3 x^2](3) = {}' - ''.format(square(3), square.gradient(3), - square.gradient.gradient(3), - square.gradient.gradient.gradient(3))) - -# Can also define ufuncs on vector-spaces, then they act pointwise. - -r3 = odl.rn(3) -exp_r3 = odl.ufunc_ops.exp(r3) -print('e^[1, 2, 3] = {}'.format(exp_r3([1, 2, 3]))) diff --git a/examples/ufunc_ops/ufunc_composition.py b/examples/ufunc_ops/ufunc_composition.py deleted file mode 100644 index 7c2306244a2..00000000000 --- a/examples/ufunc_ops/ufunc_composition.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Examples of composing Ufuncs. - -Here we demonstrate how the functionals can be composed with other operators -and functionals in order to achieve more complicated functions. - -We create the L2-norm squared in two ways, first using the built in -L2NormSquared functional, and also by composing the square ufunc with the -L2Norm functional. - -We also demonstrate that we can do this pointwise. -""" - -import odl - -# Create square functional. It's domain is by default the real numbers. -square = odl.ufunc_ops.square() - -# Create L2 norm functionals -space = odl.rn(3) -l2_norm = odl.solvers.L2Norm(space) -l2_norm_squared_comp = square * odl.solvers.L2Norm(space) -l2_norm_squared_raw = odl.solvers.L2NormSquared(space) - -# Evaluate in a point and see that the results are equal -x = space.element([1, 2, 3]) - -print('composed ||x||^2 = {}'.format(l2_norm_squared_comp(x))) -print('raw ||x||^2 = {}'.format(l2_norm_squared_raw(x))) - -# The usual properties like gradients follow as expected -print('composed grad ||x||^2 = {}'.format(l2_norm_squared_comp.gradient(x))) -print('raw grad ||x||^2 = {}'.format(l2_norm_squared_raw.gradient(x))) - - -# The above can also be done using pointwise UFuncs -square = odl.ufunc_ops.square(space) - -l2_norm_composed_pointwise = l2_norm * square -print('composed ||x^2|| = {}'.format(l2_norm_composed_pointwise(x))) -print('raw ||x^2|| = {}'.format(l2_norm(x ** 2))) diff --git a/examples/ufunc_ops/ufunc_solvers.py b/examples/ufunc_ops/ufunc_solvers.py deleted file mode 100644 index 56a5b2e43f3..00000000000 --- a/examples/ufunc_ops/ufunc_solvers.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Examples of using UFuncs in optimization. - -Here, we minimize the logarithm of the rosenbrock function: - - min_x log(rosenbrock(x) + 0.1) -""" - -import odl - -# Create space and functionals -r2 = odl.rn(2) -rosenbrock = odl.solvers.RosenbrockFunctional(r2, scale=2.0) -log = odl.ufunc_ops.log() - -# Create goal functional by composing log with rosenbrock and add 0.1 to -# avoid singularity at 0 -opt_fun = log * (rosenbrock + 0.1) - -# Solve problem using steepest descent with backtracking line search, -# starting in the point x = [0, 0] -line_search = odl.solvers.BacktrackingLineSearch(opt_fun) - -x = opt_fun.domain.zero() -odl.solvers.steepest_descent(opt_fun, x, maxiter=100, - line_search=line_search) - -print('Optimization result={}. Should be [1, 1]'.format(x)) diff --git a/examples/visualization/show_1d.py b/examples/visualization/show_1d.py index a8a13876a52..9f1dd8ed1cb 100644 --- a/examples/visualization/show_1d.py +++ b/examples/visualization/show_1d.py @@ -12,7 +12,7 @@ import odl space = odl.uniform_discr(0, 5, 100) -elem = space.element(np.sin) +elem = space.element(lambda x : np.sin(x)) # Get figure object fig = elem.show(title='Sine Functions') diff --git a/examples/visualization/show_2d.py b/examples/visualization/show_2d.py index d02e65f7089..1a3e2fc79d4 100644 --- a/examples/visualization/show_2d.py +++ b/examples/visualization/show_2d.py @@ -9,7 +9,7 @@ import odl space = odl.uniform_discr([0, 0], [1, 1], [100, 100]) -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Show all data phantom.show() diff --git a/examples/visualization/show_2d_complex.py b/examples/visualization/show_2d_complex.py index 705f2113b40..15d67d73db5 100644 --- a/examples/visualization/show_2d_complex.py +++ b/examples/visualization/show_2d_complex.py @@ -8,6 +8,6 @@ import odl -space = odl.uniform_discr([0, 0], [1, 1], [100, 100], dtype='complex') -phantom = odl.phantom.shepp_logan(space, modified=True) * (1 + 0.5j) +space = odl.uniform_discr([0, 0], [1, 1], [100, 100], dtype=complex) +phantom = odl.core.phantom.shepp_logan(space, modified=True) * (1 + 0.5j) phantom.show(force_show=True) diff --git a/examples/visualization/show_productspace.py b/examples/visualization/show_productspace.py index 8909c5541a0..131489cb774 100644 --- a/examples/visualization/show_productspace.py +++ b/examples/visualization/show_productspace.py @@ -11,7 +11,7 @@ # Making a product space element where each component consists of a # Shepp-Logan phantom multiplied by the constant i, where i is the # index of the product space component. -elem = pspace.element([odl.phantom.shepp_logan(space, modified=True) * i +elem = pspace.element([odl.core.phantom.shepp_logan(space, modified=True) * i for i in range(m)]) # By default 4 uniformly spaced elements are shown. Since there are 7 in diff --git a/examples/visualization/show_update_1d.py b/examples/visualization/show_update_1d.py index 9501e63bc13..f5ece0c48f8 100644 --- a/examples/visualization/show_update_1d.py +++ b/examples/visualization/show_update_1d.py @@ -7,7 +7,7 @@ n = 100 m = 20 space = odl.uniform_discr(0, 5, n) -elem = space.element(np.sin) +elem = space.element(lambda x : np.sin(x)) # Pre-create a plot and set some property, here the plot limits in the y axis. fig = plt.figure() diff --git a/examples/visualization/show_update_2d.py b/examples/visualization/show_update_2d.py index 77e61b325b8..27c49012b67 100644 --- a/examples/visualization/show_update_2d.py +++ b/examples/visualization/show_update_2d.py @@ -6,7 +6,7 @@ n = 100 m = 20 space = odl.uniform_discr([0, 0], [1, 1], [n, n]) -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create a figure by saving the result of show fig = None diff --git a/examples/visualization/show_update_in_place_2d.py b/examples/visualization/show_update_in_place_2d.py index 8a86a7c23e9..8e039ffbbc6 100644 --- a/examples/visualization/show_update_in_place_2d.py +++ b/examples/visualization/show_update_in_place_2d.py @@ -9,7 +9,7 @@ n = 100 m = 200 space = odl.uniform_discr([0, 0], [1, 1], [n, n]) -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create a figure by saving the result of show fig = None diff --git a/odl/__init__.py b/odl/__init__.py index 985ba26f79a..4e544149267 100644 --- a/odl/__init__.py +++ b/odl/__init__.py @@ -14,11 +14,13 @@ from __future__ import absolute_import -from os import pardir, path +from os import pardir, path, environ +environ['SCIPY_ARRAY_API']='1' import numpy as np __all__ = ( + 'array_API_support' 'set', 'space', 'operator', @@ -31,7 +33,6 @@ 'tomo', 'trafos', 'ufunc_ops', - 'util', ) # Set package version @@ -54,29 +55,32 @@ # Import all names from "core" subpackages into the top-level namespace; # the `__all__` collection is extended later to make import errors more # visible (otherwise one gets errors like "... has no attribute __all__") -from .discr import * -from .operator import * -from .set import * -from .space import * +from .core.set import * +from .core.array_API_support import * +from .core.discr import * +from .core.operator import * +from .core.space import * # More "advanced" subpackages keep their namespaces separate from top-level, # we only import the modules themselves from . import contrib -from . import deform -from . import diagnostics -from . import phantom +from .core import diagnostics +from .core import phantom from . import solvers -from . import tomo +from . import functional +from .applications import tomo from . import trafos -from . import ufunc_ops -from . import util +# from . import ufunc_ops # Add `test` function to global namespace so users can run `odl.test()` -from .util import test +from .core.util import test + +# Make often-used ODL definitions appear as members of the main `odl` namespace +# in the documentation (they are aliased in that namespace), even though they +# are defined in modules with more verbose names. +for entity in [rn, cn, uniform_discr, Operator]: + entity.__module__ = "odl" + # Amend `__all__` -__all__ += discr.__all__ -__all__ += operator.__all__ -__all__ += set.__all__ -__all__ += space.__all__ __all__ += ('test',) diff --git a/odl/applications/CRYO_EM/README.md b/odl/applications/CRYO_EM/README.md new file mode 100644 index 00000000000..c75e4954661 --- /dev/null +++ b/odl/applications/CRYO_EM/README.md @@ -0,0 +1 @@ +Placeholder folder for CRYO_EM \ No newline at end of file diff --git a/odl/applications/MRI/README.md b/odl/applications/MRI/README.md new file mode 100644 index 00000000000..26b6b6b02fa --- /dev/null +++ b/odl/applications/MRI/README.md @@ -0,0 +1 @@ +Placeholder folder for MRI \ No newline at end of file diff --git a/odl/applications/PET/README.md b/odl/applications/PET/README.md new file mode 100644 index 00000000000..098cac0bf48 --- /dev/null +++ b/odl/applications/PET/README.md @@ -0,0 +1 @@ +Placeholder folder for PET \ No newline at end of file diff --git a/odl/applications/README.md b/odl/applications/README.md new file mode 100644 index 00000000000..d41eb395afb --- /dev/null +++ b/odl/applications/README.md @@ -0,0 +1,2 @@ +# Applications +This folder contains application-specific code, such as MRI, CT, Cryo-EM, PET... diff --git a/odl/tomo/README.md b/odl/applications/tomo/README.md similarity index 100% rename from odl/tomo/README.md rename to odl/applications/tomo/README.md diff --git a/odl/tomo/__init__.py b/odl/applications/tomo/__init__.py similarity index 100% rename from odl/tomo/__init__.py rename to odl/applications/tomo/__init__.py diff --git a/odl/tomo/analytic/__init__.py b/odl/applications/tomo/analytic/__init__.py similarity index 100% rename from odl/tomo/analytic/__init__.py rename to odl/applications/tomo/analytic/__init__.py diff --git a/odl/tomo/analytic/filtered_back_projection.py b/odl/applications/tomo/analytic/filtered_back_projection.py similarity index 90% rename from odl/tomo/analytic/filtered_back_projection.py rename to odl/applications/tomo/analytic/filtered_back_projection.py index 2002eded301..fcfa2608937 100644 --- a/odl/tomo/analytic/filtered_back_projection.py +++ b/odl/applications/tomo/analytic/filtered_back_projection.py @@ -9,10 +9,11 @@ from __future__ import print_function, division, absolute_import import numpy as np +import odl -from odl.discr import ResizingOperator -from odl.trafos import FourierTransform, PYFFTW_AVAILABLE - +from odl.core.array_API_support import get_array_and_backend +from odl.core.discr import ResizingOperator +from odl.trafos import FourierTransform __all__ = ('fbp_op', 'fbp_filter_op', 'tam_danielson_window', 'parker_weighting') @@ -78,20 +79,24 @@ def _fbp_filter(norm_freq, filter_type, frequency_scaling): ... frequency_scaling=0.8) """ filter_type, filter_type_in = str(filter_type).lower(), filter_type + + norm_freq, backend = get_array_and_backend(norm_freq) + array_namespace = backend.array_namespace + if callable(filter_type): filt = filter_type(norm_freq) elif filter_type == 'ram-lak': - filt = np.copy(norm_freq) + pass elif filter_type == 'shepp-logan': - filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling)) + filt = norm_freq * array_namespace.sinc(norm_freq / (2 * frequency_scaling)) elif filter_type == 'cosine': - filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling)) + filt = norm_freq * array_namespace.cos(norm_freq * np.pi / (2 * frequency_scaling)) elif filter_type == 'hamming': filt = norm_freq * ( - 0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling))) + 0.54 + 0.46 * array_namespace.cos(norm_freq * np.pi / (frequency_scaling))) elif filter_type == 'hann': filt = norm_freq * ( - np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2) + array_namespace.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2) else: raise ValueError('unknown `filter_type` ({})' ''.format(filter_type_in)) @@ -132,7 +137,7 @@ def tam_danielson_window(ray_trafo, smoothing_width=0.05, n_pi=1): -------- fbp_op : Filtered back-projection operator from `RayTransform` tam_danielson_window : Weighting for short scan data - odl.tomo.geometry.conebeam.ConeBeamGeometry : + odl.applications.tomo.geometry.conebeam.ConeBeamGeometry : Primary use case for this window function. References @@ -236,8 +241,8 @@ def parker_weighting(ray_trafo, q=0.25): -------- fbp_op : Filtered back-projection operator from `RayTransform` tam_danielson_window : Indicator function for helical data - odl.tomo.geometry.conebeam.FanBeamGeometry : Use case in 2d - odl.tomo.geometry.conebeam.ConeBeamGeometry : Use case in 3d (for pitch 0) + odl.applications.tomo.geometry.conebeam.FanBeamGeometry : Use case in 2d + odl.applications.tomo.geometry.conebeam.ConeBeamGeometry : Use case in 3d (for pitch 0) References ---------- @@ -295,9 +300,9 @@ def b(alpha): # Create weighting function beta = np.asarray(angles - min_rot_angle, - dtype=ray_trafo.range.dtype) # rotation angle + dtype=ray_trafo.range.dtype_identifier) # rotation angle alpha = np.asarray(np.arctan2(dx, src_radius + det_radius), - dtype=ray_trafo.range.dtype) + dtype=ray_trafo.range.dtype_identifier) # Compute sum in place to save memory S_sum = S(beta / b(alpha) - 0.5) @@ -309,7 +314,6 @@ def b(alpha): return ray_trafo.range.element( np.broadcast_to(S_sum * scale, ray_trafo.range.shape)) - def fbp_filter_op(ray_trafo, padding=True, filter_type='Ram-Lak', frequency_scaling=1.0): """Create a filter operator for FBP from a `RayTransform`. @@ -363,17 +367,19 @@ def fbp_filter_op(ray_trafo, padding=True, filter_type='Ram-Lak', -------- tam_danielson_window : Windowing for helical data """ - impl = 'pyfftw' if PYFFTW_AVAILABLE else 'numpy' + impl = 'default' alen = ray_trafo.geometry.motion_params.length if ray_trafo.domain.ndim == 2: # Define ramp filter def fourier_filter(x): - abs_freq = np.abs(x[1]) - norm_freq = abs_freq / np.max(abs_freq) + _, backend = get_array_and_backend(x[0]) + array_namespace = backend.array_namespace + abs_freq = array_namespace.abs(x[1]) + norm_freq = abs_freq / array_namespace.max(abs_freq) filt = _fbp_filter(norm_freq, filter_type, frequency_scaling) scaling = 1 / (2 * alen) - return filt * np.max(abs_freq) * scaling + return filt * array_namespace.max(abs_freq) * scaling # Define (padded) fourier transform if padding: @@ -420,15 +426,17 @@ def fourier_filter(x): def fourier_filter(x): # If axis is aligned to a coordinate axis, save some memory and # time by using broadcasting + x, backend = get_array_and_backend(x[0]) + array_namespace = backend.array_namespace if not used_axes[0]: - abs_freq = np.abs(rot_dir[1] * x[2]) + abs_freq = array_namespace.abs(rot_dir[1] * x[2]) elif not used_axes[1]: - abs_freq = np.abs(rot_dir[0] * x[1]) + abs_freq = array_namespace.abs(rot_dir[0] * x[1]) else: - abs_freq = np.abs(rot_dir[0] * x[1] + rot_dir[1] * x[2]) - norm_freq = abs_freq / np.max(abs_freq) + abs_freq = array_namespace.abs(rot_dir[0] * x[1] + rot_dir[1] * x[2]) + norm_freq = abs_freq / array_namespace.max(abs_freq) filt = _fbp_filter(norm_freq, filter_type, frequency_scaling) - scaling = scale * np.max(abs_freq) / (2 * alen) + scaling = scale * array_namespace.max(abs_freq) / (2 * alen) return filt * scaling # Define (padded) fourier transform @@ -538,7 +546,7 @@ def fbp_op(ray_trafo, padding=True, filter_type='Ram-Lak', if __name__ == '__main__': import odl import matplotlib.pyplot as plt - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests # Display the various filters x = np.linspace(0, 1, 100) @@ -559,10 +567,10 @@ def fbp_op(ray_trafo, padding=True, filter_type='Ram-Lak', min_pt=[-20, -20, 0], max_pt=[20, 20, 40], shape=[300, 300, 300]) angle_partition = odl.uniform_partition(0, 8 * 2 * np.pi, 2000) detector_partition = odl.uniform_partition([-40, -4], [40, 4], [500, 500]) - geometry = odl.tomo.ConeBeamGeometry( + geometry = odl.applications.tomo.ConeBeamGeometry( angle_partition, detector_partition, src_radius=100, det_radius=100, pitch=5.0) - ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') + ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') # Crete and show TD window td_window = tam_danielson_window(ray_trafo, smoothing_width=0) @@ -571,9 +579,9 @@ def fbp_op(ray_trafo, padding=True, filter_type='Ram-Lak', # Show the Parker weighting # Create Ray Transform in fan beam geometry - geometry = odl.tomo.cone_beam_geometry(reco_space, + geometry = odl.applications.tomo.cone_beam_geometry(reco_space, src_radius=40, det_radius=80) - ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') + ray_trafo = odl.applications.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') # Crete and show parker weighting parker_weighting = parker_weighting(ray_trafo) diff --git a/odl/tomo/backends/__init__.py b/odl/applications/tomo/backends/__init__.py similarity index 100% rename from odl/tomo/backends/__init__.py rename to odl/applications/tomo/backends/__init__.py diff --git a/odl/applications/tomo/backends/astra_cpu.py b/odl/applications/tomo/backends/astra_cpu.py new file mode 100644 index 00000000000..998fc153596 --- /dev/null +++ b/odl/applications/tomo/backends/astra_cpu.py @@ -0,0 +1,295 @@ +# Copyright 2014-2020 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Backend for ASTRA using CPU.""" + +from __future__ import absolute_import, division, print_function + +import warnings +import numpy as np +from odl.core.discr import DiscretizedSpace, DiscretizedSpaceElement +from odl.applications.tomo.backends.astra_setup import ( + astra_algorithm, astra_data, astra_projection_geometry, astra_projector, + astra_volume_geometry) +from odl.applications.tomo.backends.util import _add_default_complex_impl +from odl.applications.tomo.geometry import ( + DivergentBeamGeometry, Geometry, ParallelBeamGeometry) +from odl.core.util import writable_array +from odl.core.array_API_support import lookup_array_backend, get_array_and_backend +try: + import astra +except ImportError: + pass + +__all__ = ( + 'astra_cpu_projector', + 'default_astra_proj_type', +) + + +def default_astra_proj_type(geom): + """Return the default ASTRA projector type for a given geometry. + + Parameters + ---------- + geom : `Geometry` + ODL geometry object for which to get the default projector type. + + Returns + ------- + astra_proj_type : str + Default projector type for the given geometry. + + In 2D: + + - `ParallelBeamGeometry`: ``'linear'`` + - `DivergentBeamGeometry`: ``'line_fanflat'`` + + In 3D: + + - `ParallelBeamGeometry`: ``'linear3d'`` + - `DivergentBeamGeometry`: ``'linearcone'`` + """ + if isinstance(geom, ParallelBeamGeometry): + return 'linear' if geom.ndim == 2 else 'linear3d' + elif isinstance(geom, DivergentBeamGeometry): + return 'line_fanflat' if geom.ndim == 2 else 'linearcone' + else: + raise TypeError( + 'no default exists for {}, `astra_proj_type` must be given ' + 'explicitly'.format(type(geom)) + ) + +def astra_cpu_projector( + direction:str, + input_data:DiscretizedSpaceElement, + geometry:Geometry, + range_space:DiscretizedSpace, + out :DiscretizedSpaceElement = None, + astra_proj_type: str | None = None + ) -> DiscretizedSpaceElement: + """Run an ASTRA projection on the given data using the CPU. + + Parameters + ---------- + input_data : `DiscretizedSpaceElement` + Input data to which the projector is applied. + geometry : `Geometry` + Geometry defining the tomographic setup. + range_space : `DiscretizedSpace` + Space to which the calling operator maps. + out : ``range_space`` element, optional + Element of the range_space space to which the result is written. If + ``None``, an element in ``range`` is created. + astra_proj_type : str, range_space + Type of projector that should be used. See `the ASTRA documentation + `_ for details. + By default, a suitable projector type for the given geometry is + selected, see `default_astra_proj_type`. + + Returns + ------- + out : ``proj_space`` element + Projection data resulting from the application of the projector. + If ``out`` was provided, the returned object is a reference to it. + """ + ### Asserting that we get the right data types. + assert direction in ['forward', 'backward'] + if not isinstance(input_data, DiscretizedSpaceElement): + raise TypeError( + 'Input data {!r} is not a `DiscretizedSpaceElement` instance' + ''.format(input_data) + ) + if not isinstance(geometry, Geometry): + raise TypeError( + 'geometry {!r} is not a Geometry instance'.format(geometry) + ) + if not isinstance(range_space, DiscretizedSpace): + raise TypeError( + '`range_space` {!r} is not a DiscretizedSpace instance.' + ''.format(range_space) + ) + if input_data.ndim != geometry.ndim: + raise ValueError( + 'dimensions {} of input data and {} of geometry do not match' + ''.format(input_data.ndim, geometry.ndim) + ) + if out is None: + out_element = range_space.real_space.element() + else: + if out not in range_space.real_space: + raise TypeError( + '`out` {} is neither None nor a `DiscretizedSpaceElement` ' + 'instance'.format(out) + ) + out_element = out.data + ### Unpacking the dimension of the problem + ndim = input_data.ndim + + ### Unpacking the underlying arrays + input_data_arr, input_backend = get_array_and_backend(input_data, must_be_contiguous=True) + + if input_backend.impl != 'numpy': + out_element = np.ascontiguousarray(input_backend.to_numpy(out_element)) + input_data_arr = np.ascontiguousarray(input_backend.to_numpy(input_data_arr)) + + range_backend = lookup_array_backend(range_space.impl) + assert input_backend == range_backend, f"The input's tensor space backend does not match the range's: {input_backend} != {range_backend}" + + # Create astra geometries + # The volume geometry is defined by the space of the input data in the forward mode and the range_space in the backward mode + if direction == 'forward': + vol_geom = astra_volume_geometry(input_data.space, 'cpu') + else: + vol_geom = astra_volume_geometry(range_space, 'cpu') + + # Parsing the pprojection geometry does not depend on the mode + proj_geom = astra_projection_geometry(geometry, 'cpu') + + # Create projector + if astra_proj_type is None: + astra_proj_type = default_astra_proj_type(geometry) + proj_id = astra_projector(astra_proj_type, vol_geom, proj_geom, ndim) + + # Create ASTRA data structures + # In the forward mode, the input is the volume + # In the backward mode, the input is the sinogram/projection + if direction == 'forward': + input_id = astra_data(vol_geom, datatype='volume', data=input_data_arr, + allow_copy=True) + else: + input_id = astra_data(proj_geom, datatype='projection', data=input_data_arr, allow_copy=True + ) + + with writable_array(out_element, must_be_contiguous=True) as out_arr: + if direction == 'forward': + output_id = astra_data( + proj_geom, + datatype='projection', + data=out_arr, + ndim=range_space.ndim) + vol_id = input_id + sino_id = output_id + else: + output_id = astra_data( + vol_geom, + datatype='volume', + data=out_arr, + ndim=range_space.ndim) + vol_id = output_id + sino_id = input_id + + # Create algorithm + algo_id = astra_algorithm( + direction=direction, + ndim = ndim, + vol_id = vol_id, + sino_id = sino_id, + proj_id = proj_id, + astra_impl='cpu') + + # Run algorithm + astra.algorithm.run(algo_id) + + # There is no scaling for the forward mode + if direction == 'backward': + # Weight the adjoint by appropriate weights + scaling_factor = float(input_data.space.weighting.const) + scaling_factor /= float(range_space.weighting.const) + + out_element *= scaling_factor + + # Delete ASTRA objects + astra.algorithm.delete(algo_id) + astra.data2d.delete((vol_id, sino_id)) + astra.projector.delete(proj_id) + + if out is None: + return range_space.element(out_element) + else: + out.data[:] = range_space.element(out_element).data + +class AstraCpuImpl: + """Thin wrapper implementing ASTRA CPU for `RayTransform`.""" + + def __init__(self, geometry, vol_space, proj_space): + """Initialize a new instance. + + Parameters + ---------- + geometry : `Geometry` + Geometry defining the tomographic setup. + vol_space : `DiscretizedSpace` + Reconstruction space, the space of the images to be forward + projected. + proj_space : `DiscretizedSpace` + Projection space, the space of the result. + """ + if not isinstance(geometry, Geometry): + raise TypeError( + '`geometry` must be a `Geometry` instance, got {!r}' + ''.format(geometry) + ) + if not isinstance(vol_space, DiscretizedSpace): + raise TypeError( + '`vol_space` must be a `DiscretizedSpace` instance, got {!r}' + ''.format(vol_space) + ) + if not isinstance(proj_space, DiscretizedSpace): + raise TypeError( + '`proj_space` must be a `DiscretizedSpace` instance, got {!r}' + ''.format(proj_space) + ) + if geometry.ndim > 2: + raise ValueError( + '`impl` {!r} only works for 2d'.format(self.__class__.__name__) + ) + + if vol_space.size >= 512 ** 2: + warnings.warn( + "The 'astra_cpu' backend may be too slow for volumes of this " + "size. Consider using 'astra_cuda' if your machine has an " + "Nvidia GPU.", + RuntimeWarning, + ) + + self.geometry = geometry + self._vol_space = vol_space + self._proj_space = proj_space + + @property + def vol_space(self): + return self._vol_space + + @property + def proj_space(self): + return self._proj_space + + @_add_default_complex_impl + def call_backward(self, x, out=None, **kwargs): + # return astra_cpu_back_projector( + # x, self.geometry, self.vol_space.real_space, out, **kwargs + # ) + return astra_cpu_projector( + 'backward', x, self.geometry, self.vol_space.real_space, out, **kwargs + ) + + @_add_default_complex_impl + def call_forward(self, x, out=None, **kwargs): + # return astra_cpu_forward_projector( + # x, self.geometry, self.proj_space.real_space, out, **kwargs + # ) + return astra_cpu_projector( + 'forward', x, self.geometry, self.proj_space.real_space, out, **kwargs + ) + + +if __name__ == '__main__': + from odl.core.util.testutils import run_doctests + + run_doctests() diff --git a/odl/tomo/backends/astra_cuda.py b/odl/applications/tomo/backends/astra_cuda.py similarity index 63% rename from odl/tomo/backends/astra_cuda.py rename to odl/applications/tomo/backends/astra_cuda.py index 049079a6b3d..1ddb23b3e6a 100644 --- a/odl/tomo/backends/astra_cuda.py +++ b/odl/applications/tomo/backends/astra_cuda.py @@ -16,20 +16,23 @@ import numpy as np from packaging.version import parse as parse_version -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr import DiscretizedSpace -from odl.tomo.backends.astra_setup import ( - ASTRA_VERSION, astra_algorithm, astra_data, astra_projection_geometry, +from odl.core.discr import DiscretizedSpace +from odl.applications.tomo.backends.astra_setup import ( + ASTRA_VERSION, astra_projection_geometry, astra_projector, astra_supports, astra_versions_supporting, astra_volume_geometry) -from odl.tomo.backends.util import _add_default_complex_impl -from odl.tomo.geometry import ( +from odl.applications.tomo.backends.util import _add_default_complex_impl +from odl.applications.tomo.geometry import ( ConeBeamGeometry, FanBeamGeometry, Geometry, Parallel2dGeometry, Parallel3dAxisGeometry) +from odl.core.discr.discr_space import DiscretizedSpaceElement +from odl.core.array_API_support import empty, get_array_and_backend try: import astra - + # This is important, although not use explicitely. + # If not imported, astra.experimental is not "visible" + import astra.experimental ASTRA_CUDA_AVAILABLE = astra.astra.use_cuda() except ImportError: ASTRA_CUDA_AVAILABLE = False @@ -37,16 +40,17 @@ __all__ = ( 'ASTRA_CUDA_AVAILABLE', ) + +def index_of_cuda_device(device: "torch.device"): + if device == 'cpu': + return None + else: + return int(str(device).split(':')[-1]) class AstraCudaImpl: """`RayTransform` implementation for CUDA algorithms in ASTRA.""" - - algo_forward_id = None - algo_backward_id = None - vol_id = None - sino_id = None - proj_id = None + projector_id = None def __init__(self, geometry, vol_space, proj_space): """Initialize a new instance. @@ -111,6 +115,22 @@ def __init__(self, geometry, vol_space, proj_space): # ASTRA projectors are not thread-safe, thus we need to lock manually self._mutex = Lock() + assert vol_space.impl == proj_space.impl, f'Volume space ({vol_space.impl}) != Projection space ({proj_space.impl})' + + if self.geometry.ndim == 3: + if vol_space.impl == 'numpy': + self.transpose_tuple = (1,0,2) + elif vol_space.impl == 'pytorch': + self.transpose_tuple = (1,0) + else: + raise NotImplementedError('Not implemented for another backend') + + self.fp_scaling_factor = astra_cuda_fp_scaling_factor( + self.geometry + ) + self.bp_scaling_factor = astra_cuda_bp_scaling_factor( + self.proj_space, self.vol_space, self.geometry + ) @property def vol_space(self): @@ -130,70 +150,26 @@ def create_ids(self): motion_shape = (np.prod(self.geometry.motion_partition.shape),) proj_shape = motion_shape + self.geometry.det_partition.shape - proj_ndim = len(proj_shape) - - if proj_ndim == 2: - astra_proj_shape = proj_shape - astra_vol_shape = self.vol_space.shape - elif proj_ndim == 3: - # The `u` and `v` axes of the projection data are swapped, - # see explanation in `astra_*_3d_geom_to_vec`. - astra_proj_shape = (proj_shape[1], proj_shape[0], proj_shape[2]) - astra_vol_shape = self.vol_space.shape - - self.vol_array = np.empty(astra_vol_shape, dtype='float32', order='C') - self.proj_array = np.empty(astra_proj_shape, dtype='float32', - order='C') + self.proj_ndim = len(proj_shape) # Create ASTRA data structures - vol_geom = astra_volume_geometry(self.vol_space) - proj_geom = astra_projection_geometry(self.geometry) - self.vol_id = astra_data( - vol_geom, - datatype='volume', - ndim=self.vol_space.ndim, - data=self.vol_array, - allow_copy=AVOID_UNNECESSARY_COPY, - ) - - proj_type = 'cuda' if proj_ndim == 2 else 'cuda3d' - self.proj_id = astra_projector( - proj_type, vol_geom, proj_geom, proj_ndim - ) - - self.sino_id = astra_data( - proj_geom, - datatype='projection', - ndim=proj_ndim, - data=self.proj_array, - allow_copy=AVOID_UNNECESSARY_COPY, - ) - - # Create algorithm - self.algo_forward_id = astra_algorithm( - 'forward', - proj_ndim, - self.vol_id, - self.sino_id, - self.proj_id, - impl='cuda', - ) - - # Create algorithm - self.algo_backward_id = astra_algorithm( - 'backward', - proj_ndim, - self.vol_id, - self.sino_id, - self.proj_id, - impl='cuda', - ) + self.vol_geom = astra_volume_geometry(self.vol_space, 'cuda') + + self.proj_geom = astra_projection_geometry(self.geometry, 'cuda') + + self.projector_id = astra_projector( + astra_proj_type = 'cuda3d', + astra_vol_geom = self.vol_geom, + astra_proj_geom = self.proj_geom, + ndim = 3, + override_2D = bool(self.geometry.ndim == 2) + ) @_add_default_complex_impl def call_forward(self, x, out=None, **kwargs): return self._call_forward_real(x, out, **kwargs) - def _call_forward_real(self, vol_data, out=None, **kwargs): + def _call_forward_real(self, vol_data:DiscretizedSpaceElement, out=None, **kwargs): """Run an ASTRA forward projection on the given data using the GPU. Parameters @@ -215,43 +191,56 @@ def _call_forward_real(self, vol_data, out=None, **kwargs): assert vol_data in self.vol_space.real_space if out is not None: - assert out in self.proj_space - else: - out = self.proj_space.element() + assert out in self.proj_space.real_space, f"The out argument provided is a {type(out)}, which is not an element of the projection space {self.proj_space.real_space}" + if self.vol_space.impl == 'pytorch': + warnings.warn("You requested an out-of-place transform with PyTorch. This will require cloning the data and will allocate extra memory", RuntimeWarning) + proj_data = out.data[None] if self.proj_ndim==2 else out.data + if self.geometry.ndim == 3: + proj_data = proj_data.transpose(*self.transpose_tuple) - # Copy data to GPU memory - if self.geometry.ndim == 2: - astra.data2d.store(self.vol_id, vol_data.asarray()) - elif self.geometry.ndim == 3: - astra.data3d.store(self.vol_id, vol_data.asarray()) else: - raise RuntimeError('unknown ndim') - - # Run algorithm - astra.algorithm.run(self.algo_forward_id) - - # Copy result to host - if self.geometry.ndim == 2: - out[:] = self.proj_array - elif self.geometry.ndim == 3: - out[:] = np.swapaxes(self.proj_array, 0, 1).reshape( - self.proj_space.shape) - - # Fix scaling to weight by pixel size - if ( - isinstance(self.geometry, Parallel2dGeometry) - and parse_version(ASTRA_VERSION) < parse_version('1.9.9.dev') - ): - # parallel2d scales with pixel stride - out *= 1 / float(self.geometry.det_partition.cell_volume) + proj_data = empty( + impl = self.proj_space.impl, + shape = astra.geom_size(self.proj_geom), + dtype = self.proj_space.dtype, + device = self.proj_space.device + ) + + if self.proj_ndim == 2: + volume_data = vol_data.data[None] + elif self.proj_ndim == 3: + volume_data = vol_data.data + else: + raise NotImplementedError + + volume_data, vol_backend = get_array_and_backend(volume_data, must_be_contiguous=True) + proj_data, proj_backend = get_array_and_backend(proj_data, must_be_contiguous=True) + + if self.proj_space.impl == 'pytorch': + device_index = index_of_cuda_device( + self.proj_space.tspace.device) #type:ignore + if device_index is not None: + astra.set_gpu_index(device_index) + + astra.experimental.direct_FP3D( #type:ignore + self.projector_id, + volume_data, + proj_data + ) + + proj_data *= self.fp_scaling_factor + proj_data = proj_data[0] if self.geometry.ndim == 2 else proj_data.transpose(*self.transpose_tuple) - return out + if out is not None: + out.data[:] = proj_data if self.proj_space.impl == 'numpy' else proj_data.clone() + else: + return self.proj_space.element(proj_data) @_add_default_complex_impl def call_backward(self, x, out=None, **kwargs): return self._call_backward_real(x, out, **kwargs) - def _call_backward_real(self, proj_data, out=None, **kwargs): + def _call_backward_real(self, proj_data:DiscretizedSpaceElement, out=None, **kwargs): """Run an ASTRA back-projection on the given data using the GPU. Parameters @@ -274,57 +263,74 @@ def _call_backward_real(self, proj_data, out=None, **kwargs): assert proj_data in self.proj_space.real_space if out is not None: - assert out in self.vol_space + assert out in self.vol_space.real_space, f"The out argument provided is a {type(out)}, which is not an element of the projection space {self.vol_space.real_space}" + if self.vol_space.impl == 'pytorch': + warnings.warn( + "You requested an out-of-place transform with PyTorch. \ + This will require cloning the data and will allocate extra memory", + RuntimeWarning) + volume_data = out.data[None] if self.geometry.ndim==2 else out.data else: - out = self.vol_space.element() - - # Copy data to GPU memory - if self.geometry.ndim == 2: - astra.data2d.store(self.sino_id, proj_data.asarray()) - elif self.geometry.ndim == 3: - shape = (-1,) + self.geometry.det_partition.shape - reshaped_proj_data = proj_data.asarray().reshape(shape) - swapped_proj_data = np.ascontiguousarray( - np.swapaxes(reshaped_proj_data, 0, 1) + volume_data = empty( + self.vol_space.impl, + astra.geom_size(self.vol_geom), + dtype = self.vol_space.dtype, + device = self.vol_space.device ) - astra.data3d.store(self.sino_id, swapped_proj_data) - # Run algorithm - astra.algorithm.run(self.algo_backward_id) + ### Transpose projection tensor + if self.proj_ndim == 2: + proj_data = proj_data.data[None] + elif self.proj_ndim == 3: + proj_data = proj_data.data.transpose(*self.transpose_tuple) + else: + raise NotImplementedError + + # Ensure data is contiguous otherwise astra will throw an error + volume_data, vol_backend = get_array_and_backend(volume_data, must_be_contiguous=True) + proj_data, proj_backend = get_array_and_backend(proj_data, must_be_contiguous=True) + + if self.vol_space.tspace.impl == 'pytorch': + device_index = index_of_cuda_device(self.vol_space.tspace.device) #type:ignore + if device_index is not None: + astra.set_gpu_index(device_index) + + ### Call the backprojection + astra.experimental.direct_BP3D( #type:ignore + self.projector_id, + volume_data, + proj_data + ) + volume_data *= self.bp_scaling_factor + volume_data = volume_data[0] if self.geometry.ndim == 2 else volume_data - # Copy result to CPU memory - out[:] = self.vol_array + if out is not None: + out[:] = volume_data if self.vol_space.impl == 'numpy' else volume_data.clone() + return out + else: + return self.vol_space.element(volume_data) - # Fix scaling to weight by pixel/voxel size - out *= astra_cuda_bp_scaling_factor( - self.proj_space, self.vol_space, self.geometry - ) - return out +def astra_cuda_fp_scaling_factor(geometry): + """Volume scaling accounting for differing adjoint definitions. - def __del__(self): - """Delete ASTRA objects.""" - if self.geometry.ndim == 2: - adata, aproj = astra.data2d, astra.projector - else: - adata, aproj = astra.data3d, astra.projector3d - - if self.algo_forward_id is not None: - astra.algorithm.delete(self.algo_forward_id) - self.algo_forward_id = None - if self.algo_backward_id is not None: - astra.algorithm.delete(self.algo_backward_id) - self.algo_backward_id = None - if self.vol_id is not None: - adata.delete(self.vol_id) - self.vol_id = None - if self.sino_id is not None: - adata.delete(self.sino_id) - self.sino_id = None - if self.proj_id is not None: - aproj.delete(self.proj_id) - self.proj_id = None + ASTRA defines the adjoint operator in terms of a fully discrete + setting (transposed "projection matrix") without any relation to + physical dimensions, which makes a re-scaling necessary to + translate it to spaces with physical dimensions. + Behavior of ASTRA changes slightly between versions, so we keep + track of it and adapt the scaling accordingly. + """ + if ( + isinstance(geometry, Parallel2dGeometry) + and parse_version(ASTRA_VERSION) < parse_version('1.9.9.dev') + ): + # parallel2d scales with pixel stride + return 1 / float(geometry.det_partition.cell_volume) + + else: + return 1 def astra_cuda_bp_scaling_factor(proj_space, vol_space, geometry): """Volume scaling accounting for differing adjoint definitions. @@ -460,6 +466,6 @@ def astra_cuda_bp_scaling_factor(proj_space, vol_space, geometry): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/backends/astra_setup.py b/odl/applications/tomo/backends/astra_setup.py similarity index 81% rename from odl/tomo/backends/astra_setup.py rename to odl/applications/tomo/backends/astra_setup.py index 5de74ed4be6..96196c14728 100644 --- a/odl/tomo/backends/astra_setup.py +++ b/odl/applications/tomo/backends/astra_setup.py @@ -24,18 +24,17 @@ """ from __future__ import absolute_import, division, print_function - +from typing import Dict import warnings import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY - -from odl.discr import DiscretizedSpace, DiscretizedSpaceElement -from odl.tomo.geometry import ( +from odl.core.discr import DiscretizedSpace, DiscretizedSpaceElement +from odl.applications.tomo.geometry import ( DivergentBeamGeometry, Flat1dDetector, Flat2dDetector, Geometry, ParallelBeamGeometry) -from odl.tomo.util.utility import euler_matrix +from odl.applications.tomo.util.utility import euler_matrix +from odl.core.array_API_support import get_array_and_backend try: import astra @@ -52,8 +51,8 @@ ASTRA_VERSION = astra.__version__ except AttributeError: # Below version 1.8 - _maj = astra.astra.version() // 100 - _min = astra.astra.version() % 100 + _maj = astra.astra.version() // 100 #type:ignore + _min = astra.astra.version() % 100 #type:ignore ASTRA_VERSION = '.'.join([str(_maj), str(_min)]) if (_maj, _min) < (1, 7): warnings.warn( @@ -127,6 +126,10 @@ 'par2d_distance_driven_proj': '>1.8.3', } +ODL_TO_ASTRA_INDEX_PERMUTATIONS = [ + 2,1,0, 5,4,3, 8,7,6, 11,10,9 +] + def astra_supports(feature): """Return bool indicating whether current ASTRA supports ``feature``. @@ -143,7 +146,7 @@ def astra_supports(feature): ``True`` if the currently imported version of ASTRA supports the feature in question, ``False`` otherwise. """ - from odl.util.utility import pkg_supports + from odl.core.util.utility import pkg_supports return pkg_supports(feature, ASTRA_VERSION, ASTRA_FEATURES) @@ -160,7 +163,7 @@ def astra_versions_supporting(feature): ------- version_spec : str Specifier for versions of ASTRA that support ``feature``. See - `odl.util.utility.pkg_supports` for details. + `odl.core.util.utility.pkg_supports` for details. """ try: return ASTRA_FEATURES[str(feature)] @@ -168,7 +171,7 @@ def astra_versions_supporting(feature): raise ValueError('unknown feature {!r}'.format(feature)) -def astra_volume_geometry(vol_space): +def astra_volume_geometry(vol_space:DiscretizedSpace, astra_impl:str): """Create an ASTRA volume geometry from the discretized domain. From the ASTRA documentation: @@ -186,6 +189,8 @@ def astra_volume_geometry(vol_space): vol_space : `DiscretizedSpace` Discretized space where the reconstruction (volume) lives. It must be 2- or 3-dimensional and uniformly discretized. + astra_impl : str + cuda or cpu Returns ------- @@ -236,9 +241,22 @@ def astra_volume_geometry(vol_space): # NOTE: We need to flip the sign of the (ODL) x component since # ASTRA seems to move it in the other direction. Not quite clear # why. - vol_geom = astra.create_vol_geom(vol_shp[0], vol_shp[1], - vol_min[1], vol_max[1], - -vol_max[0], -vol_min[0]) + if astra_impl == 'cpu': + vol_geom = astra.create_vol_geom( + vol_shp[0], vol_shp[1], + vol_min[1], vol_max[1], + -vol_max[0], -vol_min[0]) + elif astra_impl == 'cuda': + vol_geom = astra.create_vol_geom( + vol_shp[0], vol_shp[1], 1, + vol_min[1], vol_max[1], + vol_min[0], vol_max[0], + -1,1) + else: + raise ValueError(f'astra_impl argument can only be "cpu" or "cuda, got {astra_impl}') + + + elif vol_space.ndim == 3: # Not supported in all versions of ASTRA if ( @@ -273,7 +291,7 @@ def astra_volume_geometry(vol_space): return vol_geom -def astra_conebeam_3d_geom_to_vec(geometry): +def astra_conebeam_3d_geom_to_vec(geometry:Geometry): """Create vectors for ASTRA projection geometries from ODL geometry. The 3D vectors are used to create an ASTRA projection geometry for @@ -333,15 +351,36 @@ def astra_conebeam_3d_geom_to_vec(geometry): # ASTRA has (z, y, x) axis convention, in contrast to (x, y, z) in ODL, # so we need to adapt to this by changing the order. - newind = [] - for i in range(4): - newind += [2 + 3 * i, 1 + 3 * i, 0 + 3 * i] - vectors = vectors[:, newind] + vectors = vectors[:, ODL_TO_ASTRA_INDEX_PERMUTATIONS] return vectors +def astra_fanflat_2d_geom_to_conebeam_vec(geometry:Geometry): + """ Create vectors for ASTRA projection geometry. + This is required for the CUDA implementation of fanflat geometry. + """ + ########################################################################## + angles = geometry.angles + mid_pt = geometry.det_params.mid_pt + vectors = np.zeros((angles.shape[-1], 12)) -def astra_conebeam_2d_geom_to_vec(geometry): + # Source positions + vectors[:, 1:3] = geometry.src_position(angles) + # Detector positions + detector_positions = geometry.det_point_position(angles, mid_pt.item()) + vectors[:, 4:6] = detector_positions + px_size = geometry.det_partition.cell_sides[0] + det_axes = geometry.det_axis(angles) + vectors[:, 7:9] = det_axes * px_size + vectors[:, 9] = px_size + + # ASTRA has (z, y, x) axis convention, in contrast to (x, y, z) in ODL, + # so we need to adapt to this by changing the order. + vectors = vectors[:, ODL_TO_ASTRA_INDEX_PERMUTATIONS] + + return vectors + +def astra_conebeam_2d_geom_to_vec(geometry:Geometry): """Create vectors for ASTRA projection geometries from ODL geometry. The 2D vectors are used to create an ASTRA projection geometry for @@ -389,7 +428,7 @@ def astra_conebeam_2d_geom_to_vec(geometry): mid_pt = geometry.det_params.mid_pt # Need to cast `mid_pt` to float since otherwise the empty axis is # not removed - centers = geometry.det_point_position(angles, float(mid_pt)) + centers = geometry.det_point_position(angles, mid_pt.item()) vectors[:, 2:4] = rot_minus_90.dot(centers.T).T # Vector from detector pixel 0 to 1 @@ -400,7 +439,7 @@ def astra_conebeam_2d_geom_to_vec(geometry): return vectors -def astra_parallel_3d_geom_to_vec(geometry): +def astra_parallel_3d_geom_to_vec(geometry:Geometry): """Create vectors for ASTRA projection geometries from ODL geometry. The 3D vectors are used to create an ASTRA projection geometry for @@ -461,14 +500,37 @@ def astra_parallel_3d_geom_to_vec(geometry): # ASTRA has (z, y, x) axis convention, in contrast to (x, y, z) in ODL, # so we need to adapt to this by changing the order. - new_ind = [] - for i in range(4): - new_ind += [2 + 3 * i, 1 + 3 * i, 0 + 3 * i] - vectors = vectors[:, new_ind] + vectors = vectors[:, ODL_TO_ASTRA_INDEX_PERMUTATIONS] + return vectors +def astra_parallel_2d_geom_to_parallel3d_vec(geometry:Geometry): + angles = geometry.angles + mid_pt = geometry.det_params.mid_pt + + vectors = np.zeros((angles.shape[-1], 12)) + + # Ray direction = -(detector-to-source normal vector) + vectors[:, 1:3] = -geometry.det_to_src(angles, mid_pt) + + # Center of the detector in 3D space + vectors[:, 4:6] = geometry.det_point_position(angles, mid_pt) + + # Vectors from detector pixel (0, 0) to (1, 0) and (0, 0) to (0, 1) + det_axes = geometry.det_axis(angles) + px_size = geometry.det_partition.cell_sides[0] + vectors[:, 7:9] = det_axes * px_size + vectors[:, 9] = px_size + + # ASTRA has (z, y, x) axis convention, in contrast to (x, y, z) in ODL, + # so we need to adapt to this by changing the order. + vectors = vectors[:, ODL_TO_ASTRA_INDEX_PERMUTATIONS] -def astra_projection_geometry(geometry): + return vectors + +def astra_projection_geometry( + geometry:Geometry, + astra_impl:str): """Create an ASTRA projection geometry from an ODL geometry object. As of ASTRA version 1.7, the length values are not required any more to be @@ -478,7 +540,8 @@ def astra_projection_geometry(geometry): ---------- geometry : `Geometry` ODL projection geometry from which to create the ASTRA geometry. - + astra_impl : str + cuda or cpu Returns ------- proj_geom : dict @@ -487,10 +550,9 @@ def astra_projection_geometry(geometry): if not isinstance(geometry, Geometry): raise TypeError('`geometry` {!r} is not a `Geometry` instance' ''.format(geometry)) - - if 'astra' in geometry.implementation_cache: + if f'astra_{astra_impl}' in geometry.implementation_cache: # Shortcut, reuse already computed value. - return geometry.implementation_cache['astra'] + return geometry.implementation_cache[f'astra_{astra_impl}'] if not geometry.det_partition.is_uniform: raise ValueError('non-uniform detector sampling is not supported') @@ -499,21 +561,37 @@ def astra_projection_geometry(geometry): isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and geometry.ndim == 2): # TODO: change to parallel_vec when available - det_width = geometry.det_partition.cell_sides[0] det_count = geometry.detector.size - # Instead of rotating the data by 90 degrees counter-clockwise, - # we subtract pi/2 from the geometry angles, thereby rotating the - # geometry by 90 degrees clockwise - angles = geometry.angles - np.pi / 2 - proj_geom = astra.create_proj_geom('parallel', det_width, det_count, - angles) + + if astra_impl == 'cpu': + # Instead of rotating the data by 90 degrees counter-clockwise, + # we subtract pi/2 from the geometry angles, thereby rotating the + # geometry by 90 degrees clockwise + angles = geometry.angles - np.pi / 2 + det_width = geometry.det_partition.cell_sides[0] + proj_geom = astra.create_proj_geom('parallel', det_width, det_count, angles) + elif astra_impl == 'cuda': + vec = astra_parallel_2d_geom_to_parallel3d_vec(geometry) + proj_geom = astra.create_proj_geom('parallel3d_vec', 1, det_count, vec) + else: + raise ValueError(f'astra_impl argument can only be "cpu" or "cuda, got {astra_impl}') elif (isinstance(geometry, DivergentBeamGeometry) and isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and geometry.ndim == 2): det_count = geometry.detector.size - vec = astra_conebeam_2d_geom_to_vec(geometry) - proj_geom = astra.create_proj_geom('fanflat_vec', det_count, vec) + det_width = geometry.det_partition.cell_sides[0] + if astra_impl == 'cpu': + vec = astra_conebeam_2d_geom_to_vec(geometry) + proj_geom = astra.create_proj_geom('fanflat_vec', det_count, vec) + elif astra_impl == 'cuda': + det_row_count = 1 + det_col_count = geometry.det_partition.shape[0] + vec = astra_fanflat_2d_geom_to_conebeam_vec(geometry) + proj_geom = astra.create_proj_geom('cone_vec', det_row_count, + det_col_count, vec) + else: + raise ValueError(f'astra_impl argument can only be "cpu" or "cuda, got {astra_impl}') elif (isinstance(geometry, ParallelBeamGeometry) and isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and @@ -538,14 +616,14 @@ def astra_projection_geometry(geometry): raise NotImplementedError('unknown ASTRA geometry type {!r}' ''.format(geometry)) - if 'astra' not in geometry.implementation_cache: + if f'astra_{astra_impl}' not in geometry.implementation_cache: # Save computed value for later - geometry.implementation_cache['astra'] = proj_geom + geometry.implementation_cache[f'astra_{astra_impl}'] = proj_geom return proj_geom -def astra_data(astra_geom, datatype, data=None, ndim=2, allow_copy=AVOID_UNNECESSARY_COPY): +def astra_data(astra_geom:Dict, datatype:str, data=None, ndim:int=2, allow_copy=False): """Create an ASTRA data object. Parameters @@ -601,7 +679,7 @@ def astra_data(astra_geom, datatype, data=None, ndim=2, allow_copy=AVOID_UNNECES # ASTRA checks if data is c-contiguous and aligned if data is not None: if allow_copy: - data_array = np.asarray(data, dtype='float32', order='C') + data_array, array_backend = get_array_and_backend(data) return link(astra_dtype_str, astra_geom, data_array) else: if isinstance(data, np.ndarray): @@ -617,7 +695,10 @@ def astra_data(astra_geom, datatype, data=None, ndim=2, allow_copy=AVOID_UNNECES return create(astra_dtype_str, astra_geom) -def astra_projector(astra_proj_type, astra_vol_geom, astra_proj_geom, ndim): +def astra_projector( + astra_proj_type:str, astra_vol_geom:Dict, astra_proj_geom:Dict, ndim:2, + override_2D = False + ): """Create an ASTRA projector configuration dictionary. Parameters @@ -642,8 +723,6 @@ def astra_projector(astra_proj_type, astra_vol_geom, astra_proj_geom, ndim): raise ValueError('invalid projection geometry dict {}' ''.format(astra_proj_geom)) - ndim = int(ndim) - astra_geom = astra_proj_geom['type'] if ( astra_geom == 'parallel_vec' @@ -694,6 +773,8 @@ def astra_projector(astra_proj_type, astra_vol_geom, astra_proj_geom, ndim): proj_cfg['VolumeGeometry'] = astra_vol_geom proj_cfg['ProjectionGeometry'] = astra_proj_geom proj_cfg['options'] = {} + if override_2D: + proj_cfg['ProjectionKernel'] = '2d_weighting' # Add the approximate 1/r^2 weighting exposed in intermediate versions of # ASTRA @@ -709,7 +790,7 @@ def astra_projector(astra_proj_type, astra_vol_geom, astra_proj_geom, ndim): return astra.projector3d.create(proj_cfg) -def astra_algorithm(direction, ndim, vol_id, sino_id, proj_id, impl): +def astra_algorithm(direction:str, ndim:int, vol_id:int, sino_id:int, proj_id:int, astra_impl:str): """Create an ASTRA algorithm object to run the projector. Parameters @@ -738,13 +819,13 @@ def astra_algorithm(direction, ndim, vol_id, sino_id, proj_id, impl): if ndim not in (2, 3): raise ValueError('{}-dimensional projectors not supported' ''.format(ndim)) - if impl not in ('cpu', 'cuda'): + if astra_impl not in ('cpu', 'cuda'): raise ValueError("`impl` type '{}' not understood" - ''.format(impl)) - if ndim == 3 and impl == 'cpu': + ''.format(astra_impl)) + if ndim == 3 and astra_impl == 'cpu': raise NotImplementedError( '3d algorithms for CPU not supported by ASTRA') - if proj_id is None and impl == 'cpu': + if proj_id is None and astra_impl == 'cpu': raise ValueError("'cpu' implementation requires projector ID") algo_map = {'forward': {2: {'cpu': 'FP', 'cuda': 'FP_CUDA'}, @@ -752,7 +833,7 @@ def astra_algorithm(direction, ndim, vol_id, sino_id, proj_id, impl): 'backward': {2: {'cpu': 'BP', 'cuda': 'BP_CUDA'}, 3: {'cpu': None, 'cuda': 'BP3D_CUDA'}}} - algo_cfg = {'type': algo_map[direction][ndim][impl], + algo_cfg = {'type': algo_map[direction][ndim][astra_impl], 'ProjectorId': proj_id, 'ProjectionDataId': sino_id} if direction == 'forward': @@ -765,5 +846,5 @@ def astra_algorithm(direction, ndim, vol_id, sino_id, proj_id, impl): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/backends/skimage_radon.py b/odl/applications/tomo/backends/skimage_radon.py similarity index 92% rename from odl/tomo/backends/skimage_radon.py rename to odl/applications/tomo/backends/skimage_radon.py index 3e53ef8c6f9..e2a499d1d35 100644 --- a/odl/tomo/backends/skimage_radon.py +++ b/odl/applications/tomo/backends/skimage_radon.py @@ -14,12 +14,12 @@ import numpy as np -from odl.discr import ( +from odl.core.discr import ( DiscretizedSpace, uniform_discr_frompartition, uniform_partition) -from odl.discr.discr_utils import linear_interpolator, point_collocation -from odl.tomo.backends.util import _add_default_complex_impl -from odl.tomo.geometry import Geometry, Parallel2dGeometry -from odl.util.utility import writable_array +from odl.core.discr.discr_utils import linear_interpolator, point_collocation +from odl.applications.tomo.backends.util import _add_default_complex_impl +from odl.applications.tomo.geometry import Geometry, Parallel2dGeometry +from odl.core.util.utility import writable_array try: import skimage @@ -211,11 +211,21 @@ def __init__(self, geometry, vol_space, proj_space): '`vol_space` must be a `DiscretizedSpace` instance, got {!r}' ''.format(vol_space) ) + if vol_space.impl != 'numpy': + raise TypeError( + '`vol_space` implementation must be `numpy`, got {!r}' + ''.format(vol_space.impl) + ) if not isinstance(proj_space, DiscretizedSpace): raise TypeError( '`proj_space` must be a `DiscretizedSpace` instance, got {!r}' ''.format(proj_space) ) + if proj_space.impl != 'numpy': + raise TypeError( + '`proj_space` implementation must be `numpy`, got {!r}' + ''.format(proj_space.impl) + ) if not isinstance(geometry, Parallel2dGeometry): raise TypeError( "{!r} backend only supports 2d parallel geometries" @@ -274,6 +284,6 @@ def call_backward(self, x, out, **kwargs): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/backends/util.py b/odl/applications/tomo/backends/util.py similarity index 86% rename from odl/tomo/backends/util.py rename to odl/applications/tomo/backends/util.py index 17782a56bdc..0963b7d970b 100644 --- a/odl/tomo/backends/util.py +++ b/odl/applications/tomo/backends/util.py @@ -46,20 +46,16 @@ def wrapper(self, x, out=None, **kwargs): if self.vol_space.is_real and self.proj_space.is_real: return fn(self, x, out, **kwargs) elif self.vol_space.is_complex and self.proj_space.is_complex: - result_parts = [ - fn(self, x.real, getattr(out, 'real', None), **kwargs), - fn(self, x.imag, getattr(out, 'imag', None), **kwargs) - ] - if out is None: if x in self.vol_space: range = self.proj_space else: range = self.vol_space - out = range.element() - out.real = result_parts[0] - out.imag = result_parts[1] + out = range.zero() + + fn(self, x.real, out.real, **kwargs) + fn(self, x.imag, out.imag, **kwargs) return out else: diff --git a/odl/tomo/geometry/__init__.py b/odl/applications/tomo/geometry/__init__.py similarity index 100% rename from odl/tomo/geometry/__init__.py rename to odl/applications/tomo/geometry/__init__.py diff --git a/odl/tomo/geometry/conebeam.py b/odl/applications/tomo/geometry/conebeam.py similarity index 98% rename from odl/tomo/geometry/conebeam.py rename to odl/applications/tomo/geometry/conebeam.py index 65074211877..3437be1ccec 100644 --- a/odl/tomo/geometry/conebeam.py +++ b/odl/applications/tomo/geometry/conebeam.py @@ -12,17 +12,17 @@ import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr import uniform_partition -from odl.tomo.geometry.detector import ( +from odl.core.discr import uniform_partition +from odl.applications.tomo.geometry.detector import ( CircularDetector, CylindricalDetector, Flat1dDetector, Flat2dDetector, SphericalDetector) -from odl.tomo.geometry.geometry import ( +from odl.applications.tomo.geometry.geometry import ( AxisOrientedGeometry, DivergentBeamGeometry) -from odl.tomo.util.utility import ( +from odl.applications.tomo.util.utility import ( euler_matrix, is_inside_bounds, transform_system) -from odl.util import array_str, indent, signature_string +from odl.core.util import array_str, indent, signature_string __all__ = ('FanBeamGeometry', 'ConeBeamGeometry', 'cone_beam_geometry', 'helical_geometry') @@ -181,7 +181,7 @@ def __init__(self, apart, dpart, src_radius, det_radius, >>> geom = FanBeamGeometry( ... apart, dpart, ... src_radius=1, det_radius=5, - ... src_shift_func=lambda angle: odl.tomo.flying_focal_spot( + ... src_shift_func=lambda angle: odl.applications.tomo.flying_focal_spot( ... angle, apart=apart, shifts=[(0.1, 0), (0, 0.1)]), ... det_shift_func=lambda angle: [0.0, 0.05]) >>> geom.src_shift_func(geom.angles) @@ -480,7 +480,7 @@ def src_position(self, angle): >>> geom = FanBeamGeometry( ... apart, dpart, ... src_radius=1, det_radius=5, - ... src_shift_func=lambda angle: odl.tomo.flying_focal_spot( + ... src_shift_func=lambda angle: odl.applications.tomo.flying_focal_spot( ... angle, ... apart=apart, ... shifts=[(0.1, 0), (0, 0.1)]), @@ -683,7 +683,7 @@ def __getitem__(self, indices): -------- >>> apart = odl.uniform_partition(0, 4, 4) >>> dpart = odl.uniform_partition(-1, 1, 20) - >>> geom = odl.tomo.FanBeamGeometry(apart, dpart, 50, 100) + >>> geom = odl.applications.tomo.FanBeamGeometry(apart, dpart, 50, 100) Extract sub-geometry with every second angle: @@ -917,7 +917,7 @@ def __init__(self, apart, dpart, src_radius, det_radius, >>> geom = ConeBeamGeometry( ... apart, dpart, ... src_radius=1, det_radius=5, - ... src_shift_func=lambda angle: odl.tomo.flying_focal_spot( + ... src_shift_func=lambda angle: odl.applications.tomo.flying_focal_spot( ... angle, apart=apart, shifts=[(0, 0.1, 0), (0, 0, 0.1)]), ... det_shift_func=lambda angle: [0.0, 0.05, 0.03]) >>> geom.src_shift_func(geom.angles) @@ -1423,7 +1423,7 @@ def src_position(self, angle): >>> geom = ConeBeamGeometry( ... apart, dpart, ... src_radius=1, det_radius=5, - ... src_shift_func=lambda angle: odl.tomo.flying_focal_spot( + ... src_shift_func=lambda angle: odl.applications.tomo.flying_focal_spot( ... angle, apart=apart, shifts=[(0, 0.1, 0), (0, 0, 0.1)]), ... src_to_det_init=(-0.71, 0.71, 0)) >>> geom.angles @@ -1517,7 +1517,7 @@ def __getitem__(self, indices): -------- >>> apart = odl.uniform_partition(0, 4, 4) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], [20, 20]) - >>> geom = odl.tomo.ConeBeamGeometry(apart, dpart, 50, 100, pitch=2) + >>> geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, 50, 100, pitch=2) Extract sub-geometry with every second angle: @@ -1928,5 +1928,5 @@ def helical_geometry(space, src_radius, det_radius, num_turns, if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/geometry/detector.py b/odl/applications/tomo/geometry/detector.py similarity index 99% rename from odl/tomo/geometry/detector.py rename to odl/applications/tomo/geometry/detector.py index d7f08a819e7..cd9f4b15398 100644 --- a/odl/tomo/geometry/detector.py +++ b/odl/applications/tomo/geometry/detector.py @@ -14,12 +14,12 @@ import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr import RectPartition -from odl.tomo.util import is_inside_bounds, perpendicular_vector -from odl.tomo.util.utility import rotation_matrix_from_to -from odl.util import array_str, indent, signature_string +from odl.core.discr import RectPartition +from odl.applications.tomo.util import is_inside_bounds, perpendicular_vector +from odl.applications.tomo.util.utility import rotation_matrix_from_to +from odl.core.util import array_str, indent, signature_string __all__ = ('Detector', 'Flat1dDetector', 'Flat2dDetector', 'CircularDetector', @@ -1420,5 +1420,5 @@ def __str__(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/geometry/geometry.py b/odl/applications/tomo/geometry/geometry.py similarity index 97% rename from odl/tomo/geometry/geometry.py rename to odl/applications/tomo/geometry/geometry.py index b0a38874a89..856436b7101 100644 --- a/odl/tomo/geometry/geometry.py +++ b/odl/applications/tomo/geometry/geometry.py @@ -12,11 +12,11 @@ from builtins import object import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr import RectPartition -from odl.tomo.geometry.detector import Detector -from odl.tomo.util import axis_rotation_matrix, is_inside_bounds +from odl.core.discr import RectPartition +from odl.applications.tomo.geometry.detector import Detector +from odl.applications.tomo.util import axis_rotation_matrix, is_inside_bounds __all__ = ('Geometry', 'DivergentBeamGeometry', 'AxisOrientedGeometry') @@ -287,7 +287,7 @@ def det_point_position(self, mparam, dparam): >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) - >>> geom = odl.tomo.Parallel2dGeometry(apart, dpart) + >>> geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart) >>> geom.det_point_position(0, 0) # (0, 1) + 0 * (1, 0) array([ 0., 1.]) >>> geom.det_point_position(0, 1) # (0, 1) + 1 * (1, 0) @@ -334,7 +334,7 @@ def det_point_position(self, mparam, dparam): >>> apart = odl.uniform_partition([0, 0], [np.pi, 2 * np.pi], ... (10, 20)) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20)) - >>> geom = odl.tomo.Parallel3dEulerGeometry(apart, dpart) + >>> geom = odl.applications.tomo.Parallel3dEulerGeometry(apart, dpart) >>> # 2 values for each variable, resulting in 2 vectors >>> angles = ([0, np.pi / 2], [0, np.pi]) >>> dparams = ([-1, 0], [-1, 0]) @@ -480,7 +480,7 @@ def det_to_src(self, angle, dparam, normalized=True): >>> apart = odl.uniform_partition(0, 2 * np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) - >>> geom = odl.tomo.FanBeamGeometry(apart, dpart, src_radius=2, + >>> geom = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_radius=2, ... det_radius=3) >>> geom.det_to_src(0, 0) array([ 0., -1.]) @@ -622,5 +622,5 @@ def rotation_matrix(self, angle): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/geometry/parallel.py b/odl/applications/tomo/geometry/parallel.py similarity index 98% rename from odl/tomo/geometry/parallel.py rename to odl/applications/tomo/geometry/parallel.py index c74bc55a4b1..623142529c4 100644 --- a/odl/tomo/geometry/parallel.py +++ b/odl/applications/tomo/geometry/parallel.py @@ -12,13 +12,13 @@ import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr import uniform_partition -from odl.tomo.geometry.detector import Flat1dDetector, Flat2dDetector -from odl.tomo.geometry.geometry import AxisOrientedGeometry, Geometry -from odl.tomo.util import euler_matrix, is_inside_bounds, transform_system -from odl.util import array_str, indent, signature_string +from odl.core.discr import uniform_partition +from odl.applications.tomo.geometry.detector import Flat1dDetector, Flat2dDetector +from odl.applications.tomo.geometry.geometry import AxisOrientedGeometry, Geometry +from odl.applications.tomo.util import euler_matrix, is_inside_bounds, transform_system +from odl.core.util import array_str, indent, signature_string __all__ = ('ParallelBeamGeometry', 'Parallel2dGeometry', @@ -241,7 +241,7 @@ def det_to_src(self, angle, dparam): >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) - >>> geom = odl.tomo.Parallel2dGeometry(apart, dpart) + >>> geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart) >>> geom.det_to_src(0, 0) array([ 0., -1.]) >>> geom.det_to_src(0, 1) @@ -678,7 +678,7 @@ def __getitem__(self, indices): -------- >>> apart = odl.uniform_partition(0, 4, 4) >>> dpart = odl.uniform_partition(-1, 1, 20) - >>> geom = odl.tomo.Parallel2dGeometry(apart, dpart) + >>> geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart) Extract sub-geometry with every second angle: @@ -1442,7 +1442,7 @@ def __getitem__(self, indices): -------- >>> apart = odl.uniform_partition(0, 4, 4) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], [20, 20]) - >>> geom = odl.tomo.Parallel3dAxisGeometry(apart, dpart) + >>> geom = odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart) Extract sub-geometry with every second angle: @@ -1590,5 +1590,5 @@ def parallel_beam_geometry(space, num_angles=None, det_shape=None): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/geometry/spect.py b/odl/applications/tomo/geometry/spect.py similarity index 97% rename from odl/tomo/geometry/spect.py rename to odl/applications/tomo/geometry/spect.py index b7dcefc4b5c..95463748552 100644 --- a/odl/tomo/geometry/spect.py +++ b/odl/applications/tomo/geometry/spect.py @@ -11,9 +11,9 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.tomo.geometry.parallel import Parallel3dAxisGeometry -from odl.tomo.util.utility import transform_system -from odl.util import signature_string, indent, array_str +from odl.applications.tomo.geometry.parallel import Parallel3dAxisGeometry +from odl.applications.tomo.util.utility import transform_system +from odl.core.util import signature_string, indent, array_str __all__ = ('ParallelHoleCollimatorGeometry', ) diff --git a/odl/tomo/operators/__init__.py b/odl/applications/tomo/operators/__init__.py similarity index 100% rename from odl/tomo/operators/__init__.py rename to odl/applications/tomo/operators/__init__.py diff --git a/odl/tomo/operators/ray_trafo.py b/odl/applications/tomo/operators/ray_trafo.py similarity index 96% rename from odl/tomo/operators/ray_trafo.py rename to odl/applications/tomo/operators/ray_trafo.py index 64419fd43b6..3b34bc7c161 100644 --- a/odl/tomo/operators/ray_trafo.py +++ b/odl/applications/tomo/operators/ray_trafo.py @@ -14,16 +14,16 @@ import numpy as np -from odl.discr import DiscretizedSpace -from odl.operator import Operator -from odl.space.weighting import ConstWeighting -from odl.tomo.backends import ( +from odl.core.discr import DiscretizedSpace +from odl.core.operator import Operator +from odl.core.space.weightings.weighting import ConstWeighting +from odl.applications.tomo.backends import ( ASTRA_AVAILABLE, ASTRA_CUDA_AVAILABLE, SKIMAGE_AVAILABLE) -from odl.tomo.backends.astra_cpu import AstraCpuImpl -from odl.tomo.backends.astra_cuda import AstraCudaImpl -from odl.tomo.backends.skimage_radon import SkImageImpl -from odl.tomo.geometry import Geometry -from odl.util import is_string +from odl.applications.tomo.backends.astra_cpu import AstraCpuImpl +from odl.applications.tomo.backends.astra_cuda import AstraCudaImpl +from odl.applications.tomo.backends.skimage_radon import SkImageImpl +from odl.applications.tomo.geometry import Geometry +from odl.core.util import is_string # RAY_TRAFO_IMPLS are used by `RayTransform` when no `impl` is given. # The last inserted implementation has highest priority. @@ -126,6 +126,7 @@ def __init__(self, vol_space, geometry, **kwargs): geometry.partition.shape, weighting=weighting, dtype=dtype, + device=vol_space.device ) if geometry.motion_partition.ndim == 0: @@ -382,6 +383,6 @@ def adjoint(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/util/__init__.py b/odl/applications/tomo/util/__init__.py similarity index 100% rename from odl/tomo/util/__init__.py rename to odl/applications/tomo/util/__init__.py diff --git a/odl/tomo/util/source_detector_shifts.py b/odl/applications/tomo/util/source_detector_shifts.py similarity index 94% rename from odl/tomo/util/source_detector_shifts.py rename to odl/applications/tomo/util/source_detector_shifts.py index d37a24a206d..9f5376c2d1b 100644 --- a/odl/tomo/util/source_detector_shifts.py +++ b/odl/applications/tomo/util/source_detector_shifts.py @@ -10,8 +10,8 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr.discr_utils import nearest_interpolator +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.discr.discr_utils import nearest_interpolator __all__ = ('flying_focal_spot',) diff --git a/odl/tomo/util/testutils.py b/odl/applications/tomo/util/testutils.py similarity index 75% rename from odl/tomo/util/testutils.py rename to odl/applications/tomo/util/testutils.py index 0ca3301f1c1..db0b4ded30c 100644 --- a/odl/tomo/util/testutils.py +++ b/odl/applications/tomo/util/testutils.py @@ -32,14 +32,18 @@ def identity(*args, **kwargs): else: skip_if_no_astra = pytest.mark.skipif( - 'not odl.tomo.ASTRA_AVAILABLE', + 'not odl.applications.tomo.ASTRA_AVAILABLE', reason='ASTRA not available', ) skip_if_no_astra_cuda = pytest.mark.skipif( - 'not odl.tomo.ASTRA_CUDA_AVAILABLE', + 'not odl.applications.tomo.ASTRA_CUDA_AVAILABLE', reason='ASTRA CUDA not available', ) skip_if_no_skimage = pytest.mark.skipif( - 'not odl.tomo.SKIMAGE_AVAILABLE', + 'not odl.applications.tomo.SKIMAGE_AVAILABLE', reason='skimage not available', ) + skip_if_no_pytorch = pytest.mark.skipif( + "not 'pytorch' in odl.core.space.entry_points.TENSOR_SPACE_IMPLS", + reason='pytorch not available not available', + ) diff --git a/odl/tomo/util/utility.py b/odl/applications/tomo/util/utility.py similarity index 99% rename from odl/tomo/util/utility.py rename to odl/applications/tomo/util/utility.py index 5cde100512e..e670c2e3eb7 100644 --- a/odl/tomo/util/utility.py +++ b/odl/applications/tomo/util/utility.py @@ -9,7 +9,7 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY __all__ = ('euler_matrix', 'axis_rotation', 'axis_rotation_matrix', @@ -676,5 +676,5 @@ def is_inside_bounds(value, params): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/backends/arrays/npy_tensors.py b/odl/backends/arrays/npy_tensors.py new file mode 100644 index 00000000000..ca9fc4f5052 --- /dev/null +++ b/odl/backends/arrays/npy_tensors.py @@ -0,0 +1,487 @@ +# Copyright 2014-2020 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""NumPy implementation of tensor spaces.""" + +from __future__ import absolute_import, division, print_function + +from odl.core.set.space import LinearSpaceElement +from odl.core.space.base_tensors import Tensor, TensorSpace +from odl.core.util import is_numeric_dtype +from odl.core.array_API_support import ArrayBackend + +import array_api_compat.numpy as xp + +__all__ = ('NumpyTensorSpace','numpy_array_backend') + +def _npy_to_device(x, device): + if device == 'cpu': + return x + else: + raise ValueError(f"NumPy only supports device CPU, not {device}.") + +numpy_array_backend = ArrayBackend( + impl = 'numpy', + available_dtypes = { + key : xp.dtype(key) for key in [ + "bool", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + "float32", + "float64", + "complex64", + "complex128", + ]}, + array_namespace = xp, + array_constructor = xp.asarray, + from_dlpack = xp.from_dlpack, + array_type = xp.ndarray, + make_contiguous = lambda x: x if x.data.c_contiguous else xp.ascontiguousarray(x), + identifier_of_dtype = lambda dt: str(dt), + available_devices = ['cpu'], + to_cpu = lambda x: x, + to_numpy = lambda x : x, + to_device = _npy_to_device + ) + +class NumpyTensorSpace(TensorSpace): + + """Set of tensors of arbitrary data type, implemented with NumPy. + + A tensor is, in the most general sense, a multi-dimensional array + that allows operations per entry (keep the rank constant), + reductions / contractions (reduce the rank) and broadcasting + (raises the rank). + For non-numeric data type like ``object``, the range of valid + operations is rather limited since such a set of tensors does not + define a vector space. + Any numeric data type, on the other hand, is considered valid for + a tensor space, although certain operations - like division with + integer dtype - are not guaranteed to yield reasonable results. + + Under these restrictions, all basic vector space operations are + supported by this class, along with reductions based on arithmetic + or comparison, and element-wise mathematical functions ("ufuncs"). + + This class is implemented using `numpy.ndarray`'s as back-end. + + See the `Wikipedia article on tensors`_ for further details. + See also [Hac2012] "Part I Algebraic Tensors" for a rigorous + treatment of tensors with a definition close to this one. + + Note also that this notion of tensors is the same as in popular + Deep Learning frameworks. + + References + ---------- + [Hac2012] Hackbusch, W. *Tensor Spaces and Numerical Tensor Calculus*. + Springer, 2012. + + .. _Wikipedia article on tensors: https://en.wikipedia.org/wiki/Tensor + """ + + def __init__(self, shape, dtype='float64', device = 'cpu', **kwargs): + r"""Initialize a new instance. + + Parameters + ---------- + shape : positive int or sequence of positive ints + Number of entries per axis for elements in this space. A + single integer results in a space with rank 1, i.e., 1 axis. + dtype (str): optional + Data type of each element. Defaults to 'float64' + device (str): + Device on which the data is. For Numpy, it must be 'cpu'. + + Other Parameters + ---------------- + weighting : optional + Use weighted inner product, norm, and dist. The following + types are supported as ``weighting``: + + ``None``: no weighting, i.e. weighting with ``1.0`` (default). + + `Weighting`: Use this weighting as-is. Compatibility + with this space's elements is not checked during init. + + ``float``: Weighting by a constant. + + array-like: Pointwise weighting by an array. + + This option cannot be combined with ``dist``, + ``norm`` or ``inner``. It also cannot be used in case of + non-numeric ``dtype``. + + dist : callable, optional + Distance function defining a metric on the space. + It must accept two `NumpyTensor` arguments and return + a non-negative real number. See ``Notes`` for + mathematical requirements. + + By default, ``dist(x, y)`` is calculated as ``norm(x - y)``. + + This option cannot be combined with ``weight``, + ``norm`` or ``inner``. It also cannot be used in case of + non-numeric ``dtype``. + + norm : callable, optional + The norm implementation. It must accept a + `NumpyTensor` argument, return a non-negative real number. + See ``Notes`` for mathematical requirements. + + By default, ``norm(x)`` is calculated as ``inner(x, x)``. + + This option cannot be combined with ``weight``, + ``dist`` or ``inner``. It also cannot be used in case of + non-numeric ``dtype``. + + inner : callable, optional + The inner product implementation. It must accept two + `NumpyTensor` arguments and return an element of the field + of the space (usually real or complex number). + See ``Notes`` for mathematical requirements. + + This option cannot be combined with ``weight``, + ``dist`` or ``norm``. It also cannot be used in case of + non-numeric ``dtype``. + + exponent : positive float, optional + Exponent of the norm. For values other than 2.0, no + inner product is defined. + + This option has no impact if either ``dist``, ``norm`` or + ``inner`` is given, or if ``dtype`` is non-numeric. + + Default: 2.0 + + kwargs : + Further keyword arguments are passed to the weighting + classes. + + See Also + -------- + odl.core.space.space_utils.rn : constructor for real tensor spaces + odl.core.space.space_utils.cn : constructor for complex tensor spaces + odl.core.space.space_utils.tensor_space : + constructor for tensor spaces of arbitrary scalar data type + + Notes + ----- + - A distance function or metric on a space :math:`\mathcal{X}` + is a mapping + :math:`d:\mathcal{X} \times \mathcal{X} \to \mathbb{R}` + satisfying the following conditions for all space elements + :math:`x, y, z`: + + * :math:`d(x, y) \geq 0`, + * :math:`d(x, y) = 0 \Leftrightarrow x = y`, + * :math:`d(x, y) = d(y, x)`, + * :math:`d(x, y) \leq d(x, z) + d(z, y)`. + + - A norm on a space :math:`\mathcal{X}` is a mapping + :math:`\| \cdot \|:\mathcal{X} \to \mathbb{R}` + satisfying the following conditions for all + space elements :math:`x, y`: and scalars :math:`s`: + + * :math:`\| x\| \geq 0`, + * :math:`\| x\| = 0 \Leftrightarrow x = 0`, + * :math:`\| sx\| = |s| \cdot \| x \|`, + * :math:`\| x+y\| \leq \| x\| + + \| y\|`. + + - An inner product on a space :math:`\mathcal{X}` over a field + :math:`\mathbb{F} = \mathbb{R}` or :math:`\mathbb{C}` is a + mapping + :math:`\langle\cdot, \cdot\rangle: \mathcal{X} \times + \mathcal{X} \to \mathbb{F}` + satisfying the following conditions for all + space elements :math:`x, y, z`: and scalars :math:`s`: + + * :math:`\langle x, y\rangle = + \overline{\langle y, x\rangle}`, + * :math:`\langle sx + y, z\rangle = s \langle x, z\rangle + + \langle y, z\rangle`, + * :math:`\langle x, x\rangle = 0 \Leftrightarrow x = 0`. + + Examples + -------- + Explicit initialization with the class constructor: + + >>> space = NumpyTensorSpace(3, float) + >>> space + rn(3) + >>> space.shape + (3,) + >>> space.dtype + dtype('float64') + + A more convenient way is to use factory functions: + + >>> space = odl.rn(3, weighting=[1, 2, 3]) + >>> space + rn(3, weighting=[1, 2, 3]) + >>> space = odl.tensor_space((2, 3), dtype=int) + >>> space + tensor_space((2, 3), 'int32') + """ + super(NumpyTensorSpace, self).__init__(shape, dtype, device, **kwargs) + + ########## Attributes ########## + @property + def array_backend(self) -> ArrayBackend: + return numpy_array_backend + + @property + def array_namespace(self): + """Name of the array_namespace""" + return xp + + @property + def element_type(self): + """Type of elements in this space: `NumpyTensor`.""" + return NumpyTensor + + @property + def impl(self): + """Name of the implementation back-end: ``'numpy'``.""" + return 'numpy' + + ######### public methods ######### + def broadcast_to(self, inp): + arr = self.array_namespace.broadcast_to( + self.array_namespace.asarray(inp, device=self.device), + self.shape + ) + # Make sure the result is writeable, if not make copy. + # This happens for e.g. results of `np.broadcast_to()`. + if not arr.flags.writeable: + arr = arr.copy() + return arr + + ######### private methods ######### + +class NumpyTensor(Tensor): + + """Representation of a `NumpyTensorSpace` element.""" + + def __init__(self, space, data): + """Initialize a new instance.""" + # Tensor.__init__(self, space) + LinearSpaceElement.__init__(self, space) + self.__data = xp.asarray(data, dtype=space.dtype, device=space.device) + + @property + def data(self): + """The `numpy.ndarray` representing the data of ``self``.""" + return self.__data + + @data.setter + def data(self, value): + self.__data = value + + def _assign(self, other, avoid_deep_copy): + """Assign the values of ``other``, which is assumed to be in the + same space, to ``self``.""" + if avoid_deep_copy: + self.__data = other.__data + else: + self.__data[:] = other.__data + + ######### Public methods ######### + def copy(self): + """Return an identical (deep) copy of this tensor. + + Parameters + ---------- + None + + Returns + ------- + copy : `NumpyTensor` + The deep copy + + Examples + -------- + >>> space = odl.rn(3) + >>> x = space.element([1, 2, 3]) + >>> y = x.copy() + >>> y == x + True + >>> y is x + False + """ + return self.space.element(self.data.copy()) + + def __getitem__(self, indices): + """Return ``self[indices]``. + + Parameters + ---------- + indices : index expression + Integer, slice or sequence of these, defining the positions + of the data array which should be accessed. + + Returns + ------- + values : `NumpyTensorSpace.dtype` or `NumpyTensor` + The value(s) at the given indices. Note that the returned + object is a writable view into the original tensor, except + for the case when ``indices`` is a list. + + Examples + -------- + For one-dimensional spaces, indexing is as in linear arrays: + + >>> space = odl.rn(3) + >>> x = space.element([1, 2, 3]) + >>> x[0] + 1.0 + >>> x[1:] + rn(2).element([ 2., 3.]) + + In higher dimensions, the i-th index expression accesses the + i-th axis: + + >>> space = odl.rn((2, 3)) + >>> x = space.element([[1, 2, 3], + ... [4, 5, 6]]) + >>> x[0, 1] + 2.0 + >>> x[:, 1:] + rn((2, 2)).element( + [[ 2., 3.], + [ 5., 6.]] + ) + + Slices can be assigned to, except if lists are used for indexing: + + >>> y = x[:, ::2] # view into x + >>> y[:] = -9 + >>> x + rn((2, 3)).element( + [[-9., 2., -9.], + [-9., 5., -9.]] + ) + >>> y = x[[0, 1], [1, 2]] # not a view, won't modify x + >>> y + rn(2).element([ 2., -9.]) + >>> y[:] = 0 + >>> x + rn((2, 3)).element( + [[-9., 2., -9.], + [-9., 5., -9.]] + ) + """ + # Lazy implementation: index the array and deal with it + if isinstance(indices, NumpyTensor): + indices = indices.data + arr = self.data[indices] + + if xp.isscalar(arr): + if self.space.field is not None: + return self.space.field.element(arr) + else: + return arr + else: + if is_numeric_dtype(self.dtype): + weighting = self.space.weighting + else: + weighting = None + space = type(self.space)( + arr.shape, dtype=self.dtype, exponent=self.space.exponent, + weighting=weighting) + return space.element(arr, copy=False) + + def __setitem__(self, indices, values): + """Implement ``self[indices] = values``. + + Parameters + ---------- + indices : index expression + Integer, slice or sequence of these, defining the positions + of the data array which should be written to. + values : scalar, array-like or `NumpyTensor` + The value(s) that are to be assigned. + + If ``index`` is an integer, ``value`` must be a scalar. + + If ``index`` is a slice or a sequence of slices, ``value`` + must be broadcastable to the shape of the slice. + + Examples + -------- + For 1d spaces, entries can be set with scalars or sequences of + correct shape: + + >>> space = odl.rn(3) + >>> x = space.element([1, 2, 3]) + >>> x[0] = -1 + >>> x[1:] = (0, 1) + >>> x + rn(3).element([-1., 0., 1.]) + + It is also possible to use tensors of other spaces for + casting and assignment: + + >>> space = odl.rn((2, 3)) + >>> x = space.element([[1, 2, 3], + ... [4, 5, 6]]) + >>> x[0, 1] = -1 + >>> x + rn((2, 3)).element( + [[ 1., -1., 3.], + [ 4., 5., 6.]] + ) + >>> short_space = odl.tensor_space((2, 2), dtype='int32') + >>> y = short_space.element([[-1, 2], + ... [0, 0]]) + >>> x[:, :2] = y + >>> x + rn((2, 3)).element( + [[-1., 2., 3.], + [ 0., 0., 6.]] + ) + + The Numpy assignment and broadcasting rules apply: + + >>> x[:] = np.array([[0, 0, 0], + ... [1, 1, 1]]) + >>> x + rn((2, 3)).element( + [[ 0., 0., 0.], + [ 1., 1., 1.]] + ) + >>> x[:, 1:] = [7, 8] + >>> x + rn((2, 3)).element( + [[ 0., 7., 8.], + [ 1., 7., 8.]] + ) + >>> x[:, ::2] = -2. + >>> x + rn((2, 3)).element( + [[-2., 7., -2.], + [-2., 7., -2.]] + ) + """ + if isinstance(indices, type(self)): + indices = indices.data + if isinstance(values, type(self)): + values = values.data + + self.data[indices] = values + +if __name__ == '__main__': + from odl.core.util.testutils import run_doctests + run_doctests() diff --git a/odl/backends/arrays/pytorch_tensors.py b/odl/backends/arrays/pytorch_tensors.py new file mode 100644 index 00000000000..f59a5e38be3 --- /dev/null +++ b/odl/backends/arrays/pytorch_tensors.py @@ -0,0 +1,528 @@ +# Copyright 2014-2020 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""NumPy implementation of tensor spaces.""" + +from __future__ import absolute_import, division, print_function + +from odl.core.set.space import LinearSpaceElement +from odl.core.space.base_tensors import Tensor, TensorSpace +from odl.core.util import is_numeric_dtype +from odl.core.array_API_support import ArrayBackend + +import numpy as np + +# Only for module availability checking +import importlib.util +from os import path +from sys import argv + +torch_module = importlib.util.find_spec("torch") +if torch_module is not None: + import torch + import array_api_compat.torch as xp + PYTORCH_AVAILABLE = True +else: + if path.basename(argv[0]) == 'pytest': + # If running the doctest suite, we should be able to load this + # module (without running anything) even if Torch is not installed. + PYTORCH_AVAILABLE = False + import pytest + pytest.skip(allow_module_level=True) + else: + raise ImportError("You are trying to use the PyTorch backend, but" + + " the `torch` dependency is not available." + + "\nEither use a different backend, or install" + + " a suitable version of Torch." ) + +__all__ = ( + 'PYTORCH_AVAILABLE', + 'PyTorchTensorSpace', + 'pytorch_array_backend' + + ) +if PYTORCH_AVAILABLE: + device_strings = ['cpu'] + [f'cuda:{i}' for i in range(torch.cuda.device_count())] + +def to_numpy(x): + if isinstance(x, (int, float, bool, complex)): + return x + elif isinstance(x, Tensor): + return x.data.detach().cpu().numpy() + else: + return x.detach().cpu().numpy() + +def from_dlpack(x, device='cpu', copy=None): + """This should theoretically be a stand-in for `from_dlpack` in the Torch instantiation + of the Array API. That function varies however in behaviour between current PyTorch versions, + causing numerous failures. So instead, for now we manually implement conversions from the + alternative backends relevant to ODL (at the moment, NumPy and PyTorch itself). + """ + if isinstance(x, torch.Tensor): + if x.device == device and copy != True: + return x + return x.to(device) + elif isinstance(x, np.ndarray): + return torch.tensor(x, device=torch.device(device)) + else: + raise NotImplementedError(f"With PyTorch {torch.__version__}, currently no way to handle input of type {type(x)}.") + +if PYTORCH_AVAILABLE: + pytorch_array_backend = ArrayBackend( + impl = 'pytorch', + available_dtypes = { + "bool" : xp.bool, + "int8" : xp.int8, + "int16" : xp.int16, + "int32" : xp.int32, + "int64" : xp.int64, + "uint8" : xp.uint8, + "uint16" : xp.uint16, + "uint32" : xp.uint32, + "uint64" : xp.uint64, + "float32" : xp.float32, + "float64" :xp.float64, + "complex64" : xp.complex64, + "complex128" : xp.complex128, + }, + array_namespace = xp, + array_constructor = xp.asarray, + from_dlpack = from_dlpack, + array_type = xp.Tensor, + make_contiguous = lambda x: x if x.data.is_contiguous() else x.contiguous(), + identifier_of_dtype = lambda dt: (dt) if dt in [int, bool, float, complex] else str(dt).split('.')[-1], + available_devices = device_strings, + to_cpu = lambda x: x if isinstance(x, (int, float, bool, complex)) else x.detach().cpu(), + to_numpy = to_numpy, + to_device = lambda x, device: x.to(device) + ) +else: + pytorch_array_backend = None + +class PyTorchTensorSpace(TensorSpace): + + """Set of tensors of arbitrary data type, implemented with PyTorch. + + A tensor is, in the most general sense, a multi-dimensional array + that allows operations per entry (keep the rank constant), + reductions / contractions (reduce the rank) and broadcasting + (raises the rank). + For non-numeric data type like ``object``, the range of valid + operations is rather limited since such a set of tensors does not + define a vector space. + Any numeric data type, on the other hand, is considered valid for + a tensor space, although certain operations - like division with + integer dtype - are not guaranteed to yield reasonable results. + + Under these restrictions, all basic vector space operations are + supported by this class, along with reductions based on arithmetic + or comparison, and element-wise mathematical functions. + + This class is implemented using `torch.Tensor`'s as back-end. + + See the `Wikipedia article on tensors`_ for further details. + See also [Hac2012] "Part I Algebraic Tensors" for a rigorous + treatment of tensors with a definition close to this one. + + Note also that this notion of tensors is the same as in popular + Deep Learning frameworks. + + References + ---------- + [Hac2012] Hackbusch, W. *Tensor Spaces and Numerical Tensor Calculus*. + Springer, 2012. + + .. _Wikipedia article on tensors: https://en.wikipedia.org/wiki/Tensor + """ + + def __init__(self, shape, dtype='float64', device = 'cpu', requires_grad=False, **kwargs): + r"""Initialize a new instance. + + Parameters + ---------- + shape : positive int or sequence of positive ints + Number of entries per axis for elements in this space. A + single integer results in a space with rank 1, i.e., 1 axis. + dtype (str): optional + Data type of each element. Defaults to 'float64' + device (str): + Device on which the data is. Defaults to 'cpu' + requires_grad (bool): + Is True if gradients need to be computed for this Tensor using PyTorch's autograd engine, False otherwise. + + Other Parameters + ---------------- + weighting : optional + Use weighted inner product, norm, and dist. The following + types are supported as ``weighting``: + + ``None``: no weighting, i.e. weighting with ``1.0`` (default). + + `Weighting`: Use this weighting as-is. Compatibility + with this space's elements is not checked during init. + + ``float``: Weighting by a constant. + + array-like: Pointwise weighting by an array. + + This option cannot be combined with ``dist``, + ``norm`` or ``inner``. It also cannot be used in case of + non-numeric ``dtype``. + + dist : callable, optional + Distance function defining a metric on the space. + It must accept two `PyTorchTensor` arguments and return + a non-negative real number. See ``Notes`` for + mathematical requirements. + + By default, ``dist(x, y)`` is calculated as ``norm(x - y)``. + + This option cannot be combined with ``weight``, + ``norm`` or ``inner``. It also cannot be used in case of + non-numeric ``dtype``. + + norm : callable, optional + The norm implementation. It must accept a + `PyTorchTensor` argument, return a non-negative real number. + See ``Notes`` for mathematical requirements. + + By default, ``norm(x)`` is calculated as ``inner(x, x)``. + + This option cannot be combined with ``weight``, + ``dist`` or ``inner``. It also cannot be used in case of + non-numeric ``dtype``. + + inner : callable, optional + The inner product implementation. It must accept two + `PyTorchTensor` arguments and return an element of the field + of the space (usually real or complex number). + See ``Notes`` for mathematical requirements. + + This option cannot be combined with ``weight``, + ``dist`` or ``norm``. It also cannot be used in case of + non-numeric ``dtype``. + + exponent : positive float, optional + Exponent of the norm. For values other than 2.0, no + inner product is defined. + + This option has no impact if either ``dist``, ``norm`` or + ``inner`` is given, or if ``dtype`` is non-numeric. + + Default: 2.0 + + kwargs : + Further keyword arguments are passed to the weighting + classes. + + See Also + -------- + odl.core.space.space_utils.rn : constructor for real tensor spaces + odl.core.space.space_utils.cn : constructor for complex tensor spaces + odl.core.space.space_utils.tensor_space : + constructor for tensor spaces of arbitrary scalar data type + + Notes + ----- + - A distance function or metric on a space :math:`\mathcal{X}` + is a mapping + :math:`d:\mathcal{X} \times \mathcal{X} \to \mathbb{R}` + satisfying the following conditions for all space elements + :math:`x, y, z`: + + * :math:`d(x, y) \geq 0`, + * :math:`d(x, y) = 0 \Leftrightarrow x = y`, + * :math:`d(x, y) = d(y, x)`, + * :math:`d(x, y) \leq d(x, z) + d(z, y)`. + + - A norm on a space :math:`\mathcal{X}` is a mapping + :math:`\| \cdot \|:\mathcal{X} \to \mathbb{R}` + satisfying the following conditions for all + space elements :math:`x, y`: and scalars :math:`s`: + + * :math:`\| x\| \geq 0`, + * :math:`\| x\| = 0 \Leftrightarrow x = 0`, + * :math:`\| sx\| = |s| \cdot \| x \|`, + * :math:`\| x+y\| \leq \| x\| + + \| y\|`. + + - An inner product on a space :math:`\mathcal{X}` over a field + :math:`\mathbb{F} = \mathbb{R}` or :math:`\mathbb{C}` is a + mapping + :math:`\langle\cdot, \cdot\rangle: \mathcal{X} \times + \mathcal{X} \to \mathbb{F}` + satisfying the following conditions for all + space elements :math:`x, y, z`: and scalars :math:`s`: + + * :math:`\langle x, y\rangle = + \overline{\langle y, x\rangle}`, + * :math:`\langle sx + y, z\rangle = s \langle x, z\rangle + + \langle y, z\rangle`, + * :math:`\langle x, x\rangle = 0 \Leftrightarrow x = 0`. + + Examples + -------- + Explicit initialization with the class constructor: + + >>> space = PyTorchTensorSpace(3, dtype=float) + >>> space + rn(3, 'float64', 'pytorch') + >>> space.shape + (3,) + >>> space.dtype + torch.float64 + """ + super(PyTorchTensorSpace, self).__init__(shape, dtype, device, **kwargs) + + ########## Attributes ########## + @property + def array_backend(self) -> ArrayBackend: + return pytorch_array_backend + + @property + def array_namespace(self): + """Name of the array_namespace""" + return xp + + @property + def element_type(self): + """Type of elements in this space: `PyTorchTensor`.""" + return PyTorchTensor + + @property + def impl(self): + """Name of the implementation back-end: ``'pytorch'``.""" + return 'pytorch' + + ######### public methods ######### + def broadcast_to(self, inp): + arr = self.array_namespace.broadcast_to( + self.array_namespace.asarray(inp, device=self.device), + self.shape + ) + return arr + + ######### private methods ######### + +class PyTorchTensor(Tensor): + + """Representation of a `PyTorchTensorrSpace` element.""" + + def __init__(self, space, data, requires_grad=False): + """Initialize a new instance.""" + # Tensor.__init__(self, space) + LinearSpaceElement.__init__(self, space) + self.__data = xp.asarray(data, dtype=space.dtype, device=space.device, requires_grad = requires_grad) + + @property + def data(self): + """The `torch.Tensor` representing the data of ``self``.""" + return self.__data + + @data.setter + def data(self, value): + self.__data = value + + + def _assign(self, other, avoid_deep_copy): + """Assign the values of ``other``, which is assumed to be in the + same space, to ``self``.""" + if avoid_deep_copy: + self.__data = other.__data + else: + self.__data[:] = other.__data + + ######### Public methods ######### + def copy(self): + """Return an identical (deep) copy of this tensor. + + Parameters + ---------- + None + + Returns + ------- + copy : `PyTorchTensor` + The deep copy + + Examples + -------- + >>> space = odl.rn(3, impl='pytorch') + >>> x = space.element([1, 2, 3]) + >>> y = x.copy() + >>> y == x + True + >>> y is x + False + """ + return self.space.element(self.data.clone()) + + def __getitem__(self, indices): + """Return ``self[indices]``. + + Parameters + ---------- + indices : index expression + Integer, slice or sequence of these, defining the positions + of the data array which should be accessed. + + Returns + ------- + values : `PyTorchTensorSpace.dtype` or `PyTorchTensor` + The value(s) at the given indices. Note that the returned + object is a writable view into the original tensor, except + for the case when ``indices`` is a list. + + Examples + -------- + For one-dimensional spaces, indexing is as in linear arrays: + + >>> space = odl.rn(3, impl='pytorch') + >>> x = space.element([1, 2, 3]) + >>> x[0] + 1.0 + >>> x[1:] + rn(2, 'float64', 'pytorch').element([ 2., 3.]) + + In higher dimensions, the i-th index expression accesses the + i-th axis: + + >>> space = odl.rn((2, 3), impl='pytorch') + >>> x = space.element([[1, 2, 3], + ... [4, 5, 6]]) + >>> x[0, 1] + 2.0 + >>> x[:, 1:] + rn((2, 2), 'float64', 'pytorch').element( + [[ 2., 3.], + [ 5., 6.]] + ) + + Slices can be assigned to, except if lists are used for indexing: + + >>> y = x[:, ::2] # view into x + >>> y[:] = -9 + >>> x + rn((2, 3), 'float64', 'pytorch').element( + [[-9., 2., -9.], + [-9., 5., -9.]] + ) + >>> y = x[[0, 1], [1, 2]] # not a view, won't modify x + >>> y + rn(2, 'float64', 'pytorch').element([ 2., -9.]) + >>> y[:] = 0 + >>> x + rn((2, 3), 'float64', 'pytorch').element( + [[-9., 2., -9.], + [-9., 5., -9.]] + ) + """ + # Lazy implementation: index the array and deal with it + if isinstance(indices, type(self)): + indices = indices.data + arr = self.data[indices] + + if arr.ndim == 0: + if self.space.field is not None: + return self.space.field.element(arr) + else: + return arr + else: + if is_numeric_dtype(self.dtype): + weighting = self.space.weighting + else: + weighting = None + space = type(self.space)( + arr.shape, dtype=self.dtype, exponent=self.space.exponent, + weighting=weighting, device=self.device) + return space.element(arr, copy=False) + + def __setitem__(self, indices, values): + """Implement ``self[indices] = values``. + + Parameters + ---------- + indices : index expression + Integer, slice or sequence of these, defining the positions + of the data array which should be written to. + values : scalar, array-like or `PyTorchTensor` + The value(s) that are to be assigned. + + If ``index`` is an integer, ``value`` must be a scalar. + + If ``index`` is a slice or a sequence of slices, ``value`` + must be broadcastable to the shape of the slice. + + Examples + -------- + For 1d spaces, entries can be set with scalars or sequences of + correct shape: + + >>> space = PyTorchTensorSpace(3, dtype=float) + >>> x = space.element([1, 2, 3]) + >>> x[0] = -1 + >>> x[1:] = (0, 1) + >>> x + rn(3, 'float64', 'pytorch').element([-1., 0., 1.]) + + It is also possible to use tensors of other spaces for + casting and assignment: + + >>> space = PyTorchTensorSpace((2,3), dtype=float) + >>> x = space.element([[1, 2, 3], + ... [4, 5, 6]]) + >>> x[0, 1] = -1 + >>> x + rn((2, 3), 'float64', 'pytorch').element( + [[ 1., -1., 3.], + [ 4., 5., 6.]] + ) + >>> short_space = PyTorchTensorSpace((2, 2), dtype='int32') + >>> y = short_space.element([[-1, 2], + ... [0, 0]]) + >>> x[:, :2] = y + >>> x + rn((2, 3), 'float64', 'pytorch').element( + [[-1., 2., 3.], + [ 0., 0., 6.]] + ) + + The PyTorch assignment and broadcasting rules apply: + + >>> x[:] = torch.tensor([[0, 0, 0], + ... [1, 1, 1]]) + >>> x + rn((2, 3), 'float64', 'pytorch').element( + [[ 0., 0., 0.], + [ 1., 1., 1.]] + ) + >>> x[:, 1:] = [7, 8] + >>> x + rn((2, 3), 'float64', 'pytorch').element( + [[ 0., 7., 8.], + [ 1., 7., 8.]] + ) + >>> x[:, ::2] = -2. + >>> x + rn((2, 3), 'float64', 'pytorch').element( + [[-2., 7., -2.], + [-2., 7., -2.]] + ) + """ + if isinstance(indices, type(self)): + indices = indices.data + if isinstance(values, type(self)): + values = values.data + + if isinstance(values, (list, tuple)): + values = self.array_backend.array_constructor(values) + self.data[indices] = values + +if __name__ == '__main__': + from odl.core.util.testutils import run_doctests + run_doctests() diff --git a/odl/backends/sparse/pytorch_backend.py b/odl/backends/sparse/pytorch_backend.py new file mode 100644 index 00000000000..cb6710f4064 --- /dev/null +++ b/odl/backends/sparse/pytorch_backend.py @@ -0,0 +1,17 @@ +from torch import sparse_coo_tensor, Tensor, sparse_coo, matmul + +from odl.core.sparse.sparse_template import SparseMatrixFormat, _registered_sparse_formats + +def is_sparse_COO(matrix): + return isinstance(matrix, Tensor) and matrix.is_sparse and matrix.layout == sparse_coo + +if ('pytorch' not in _registered_sparse_formats + or 'COO' not in _registered_sparse_formats['pytorch']): + pytorch_coo_tensor = SparseMatrixFormat( + sparse_format='COO', + impl = 'pytorch', + constructor = sparse_coo_tensor, + is_of_this_sparse_format = is_sparse_COO, + to_dense = lambda matrix: matrix.to_dense(), + matmul_spmatrix_with_vector = matmul + ) diff --git a/odl/backends/sparse/scipy_backend.py b/odl/backends/sparse/scipy_backend.py new file mode 100644 index 00000000000..c3eddd1a9b1 --- /dev/null +++ b/odl/backends/sparse/scipy_backend.py @@ -0,0 +1,15 @@ +from scipy.sparse import coo_matrix + +from odl.core.sparse.sparse_template import SparseMatrixFormat, _registered_sparse_formats + +if ('scipy' not in _registered_sparse_formats + or 'COO' not in _registered_sparse_formats['scipy']): + scipy_coo_tensor = SparseMatrixFormat( + sparse_format='COO', + impl = 'scipy', + constructor = coo_matrix, + is_of_this_sparse_format = lambda x : isinstance(x, coo_matrix), + to_dense = lambda matrix: matrix.toarray(), + matmul_spmatrix_with_vector = lambda matrix, x: matrix.dot(x) + ) + diff --git a/odl/contrib/datasets/ct/examples/fips_reconstruct.py b/odl/contrib/datasets/ct/examples/fips_reconstruct.py index 94c01c8803d..5930cfe8e62 100644 --- a/odl/contrib/datasets/ct/examples/fips_reconstruct.py +++ b/odl/contrib/datasets/ct/examples/fips_reconstruct.py @@ -7,8 +7,8 @@ space = odl.uniform_discr([-20, -20], [20, 20], [2296, 2296]) geometry = odl.contrib.datasets.ct.fips.walnut_geometry() -ray_transform = odl.tomo.RayTransform(space, geometry) -fbp_op = odl.tomo.fbp_op(ray_transform, filter_type='Hann') +ray_transform = odl.applications.tomo.RayTransform(space, geometry) +fbp_op = odl.applications.tomo.fbp_op(ray_transform, filter_type='Hann') data = fips.walnut_data() fbp_op(data).show('Walnut FBP reconstruction', clim=[0, 0.05]) @@ -17,8 +17,8 @@ space = odl.uniform_discr([-50, -50], [50, 50], [2240, 2240]) geometry = fips.lotus_root_geometry() -ray_transform = odl.tomo.RayTransform(space, geometry) -fbp_op = odl.tomo.fbp_op(ray_transform, filter_type='Hann') +ray_transform = odl.applications.tomo.RayTransform(space, geometry) +fbp_op = odl.applications.tomo.fbp_op(ray_transform, filter_type='Hann') data = fips.lotus_root_data() fbp_op(data).show('Lotus root FBP reconstruction', clim=[0, 0.1]) diff --git a/odl/contrib/datasets/ct/examples/mayo_reconstruct.py b/odl/contrib/datasets/ct/examples/mayo_reconstruct.py index 81f211d47a7..b56c2406c1e 100644 --- a/odl/contrib/datasets/ct/examples/mayo_reconstruct.py +++ b/odl/contrib/datasets/ct/examples/mayo_reconstruct.py @@ -23,13 +23,13 @@ # Reconstruction space and ray transform space = odl.uniform_discr_frompartition(partition, dtype='float32') -ray_trafo = odl.tomo.RayTransform(space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(space, geometry) # Define FBP operator -fbp = odl.tomo.fbp_op(ray_trafo, padding=True) +fbp = odl.applications.tomo.fbp_op(ray_trafo, padding=True) # Tam-Danielsson window to handle redundant data -td_window = odl.tomo.tam_danielson_window(ray_trafo, n_pi=3) +td_window = odl.applications.tomo.tam_danielson_window(ray_trafo, n_pi=3) # Calculate FBP reconstruction fbp_result = fbp(td_window * proj_data) diff --git a/odl/contrib/datasets/ct/fips.py b/odl/contrib/datasets/ct/fips.py index 1a3cd5ccdf2..6acaf12a122 100644 --- a/odl/contrib/datasets/ct/fips.py +++ b/odl/contrib/datasets/ct/fips.py @@ -17,8 +17,8 @@ from __future__ import division import numpy as np from odl.contrib.datasets.util import get_data -from odl.discr import uniform_partition -from odl.tomo import FanBeamGeometry +from odl.core.discr import uniform_partition +from odl.applications.tomo import FanBeamGeometry __all__ = ('walnut_data', 'walnut_geometry', @@ -149,5 +149,5 @@ def lotus_root_geometry(): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/datasets/ct/mayo.py b/odl/contrib/datasets/ct/mayo.py index d74a487f913..6222201520d 100644 --- a/odl/contrib/datasets/ct/mayo.py +++ b/odl/contrib/datasets/ct/mayo.py @@ -24,7 +24,7 @@ import tqdm from dicom.datadict import DicomDictionary, NameDict, CleanName -from odl.discr.discr_utils import linear_interpolator +from odl.core.discr.discr_utils import linear_interpolator from odl.contrib.datasets.ct.mayo_dicom_dict import new_dict_items # Update the DICOM dictionary with the extra Mayo tags @@ -169,7 +169,7 @@ def load_projections(folder, indices=None): # Assemble geometry angle_partition = odl.nonuniform_partition(angles) - geometry = odl.tomo.ConeBeamGeometry(angle_partition, + geometry = odl.applications.tomo.ConeBeamGeometry(angle_partition, detector_partition, src_radius=src_radius, det_radius=det_radius, @@ -178,7 +178,7 @@ def load_projections(folder, indices=None): # Create a *temporary* ray transform (we need its range) spc = odl.uniform_discr([-1] * 3, [1] * 3, [32] * 3) - ray_trafo = odl.tomo.RayTransform(spc, geometry, interp='linear') + ray_trafo = odl.applications.tomo.RayTransform(spc, geometry, interp='linear') # convert coordinates theta, up, vp = ray_trafo.range.grid.meshgrid @@ -301,5 +301,5 @@ def load_reconstruction(folder, slice_start=0, slice_end=-1): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/datasets/images/cambridge.py b/odl/contrib/datasets/images/cambridge.py index 306bb82b113..d63ee79e134 100644 --- a/odl/contrib/datasets/images/cambridge.py +++ b/odl/contrib/datasets/images/cambridge.py @@ -163,5 +163,5 @@ def blurring_kernel(shape=None): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/datasets/mri/examples/tugraz_reconstruct.py b/odl/contrib/datasets/mri/examples/tugraz_reconstruct.py index c457af2314c..8fcfb674aa1 100644 --- a/odl/contrib/datasets/mri/examples/tugraz_reconstruct.py +++ b/odl/contrib/datasets/mri/examples/tugraz_reconstruct.py @@ -1,7 +1,7 @@ """Example of using the TU Graz datasets.""" import odl.contrib.datasets.mri.tugraz as tugraz -from odl.util.testutils import run_doctests +from odl.core.util.testutils import run_doctests # 4-channel head example data = tugraz.mri_head_data_4_channel() diff --git a/odl/contrib/datasets/util.py b/odl/contrib/datasets/util.py index cfcf5f359bf..abbad0b6f82 100644 --- a/odl/contrib/datasets/util.py +++ b/odl/contrib/datasets/util.py @@ -78,5 +78,5 @@ def get_data(filename, subset, url): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/fom/examples/noise_power_spectrum.py b/odl/contrib/fom/examples/noise_power_spectrum.py index b0208a7be33..2ecfb2db9cb 100644 --- a/odl/contrib/fom/examples/noise_power_spectrum.py +++ b/odl/contrib/fom/examples/noise_power_spectrum.py @@ -10,14 +10,14 @@ min_pt=[-20, -20], max_pt=[20, 20], shape=[512, 512]) # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) phantom.show('phantom') # Create some data with noise -op = odl.tomo.RayTransform(space, - odl.tomo.parallel_beam_geometry(space)) -fbp_op = odl.tomo.fbp_op(op, filter_type='Hann', frequency_scaling=0.5) -noisy_data = op(phantom) + odl.phantom.white_noise(op.range) +op = odl.applications.tomo.RayTransform(space, + odl.applications.tomo.parallel_beam_geometry(space)) +fbp_op = odl.applications.tomo.fbp_op(op, filter_type='Hann', frequency_scaling=0.5) +noisy_data = op(phantom) + odl.core.phantom.white_noise(op.range) reconstruction = fbp_op(noisy_data) reconstruction.show('reconstruction') diff --git a/odl/contrib/fom/examples/supervised_comparison.py b/odl/contrib/fom/examples/supervised_comparison.py index 166aa02a4f0..52b31ca1448 100644 --- a/odl/contrib/fom/examples/supervised_comparison.py +++ b/odl/contrib/fom/examples/supervised_comparison.py @@ -18,7 +18,7 @@ min_pt=[-20, -20], max_pt=[20, 20], shape=[100, 100]) # Create a discrete Shepp-Logan phantom (modified version) -phantom = odl.phantom.shepp_logan(reco_space, modified=True) +phantom = odl.core.phantom.shepp_logan(reco_space, modified=True) mse = [] mae = [] @@ -36,7 +36,7 @@ mask = (np.asarray(phantom) == 1) for stddev in np.linspace(0.1, 10, 100): - phantom_noisy = phantom + odl.phantom.white_noise(reco_space, + phantom_noisy = phantom + odl.core.phantom.white_noise(reco_space, stddev=stddev) mse.append( fom.mean_squared_error(phantom_noisy, phantom, normalized=True)) diff --git a/odl/contrib/fom/supervised.py b/odl/contrib/fom/supervised.py index 60d65a8ed01..a12017c7f9f 100644 --- a/odl/contrib/fom/supervised.py +++ b/odl/contrib/fom/supervised.py @@ -14,7 +14,7 @@ import odl from odl.contrib.fom.util import spherical_sum -from odl.discr.grid import sparse_meshgrid +from odl.core.discr.grid import sparse_meshgrid __all__ = ('mean_squared_error', 'mean_absolute_error', 'mean_value_difference', 'standard_deviation_difference', @@ -75,7 +75,7 @@ def mean_squared_error(data, ground_truth, mask=None, space = data.space ground_truth = space.element(ground_truth) - l2norm = odl.solvers.L2Norm(space) + l2norm = odl.functional.L2Norm(space) if mask is not None: data = data * mask @@ -148,7 +148,7 @@ def mean_absolute_error(data, ground_truth, mask=None, space = data.space ground_truth = space.element(ground_truth) - l1_norm = odl.solvers.L1Norm(space) + l1_norm = odl.functional.L1Norm(space) if mask is not None: data = data * mask ground_truth = ground_truth * mask @@ -219,7 +219,7 @@ def mean_value_difference(data, ground_truth, mask=None, normalized=False, space = data.space ground_truth = space.element(ground_truth) - l1_norm = odl.solvers.L1Norm(space) + l1_norm = odl.functional.L1Norm(space) if mask is not None: data = data * mask ground_truth = ground_truth * mask @@ -296,8 +296,8 @@ def standard_deviation_difference(data, ground_truth, mask=None, space = data.space ground_truth = space.element(ground_truth) - l1_norm = odl.solvers.L1Norm(space) - l2_norm = odl.solvers.L2Norm(space) + l1_norm = odl.functional.L1Norm(space) + l2_norm = odl.functional.L2Norm(space) if mask is not None: data = data * mask @@ -717,8 +717,8 @@ def psnr(data, ground_truth, use_zscore=False, force_lower_is_better=False): True """ if use_zscore: - data = odl.util.zscore(data) - ground_truth = odl.util.zscore(ground_truth) + data = odl.core.util.zscore(data) + ground_truth = odl.core.util.zscore(ground_truth) mse = mean_squared_error(data, ground_truth) max_true = np.max(np.abs(ground_truth)) @@ -879,5 +879,5 @@ def noise_power_spectrum(data, ground_truth, radial=False, if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/fom/test/test_supervised.py b/odl/contrib/fom/test/test_supervised.py index 00b708e7867..dadb290447d 100644 --- a/odl/contrib/fom/test/test_supervised.py +++ b/odl/contrib/fom/test/test_supervised.py @@ -17,7 +17,7 @@ import odl from odl.contrib import fom -from odl.util.testutils import noise_element, simple_fixture, skip_if_no_pyfftw +from odl.core.util.testutils import noise_element, simple_fixture, skip_if_no_pyfftw # --- pytest fixtures --- # @@ -105,8 +105,8 @@ def test_filter_image_fft(fft_impl): def test_mean_squared_error(space): - true = odl.phantom.white_noise(space) - data = odl.phantom.white_noise(space) + true = odl.core.phantom.white_noise(space) + data = odl.core.phantom.white_noise(space) result = fom.mean_squared_error(data, true) expected = np.mean((true - data) ** 2) @@ -115,8 +115,8 @@ def test_mean_squared_error(space): def test_mean_absolute_error(space): - true = odl.phantom.white_noise(space) - data = odl.phantom.white_noise(space) + true = odl.core.phantom.white_noise(space) + data = odl.core.phantom.white_noise(space) result = fom.mean_absolute_error(data, true) expected = np.mean(np.abs(true - data)) @@ -126,8 +126,8 @@ def test_mean_absolute_error(space): def test_psnr(space): """Test the ``psnr`` fom.""" - true = odl.phantom.white_noise(space) - data = odl.phantom.white_noise(space) + true = odl.core.phantom.white_noise(space) + data = odl.core.phantom.white_noise(space) zero = space.zero() # Check the corner cases @@ -159,7 +159,7 @@ def test_psnr(space): def test_ssim(space): - ground_truth = odl.phantom.white_noise(space) + ground_truth = odl.core.phantom.white_noise(space) # SSIM of true image should be either # * 1 with force_lower_is_better == False, @@ -181,7 +181,7 @@ def test_ssim(space): # SSIM with ground truth zero should always give zero if not normalized # and 1/2 otherwise. - data = odl.phantom.white_noise(space) + data = odl.core.phantom.white_noise(space) result = fom.ssim(data, space.zero()) assert result == pytest.approx(0) @@ -213,8 +213,8 @@ def test_mean_value_difference_sign(): def test_mean_value_difference_range_value(space): - I0 = odl.util.testutils.noise_element(space) - I1 = odl.util.testutils.noise_element(space) + I0 = odl.core.util.testutils.noise_element(space) + I1 = odl.core.util.testutils.noise_element(space) max0 = np.max(I0) max1 = np.max(I1) min0 = np.min(I0) @@ -226,7 +226,7 @@ def test_mean_value_difference_range_value(space): def test_standard_deviation_difference_range_value(space): - I0 = odl.util.testutils.noise_element(space) + I0 = odl.core.util.testutils.noise_element(space) value_shift = np.random.normal(0, 10) assert fom.standard_deviation_difference(I0, I0) == pytest.approx(0) @@ -257,4 +257,4 @@ def test_range_difference(space): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/contrib/fom/test/test_unsupervised.py b/odl/contrib/fom/test/test_unsupervised.py index a1ed6162d3f..1249fdab72f 100644 --- a/odl/contrib/fom/test/test_unsupervised.py +++ b/odl/contrib/fom/test/test_unsupervised.py @@ -65,4 +65,4 @@ def test_estimate_noise_std_normal_2d_pointwise(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/contrib/fom/unsupervised.py b/odl/contrib/fom/unsupervised.py index 930954387bc..fcbb0cc1a9f 100644 --- a/odl/contrib/fom/unsupervised.py +++ b/odl/contrib/fom/unsupervised.py @@ -76,5 +76,5 @@ def estimate_noise_std(img, average=True): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/fom/util.py b/odl/contrib/fom/util.py index 3891c382361..35009409616 100644 --- a/odl/contrib/fom/util.py +++ b/odl/contrib/fom/util.py @@ -10,7 +10,7 @@ import numpy as np -from odl.discr import uniform_discr +from odl.core.discr import uniform_discr from odl.trafos.backends import PYFFTW_AVAILABLE __all__ = () diff --git a/odl/contrib/param_opt/examples/find_optimal_parameters.py b/odl/contrib/param_opt/examples/find_optimal_parameters.py index a141f8ff67a..f9ddb91eca8 100644 --- a/odl/contrib/param_opt/examples/find_optimal_parameters.py +++ b/odl/contrib/param_opt/examples/find_optimal_parameters.py @@ -27,19 +27,19 @@ dtype='float32') # Define forward operator -geometry = odl.tomo.parallel_beam_geometry(space) -ray_trafo = odl.tomo.RayTransform(space, geometry) +geometry = odl.applications.tomo.parallel_beam_geometry(space) +ray_trafo = odl.applications.tomo.RayTransform(space, geometry) # Define true phantoms -phantoms = [odl.phantom.shepp_logan(space, modified=True), - odl.phantom.derenzo_sources(space)] +phantoms = [odl.core.phantom.shepp_logan(space, modified=True), + odl.core.phantom.derenzo_sources(space)] # Define noisy data data = [] for phantom in phantoms: noiseless_data = ray_trafo(phantom) noise_scale = (1 / signal_to_noise) * np.mean(noiseless_data) - noise = noise_scale * odl.phantom.white_noise(ray_trafo.range) + noise = noise_scale * odl.core.phantom.white_noise(ray_trafo.range) noisy_data = noiseless_data + noise data.append(noisy_data) @@ -52,7 +52,7 @@ def reconstruction(proj_data, lam): print('lam = {}'.format(lam)) - fbp_op = odl.tomo.fbp_op(ray_trafo, + fbp_op = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=1 / lam) return fbp_op(proj_data) @@ -77,13 +77,13 @@ def reconstruction(proj_data, parameters): return np.inf * space.one() # Create data term ||Ax - b||_2^2 - l2_norm = odl.solvers.L2NormSquared(ray_trafo.range) + l2_norm = odl.functional.L2NormSquared(ray_trafo.range) data_discrepancy = l2_norm * (ray_trafo - proj_data) # Create regularizing functional huber(|grad(x)|) gradient = odl.Gradient(space) - l1_norm = odl.solvers.GroupL1Norm(gradient.range) - smoothed_l1 = odl.solvers.MoreauEnvelope(l1_norm, sigma=sigma) + l1_norm = odl.functional.GroupL1Norm(gradient.range) + smoothed_l1 = odl.functional.MoreauEnvelope(l1_norm, sigma=sigma) regularizer = smoothed_l1 * gradient # Create full objective functional @@ -122,12 +122,12 @@ def reconstruction(proj_data, lam): gradient = odl.Gradient(space) op = odl.BroadcastOperator(ray_trafo, gradient) - f = odl.solvers.ZeroFunctional(op.domain) + f = odl.functional.ZeroFunctional(op.domain) - l2_norm = odl.solvers.L2NormSquared( + l2_norm = odl.functional.L2NormSquared( ray_trafo.range).translated(proj_data) - l1_norm = lam * odl.solvers.GroupL1Norm(gradient.range) - g = odl.solvers.SeparableSum(l2_norm, l1_norm) + l1_norm = lam * odl.functional.GroupL1Norm(gradient.range) + g = odl.functional.SeparableSum(l2_norm, l1_norm) # Select solver parameters op_norm = 1.5 * odl.power_method_opnorm(op, maxiter=10) diff --git a/odl/contrib/param_opt/test/test_param_opt.py b/odl/contrib/param_opt/test/test_param_opt.py index 2146cca07f9..f71aa3b875f 100644 --- a/odl/contrib/param_opt/test/test_param_opt.py +++ b/odl/contrib/param_opt/test/test_param_opt.py @@ -13,7 +13,7 @@ import odl import odl.contrib.fom import odl.contrib.param_opt -from odl.util.testutils import simple_fixture +from odl.core.util.testutils import simple_fixture space = simple_fixture('space', [odl.rn(3), @@ -27,7 +27,7 @@ def test_optimal_parameters_one_parameter(space, fom): """Tests if optimal_parameters works for some simple examples.""" - noise = [odl.phantom.white_noise(space) for _ in range(2)] + noise = [odl.core.phantom.white_noise(space) for _ in range(2)] phantoms = noise.copy() data = noise.copy() @@ -45,7 +45,7 @@ def reconstruction(data, lam): def test_optimal_parameters_two_parameters(space, fom): """Tests if optimal_parameters works for some simple examples.""" - noise = [odl.phantom.white_noise(space) for _ in range(2)] + noise = [odl.core.phantom.white_noise(space) for _ in range(2)] # Normalize to reduce test fails due to randomness noise = [noise_elem / noise_elem.norm() for noise_elem in noise] phantoms = noise.copy() @@ -69,4 +69,4 @@ def reconstruction2(data, params): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/contrib/pyshearlab/examples/basic_shearlab.py b/odl/contrib/pyshearlab/examples/basic_shearlab.py index 7de191da5cc..550570b26cc 100644 --- a/odl/contrib/pyshearlab/examples/basic_shearlab.py +++ b/odl/contrib/pyshearlab/examples/basic_shearlab.py @@ -7,7 +7,7 @@ op = odl.contrib.pyshearlab.PyShearlabOperator(space, num_scales=2) -phantom = odl.phantom.shepp_logan(space, True) +phantom = odl.core.phantom.shepp_logan(space, True) y = op(phantom) y.show('Shearlet coefficients') diff --git a/odl/contrib/pyshearlab/examples/wave_shear_separation.py b/odl/contrib/pyshearlab/examples/wave_shear_separation.py index 6793dbc2e3b..5b2d5c5fc6d 100644 --- a/odl/contrib/pyshearlab/examples/wave_shear_separation.py +++ b/odl/contrib/pyshearlab/examples/wave_shear_separation.py @@ -13,11 +13,11 @@ from odl.contrib.pyshearlab import PyShearlabOperator space = odl.uniform_discr([-1, -1], [1, 1], [128, 128]) -img = odl.phantom.ellipsoid_phantom(space, [[1, 0.02, 0.3, 0.5, 0, 0]]) -img += odl.phantom.cuboid(space, [-0.3, -0.3], [0.3, 0.3]) +img = odl.core.phantom.ellipsoid_phantom(space, [[1, 0.02, 0.3, 0.5, 0, 0]]) +img += odl.core.phantom.cuboid(space, [-0.3, -0.3], [0.3, 0.3]) # Generate noisy data -noise = odl.phantom.white_noise(space) * 0.001 +noise = odl.core.phantom.white_noise(space) * 0.001 noisy_data = img + noise # Create shearlet and wavelet transforms @@ -28,13 +28,13 @@ # Functionals sol_space = space ** 2 -l1norm_wave = odl.solvers.L1Norm(wave_op.range) -l1norm_shear = odl.solvers.L1Norm(shear_op.range) -data_matching = 1000 * odl.solvers.L2NormSquared(space) +l1norm_wave = odl.functional.L1Norm(wave_op.range) +l1norm_shear = odl.functional.L1Norm(shear_op.range) +data_matching = 1000 * odl.functional.L2NormSquared(space) data_matching = data_matching.translated(noisy_data) -f = odl.solvers.ZeroFunctional(sol_space) -penalizer = odl.solvers.SeparableSum(0.05 * l1norm_wave, +f = odl.functional.ZeroFunctional(sol_space) +penalizer = odl.functional.SeparableSum(0.05 * l1norm_wave, l1norm_shear) # Forward operators diff --git a/odl/contrib/pyshearlab/pyshearlab_operator.py b/odl/contrib/pyshearlab/pyshearlab_operator.py index f33236ab4ae..8001f29be03 100644 --- a/odl/contrib/pyshearlab/pyshearlab_operator.py +++ b/odl/contrib/pyshearlab/pyshearlab_operator.py @@ -191,5 +191,5 @@ def inverse(self): if __name__ == '__main__': # pylint: disable=wrong-import-position - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/pyshearlab/test/operator_test.py b/odl/contrib/pyshearlab/test/operator_test.py index 8824f2d5b15..3dfb999b0db 100644 --- a/odl/contrib/pyshearlab/test/operator_test.py +++ b/odl/contrib/pyshearlab/test/operator_test.py @@ -12,7 +12,7 @@ import numpy as np import odl import odl.contrib.pyshearlab -from odl.util.testutils import all_almost_equal, simple_fixture +from odl.core.util.testutils import all_almost_equal, simple_fixture dtype = simple_fixture('dtype', ['float32', 'float64']) @@ -27,7 +27,7 @@ def test_operator(dtype, shape): op = odl.contrib.pyshearlab.PyShearlabOperator(space, num_scales=2) - phantom = odl.phantom.shepp_logan(space, True) + phantom = odl.core.phantom.shepp_logan(space, True) # Test evaluation y = op(phantom) @@ -54,4 +54,4 @@ def test_operator(dtype, shape): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/contrib/shearlab/examples/basic_shearlab.py b/odl/contrib/shearlab/examples/basic_shearlab.py index 13ce7a6b2fa..facdcaecec3 100644 --- a/odl/contrib/shearlab/examples/basic_shearlab.py +++ b/odl/contrib/shearlab/examples/basic_shearlab.py @@ -7,7 +7,7 @@ op = odl.contrib.shearlab.ShearlabOperator(space, num_scales=2) -phantom = odl.phantom.shepp_logan(space, True) +phantom = odl.core.phantom.shepp_logan(space, True) y = op(phantom) y.show('Shearlet coefficients') diff --git a/odl/contrib/shearlab/examples/wave_shear_separation.py b/odl/contrib/shearlab/examples/wave_shear_separation.py index e4ce757937c..2fd0294e6fc 100644 --- a/odl/contrib/shearlab/examples/wave_shear_separation.py +++ b/odl/contrib/shearlab/examples/wave_shear_separation.py @@ -13,11 +13,11 @@ from odl.contrib import shearlab space = odl.uniform_discr([-1, -1], [1, 1], [128, 128]) -img = odl.phantom.ellipsoid_phantom(space, [[1, 0.02, 0.3, 0.5, 0, 0]]) -img += odl.phantom.cuboid(space, [-0.3, -0.3], [0.3, 0.3]) +img = odl.core.phantom.ellipsoid_phantom(space, [[1, 0.02, 0.3, 0.5, 0, 0]]) +img += odl.core.phantom.cuboid(space, [-0.3, -0.3], [0.3, 0.3]) # Generate noisy data -noise = odl.phantom.white_noise(space) * 0.001 +noise = odl.core.phantom.white_noise(space) * 0.001 noisy_data = img + noise # Create shearlet and wavelet transforms @@ -28,13 +28,13 @@ # Functionals sol_space = space ** 2 -l1norm_wave = odl.solvers.L1Norm(wave_op.range) -l1norm_shear = odl.solvers.L1Norm(shear_op.range) -data_matching = 1000 * odl.solvers.L2NormSquared(space) +l1norm_wave = odl.functional.L1Norm(wave_op.range) +l1norm_shear = odl.functional.L1Norm(shear_op.range) +data_matching = 1000 * odl.functional.L2NormSquared(space) data_matching = data_matching.translated(noisy_data) -f = odl.solvers.ZeroFunctional(sol_space) -penalizer = odl.solvers.SeparableSum(0.05 * l1norm_wave, +f = odl.functional.ZeroFunctional(sol_space) +penalizer = odl.functional.SeparableSum(0.05 * l1norm_wave, l1norm_shear) # Forward operators diff --git a/odl/contrib/shearlab/shearlab_operator.py b/odl/contrib/shearlab/shearlab_operator.py index 76b120b43c8..b3bdc70b059 100644 --- a/odl/contrib/shearlab/shearlab_operator.py +++ b/odl/contrib/shearlab/shearlab_operator.py @@ -348,5 +348,5 @@ def shearrecadjoint2D(X, shearletsystem): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/shearlab/test/operator_test.py b/odl/contrib/shearlab/test/operator_test.py index 0541b52eb56..f82977822ed 100644 --- a/odl/contrib/shearlab/test/operator_test.py +++ b/odl/contrib/shearlab/test/operator_test.py @@ -12,7 +12,7 @@ import numpy as np import odl import odl.contrib.shearlab -from odl.util.testutils import all_almost_equal, simple_fixture +from odl.core.util.testutils import all_almost_equal, simple_fixture dtype = simple_fixture('dtype', ['float32', 'float64']) @@ -27,7 +27,7 @@ def test_operator(dtype, shape): op = odl.contrib.pyshearlab.ShearlabOperator(space, num_scales=2) - phantom = odl.phantom.shepp_logan(space, True) + phantom = odl.core.phantom.shepp_logan(space, True) # Test evaluation y = op(phantom) @@ -54,4 +54,4 @@ def test_operator(dtype, shape): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/contrib/solvers/examples/tomography_nonlocalmeans.py b/odl/contrib/solvers/examples/tomography_nonlocalmeans.py index 2d29ed5bbd6..64778990f5d 100644 --- a/odl/contrib/solvers/examples/tomography_nonlocalmeans.py +++ b/odl/contrib/solvers/examples/tomography_nonlocalmeans.py @@ -36,22 +36,22 @@ dtype='float32') # Make a parallel beam geometry with flat detector -geometry = odl.tomo.parallel_beam_geometry(space) +geometry = odl.applications.tomo.parallel_beam_geometry(space) # Create the forward operator -ray_trafo = odl.tomo.RayTransform(space, geometry) +ray_trafo = odl.applications.tomo.RayTransform(space, geometry) # --- Generate artificial data --- # # Create phantom -phantom = odl.phantom.forbild(space) +phantom = odl.core.phantom.forbild(space) phantom.show('phantom', clim=[1.0, 1.1]) # Create sinogram of forward projected phantom with noise data = ray_trafo(phantom) -data += odl.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.01 +data += odl.core.phantom.white_noise(ray_trafo.range) * np.mean(data) * 0.01 # --- Set up the inverse problem --- # @@ -59,10 +59,10 @@ gradient = odl.Gradient(space) # Create functionals for the regularizers and the bound constrains. -l1_norm = odl.solvers.GroupL1Norm(gradient.range) +l1_norm = odl.functional.GroupL1Norm(gradient.range) nlm_func = odl.contrib.solvers.NLMRegularizer(space, h=0.02, impl=impl, patch_size=5, patch_distance=11) -f = odl.solvers.IndicatorBox(space, 0, 2) +f = odl.functional.IndicatorBox(space, 0, 2) # Assemble the linear operators. Here the TV-term is represented as a # composition of the 1-norm and the gradient. See the documentation of the @@ -85,7 +85,7 @@ # This gradient encodes the differentiable term(s) of the goal functional, # which corresponds to the "forward" part of the method. In this example the # differentiable part is the squared 2-norm. -l2_norm = odl.solvers.L2NormSquared(ray_trafo.range) +l2_norm = odl.functional.L2NormSquared(ray_trafo.range) h = l2_norm.translated(data) * ray_trafo # Used to display intermediate results and print iteration number. @@ -93,7 +93,7 @@ odl.solvers.CallbackPrintIteration()) # Use FBP as initial guess -fbp_op = odl.tomo.fbp_op(ray_trafo, filter_type='Hann') +fbp_op = odl.applications.tomo.fbp_op(ray_trafo, filter_type='Hann') fbp = fbp_op(data) fbp.show('fbp', clim=[1.0, 1.1]) diff --git a/odl/contrib/solvers/functional/nonlocalmeans_functionals.py b/odl/contrib/solvers/functional/nonlocalmeans_functionals.py index e8e275c8aa4..c96c1767598 100644 --- a/odl/contrib/solvers/functional/nonlocalmeans_functionals.py +++ b/odl/contrib/solvers/functional/nonlocalmeans_functionals.py @@ -11,8 +11,8 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.operator import Operator -from odl.solvers.functional.functional import Functional +from odl.core.operator import Operator +from odl.functional.functional import Functional __all__ = ('NLMRegularizer',) @@ -118,5 +118,5 @@ def _call(self, x): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/solvers/operator/proximal_lang.py b/odl/contrib/solvers/operator/proximal_lang.py index ef9229b0a8e..27a14afed83 100644 --- a/odl/contrib/solvers/operator/proximal_lang.py +++ b/odl/contrib/solvers/operator/proximal_lang.py @@ -65,6 +65,6 @@ def adjoint(inp, out): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/solvers/spdhg/examples/PET_1k.py b/odl/contrib/solvers/spdhg/examples/PET_1k.py index 71a7d8a850e..a529b7c90ca 100644 --- a/odl/contrib/solvers/spdhg/examples/PET_1k.py +++ b/odl/contrib/solvers/spdhg/examples/PET_1k.py @@ -56,9 +56,9 @@ # create geometry of operator X = odl.uniform_discr(min_pt=[-1, -1], max_pt=[1, 1], shape=[nvoxelx, nvoxelx]) -geometry = odl.tomo.parallel_beam_geometry(X, num_angles=200, det_shape=250) +geometry = odl.applications.tomo.parallel_beam_geometry(X, num_angles=200, det_shape=250) -G = odl.BroadcastOperator(*[odl.tomo.RayTransform(X, g, impl='astra_cpu') +G = odl.BroadcastOperator(*[odl.applications.tomo.RayTransform(X, g, impl='astra_cpu') for g in geometry]) # create ground truth @@ -87,7 +87,7 @@ for s in sino_supp]) background = 10 * smooth_supp + 10 background *= counts_background / background.ufuncs.sum() - data = odl.phantom.poisson_noise(factors * sino + background, seed=1807) + data = odl.core.phantom.poisson_noise(factors * sino + background, seed=1807) arr = np.empty(3, dtype=object) arr[0] = data @@ -113,11 +113,11 @@ (data, factors, background) = np.load(file_data) # data fit -f = odl.solvers.SeparableSum( - *[odl.solvers.KullbackLeibler(Yi, yi).translated(-ri) +f = odl.functional.SeparableSum( + *[odl.functional.KullbackLeibler(Yi, yi).translated(-ri) for Yi, yi, ri in zip(Y, data, background)]) # TODO: should be ideally like -# f = odl.solvers.KullbackLeibler(Y, data).translated(-background) +# f = odl.functional.KullbackLeibler(Y, data).translated(-background) # prior and regularisation parameter g = spdhg.TotalVariationNonNegative(X, alpha=2e-1) @@ -171,13 +171,13 @@ else: (x_opt, y_opt, subx_opt, suby_opt, obj_opt) = np.load(file_target) -dist_x = odl.solvers.L2NormSquared(X).translated(x_opt) # primal distance -dist_y = odl.solvers.L2NormSquared(Y).translated(y_opt) # dual distance +dist_x = odl.functional.L2NormSquared(X).translated(x_opt) # primal distance +dist_y = odl.functional.L2NormSquared(Y).translated(y_opt) # dual distance bregman_g = spdhg.bregman(g, x_opt, subx_opt) # primal Bregman distance # TODO: should be like: bregman_g = g.bregman(x_opt, subgrad=subx_opt) # dual Bregman distance -bregman_f = odl.solvers.SeparableSum( +bregman_f = odl.functional.SeparableSum( *[spdhg.bregman(fi.convex_conj, yi, ri) for fi, yi, ri in zip(f, y_opt, suby_opt)]) # TODO: should be like: bregman_f = f.bregman(y_opt, subgrad=subx_opt) diff --git a/odl/contrib/solvers/spdhg/examples/PET_linear_rate.py b/odl/contrib/solvers/spdhg/examples/PET_linear_rate.py index 1d8b002e232..b2eb6faca82 100644 --- a/odl/contrib/solvers/spdhg/examples/PET_linear_rate.py +++ b/odl/contrib/solvers/spdhg/examples/PET_linear_rate.py @@ -58,8 +58,8 @@ X = odl.uniform_discr(min_pt=[-1, -1], max_pt=[1, 1], shape=[nvoxelx, nvoxelx], dtype='float32') -geometry = odl.tomo.parallel_beam_geometry(X, num_angles=200, det_shape=250) -G = odl.BroadcastOperator(*[odl.tomo.RayTransform(X, gi, impl='astra_cpu') +geometry = odl.applications.tomo.parallel_beam_geometry(X, num_angles=200, det_shape=250) +G = odl.BroadcastOperator(*[odl.applications.tomo.RayTransform(X, gi, impl='astra_cpu') for gi in geometry]) # create ground truth @@ -88,7 +88,7 @@ for sino_support in sinogram_support]) background = 10 * smoothed_support + 10 background *= counts_background / background.ufuncs.sum() - data = odl.phantom.poisson_noise(factors * sinogram + background, + data = odl.core.phantom.poisson_noise(factors * sinogram + background, seed=1807) arr = np.empty(3, dtype=object) @@ -115,7 +115,7 @@ (data, factors, background) = np.load(file_data) # data fit -f = odl.solvers.SeparableSum( +f = odl.functional.SeparableSum( *[spdhg.KullbackLeiblerSmooth(Yi, yi, ri) for Yi, yi, ri in zip(Y, data, background)]) # TODO: should be like: @@ -186,8 +186,8 @@ (x_opt, y_opt, subx_opt, suby_opt, obj_opt) = np.load(file_target) # set distances -dist_x = 1 / 2 * odl.solvers.L2NormSquared(X).translated(x_opt) -dist_y = 1 / 2 * odl.solvers.L2NormSquared(Y).translated(y_opt) +dist_x = 1 / 2 * odl.functional.L2NormSquared(X).translated(x_opt) +dist_y = 1 / 2 * odl.functional.L2NormSquared(Y).translated(y_opt) class CallbackStore(odl.solvers.Callback): diff --git a/odl/contrib/solvers/spdhg/examples/ROF_1k2_primal.py b/odl/contrib/solvers/spdhg/examples/ROF_1k2_primal.py index b1902534f1e..3c606d91eba 100644 --- a/odl/contrib/solvers/spdhg/examples/ROF_1k2_primal.py +++ b/odl/contrib/solvers/spdhg/examples/ROF_1k2_primal.py @@ -54,7 +54,7 @@ clim = [0, 1] # create data -data = odl.phantom.white_noise(X, mean=groundtruth, stddev=0.1, seed=1807) +data = odl.core.phantom.white_noise(X, mean=groundtruth, stddev=0.1, seed=1807) # save images and data if not os.path.exists('{}/groundtruth.png'.format(folder_main)): @@ -71,9 +71,9 @@ Y = A.range # set up functional f -f = odl.solvers.SeparableSum(*[odl.solvers.L1Norm(Yi) for Yi in Y]) +f = odl.functional.SeparableSum(*[odl.functional.L1Norm(Yi) for Yi in Y]) # set up functional g -g = 1 / (2 * alpha) * odl.solvers.L2NormSquared(X).translated(data) +g = 1 / (2 * alpha) * odl.functional.L2NormSquared(X).translated(data) obj_fun = f * A + g # define objective function mu_g = 1 / alpha # define strong convexity constants @@ -113,14 +113,14 @@ (x_opt, y_opt, subx_opt, suby_opt, obj_opt, normA) = np.load(file_target) # set norms of the primal and dual variable -dist_x = odl.solvers.L2NormSquared(X).translated(x_opt) -dist_y = odl.solvers.L2NormSquared(Y).translated(y_opt) +dist_x = odl.functional.L2NormSquared(X).translated(x_opt) +dist_y = odl.functional.L2NormSquared(Y).translated(y_opt) # create Bregman distances for f and g bregman_g = spdhg.bregman(g, x_opt, subx_opt) # define Bregman distance for f and f_p -bregman_f = odl.solvers.SeparableSum( +bregman_f = odl.functional.SeparableSum( *[spdhg.bregman(fi.convex_conj, yi, ri) for fi, yi, ri in zip(f, y_opt, suby_opt)]) diff --git a/odl/contrib/solvers/spdhg/examples/deblurring_1k2_dual.py b/odl/contrib/solvers/spdhg/examples/deblurring_1k2_dual.py index 263b21e618b..d62e685f9d5 100644 --- a/odl/contrib/solvers/spdhg/examples/deblurring_1k2_dual.py +++ b/odl/contrib/solvers/spdhg/examples/deblurring_1k2_dual.py @@ -73,7 +73,7 @@ # create data background = 200 * Y[2].one() -data = odl.phantom.poisson_noise(A[2](groundtruth) + background, seed=1807) +data = odl.core.phantom.poisson_noise(A[2](groundtruth) + background, seed=1807) # save images and data if not os.path.exists('{}/groundtruth.png'.format(folder_main)): @@ -86,12 +86,12 @@ gamma = 0.99 # auxiliary step size parameter < 1 # set up functional f -f = odl.solvers.SeparableSum( - odl.solvers.Huber(A[0].range, gamma=1), - odl.solvers.Huber(A[1].range, gamma=1), +f = odl.functional.SeparableSum( + odl.functional.Huber(A[0].range, gamma=1), + odl.functional.Huber(A[1].range, gamma=1), 1 / alpha * spdhg.KullbackLeiblerSmooth(A[2].range, data, background)) -g = odl.solvers.IndicatorBox(X, clim[0], clim[1]) # set up functional g +g = odl.functional.IndicatorBox(X, clim[0], clim[1]) # set up functional g obj_fun = f * A + g # define objective function mu_i = [1 / fi.grad_lipschitz for fi in f] # strong convexity constants of fi @@ -102,7 +102,7 @@ if not os.path.exists(file_target): # compute norm of operator - normA = tol_norm * A.norm(estimate=True, xstart=odl.phantom.white_noise(X)) + normA = tol_norm * A.norm(estimate=True, xstart=odl.core.phantom.white_noise(X)) sigma, tau = [gamma / normA] * 2 # set step size parameters x_opt, y_opt = X.zero(), Y.zero() # initialise variables @@ -138,8 +138,8 @@ (x_opt, y_opt, subx_opt, suby_opt, obj_opt, normA) = np.load(file_target) # set norms of the primal and dual variable -dist_x = odl.solvers.L2NormSquared(X).translated(x_opt) -dist_y = odl.solvers.L2NormSquared(Y).translated(y_opt) +dist_x = odl.functional.L2NormSquared(X).translated(x_opt) +dist_y = odl.functional.L2NormSquared(Y).translated(y_opt) class CallbackStore(odl.solvers.Callback): @@ -200,7 +200,7 @@ def __call__(self, w): file_normA = '{}/norms_{}subsets.npy'.format(folder_main, 1) if not os.path.exists(file_normA): - xstart = odl.phantom.white_noise(X) + xstart = odl.core.phantom.white_noise(X) norm_estimate = A.norm(estimate=True, xstart=xstart) normA = [tol_norm * norm_estimate] @@ -214,7 +214,7 @@ def __call__(self, w): file_normA = '{}/norms_{}subsets.npy'.format(folder_main, n) if not os.path.exists(file_normA): - xstart = odl.phantom.white_noise(X) + xstart = odl.core.phantom.white_noise(X) norm_estimate = A[2].norm(estimate=True, xstart=xstart) normA = [2, 2, tol_norm * norm_estimate] diff --git a/odl/contrib/solvers/spdhg/examples/get_started.py b/odl/contrib/solvers/spdhg/examples/get_started.py index 12ab1e6c5fd..dd39023af32 100644 --- a/odl/contrib/solvers/spdhg/examples/get_started.py +++ b/odl/contrib/solvers/spdhg/examples/get_started.py @@ -26,7 +26,7 @@ image_gray = images.building(gray=True) X = odl.uniform_discr([0, 0], image_gray.shape, image_gray.shape) groundtruth = X.element(image_gray) -data = odl.phantom.white_noise(X, mean=groundtruth, stddev=0.1, seed=1807) +data = odl.core.phantom.white_noise(X, mean=groundtruth, stddev=0.1, seed=1807) # set parameter alpha = .12 # regularisation parameter @@ -35,8 +35,8 @@ # set functionals and operator A = odl.BroadcastOperator(*[odl.PartialDerivative(X, d, pad_mode='symmetric') for d in [0, 1]]) -f = odl.solvers.SeparableSum(*[odl.solvers.L1Norm(Yi) for Yi in A.range]) -g = 1 / (2 * alpha) * odl.solvers.L2NormSquared(X).translated(data) +f = odl.functional.SeparableSum(*[odl.functional.L1Norm(Yi) for Yi in A.range]) +g = 1 / (2 * alpha) * odl.functional.L2NormSquared(X).translated(data) # set sampling n = 2 # number of subsets diff --git a/odl/contrib/solvers/spdhg/misc.py b/odl/contrib/solvers/spdhg/misc.py index e0484a8faf2..0feaa916fcd 100644 --- a/odl/contrib/solvers/spdhg/misc.py +++ b/odl/contrib/solvers/spdhg/misc.py @@ -57,7 +57,7 @@ def save_signal(signal, name, folder, fignum): def bregman(f, v, subgrad): - return (odl.solvers.FunctionalQuadraticPerturb(f, linear_term=-subgrad) - + return (odl.functional.FunctionalQuadraticPerturb(f, linear_term=-subgrad) - f(v) + subgrad.inner(v)) @@ -138,7 +138,7 @@ def total_variation(domain, grad=None): else: grad = grad - f = odl.solvers.GroupL1Norm(grad.range, exponent=2) + f = odl.functional.GroupL1Norm(grad.range, exponent=2) return f * grad @@ -200,8 +200,8 @@ def __init__(self, domain, alpha=1, prox_options={}, grad=None, self.alpha = alpha self.tv = total_variation(domain, grad=grad) self.grad = self.tv.right - self.nn = odl.solvers.IndicatorBox(domain, 0, np.inf) - self.l2 = 0.5 * odl.solvers.L2NormSquared(domain) + self.nn = odl.functional.IndicatorBox(domain, 0, np.inf) + self.l2 = 0.5 * odl.functional.L2NormSquared(domain) self.proj_P = self.tv.left.convex_conj.proximal(0) self.proj_C = self.nn.proximal(1) diff --git a/odl/contrib/solvers/spdhg/stochastic_primal_dual_hybrid_gradient.py b/odl/contrib/solvers/spdhg/stochastic_primal_dual_hybrid_gradient.py index 7c73ccb8a46..7a783189df6 100644 --- a/odl/contrib/solvers/spdhg/stochastic_primal_dual_hybrid_gradient.py +++ b/odl/contrib/solvers/spdhg/stochastic_primal_dual_hybrid_gradient.py @@ -67,7 +67,7 @@ def pdhg(x, f, g, A, tau, sigma, niter, **kwargs): def fun_select(k): return [0] - f = odl.solvers.SeparableSum(f) + f = odl.functional.SeparableSum(f) A = odl.BroadcastOperator(A, 1) # Dual variable diff --git a/odl/contrib/tensorflow/examples/tensorflow_layer_matrix.py b/odl/contrib/tensorflow/examples/tensorflow_layer_matrix.py index a1dfb893a34..861ef84eb6e 100644 --- a/odl/contrib/tensorflow/examples/tensorflow_layer_matrix.py +++ b/odl/contrib/tensorflow/examples/tensorflow_layer_matrix.py @@ -31,7 +31,7 @@ x_tf = tf.constant(x)[None, ..., None] z_tf = tf.constant(z)[None, ..., None] -# Create tensorflow layer from odl operator +# Create tensorflow layer from odl core.operator odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer( odl_op, 'MatrixOperator') y_tf = odl_op_layer(x_tf) diff --git a/odl/contrib/tensorflow/examples/tensorflow_layer_productspace.py b/odl/contrib/tensorflow/examples/tensorflow_layer_productspace.py index 263cd3a27eb..7f7e02f18a9 100644 --- a/odl/contrib/tensorflow/examples/tensorflow_layer_productspace.py +++ b/odl/contrib/tensorflow/examples/tensorflow_layer_productspace.py @@ -24,7 +24,7 @@ x_tf = tf.ones([1, 10, 10, 1]) z_tf = tf.ones([1, 2, 10, 10, 1]) -# Create tensorflow layer from odl operator +# Create tensorflow layer from odl core.operator odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(odl_op, 'Gradient') y_tf = odl_op_layer(x_tf) diff --git a/odl/contrib/tensorflow/examples/tensorflow_layer_ray_transform.py b/odl/contrib/tensorflow/examples/tensorflow_layer_ray_transform.py index d6c726bf4f9..35fef26fe4f 100644 --- a/odl/contrib/tensorflow/examples/tensorflow_layer_ray_transform.py +++ b/odl/contrib/tensorflow/examples/tensorflow_layer_ray_transform.py @@ -16,13 +16,13 @@ space = odl.uniform_discr([-64, -64], [64, 64], [128, 128], dtype='float32') -geometry = odl.tomo.parallel_beam_geometry(space) -ray_transform = odl.tomo.RayTransform(space, geometry) +geometry = odl.applications.tomo.parallel_beam_geometry(space) +ray_transform = odl.applications.tomo.RayTransform(space, geometry) x = tf.constant(np.asarray(ray_transform.domain.one())) z = tf.constant(np.asarray(ray_transform.range.one())) -# Create tensorflow layer from odl operator +# Create tensorflow layer from odl core.operator odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer( ray_transform, 'RayTransform') diff --git a/odl/contrib/tensorflow/examples/tensorflow_tomography.py b/odl/contrib/tensorflow/examples/tensorflow_tomography.py index 68d9fcd8114..65bdac668e1 100644 --- a/odl/contrib/tensorflow/examples/tensorflow_tomography.py +++ b/odl/contrib/tensorflow/examples/tensorflow_tomography.py @@ -17,16 +17,16 @@ # Create ODL data structures space = odl.uniform_discr([-64, -64], [64, 64], [128, 128], dtype='float32') -geometry = odl.tomo.parallel_beam_geometry(space) -ray_transform = odl.tomo.RayTransform(space, geometry) +geometry = odl.applications.tomo.parallel_beam_geometry(space) +ray_transform = odl.applications.tomo.RayTransform(space, geometry) grad = odl.Gradient(space) # Create data -phantom = odl.phantom.shepp_logan(space, True) +phantom = odl.core.phantom.shepp_logan(space, True) data = ray_transform(phantom) -noisy_data = data + odl.phantom.white_noise(data.space) +noisy_data = data + odl.core.phantom.white_noise(data.space) -# Create tensorflow layers from odl operators +# Create tensorflow layers from odl core.operators ray_transform_layer = odl.contrib.tensorflow.as_tensorflow_layer( ray_transform, name='RayTransform') grad_layer = odl.contrib.tensorflow.as_tensorflow_layer( diff --git a/odl/contrib/tensorflow/layer.py b/odl/contrib/tensorflow/layer.py index 34a14522349..3f98bcc48d1 100644 --- a/odl/contrib/tensorflow/layer.py +++ b/odl/contrib/tensorflow/layer.py @@ -395,5 +395,5 @@ def space_shape(space): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/tensorflow/operator.py b/odl/contrib/tensorflow/operator.py index e7563a907b6..61e84983d64 100644 --- a/odl/contrib/tensorflow/operator.py +++ b/odl/contrib/tensorflow/operator.py @@ -128,5 +128,5 @@ def _call(self, y): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/tensorflow/space.py b/odl/contrib/tensorflow/space.py index ff152fadf04..c48d88ab90d 100644 --- a/odl/contrib/tensorflow/space.py +++ b/odl/contrib/tensorflow/space.py @@ -11,9 +11,9 @@ from __future__ import print_function, division, absolute_import import tensorflow as tf -from odl.set import LinearSpace, RealNumbers -from odl.set.space import LinearSpaceElement -from odl.operator import Operator +from odl.core.set import LinearSpace, RealNumbers +from odl.core.set.space import LinearSpaceElement +from odl.core.operator import Operator __all__ = ('TensorflowSpace', 'TensorflowSpaceOperator') @@ -133,5 +133,5 @@ def adjoint(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/tensorflow/test/tensorflow_test.py b/odl/contrib/tensorflow/test/tensorflow_test.py index 5a1d839ec23..c49eb1b53f6 100644 --- a/odl/contrib/tensorflow/test/tensorflow_test.py +++ b/odl/contrib/tensorflow/test/tensorflow_test.py @@ -16,7 +16,7 @@ import odl import odl.contrib.tensorflow -from odl.util import all_almost_equal +from odl.core.util import all_almost_equal def test_as_tensorflow_layer(): @@ -32,7 +32,7 @@ def test_as_tensorflow_layer(): x_tf = tf.constant(x)[None, ..., None] z_tf = tf.constant(z)[None, ..., None] - # Create tensorflow layer from odl operator + # Create tensorflow layer from odl core.operator odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer( odl_op, 'MatrixOperator') y_tf = odl_op_layer(x_tf) @@ -52,4 +52,4 @@ def test_as_tensorflow_layer(): if __name__ == '__main__': with tf.Session(): - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/contrib/theano/examples/theano_layer_matrix.py b/odl/contrib/theano/examples/theano_layer_matrix.py index e89f0c26bce..c2a856554ba 100644 --- a/odl/contrib/theano/examples/theano_layer_matrix.py +++ b/odl/contrib/theano/examples/theano_layer_matrix.py @@ -43,7 +43,7 @@ # --- Wrap ODL functional as Theano operator --- # # Define ODL cost and composed functional -odl_cost = odl.solvers.L2NormSquared(odl_op.range) +odl_cost = odl.functional.L2NormSquared(odl_op.range) odl_functional = odl_cost * odl_op # Create Theano layer from ODL cost diff --git a/odl/contrib/theano/layer.py b/odl/contrib/theano/layer.py index fbf127ae790..3a666997c39 100644 --- a/odl/contrib/theano/layer.py +++ b/odl/contrib/theano/layer.py @@ -55,7 +55,7 @@ def __init__(self, operator): Create a functional, i.e., an operator with scalar output: >>> space = odl.rn(3) - >>> functional = odl.solvers.L2NormSquared(space) + >>> functional = odl.functional.L2NormSquared(space) >>> func_op = TheanoOperator(functional) >>> x = theano.tensor.dvector() >>> apply = func_op.make_node(x) @@ -128,7 +128,7 @@ def perform(self, node, inputs, output_storage): Evaluate a functional, i.e., an operator with scalar output: >>> space = odl.rn(3) - >>> functional = odl.solvers.L2NormSquared(space) + >>> functional = odl.functional.L2NormSquared(space) >>> func_op = TheanoOperator(functional) >>> x = theano.tensor.dvector() >>> op_x = func_op(x) @@ -203,7 +203,7 @@ def grad(self, inputs, output_grads): Compute the gradient of a custom functional: >>> space = odl.rn(3) - >>> functional = odl.solvers.L2NormSquared(space) + >>> functional = odl.functional.L2NormSquared(space) >>> func_op = TheanoOperator(functional) >>> x = theano.tensor.dvector() >>> op_x = func_op(x) @@ -326,5 +326,5 @@ def infer_shape(self, node, input_shapes): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/theano/test/theano_test.py b/odl/contrib/theano/test/theano_test.py index 3eeb73b7ac1..e52aca845b3 100644 --- a/odl/contrib/theano/test/theano_test.py +++ b/odl/contrib/theano/test/theano_test.py @@ -16,7 +16,7 @@ import odl import odl.contrib.theano -from odl.util import all_almost_equal +from odl.core.util import all_almost_equal def test_theano_operator(): @@ -33,7 +33,7 @@ def test_theano_operator(): x_theano = T.dvector() dy_theano = T.dvector() - # Create Theano layer from odl operator + # Create Theano layer from odl core.operator odl_op_layer = odl.contrib.theano.TheanoOperator(odl_op) # Build computation graphs @@ -65,13 +65,13 @@ def test_theano_gradient(): x = [1., 2.] # Define ODL cost and the composed functional - odl_cost = odl.solvers.L2NormSquared(odl_op.range) + odl_cost = odl.functional.L2NormSquared(odl_op.range) odl_functional = odl_cost * odl_op # Create Theano placeholder x_theano = T.dvector() - # Create Theano layers from odl operators + # Create Theano layers from odl core.operators odl_op_layer = odl.contrib.theano.TheanoOperator(odl_op) odl_cost_layer = odl.contrib.theano.TheanoOperator(odl_cost) @@ -94,4 +94,4 @@ def test_theano_gradient(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/contrib/tomo/__init__.py b/odl/contrib/tomo/__init__.py index 09703808e73..2f39fc2fd02 100644 --- a/odl/contrib/tomo/__init__.py +++ b/odl/contrib/tomo/__init__.py @@ -6,7 +6,7 @@ # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. -"""Contributed code for the odl.tomo package.""" +"""Contributed code for the odl.applications.tomo package.""" from __future__ import absolute_import diff --git a/odl/contrib/tomo/elekta.py b/odl/contrib/tomo/elekta.py index 4c0a290a642..71aa19371c6 100644 --- a/odl/contrib/tomo/elekta.py +++ b/odl/contrib/tomo/elekta.py @@ -106,7 +106,7 @@ def elekta_icon_geometry(sad=780.0, sdd=1000.0, shape=detector_shape) # Create the geometry - geometry = odl.tomo.ConeBeamGeometry( + geometry = odl.applications.tomo.ConeBeamGeometry( angles, detector_partition, src_radius=sad, det_radius=sdd - sad) @@ -190,15 +190,15 @@ def elekta_icon_fbp(ray_transform, >>> from odl.contrib import tomo >>> geometry = tomo.elekta_icon_geometry() >>> space = tomo.elekta_icon_space() - >>> ray_transform = odl.tomo.RayTransform(space, geometry) + >>> ray_transform = odl.applications.tomo.RayTransform(space, geometry) >>> fbp_op = tomo.elekta_icon_fbp(ray_transform) """ - fbp_op = odl.tomo.fbp_op(ray_transform, + fbp_op = odl.applications.tomo.fbp_op(ray_transform, padding=padding, filter_type=filter_type, frequency_scaling=frequency_scaling) if parker_weighting: - parker_weighting = odl.tomo.parker_weighting(ray_transform) + parker_weighting = odl.applications.tomo.parker_weighting(ray_transform) fbp_op = fbp_op * parker_weighting return fbp_op @@ -284,7 +284,7 @@ def elekta_xvi_geometry(sad=1000.0, sdd=1500.0, shape=detector_shape) # Create the geometry - geometry = odl.tomo.ConeBeamGeometry( + geometry = odl.applications.tomo.ConeBeamGeometry( angles, detector_partition, src_radius=sad, det_radius=sdd - sad) @@ -357,10 +357,10 @@ def elekta_xvi_fbp(ray_transform, >>> from odl.contrib import tomo >>> geometry = tomo.elekta_xvi_geometry() >>> space = tomo.elekta_xvi_space() - >>> ray_transform = odl.tomo.RayTransform(space, geometry) + >>> ray_transform = odl.applications.tomo.RayTransform(space, geometry) >>> fbp_op = tomo.elekta_xvi_fbp(ray_transform) """ - fbp_op = odl.tomo.fbp_op(ray_transform, + fbp_op = odl.applications.tomo.fbp_op(ray_transform, padding=padding, filter_type=filter_type, frequency_scaling=frequency_scaling) @@ -369,5 +369,5 @@ def elekta_xvi_fbp(ray_transform, if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/contrib/tomo/examples/elekta_icon_algebraic_reconstruction.py b/odl/contrib/tomo/examples/elekta_icon_algebraic_reconstruction.py index f57afe43672..b1e1d3442d0 100644 --- a/odl/contrib/tomo/examples/elekta_icon_algebraic_reconstruction.py +++ b/odl/contrib/tomo/examples/elekta_icon_algebraic_reconstruction.py @@ -20,11 +20,11 @@ geometries = [geometry[i * step:(i + 1) * step] for i in range(subsets)] # Create ray transform -ray_transforms = [odl.tomo.RayTransform(space, geom, use_cache=False) +ray_transforms = [odl.applications.tomo.RayTransform(space, geom, use_cache=False) for geom in geometries] # Create simple phantom -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create artificial data projections = [rt(phantom) for rt in ray_transforms] diff --git a/odl/contrib/tomo/examples/elekta_icon_fbp.py b/odl/contrib/tomo/examples/elekta_icon_fbp.py index e275cd5f055..0f1d1fd3834 100644 --- a/odl/contrib/tomo/examples/elekta_icon_fbp.py +++ b/odl/contrib/tomo/examples/elekta_icon_fbp.py @@ -14,14 +14,14 @@ space = tomo.elekta_icon_space(shape=(112, 112, 112)) # Create ray transform -ray_transform = odl.tomo.RayTransform(space, geometry, +ray_transform = odl.applications.tomo.RayTransform(space, geometry, use_cache=False) # Get default FDK reconstruction operator recon_op = tomo.elekta_icon_fbp(ray_transform) # Create simplified phantom -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create artificial data projections = ray_transform(phantom) diff --git a/odl/contrib/tomo/examples/elekta_xvi_algebraic_reconstruction.py b/odl/contrib/tomo/examples/elekta_xvi_algebraic_reconstruction.py index fad1386e1e0..44c02d44b79 100644 --- a/odl/contrib/tomo/examples/elekta_xvi_algebraic_reconstruction.py +++ b/odl/contrib/tomo/examples/elekta_xvi_algebraic_reconstruction.py @@ -20,11 +20,11 @@ geometries = [geometry[i * step:(i + 1) * step] for i in range(subsets)] # Create ray transform -ray_transforms = [odl.tomo.RayTransform(space, geom, use_cache=False) +ray_transforms = [odl.applications.tomo.RayTransform(space, geom, use_cache=False) for geom in geometries] # Create simple phantom -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create artificial data projections = [rt(phantom) for rt in ray_transforms] diff --git a/odl/contrib/tomo/examples/elekta_xvi_fbp.py b/odl/contrib/tomo/examples/elekta_xvi_fbp.py index 373815bf7f5..e6d598b354d 100644 --- a/odl/contrib/tomo/examples/elekta_xvi_fbp.py +++ b/odl/contrib/tomo/examples/elekta_xvi_fbp.py @@ -14,13 +14,13 @@ space = tomo.elekta_xvi_space(shape=(112, 112, 112)) # Create ray transform -ray_transform = odl.tomo.RayTransform(space, geometry, use_cache=False) +ray_transform = odl.applications.tomo.RayTransform(space, geometry, use_cache=False) # Get default FDK reconstruction operator recon_op = tomo.elekta_xvi_fbp(ray_transform) # Create simplified phantom -phantom = odl.phantom.shepp_logan(space, modified=True) +phantom = odl.core.phantom.shepp_logan(space, modified=True) # Create artificial data projections = ray_transform(phantom) diff --git a/odl/contrib/torch/examples/operator_function.py b/odl/contrib/torch/examples/operator_function.py index 5e21b95496c..9fa5d4de617 100644 --- a/odl/contrib/torch/examples/operator_function.py +++ b/odl/contrib/torch/examples/operator_function.py @@ -36,7 +36,7 @@ # --- Gradient (backward) --- # # Define ODL loss functional -l2sq = odl.solvers.L2NormSquared(op.range) +l2sq = odl.functional.L2NormSquared(op.range) # Compute forward pass z = OperatorFunction.apply(op, x) @@ -54,7 +54,7 @@ # --- Gradients for input batches --- # # This time without operator -l2sq = odl.solvers.L2NormSquared(odl.rn(3)) +l2sq = odl.functional.L2NormSquared(odl.rn(3)) # To define a loss, we need to handle two arguments and the final diff --git a/odl/contrib/torch/operator.py b/odl/contrib/torch/operator.py index 5b8f826e106..c794e2b4f3b 100644 --- a/odl/contrib/torch/operator.py +++ b/odl/contrib/torch/operator.py @@ -22,6 +22,13 @@ from packaging.version import parse as parse_version from odl import Operator +from odl.core.space.base_tensors import Tensor, TensorSpace +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY + +from odl.backends.arrays.pytorch_tensors import pytorch_array_backend + +from typing import Optional + if parse_version(torch.__version__) < parse_version('0.4'): warnings.warn("This interface is designed to work with Pytorch >= 0.4", @@ -31,7 +38,6 @@ class OperatorFunction(torch.autograd.Function): - """Wrapper of an ODL operator as a ``torch.autograd.Function``. This wrapper exposes an `Operator` object to the PyTorch autograd @@ -75,7 +81,7 @@ class OperatorFunction(torch.autograd.Function): Functionals, i.e., operators with scalar output, are also supported: - >>> odl_func = odl.solvers.L2NormSquared(odl.rn(3, dtype='float32')) + >>> odl_func = odl.functional.L2NormSquared(odl.rn(3, dtype='float32')) >>> x = torch.tensor([1.0, 2.0, 3.0]) >>> OperatorFunction.apply(odl_func, x) tensor(14.) @@ -116,7 +122,7 @@ class OperatorFunction(torch.autograd.Function): We can again use a custom functional, with single or multiple inputs: - >>> odl_func = odl.solvers.L2NormSquared(odl.rn(3, dtype='float32')) + >>> odl_func = odl.functional.L2NormSquared(odl.rn(3, dtype='float32')) >>> x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True) >>> loss = OperatorFunction.apply(odl_func, x) >>> loss @@ -140,7 +146,7 @@ class OperatorFunction(torch.autograd.Function): Loss functions of type ``loss_func(input, target)`` with reduction can be implemented e.g. as follows: - >>> l2sq = odl.solvers.L2NormSquared(odl.rn(3, dtype='float32')) + >>> l2sq = odl.functional.L2NormSquared(odl.rn(3, dtype='float32')) >>> >>> def my_mse(input, target, reduction='mean'): ... val = OperatorFunction.apply(l2sq, input - target) @@ -166,7 +172,7 @@ class OperatorFunction(torch.autograd.Function): """ @staticmethod - def forward(ctx, operator, input): + def forward(ctx, operator: Operator, input_tensor: torch.Tensor) -> torch.Tensor: """Evaluate forward pass on the input. Parameters @@ -185,24 +191,18 @@ def forward(ctx, operator, input): result : `torch.Tensor` Tensor holding the result of the evaluation. """ - if not isinstance(operator, Operator): - raise TypeError( - "`operator` must be an `Operator` instance, got {!r}" - "".format(operator) - ) - - # Save operator for backward; input only needs to be saved if - # the operator is nonlinear (for `operator.derivative(input)`) + assert(isinstance(input_tensor, torch.Tensor)) + assert(isinstance(operator, Operator)) + assert(isinstance(operator.domain, TensorSpace)) ctx.operator = operator + ctx.device = input_tensor.device + + input_tensor = input_tensor.detach() if not operator.is_linear: - # Only needed for nonlinear operators - ctx.save_for_backward(input) + ctx.save_for_backward(input_tensor) - # TODO(kohr-h): use GPU memory directly when possible - # TODO(kohr-h): remove `copy_if_zero_strides` when NumPy 1.16.0 - # is required - input_arr = copy_if_zero_strides(input.cpu().detach().numpy()) + input_arr = input_tensor.to(device=operator.domain.device) # Determine how to loop over extra shape "left" of the operator # domain shape @@ -230,31 +230,40 @@ def forward(ctx, operator, input): ctx.op_in_dtype = operator.domain.dtype ctx.op_out_dtype = op_out_dtype - # Evaluate the operator on all inputs in a loop + def _apply_op_to_single_torch(single_input: torch.Tensor) -> torch.Tensor: + x = operator.domain.element(single_input) + y = operator(x) + if isinstance(y, Tensor): + y = pytorch_array_backend.from_dlpack(y.data) + elif isinstance(y, (int, float, complex)): + y = torch.tensor(y) + else: + raise TypeError(f"Unsupported result of type {type(y)} from operator.") + return y.to(device=ctx.device) + if extra_shape: - # Multiple inputs: flatten extra axes, then do one entry at a time input_arr_flat_extra = input_arr.reshape((-1,) + op_in_shape) results = [] for inp in input_arr_flat_extra: - results.append(operator(inp)) + results.append(_apply_op_to_single_torch(inp)) # Stack results, reshape to the expected output shape and enforce # correct dtype - result_arr = np.stack(results).astype(op_out_dtype, copy=AVOID_UNNECESSARY_COPY) - result_arr = result_arr.reshape(extra_shape + op_out_shape) + result_arr = torch.stack(results) + result = result_arr.reshape(extra_shape + op_out_shape) + else: # Single input: evaluate directly - result_arr = np.asarray( - operator(input_arr) - ).astype(op_out_dtype, copy=AVOID_UNNECESSARY_COPY) + result = _apply_op_to_single_torch(input_arr) + + return result + - # Convert back to tensor - tensor = torch.from_numpy(result_arr).to(input.device) - return tensor @staticmethod - def backward(ctx, grad_output): - r"""Apply the adjoint of the derivative at ``grad_output``. + def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: + r"""Apply the adjoint of the derivative at the input of the preceding + ``forward`` call to ``grad_output``. This method is usually not called explicitly but as a part of the ``backward()`` pass of a backpropagation step. @@ -300,23 +309,13 @@ def backward(ctx, grad_output): computing ``[f'(x)^*(y)]`` using the input ``x`` stored during the previous `forward` pass. """ + # Return early if there's nothing to do if not ctx.needs_input_grad[1]: return None, None operator = ctx.operator - # Get `operator` and `input` from the context object (the input - # is only needed for nonlinear operators) - if not operator.is_linear: - # TODO: implement directly for GPU data - # TODO(kohr-h): remove `copy_if_zero_strides` when NumPy 1.16.0 - # is required - input_arr = copy_if_zero_strides( - ctx.saved_tensors[0].detach().cpu().numpy() - ) - - # ODL weights spaces, pytorch doesn't, so we need to handle this try: dom_weight = operator.domain.weighting.const except AttributeError: @@ -327,26 +326,25 @@ def backward(ctx, grad_output): ran_weight = 1.0 scaling = dom_weight / ran_weight - # Convert `grad_output` to NumPy array - grad_output_arr = copy_if_zero_strides( - grad_output.detach().cpu().numpy() - ) + grad_output_arr = grad_output.to(device=operator.domain.device) - # Get shape information from the context object op_in_shape = ctx.op_in_shape op_out_shape = ctx.op_out_shape extra_shape = ctx.extra_shape - op_in_dtype = ctx.op_in_dtype - # Check if `grad_output` is consistent with `extra_shape` and - # `op_out_shape` - if grad_output_arr.shape != extra_shape + op_out_shape: - raise ValueError( - 'expected tensor of shape {}, got shape {}' - ''.format(extra_shape + op_out_shape, grad_output_arr.shape) - ) + def _apply_op_to_single_torch( single_input: Optional[torch.Tensor] + , single_grad_out: torch.Tensor ) -> torch.Tensor: + g = operator.range.element(single_grad_out) + if operator.is_linear: + result = operator.adjoint(g) + else: + x = operator.domain.element(single_input) + result = operator.derivative(x).adjoint(g) + return pytorch_array_backend.from_dlpack(result.data).to(ctx.device) + + if not operator.is_linear: + input_arr = ctx.saved_tensors[0].detach() - # Evaluate the (derivative) adjoint on all inputs in a loop if extra_shape: # Multiple gradients: flatten extra axes, then do one entry # at a time @@ -355,39 +353,34 @@ def backward(ctx, grad_output): ) results = [] + if operator.is_linear: for ograd in grad_output_arr_flat_extra: - results.append(np.asarray(operator.adjoint(ograd))) + results.append(_apply_op_to_single_torch(None, ograd)) else: # Need inputs, flattened in the same way as the gradients input_arr_flat_extra = input_arr.reshape((-1,) + op_in_shape) - for ograd, inp in zip( - grad_output_arr_flat_extra, input_arr_flat_extra - ): - results.append( - np.asarray(operator.derivative(inp).adjoint(ograd)) - ) + for ograd, inp in zip(grad_output_arr_flat_extra, input_arr_flat_extra): + results.append(_apply_op_to_single_torch(inp, ograd)) # Stack results, reshape to the expected output shape and enforce # correct dtype - result_arr = np.stack(results).astype(op_in_dtype, copy=AVOID_UNNECESSARY_COPY) - result_arr = result_arr.reshape(extra_shape + op_in_shape) + result_tensor = torch.stack(results).reshape(extra_shape + op_in_shape) else: - # Single gradient: evaluate directly if operator.is_linear: - result_arr = np.asarray( - operator.adjoint(grad_output_arr) - ).astype(op_in_dtype, copy=AVOID_UNNECESSARY_COPY) + result_tensor = _apply_op_to_single_torch(None, grad_output.detach()) else: - result_arr = np.asarray( - operator.derivative(input_arr).adjoint(grad_output_arr) - ).astype(op_in_dtype, copy=AVOID_UNNECESSARY_COPY) - - # Apply scaling, convert to tensor and return + result_tensor = _apply_op_to_single_torch(input_arr, grad_output.detach()) + if scaling != 1.0: - result_arr *= scaling - grad_input = torch.from_numpy(result_arr).to(grad_output.device) - return None, grad_input # return `None` for the `operator` part + result_tensor *= scaling + + return None, result_tensor + + + + + class OperatorModule(torch.nn.Module): @@ -517,7 +510,7 @@ def copy_if_zero_strides(arr): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests import odl from torch import autograd, nn run_doctests(extraglobs={'np': np, 'odl': odl, 'torch': torch, diff --git a/odl/contrib/torch/test/test_operator.py b/odl/contrib/torch/test/test_operator.py index 8c259d2dcb6..b8072730fa7 100644 --- a/odl/contrib/torch/test/test_operator.py +++ b/odl/contrib/torch/test/test_operator.py @@ -14,41 +14,47 @@ import odl from odl.contrib import torch as odl_torch -from odl.util.testutils import all_almost_equal, simple_fixture +from odl.core.util.testutils import all_almost_equal, simple_fixture +from odl.core.util.dtype_utils import _universal_dtype_identifier +from odl.backends.arrays.pytorch_tensors import pytorch_array_backend dtype = simple_fixture('dtype', ['float32', 'float64']) -device_params = ['cpu'] -if torch.cuda.is_available(): - device_params.append('cuda') +device_params = pytorch_array_backend.available_devices device = simple_fixture('device', device_params) shape = simple_fixture('shape', [(3,), (2, 3), (2, 2, 3)]) -def test_autograd_function_forward(dtype, device): +def test_autograd_function_forward(dtype, device, odl_impl_device_pairs): """Test forward evaluation with operators as autograd functions.""" # Define ODL operator - matrix = np.random.rand(2, 3).astype(dtype) + matrix = np.random.rand(2, 3) + impl, odl_device = odl_impl_device_pairs + space = odl.tensor_space((2,3), impl=impl, device=odl_device, dtype=dtype) + matrix = space.element(matrix) odl_op = odl.MatrixOperator(matrix) # Compute forward pass with both ODL and PyTorch x_arr = np.ones(3, dtype=dtype) + x_odl = odl_op.domain.element(x_arr) x = torch.from_numpy(x_arr).to(device) res = odl_torch.OperatorFunction.apply(odl_op, x) - res_arr = res.detach().cpu().numpy() - odl_res = odl_op(x_arr) + odl_res = odl_op(x_odl) + odl_res_torch = torch.asarray(odl_res.data, device=device) + assert _universal_dtype_identifier(res.dtype) == dtype + assert all_almost_equal(res, odl_res_torch) + assert str(x.device)== str(res.device) == device - assert res_arr.dtype == dtype - assert all_almost_equal(res_arr, odl_res) - assert x.device.type == res.device.type == device - -def test_autograd_function_backward(dtype, device): +def test_autograd_function_backward(dtype, device, odl_impl_device_pairs): """Test backprop with operators/functionals as autograd functions.""" + + impl, odl_device = odl_impl_device_pairs + # Define ODL operator and cost functional matrix = np.random.rand(2, 3).astype(dtype) - odl_op = odl.MatrixOperator(matrix) - odl_cost = odl.solvers.L2NormSquared(odl_op.range) + odl_op = odl.MatrixOperator(matrix, impl=impl, device=odl_device) + odl_cost = odl.functional.L2NormSquared(odl_op.range) odl_functional = odl_cost * odl_op # Define evaluation point and mark as `requires_grad` to enable @@ -71,14 +77,17 @@ def test_autograd_function_backward(dtype, device): assert grad_arr.dtype == dtype assert all_almost_equal(grad_arr, odl_grad) - assert x.device.type == grad.device.type == device + assert x.device == grad.device == torch.device(device) -def test_module_forward(shape, device): +def test_module_forward(shape, device, odl_impl_device_pairs): """Test forward evaluation with operators as modules.""" + + impl, odl_device = odl_impl_device_pairs + # Define ODL operator and wrap as module ndim = len(shape) - space = odl.uniform_discr([0] * ndim, shape, shape, dtype='float32') + space = odl.uniform_discr([0] * ndim, shape, shape, dtype='float32', impl=impl, device=odl_device) odl_op = odl.ScalingOperator(space, 2) op_mod = odl_torch.OperatorModule(odl_op) @@ -92,9 +101,9 @@ def test_module_forward(shape, device): res_arr = res.detach().cpu().numpy() assert res_arr.shape == (1,) + odl_op.range.shape assert all_almost_equal( - res_arr, np.asarray(odl_op(x_arr))[None, ...] + res_arr, odl_op(x_arr).data[None, ...] ) - assert x.device.type == res.device.type == device + assert x.device == res.device == torch.device(device) # Test with 2 extra dims x = torch.from_numpy(x_arr).to(device)[None, None, ...] @@ -103,16 +112,19 @@ def test_module_forward(shape, device): res_arr = res.detach().cpu().numpy() assert res_arr.shape == (1, 1) + odl_op.range.shape assert all_almost_equal( - res_arr, np.asarray(odl_op(x_arr))[None, None, ...] + res_arr, odl_op(x_arr).data[None, None, ...] ) - assert x.device.type == res.device.type == device + assert x.device == res.device == torch.device(device) -def test_module_forward_diff_shapes(device): +def test_module_forward_diff_shapes(device, odl_impl_device_pairs): """Test operator module with different shapes of input and output.""" + + impl, odl_device = odl_impl_device_pairs + # Define ODL operator and wrap as module matrix = np.random.rand(2, 3).astype('float32') - odl_op = odl.MatrixOperator(matrix) + odl_op = odl.MatrixOperator(matrix, impl=impl, device=odl_device) op_mod = odl_torch.OperatorModule(odl_op) # Input data @@ -125,9 +137,9 @@ def test_module_forward_diff_shapes(device): res_arr = res.detach().cpu().numpy() assert res_arr.shape == (1,) + odl_op.range.shape assert all_almost_equal( - res_arr, np.asarray(odl_op(x_arr))[None, ...] + res_arr, odl_op(x_arr).data[None, ...] ) - assert x.device.type == res.device.type == device + assert x.device == res.device == torch.device(device) # Test with 2 extra dims x = torch.from_numpy(x_arr).to(device)[None, None, ...] @@ -136,16 +148,19 @@ def test_module_forward_diff_shapes(device): res_arr = res.detach().cpu().numpy() assert res_arr.shape == (1, 1) + odl_op.range.shape assert all_almost_equal( - res_arr, np.asarray(odl_op(x_arr))[None, None, ...] + res_arr, odl_op(x_arr).data[None, None, ...] ) - assert x.device.type == res.device.type == device + assert x.device == res.device == torch.device(device) -def test_module_backward(device): +def test_module_backward(device, odl_impl_device_pairs): """Test backpropagation with operators as modules.""" + + impl, odl_device = odl_impl_device_pairs + # Define ODL operator and wrap as module matrix = np.random.rand(2, 3).astype('float32') - odl_op = odl.MatrixOperator(matrix) + odl_op = odl.MatrixOperator(matrix, impl=impl, device=odl_device) op_mod = odl_torch.OperatorModule(odl_op) loss_fn = nn.MSELoss() @@ -164,7 +179,7 @@ def test_module_backward(device): loss.backward() assert all(p is not None for p in model.parameters()) assert x.grad.detach().cpu().abs().sum() != 0 - assert x.device.type == loss.device.type == device + assert x.device == loss.device == torch.device(device) # Test with conv layers (2 extra dims) layer_before = nn.Conv1d(1, 2, 2) # 1->2 channels @@ -184,8 +199,8 @@ def test_module_backward(device): loss.backward() assert all(p is not None for p in model.parameters()) assert x.grad.detach().cpu().abs().sum() != 0 - assert x.device.type == loss.device.type == device + assert x.device == loss.device == torch.device(device) if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/core/README.md b/odl/core/README.md new file mode 100644 index 00000000000..4633edfcb52 --- /dev/null +++ b/odl/core/README.md @@ -0,0 +1,20 @@ +# Core +This folder contains the core ODL code. + +* [Array-API support](array_API_support): Code to implement the array-API defined functions as ODL functions. Also contains the ArrayBackend Dataclass to handle multi-backends. + +* [diagnostics](diagnostics): Automated tests for user-defined operators and spaces. `SpaceTest` verifies that various properties of linear spaces work as expected, while `OperatorTest` does the same for operators. + +* [discr](discr): Contains the set of discretized functions on some domain. + +* [operator](operator): Operators between sets. Defines the class `Operator` which is the main abstract class used for any mapping between two `Set`'s. Further defines several general classes of operators applicable to general spaces. + +* [phantom](phantom): Standardized test images. Functions for generating standardized test examples such as `shepp_logan`. + +* [set](set): Sets of objects. Defines the abstract class `Set` and `LinearSpace` as well as some concrete implementations such as `RealNumbers`. + +* [space](space): Concrete vector spaces. Contains concrete implementations of `LinearSpace`, including `NumpyTensorSpace` and `ProductSpace`. + +* [sparse](sparse): Multi-backend sparse arrays handling. + +* [util](util) Utilities. Functionality mainly intended to be used by other ODL functions such as linear algebra and visualization. diff --git a/odl/core/__init__.py b/odl/core/__init__.py new file mode 100644 index 00000000000..2e9f00246e7 --- /dev/null +++ b/odl/core/__init__.py @@ -0,0 +1,16 @@ +from .array_API_support import * +from .discr import * +from .operator import * +from .set import * +from .space import * +from .sparse import * +from .util import * + +__all__ = () + +__all__ += array_API_support.__all__ +__all__ += util.__all__ +__all__ += discr.__all__ +__all__ += operator.__all__ +__all__ += set.__all__ +__all__ += space.__all__ diff --git a/odl/core/array_API_support/__init__.py b/odl/core/array_API_support/__init__.py new file mode 100644 index 00000000000..7b673d79954 --- /dev/null +++ b/odl/core/array_API_support/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2014-2025 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Python Array API support.""" + +from __future__ import absolute_import + +from .element_wise import * +from .statistical import * +from .utils import * +from .comparisons import * +from .array_creation import * + +__all__ = () +__all__ += element_wise.__all__ +__all__ += statistical.__all__ +__all__ += utils.__all__ +__all__ += comparisons.__all__ +__all__ += array_creation.__all__ diff --git a/odl/core/array_API_support/array_creation.py b/odl/core/array_API_support/array_creation.py new file mode 100644 index 00000000000..869715fac80 --- /dev/null +++ b/odl/core/array_API_support/array_creation.py @@ -0,0 +1,165 @@ +# Copyright 2014-2025 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +""" +Array creation functions expected by the python array API. +Although ODL has many ways to create a tensor, we have found useful during development and testing to be able to create arrays in a certain backend. +We do not expect the users to work with these functions often but have still implemented them as we deemed useful during development. + +Notes: + -> the functions with name *_like take an array/ODL object as an input + -> the other functions require impl, shape, dtype, device arguments. + +Examples: +>>> odl.arange('numpy', 0,10,1, dtype='float32', device='cuda:0') +Traceback (most recent call last): +ValueError: Unsupported device for NumPy: 'cuda:0' +>>> odl.arange('numpy',start=0,stop=10,step=1, dtype='float32', device='cpu') +array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32) +>>> odl.asarray(odl.rn(4).element([1,2,3,4])) +array([ 1., 2., 3., 4.]) +>>> odl.full('numpy', (4,4), 4) == np.full((4,4),4) +array([[ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True]], dtype=bool) +>>> odl.full_like(x = np.full((4,4),4), fill_value=4) == np.full((4,4),4) +array([[ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True]], dtype=bool) +""" + +from .utils import get_array_and_backend, lookup_array_backend + +__all__ = ( + 'arange', + 'asarray', + 'empty', + 'empty_like', + 'eye', + # 'from_dlpack', + 'full', + 'full_like', + 'linspace', + 'meshgrid', + 'ones', + 'ones_like', + 'tril', + 'triu', + 'zeros', + 'zeros_like' +) + +def _helper_from_impl(fname, impl, *args, **kwargs): + backend = lookup_array_backend(impl) + fn = getattr(backend.array_namespace, fname) + return fn(*args, **kwargs) + +def _helper_from_array(fname, x, **kwargs): + x, backend_x = get_array_and_backend(x) + fn = getattr(backend_x.array_namespace, fname) + return fn(x, **kwargs) + +def arange(impl, start, stop=None, step=1, dtype=None, device=None): + """ + Returns evenly spaced values within the half-open interval [start, stop) as a one-dimensional array. + """ + return _helper_from_impl('arange', impl, start, stop=stop, step=step, dtype=dtype, device=device) + +def asarray(x): + """ + Returns an array corresponding to an ODL object. + """ + return _helper_from_array('asarray', x) + +def empty(impl, shape, dtype=None, device=None): + """ + Returns an uninitialized array having a specified shape. + """ + return _helper_from_impl('empty', impl, shape, dtype=dtype, device=device) + +def empty_like(x, dtype=None, device=None): + """ + Returns an uninitialized array with the same shape as an input array x. + """ + return _helper_from_array('empty_like', x=x, dtype=dtype, device=device) + +def eye(impl, n_rows, n_cols=None, k=0, dtype=None, device=None): + """ + Returns a two-dimensional array with ones on the kth diagonal and zeros elsewhere. + """ + return _helper_from_impl('eye', impl, n_rows=n_rows, n_cols=n_cols, k=k, dtype=dtype, device=device) + +# def from_dlpack(x, device=None): +# """ +# Returns a new array containing the data from another (array) object with a __dlpack__ method. +# Note: +# The device argument is currently NOT used, this is due to Pytorch needing to catch up with the array API standard +# """ +# return _helper_from_array('from_dlpack', x=x) + +def full(impl, shape, fill_value, dtype=None, device=None): + """ + Returns a new array having a specified shape and filled with fill_value. + """ + return _helper_from_impl('full', impl, shape=shape, fill_value=fill_value, dtype=dtype, device=device) + +def full_like(x, fill_value, dtype=None, device=None): + """ + Returns a new array filled with fill_value and having the same shape as an input array x. + """ + return _helper_from_array('full_like', x=x, fill_value=fill_value, dtype=dtype, device=device) + +def linspace(impl, start, stop, num, dtype=None, device=None, endpoint=True): + """ + Returns evenly spaced numbers over a specified interval. + """ + return _helper_from_impl('linspace', impl, start, stop, num, dtype=dtype, device=device, endpoint=endpoint) + +def meshgrid(impl, *arrays, indexing='xy'): + """ + Returns coordinate matrices from coordinate vectors. + """ + return _helper_from_impl('meshgrid', impl, *arrays, indexing=indexing) + +def ones(impl, shape, dtype=None, device=None): + """ + Returns a new array having a specified shape and filled with ones. + """ + return _helper_from_impl('ones', impl, shape=shape, dtype=dtype, device=device) + +def ones_like(x, dtype=None, device=None): + """ + Returns a new array filled with ones and having the same shape as an input array x. + """ + return _helper_from_array('ones_like', x, dtype=dtype, device=device) + +def tril(x, k=0): + """ + Returns the lower triangular part of a matrix (or a stack of matrices) x. + """ + return _helper_from_array('tril', x, k=k) + +def triu(x, k=0): + """ + Returns the upper triangular part of a matrix (or a stack of matrices) x. + """ + return _helper_from_array('triu', x, k=k) + +def zeros(impl, shape, dtype=None, device=None): + """ + Returns a new array having a specified shape and filled with zeros. + """ + return _helper_from_impl('zeros', impl, shape=shape, dtype=dtype, device=device) + +def zeros_like(x, dtype=None, device=None): + """ + Returns a new array filled with zeros and having the same shape as an input array x. + """ + return _helper_from_array('zeros_like', x, dtype=dtype, device=device) \ No newline at end of file diff --git a/odl/core/array_API_support/comparisons.py b/odl/core/array_API_support/comparisons.py new file mode 100644 index 00000000000..eaec3f4db59 --- /dev/null +++ b/odl/core/array_API_support/comparisons.py @@ -0,0 +1,126 @@ +# Copyright 2014-2025 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +""" +Comparisons functions + -> Utility functions expected by the python array API: `all` and `any` + -> Convenience functions that work in both backends: `allclose` and `isclose` + -> Convenient composition of two functions: `all_equal` + +Args: + x (Number, LinearSpaceElement): Left-hand operand + y (Number, LinearSpaceElement): Right-hand operand + +Returns: + x (bool | array-like of bools): Output of the comparison + +Notes: + 1) These functions do not return ODL objects +""" + +from .utils import get_array_and_backend +from numbers import Number +import numpy as np + +__all__ = ( + "all", + "allclose", + "odl_all_equal", + "any", + "isclose" +) + + +def _helper(x, fname, **kwargs): + """ + Examples + >>> space = odl.rn(3) + >>> e1 = space.element((1,2,3)) + >>> odl.isclose(e1, space.element([1,2,3])) + array([ True, True, True], dtype=bool) + >>> odl.isclose(e1, space.element([1.5,2,3.2])) + array([False, True, False], dtype=bool) + >>> odl.allclose(e1, space.element([1,2,3])) + True + >>> odl.allclose(e1, space.element([2,2,2])) + False + >>> odl.all(odl.isclose(e1, space.element([1,2,3]))) + True + >>> odl.any(odl.isclose(e1, space.element([1.5,2,3.2]))) + True + >>> odl.all(e1 == [1,2,3]) + Traceback (most recent call last): + ValueError: The left hand operand is a python Number of type and no right hand arguments were provided. + """ + if isinstance(x, Number): + if 'y' in kwargs: + y = kwargs.pop('y') + if isinstance(y, Number): + fn = getattr(np, fname) + else: + y, backend_y = get_array_and_backend(y) + fn = getattr(backend_y.array_namespace, fname) + # Devilish pytorch call for eq + # https://docs.pytorch.org/docs/2.7/generated/torch.eq.html + if fname == 'equal': + return fn(y, x, **kwargs) + else: + return fn(x, y, **kwargs) + else: + raise ValueError(f"The left hand operand is a python Number of type {type(x)} and no right hand arguments were provided.") + + x, backend_x = get_array_and_backend(x) + fn = getattr(backend_x.array_namespace, fname) + if 'y' in kwargs: + y = kwargs.pop('y') + if isinstance(y, Number): + pass + else: + y, backend_y = get_array_and_backend(y) + assert backend_x == backend_y, f"Two different backends {backend_x.impl} and {backend_y.impl} were provided, This operation is not supported by odl functions. Please ensure that your objects have the same implementation." + return fn(x, y, **kwargs) + else: + return fn(x, **kwargs) + +def all(x): + """ + Test whether all array elements along a given axis evaluate to True. + """ + return _helper(x, 'all') + +def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False): + """ + Returns True if two arrays are element-wise equal within a tolerance. + Note: This is not a Python Array API method, but it happens to work in Numpy and Pytorch. + """ + return _helper(x, 'allclose', y=y, rtol=rtol, atol=atol, equal_nan=equal_nan) + +def odl_all_equal(x, y): + """ + Test whether all array elements along a given axis evaluate to True. + Note: This is not a Python Array API method, but a composition for convenience. + It requires both sides of the comparison to use the same implementation, like + other array-API functions, to avoid inefficient copying / restructuring. + For a more flexible equality check useful for testing purposes, consider + `all_equal` from `odl.testutils`. + """ + return _helper(_helper(x, 'equal', y=y), 'all') + +def any(x): + """ + Test whether any array element along a given axis evaluates to True. + """ + return _helper(x, 'any') + +def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False): + """ + Returns a boolean array where two arrays are element-wise equal within a tolerance. + Note: This is not a Python Array API method, but it happens to work in Numpy and Pytorch. + """ + return _helper(x, 'isclose', y=y, rtol=rtol, atol=atol, equal_nan=equal_nan) + diff --git a/odl/core/array_API_support/element_wise.py b/odl/core/array_API_support/element_wise.py new file mode 100644 index 00000000000..015867ddbda --- /dev/null +++ b/odl/core/array_API_support/element_wise.py @@ -0,0 +1,573 @@ +# Copyright 2014-2025 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Element-wise functions expected by the python array API. +Internally, all functions apply an element-wise `operation` on: + -> a python int/float/complex and a LinearSpaceElement + -> two LinearSpaceElement + -> a single LinearSpaceElement + +Args: + operation (str): a string identifier to lookup the desired function in the LinearSpaceElement's namespace. + x1 (int | float | complex | LinearSpaceElement : Left operand + x2 (int | float | complex | LinearSpaceElement (Optional) : Right operand. Defaults to None. + out (LinearSpaceElement, optional): Out LinearSpaceElement for inplace updates. Defaults to None. + +Returns: + LinearSpaceElement: result of the element-wise operation on the array wrapped inside the element of an ODL space. + +Notes: + 1) The output array is wrapped in a space of which type depends of the output array's. This is a change of behaviour compared to ODL < 0.8.2 + 2) Although one could use it to perform an operation on array-specific backend only, there is no clean way to infer a LinearSpace from the output. As such, one of the two operands must be a LinearSpaceElement +""" + +__all__ = ( + 'abs', + 'acos', + 'acosh', + 'add', + 'asin', + 'asinh', + 'atan', + 'atan2', + 'atanh', + 'bitwise_and', + 'bitwise_left_shift', + 'bitwise_invert', + 'bitwise_or', + 'bitwise_right_shift', + 'bitwise_xor', + 'ceil', + 'clip', + 'conj', + 'copysign', + 'cos', + 'cosh', + 'divide', + 'equal', + 'exp', + 'expm1', + 'floor', + 'floor_divide', + 'greater', + 'greater_equal', + 'hypot', + 'imag', + 'isfinite', + 'isinf', + 'isnan', + 'less', + 'less_equal', + 'log', + 'log1p', + 'log2', + 'log10', + 'logaddexp', + 'logical_and', + 'logical_not', + 'logical_or', + 'logical_xor', + 'maximum', + 'minimum', + 'multiply', + 'negative', + 'nextafter', + 'not_equal', + 'positive', + 'pow', + 'real', + 'reciprocal', + 'remainder', + 'round', + 'sign', + 'signbit', + 'sin', + 'sinh', + 'sqrt', + 'square', + 'subtract', + 'tan', + 'tanh', + 'trunc', +) + + +def _apply_element_wise(operation: str, x1, x2=None, out=None, **kwargs): + """ + Examples + >>> e0 = odl.rn(3).zero() + >>> e1 = odl.rn(3).one() + >>> e2 = e0 + e1 + >>> print(e2) + [ 1., 1., 1.] + >>> e3 = odl.add(e0, e1) + >>> print(e3) + [ 1., 1., 1.] + >>> e2 == e3 + True + >>> e2 in odl.rn(3) + True + >>> new_el = e0 + 3j + >>> new_el in odl.rn(3) + False + >>> odl.add(odl.zeros_like(e1), e1) + Traceback (most recent call last): + TypeError: The type of the left operand is not supported. + """ + # Lazy import of LinearSpaceElement and Operator for dispatching call + from odl.core.operator import Operator + from odl.core.set.space import LinearSpaceElement + assert not isinstance(x1, Operator) or not isinstance(x2, Operator), f"ODL's array-API support for element-wise functions does not allow ODL Operators" + if isinstance(x1, LinearSpaceElement): + return x1.space._elementwise_num_operation(operation=operation, x1=x1, x2=x2, out=out, **kwargs) + # Handling the left argument as a float/int/complex and right argument as a LinearSpaceElement + elif isinstance(x2, LinearSpaceElement): + return x2.space._elementwise_num_operation(operation=operation, x1=x1, x2=x2, out=out, **kwargs) + else: + raise(AttributeError(f"Either x1 or x2 (if provided) need to be a LinearSpaceElement, got {type(x1)} and {type(x2)} with values {x1=} and {x2=}")) + + +def abs(x, out=None): + """Calculates the absolute value for each element `x_i` of the input array + `x`.""" + return _apply_element_wise('abs', x, out=out) + + +def acos(x, out=None): + """Calculates an implementation-dependent approximation of the principal + value of the inverse cosine for each element `x_i` of the input array + `x`.""" + return _apply_element_wise('acos', x, out=out) + + +def acosh(x, out=None): + """Calculates an implementation-dependent approximation to the inverse + hyperbolic cosine for each element `x_i` of the input array `x`.""" + return _apply_element_wise('acosh', x, out=out) + + +def add(x1, x2, out=None): + """Calculates the sum for each element `x1_i` of the input array `x1` with + the respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('add', x1, x2=x2, out=out) + + +def asin(x, out=None): + """Calculates an implementation-dependent approximation of the principal + value of the inverse sine for each element `x_i` of the input array `x`.""" + return _apply_element_wise('asin', x, out=out) + + +def asinh(x, out=None): + """Calculates an implementation-dependent approximation to the inverse + hyperbolic sine for each element `x_i` in the input array `x`.""" + return _apply_element_wise('asinh', x, out=out) + + +def atan(x, out=None): + """Calculates an implementation-dependent approximation of the principal + value of the inverse tangent for each element `x_i` of the input array + `x`.""" + return _apply_element_wise('atan', x, out=out) + + +def atan2(x1, x2, out=None): + """Calculates an implementation-dependent approximation of the inverse + tangent of the quotient `x1/x2`, having domain `[-infinity, +infinity] + \times [-infinity, +infinity]` (where the `\times` notation denotes the set + of ordered pairs of elements `(x1_i, x2_i)`) and codomain `[-pi, +pi]`, + for each pair of elements `(x1_i, x2_i)` of the input arrays `x1` and `x2`, + respectively.""" + return _apply_element_wise("atan2", x1, x2=x2, out=out) + + +def atanh(x, out=None): + """Calculates an implementation-dependent approximation to the inverse + hyperbolic tangent for each element `x_i` of the input array `x`.""" + return _apply_element_wise('atanh', x, out=out) + + +def bitwise_and(x1, x2, out=None): + """Computes the bitwise AND of the underlying binary representation of each + element `x1_i` of the input array `x1` with the respective element `x2_i` + of the input array `x2`.""" + return _apply_element_wise('bitwise_and', x1, x2=x2, out=out) + + +def bitwise_left_shift(x1, x2, out=None): + """Shifts the bits of each element `x1_i` of the input array `x1` to the + left by appending `x2_i` (i.e., the respective element in the input array + `x2`) zeros to the right of `x1_i`.""" + return _apply_element_wise('bitwise_left_shift', x1, x2=x2, out=out) + + +def bitwise_invert(x, out=None): + """Inverts (flips) each bit for each element `x_i` of the input array + `x`.""" + return _apply_element_wise('bitwise_invert', x, out=out) + + +def bitwise_or(x1, x2, out=None): + """Computes the bitwise OR of the underlying binary representation of each + element `x1_i` of the input array `x1` with the respective element `x2_i` + of the input array `x2`.""" + return _apply_element_wise('bitwise_or', x1, x2=x2, out=out) + + +def bitwise_right_shift(x1, x2, out=None): + """Shifts the bits of each element `x1_i` of the input array `x1` to the + right according to the respective element `x2_i` of the input array + `x2`.""" + return _apply_element_wise('bitwise_right_shift', x1, x2=x2, out=out) + + +def bitwise_xor(x1, x2, out=None): + """Computes the bitwise XOR of the underlying binary representation of each + element `x1_i` of the input array `x1` with the respective element `x2_i` + of the input array `x2`.""" + return _apply_element_wise('bitwise_xor', x1, x2=x2, out=out) + + +def ceil(x, out=None): + """Rounds each element `x_i` of the input array `x` to the smallest (i.e., + closest to `-infty`) integer-valued number that is not less than `x_i`.""" + return _apply_element_wise('ceil', x, out=out) + + +def clip(x, out=None, min=None, max=None): + """Clamps each element `x_i` of the input array `x` to the range `[min, + max]`.""" + return _apply_element_wise('clip', x, out=out, min=min, max=max) + + +def conj(x, out=None): + """Returns the complex conjugate for each element `x_i` of the input array + `x`.""" + return _apply_element_wise('conj', x, out=out) + + +def copysign(x1, x2, out=None): + """Composes a floating-point value with the magnitude of `x1_i` and the + sign of `x2_i` for each element of the input array `x1`.""" + return _apply_element_wise('copysign', x1, x2=x2, out=out) + + +def cos(x, out=None): + """Calculates an implementation-dependent approximation to the cosine for + each element `x_i` of the input array `x`.""" + return _apply_element_wise('cos', x, out=out) + + +def cosh(x, out=None): + """Calculates an implementation-dependent approximation to the hyperbolic + cosine for each element `x_i` in the input array `x`.""" + return _apply_element_wise('cosh', x, out=out) + + +def divide(x1, x2, out=None): + """Calculates the division of each element `x1_i` of the input array `x1` + with the respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('divide', x1, x2=x2, out=out) + + +def equal(x1, x2, out=None): + """Computes the truth value of `x1_i == x2_i` for each element `x1_i` of + the input array `x1` with the respective element `x2_i` of the input array + `x2`.""" + return _apply_element_wise('equal', x1, x2=x2, out=out) + + +def exp(x1, out=None): + """Calculates an implementation-dependent approximation to the exponential + function for each element `x_i` of the input array `x` (`e` raised to the + power of `x_i`, where `e` is the base of the natural logarithm).""" + return _apply_element_wise('exp', x1, out=out) + + +def expm1(x1, out=None): + """Calculates an implementation-dependent approximation to `exp(x_i) - 1` + for each element `x_i` of the input array `x`.""" + return _apply_element_wise("expm1", x1, out=out) + + +def floor(x1, out=None): + """Rounds each element `x_i` of the input array `x` to the largest (i.e., + closest to `+infty`) integer-valued number that is not greater than + `x_i`.""" + return _apply_element_wise('floor', x1, out=out) + + +def floor_divide(x1, x2, out=None): + """Calculates the largest integer-valued number that is not greater than + the result of dividing each element `x1_i` of the input array `x1` by the + respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('floor_divide', x1, x2=x2, out=out) + + +def greater(x1, x2, out=None): + """Computes the truth value of `x1_i > x2_i` for each element `x1_i` of the + input array `x1` with the respective element `x2_i` of the input array + `x2`.""" + return _apply_element_wise('greater', x1, x2=x2, out=out) + + +def greater_equal(x1, x2, out=None): + """Computes the truth value of `x1_i >= x2_i` for each element `x1_i` of + the input array `x1` with the respective element `x2_i` of the input array + `x2`.""" + return _apply_element_wise('greater_equal', x1, x2=x2, out=out) + + +def hypot(x1, x2, out=None): + """Computes the square root of the sum of squares for each element `x1_i` + of the input array `x1` with the respective element `x2_i` of the input + array `x2`.""" + return _apply_element_wise('hypot', x1, x2=x2, out=out) + + +def imag(x1, out=None): + """Returns the imaginary part of each element `x_i` of the input array + `x`.""" + return _apply_element_wise('imag', x1, out=out) + + +def isfinite(x1, out=None): + """Tests each element `x_i` of the input array `x` to determine if it is + finite (i.e., not `NaN` and not an infinity).""" + return _apply_element_wise('isfinite', x1, out=out) + + +def isinf(x1, out=None): + """Tests each element `x_i` of the input array `x` to determine if it is a + positive or negative infinity.""" + if x1 == float('Inf') or x1 == -float("Inf"): + return True + return _apply_element_wise('isinf', x1, out=out) + + +def isnan(x1, out=None): + """Tests each element `x_i` of the input array `x` to determine if it is a + `NaN`.""" + if x1 == float('Nan'): + return True + return _apply_element_wise('isnan', x1, out=out) + + +def less(x1, x2, out=None): + """Computes the truth value of `x1_i < x2_i` for each element `x1_i` of the + input array `x1` with the respective element `x2_i` of the input array + `x2`.""" + return _apply_element_wise('less', x1, x2=x2, out=out) + + +def less_equal(x1, x2, out=None): + """Computes the truth value of `x1_i <= x2_i` for each element `x1_i` of + the input array `x1` with the respective element `x2_i` of the input array + `x2`.""" + return _apply_element_wise('less_equal', x1, x2=x2, out=out) + + +def log(x1, out=None): + """Calculates an implementation-dependent approximation to the natural + logarithm for each element `x_i` of the input array `x`.""" + return _apply_element_wise('log', x1, out=out) + + +def log1p(x1, out=None): + """Calculates an implementation-dependent approximation to `ln(1 + x_i)` + for each element `x_i` of the input array `x`. + + For small `x`, the result of this function should be more accurate + than `log(1 + x)`. + """ + return _apply_element_wise("log1p", x1, out=out) + + +def log2(x1, out=None): + """Calculates an implementation-dependent approximation to the base two + logarithm for each element `x_i` of the input array `x`.""" + return _apply_element_wise("log2", x1, out=out) + + +def log10(x1, out=None): + """Calculates an implementation-dependent approximation to the base ten + logarithm for each element `x_i` of the input array `x`.""" + return _apply_element_wise("log10", x1, out=out) + + +def logaddexp(x1, x2, out=None): + """Calculates the logarithm of the sum of exponentiations `log(exp(x1) + + exp(x2))` for each element `x1_i` of the input array `x1` with the + respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('logaddexp', x1, x2=x2, out=out) + + +def logical_and(x1, x2, out=None): + """Computes the logical AND for each element `x1_i` of the input array `x1` + with the respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('logical_and', x1, x2=x2, out=out) + + +def logical_not(x1, out=None): + """Computes the logical NOT for each element `x_i` of the input array + `x`.""" + return _apply_element_wise('logical_not', x1, out=out) + + +def logical_or(x1, x2, out=None): + """Computes the logical OR for each element `x1_i` of the input array `x1` + with the respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('logical_or', x1, x2=x2, out=out) + + +def logical_xor(x1, x2, out=None): + """Computes the logical XOR for each element `x1_i` of the input array `x1` + with the respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('logical_xor', x1, x2=x2, out=out) + + +def maximum(x1, x2, out=None): + """Computes the maximum value for each element `x1_i` of the input array + `x1` relative to the respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('maximum', x1, x2=x2, out=out) + + +def minimum(x1, x2, out=None): + """Calculates an implementation-dependent approximation of the principal + value of the inverse cosine for each element.""" + return _apply_element_wise('minimum', x1, x2=x2, out=out) + + +def multiply(x1, x2, out=None): + """Calculates the product for each element `x1_i` of the input array `x1` + with the respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('multiply', x1, x2=x2, out=out) + + +def negative(x1, out=None): + """Numerically negates each element `x_i` of the input array `x`.""" + return _apply_element_wise('negative', x1, out=out) + + +def nextafter(x1, x2, out=None): + """Returns the next representable floating-point value for each element + `x1_i` of the input array `x1` in the direction of the respective element + `x2_i` of the input array `x2`.""" + return _apply_element_wise('nextafter', x1, x2=x2, out=out) + + +def not_equal(x1, x2, out=None): + """Computes the truth value of `x1_i != x2_i` for each element `x1_i` of + the input array `x1` with the respective element `x2_i` of the input array + `x2`.""" + return _apply_element_wise('not_equal', x1, x2=x2, out=out) + + +def positive(x1, out=None): + """Numerically positive each element `x_i` of the input array `x`.""" + return _apply_element_wise('positive', x1, out=out) + + +def pow(x1, x2, out=None): + """Calculates an implementation-dependent approximation of `x1_i` raised to + the power of `x2_i` for each element `x1_i` of the input array `x1`, where + `x2_i` is the corresponding element in the input array `x2`.""" + return _apply_element_wise('pow', x1, x2=x2, out=out) + + +def real(x1, out=None): + """Returns the real part of each element `x_i` of the input array `x`.""" + return _apply_element_wise('real', x1, out=out) + + +def reciprocal(x1, out=None): + """Returns the reciprocal for each element `x_i` of the input array `x`.""" + return _apply_element_wise('reciprocal', x1, out=out) + + +def remainder(x1, x2, out=None): + """Calculates the remainder of dividing each element `x1_i` of the input + array `x1` by the respective element `x2_i` of the input array `x2`. + + The result has the same sign as the dividend `x1`, and the magnitude + is less than the magnitude of the divisor `x2`. This is often called + the "Euclidean modulo" operation. + """ + return _apply_element_wise('remainder', x1, x2=x2, out=out) + + +def round(x1, out=None): + """Rounds each element `x_i` of the input array `x` to the nearest integer. + + Halfway cases (i.e., numbers with a fractional part of `0.5`) are rounded + to the nearest even integer. + """ + return _apply_element_wise('round', x1, out=out) + + +def sign(x1, out=None): + """Returns an indication of the sign of each element `x_i` of the input + array `x`. + + The returned array has the same shape as `x`. + """ + return _apply_element_wise('sign', x1, out=out) + + +def signbit(x1, out=None): + """Determines whether the sign bit is set for each element `x_i` of the + input array `x`""" + return _apply_element_wise('signbit', x1, out=out) + + +def sin(x1, out=None): + """Calculates an implementation-dependent approximation to the sine for + each element `x_i` of the input array `x`.""" + return _apply_element_wise('sin', x1, out=out) + + +def sinh(x1, out=None): + """Calculates an implementation-dependent approximation to the hyperbolic + sine for each element `x_i` in the input array `x`.""" + return _apply_element_wise('sinh', x1, out=out) + + +def sqrt(x1, out=None): + """Calculates the square root for each element `x_i` of the input array + `x`.""" + return _apply_element_wise('sqrt', x1, out=out) + + +def square(x1, out=None): + """Calculates the square of each element `x_i` (i.e., `x_i * x_i`) of the + input array `x`""" + return _apply_element_wise('square', x1, out=out) + + +def subtract(x1, x2, out=None): + """Calculates the difference for each element `x1_i` of the input array + `x1` with the respective element `x2_i` of the input array `x2`.""" + return _apply_element_wise('subtract', x1, x2=x2, out=out) + + +def tan(x1, out=None): + """Calculates an implementation-dependent approximation to the tangent for + each element `x_i` of the input array `x`.""" + return _apply_element_wise('tan', x1, out=out) + + +def tanh(x1, out=None): + """Calculates an implementation-dependent approximation to the hyperbolic + tangent for each element `x_i` in the input array `x`.""" + return _apply_element_wise('tanh', x1, out=out) + + +def trunc(x1, out=None): + """Rounds each element `x_i` of the input array `x` to the nearest integer + towards zero.""" + return _apply_element_wise('trunc', x1, out=out) diff --git a/odl/core/array_API_support/statistical.py b/odl/core/array_API_support/statistical.py new file mode 100644 index 00000000000..eabedb47a9e --- /dev/null +++ b/odl/core/array_API_support/statistical.py @@ -0,0 +1,122 @@ +# Copyright 2014-2025 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Satistical functions expected by the python array API +Internally, all functions apply a reduction operation on a LinearSpaceElement. + +Args: + x (LinearSpaceElement): LinearSpaceElement on which to apply the reduction. + +Returns: + x (float | array-like): Output of the reduction. + +Notes: + 1) The actual implementation of the reduction is in the LinearSpace of this element. + 2) These functions can return python Numbers or backend-specific array (when calling with keepdims=True for instance), but they will not return odl objects. + +""" + +__all__ = ( + 'cumulative_prod', + 'cumulative_sum', + 'max', + 'mean', + 'min', + 'prod', + 'std', + 'sum', + 'var' +) + +def _apply_reduction(operation: str, x, **kwargs): + """ + Examples + >>> e1 = odl.rn(3).element((1,2,3)) + >>> odl.cumulative_prod(e1) == [1,2,6] + array([ True, True, True], dtype=bool) + >>> odl.cumulative_sum(e1) == [1,3,6] + array([ True, True, True], dtype=bool) + >>> odl.max(e1) == 3 + True + >>> odl.mean(e1) == 2 + True + >>> odl.min(e1) == 1 + True + >>> odl.prod(e1) == 6 + True + >>> odl.std(e1) == np.std([1,2,3]) + True + >>> odl.sum(e1) == 6 + True + >>> odl.var(e1) == np.var([1,2,3]) + True + """ + return x.space._element_reduction(operation=operation, x=x, **kwargs) + +def cumulative_prod(x, axis=None, dtype=None, include_initial=False): + """ + Calculates the cumulative product of elements in the input array x. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('cumulative_prod', x, axis=axis, dtype=dtype, include_initial=include_initial) + +def cumulative_sum(x, axis=None, dtype=None, include_initial=False): + """ + Calculates the cumulative sum of elements in the input array x. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('cumulative_sum', x, axis=axis, dtype=dtype, include_initial=include_initial) + +def max(x, axis=None, keepdims=False): + """ + Calculates the maximum value of the input array x. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('max', x, axis=axis, keepdims=keepdims) + +def mean(x, axis=None, keepdims=False): + """ + Calculates the arithmetic mean of the input array x. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('mean', x, axis=axis, keepdims=keepdims) + +def min(x, axis=None, keepdims=False): + """ + Calculates the minimum value of the input array x. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('min', x, axis=axis, keepdims=keepdims) + +def prod(x, axis=None, dtype=None, keepdims=False): + """ + Calculates the product of input array x elements. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('prod', x, axis=axis, dtype=dtype, keepdims=keepdims) + +def std(x, axis=None, correction=0.0, keepdims=False): + """ + Calculates the standard deviation of the input array x. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('std', x, axis=axis, correction=correction, keepdims=keepdims) + +def sum(x, axis=None, dtype=None, keepdims=False): + """ + Calculates the sum of the input array x. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('sum', x, axis=axis, dtype=dtype, keepdims=keepdims) + +def var(x, axis=None, correction=0.0, keepdims=False): + """ + Calculates the variance of the input array x. + Note: This function might not be doing what you expect. If you want to return an array (np.ndarray, torch.Tensor...), you are in the right place. However, you cannot use it to create a new LinearSpaceSelement. + """ + return _apply_reduction('var', x, axis=axis, correction=correction, keepdims=keepdims) diff --git a/odl/core/array_API_support/utils.py b/odl/core/array_API_support/utils.py new file mode 100644 index 00000000000..49cda859901 --- /dev/null +++ b/odl/core/array_API_support/utils.py @@ -0,0 +1,381 @@ +# Copyright 2014-2025 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Utilities for the compatibility of ODL with the python array API""" + +from types import ModuleType +from dataclasses import dataclass +from typing import Callable, Union + +__all__ = ( + 'ArrayBackend', + 'lookup_array_backend', + 'get_array_and_backend', + 'check_device', + 'can_cast', + ) + + +_registered_array_backends = {} + +# The backends shipped with ODL, with the dependencies needed to enable them. +standard_known_backends = { 'numpy': ['numpy'] + , 'pytorch': ['torch'] } + +@dataclass +class ArrayOperation: + name: str + operation_call: Callable + supports_single_input: bool + supports_two_inputs: bool + supports_out_argument: bool + +@dataclass +class ArrayBackend: + """ + Class to implement the array backend associated to each TensorSpace Implementations. + + Attributes + ---------- + impl : str + The implementation of the backend, e.g 'numpy' + array_namespace : ModuleType + The actual namespace of the backend, e.g np + available_dtypes : dict + A dictionnary mapping a Number/str datatype to the corresponding backend-specific datatype, e.g {float:np.float64, 'float64', np.float64, ...} + array_type : type + The type of the array once implemented by the backend, e.g np.ndarray + array_constructor : Callable + The function the backend uses to create an array, e.g np.asarray + from_dlpack : Callable + Stand-in for the `from_dlpack` method of the Python Array API. We would rather use that directly, + but there are multiple inconsistencies of its behaviour particular in different PyTorch versions, + so we need to wrap it as a workaround. + make_contiguous : Callable + The function the backend uses to make an array contiguous, e.g np.ascontiguousasarray + identifier_of_dtype : Callable + The function used to get a string representation of a backend-specific dtype + available_devices : list[str] + List of devices accepted by the backend + to_cpu : Callable + Function to copy an array to the CPU + to_numpy: Callable + Function to create a Numpy version of an array + + """ + impl: str + array_namespace: ModuleType + available_dtypes: dict[str, object] + array_type: type + array_constructor: Callable + from_dlpack: Callable + make_contiguous: Callable + identifier_of_dtype: Callable + available_devices : list[str] + to_cpu : Callable + to_numpy: Callable + to_device: Callable + def __post_init__(self): + if self.impl in _registered_array_backends: + raise KeyError(f"An array-backend with the identifier {self.impl} is already registered. Every backend needs to have a unique identifier.") + _registered_array_backends[self.impl] = self + self._array_operations = {} + def get_dtype_identifier(self, **kwargs) -> str: + """ + Method for getting a dtype_identifier (str) from an array or a dtype. + This is used to retrieve the dtype of a custom object as a string and pass it to another backend. + The dtype must actually be a dtype object pertaining to the `self` backend. + Strings or Python types are not allowed here. + Use `odl.core.util.dtype_utils._universal_dtype_identifier` for a general conversion from + dtype-ish objects to identifiers. + + Parameters + ---------- + **kwargs : 'array' or 'dtype' + This function inputs either an array OR a dtype + + Returns + ------- + dtype_identifier (str) + + Examples + -------- + >>> backend = odl.lookup_array_backend('numpy') + >>> backend.get_dtype_identifier(array=np.zeros(10)) + 'float64' + >>> backend.get_dtype_identifier(array=np.zeros(10, dtype = 'float32')) + 'float32' + >>> backend.get_dtype_identifier(array=np.zeros(10, float)) + 'float64' + >>> backend.get_dtype_identifier(dtype=np.dtype('float64')) + 'float64' + >>> backend.get_dtype_identifier(dtype=np.zeros(10, dtype = 'float32').dtype) + 'float32' + >>> backend.get_dtype_identifier(dtype=np.dtype(float)) + 'float64' + >>> backend.get_dtype_identifier(dtype=np.dtype(float), array=np.zeros(10, float)) + Traceback (most recent call last): + AssertionError: "array and dtype are mutually exclusive parameters" + >>> backend.get_dtype_identifier(np.dtype(float)) + Traceback (most recent call last): + TypeError: "ArrayBackend.get_dtype_identifier() takes 1 positional argument but 2 were given" + """ + if 'array' in kwargs: + assert 'dtype' not in kwargs, "array and dtype are mutually exclusive parameters" + return self.identifier_of_dtype(kwargs['array'].dtype) + if 'dtype' in kwargs: + assert 'array' not in kwargs, "array and dtype are mutually exclusive parameters" + return self.identifier_of_dtype(kwargs['dtype']) + raise ValueError("Either 'array' or 'dtype' argument must be provided.") + + def _probe_elementwise_operation(self, operation): + """ + Attempt to use a low-level operation in this backend. If successful, the operation is + then registered in the `_array_operations` dict in a suitable manner.""" + fn = getattr(self.array_namespace, operation) + test_inputs = { dtk: self.array_constructor([1,2,3], dtype=dtype) + for dtk, dtype in self.available_dtypes.items() } + test_output = None + supports_single_input = supports_two_inputs = supports_out_argument = False + for dtype, test_input in test_inputs.items(): + try: + test_output = fn(test_input) + supports_single_input = True + except (TypeError, RuntimeError): + pass + try: + test_output = fn(test_input, test_input) + supports_two_inputs = True + except (TypeError, RuntimeError): + pass + try: + if supports_single_input: + fn(test_input, out=test_output) + supports_out_argument = True + elif supports_two_inputs: + fn(test_input, test_input, out=test_output) + supports_out_argument = True + except (TypeError, RuntimeError): + pass + if supports_single_input or supports_two_inputs: + self._array_operations[operation] = ArrayOperation( + name = operation, + operation_call = fn, + supports_single_input = supports_single_input, + supports_two_inputs = supports_two_inputs, + supports_out_argument = supports_out_argument) + return + + def lookup_array_operation(self, operation: str) -> ArrayOperation: + if operation not in self._array_operations: + self._probe_elementwise_operation(operation) + return self._array_operations[operation] + + def lookup_function(self, operation: str) -> Callable: + return self.lookup_array_operation(operation).operation_call + + def __repr__(self): + """ + Implements the __repr__ method used in print. + """ + return f"ArrayBackend(impl={self.impl})" + + def __eq__(self, other): + """ + Implements the `==` operator. + It compares if `other` is also an `ArrayBackend` and if `self` and `other` have the same implementation `impl` + """ + return isinstance(other, ArrayBackend) and self.impl == other.impl + +def lookup_array_backend(impl: str) -> ArrayBackend: + """ + Convenience function for getting an `ArrayBackend` from an `impl` argument. + This is helpful to both ensure that a backend actually exists and to retrieve it. + + Parameters + ---------- + impl : str + backend identifier + + Examples + -------- + >>> lookup_array_backend('numpy') + ArrayBackend(impl=numpy) + >>> lookup_array_backend('something_else') + Traceback (most recent call last): + KeyError: "The implementation something_else is not supported by ODL. Please select a backend in ['numpy']" + >>> lookup_array_backend(72) + Traceback (most recent call last): + AssertionError: f"The impl parameter must be a string, got int" + """ + assert isinstance(impl, str), f"The impl parameter must be a string, got {type(impl)}" + try: + return _registered_array_backends[impl] + except KeyError: + if impl in standard_known_backends: + raise KeyError(f"The implementation ‘{impl}’ is not available here, likely due to a missing package. Try installing {standard_known_backends[impl]} using pip / conda / uv.") + else: + raise KeyError(f"The implementation {impl} is not supported by ODL. Please select a backend in {_registered_array_backends.keys()}") + +def get_array_and_backend(x, must_be_contiguous=False): + """ + Convenience function for getting an `ArrayBackend` from an `array-like` argument. + + Parameters + ---------- + x : Array-Like. + It can be a `np.ndarray`, a `torch.Tensor`, an ODL `Tensor` or a `ProductSpaceElement`. Object to return the `ArrayBackend` and actual underlying array from. + must_be_contiguous : bool + Boolean flag to indicate whether or not to make the array contiguous. + + Returns + ------- + x : actual array + -> unwrapped from the LinearSpaceElement + -> returned as is if it was already an array. + backend : ODL `ArrayBackend` object + + Examples + -------- + >>> array, backend = get_array_and_backend(np.zeros(2)) + >>> array + array([ 0., 0.]) + >>> backend + ArrayBackend(impl=numpy) + >>> array, backend = get_array_and_backend([1,2,3]) + Traceback (most recent call last): + ValueError: f"The registered array backends are ['numpy']. The argument provided is a list, check that the backend you want to use is supported and has been correctly instanciated." + """ + from odl.core.space.base_tensors import Tensor + if isinstance(x, Tensor): + return x.asarray(must_be_contiguous=must_be_contiguous), x.space.array_backend + + from odl.core.space.pspace import ProductSpaceElement + if isinstance(x, ProductSpaceElement): + return get_array_and_backend(x.asarray(), must_be_contiguous=must_be_contiguous) + + for backend in _registered_array_backends.values(): + backend : ArrayBackend + if isinstance(x, backend.array_type) or x in backend.available_dtypes.values(): + if must_be_contiguous: + return backend.make_contiguous(x), backend + else: + return x, backend + + else: + raise ValueError(f"The registered array backends are {list(_registered_array_backends.keys())}. The argument provided is a {type(x)}, check that the backend you want to use is supported and has been correctly instanciated.") + +def is_array_supported(x): + for backend in _registered_array_backends.values(): + backend : ArrayBackend + if isinstance(x, backend.array_type): + return True + return False + +def check_device(impl:str, device: Union[str, object]) -> str: + """ + Checks the device argument. + This checks that the device requested is available and that its compatible with the backend requested. + + If successful, returns the standard string identifier of the device. + + Parameters + ---------- + impl : str + backend identifier + device : str or backend-specific device-object + Device identifier + + Examples + -------- + >>> odl.check_device('numpy', 'cpu') + 'cpu' + >>> odl.check_device('numpy', 'anything_but_cpu') + Traceback (most recent call last): + ... + ValueError: For numpy Backend, only devices ['cpu'] are present, but anything_but_cpu was provided. + """ + backend = lookup_array_backend(impl) + for known_device in backend.available_devices: + if device == known_device: + return device + elif str(device) == known_device: + # This works at least for PyTorch, but it is not clear + # how general this is. + return str(device) + + raise ValueError(f"For {impl} Backend, only devices {backend.available_devices} are present, but {device} was provided.") + +def _dtype_info(array_namespace, dtype): + """ + Return min, max, and kind ('bool', 'int', 'uint', 'float') for a given dtype. + Works across Array API backends. + """ + name = str(dtype) + if "bool" in name: + return 0, 1, "bool" + if "int" in name and not "uint" in name: + iinfo = array_namespace.iinfo(dtype) + return iinfo.min, iinfo.max, "int" + if "uint" in name: + iinfo = array_namespace.iinfo(dtype) + return iinfo.min, iinfo.max, "uint" + if "float" in name: + finfo = array_namespace.finfo(dtype) + # floats have no exact min/max, but finfo.min/max are usable for range checks + return finfo.min, finfo.max, "float" + raise ValueError(f"Unsupported dtype: {dtype}") + +def can_cast(array_namespace, from_dtype, to_dtype, casting="safe"): + """ + NumPy-like can_cast for Python Array API backends. + Supports 'safe', 'same_kind', and 'unsafe' casting. + """ + # Convert arrays to dtypes + if hasattr(from_dtype, "dtype"): + from_dtype = from_dtype.dtype + if hasattr(to_dtype, "dtype"): + to_dtype = to_dtype.dtype + + # Same type always allowed + if from_dtype == to_dtype: + return True + + # Unsafe allows anything + if casting == "unsafe": + return True + + # Determine type categories + f_min, f_max, f_kind = _dtype_info(array_namespace, from_dtype) + t_min, t_max, t_kind = _dtype_info(array_namespace, to_dtype) + + # Safe casting: all values of from_dtype must fit in to_dtype + if casting == "safe": + if f_kind == "bool": + return True # bool -> anything is safe + if t_kind == "bool": + return False # non-bool -> bool is unsafe + if f_kind in ("int", "uint") and t_kind in ("int", "uint", "float"): + return f_min >= t_min and f_max <= t_max + if f_kind == "float" and t_kind == "float": + return array_namespace.finfo(to_dtype).precision >= array_namespace.finfo(from_dtype).precision + return False + + # Same-kind casting: allow within same category or safe upcast to float + if casting == "same_kind": + if f_kind == t_kind: + return True + # int/uint to float is allowed if range fits + if f_kind in ("int", "uint") and t_kind == "float": + return f_min >= t_min and f_max <= t_max + return False + + raise ValueError(f"Unsupported casting rule: {casting}") + +if __name__ =='__main__': + check_device('numpy', 'cpu') diff --git a/odl/diagnostics/__init__.py b/odl/core/diagnostics/__init__.py similarity index 100% rename from odl/diagnostics/__init__.py rename to odl/core/diagnostics/__init__.py diff --git a/odl/diagnostics/examples.py b/odl/core/diagnostics/examples.py similarity index 95% rename from odl/diagnostics/examples.py rename to odl/core/diagnostics/examples.py index 0ed3e82fcdc..705777b27f1 100644 --- a/odl/diagnostics/examples.py +++ b/odl/core/diagnostics/examples.py @@ -42,5 +42,5 @@ def samples(*sets): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/diagnostics/operator.py b/odl/core/diagnostics/operator.py similarity index 98% rename from odl/diagnostics/operator.py rename to odl/core/diagnostics/operator.py index 1134708fb6e..eee4687d17d 100644 --- a/odl/diagnostics/operator.py +++ b/odl/core/diagnostics/operator.py @@ -14,9 +14,9 @@ import numpy as np -from odl.diagnostics.examples import samples -from odl.operator import power_method_opnorm -from odl.util.testutils import fail_counter +from odl.core.diagnostics.examples import samples +from odl.core.operator import power_method_opnorm +from odl.core.util.testutils import fail_counter __all__ = ('OperatorTest',) diff --git a/odl/diagnostics/space.py b/odl/core/diagnostics/space.py similarity index 99% rename from odl/diagnostics/space.py rename to odl/core/diagnostics/space.py index d391fbb5956..b1c65c17f9a 100644 --- a/odl/diagnostics/space.py +++ b/odl/core/diagnostics/space.py @@ -13,9 +13,9 @@ from builtins import object from copy import copy, deepcopy -from odl.diagnostics.examples import samples -from odl.set import Field -from odl.util.testutils import fail_counter +from odl.core.diagnostics.examples import samples +from odl.core.set import Field +from odl.core.util.testutils import fail_counter __all__ = ('SpaceTest',) diff --git a/odl/discr/__init__.py b/odl/core/discr/__init__.py similarity index 100% rename from odl/discr/__init__.py rename to odl/core/discr/__init__.py diff --git a/odl/discr/diff_ops.py b/odl/core/discr/diff_ops.py similarity index 84% rename from odl/discr/diff_ops.py rename to odl/core/discr/diff_ops.py index e7ba9d7f168..67879a8a914 100644 --- a/odl/discr/diff_ops.py +++ b/odl/core/discr/diff_ops.py @@ -11,11 +11,13 @@ from __future__ import absolute_import, division, print_function import numpy as np +from math import prod -from odl.discr.discr_space import DiscretizedSpace -from odl.operator.tensor_ops import PointwiseTensorFieldOperator -from odl.space import ProductSpace -from odl.util import indent, signature_string, writable_array +from odl.core.discr.discr_space import DiscretizedSpace +from odl.core.operator.tensor_ops import PointwiseTensorFieldOperator +from odl.core.space import ProductSpace +from odl.core.util import indent, signature_string, writable_array +from odl.core.array_API_support import asarray, get_array_and_backend __all__ = ('PartialDerivative', 'Gradient', 'Divergence', 'Laplacian') @@ -290,8 +292,8 @@ def __init__(self, domain=None, range=None, method='forward', >>> adj_g = grad.adjoint(g) >>> adj_g uniform_discr([ 0., 0.], [ 2., 5.], (2, 5)).element( - [[ 0., -2., -5., -8., -11.], - [ 0., -5., -14., -23., -32.]] + [[ -0., -2., -5., -8., -11.], + [ -0., -5., -14., -23., -32.]] ) >>> g.inner(grad_f) / f.inner(adj_g) 1.0 @@ -344,20 +346,25 @@ def __init__(self, domain=None, range=None, method='forward', def _call(self, x, out=None): """Calculate the spatial gradient of ``x``.""" - if out is None: - out = self.range.element() - x_arr = x.asarray() ndim = self.domain.ndim dx = self.domain.cell_sides - for axis in range(ndim): - with writable_array(out[axis]) as out_arr: - finite_diff(x_arr, axis=axis, dx=dx[axis], method=self.method, + if out is None: + return self.range.element([ + finite_diff(x_arr, axis=axis, dx=dx[axis], method=self.method, pad_mode=self.pad_mode, pad_const=self.pad_const, - out=out_arr) - return out + ) + for axis in range(ndim)]) + else: + for axis in range(ndim): + with writable_array(out[axis]) as out_arr: + finite_diff(x_arr, axis=axis, dx=dx[axis], method=self.method, + pad_mode=self.pad_mode, + pad_const=self.pad_const, + out=out_arr) + return out def derivative(self, point=None): """Return the derivative operator. @@ -554,25 +561,38 @@ def __init__(self, domain=None, range=None, method='forward', def _call(self, x, out=None): """Calculate the divergence of ``x``.""" - if out is None: - out = self.range.element() ndim = self.range.ndim dx = self.range.cell_sides - tmp = np.empty(out.shape, out.dtype, order=out.space.default_order) - with writable_array(out) as out_arr: - for axis in range(ndim): - finite_diff(x[axis], axis=axis, dx=dx[axis], - method=self.method, pad_mode=self.pad_mode, - pad_const=self.pad_const, - out=tmp) - if axis == 0: - out_arr[:] = tmp - else: - out_arr += tmp + backend = self.range.array_backend - return out + def directional_derivative(axis, out=None): + return finite_diff( x[axis], axis=axis, dx=dx[axis] + , method=self.method, pad_mode=self.pad_mode + , pad_const=self.pad_const + , out=out ) + + if out is None: + result = directional_derivative(0) + for axis in range(1,len(x)): + result += directional_derivative(axis) + + return self.range.element(result) + + else: + assert(backend.impl != 'pytorch') + + tmp = self.range.element().asarray() + with writable_array(out) as out_arr: + for axis in range(ndim): + directional_derivative(axis, out=tmp) + if axis == 0: + out_arr[:] = tmp + else: + out_arr += tmp + + return out def derivative(self, point=None): """Return the derivative operator. @@ -714,9 +734,12 @@ def _call(self, x, out=None): else: out.set_zero() - x_arr = x.asarray() out_arr = out.asarray() - tmp = np.empty(out.shape, out.dtype, order=out.space.default_order) + + x_arr, backend = get_array_and_backend(x) + tmp = backend.array_namespace.empty( + shape=out.shape, dtype=out.dtype,device=x.device + ) ndim = self.domain.ndim dx = self.domain.cell_sides @@ -785,106 +808,10 @@ def __str__(self): return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str)) -def finite_diff(f, axis, dx=1.0, method='forward', out=None, +def _finite_diff_numpy(f_arr, axis, dx=1.0, method='forward', out=None, pad_mode='constant', pad_const=0): - """Calculate the partial derivative of ``f`` along a given ``axis``. - - In the interior of the domain of f, the partial derivative is computed - using first-order accurate forward or backward difference or - second-order accurate central differences. - - With padding the same method and thus accuracy is used on endpoints as - in the interior i.e. forward and backward differences use first-order - accuracy on edges while central differences use second-order accuracy at - edges. - - Without padding one-sided forward or backward differences are used at - the boundaries. The accuracy at the endpoints can then also be - triggered by the edge order. - - The returned array has the same shape as the input array ``f``. - - Per default forward difference with dx=1 and no padding is used. - - Parameters - ---------- - f : `array-like` - An N-dimensional array. - axis : int - The axis along which the partial derivative is evaluated. - dx : float, optional - Scalar specifying the distance between sampling points along ``axis``. - method : {'central', 'forward', 'backward'}, optional - Finite difference method which is used in the interior of the domain - of ``f``. - out : `numpy.ndarray`, optional - An N-dimensional array to which the output is written. Has to have - the same shape as the input array ``f``. - pad_mode : string, optional - The padding mode to use outside the domain. - - ``'constant'``: Fill with ``pad_const``. - - ``'symmetric'``: Reflect at the boundaries, not doubling the - outmost values. - - ``'periodic'``: Fill in values from the other side, keeping - the order. - - ``'order0'``: Extend constantly with the outmost values - (ensures continuity). - - ``'order1'``: Extend with constant slope (ensures continuity of - the first derivative). This requires at least 2 values along - each axis where padding is applied. - - ``'order2'``: Extend with second order accuracy (ensures continuity - of the second derivative). This requires at least 3 values along - each axis where padding is applied. - - pad_const : float, optional - For ``pad_mode == 'constant'``, ``f`` assumes ``pad_const`` for - indices outside the domain of ``f`` - - Returns - ------- - out : `numpy.ndarray` - N-dimensional array of the same shape as ``f``. If ``out`` was - provided, the returned object is a reference to it. - - Examples - -------- - >>> f = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) - - >>> finite_diff(f, axis=0) - array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., -9.]) - - Without arguments the above defaults to: - - >>> finite_diff(f, axis=0, dx=1.0, method='forward', pad_mode='constant') - array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., -9.]) - - Parameters can be changed one by one: - - >>> finite_diff(f, axis=0, dx=0.5) - array([ 2., 2., 2., 2., 2., 2., 2., 2., 2., -18.]) - >>> finite_diff(f, axis=0, pad_mode='order1') - array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) - - Central differences and different edge orders: - - >>> finite_diff(0.5 * f ** 2, axis=0, method='central', pad_mode='order1') - array([ 0.5, 1. , 2. , 3. , 4. , 5. , 6. , 7. , 8. , 8.5]) - >>> finite_diff(0.5 * f ** 2, axis=0, method='central', pad_mode='order2') - array([-0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) + """ NumPy-specific version of `finite_diff`. """ - In-place evaluation: - - >>> out = f.copy() - >>> out is finite_diff(f, axis=0, out=out) - True - """ - f_arr = np.asarray(f) ndim = f_arr.ndim if f_arr.shape[axis] < 2: @@ -908,43 +835,48 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, if pad_mode not in _SUPPORTED_PAD_MODES: raise ValueError('`pad_mode` {} not understood' ''.format(pad_mode)) - - pad_const = f.dtype.type(pad_const) + + f_arr, backend = get_array_and_backend(f_arr) + namespace = backend.array_namespace + device = f_arr.device + pad_const = backend.array_constructor([pad_const], dtype=f_arr.dtype, device=device) if out is None: - out = np.empty_like(f_arr) + out = namespace.empty_like(f_arr, dtype=f_arr.dtype, device=device) else: - if out.shape != f.shape: + if out.shape != f_arr.shape: raise ValueError('expected output shape {}, got {}' - ''.format(f.shape, out.shape)) + ''.format(f_arr.shape, out.shape)) + orig_shape = f_arr.shape - if f_arr.shape[axis] < 2 and pad_mode == 'order1': + if orig_shape[axis] < 2 and pad_mode == 'order1': raise ValueError("size of array to small to use 'order1', needs at " "least 2 elements along axis {}.".format(axis)) - if f_arr.shape[axis] < 3 and pad_mode == 'order2': + if orig_shape[axis] < 3 and pad_mode == 'order2': raise ValueError("size of array to small to use 'order2', needs at " "least 3 elements along axis {}.".format(axis)) - # create slice objects: initially all are [:, :, ..., :] + # Swap axes so that the axis of interest is first. In NumPy (but not PyTorch), + # this is a O(1) operation and is done to simplify the code below. + out, out_in = namespace.swapaxes(out, 0, axis), out + f_arr = namespace.swapaxes(f_arr, 0, axis) - # Swap axes so that the axis of interest is first. This is a O(1) - # operation and is done to simplify the code below. - out, out_in = np.swapaxes(out, 0, axis), out - f_arr = np.swapaxes(f_arr, 0, axis) + def fd_subtraction(a, b): + namespace.subtract(a, b, out=out[1:-1]) # Interior of the domain of f if method == 'central': # 1D equivalent: out[1:-1] = (f[2:] - f[:-2])/2.0 - np.subtract(f_arr[2:], f_arr[:-2], out=out[1:-1]) + fd_subtraction(f_arr[2:], f_arr[:-2]) out[1:-1] /= 2.0 elif method == 'forward': # 1D equivalent: out[1:-1] = (f[2:] - f[1:-1]) - np.subtract(f_arr[2:], f_arr[1:-1], out=out[1:-1]) + fd_subtraction(f_arr[2:], f_arr[1:-1]) elif method == 'backward': # 1D equivalent: out[1:-1] = (f[1:-1] - f[:-2]) - np.subtract(f_arr[1:-1], f_arr[:-2], out=out[1:-1]) + fd_subtraction(f_arr[1:-1], f_arr[:-2]) # Boundaries if pad_mode == 'constant': @@ -1129,7 +1061,204 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, return out_in +def _finite_diff_pytorch(f_arr, axis, dx=1.0, method='forward', + pad_mode='constant', pad_const=0): + """ PyTorch-specific version of `finite_diff`. Notice that this has no output argument. """ + + f_arr, _ = get_array_and_backend(f_arr) + import torch + + ndim = f_arr.ndim + + if f_arr.shape[axis] < 2: + raise ValueError('in axis {}: at least two elements required, got {}' + ''.format(axis, f_arr.shape[axis])) + + if axis < 0: + axis += ndim + if not (0 <= axis < ndim): + raise IndexError('`axis` {} outside the valid range 0 ... {}' + ''.format(axis, ndim - 1)) + + dx, dx_in = float(dx), dx + if dx <= 0 or not np.isfinite(dx): + raise ValueError("`dx` must be positive, got {}".format(dx_in)) + + method, method_in = str(method).lower(), method + if method not in _SUPPORTED_DIFF_METHODS: + raise ValueError('`method` {} was not understood'.format(method_in)) + + if pad_mode not in _SUPPORTED_PAD_MODES: + raise ValueError('`pad_mode` {} not understood' + ''.format(pad_mode)) + + orig_shape = f_arr.shape + + if orig_shape[axis] < 2 and pad_mode == 'order1': + raise ValueError("size of array to small to use 'order1', needs at " + "least 2 elements along axis {}.".format(axis)) + if orig_shape[axis] < 3 and pad_mode == 'order2': + raise ValueError("size of array to small to use 'order2', needs at " + "least 3 elements along axis {}.".format(axis)) + + # Reshape (in O(1)), so the axis of interest is the pænultimate, all previous + # axes are flattened into the batch dimension, and all subsequent axes flattened + # into the final dimension. This allows a batched 2D convolution of final size 1 + # to perform the differentiation in only the axis of interest. + f_arr = f_arr.reshape([ prod(orig_shape[:axis]) + , 1 + , orig_shape[axis] + , prod(orig_shape[axis+1:]) + ]) + + dtype = f_arr.dtype + + # Kernel for convolution that expresses the finite-difference operator on, at least, + # the interior of the domain of f + def as_kernel(mat): + return torch.tensor(mat, dtype=dtype, device=f_arr.device) + + if method == 'central': + fd_kernel = as_kernel([[[[-1],[0],[1]]]]) / (2*dx) + elif method == 'forward': + fd_kernel = as_kernel([[[[0],[-1],[1]]]]) / dx + elif method == 'backward': + fd_kernel = as_kernel([[[[-1],[1],[0]]]]) / dx + + if pad_mode == 'constant': + if pad_const==0: + result = torch.conv2d(f_arr, fd_kernel, padding='same') + else: + padding_arr = torch.ones_like(f_arr[:,:,0:1,:]) * pad_const + result = torch.conv2d( + torch.cat([padding_arr, f_arr, padding_arr], dim=-2), fd_kernel, padding='valid' + ) + + else: + raise NotImplementedError(f'{pad_mode=} not implemented for PyTorch') + + return result.reshape(orig_shape) + + +def finite_diff(f, axis, dx=1.0, method='forward', out=None, + pad_mode='constant', pad_const=0): + """Calculate the partial derivative of ``f`` along a given ``axis``. + + In the interior of the domain of f, the partial derivative is computed + using first-order accurate forward or backward difference or + second-order accurate central differences. + + With padding the same method and thus accuracy is used on endpoints as + in the interior i.e. forward and backward differences use first-order + accuracy on edges while central differences use second-order accuracy at + edges. + + Without padding one-sided forward or backward differences are used at + the boundaries. The accuracy at the endpoints can then also be + triggered by the edge order. + + The returned array has the same shape as the input array ``f``. + + Per default forward difference with dx=1 and no padding is used. + + Parameters + ---------- + f : `array-like` + An N-dimensional array. + axis : int + The axis along which the partial derivative is evaluated. + dx : float, optional + Scalar specifying the distance between sampling points along ``axis``. + method : {'central', 'forward', 'backward'}, optional + Finite difference method which is used in the interior of the domain + of ``f``. + out : `numpy.ndarray`, optional + An N-dimensional array to which the output is written. Has to have + the same shape as the input array ``f``. + pad_mode : string, optional + The padding mode to use outside the domain. + + ``'constant'``: Fill with ``pad_const``. + + ``'symmetric'``: Reflect at the boundaries, not doubling the + outmost values. + + ``'periodic'``: Fill in values from the other side, keeping + the order. + + ``'order0'``: Extend constantly with the outmost values + (ensures continuity). + + ``'order1'``: Extend with constant slope (ensures continuity of + the first derivative). This requires at least 2 values along + each axis where padding is applied. + + ``'order2'``: Extend with second order accuracy (ensures continuity + of the second derivative). This requires at least 3 values along + each axis where padding is applied. + + pad_const : float, optional + For ``pad_mode == 'constant'``, ``f`` assumes ``pad_const`` for + indices outside the domain of ``f`` + + Returns + ------- + out : `numpy.ndarray` + N-dimensional array of the same shape as ``f``. If ``out`` was + provided, the returned object is a reference to it. + + Examples + -------- + >>> f = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) + + >>> finite_diff(f, axis=0) + array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., -9.]) + + Without arguments the above defaults to: + + >>> finite_diff(f, axis=0, dx=1.0, method='forward', pad_mode='constant') + array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., -9.]) + + Parameters can be changed one by one: + + >>> finite_diff(f, axis=0, dx=0.5) + array([ 2., 2., 2., 2., 2., 2., 2., 2., 2., -18.]) + >>> finite_diff(f, axis=0, pad_mode='order1') + array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) + + Central differences and different edge orders: + + >>> finite_diff(0.5 * f ** 2, axis=0, method='central', pad_mode='order1') + array([ 0.5, 1. , 2. , 3. , 4. , 5. , 6. , 7. , 8. , 8.5]) + >>> finite_diff(0.5 * f ** 2, axis=0, method='central', pad_mode='order2') + array([-0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) + + In-place evaluation: + + >>> out = f.copy() + >>> out is finite_diff(f, axis=0, out=out) + True + """ + _, backend = get_array_and_backend(f) + if pad_mode == 'constant' and backend.impl=='pytorch': + if out is None: + return _finite_diff_pytorch( + f, axis, dx=dx, method=method, pad_mode=pad_mode, pad_const=pad_const + ) + assert isinstance(out, backend.array_type), f"{type(out)=}" + if out.shape != f.shape: + raise ValueError('expected output shape {}, got {}' + ''.format(f.shape, out.shape)) + out[:] = _finite_diff_pytorch( + f, axis, dx=dx, method=method, pad_mode=pad_mode, pad_const=pad_const + ) + return out + else: + return _finite_diff_numpy( + f, axis, dx=dx, method=method, out=out, pad_mode=pad_mode, pad_const=pad_const) + + if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/discr/discr_ops.py b/odl/core/discr/discr_ops.py similarity index 97% rename from odl/discr/discr_ops.py rename to odl/core/discr/discr_ops.py index f6c41373a40..a80add2cb16 100644 --- a/odl/discr/discr_ops.py +++ b/odl/core/discr/discr_ops.py @@ -12,16 +12,16 @@ import numpy as np -from odl.discr.discr_space import DiscretizedSpace -from odl.discr.discr_utils import ( +from odl.core.discr.discr_space import DiscretizedSpace +from odl.core.discr.discr_utils import ( _normalize_interp, per_axis_interpolator, point_collocation) -from odl.discr.partition import uniform_partition -from odl.operator import Operator -from odl.space import tensor_space -from odl.util import ( +from odl.core.discr.partition import uniform_partition +from odl.core.operator import Operator +from odl.core.space import tensor_space +from odl.core.util import ( normalized_scalar_param_list, resize_array, safe_int_conv, writable_array) -from odl.util.numerics import _SUPPORTED_RESIZE_PAD_MODES -from odl.util.utility import nullcontext +from odl.core.util.numerics import _SUPPORTED_RESIZE_PAD_MODES +from odl.core.util.utility import nullcontext __all__ = ('Resampling', 'ResizingOperator') @@ -343,8 +343,8 @@ def __init__(self, domain, range=None, ran_shp=None, **kwargs): self.__pad_mode = pad_mode # Store constant in a way that ensures safe casting (one-element array) - self.__pad_const = np.array(kwargs.pop('pad_const', 0), - dtype=ran.dtype) + self.__pad_const = ran.array_backend.array_constructor(kwargs.pop('pad_const', 0), + dtype=ran.dtype, device=ran.device) # padding mode 'constant' with `pad_const != 0` is not linear linear = (self.pad_mode != 'constant' or self.pad_const == 0.0 or self.pad_const == 0) @@ -552,5 +552,5 @@ def _resize_discr(discr, newshp, offset, discr_kwargs): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/discr/discr_space.py b/odl/core/discr/discr_space.py similarity index 74% rename from odl/discr/discr_space.py rename to odl/core/discr/discr_space.py index 033fb5e0c95..538b0875b18 100644 --- a/odl/discr/discr_space.py +++ b/odl/core/discr/discr_space.py @@ -12,18 +12,20 @@ from numbers import Integral +from contextlib import contextmanager + import numpy as np -from odl.discr.discr_utils import point_collocation, sampling_function -from odl.discr.partition import ( +from odl.core.discr.discr_utils import point_collocation, sampling_function +from odl.core.discr.partition import ( RectPartition, uniform_partition, uniform_partition_fromintv) -from odl.set import IntervalProd, RealNumbers -from odl.set.space import SupportedNumOperationParadigms, NumOperationParadigmSupport -from odl.space import ProductSpace -from odl.space.base_tensors import Tensor, TensorSpace -from odl.space.entry_points import tensor_space_impl -from odl.space.weighting import ConstWeighting -from odl.util import ( +from odl.core.set import IntervalProd, RealNumbers +from odl.core.set.space import LinearSpace, SupportedNumOperationParadigms, NumOperationParadigmSupport +from odl.core.space import ProductSpace +from odl.core.space.base_tensors import Tensor, TensorSpace, default_dtype +from odl.core.space.entry_points import tensor_space_impl +from odl.core.space.weightings.weighting import ConstWeighting +from odl.core.util import ( apply_on_boundary, array_str, dtype_str, is_floating_dtype, is_numeric_dtype, normalized_nodes_on_bdry, normalized_scalar_param_list, repr_string, safe_int_conv, signature_string_parts) @@ -78,7 +80,19 @@ def __init__(self, partition, tspace, **kwargs): self.__tspace = tspace self.__partition = partition - super(DiscretizedSpace, self).__init__(tspace.shape, tspace.dtype) + self._init_dtype(tspace.dtype) + + self._init_shape(tspace.shape, tspace.dtype) + + self._init_device(tspace.device) + + self.__use_in_place_ops = kwargs.pop('use_in_place_ops', True) + + self._init_weighting() + + field = self._init_field() + + LinearSpace.__init__(self, field) # Set axis labels axis_labels = kwargs.pop('axis_labels', None) @@ -213,36 +227,16 @@ def meshgrid(self): """All sampling points in the partition as a sparse meshgrid.""" return self.partition.meshgrid - def points(self, order='C'): + def points(self): """All sampling points in the partition. - Parameters - ---------- - order : {'C', 'F'} - Axis ordering in the resulting point array. - Returns ------- points : `numpy.ndarray` The shape of the array is ``size x ndim``, i.e. the points are stored as rows. """ - return self.partition.points(order) - - @property - def default_order(self): - """Default storage order for new elements in this space. - - This is equal to the default order of `tspace`. - """ - return self.tspace.default_order - - def default_dtype(self, field=None): - """Default data type for new elements in this space. - - This is equal to the default data type of `tspace`. - """ - return self.tspace.default_dtype(field) + return self.partition.points() def available_dtypes(self): """Available data types for new elements in this space. @@ -290,7 +284,7 @@ def is_uniformly_weighted(self): # --- Element creation - def element(self, inp=None, order=None, **kwargs): + def element(self, inp=None, **kwargs): """Create an element from ``inp`` or from scratch. Parameters @@ -300,15 +294,12 @@ def element(self, inp=None, order=None, **kwargs): are available: - ``None``: an empty element is created with no guarantee of - its state (memory allocation only). The new element will - use ``order`` as storage order if provided, otherwise - `default_order`. + its state (memory allocation only). - array-like: an element wrapping a `tensor` is created, where a copy is avoided whenever possible. This usually - requires correct `shape`, `dtype` and `impl` if applicable, - and if ``order`` is provided, also contiguousness in that - ordering. See the ``element`` method of `tspace` for more + requires correct `shape`, `dtype` and `impl` if applicable. + See the ``element`` method of `tspace` for more information. If any of these conditions is not met, a copy is made. @@ -316,10 +307,6 @@ def element(self, inp=None, order=None, **kwargs): - callable: a new element is created by sampling the function using `point_collocation`. - order : {None, 'C', 'F'}, optional - Storage order of the returned element. For ``'C'`` and ``'F'``, - contiguous memory in the respective ordering is enforced. - The default ``None`` enforces no contiguousness. kwargs : Additional arguments passed on to `point_collocation` when called on ``inp``, in the form @@ -361,24 +348,26 @@ def element(self, inp=None, order=None, **kwargs): >>> space.element(f, c=0.5) uniform_discr(-1.0, 1.0, 4).element([ 0.5 , 0.5 , 0.5 , 0.75]) """ + if 'order' in kwargs: + raise RuntimeError('The use of the order argument is now deprecated, please remove it. All arrays are C contiguous.') if inp is None: - return self.element_type(self, self.tspace.element(order=order)) - elif inp in self and order is None: + return self.element_type(self, self.tspace.element()) + elif inp in self: return inp - elif inp in self.tspace and order is None: + elif inp in self.tspace: return self.element_type(self, inp) elif callable(inp): func = sampling_function( - inp, self.domain, out_dtype=self.dtype, + inp, self.domain, out_dtype=self.dtype_identifier, impl=self.impl, device=self.device ) sampled = point_collocation(func, self.meshgrid, **kwargs) return self.element_type( - self, self.tspace.element(sampled, order=order) + self, self.tspace.element(sampled) ) else: # Sequence-type input return self.element_type( - self, self.tspace.element(inp, order=order) + self, self.tspace.element(inp) ) def zero(self): @@ -397,6 +386,18 @@ def _astype(self, dtype): return type(self)( self.partition, tspace, axis_labels=self.axis_labels) + def _to_device(self, device:str): + """Internal helper for `to_device`.""" + tspace = self.tspace.to_device(device) + return type(self)( + self.partition, tspace, axis_labels=self.axis_labels) + + def _to_impl(self, impl:str): + """Internal helper for `to_impl`.""" + tspace = self.tspace.to_impl(impl) + return type(self)( + self.partition, tspace, axis_labels=self.axis_labels) + # --- Slicing # TODO: add `byaxis`_out when discretized tensor-valued functions are @@ -602,7 +603,7 @@ def __repr__(self): posmod = [array_str, array_str, ''] default_dtype_s = dtype_str( - self.tspace.default_dtype(RealNumbers()) + default_dtype(self.tspace.array_backend, RealNumbers()) ) dtype_s = dtype_str(self.dtype) @@ -632,7 +633,13 @@ def __repr__(self): optargs.append(('weighting', self.weighting.const, None)) optmod = [''] * len(optargs) - if self.dtype in (float, complex, int, bool): + + # The following applies only if a shorthand Python-type version of + # the dtype is shown. In that case, no quotation marks should be put + # around it. This is not enabled because `dtype_str` currently always + # produces a conventional string (e.g. `'complex128'` instead of + # `complex`. + if False and self.dtype in (float, complex, int, bool): optmod[2] = '!s' inner_parts = signature_string_parts( @@ -645,7 +652,7 @@ def __repr__(self): posargs = [self.partition, self.tspace] inner_parts = signature_string_parts(posargs, []) return repr_string(ctor, inner_parts, allow_mixed_seps=False) - + def __str__(self): """Return ``str(self)``.""" return repr(self) @@ -708,7 +715,7 @@ def copy(self): """Create an identical (deep) copy of this element.""" return self.space.element(self.tensor.copy()) - def asarray(self, out=None): + def asarray(self, out=None, must_be_contiguous=False): """Extract the data of this array as a numpy array. Parameters @@ -717,7 +724,17 @@ def asarray(self, out=None): Array in which the result should be written in-place. Has to be contiguous and of the correct dtype. """ - return self.tensor.asarray(out=out) + return self.tensor.asarray(out=out, must_be_contiguous=must_be_contiguous) + + @contextmanager + def writable_array(self, must_be_contiguous: bool =False): + arr = None + try: + arr = self.tensor.asarray(must_be_contiguous=must_be_contiguous) + yield arr + finally: + if arr is not None: + self.tensor.data[:] = arr def astype(self, dtype): """Return a copy of this element with new ``dtype``. @@ -825,7 +842,10 @@ def real(self, newreal): newreal : array-like or scalar Values to be assigned to the real part of this element. """ - self.tensor.real = newreal + if isinstance(newreal, DiscretizedSpaceElement): + self.tensor.real = newreal.tensor + else: + self.tensor.real = newreal @property def imag(self): @@ -881,7 +901,10 @@ def imag(self, newimag): """ if self.space.is_real: raise ValueError('cannot set imaginary part in real spaces') - self.tensor.imag = newimag + if isinstance(newimag, DiscretizedSpaceElement): + self.tensor.imag = newimag.tensor + else: + self.tensor.imag = newimag def conj(self, out=None): """Complex conjugate of this element. @@ -956,395 +979,6 @@ def __setitem__(self, indices, values): values = values.tensor self.tensor.__setitem__(indices, values) - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - """Interface to Numpy's ufunc machinery. - - This method is called by Numpy version 1.13 and higher as a single - point for the ufunc dispatch logic. An object implementing - ``__array_ufunc__`` takes over control when a `numpy.ufunc` is - called on it, allowing it to use custom implementations and - output types. - - This includes handling of in-place arithmetic like - ``npy_array += custom_obj``. In this case, the custom object's - ``__array_ufunc__`` takes precedence over the baseline - `numpy.ndarray` implementation. It will be called with - ``npy_array`` as ``out`` argument, which ensures that the - returned object is a Numpy array. For this to work properly, - ``__array_ufunc__`` has to accept Numpy arrays as ``out`` arguments. - - See the `corresponding NEP`_ and the `interface documentation`_ - for further details. See also the `general documentation on - Numpy ufuncs`_. - - .. note:: - When using operations that alter the shape (like ``reduce``), - or the data type (can be any of the methods), - the resulting array is wrapped in a space of the same - type as ``self.space``, propagating all essential properties - like weighting, exponent etc. as closely as possible. - - Parameters - ---------- - ufunc : `numpy.ufunc` - Ufunc that should be called on ``self``. - method : str - Method on ``ufunc`` that should be called on ``self``. - Possible values: - - ``'__call__'``, ``'accumulate'``, ``'at'``, ``'outer'``, - ``'reduce'`` - - input1, ..., inputN : - Positional arguments to ``ufunc.method``. - kwargs : - Keyword arguments to ``ufunc.method``. - - Returns - ------- - ufunc_result : `DiscretizedSpaceElement`, `numpy.ndarray` or tuple - Result of the ufunc evaluation. If no ``out`` keyword argument - was given, the result is a `DiscretizedSpaceElement` or a tuple - of such, depending on the number of outputs of ``ufunc``. - If ``out`` was provided, the returned object or sequence members - refer(s) to ``out``. - - Examples - -------- - We apply `numpy.add` to elements of a one-dimensional space: - - >>> space = odl.uniform_discr(0, 1, 3) - >>> x = space.element([1, 2, 3]) - >>> y = space.element([-1, -2, -3]) - >>> x.__array_ufunc__(np.add, '__call__', x, y) - uniform_discr(0.0, 1.0, 3).element([ 0., 0., 0.]) - >>> np.add(x, y) # same mechanism for Numpy >= 1.13 - uniform_discr(0.0, 1.0, 3).element([ 0., 0., 0.]) - - As ``out``, a `DiscretizedSpaceElement` can be provided as well as a - `Tensor` of appropriate type, or its underlying data container - type (wrapped in a sequence): - - >>> out = space.element() - >>> res = x.__array_ufunc__(np.add, '__call__', x, y, out=(out,)) - >>> out - uniform_discr(0.0, 1.0, 3).element([ 0., 0., 0.]) - >>> res is out - True - >>> out_tens = odl.rn(3).element() - >>> res = x.__array_ufunc__(np.add, '__call__', x, y, out=(out_tens,)) - >>> out_tens - rn(3).element([ 0., 0., 0.]) - >>> res is out_tens - True - >>> out_arr = np.empty(3) - >>> res = x.__array_ufunc__(np.add, '__call__', x, y, out=(out_arr,)) - >>> out_arr - array([ 0., 0., 0.]) - >>> res is out_arr - True - - With multiple dimensions: - - >>> space_2d = odl.uniform_discr([0, 0], [1, 2], (2, 3)) - >>> x = y = space_2d.one() - >>> x.__array_ufunc__(np.add, '__call__', x, y) - uniform_discr([ 0., 0.], [ 1., 2.], (2, 3)).element( - [[ 2., 2., 2.], - [ 2., 2., 2.]] - ) - - The ``ufunc.accumulate`` method retains the original space: - - >>> x = space.element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'accumulate', x) - uniform_discr(0.0, 1.0, 3).element([ 1., 3., 6.]) - >>> np.add.accumulate(x) # same mechanism for Numpy >= 1.13 - uniform_discr(0.0, 1.0, 3).element([ 1., 3., 6.]) - - For multi-dimensional space elements, an optional ``axis`` parameter - can be provided (default is 0): - - >>> z = space_2d.one() - >>> z.__array_ufunc__(np.add, 'accumulate', z, axis=1) - uniform_discr([ 0., 0.], [ 1., 2.], (2, 3)).element( - [[ 1., 2., 3.], - [ 1., 2., 3.]] - ) - - The method also takes a ``dtype`` parameter: - - >>> z.__array_ufunc__(np.add, 'accumulate', z, dtype=complex) - uniform_discr([ 0., 0.], [ 1., 2.], (2, 3), dtype=complex).element( - [[ 1.+0.j, 1.+0.j, 1.+0.j], - [ 2.+0.j, 2.+0.j, 2.+0.j]] - ) - - The ``ufunc.at`` method operates in-place. Here we add the second - operand ``[5, 10]`` to ``x`` at indices ``[0, 2]``: - - >>> x = space.element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'at', x, [0, 2], [5, 10]) - >>> x - uniform_discr(0.0, 1.0, 3).element([ 6., 2., 13.]) - - For outer-product-type operations, i.e., operations where the result - shape is the sum of the individual shapes, the ``ufunc.outer`` - method can be used: - - >>> space1 = odl.uniform_discr(0, 1, 2) - >>> space2 = odl.uniform_discr(0, 2, 3) - >>> x = space1.element([0, 3]) - >>> y = space2.element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'outer', x, y) - uniform_discr([ 0., 0.], [ 1., 2.], (2, 3)).element( - [[ 1., 2., 3.], - [ 4., 5., 6.]] - ) - >>> y.__array_ufunc__(np.add, 'outer', y, x) - uniform_discr([ 0., 0.], [ 2., 1.], (3, 2)).element( - [[ 1., 4.], - [ 2., 5.], - [ 3., 6.]] - ) - - Using ``ufunc.reduce`` in 1D produces a scalar: - - >>> x = space.element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'reduce', x) - 6.0 - - In multiple dimensions, ``axis`` can be provided for reduction over - selected axes: - - >>> z = space_2d.element([[1, 2, 3], - ... [4, 5, 6]]) - >>> z.__array_ufunc__(np.add, 'reduce', z, axis=1) - uniform_discr(0.0, 1.0, 2).element([ 6., 15.]) - - References - ---------- - .. _corresponding NEP: - https://docs.scipy.org/doc/numpy/neps/ufunc-overrides.html - - .. _interface documentation: - https://docs.scipy.org/doc/numpy/reference/arrays.classes.html\ -#numpy.class.__array_ufunc__ - - .. _general documentation on Numpy ufuncs: - https://docs.scipy.org/doc/numpy/reference/ufuncs.html - - .. _reduceat documentation: - https://docs.scipy.org/doc/numpy/reference/generated/\ - """ - # --- Process `out` --- # - - # Unwrap out if provided. The output parameters are all wrapped - # in one tuple, even if there is only one. - out_tuple = kwargs.pop('out', ()) - - # Check number of `out` args, depending on `method` - if method == '__call__' and len(out_tuple) not in (0, ufunc.nout): - raise ValueError( - "need 0 or {} `out` arguments for `method='__call__'`, " - 'got {}'.format(ufunc.nout, len(out_tuple))) - elif method != '__call__' and len(out_tuple) not in (0, 1): - raise ValueError( - "need 0 or 1 `out` arguments for `method={!r}`, " - 'got {}'.format(method, len(out_tuple))) - - # We allow our own element type, tensors and their data containers - # as `out` - valid_out_types = (type(self), - type(self.tensor), - type(self.tensor.data)) - if not all(isinstance(o, valid_out_types) or o is None - for o in out_tuple): - return NotImplemented - - # Assign to `out` or `out1` and `out2`, respectively (using the - # `tensor` attribute if available) - out = out1 = out2 = None - if len(out_tuple) == 1: - out = getattr(out_tuple[0], 'tensor', out_tuple[0]) - elif len(out_tuple) == 2: - out1 = getattr(out_tuple[0], 'tensor', out_tuple[0]) - out2 = getattr(out_tuple[1], 'tensor', out_tuple[1]) - - # --- Process `inputs` --- # - - # Pull out the `tensor` attributes from `DiscretizedSpaceElement` - # instances - # since we want to pass them to `self.tensor.__array_ufunc__` - input_tensors = tuple( - elem.tensor if isinstance(elem, type(self)) else elem - for elem in inputs) - - # --- Get some parameters for later --- # - - # Need to filter for `keepdims` in case `method='reduce'` since it's - # invalid (happening below) - keepdims = kwargs.pop('keepdims', False) - - # Determine list of remaining axes from `axis` for `'reduce'` - axis = kwargs.get('axis', None) - if axis is None: - reduced_axes = list(range(1, self.ndim)) - else: - try: - iter(axis) - except TypeError: - axis = (int(axis),) - - reduced_axes = [i for i in range(self.ndim) if i not in axis] - - # --- Evaluate ufunc --- # - - if method == '__call__': - if ufunc.nout == 1: - kwargs['out'] = (out,) - res_tens = self.tensor.__array_ufunc__( - ufunc, '__call__', *input_tensors, **kwargs) - - if out is None: - # Wrap result tensor in appropriate DiscretizedSpace space. - res_space = DiscretizedSpace( - self.space.partition, - res_tens.space, - axis_labels=self.space.axis_labels - ) - result = res_space.element(res_tens) - else: - result = out_tuple[0] - - return result - - elif ufunc.nout == 2: - kwargs['out'] = (out1, out2) - res1_tens, res2_tens = self.tensor.__array_ufunc__( - ufunc, '__call__', *input_tensors, **kwargs) - - if out1 is None: - # Wrap as for nout = 1 - res_space = DiscretizedSpace( - self.space.partition, - res1_tens.space, - axis_labels=self.space.axis_labels - ) - result1 = res_space.element(res1_tens) - else: - result1 = out_tuple[0] - - if out2 is None: - # Wrap as for nout = 1 - res_space = DiscretizedSpace( - self.space.partition, - res2_tens.space, - axis_labels=self.space.axis_labels - ) - result2 = res_space.element(res2_tens) - else: - result2 = out_tuple[1] - - return result1, result2 - - else: - raise NotImplementedError('nout = {} not supported' - ''.format(ufunc.nout)) - - elif method == 'reduce' and keepdims: - raise ValueError( - '`keepdims=True` cannot be used in `reduce` since there is ' - 'no unique way to determine a function domain in collapsed ' - 'axes') - - elif method == 'reduceat': - # Makes no sense since there is no way to determine in which - # space the result should live, except in special cases when - # axes are being completely collapsed or don't change size. - raise ValueError('`reduceat` not supported') - - elif ( - method == 'outer' - and not all(isinstance(inp, type(self)) for inp in inputs) - ): - raise TypeError( - "inputs must be of type {} for `method='outer'`, " - 'got types {}' - ''.format(type(self), tuple(type(inp) for inp in inputs)) - ) - - else: # method != '__call__', and otherwise valid - - if method != 'at': - # No kwargs allowed for 'at' - kwargs['out'] = (out,) - - res_tens = self.tensor.__array_ufunc__( - ufunc, method, *input_tensors, **kwargs) - - # Shortcut for scalar or no return value - if np.isscalar(res_tens) or res_tens is None: - # The first occurs for `reduce` with all axes, - # the second for in-place stuff (`at` currently) - return res_tens - - if out is None: - # Wrap in appropriate DiscretizedSpace space depending - # on `method` - if method == 'accumulate': - res_space = DiscretizedSpace( - self.space.partition, - res_tens.space, - axis_labels=self.space.axis_labels - ) - result = res_space.element(res_tens) - - elif method == 'outer': - # Concatenate partitions and axis_labels, - # and determine `tspace` from the result tensor - inp1, inp2 = inputs - part = inp1.space.partition.append(inp2.space.partition) - labels1 = [lbl + ' (1)' for lbl in inp1.space.axis_labels] - labels2 = [lbl + ' (2)' for lbl in inp2.space.axis_labels] - labels = labels1 + labels2 - - if all(isinstance(inp.space.weighting, ConstWeighting) - for inp in inputs): - # For constant weighting, use the product of the - # two weighting constants. The result tensor space - # cannot know about the "correct" way to combine the - # two constants, so we need to do it manually here. - weighting = (inp1.space.weighting.const * - inp2.space.weighting.const) - tspace = type(res_tens.space)( - res_tens.shape, res_tens.dtype, - exponent=res_tens.space.exponent, - weighting=weighting) - else: - # Otherwise `TensorSpace` knows how to handle this - tspace = res_tens.space - - res_space = DiscretizedSpace( - part, tspace, axis_labels=labels - ) - result = res_space.element(res_tens) - - elif method == 'reduce': - # Index space by axis using `reduced_axes` - res_space = self.space.byaxis_in[reduced_axes].astype( - res_tens.dtype) - result = res_space.element(res_tens) - - else: - raise RuntimeError('bad `method`') - - else: - # `out` may be `out_tuple[0].tensor`, but we want to return - # the original one - result = out_tuple[0] - - return result def show(self, title=None, method='', coords=None, indices=None, force_show=False, fig=None, **kwargs): @@ -1435,9 +1069,9 @@ def show(self, title=None, method='', coords=None, indices=None, See Also -------- - odl.util.graphics.show_discrete_data : Underlying implementation + odl.core.util.graphics.show_discrete_data : Underlying implementation """ - from odl.util.graphics import show_discrete_data + from odl.core.util.graphics import show_discrete_data if 'interp' not in kwargs: kwargs['interp'] = 'linear' @@ -1569,7 +1203,7 @@ def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs): uniform_discr : implicit uniform Lp discretization uniform_discr_fromspace : uniform Lp discretization from an existing function space - odl.discr.partition.uniform_partition : + odl.core.discr.partition.uniform_partition : partition of the function domain """ if not isinstance(partition, RectPartition): @@ -1578,12 +1212,12 @@ def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs): if not partition.is_uniform: raise ValueError('`partition` is not uniform') - if dtype is not None: - dtype = np.dtype(dtype) + # if dtype is not None: + # dtype = np.dtype(dtype) tspace_type = tensor_space_impl(impl) if dtype is None: - dtype = tspace_type.default_dtype() + dtype = default_dtype(impl) weighting = kwargs.pop('weighting', None) exponent = kwargs.pop('exponent', 2.0) @@ -1593,8 +1227,9 @@ def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs): else: weighting = partition.cell_volume + device = kwargs.pop('device', 'cpu') tspace = tspace_type(partition.shape, dtype, exponent=exponent, - weighting=weighting) + weighting=weighting, device=device) return DiscretizedSpace(partition, tspace, **kwargs) @@ -1635,7 +1270,7 @@ def uniform_discr_fromintv(intv_prod, shape, dtype=None, impl='numpy', uniform partition of a function domain """ if dtype is None: - dtype = tensor_space_impl(str(impl).lower()).default_dtype() + dtype = default_dtype(impl) nodes_on_bdry = kwargs.pop('nodes_on_bdry', False) partition = uniform_partition_fromintv(intv_prod, shape, nodes_on_bdry) @@ -1707,7 +1342,7 @@ def uniform_discr(min_pt, max_pt, shape, dtype=None, impl='numpy', **kwargs): >>> space = uniform_discr([0, 0], [1, 1], (10, 10), dtype=complex) >>> space - uniform_discr([ 0., 0.], [ 1., 1.], (10, 10), dtype=complex) + uniform_discr([ 0., 0.], [ 1., 1.], (10, 10), dtype='complex128') >>> space.is_complex True >>> space.real_space # Get real counterpart @@ -1797,7 +1432,7 @@ def uniform_discr_fromdiscr(discr, min_pt=None, max_pt=None, See Also -------- uniform_discr : implicit uniform Lp discretization - odl.discr.partition.uniform_partition : + odl.core.discr.partition.uniform_partition : underlying domain partitioning scheme Examples @@ -1958,5 +1593,5 @@ def scaling_func(x): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/discr/discr_utils.py b/odl/core/discr/discr_utils.py similarity index 82% rename from odl/discr/discr_utils.py rename to odl/core/discr/discr_utils.py index 2db6cd9ccdb..b9feb059918 100644 --- a/odl/discr/discr_utils.py +++ b/odl/core/discr/discr_utils.py @@ -19,14 +19,20 @@ import sys from builtins import object from functools import partial -from itertools import product -from warnings import warn +from itertools import product + +from typing import Callable +from odl.core.set.domain import IntervalProd import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.array_API_support import asarray, lookup_array_backend, ArrayBackend, get_array_and_backend +from odl.core.array_API_support.utils import is_array_supported + +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.util import ( +from odl.core.util.dtype_utils import _universal_dtype_identifier, is_floating_dtype, real_dtype, is_int_dtype +from odl.core.util import ( dtype_repr, is_real_dtype, is_string, is_valid_input_array, is_valid_input_meshgrid, out_shape_from_array, out_shape_from_meshgrid, writable_array) @@ -73,9 +79,9 @@ def point_collocation(func, points, out=None, **kwargs): -------- Sample a 1D function: - >>> from odl.discr.grid import sparse_meshgrid + >>> from odl.core.discr.grid import sparse_meshgrid >>> domain = odl.IntervalProd(0, 5) - >>> func = sampling_function(lambda x: x ** 2, domain) + >>> func = sampling_function(lambda x: x ** 2, domain, out_dtype=float) >>> mesh = sparse_meshgrid([1, 2, 3]) >>> point_collocation(func, mesh) array([ 1., 4., 9.]) @@ -95,7 +101,7 @@ def point_collocation(func, points, out=None, **kwargs): >>> xs = [1, 2] >>> ys = [3, 4, 5] >>> mesh = sparse_meshgrid(xs, ys) - >>> func = sampling_function(lambda x: x[0] - x[1], domain) + >>> func = sampling_function(lambda x: x[0] - x[1], domain, out_dtype=float) >>> point_collocation(func, mesh) array([[-2., -3., -4.], [-1., -2., -3.]]) @@ -105,7 +111,7 @@ def point_collocation(func, points, out=None, **kwargs): >>> def f(x, c=0): ... return x[0] + c - >>> func = sampling_function(f, domain) + >>> func = sampling_function(f, domain, out_dtype=float) >>> point_collocation(func, mesh) # uses default c=0 array([[ 1., 1., 1.], [ 2., 2., 2.]]) @@ -126,33 +132,18 @@ def point_collocation(func, points, out=None, **kwargs): >>> # For a function with several output components, we must specify the >>> # shape explicitly in the `out_dtype` parameter >>> func1 = sampling_function( - ... vec_valued, domain, out_dtype=(float, (3,)) + ... vec_valued, domain, out_dtype=float ... ) >>> point_collocation(func1, mesh) - array([[[ 0., 0.], - [ 1., 1.]], - - [[ 0., 0.], - [ 0., 0.]], - - [[ 4., 5.], - [ 5., 6.]]]) + [array([[ 0., 0.], + [ 1., 1.]]), array([[ 0., 0.], + [ 0., 0.]]), array([[ 4., 5.], + [ 5., 6.]])] >>> list_of_funcs = [ # equivalent to `vec_valued` ... lambda x: x[0] - 1, ... 0, # constants are allowed ... lambda x: x[0] + x[1] ... ] - >>> # For an array of functions, the output shape can be inferred - >>> func2 = sampling_function(list_of_funcs, domain) - >>> point_collocation(func2, mesh) - array([[[ 0., 0.], - [ 1., 1.]], - - [[ 0., 0.], - [ 0., 0.]], - - [[ 4., 5.], - [ 5., 6.]]]) Notes ----- @@ -163,7 +154,7 @@ def point_collocation(func, points, out=None, **kwargs): See Also -------- make_func_for_sampling : wrap a function - odl.discr.grid.RectGrid.meshgrid + odl.core.discr.grid.RectGrid.meshgrid numpy.meshgrid References @@ -225,7 +216,12 @@ def _check_interp_input(x, f): x_is_scalar = False x_type = 'meshgrid' else: - x = np.asarray(x) + ### Parsing the input + if isinstance(x, (int,float,complex, list, tuple)): + x = np.asarray(x) + else: + x, _ = get_array_and_backend(x) + if f.ndim == 1 and x.shape == (): x_is_scalar = True x = x.reshape((1, 1)) @@ -285,12 +281,12 @@ def nearest_interpolator(f, coord_vecs): >>> part = odl.uniform_partition(0, 2, 5) >>> part.coord_vectors # grid points (array([ 0.2, 0.6, 1. , 1.4, 1.8]),) - >>> f = [1, 2, 3, 4, 5] + >>> f = odl.tensor_space(5, dtype=int).element([1, 2, 3, 4, 5]) >>> interpolator = nearest_interpolator(f, part.coord_vectors) >>> interpolator(0.3) # closest to 0.2 -> value 1 1 >>> interpolator([0.6, 1.3, 1.9]) # closest to [0.6, 1.4, 1.8] - array([2, 4, 5]) + array([2, 4, 5], dtype=int32) In 2 dimensions, we can either use a (transposed) list of points or a meshgrid: @@ -309,23 +305,13 @@ def nearest_interpolator(f, coord_vecs): ... [0.0, 3.0]]).T # 3 points at once >>> interpolator(x) array([ 6., 4., 3.]) - >>> from odl.discr.grid import sparse_meshgrid + >>> from odl.core.discr.grid import sparse_meshgrid >>> mesh = sparse_meshgrid([0.0, 0.4, 1.0], [1.5, 3.5]) >>> interpolator(mesh) # 3x2 grid of points array([[ 2., 3.], [ 2., 3.], [ 6., 7.]]) - With nearest neighbor interpolation, we can also use non-scalar data - types like strings: - - >>> part = odl.uniform_partition(0, 3, 6) - >>> part.coord_vectors # grid points - (array([ 0.25, 0.75, 1.25, 1.75, 2.25, 2.75]),) - >>> f = ['s', 't', 'r', 'i', 'n', 'g'] - >>> interpolator = nearest_interpolator(f, part.coord_vectors) - >>> print(interpolator(0.9)) - t See Also -------- @@ -342,8 +328,8 @@ def nearest_interpolator(f, coord_vecs): arithmetic operations on the values, in contrast to other interpolation methods. """ - f = np.asarray(f) - + # f = np.asarray(f) + f, backend = get_array_and_backend(f) # TODO(kohr-h): pass reasonable options on to the interpolator def nearest_interp(x, out=None): """Interpolating function with vectorization.""" @@ -385,7 +371,7 @@ def linear_interpolator(f, coord_vecs): >>> part = odl.uniform_partition(0, 2, 5) >>> part.coord_vectors # grid points (array([ 0.2, 0.6, 1. , 1.4, 1.8]),) - >>> f = [1.0, 2.0, 3.0, 4.0, 5.0] + >>> f = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> interpolator = linear_interpolator(f, part.coord_vectors) >>> interpolator(0.3) # 0.75 * 1 + 0.25 * 2 = 1.25 1.25 @@ -413,14 +399,14 @@ def linear_interpolator(f, coord_vecs): ... [0.0, 3.0]]).T # 3 points at once >>> interpolator(x) array([ 4.1 , 1.8 , 1.45]) - >>> from odl.discr.grid import sparse_meshgrid + >>> from odl.core.discr.grid import sparse_meshgrid >>> mesh = sparse_meshgrid([0.0, 0.5, 1.0], [1.5, 3.5]) >>> interpolator(mesh) # 3x2 grid of points array([[ 0.85, 1.65], [ 3.7 , 5.3 ], [ 2.85, 3.65]]) """ - f = np.asarray(f) + f = asarray(f) # TODO(kohr-h): pass reasonable options on to the interpolator def linear_interp(x, out=None): @@ -480,14 +466,14 @@ def per_axis_interpolator(f, coord_vecs, interp): ... [0.0, 3.0]]).T # 3 points at once >>> interpolator(x) array([ 4. , 2. , 1.5]) - >>> from odl.discr.grid import sparse_meshgrid + >>> from odl.core.discr.grid import sparse_meshgrid >>> mesh = sparse_meshgrid([0.0, 0.5, 1.0], [1.5, 3.5]) >>> interpolator(mesh) # 3x2 grid of points array([[ 1. , 1.5], [ 4. , 5. ], [ 3. , 3.5]]) """ - f = np.asarray(f) + f = asarray(f) interp = _normalize_interp(interp, f.ndim) @@ -530,7 +516,7 @@ def __init__(self, coord_vecs, values, input_type): input_type : {'array', 'meshgrid'} Type of expected input values in ``__call__``. """ - values = np.asarray(values) + values, backend = get_array_and_backend(values) typ_ = str(input_type).lower() if typ_ not in ('array', 'meshgrid'): raise ValueError('`input_type` ({}) not understood' @@ -554,6 +540,10 @@ def __init__(self, coord_vecs, values, input_type): self.values = values self.input_type = input_type + self.backend = backend + self.namespace = backend.array_namespace + self.device = values.device + def __call__(self, x, out=None): """Do the interpolation. @@ -574,9 +564,9 @@ def __call__(self, x, out=None): """ if self.input_type == 'meshgrid': # Given a meshgrid, the evaluation will be on a ragged array. - x = np.asarray(x, dtype=object) + x = [get_array_and_backend(x_)[0] for x_ in x] else: - x = np.asarray(x) + x = get_array_and_backend(x)[0] ndim = len(self.coord_vecs) scalar_out = False @@ -597,9 +587,8 @@ def __call__(self, x, out=None): out_shape = out_shape_from_meshgrid(x) if out is not None: - if not isinstance(out, np.ndarray): - raise TypeError('`out` {!r} not a `numpy.ndarray` ' - 'instance'.format(out)) + if not isinstance(out, self.backend.array_type): + raise TypeError(f'The provided out argument is not an expected {type(self.backend.array_type)} but a {type(out)}') if out.shape != out_shape: raise ValueError('output shape {} not equal to expected ' 'shape {}'.format(out.shape, out_shape)) @@ -627,21 +616,26 @@ def _find_indices(self, x): # iterate through dimensions for xi, cvec in zip(x, self.coord_vecs): - try: - xi = np.asarray(xi).astype(self.values.dtype, casting='safe') - except TypeError: - warn("Unable to infer accurate dtype for" - +" interpolation coefficients, defaulting to `float`.") - xi = np.asarray(xi, dtype=float) - - idcs = np.searchsorted(cvec, xi) - 1 - + # try: + if is_floating_dtype(self.values.dtype): + dtype = real_dtype(self.values.dtype, backend=self.backend) + elif is_int_dtype(self.values.dtype): + dtype = real_dtype(float, backend=self.backend) + else: + raise ValueError(f'Values can only be integers or float, not {type(self.values)}') + xi = self.backend.array_constructor(xi, dtype = dtype, device=self.device) + cvec = self.backend.array_constructor(cvec, dtype = dtype, device=self.device) + idcs = self.namespace.searchsorted(cvec, xi) - 1 idcs[idcs < 0] = 0 - idcs[idcs > cvec.size - 2] = cvec.size - 2 + idcs[idcs > len(cvec) - 2] = len(cvec) - 2 index_vecs.append(idcs) - norm_distances.append((xi - cvec[idcs]) / + try: + norm_distances.append((xi - cvec[idcs]) / (cvec[idcs + 1] - cvec[idcs])) + except Exception as e: + print(f"{type(xi)=}, {type(cvec)=}") + raise e return index_vecs, norm_distances @@ -679,8 +673,9 @@ def __init__(self, coord_vecs, values, input_type): def _evaluate(self, indices, norm_distances, out=None): """Evaluate nearest interpolation.""" idx_res = [] - for i, yi in zip(indices, norm_distances): - idx_res.append(np.where(yi < .5, i, i + 1)) + + for i, yi in zip(indices, norm_distances): + idx_res.append(self.namespace.where(yi < .5, i, i + 1)) idx_res = tuple(idx_res) if out is not None: out[:] = self.values[idx_res] @@ -689,7 +684,7 @@ def _evaluate(self, indices, norm_distances, out=None): return self.values[idx_res] -def _compute_nearest_weights_edge(idcs, ndist): +def _compute_nearest_weights_edge(idcs, ndist, backend): """Helper for nearest interpolation mimicing the linear case.""" # Get out-of-bounds indices from the norm_distances. Negative # means "too low", larger than or equal to 1 means "too high" @@ -698,13 +693,13 @@ def _compute_nearest_weights_edge(idcs, ndist): # For "too low" nodes, the lower neighbor gets weight zero; # "too high" gets 1. - w_lo = np.where(ndist < 0.5, 1.0, 0.0) + w_lo = backend.array_namespace.where(ndist < 0.5, 1.0, 0.0) w_lo[lo] = 0 w_lo[hi] = 1 # For "too high" nodes, the upper neighbor gets weight zero; # "too low" gets 1. - w_hi = np.where(ndist < 0.5, 0.0, 1.0) + w_hi = backend.array_namespace.where(ndist < 0.5, 0.0, 1.0) w_hi[lo] = 1 w_hi[hi] = 0 @@ -717,15 +712,20 @@ def _compute_nearest_weights_edge(idcs, ndist): return w_lo, w_hi, edge -def _compute_linear_weights_edge(idcs, ndist): +def _compute_linear_weights_edge(idcs, ndist, backend): """Helper for linear interpolation.""" - ndist = np.asarray(ndist) + assert(isinstance(ndist, backend.array_type)) # Get out-of-bounds indices from the norm_distances. Negative # means "too low", larger than or equal to 1 means "too high" - lo = np.where(ndist < 0) - hi = np.where(ndist > 1) - + if backend.impl == 'numpy': + lo = backend.array_namespace.where(ndist < 0, ndist, 0).nonzero() + hi = backend.array_namespace.where(ndist > 1, ndist, 0).nonzero() + elif backend.impl == 'pytorch': + lo = backend.array_namespace.where(ndist < 0, ndist, 0).nonzero(as_tuple=True) + hi = backend.array_namespace.where(ndist > 1, ndist, 0).nonzero(as_tuple=True) + else: + raise NotImplementedError # For "too low" nodes, the lower neighbor gets weight zero; # "too high" gets 2 - yi (since yi >= 1) w_lo = (1 - ndist) @@ -734,7 +734,7 @@ def _compute_linear_weights_edge(idcs, ndist): # For "too high" nodes, the upper neighbor gets weight zero; # "too low" gets 1 + yi (since yi < 0) - w_hi = np.copy(ndist) + w_hi = backend.array_constructor(ndist, copy=True) w_hi[lo] += 1 w_hi[hi] = 0 @@ -747,16 +747,16 @@ def _compute_linear_weights_edge(idcs, ndist): return w_lo, w_hi, edge -def _create_weight_edge_lists(indices, norm_distances, interp): +def _create_weight_edge_lists(indices, norm_distances, interp, backend): # Pre-calculate indices and weights (per axis) low_weights = [] high_weights = [] edge_indices = [] for i, (idcs, yi, s) in enumerate(zip(indices, norm_distances, interp)): if s == 'nearest': - w_lo, w_hi, edge = _compute_nearest_weights_edge(idcs, yi) + w_lo, w_hi, edge = _compute_nearest_weights_edge(idcs, yi, backend=backend) elif s == 'linear': - w_lo, w_hi, edge = _compute_linear_weights_edge(idcs, yi) + w_lo, w_hi, edge = _compute_linear_weights_edge(idcs, yi, backend=backend) else: raise ValueError('invalid `interp` {}'.format(interp)) @@ -802,19 +802,23 @@ def _evaluate(self, indices, norm_distances, out=None): if out is None: out_shape = out_shape_from_meshgrid(norm_distances) out_dtype = self.values.dtype - out = np.zeros(out_shape, dtype=out_dtype) + out = self.namespace.zeros( + out_shape, dtype=out_dtype, device=self.device + ) else: out[:] = 0.0 # Weights and indices (per axis) low_weights, high_weights, edge_indices = _create_weight_edge_lists( - indices, norm_distances, self.interp) + indices, norm_distances, self.interp, backend=self.backend) # Iterate over all possible combinations of [i, i+1] for each # axis, resulting in a loop of length 2**ndim for lo_hi, edge in zip(product(*([['l', 'h']] * len(indices))), product(*edge_indices)): - weight = np.array([1.0], dtype=self.values.dtype) + weight = self.backend.array_constructor( + [1.0], dtype=self.values.dtype, device=self.device + ) # TODO(kohr-h): determine best summation order from array strides for lh, w_lo, w_hi in zip(lo_hi, low_weights, high_weights): @@ -823,12 +827,18 @@ def _evaluate(self, indices, norm_distances, out=None): # (n, 1, 1, ...) -> (n, m, 1, ...) -> ... # Hence, it is faster to build up the weight array instead # of doing full-size operations from the beginning. + # Emilien : This array-API compatibility is horribly slow ( sending the individual floats to the gpu while iterating is a hack around the inhomogeneous dimensions returned by _create_weight_edge_lists) if lh == 'l': - weight = weight * w_lo + weight = weight * self.backend.array_constructor( + w_lo, device=self.device) else: - weight = weight * w_hi - out += np.asarray(self.values[edge]) * weight[vslice] - return np.array(out, copy=AVOID_UNNECESSARY_COPY, ndmin=1) + weight = weight * self.backend.array_constructor( + w_hi, device=self.device) + out += self.backend.array_constructor(self.values[edge], device=self.device) * weight[vslice] + # return np.array(out, copy=AVOID_UNNECESSARY_COPY, ndmin=1) + return self.backend.array_constructor( + out, copy=AVOID_UNNECESSARY_COPY, device=self.device + ) class _LinearInterpolator(_PerAxisInterpolator): @@ -939,10 +949,10 @@ def _func_out_type(func): return has_out, out_optional -def _broadcast_nested_list(arr_lists, element_shape, ndim): +def _broadcast_nested_list(arr_lists, element_shape, ndim, backend: ArrayBackend): """ A generalisation of `np.broadcast_to`, applied to an arbitrarily deep list (or tuple) eventually containing arrays or scalars. """ - if isinstance(arr_lists, np.ndarray) or np.isscalar(arr_lists): + if isinstance(arr_lists, backend.array_type) or np.isscalar(arr_lists): if ndim == 1: # As usual, 1d is tedious to deal with. This # code deals with extra dimensions in result @@ -952,13 +962,74 @@ def _broadcast_nested_list(arr_lists, element_shape, ndim): shp = getattr(arr_lists, 'shape', ()) if shp and shp[0] == 1: arr_lists = arr_lists.reshape(arr_lists.shape[1:]) - return np.broadcast_to(arr_lists, element_shape) + return backend.array_namespace.broadcast_to(arr_lists, element_shape) else: - return [_broadcast_nested_list(row, element_shape, ndim) + return [_broadcast_nested_list(row, element_shape, ndim, backend=backend) for row in arr_lists] +def _send_nested_list_to_backend( + arr_lists, backend : ArrayBackend, device, dtype + ): + if backend.impl == 'numpy': + return arr_lists + + if isinstance(arr_lists, np.ndarray) or np.isscalar(arr_lists): + return backend.array_constructor(arr_lists, device=device, dtype=dtype) + + elif isinstance(arr_lists, (tuple,list)): + return [_send_nested_list_to_backend(arr, backend, device, dtype) for arr in arr_lists] + + else: + raise TypeError(f'Type of input {type(arr_lists)} not supported.') + +def sampling_function( + func : Callable | list | tuple, + domain : IntervalProd, + out_dtype : str = None, + impl: str ='numpy', + device: str ='cpu' + ): + def _infer_dtype(out_dtype : str | None): + if out_dtype is None: + out_dtype = 'float64' + else: + assert is_floating_dtype(out_dtype) + return out_dtype + + def _sanitise_callable(func: Callable) -> Callable: + # Get default implementations if necessary + has_out, out_optional = _func_out_type(func) + + if has_out: + raise NotImplementedError('Currently, not implemented for out-of-place functions') + + return func + + def _sanitise_input_function(func: Callable): + ''' + This function aims at unpacking the input function `func`. + The former API expects a callable or array-like (of callables) + The new API checks + ''' + if isinstance(func, Callable): + return _sanitise_callable(func) + elif isinstance(func, (list, tuple)): + raise NotImplementedError('The sampling function cannot be instantiated with a list-like of callables.') + else: + raise NotImplementedError('The function to sample must be either a Callable or an array-like (list, tuple) of callables.') + + ### We begin by sanitising the inputs: + # 1) the dtype + out_dtype = _infer_dtype(out_dtype) + # 2) the func_or_arr + func = _sanitise_input_function(func) -def sampling_function(func_or_arr, domain, out_dtype=None): + ### We then create the function + return _make_single_use_func(func, domain, out_dtype, impl, device) + + + +def old_sampling_function(func_or_arr, domain, out_dtype=None, impl: str ='numpy', device: str ='cpu'): """Return a function that can be used for sampling. For examples on this function's usage, see `point_collocation`. @@ -993,7 +1064,7 @@ def sampling_function(func_or_arr, domain, out_dtype=None): Returns ------- func : function - Wrapper function that has an optional ``out`` argument. + Wrapper function that has no optional ``out`` argument. """ if out_dtype is None: val_shape = None @@ -1070,14 +1141,12 @@ def _default_ip(func_oop, x, out, **kwargs): has_out, out_optional = _func_out_type(func) if not has_out: # Out-of-place-only - func_ip = partial(_default_ip, func) func_oop = func elif out_optional: # Dual-use - func_ip = func_oop = func + func_oop = func else: # In-place-only - func_ip = func func_oop = partial(_default_oop, func) else: @@ -1200,12 +1269,12 @@ def array_wrapper_func(x, out=None, **kwargs): else: out_comp[:] = f(x, **kwargs) - func_ip = func_oop = array_wrapper_func + func_oop = array_wrapper_func - return _make_dual_use_func(func_ip, func_oop, domain, out_dtype) + return _make_single_use_func(func_oop, domain, out_dtype, impl=impl, device=device) -def _make_dual_use_func(func_ip, func_oop, domain, out_dtype): +def _make_single_use_func(func_oop, domain, out_dtype, impl: str ='numpy', device: str ='cpu'): """Return a unifying wrapper function with optional ``out`` argument.""" # Default to `ndim=1` for unusual domains that do not define a dimension @@ -1221,7 +1290,7 @@ def _make_dual_use_func(func_ip, func_oop, domain, out_dtype): tensor_valued = val_shape != () - def dual_use_func(x, out=None, **kwargs): + def dual_use_func(x, **kwargs): """Wrapper function with optional ``out`` argument. This function closes over two other functions, one for in-place, @@ -1288,7 +1357,7 @@ def dual_use_func(x, out=None, **kwargs): if is_valid_input_meshgrid(x, ndim): scalar_in = False scalar_out_shape = out_shape_from_meshgrid(x) - scalar_out = False + # Avoid operations on tuples like x * 2 by casting to array if ndim == 1: x = x[0][None, ...] @@ -1296,12 +1365,12 @@ def dual_use_func(x, out=None, **kwargs): x = np.asarray(x) scalar_in = False scalar_out_shape = out_shape_from_array(x) - scalar_out = False + elif x in domain: x = np.atleast_2d(x).T # make a (d, 1) array scalar_in = True scalar_out_shape = (1,) - scalar_out = (out is None and not tensor_valued) + else: # Unknown input txt_1d = ' or (n,)' if ndim == 1 else '' @@ -1318,42 +1387,48 @@ def dual_use_func(x, out=None, **kwargs): raise ValueError('input contains points outside the domain {!r}' ''.format(domain)) + backend = lookup_array_backend(impl) + array_ns = backend.array_namespace + backend_scalar_out_dtype = backend.available_dtypes[_universal_dtype_identifier(scalar_out_dtype)] + + x = _send_nested_list_to_backend( + x, backend, device, backend_scalar_out_dtype) + if scalar_in: out_shape = val_shape else: out_shape = val_shape + scalar_out_shape - # Call the function and check out shape, before or after - if out is None: - - # The out-of-place evaluation path - - if ndim == 1: - try: - out = func_oop(x, **kwargs) - except (TypeError, IndexError): - # TypeError is raised if a meshgrid was used but the - # function expected an array (1d only). In this case we try - # again with the first meshgrid vector. - # IndexError is raised in expressions like x[x > 0] since - # "x > 0" evaluates to 'True', i.e. 1, and that index is - # out of range for a meshgrid tuple of length 1 :-). To get - # the real errors with indexing, we check again for the - # same scenario (scalar output when not valid) as in the - # first case. - out = func_oop(x[0], **kwargs) - - else: - # Here we don't catch exceptions since they are likely true - # errors + if ndim == 1: + try: out = func_oop(x, **kwargs) + except (TypeError, IndexError): + # TypeError is raised if a meshgrid was used but the + # function expected an array (1d only). In this case we try + # again with the first meshgrid vector. + # IndexError is raised in expressions like x[x > 0] since + # "x > 0" evaluates to 'True', i.e. 1, and that index is + # out of range for a meshgrid tuple of length 1 :-). To get + # the real errors with indexing, we check again for the + # same scenario (scalar output when not valid) as in the + # first case. + out = func_oop(x[0], **kwargs) - if isinstance(out, np.ndarray) or np.isscalar(out): - # Cast to proper dtype if needed, also convert to array if out - # is a scalar. - out = np.asarray(out, dtype=scalar_out_dtype) + else: + # Here we don't catch exceptions since they are likely true + # errors + out = func_oop(x, **kwargs) + + def _process_array(out): + if isinstance(out, backend.array_type) or np.isscalar(out): + # Cast to proper dtype if needed, also convert to array if out is a scalar. + out = backend.array_constructor( + out, + dtype=backend_scalar_out_dtype, + device=device + ) if scalar_in: - out = np.squeeze(out) + out = array_ns.squeeze(out,0) elif ndim == 1 and out.shape == (1,) + out_shape: out = out.reshape(out_shape) @@ -1361,76 +1436,22 @@ def dual_use_func(x, out=None, **kwargs): # Broadcast the returned element, but not in the # scalar case. The resulting array may be read-only, # in which case we copy. - out = np.broadcast_to(out, out_shape) - if not out.flags.writeable: - out = out.copy() - - elif tensor_valued: - # The out object can be any array-like of objects with shapes - # that should all be broadcastable to scalar_out_shape. - try: - out_arr = np.asarray(out) - except ValueError: - out_arr = np.asarray(_broadcast_nested_list( - out, scalar_out_shape, ndim=ndim)) - - if out_arr.dtype != scalar_out_dtype: - raise ValueError( - 'result is of dtype {}, expected {}' - ''.format(dtype_repr(out_arr.dtype), - dtype_repr(scalar_out_dtype)) - ) + out = array_ns.broadcast_to(out, out_shape) + out = backend.array_constructor(out, copy=True) + return out - out = out_arr.reshape(out_shape) + elif isinstance(out, (tuple, list)): + result = [] + assert len(out) != 0 + for sub_out in out: + result.append(_process_array(sub_out)) + return result - else: - # TODO(kohr-h): improve message - raise RuntimeError('bad output of function call') - - else: - # The in-place evaluation path - - if not isinstance(out, np.ndarray): - raise TypeError( - 'output must be a `numpy.ndarray` got {!r}' - ''.format(out) - ) - if out_shape != (1,) and out.shape != out_shape: - raise ValueError( - 'output has shape, expected {} from input' - ''.format(out.shape, out_shape) - ) - if out.dtype != scalar_out_dtype: - raise ValueError( - '`out` is of dtype {}, expected {}' - ''.format(out.dtype, scalar_out_dtype) - ) - - if ndim == 1 and not tensor_valued: - # TypeError for meshgrid in 1d, but expected array (see above) - try: - func_ip(x, out, **kwargs) - except TypeError: - func_ip(x[0], out, **kwargs) - else: - func_ip(x, out=out, **kwargs) - - # If we are to output a scalar, convert the result - - # Numpy < 1.12 does not implement __complex__ for arrays (in contrast - # to __float__), so we have to fish out the scalar ourselves. - if scalar_out: - scalar = out.ravel()[0].item() - if is_real_dtype(out_dtype): - return float(scalar) - else: - return complex(scalar) - else: - return out + return _process_array(out) return dual_use_func if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/discr/grid.py b/odl/core/discr/grid.py similarity index 99% rename from odl/discr/grid.py rename to odl/core/discr/grid.py index 0317629fe93..bb03156420c 100644 --- a/odl/discr/grid.py +++ b/odl/core/discr/grid.py @@ -15,8 +15,8 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.set import Set, IntervalProd -from odl.util import ( +from odl.core.set import Set, IntervalProd +from odl.core.util import ( normalized_index_expression, normalized_scalar_param_list, safe_int_conv, array_str, signature_string, indent, npy_printoptions) @@ -327,7 +327,7 @@ def min(self, **kwargs): See Also -------- max - odl.set.domain.IntervalProd.min + odl.core.set.domain.IntervalProd.min Examples -------- @@ -358,7 +358,7 @@ def max(self, **kwargs): See Also -------- min - odl.set.domain.IntervalProd.max + odl.core.set.domain.IntervalProd.max Examples -------- @@ -1097,7 +1097,7 @@ def uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=True): See Also -------- uniform_grid : Create a uniform grid directly. - odl.discr.partition.uniform_partition_fromintv : + odl.core.discr.partition.uniform_partition_fromintv : divide interval product into equally sized subsets """ if not isinstance(intv_prod, IntervalProd): @@ -1207,7 +1207,7 @@ def uniform_grid(min_pt, max_pt, shape, nodes_on_bdry=True): -------- uniform_grid_fromintv : sample a given interval product - odl.discr.partition.uniform_partition : + odl.core.discr.partition.uniform_partition : divide implicitly defined interval product into equally sized subsets @@ -1238,5 +1238,5 @@ def uniform_grid(min_pt, max_pt, shape, nodes_on_bdry=True): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/discr/partition.py b/odl/core/discr/partition.py similarity index 99% rename from odl/discr/partition.py rename to odl/core/discr/partition.py index 78aaeeeb35c..4f798bb04ea 100644 --- a/odl/discr/partition.py +++ b/odl/core/discr/partition.py @@ -18,11 +18,11 @@ from builtins import object import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr.grid import RectGrid, uniform_grid_fromintv -from odl.set import IntervalProd -from odl.util import ( +from odl.core.discr.grid import RectGrid, uniform_grid_fromintv +from odl.core.set import IntervalProd +from odl.core.util import ( normalized_index_expression, normalized_nodes_on_bdry, normalized_scalar_param_list, safe_int_conv, signature_string, indent, array_str, npy_printoptions) @@ -193,7 +193,7 @@ def min(self): See Also -------- - odl.set.domain.IntervalProd.min + odl.core.set.domain.IntervalProd.min """ return self.set.min() @@ -202,7 +202,7 @@ def max(self): See Also -------- - odl.set.domain.IntervalProd.max + odl.core.set.domain.IntervalProd.max """ return self.set.max() @@ -291,7 +291,7 @@ def points(self, order='C'): See Also -------- - odl.discr.grid.RectGrid.points + odl.core.discr.grid.RectGrid.points """ return self.grid.points(order) @@ -694,8 +694,8 @@ def squeeze(self, axis=None): See Also -------- - odl.discr.grid.RectGrid.squeeze - odl.set.domain.IntervalProd.squeeze + odl.core.discr.grid.RectGrid.squeeze + odl.core.set.domain.IntervalProd.squeeze """ if axis is None: rng = range(self.ndim) @@ -1425,5 +1425,5 @@ def nonuniform_partition(*coord_vecs, **kwargs): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/operator/__init__.py b/odl/core/operator/__init__.py similarity index 100% rename from odl/operator/__init__.py rename to odl/core/operator/__init__.py diff --git a/odl/operator/default_ops.py b/odl/core/operator/default_ops.py similarity index 98% rename from odl/operator/default_ops.py rename to odl/core/operator/default_ops.py index 448da71f2c2..7a2a2faf5e7 100644 --- a/odl/operator/default_ops.py +++ b/odl/core/operator/default_ops.py @@ -14,12 +14,14 @@ from copy import copy +from numbers import Number import numpy as np -from odl.operator.operator import Operator -from odl.set import ComplexNumbers, Field, LinearSpace, RealNumbers -from odl.set.space import LinearSpaceElement -from odl.space import ProductSpace +from odl.core.operator.operator import Operator +from odl.core.set import ComplexNumbers, Field, LinearSpace, RealNumbers +from odl.core.set.space import LinearSpaceElement +from odl.core.space import ProductSpace +from odl.core.array_API_support import sqrt, conj __all__ = ('ScalingOperator', 'ZeroOperator', 'IdentityOperator', 'LinCombOperator', 'MultiplyOperator', 'PowerOperator', @@ -301,6 +303,15 @@ def __init__(self, multiplicand, domain=None, range=None): >>> op2(3, out) rn(3).element([ 3., 6., 9.]) """ + # TODO: handle the complex conversion case better. + if not isinstance(multiplicand, LinearSpaceElement): + assert domain is not None or range is not None + if domain is None: + domain = range + if range is None: + range = domain + assert isinstance(multiplicand, Number) + if domain is None: domain = multiplicand.space @@ -383,7 +394,7 @@ def adjoint(self): 'adjoint not implemented for domain{!r}' ''.format(self.domain)) elif self.domain.is_complex: - return MultiplyOperator(np.conj(self.multiplicand), + return MultiplyOperator(conj(self.multiplicand), domain=self.range, range=self.domain) else: return MultiplyOperator(self.multiplicand, @@ -1400,7 +1411,7 @@ def __init__(self, space): def _call(self, x): """Return ``self(x)``.""" - return (x.real ** 2 + x.imag ** 2).ufuncs.sqrt() + return sqrt(x.real ** 2 + x.imag ** 2) def derivative(self, x): r"""Return the derivative operator in the "C = R^2" sense. @@ -1757,5 +1768,5 @@ def adjoint(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/operator/operator.py b/odl/core/operator/operator.py similarity index 98% rename from odl/operator/operator.py rename to odl/core/operator/operator.py index ccde843c1ca..682c8110feb 100644 --- a/odl/operator/operator.py +++ b/odl/core/operator/operator.py @@ -15,8 +15,8 @@ from builtins import object from numbers import Integral, Number -from odl.set import Field, LinearSpace, Set -from odl.set.space import LinearSpaceElement +from odl.core.set import Field, LinearSpace, Set +from odl.core.set.space import LinearSpaceElement __all__ = ( 'Operator', @@ -656,7 +656,8 @@ def __call__(self, x, out=None, **kwargs): 'when range is a field') result = self._call_in_place(x, out=out, **kwargs) - if result is not None and result is not out: + # TODO: At present, we perform an equality check on the entire array, which is as inefficient as it gets. We'd rather perform a pointer equality with the "is" keyword. However, the current machinery for the _call_in_place function might be creating a new out object, which leads to the "is" equality failing. We must investigate this _call_in_place function to identify when and why are objects created/deleted. + if result is not None and result != out: raise ValueError('`op` returned a different value than `out`. ' 'With in-place evaluation, the operator can ' 'only return nothing (`None`) or the `out` ' @@ -665,6 +666,9 @@ def __call__(self, x, out=None, **kwargs): else: # Out-of-place evaluation out = self._call_out_of_place(x, **kwargs) + if self.domain is not None and out is None: + raise OpRangeError( + "The out-of-place version of the operator does not return a value.") if out not in self.range: try: out = self.range.element(out) @@ -723,7 +727,7 @@ def norm(self, estimate=False, **kwargs): if norm is not None: return norm else: - from odl.operator.oputils import power_method_opnorm + from odl.core.operator.oputils import power_method_opnorm self.__norm = power_method_opnorm(self, **kwargs) return self.__norm @@ -1248,7 +1252,7 @@ def _call(self, x, out=None): if out is None: out = self.operator(x) else: - self.operator(x, out=out) + self.operator(x, out=out) out += self.vector return out @@ -1283,7 +1287,9 @@ def __repr__(self): def __str__(self): """Return ``str(self)``.""" - return '({} + {})'.format(self.left, self.right) + # return '({} + {})'.format(self.left, self.right) + return '{}({!r}, {!r})'.format(self.__class__.__name__, + self.operator, self.vector) class OperatorComp(Operator): @@ -1484,8 +1490,8 @@ def derivative(self, x): if self.is_linear: return self else: - left = self.right(x) * self.left.derivative(x) - right = self.left(x) * self.right.derivative(x) + left = self.right(x) @ self.left.derivative(x) + right = self.left(x) @ self.right.derivative(x) return left + right def __repr__(self): @@ -2030,7 +2036,7 @@ def derivative(self, x): if self.is_linear: return self else: - return self.vector * self.operator.derivative(x) + return self.vector @ self.operator.derivative(x) @property def adjoint(self): @@ -2177,9 +2183,9 @@ def adjoint(self): if self.vector.space.is_real: # The complex conjugate of a real vector is the vector itself. - return self.vector * self.operator.adjoint + return self.vector @ self.operator.adjoint else: - return self.vector.conj() * self.operator.adjoint + return self.vector.conj() @ self.operator.adjoint def __repr__(self): """Return ``repr(self)``.""" @@ -2225,5 +2231,5 @@ class OpNotImplementedError(NotImplementedError): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/operator/oputils.py b/odl/core/operator/oputils.py similarity index 92% rename from odl/operator/oputils.py rename to odl/core/operator/oputils.py index 74f478cc5eb..17ed55049cb 100644 --- a/odl/operator/oputils.py +++ b/odl/core/operator/oputils.py @@ -12,10 +12,10 @@ import numpy as np from future.utils import native -from odl.space import ProductSpace -from odl.space.base_tensors import TensorSpace -from odl.util import nd_iterator -from odl.util.testutils import noise_element +from odl.core.space import ProductSpace +from odl.core.space.base_tensors import TensorSpace +from odl.core.util import nd_iterator +from odl.core.util.testutils import noise_element __all__ = ( 'matrix_representation', @@ -76,7 +76,7 @@ def matrix_representation(op): [[ 4. , -4.75], [ 4. , -6.75]] ]) - >>> np.tensordot(tensor, x, axes=grad.domain.ndim) + >>> np.tensordot(tensor, x.data, axes=grad.domain.ndim) array([[[ 2. , 2. ], [-2.75, -6.75]], @@ -109,8 +109,15 @@ def matrix_representation(op): 'components'.format(op.range)) # Generate the matrix - dtype = np.promote_types(op.domain.dtype, op.range.dtype) - matrix = np.zeros(op.range.shape + op.domain.shape, dtype=dtype) + if isinstance(op.domain, TensorSpace): + namespace = op.domain.array_namespace + device = op.domain.device + else: + namespace = op[0][0].domain.array_namespace + device = op[0][0].domain.device + dtype = namespace.result_type(op.domain.dtype, op.range.dtype) + matrix = namespace.zeros( + op.range.shape + op.domain.shape, dtype=dtype, device=device) tmp_ran = op.range.element() # Store for reuse in loop tmp_dom = op.domain.zero() # Store for reuse in loop @@ -348,7 +355,7 @@ def as_scipy_functional(func, return_gradient=False): Wrap functional and solve simple problem (here toy problem ``min_x ||x||^2``): - >>> func = odl.solvers.L2NormSquared(odl.rn(3)) + >>> func = odl.functional.L2NormSquared(odl.rn(3)) >>> scipy_func = odl.as_scipy_functional(func) >>> from scipy.optimize import minimize >>> result = minimize(scipy_func, x0=[0, 1, 0]) @@ -357,7 +364,7 @@ def as_scipy_functional(func, return_gradient=False): The gradient (jacobian) can also be provided: - >>> func = odl.solvers.L2NormSquared(odl.rn(3)) + >>> func = odl.functional.L2NormSquared(odl.rn(3)) >>> scipy_func, scipy_grad = odl.as_scipy_functional(func, True) >>> from scipy.optimize import minimize >>> result = minimize(scipy_func, x0=[0, 1, 0], jac=scipy_grad) @@ -371,12 +378,12 @@ def as_scipy_functional(func, return_gradient=False): is ``CudaFn`` or some other nonlocal type, the overhead is significant. """ def func_call(arr): - return func(np.asarray(arr).reshape(func.domain.shape)) + return func(func.domain.element(np.asarray(arr).reshape(func.domain.shape))) if return_gradient: def func_gradient_call(arr): return np.asarray( - func.gradient(np.asarray(arr).reshape(func.domain.shape))) + func.gradient(np.asarray(arr).reshape(func.domain.shape)).data) return func_call, func_gradient_call else: @@ -384,6 +391,6 @@ def func_gradient_call(arr): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/operator/pspace_ops.py b/odl/core/operator/pspace_ops.py similarity index 98% rename from odl/operator/pspace_ops.py rename to odl/core/operator/pspace_ops.py index df98806720c..134b6be0f4d 100644 --- a/odl/operator/pspace_ops.py +++ b/odl/core/operator/pspace_ops.py @@ -12,10 +12,10 @@ from numbers import Integral import numpy as np -from odl.operator.operator import Operator -from odl.operator.default_ops import ZeroOperator -from odl.space import ProductSpace -from odl.util import COOMatrix +from odl.core.operator.operator import Operator +from odl.core.operator.default_ops import ZeroOperator +from odl.core.space import ProductSpace +from odl.core.util import COOMatrix __all__ = ('ProductSpaceOperator', @@ -240,9 +240,6 @@ def __init__(self, operators, domain=None, range=None): def _convert_to_spmatrix(operators): """Convert an array-like object of operators to a sparse matrix.""" - # Lazy import to improve `import odl` time - # import scipy.sparse - # Convert ops to sparse representation. This is not trivial because # operators can be indexable themselves and give the wrong impression # of an extra dimension. So we have to infer the shape manually @@ -380,9 +377,6 @@ def derivative(self, x): [ 0., 0., 0.] ]) """ - # Lazy import to improve `import odl` time - import scipy.sparse - # Short circuit optimization if self.is_linear: return self @@ -434,9 +428,6 @@ def adjoint(self): [ 1., 2., 3.] ]) """ - # Lazy import to improve `import odl` time - import scipy.sparse - adjoint_ops = [op.adjoint for op in self.ops.data] data = np.empty(len(adjoint_ops), dtype=object) data[:] = adjoint_ops @@ -1145,8 +1136,6 @@ def __init__(self, *operators, **kwargs): >>> op.operators (IdentityOperator(rn(3)), IdentityOperator(rn(3))) """ - # Lazy import to improve `import odl` time - import scipy.sparse if (len(operators) == 2 and isinstance(operators[0], Operator) and @@ -1296,5 +1285,5 @@ def __repr__(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/operator/tensor_ops.py b/odl/core/operator/tensor_ops.py similarity index 73% rename from odl/operator/tensor_ops.py rename to odl/core/operator/tensor_ops.py index 91eb1ac9d28..399bc214f5a 100644 --- a/odl/operator/tensor_ops.py +++ b/odl/core/operator/tensor_ops.py @@ -11,25 +11,162 @@ from __future__ import absolute_import, division, print_function from numbers import Integral +from typing import Optional import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.operator.operator import Operator -from odl.set import ComplexNumbers, RealNumbers -from odl.space import ProductSpace, tensor_space -from odl.space.base_tensors import TensorSpace -from odl.space.weighting import ArrayWeighting -from odl.util import dtype_repr, indent, signature_string, writable_array +from odl.core.operator.operator import Operator +from odl.core.set import ComplexNumbers, RealNumbers +from odl.core.space import ProductSpace, tensor_space +from odl.core.space.base_tensors import TensorSpace, Tensor +from odl.core.space.weightings.weighting import ArrayWeighting +from odl.core.util import dtype_repr, indent, signature_string +from odl.core.array_API_support import ArrayBackend, lookup_array_backend, abs as odl_abs, maximum, pow, sqrt, multiply, get_array_and_backend, can_cast, odl_all_equal + +from odl.core.sparse import is_sparse, get_sparse_matrix_impl, lookup_sparse_format __all__ = ('PointwiseNorm', 'PointwiseInner', 'PointwiseSum', 'MatrixOperator', 'SamplingOperator', 'WeightedSumSamplingOperator', - 'FlatteningOperator') + 'FlatteningOperator', 'DeviceChangeOperator') _SUPPORTED_DIFF_METHODS = ('central', 'forward', 'backward') +class DeviceChangeOperator(Operator): + """An operator that is mathematically the identity, but whose domain and codomain + differ in where they store their arrays. + This is useful as an adaptor between operators that need to use different devices + for some reason. + Note that it is usually more efficient to implement your whole pipeline on a single + device, if possible. + """ + def __init__(self, domain=None, range=None, domain_device=None, range_device=None): + """Create an operator tying two equivalent spaces with different storage together. + + Parameters + ---------- + domain, range : `TensorSpace`, optional + Spaces of vectors. Usually only one of them is specified; if both are + given, they must be identical save for the device. + domain_device, range_device : `str`, optional + Device specifiers such as `'cpu'` or `'cuda:0'`. Which ones are + supported depends on the backend and hardware. + If e.g. `domain` and `range_device` are specified, the range will be + chosen as `domain.to_device(range_device)`, vice versa. + """ + if range is None: + assert domain is not None + assert range_device is not None + assert domain_device is None or domain_device == domain.device + range = domain.to_device(range_device) + elif domain is None: + assert range is not None + assert domain_device is not None + assert range_device is None or range_device == range.device + domain = range.to_device(domain_device) + else: + assert domain_device is None or domain_device == domain.device + assert range_device is None or range_device == range.device + assert(domain.to_device(range.device) == range) + super().__init__(domain, range=range, linear=True) + + def _call(self, x): + """Copy data to the intended device.""" + return x.to_device(self.range.device) + + @property + def inverse(self): + """Operator that copies data back to the original device.""" + return DeviceChangeOperator(domain=self.range, range=self.domain) + + @property + def adjoint(self): + """Adjoint is the same as inverse, as device change is mathematically + the identity.""" + return self.inverse + + def norm(self, estimate=False, **kwargs): + """Return the operator norm of this operator. This is 1, as the + operator is mathematically the identity.""" + return 1 + + def __repr__(self): + """Represent the operator by its domain and the device of the range.""" + return f"{self.__class__.__name__}(domain={repr(self.domain)}, range_device={repr(self.range.device)})" + + def __str__(self): + return f"{self.__class__.__name__}(domain={str(self.domain)}, range_device={str(self.range.device)})" + +class ImplChangeOperator(Operator): + """An operator that is mathematically the identity, but whose domain and codomain + differ in what backend they use for their arrays. + This is useful as an adaptor between operators that need to use different backend + for some reason, for example one operator implemented through bespoke C code and + one operator implemented with PyTorch neural networks. + Note that it is usually more efficient to keep your whole pipeline on a single + backend and device, if possible. + """ + def __init__(self, domain=None, range=None, domain_impl=None, range_impl=None): + """Create an operator tying two equivalent spaces with different storage together. + + Parameters + ---------- + domain, range : `TensorSpace`, optional + Spaces of vectors. Usually only one of them is specified; if both are + given, they must be identical save for the backend (`impl`). + domain_impl, range_impl : `str`, optional + Backend identifier. Must correspond to a registered backend, + cf. `odl.core.space.entry_points.tensor_space_impl_names`. + If e.g. `domain` and `range_impl` are specified, the range will be + chosen as `domain.to_impl(range_impl)`, vice versa. + The device of the space must be usable simultaneously with both of + the backends. + """ + if range is None: + assert impl is not None + assert range_impl is not None + assert domain_impl is None or domain_impl == domain.impl + range = domain.to_impl(range_impl) + elif domain is None: + assert range is not None + assert domain_impl is not None + assert range_impl is None or range_impl == range.impl + domain = range.to_impl(domain_impl) + else: + assert domain_impl is None or domain_impl == domain.impl + assert range_impl is None or range_impl == range.impl + assert(domain.to_impl(range.impl) == range) + super().__init__(domain, range=range, linear=True) + + def _call(self, x): + """Copy data to the intended backend.""" + return x.to_impl(self.range.impl) + + @property + def inverse(self): + """Operator that copies data back to the original backend.""" + return ImplChangeOperator(domain=self.range, range=self.domain) + + @property + def adjoint(self): + """Adjoint is the same as inverse, as backend change is mathematically + the identity.""" + return self.inverse + + def norm(self, estimate=False, **kwargs): + """Return the operator norm of this operator. This is 1, as the + operator is mathematically the identity.""" + return 1 + + def __repr__(self): + """Represent the operator by its domain and the impl of the range.""" + return f"{self.__class__.__name__}(domain={repr(self.domain)}, range_impl={repr(self.range.impl)})" + + def __str__(self): + return f"{self.__class__.__name__}(domain={str(self.domain)}, range_impl={str(self.range.impl)})" + class PointwiseTensorFieldOperator(Operator): """Abstract operator for point-wise tensor field manipulations. @@ -52,7 +189,7 @@ class PointwiseTensorFieldOperator(Operator): See Also -------- - odl.space.pspace.ProductSpace + odl.core.space.pspace.ProductSpace """ def __init__(self, domain, range, base_space, linear=False): @@ -168,7 +305,7 @@ def __init__(self, vfspace, exponent=None, weighting=None): raise TypeError('`vfspace` {!r} is not a ProductSpace ' 'instance'.format(vfspace)) super(PointwiseNorm, self).__init__( - domain=vfspace, range=vfspace[0], base_space=vfspace[0], + domain=vfspace, range=vfspace[0].real_space, base_space=vfspace[0], linear=False) # Need to check for product space shape once higher order tensors @@ -190,24 +327,42 @@ def __init__(self, vfspace, exponent=None, weighting=None): if hasattr(self.domain.weighting, 'array'): self.__weights = self.domain.weighting.array elif hasattr(self.domain.weighting, 'const'): - self.__weights = (self.domain.weighting.const * - np.ones(len(self.domain))) + self.__weights = [self.domain.weighting.const *self.domain[i].one() for i in range(len(vfspace))] else: raise ValueError('weighting scheme {!r} of the domain does ' 'not define a weighting array or constant' ''.format(self.domain.weighting)) - elif np.isscalar(weighting): - if weighting <= 0: - raise ValueError('weighting constant must be positive, got ' - '{}'.format(weighting)) - self.__weights = float(weighting) * np.ones(len(self.domain)) + self.__is_weighted = False + else: - self.__weights = np.asarray(weighting, dtype='float64') - if (not np.all(self.weights > 0) or - not np.all(np.isfinite(self.weights))): - raise ValueError('weighting array {} contains invalid ' - 'entries'.format(weighting)) - self.__is_weighted = not np.array_equiv(self.weights, 1.0) + ### This is a bad situation: although we worked hard to get an elegant weighting, the PointwiseNorm just yanks all of that down the drain by reimplementing the norm operation and the input sanitisation just for a ProductSpace. + ### EV reimplemented these two functionnalities but moving forward, this should be coerced into abiding to our new API + + if isinstance(weighting, list) and all([isinstance(w, Tensor) for w in weighting]) : + self.__weights = weighting + self.__is_weighted = all([odl_all_equal(w, 1) for w in weighting]) + else: + if isinstance(weighting, (int, float)): + weighting = [weighting for _ in range(len(self.domain))] + + weighted_flag = [] + for i in range(len(self.domain)): + if weighting[i] <= 0: + raise ValueError(f'weighting array weighting contains invalid entry {weighting[i]}') + if weighting[i] in [1,1.0]: + weighted_flag.append(False) + else: + weighted_flag.append(True) + self.__is_weighted = True if any(weighted_flag) else False + + weighting = [ + self.domain[i].tspace.broadcast_to(weighting[i]) + for i in range(len(self.domain)) + ] + + self.__weights = [] + for i in range(len(self.domain)): + self.__weights.append(self.domain[i].element(weighting[i])) @property def exponent(self): @@ -235,7 +390,8 @@ def _call(self, f, out): def _call_vecfield_1(self, vf, out): """Implement ``self(vf, out)`` for exponent 1.""" - vf[0].ufuncs.absolute(out=out) + + odl_abs(vf[0], out=out) if self.is_weighted: out *= self.weights[0] @@ -244,14 +400,14 @@ def _call_vecfield_1(self, vf, out): tmp = self.range.element() for fi, wi in zip(vf[1:], self.weights[1:]): - fi.ufuncs.absolute(out=tmp) + odl_abs(fi, out=tmp) if self.is_weighted: tmp *= wi out += tmp def _call_vecfield_inf(self, vf, out): """Implement ``self(vf, out)`` for exponent ``inf``.""" - vf[0].ufuncs.absolute(out=out) + odl_abs(vf[0], out=out) if self.is_weighted: out *= self.weights[0] @@ -260,45 +416,45 @@ def _call_vecfield_inf(self, vf, out): tmp = self.range.element() for vfi, wi in zip(vf[1:], self.weights[1:]): - vfi.ufuncs.absolute(out=tmp) + odl_abs(vfi, out=tmp) if self.is_weighted: tmp *= wi - out.ufuncs.maximum(tmp, out=out) + maximum(out, tmp, out=out) def _call_vecfield_p(self, vf, out): """Implement ``self(vf, out)`` for exponent 1 < p < ``inf``.""" # Optimization for 1 component - just absolute value (maybe weighted) if len(self.domain) == 1: - vf[0].ufuncs.absolute(out=out) + odl_abs(vf[0], out=out) if self.is_weighted: out *= self.weights[0] ** (1 / self.exponent) return # Initialize out, avoiding one copy - self._abs_pow_ufunc(vf[0], out=out, p=self.exponent) + self._abs_pow(vf[0], out=out, p=self.exponent) if self.is_weighted: out *= self.weights[0] tmp = self.range.element() for fi, wi in zip(vf[1:], self.weights[1:]): - self._abs_pow_ufunc(fi, out=tmp, p=self.exponent) + self._abs_pow(fi, out=tmp, p=self.exponent) if self.is_weighted: tmp *= wi out += tmp - self._abs_pow_ufunc(out, out=out, p=(1 / self.exponent)) + self._abs_pow(out, out=out, p=(1 / self.exponent)) - def _abs_pow_ufunc(self, fi, out, p): + def _abs_pow(self, fi, out, p): """Compute |F_i(x)|^p point-wise and write to ``out``.""" - # Optimization for very common cases + # Optimization for very common cases if p == 0.5: - fi.ufuncs.absolute(out=out) - out.ufuncs.sqrt(out=out) + odl_abs(fi, out=out) + sqrt(out, out=out) elif p == 2.0 and self.base_space.field == RealNumbers(): - fi.multiply(fi, out=out) + multiply(fi, fi, out=out) else: - fi.ufuncs.absolute(out=out) - out.ufuncs.power(p, out=out) + odl_abs(fi, out=out) + pow(out, p, out=out) def derivative(self, vf): """Derivative of the point-wise norm operator at ``vf``. @@ -345,7 +501,7 @@ def derivative(self, vf): inner_vf = vf.copy() for gi in inner_vf: - gi *= gi.ufuncs.absolute().ufuncs.power(self.exponent - 2) + gi *= pow(odl_abs(gi), self.exponent - 2) if self.exponent >= 2: # Any component that is zero is not divided with nz = (vf_pwnorm_fac.asarray() != 0) @@ -392,7 +548,7 @@ def __init__(self, adjoint, vfspace, vecfield, weighting=None): weightings with custom inner product, norm or dist. """ if not isinstance(vfspace, ProductSpace): - raise TypeError('`vfsoace` {!r} is not a ProductSpace ' + raise TypeError('`vfspace` {!r} is not a ProductSpace ' 'instance'.format(vfspace)) if adjoint: super(PointwiseInnerBase, self).__init__( @@ -416,20 +572,46 @@ def __init__(self, adjoint, vfspace, vecfield, weighting=None): # Handle weighting, including sanity checks if weighting is None: + self.__is_weighted = False if hasattr(vfspace.weighting, 'array'): self.__weights = vfspace.weighting.array elif hasattr(vfspace.weighting, 'const'): - self.__weights = (vfspace.weighting.const * - np.ones(len(vfspace))) + # Casting the constant to an array of constants is just bad + self.__weights = [vfspace.weighting.const *vfspace[i].one() for i in range(len(vfspace))] else: raise ValueError('weighting scheme {!r} of the domain does ' 'not define a weighting array or constant' ''.format(vfspace.weighting)) - elif np.isscalar(weighting): - self.__weights = float(weighting) * np.ones(len(vfspace)) + else: - self.__weights = np.asarray(weighting, dtype='float64') - self.__is_weighted = not np.array_equiv(self.weights, 1.0) + # Check if the input has already been sanitised, i.e is it an odl.Tensor + if isinstance(weighting, list) and all([isinstance(w, Tensor) for w in weighting]) : + self.__weights = weighting + self.__is_weighted = all([odl_all_equal(w, 1) for w in weighting]) + + # these are required to provide an array-API compatible weighting parsing. + else: + if isinstance(weighting, (int, float)): + weighting = [weighting for i in range(len(vfspace))] + + weighted_flag = [] + for i in range(len(vfspace)): + if weighting[i] <= 0: + raise ValueError(f'weighting array weighting contains invalid entry {weighting[i]}') + if weighting[i] in [1,1.0]: + weighted_flag.append(False) + else: + weighted_flag.append(True) + self.__is_weighted = True if any(weighted_flag) else False + + weighting = [ + vfspace[i].tspace.broadcast_to(weighting[i]) + for i in range(len(vfspace)) + ] + + self.__weights = [] + for i in range(len(vfspace)): + self.weights.append(vfspace[i].element(weighting[i])) @property def vecfield(self): @@ -617,10 +799,12 @@ def __init__(self, sspace, vecfield, vfspace=None, weighting=None): # Get weighting from range if hasattr(self.range.weighting, 'array'): - self.__ran_weights = self.range.weighting.array + ### The tolist() is an ugly tweak to recover a list from the pspace weighting.array which is stored in numpy + self.__ran_weights = vfspace.element(self.range.weighting.array.tolist()) elif hasattr(self.range.weighting, 'const'): - self.__ran_weights = (self.range.weighting.const * - np.ones(len(self.range))) + # Casting the constant to an array of constants is just bad + self.__ran_weights = [self.range.weighting.const *self.range[i].one() for i in range(len(self.range))] + else: raise ValueError('weighting scheme {!r} of the range does ' 'not define a weighting array or constant' @@ -631,8 +815,9 @@ def _call(self, f, out): for vfi, oi, ran_wi, dom_wi in zip(self.vecfield, out, self.__ran_weights, self.weights): vfi.multiply(f, out=oi) - if not np.isclose(ran_wi, dom_wi): - oi *= dom_wi / ran_wi + # Removed the optimisation here, it would require casting ran_wi as odl.TensorSpaceElement + # if not isclose(ran_wi, dom_wi).all(): + oi *= dom_wi / ran_wi @property def adjoint(self): @@ -716,7 +901,10 @@ class MatrixOperator(Operator): recommended to use other alternatives if possible. """ - def __init__(self, matrix, domain=None, range=None, axis=0): + def __init__(self, matrix, domain=None, range=None, + impl: Optional[str]=None, + device: Optional[str]=None, + axis=0): r"""Initialize a new instance. Parameters @@ -725,6 +913,10 @@ def __init__(self, matrix, domain=None, range=None, axis=0): 2-dimensional array representing the linear operator. For Scipy sparse matrices only tensor spaces with ``ndim == 1`` are allowed as ``domain``. + The matrix is copied to `impl`/`device`, if these are + specified (only once, when the operator is initialized). + If a plain Python list is supplied, it will first + be converted to a NumPy array. domain : `TensorSpace`, optional Space of elements on which the operator can act. Its ``dtype`` must be castable to ``range.dtype``. @@ -737,6 +929,16 @@ def __init__(self, matrix, domain=None, range=None, axis=0): of the result of the multiplication. For the default ``None``, the range is inferred from ``matrix``, ``domain`` and ``axis``. + impl : `ArrayBackend`-identifying `str`, optional + Which backend to use for the low-level matrix multiplication. + If not explicitly provided, it will be inferred in the following + order of preference, depending on what is available: + 1. from `domain` + 2. from `range` + 3. from `matrix` + device : `str`, optional + On which device to store the matrix. + Same defaulting logic as for `impl`. axis : int, optional Sum over this axis of an input tensor in the multiplication. @@ -792,13 +994,66 @@ def __init__(self, matrix, domain=None, range=None, axis=0): It produces a new tensor :math:`A \cdot T \in \mathbb{F}^{ n_1 \times \dots \times n \times \dots \times n_d}`. """ - # Lazy import to improve `import odl` time - import scipy.sparse - - if scipy.sparse.isspmatrix(matrix): + def infer_backend_from(default_backend): + if impl is not None: + self.__array_backend = lookup_array_backend(impl) + else: + assert(isinstance(default_backend, ArrayBackend)) + self.__array_backend = default_backend + def infer_device_from(default_device): + self.__device = default_device if device is None else device + + self._sparse_format = lookup_sparse_format(matrix) + + if domain is not None: + infer_backend_from(domain.array_backend) + infer_device_from(domain.device) + + elif range is not None: + infer_backend_from(range.array_backend) + infer_device_from(range.device) + + elif self.is_sparse: + if self._sparse_format.impl == 'scipy': + infer_backend_from(lookup_array_backend('numpy')) + infer_device_from('cpu') + + elif self._sparse_format.impl == 'pytorch': + infer_backend_from(lookup_array_backend('pytorch')) + infer_device_from(matrix.device) + + else: + raise ValueError + + elif isinstance(matrix, (list, tuple)): + infer_backend_from(lookup_array_backend('numpy')) + infer_device_from('cpu') + else: + infer_backend_from(get_array_and_backend(matrix)[1]) + infer_device_from(matrix.device) + + self.__arr_ns = self.array_backend.array_namespace + + if self.is_sparse: + if self._sparse_format.impl == 'scipy': + if self.array_backend.impl != 'numpy': + raise TypeError(f"SciPy sparse matrices can only be used with NumPy on CPU, not {self.array_backend.impl}.") + if self.device != 'cpu': + raise TypeError(f"SciPy sparse matrices can only be used with NumPy on CPU, not {device}.") + elif self._sparse_format.impl == 'pytorch': + if self.array_backend.impl != 'pytorch': + raise TypeError(f"PyTorch sparse matrices can only be used with Pytorch, not {self.array_backend.impl}.") self.__matrix = matrix + + elif isinstance(matrix, Tensor): + self.__matrix = matrix.data + self.__matrix = self.__arr_ns.asarray(matrix.data, device=self.__device, copy=AVOID_UNNECESSARY_COPY) + while len(self.__matrix.shape) < 2: + self.__matrix = self.__matrix[None] else: - self.__matrix = np.array(matrix, copy=AVOID_UNNECESSARY_COPY, ndmin=2) + self.__matrix = self.__arr_ns.asarray(matrix, device=self.__device, copy=AVOID_UNNECESSARY_COPY) + while len(self.__matrix.shape) < 2: + self.__matrix = self.__matrix[None] self.__axis, axis_in = int(axis), axis if self.axis != axis_in: @@ -810,14 +1065,18 @@ def __init__(self, matrix, domain=None, range=None, axis=0): # Infer or check domain if domain is None: + dtype = self.array_backend.identifier_of_dtype(self.matrix.dtype) domain = tensor_space((self.matrix.shape[1],), - dtype=self.matrix.dtype) + dtype=dtype, + impl = self.array_backend.impl, + device = self.device + ) else: if not isinstance(domain, TensorSpace): raise TypeError('`domain` must be a `TensorSpace` ' 'instance, got {!r}'.format(domain)) - if scipy.sparse.isspmatrix(self.matrix) and domain.ndim > 1: + if self.is_sparse and domain.ndim > 1: raise ValueError('`domain.ndim` > 1 unsupported for ' 'scipy sparse matrices') @@ -832,14 +1091,19 @@ def __init__(self, matrix, domain=None, range=None, axis=0): if range is None: # Infer range - range_dtype = np.promote_types(self.matrix.dtype, domain.dtype) + range_dtype = self.__arr_ns.result_type( + self.matrix.dtype, domain.dtype) + range_dtype = self.array_backend.identifier_of_dtype(range_dtype) if (range_shape != domain.shape and isinstance(domain.weighting, ArrayWeighting)): # Cannot propagate weighting due to size mismatch. weighting = None else: weighting = domain.weighting - range = tensor_space(range_shape, dtype=range_dtype, + range = tensor_space(range_shape, + impl = self.array_backend.impl, + device=self.device, + dtype=range_dtype, weighting=weighting, exponent=domain.exponent) else: @@ -853,8 +1117,8 @@ def __init__(self, matrix, domain=None, range=None, axis=0): ''.format(tuple(range_shape), range.shape)) # Check compatibility of data types - result_dtype = np.promote_types(domain.dtype, self.matrix.dtype) - if not np.can_cast(result_dtype, range.dtype): + result_dtype = self.__arr_ns.result_type(domain.dtype, self.matrix.dtype) + if not can_cast(self.__arr_ns, result_dtype, range.dtype): raise ValueError('result data type {} cannot be safely cast to ' 'range data type {}' ''.format(dtype_repr(result_dtype), @@ -862,11 +1126,35 @@ def __init__(self, matrix, domain=None, range=None, axis=0): super(MatrixOperator, self).__init__(domain, range, linear=True) + @property + def is_sparse(self): + return self._sparse_format is not None + @property def matrix(self): """Matrix representing this operator.""" return self.__matrix + @property + def array_backend(self): + """Backend on which to carry out the BLAS matmul operation. + Note that this does not necessarily have to be the same as + either the range or domain of the operator, but by default it will + be chosen such. If a different backend and/or device is used, the + operator will always copy data to `self.array_backend` before + carrying out the matrix multiplication, then copy the result to + `self.range.array_backend`. Such copies should generally be avoided + as they can be slow, but they can sometimes be justified if memory + is scarce on one of the devices. + """ + return self.__array_backend + + @property + def device(self): + """Computational device on which to carry out the BLAS operation. + See remarks on `array_backend`.""" + return self.__device + @property def axis(self): """Axis of domain elements over which is summed.""" @@ -896,56 +1184,31 @@ def inverse(self): Returns ------- inverse : `MatrixOperator` - """ - # Lazy import to improve `import odl` time - import scipy.sparse - - if scipy.sparse.isspmatrix(self.matrix): - dense_matrix = self.matrix.toarray() + """ + if self.is_sparse: + matrix = self._sparse_format.to_dense(self.matrix) else: - dense_matrix = self.matrix + matrix = self.matrix + return MatrixOperator(self.__arr_ns.linalg.inv(matrix), + domain=self.range, range=self.domain, + axis=self.axis, impl=self.domain.impl, device=self.domain.device) - return MatrixOperator(np.linalg.inv(dense_matrix), - domain=self.range, range=self.domain, - axis=self.axis) - - def _call(self, x, out=None): + def _call(self, x): """Return ``self(x[, out])``.""" - # Lazy import to improve `import odl` time - import scipy.sparse - if out is None: - if scipy.sparse.isspmatrix(self.matrix): - out = self.matrix.dot(x) - else: - dot = np.tensordot(self.matrix, x, axes=(1, self.axis)) - # New axis ends up as first, need to swap it to its place - out = np.moveaxis(dot, 0, self.axis) + if self.is_sparse: + out = self._sparse_format.matmul_spmatrix_with_vector(self.matrix, x.data) else: - if scipy.sparse.isspmatrix(self.matrix): - # Unfortunately, there is no native in-place dot product for - # sparse matrices - out[:] = self.matrix.dot(x) - elif self.range.ndim == 1: - with writable_array(out) as out_arr: - self.matrix.dot(x, out=out_arr) - else: - # Could use einsum to have out, but it's damn slow - # TODO: investigate speed issue - dot = np.tensordot(self.matrix, x, axes=(1, self.axis)) - # New axis ends up as first, need to move it to its place - out[:] = np.moveaxis(dot, 0, self.axis) + dot = self.__arr_ns.tensordot(self.matrix, x.data, axes=([1], [self.axis])) + # New axis ends up as first, need to swap it to its place + out = self.__arr_ns.moveaxis(dot, 0, self.axis) return out def __repr__(self): """Return ``repr(self)``.""" - # Lazy import to improve `import odl` time - import scipy.sparse - # Matrix printing itself in an executable way (for dense matrix) - if scipy.sparse.isspmatrix(self.matrix): - # Don't convert to dense, can take forever + if self.is_sparse or self.array_backend.impl != 'numpy': matrix_str = repr(self.matrix) else: matrix_str = np.array2string(self.matrix, separator=', ') @@ -954,11 +1217,23 @@ def __repr__(self): # Optional arguments with defaults, inferred from the matrix range_shape = list(self.domain.shape) range_shape[self.axis] = self.matrix.shape[0] + + try: + default_domain = tensor_space(self.matrix.shape[1], + impl=self.array_backend.impl, + dtype=self.matrix.dtype) + except (ValueError, TypeError): + default_domain = None + try: + default_range = tensor_space(range_shape, + impl=self.array_backend.impl, + dtype=self.matrix.dtype) + except (ValueError, TypeError): + default_range = None + optargs = [ - ('domain', self.domain, tensor_space(self.matrix.shape[1], - self.matrix.dtype)), - ('range', self.range, tensor_space(range_shape, - self.matrix.dtype)), + ('domain', self.domain, default_domain), + ('range', self.range, default_range), ('axis', self.axis, 0) ] @@ -1121,7 +1396,8 @@ def __init__(self, domain, sampling_points, variant='point_eval'): if self.variant not in ('point_eval', 'integrate'): raise ValueError('`variant` {!r} not understood'.format(variant)) - ran = tensor_space(self.sampling_points[0].size, dtype=domain.dtype) + # Propagating the impl and device of the range + ran = tensor_space(self.sampling_points[0].size, dtype=domain.dtype, impl=domain.impl, device=domain.device) super(SamplingOperator, self).__init__(domain, ran, linear=True) @property @@ -1311,15 +1587,22 @@ def __init__(self, range, sampling_points, variant='char_fun'): indices_flat = np.ravel_multi_index(self.sampling_points, dims=range.shape) if np.isscalar(indices_flat): - self._indices_flat = np.array([indices_flat], dtype=int) + indices_flat = np.array([indices_flat], dtype=int) else: - self._indices_flat = indices_flat + indices_flat = np.array(indices_flat, dtype=int) + + ### Always converting the indices to the right data type + self._indices_flat = range.array_backend.array_constructor(indices_flat, dtype=int, device=range.device) self.__variant = str(variant).lower() if self.variant not in ('dirac', 'char_fun'): raise ValueError('`variant` {!r} not understood'.format(variant)) + + # Recording the namespace for bincount + self.namespace = range.array_backend.array_namespace - domain = tensor_space(self.sampling_points[0].size, dtype=range.dtype) + # Propagating the impl and device of the range + domain = tensor_space(self.sampling_points[0].size, dtype=range.dtype, impl=range.impl, device=range.device) super(WeightedSumSamplingOperator, self).__init__( domain, range, linear=True) @@ -1335,7 +1618,7 @@ def sampling_points(self): def _call(self, x): """Sum all values if indices are given multiple times.""" - y = np.bincount(self._indices_flat, weights=x, + y = self.namespace.bincount(self._indices_flat, weights=x.data, minlength=self.range.size) out = y.reshape(self.range.shape) @@ -1416,17 +1699,13 @@ class FlatteningOperator(Operator): the domain is a discrete function space. """ - def __init__(self, domain, order='C'): + def __init__(self, domain): """Initialize a new instance. Parameters ---------- domain : `TensorSpace` Set of elements on which this operator acts. - order : {'C', 'F'}, optional - If provided, flattening is performed in this order. ``'C'`` - means that that the last index is changing fastest, while in - ``'F'`` ordering, the first index changes fastest. Examples -------- @@ -1438,29 +1717,17 @@ def __init__(self, domain, order='C'): ... [4, 5, 6]]) >>> op(x) rn(6).element([ 1., 2., 3., 4., 5., 6.]) - >>> op = odl.FlatteningOperator(space, order='F') - >>> op(x) - rn(6).element([ 1., 4., 2., 5., 3., 6.]) """ if not isinstance(domain, TensorSpace): raise TypeError('`domain` must be a `TensorSpace` instance, got ' '{!r}'.format(domain)) - self.__order = str(order).upper() - if self.order not in ('C', 'F'): - raise ValueError('`order` {!r} not understood'.format(order)) - range = tensor_space(domain.size, dtype=domain.dtype) super(FlatteningOperator, self).__init__(domain, range, linear=True) def _call(self, x): """Flatten ``x``.""" - return np.ravel(x, order=self.order) - - @property - def order(self): - """order of the flattening operation.""" - return self.__order + return self.range.element(x.data.reshape([self.range.shape[0]])) @property def adjoint(self): @@ -1500,12 +1767,6 @@ def inverse(self): [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.]] ) - >>> op = odl.FlatteningOperator(space, order='F') - >>> op.inverse(y) - uniform_discr([-1., -1.], [ 1., 1.], (2, 4)).element( - [[ 1., 3., 5., 7.], - [ 2., 4., 6., 8.]] - ) >>> op(op.inverse(y)) == y True """ @@ -1528,8 +1789,7 @@ def __init__(self): def _call(self, x): """Reshape ``x`` back to n-dim. shape.""" - return np.reshape(x.asarray(), self.range.shape, - order=op.order) + return np.reshape(x.asarray(), self.range.shape) @property def adjoint(self): @@ -1554,7 +1814,6 @@ def __str__(self): def __repr__(self): """Return ``repr(self)``.""" posargs = [self.domain] - optargs = [('order', self.order, 'C')] sig_str = signature_string(posargs, optargs, mod=['!r', ''], sep=['', '', ',\n']) return '{}(\n{}\n)'.format(self.__class__.__name__, indent(sig_str)) @@ -1632,5 +1891,5 @@ def is_compatible_space(space, base_space): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/phantom/__init__.py b/odl/core/phantom/__init__.py similarity index 100% rename from odl/phantom/__init__.py rename to odl/core/phantom/__init__.py diff --git a/odl/phantom/emission.py b/odl/core/phantom/emission.py similarity index 97% rename from odl/phantom/emission.py rename to odl/core/phantom/emission.py index 4052498e4cf..0dd8ffa2d9a 100644 --- a/odl/phantom/emission.py +++ b/odl/core/phantom/emission.py @@ -10,8 +10,8 @@ from __future__ import absolute_import, division, print_function -from odl.phantom.geometric import ellipsoid_phantom -from odl.phantom.phantom_utils import cylinders_from_ellipses +from odl.core.phantom.geometric import ellipsoid_phantom +from odl.core.phantom.phantom_utils import cylinders_from_ellipses __all__ = ('derenzo_sources',) @@ -150,7 +150,7 @@ def derenzo_sources(space, min_pt=None, max_pt=None): if __name__ == '__main__': # Show the phantoms import odl - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests n = 300 diff --git a/odl/phantom/geometric.py b/odl/core/phantom/geometric.py similarity index 95% rename from odl/phantom/geometric.py rename to odl/core/phantom/geometric.py index 9f8e421c80b..f1831b80e02 100644 --- a/odl/phantom/geometric.py +++ b/odl/core/phantom/geometric.py @@ -12,8 +12,8 @@ import numpy as np -from odl.discr.discr_space import uniform_discr_fromdiscr -from odl.util.numerics import resize_array +from odl.core.discr.discr_space import uniform_discr_fromdiscr +from odl.core.util.numerics import resize_array __all__ = ( 'cuboid', @@ -50,7 +50,7 @@ def cuboid(space, min_pt=None, max_pt=None): middle of the space domain and extends halfway towards all sides: >>> space = odl.uniform_discr([0, 0], [1, 1], [4, 6]) - >>> odl.phantom.cuboid(space) + >>> odl.core.phantom.cuboid(space) uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element( [[ 0., 0., 0., 0., 0., 0.], [ 0., 1., 1., 1., 1., 0.], @@ -61,7 +61,7 @@ def cuboid(space, min_pt=None, max_pt=None): By specifying the corners, the cuboid can be arbitrarily placed and scaled: - >>> odl.phantom.cuboid(space, [0.25, 0], [0.75, 0.5]) + >>> odl.core.phantom.cuboid(space, [0.25, 0], [0.75, 0.5]) uniform_discr([ 0., 0.], [ 1., 1.], (4, 6)).element( [[ 0., 0., 0., 0., 0., 0.], [ 1., 1., 1., 0., 0., 0.], @@ -89,10 +89,17 @@ def cuboid(space, min_pt=None, max_pt=None): def phantom(x): result = True - for xi, xmin, xmax in zip(x, min_pt, max_pt): + xmin = space.array_backend.array_constructor( + xmin, device=space.device + ) + xmax = space.array_backend.array_constructor( + xmax, device=space.device + ) result = (result & - np.less_equal(xmin, xi) & np.less_equal(xi, xmax)) + space.array_namespace.less_equal(xmin, xi) & + space.array_namespace.less_equal(xi, xmax) + ) return result return space.element(phantom) @@ -135,7 +142,7 @@ def defrise(space, nellipses=8, alternating=False, min_pt=None, max_pt=None): See Also -------- - odl.phantom.transmission.shepp_logan + odl.core.phantom.transmission.shepp_logan """ ellipses = defrise_ellipses(space.ndim, nellipses=nellipses, alternating=alternating) @@ -158,9 +165,9 @@ def defrise_ellipses(ndim, nellipses=8, alternating=False): See Also -------- - odl.phantom.geometric.ellipsoid_phantom : + odl.core.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoids phantoms - odl.phantom.transmission.shepp_logan_ellipsoids + odl.core.phantom.transmission.shepp_logan_ellipsoids """ ellipses = [] if ndim == 2: @@ -224,7 +231,7 @@ def indicate_proj_axis(space, scale_structures=0.5): >>> space = odl.uniform_discr([0, 0], [1, 1], shape=(8, 8)) >>> phantom = indicate_proj_axis(space).asarray() - >>> print(odl.util.array_str(phantom, nprint=10)) + >>> print(odl.core.util.array_str(phantom, nprint=10)) [[ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 1., 1., 0., 0., 0.], [ 0., 0., 0., 1., 1., 0., 0., 0.], @@ -235,9 +242,9 @@ def indicate_proj_axis(space, scale_structures=0.5): [ 0., 0., 0., 0., 0., 0., 0., 0.]] >>> space = odl.uniform_discr([0] * 3, [1] * 3, [8, 8, 8]) - >>> phantom = odl.phantom.indicate_proj_axis(space).asarray() + >>> phantom = odl.core.phantom.indicate_proj_axis(space).asarray() >>> axis_sum_0 = np.sum(phantom, axis=0) - >>> print(odl.util.array_str(axis_sum_0, nprint=10)) + >>> print(odl.core.util.array_str(axis_sum_0, nprint=10)) [[ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0.], @@ -247,7 +254,7 @@ def indicate_proj_axis(space, scale_structures=0.5): [ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0.]] >>> axis_sum_1 = np.sum(phantom, axis=1) - >>> print(odl.util.array_str(axis_sum_1, nprint=10)) + >>> print(odl.core.util.array_str(axis_sum_1, nprint=10)) [[ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 2., 2., 0., 0., 0.], [ 0., 0., 0., 2., 2., 0., 0., 0.], @@ -257,7 +264,7 @@ def indicate_proj_axis(space, scale_structures=0.5): [ 0., 0., 0., 1., 1., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0.]] >>> axis_sum_2 = np.sum(phantom, axis=2) - >>> print(odl.util.array_str(axis_sum_2, nprint=10)) + >>> print(odl.core.util.array_str(axis_sum_2, nprint=10)) [[ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 2., 2., 0., 0., 0.], [ 0., 0., 0., 2., 2., 0., 0., 0.], @@ -360,7 +367,7 @@ def _ellipse_phantom_2d(space, ellipses): shepp_logan : The typical use-case for this function. """ # Blank image - p = np.zeros(space.shape, dtype=space.dtype) + p = np.zeros(space.shape, dtype=space.dtype_identifier) minp = space.grid.min_pt maxp = space.grid.max_pt @@ -484,7 +491,7 @@ def _ellipsoid_phantom_3d(space, ellipsoids): shepp_logan : The typical use-case for this function. """ # Blank volume - p = np.zeros(space.shape, dtype=space.dtype) + p = np.zeros(space.shape, dtype=space.dtype_identifier) minp = space.grid.min_pt maxp = space.grid.max_pt @@ -658,11 +665,11 @@ def ellipsoid_phantom(space, ellipsoids, min_pt=None, max_pt=None): See Also -------- - odl.phantom.transmission.shepp_logan : Classical Shepp-Logan phantom, + odl.core.phantom.transmission.shepp_logan : Classical Shepp-Logan phantom, typically used for transmission imaging - odl.phantom.transmission.shepp_logan_ellipsoids : Ellipses for the + odl.core.phantom.transmission.shepp_logan_ellipsoids : Ellipses for the Shepp-Logan phantom - odl.phantom.geometric.defrise_ellipses : Ellipses for the + odl.core.phantom.geometric.defrise_ellipses : Ellipses for the Defrise phantom """ if space.ndim == 2: @@ -905,5 +912,5 @@ def sigmoid(val): defrise(space).show('defrise 3D', coords=[0, None, None]) # Run also the doctests - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/phantom/misc_phantoms.py b/odl/core/phantom/misc_phantoms.py similarity index 99% rename from odl/phantom/misc_phantoms.py rename to odl/core/phantom/misc_phantoms.py index bd35aad0526..d3b1f76c9d0 100644 --- a/odl/phantom/misc_phantoms.py +++ b/odl/core/phantom/misc_phantoms.py @@ -255,7 +255,7 @@ def text(space, text, font=None, border=0.2, inverted=True): if __name__ == '__main__': # Show the phantoms import odl - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests space = odl.uniform_discr([-1, -1], [1, 1], [300, 300]) submarine(space, smooth=False).show('submarine smooth=False') diff --git a/odl/phantom/noise.py b/odl/core/phantom/noise.py similarity index 92% rename from odl/phantom/noise.py rename to odl/core/phantom/noise.py index fb02c8a4a83..c2954d0dc47 100644 --- a/odl/phantom/noise.py +++ b/odl/core/phantom/noise.py @@ -12,7 +12,8 @@ import numpy as np -from odl.util import npy_random_seed +from odl.core.util import npy_random_seed +from odl.core.space.base_tensors import Tensor __all__ = ('white_noise', 'poisson_noise', 'salt_pepper_noise', 'uniform_noise') @@ -47,7 +48,7 @@ def white_noise(space, mean=0, stddev=1, seed=None): salt_pepper_noise numpy.random.normal """ - from odl.space import ProductSpace + from odl.core.space import ProductSpace with npy_random_seed(seed): if isinstance(space, ProductSpace): @@ -61,8 +62,12 @@ def white_noise(space, mean=0, stddev=1, seed=None): loc=mean.imag, scale=stddev, size=space.shape) values = real + 1j * imag else: - values = np.random.normal( - loc=mean, scale=stddev, size=space.shape) + if isinstance(mean, Tensor): + values = np.random.normal( + loc=mean.data, scale=stddev, size=space.shape) + else: + values = np.random.normal( + loc=mean, scale=stddev, size=space.shape) return space.element(values) @@ -99,7 +104,7 @@ def uniform_noise(space, low=0, high=1, seed=None): white_noise numpy.random.normal """ - from odl.space import ProductSpace + from odl.core.space import ProductSpace with npy_random_seed(seed): if isinstance(space, ProductSpace): @@ -153,7 +158,7 @@ def poisson_noise(intensity, seed=None): uniform_noise numpy.random.poisson """ - from odl.space import ProductSpace + from odl.core.space import ProductSpace with npy_random_seed(seed): if isinstance(intensity.space, ProductSpace): @@ -205,7 +210,7 @@ def salt_pepper_noise(vector, fraction=0.05, salt_vs_pepper=0.5, poisson_noise uniform_noise """ - from odl.space import ProductSpace + from odl.core.space import ProductSpace # Validate input parameters fraction, fraction_in = float(fraction), fraction @@ -250,7 +255,7 @@ def salt_pepper_noise(vector, fraction=0.05, salt_vs_pepper=0.5, if __name__ == '__main__': # Show the phantoms import odl - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests r100 = odl.rn(100) white_noise(r100).show('white_noise') @@ -265,7 +270,7 @@ def salt_pepper_noise(vector, fraction=0.05, salt_vs_pepper=0.5, white_noise(discr).show('white_noise 2d') uniform_noise(discr).show('uniform_noise 2d') - vector = odl.phantom.shepp_logan(discr, modified=True) + vector = odl.core.phantom.shepp_logan(discr, modified=True) poisson_noise(vector * 100).show('poisson_noise 2d') salt_pepper_noise(vector).show('salt_pepper_noise 2d') diff --git a/odl/phantom/phantom_utils.py b/odl/core/phantom/phantom_utils.py similarity index 93% rename from odl/phantom/phantom_utils.py rename to odl/core/phantom/phantom_utils.py index b4f12faeb08..11d5d07aa4d 100644 --- a/odl/phantom/phantom_utils.py +++ b/odl/core/phantom/phantom_utils.py @@ -26,5 +26,5 @@ def cylinders_from_ellipses(ellipses): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/phantom/transmission.py b/odl/core/phantom/transmission.py similarity index 97% rename from odl/phantom/transmission.py rename to odl/core/phantom/transmission.py index 01ea73dd33c..30a88d9db5a 100644 --- a/odl/phantom/transmission.py +++ b/odl/core/phantom/transmission.py @@ -12,8 +12,8 @@ import numpy as np -from odl.discr import DiscretizedSpace -from odl.phantom.geometric import ellipsoid_phantom +from odl.core.discr import DiscretizedSpace +from odl.core.phantom.geometric import ellipsoid_phantom __all__ = ('shepp_logan_ellipsoids', 'shepp_logan', 'forbild') @@ -90,7 +90,7 @@ def shepp_logan_ellipsoids(ndim, modified=False): See Also -------- - odl.phantom.geometric.ellipsoid_phantom : + odl.core.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoids phantoms shepp_logan : Create a phantom with these ellipsoids @@ -141,9 +141,9 @@ def shepp_logan(space, modified=False, min_pt=None, max_pt=None): See Also -------- forbild : Similar phantom but with more complexity. Only supports 2d. - odl.phantom.geometric.defrise : Geometry test phantom + odl.core.phantom.geometric.defrise : Geometry test phantom shepp_logan_ellipsoids : Get the parameters that define this phantom - odl.phantom.geometric.ellipsoid_phantom : + odl.core.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoid phantoms References @@ -405,7 +405,7 @@ def transposeravel(arr): if __name__ == '__main__': # Show the phantoms import odl - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests # 2D discr = odl.uniform_discr([-1, -1], [1, 1], [1000, 1000]) diff --git a/odl/set/__init__.py b/odl/core/set/__init__.py similarity index 100% rename from odl/set/__init__.py rename to odl/core/set/__init__.py diff --git a/odl/set/domain.py b/odl/core/set/domain.py similarity index 99% rename from odl/set/domain.py rename to odl/core/set/domain.py index 524b9bce33d..905f536ba73 100644 --- a/odl/set/domain.py +++ b/odl/core/set/domain.py @@ -11,10 +11,10 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.set.sets import Set -from odl.util import ( +from odl.core.set.sets import Set +from odl.core.util import ( array_str, is_valid_input_array, is_valid_input_meshgrid, safe_int_conv) @@ -358,7 +358,7 @@ def contains_all(self, other, atol=0.0): Implicit meshgrids defined by coordinate vectors: - >>> from odl.discr.grid import sparse_meshgrid + >>> from odl.core.discr.grid import sparse_meshgrid >>> vec1 = (-1, -0.9, -0.7) >>> vec2 = (0, 0, 0) >>> vec3 = (2.5, 2.75, 3) @@ -713,7 +713,7 @@ def corners(self, order='C'): [-1. , 3. , 0.5], [-0.5, 3. , 0.5]]) """ - from odl.discr.grid import RectGrid + from odl.core.discr.grid import RectGrid minmax_vecs = [0] * self.ndim for axis in np.where(~self.nondegen_byaxis)[0]: @@ -854,5 +854,5 @@ def __str__(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/set/sets.py b/odl/core/set/sets.py similarity index 99% rename from odl/set/sets.py rename to odl/core/set/sets.py index 8095fdb2a79..4d6e3cb0ce1 100644 --- a/odl/set/sets.py +++ b/odl/core/set/sets.py @@ -16,7 +16,7 @@ import numpy as np from past.types.basestring import basestring -from odl.util import is_int_dtype, is_numeric_dtype, is_real_dtype, unique +from odl.core.util import is_int_dtype, is_numeric_dtype, is_real_dtype, unique __all__ = ('Set', 'EmptySet', 'UniversalSet', 'Field', 'Integers', 'RealNumbers', 'ComplexNumbers', 'Strings', 'CartesianProduct', @@ -942,5 +942,5 @@ def __repr__(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/set/space.py b/odl/core/set/space.py similarity index 62% rename from odl/set/space.py rename to odl/core/set/space.py index 9cede78722b..7d1dc164dfd 100644 --- a/odl/set/space.py +++ b/odl/core/set/space.py @@ -13,8 +13,10 @@ from enum import Enum from dataclasses import dataclass import numpy as np +from numbers import Number +from typing import Union -from odl.set.sets import Field, Set, UniversalSet +from odl.core.set.sets import Field, Set, UniversalSet __all__ = ('LinearSpace', 'UniversalSpace') @@ -365,88 +367,35 @@ def inner(self, x1, x2): else: return self.field.element(self._inner(x1, x2)) - def _binary_num_operation(self, low_level_method, x1, x2, out=None): - """Apply the numerical operation implemented by `low_level_method` to + def _elementwise_num_operation(self, operation:str + , x1: Union["LinearSpaceElement", Number] + , x2: Union[None, "LinearSpaceElement", Number] = None + , out=None + , namespace=None + , **kwargs ): + """TODO(Justus) rewrite docstring + Apply the numerical operation implemented by `low_level_method` to `x1` and `x2`. This is done either in in-place fashion or out-of-place, depending on which style is preferred for this space.""" - paradigms = self.supported_num_operation_paradigms - - if x1 not in self: - raise LinearSpaceTypeError('`x1` {!r} is not an element of ' - '{!r}'.format(x1, self)) - if x2 not in self: - raise LinearSpaceTypeError('`x2` {!r} is not an element of ' - '{!r}'.format(x2, self)) - - if out is not None and out not in self: - raise LinearSpaceTypeError('`out` {!r} is not an element of ' - '{!r}'.format(out, self)) - - if (paradigms.in_place.is_preferred - or not paradigms.out_of_place.is_supported - or out is not None and paradigms.in_place.is_supported): - - if out is None: - out = self.element() - - low_level_method(x1, x2, out=out) - - return out - - else: - assert(paradigms.out_of_place.is_supported) - result = self.element(low_level_method(x1, x2, out=None)) - if out is not None: - out.assign(result, avoid_deep_copy=True) - return out - else: - return result + raise NotImplementedError("abstract method") - def multiply(self, x1, x2, out=None): - """Return the pointwise product of ``x1`` and ``x2``. - - Parameters - ---------- - x1, x2 : `LinearSpaceElement` - Multiplicands in the product. - out : `LinearSpaceElement`, optional - Element to which the result is written. - - Returns - ------- - out : `LinearSpaceElement` - Product of the elements. If ``out`` was provided, the - returned object is a reference to it. - """ - return self._binary_num_operation(self._multiply, x1, x2, out) - - def divide(self, x1, x2, out=None): - """Return the pointwise quotient of ``x1`` and ``x2`` - - Parameters - ---------- - x1 : `LinearSpaceElement` - Dividend in the quotient. - x2 : `LinearSpaceElement` - Divisor in the quotient. - out : `LinearSpaceElement`, optional - Element to which the result is written. - - Returns - ------- - out : `LinearSpaceElement` - Quotient of the elements. If ``out`` was provided, the - returned object is a reference to it. - """ - return self._binary_num_operation(self._divide, x1, x2, out) + def _element_reduction(self, operation:str + , x: "LinearSpaceElement" + , **kwargs + ): + raise NotImplementedError("abstract method") @property def element_type(self): """Type of elements of this space (`LinearSpaceElement`).""" return LinearSpaceElement + def __str__(self): + """Return ``str(self)``.""" + return repr(self) + def __pow__(self, shape): """Return ``self ** shape``. @@ -468,7 +417,7 @@ def __pow__(self, shape): >>> r2 ** (4, 2) ProductSpace(ProductSpace(rn(2), 4), 2) """ - from odl.space import ProductSpace + from odl.core.space import ProductSpace try: shape = (int(shape),) @@ -498,7 +447,7 @@ def __mul__(self, other): >>> r2 * r3 ProductSpace(rn(2), rn(3)) """ - from odl.space import ProductSpace + from odl.core.space import ProductSpace if not isinstance(other, LinearSpace): raise TypeError('Can only multiply with `LinearSpace`, got {!r}' @@ -594,320 +543,176 @@ def set_zero(self): -------- LinearSpace.zero """ - return self.space.lincomb(0, self, 0, self, out=self) + self.assign(self.space.zero()) + return self # Convenience methods - def __iadd__(self, other): - """Implement ``self += other``.""" - if self.space.field is None: - return NotImplemented - elif other in self.space: - return self.space.lincomb(1, self, 1, other, out=self) - elif isinstance(other, LinearSpaceElement): - # We do not `return NotImplemented` here since we don't want a - # fallback for in-place. Otherwise python attempts - # `self = self + other` which does not modify self. - raise TypeError('cannot add {!r} and {!r} in-place' - ''.format(self, other)) - elif other in self.space.field: - one = getattr(self.space, 'one', None) - if one is None: - raise TypeError('cannot add {!r} and {!r} in-place' - ''.format(self, other)) - else: - # other --> other * space.one() - return self.space.lincomb(1, self, other, one(), out=self) - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - raise TypeError('cannot add {!r} and {!r} in-place' - ''.format(self, other)) - else: - return self.__iadd__(other) + # def __iadd__(self, other): + # """Implement ``self += other``.""" + # if self.space.field is None: + # return NotImplemented + # elif other in self.space: + # return self.space.lincomb(1, self, 1, other, out=self) + # elif isinstance(other, LinearSpaceElement): + # # We do not `return NotImplemented` here since we don't want a + # # fallback for in-place. Otherwise python attempts + # # `self = self + other` which does not modify self. + # raise TypeError('cannot add {!r} and {!r} in-place' + # ''.format(self, other)) + # elif other in self.space.field: + # one = getattr(self.space, 'one', None) + # if one is None: + # raise TypeError('cannot add {!r} and {!r} in-place' + # ''.format(self, other)) + # else: + # # other --> other * space.one() + # return self.space.lincomb(1, self, other, one(), out=self) + # else: + # try: + # other = self.space.element(other) + # except (TypeError, ValueError): + # raise TypeError('cannot add {!r} and {!r} in-place' + # ''.format(self, other)) + # else: + # return self.__iadd__(other) def __add__(self, other): """Return ``self + other``.""" - # Instead of using __iadd__ we duplicate code here for performance - if getattr(other, '__array_priority__', 0) > self.__array_priority__: - return other.__radd__(self) - elif self.space.field is None: - return NotImplemented - elif other in self.space: - return self.space.lincomb(1, self, 1, other) - elif isinstance(other, LinearSpaceElement): - return NotImplemented - elif other in self.space.field: - one = getattr(self.space, 'one', None) - if one is None: - return NotImplemented - else: - tmp = one() - return self.space.lincomb(1, self, other, tmp, out=tmp) - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - return NotImplemented - else: - return self.__add__(other) + return self.space._elementwise_num_operation( + 'add', self, other + ) + + def __sub__(self, other): + """Return ``self - other``.""" + return self.space._elementwise_num_operation( + 'subtract', self, other + ) + + def __mul__(self, other): + """Return ``self * other``.""" + return self.space._elementwise_num_operation( + 'multiply', self, other + ) + + def __truediv__(self, other): + """Implement ``self / other``.""" + if isinstance(other, Number) and other == 0: + raise ZeroDivisionError + return self.space._elementwise_num_operation( + 'divide', self, other + ) + + def __floordiv__(self, other): + """Implement ``self // other``.""" + return self.space._elementwise_num_operation( + 'floor_divide', self, other + ) + + def __mod__(self, other): + """Implement ``self % other``.""" + return self.space._elementwise_num_operation( + 'remainder', self, other + ) + + def __pow__(self, other): + """Implement ``self ** other``, element wise""" + return self.space._elementwise_num_operation( + 'pow', self, other + ) def __radd__(self, other): """Return ``other + self``.""" - if getattr(other, '__array_priority__', 0) > self.__array_priority__: - return other.__add__(self) - else: - return self.__add__(other) - - def __isub__(self, other): - """Implement ``self -= other``.""" - if self.space.field is None: - return NotImplemented - elif other in self.space: - return self.space.lincomb(1, self, -1, other, out=self) - elif isinstance(other, LinearSpaceElement): - # We do not `return NotImplemented` here since we don't want a - # fallback for in-place. Otherwise python attempts - # `self = self - other` which does not modify self. - raise TypeError('cannot subtract {!r} and {!r} in-place' - ''.format(self, other)) - elif self.space.field is None: - return NotImplemented - elif other in self.space.field: - one = getattr(self.space, 'one', None) - if one is None: - raise TypeError('cannot subtract {!r} and {!r} in-place' - ''.format(self, other)) - else: - return self.space.lincomb(1, self, -other, one(), out=self) - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - raise TypeError('cannot subtract {!r} and {!r} in-place' - ''.format(self, other)) - else: - return self.__isub__(other) - - def __sub__(self, other): - """Return ``self - other``.""" - # Instead of using __isub__ we duplicate code here for performance - if getattr(other, '__array_priority__', 0) > self.__array_priority__: - return other.__rsub__(self) - elif self.space.field is None: - return NotImplemented - elif other in self.space: - return self.space.lincomb(1, self, -1, other) - elif isinstance(other, LinearSpaceElement): - return NotImplemented - elif other in self.space.field: - one = getattr(self.space, 'one', None) - if one is None: - return NotImplemented - else: - tmp = one() - return self.space.lincomb(1, self, -other, tmp, out=tmp) - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - return NotImplemented - else: - return self.__sub__(other) + return self.space._elementwise_num_operation( + 'add', other, self + ) def __rsub__(self, other): """Return ``other - self``.""" - if getattr(other, '__array_priority__', 0) > self.__array_priority__: - return other.__sub__(self) - elif self.space.field is None: - return NotImplemented - elif other in self.space: - tmp = self.space.element() - return self.space.lincomb(1, other, -1, self, out=tmp) - elif isinstance(other, LinearSpaceElement): - return NotImplemented - elif other in self.space.field: - one = getattr(self.space, 'one', None) - if one is None: - return NotImplemented - else: - # other --> other * space.one() - tmp = one() - self.space.lincomb(other, tmp, out=tmp) - return self.space.lincomb(1, tmp, -1, self, out=tmp) - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - return NotImplemented - else: - return self.__rsub__(other) - - def __imul__(self, other): - """Implement ``self *= other``.""" - if self.space.field is None: - return NotImplemented - elif other in self.space.field: - return self.space.lincomb(other, self, out=self) - elif other in self.space: - return self.space.multiply(other, self, out=self) - elif isinstance(other, LinearSpaceElement): - # We do not `return NotImplemented` here since we don't want a - # fallback for in-place. Otherwise python attempts - # `self = self * other` which does not modify self. - raise TypeError('cannot multiply {!r} and {!r} in-place' - ''.format(self, other)) - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - raise TypeError('cannot multiply {!r} and {!r} in-place' - ''.format(self, other)) - else: - return self.__imul__(other) - - def __mul__(self, other): - """Return ``self * other``.""" - # Instead of using __imul__ we duplicate code here for performance - if getattr(other, '__array_priority__', 0) > self.__array_priority__: - return other.__rmul__(self) - elif self.space.field is None: - return NotImplemented - elif other in self.space.field: - return self.space.lincomb(other, self) - elif other in self.space: - return self.space.multiply(other, self) - elif isinstance(other, LinearSpaceElement): - return NotImplemented - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - return NotImplemented - else: - return self.__mul__(other) - + return self.space._elementwise_num_operation( + 'subtract', other, self + ) + def __rmul__(self, other): """Return ``other * self``.""" - if getattr(other, '__array_priority__', 0) > self.__array_priority__: - return other.__mul__(self) - else: - return self.__mul__(other) - + return self.space._elementwise_num_operation( + 'multiply', other, self + ) + + def __rtruediv__(self, other): + """Implement ``other / self``.""" + return self.space._elementwise_num_operation( + 'divide', other, self + ) + + def __rfloordiv__(self, other): + """Implement ``other // self``.""" + return self.space._elementwise_num_operation( + 'floor_divide', other, self + ) + + def __rmod__(self, other): + """Implement ``other % self``.""" + return self.space._elementwise_num_operation( + 'remainder', other, self + ) + + def __rpow__(self, other): + """Implement ``other ** self``, element wise""" + return self.space._elementwise_num_operation( + 'pow', other, self + ) + + def __iadd__(self, other): + """Implement ``self += other``.""" + self.space._elementwise_num_operation( + 'add', self, other, self + ) + return self + + def __isub__(self, other): + """Implement ``self -= other``.""" + self.space._elementwise_num_operation( + 'subtract', self, other, self + ) + return self + + def __imul__(self, other): + """Return ``self *= other``.""" + self.space._elementwise_num_operation( + 'multiply', self, other, self + ) + return self + def __itruediv__(self, other): """Implement ``self /= other``.""" - if self.space.field is None: - return NotImplemented - if other in self.space.field: - return self.space.lincomb(1.0 / other, self, out=self) - elif other in self.space: - return self.space.divide(self, other, out=self) - elif isinstance(other, LinearSpaceElement): - # We do not `return NotImplemented` here since we don't want a - # fallback for in-place. Otherwise python attempts - # `self = self / other` which does not modify self. - raise TypeError('cannot divide {!r} and {!r} in-place' - ''.format(self, other)) - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - raise TypeError('cannot divide {!r} and {!r} in-place' - ''.format(self, other)) - else: - return self.__itruediv__(other) - - __idiv__ = __itruediv__ - - def __truediv__(self, other): - """Return ``self / other``.""" - if getattr(other, '__array_priority__', 0) > self.__array_priority__: - return other.__rtruediv__(self) - elif self.space.field is None: - return NotImplemented - elif other in self.space.field: - return self.space.lincomb(1.0 / other, self) - elif other in self.space: - return self.space.divide(self, other) - elif isinstance(other, LinearSpaceElement): - return NotImplemented - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - return NotImplemented - else: - return self.__truediv__(other) - - __div__ = __truediv__ - - def __rtruediv__(self, other): - """Return ``other / self``.""" - if getattr(other, '__array_priority__', 0) > self.__array_priority__: - return other.__truediv__(self) - elif self.space.field is None: - return NotImplemented - elif other in self.space.field: - one = getattr(self.space, 'one', None) - if one is None: - return NotImplemented - else: - # other --> other * space.one() - tmp = one() - self.space.lincomb(other, tmp, out=tmp) - return self.space.divide(tmp, self, out=tmp) - elif other in self.space: - tmp = self.space.element() - return self.space.divide(other, self, out=tmp) - elif isinstance(other, LinearSpaceElement): - return NotImplemented - else: - try: - other = self.space.element(other) - except (TypeError, ValueError): - return NotImplemented - else: - return self.__rtruediv__(other) - - __rdiv__ = __rtruediv__ - - def __ipow__(self, p): - """Implement ``self ** p``. - - This is only defined for integer ``p``.""" - if self.space.field is None: - return NotImplemented - p, p_in = int(p), p - if p != p_in: - raise ValueError('expected integer `p`, got {}'.format(p_in)) - if p < 0: - self **= -p - self.space.divide(self.space.one(), self, out=self) - return self - elif p == 0: - self.assign(self.space.one()) - return self - elif p == 1: - return self - elif p % 2 == 0: - self *= self - self **= p // 2 - return self - else: - tmp = self.copy() - for _ in range(p - 2): - tmp *= self - self *= tmp - return self - - def __pow__(self, p): - """Return ``self ** p``.""" - if self.space.field is None: - return NotImplemented - tmp = self.copy() - tmp.__ipow__(p) - return tmp - + if isinstance(other, Number) and other == 0: + raise ZeroDivisionError + self.space._elementwise_num_operation( + 'divide', self, other, self + ) + return self + + def __ifloordiv__(self, other): + """Implement ``self //= other``.""" + self.space._elementwise_num_operation( + 'floor_divide', self, other, self + ) + return self + + def __imod__(self, other): + """Implement ``self %= other``.""" + self.space._elementwise_num_operation( + 'remainder', self, other, self + ) + return self + + def __ipow__(self, other): + """Implement ``self *= other``, element wise""" + self.space._elementwise_num_operation( + 'pow', self, other, self + ) + return self + def __neg__(self): """Return ``-self``.""" if self.space.field is None: @@ -918,7 +723,22 @@ def __pos__(self): """Return ``+self``.""" return self.copy() - # Metric space method + def __lt__(self, other): + """Implement ``self < other``.""" + return self.space._elementwise_num_operation('less', self, other) + + def __le__(self, other): + """Implement ``self <= other``.""" + return self.space._elementwise_num_operation('less_equal', self, other) + + def __gt__(self, other): + """Implement ``self > other``.""" + return self.space._elementwise_num_operation('greater', self, other) + + def __ge__(self, other): + """Implement ``self >= other``.""" + return self.space._elementwise_num_operation('greater_equal', self, other) + def __eq__(self, other): """Return ``self == other``. @@ -1068,9 +888,20 @@ def T(self): >>> x.T(y) 13.0 """ - from odl.operator import InnerProductOperator + from odl.core.operator import InnerProductOperator return InnerProductOperator(self.copy()) + def __array__(self): + raise RuntimeError(""" + You are trying to convert an ODL object to a plain array, possibly via a NumPy operation. This is not supported in ODL-1.0 anymore because it interferes with the more general Array API and easily leads to confusing results. + + Instead, you should either: + + - Use the ODL operation (e.g. `odl.sin(x)`) + - Unwrap the raw array contained in the ODL object, as `x.data` + - Explicitly convert to NumPy (or another raw array type) via DLPack + """) + # Give an `Element` a higher priority than any NumPy array type. This # forces the usage of `__op__` of `Element` if the other operand # is a NumPy object (applies also to scalars!). @@ -1168,5 +999,5 @@ class LinearSpaceNotImplementedError(NotImplementedError): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/space/__init__.py b/odl/core/space/__init__.py similarity index 81% rename from odl/space/__init__.py rename to odl/core/space/__init__.py index 59368edebf7..571cdd45c1c 100644 --- a/odl/space/__init__.py +++ b/odl/core/space/__init__.py @@ -10,12 +10,12 @@ from __future__ import absolute_import -from . import base_tensors, entry_points, weighting -from .npy_tensors import * +from . import base_tensors, entry_points from .pspace import * from .space_utils import * +from .weightings import * __all__ = () -__all__ += npy_tensors.__all__ __all__ += pspace.__all__ __all__ += space_utils.__all__ +__all__ += weightings.__all__ diff --git a/odl/core/space/base_tensors.py b/odl/core/space/base_tensors.py new file mode 100644 index 00000000000..2143a7cb8f2 --- /dev/null +++ b/odl/core/space/base_tensors.py @@ -0,0 +1,2157 @@ +# Copyright 2014-2020 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Base classes for implementations of tensor spaces.""" + +from __future__ import absolute_import, division, print_function + +from types import ModuleType +from numbers import Integral, Number +import warnings +from contextlib import contextmanager +import numpy as np + +import odl +from odl.core.set.sets import ComplexNumbers, RealNumbers +from odl.core.set.space import ( + LinearSpace, LinearSpaceElement, LinearSpaceTypeError, + SupportedNumOperationParadigms, NumOperationParadigmSupport) +from odl.core.array_API_support import ArrayBackend, lookup_array_backend, check_device +from odl.core.util import ( + array_str, indent, is_complex_dtype, + is_numeric_dtype, is_real_floating_dtype, safe_int_conv, + signature_string) +from odl.core.util.dtype_utils import( + is_real_dtype, is_int_dtype, + is_available_dtype, + _universal_dtype_identifier, + is_floating_dtype, + complex_dtype, + TYPE_PROMOTION_COMPLEX_TO_REAL, + TYPE_PROMOTION_REAL_TO_COMPLEX) +from .weightings.weighting import Weighting, ConstWeighting, ArrayWeighting +from .pspace import ProductSpaceElement + +__all__ = ('TensorSpace',) + +def default_dtype(array_backend: ArrayBackend | str, field=None): + """Return the default data type for a given field. + + Parameters + ---------- + array_backend : `ArrayBackend` or `str` + The implementation, defining what dtypes are available. + If a string is given, it is interpreted as an `impl` + identifier of an array backend from the global registry. + field : `Field`, optional + Set of numbers to be represented by a data type. + Currently supported : `RealNumbers`, `ComplexNumbers` + The default ``None`` means `RealNumbers` + + Returns + ------- + dtype : + Backend data type specifier. + """ + if not isinstance(array_backend, ArrayBackend): + array_backend = lookup_array_backend(array_backend) + if field is None or field == RealNumbers(): + return array_backend.available_dtypes['float64'] + elif field == ComplexNumbers(): + return array_backend.available_dtypes['complex128'] + else: + raise ValueError('no default data type defined for field {}' + ''.format(field)) + +class TensorSpace(LinearSpace): + + """Base class for sets of tensors of arbitrary data type. + + A tensor is, in the most general sense, a multi-dimensional array + that allows operations per entry (keep the rank constant), + reductions / contractions (reduce the rank) and broadcasting + (raises the rank). + For non-numeric data type like ``object``, the range of valid + operations is rather limited since such a set of tensors does not + define a vector space. + Any numeric data type, on the other hand, is considered valid for + a tensor space, although certain operations - like division with + integer dtype - are not guaranteed to yield reasonable results. + + Under these restrictions, all basic vector space operations are + supported by this class, along with reductions based on arithmetic + or comparison, and element-wise mathematical functions ("ufuncs"). + + See the `Wikipedia article on tensors`_ for further details. + See also [Hac2012] "Part I Algebraic Tensors" for a rigorous + treatment of tensors with a definition close to this one. + + Note also that this notion of tensors is the same as in popular + Deep Learning frameworks. + + References + ---------- + [Hac2012] Hackbusch, W. *Tensor Spaces and Numerical Tensor Calculus*. + Springer, 2012. + + .. _Wikipedia article on tensors: https://en.wikipedia.org/wiki/Tensor + """ + + def __init__(self, shape, dtype, device, **kwargs): + """Initialize a new instance. + + Parameters + ---------- + shape : nonnegative int or sequence of nonnegative ints + Number of entries of type ``dtype`` per axis in this space. A + single integer results in a space with rank 1, i.e., 1 axis. + dtype : + Data type of elements in this space. Can be provided + in any way the `numpy.dtype` constructor understands, e.g. + as built-in type or as a string. + For a data type with a ``dtype.shape``, these extra dimensions + are added *to the left* of ``shape``. + """ + # Handle shape and dtype, taking care also of dtypes with shape + self._init_dtype(dtype) + + self._init_shape(shape, dtype) + + self._init_device(device) + + self.__use_in_place_ops = kwargs.pop('use_in_place_ops', True) + + self._init_weighting(**kwargs) + + field = self._init_field() + + LinearSpace.__init__(self, field) + + ################ Init Methods, Non static ################ + def _init_device(self, device:str): + """ + Checks that the backend accepts the device passed as an argument. + + Parameters + ---------- + device : str + Device identifier + """ + self.__device = odl.check_device(self.impl, device) + + def _init_dtype(self, dtype:str | int | float | complex): + """ + Process the dtype argument. This parses the (str or Number) dtype input argument to a backend.dtype and sets two attributes + + self.dtype_identifier (str) -> Used for passing dtype information from one backend to another + self.__dtype (backend.dtype) -> Actual dtype of the TensorSpace implementation + + Note: + The check below is here just in case a user initialise a space directly from this class, which is not recommended + """ + + available_dtypes = self.array_backend.available_dtypes + identifier = _universal_dtype_identifier(dtype, array_backend_selection=[self.array_backend]) + + if identifier in available_dtypes.keys(): + self.__dtype_identifier = identifier + self.__dtype = available_dtypes[identifier] + # If that fails, we throw an error: the dtype is not a python scalar dtype, not a string describing the dtype or the + # backend call to parse the dtype has failed. + else: + raise ValueError(f"The dtype must be in {available_dtypes.keys()} or must be a dtype of the backend, but {dtype} was provided") + + def _init_shape(self, shape, dtype): + # Handle shape and dtype, taking care also of dtypes with shape + try: + shape, shape_in = tuple(safe_int_conv(s) for s in shape), shape + except TypeError: + shape, shape_in = (safe_int_conv(shape),), shape + if any(s < 0 for s in shape): + raise ValueError( + "`shape` must have only nonnegative entries, got " "{}".format(shape_in) + ) + + # We choose this order in contrast to Numpy, since we usually want + # to represent discretizations of vector- or tensor-valued functions, + # i.e., if dtype.shape == (3,) we expect f[0] to have shape `shape`. + # this is likely to break in Pytorch + # Believe it or not, this broke with pytorch + self.__shape = shape + + def _init_field(self): + if self.dtype_identifier in TYPE_PROMOTION_REAL_TO_COMPLEX: + # real includes non-floating-point like integers + field = RealNumbers() + self.__real_dtype = self.dtype + self.__real_space = self + self.__complex_dtype = self.array_backend.available_dtypes[ + TYPE_PROMOTION_REAL_TO_COMPLEX[self.dtype_identifier] + ] + + self.__complex_space = None # Set in first call of astype + elif self.dtype_identifier in TYPE_PROMOTION_COMPLEX_TO_REAL: + field = ComplexNumbers() + self.__real_dtype = self.array_backend.available_dtypes[ + TYPE_PROMOTION_COMPLEX_TO_REAL[self.dtype_identifier] + ] + self.__real_space = None # Set in first call of astype + self.__complex_dtype = self.dtype + self.__complex_space = self + else: + field = None + return field + + def _init_weighting(self, **kwargs): + weighting = kwargs.pop("weighting", None) + if weighting is None: + self.__weighting = odl.core.space_weighting(impl=self.impl, device=self.device, **kwargs) + else: + if issubclass(type(weighting), Weighting): + if weighting.impl != self.impl: + raise ValueError( + f"`weighting.impl` and space.impl must be consistent, but got \ + {weighting.impl} and {self.impl}" + ) + if isinstance(weighting, ArrayWeighting) and weighting.device != self.device: + raise ValueError( + f"`weighting.device` and space.device must be consistent, but got \ + {weighting.device} and {self.device}" + ) + self.__weighting = weighting + if weighting.shape and weighting.shape != self.shape: + raise ValueError( + f"`weighting.shape` and space.shape must be consistent, but got \ + {weighting.shape} and {self.shape}" + ) + elif hasattr(weighting, '__array__') or isinstance(weighting, (int, float)) or isinstance(weighting, (tuple, list)): + self.__weighting = odl.core.space_weighting(impl=self.impl, device=self.device, weight=weighting, **kwargs) + else: + raise TypeError( + f"""Wrong type of 'weighting' argument. Only floats, array-like and odl.Weightings are accepted + """ + ) + + ########## Attributes ########## + @property + def array_backend(self) -> ArrayBackend: + return lookup_array_backend(self.impl) + + @property + def array_namespace(self) -> ModuleType: + """Name of the array_namespace of this tensor set. This relates to the + python array api. + """ + return self.array_backend.array_namespace + + @property + def byaxis(self): + """Return the subspace defined along one or several dimensions. + + Examples + -------- + Indexing with integers or slices: + + >>> space = odl.rn((2, 3, 4)) + >>> space.byaxis[0] + rn(2) + >>> space.byaxis[1:] + rn((3, 4)) + + Lists can be used to stack spaces arbitrarily: + + >>> space.byaxis[[2, 1, 2]] + rn((4, 3, 4)) + """ + space = self + + class TensorSpacebyaxis(object): + + """Helper class for indexing by axis.""" + + def __getitem__(self, indices): + """Return ``self[indices]``.""" + try: + iter(indices) + except TypeError: + newshape = space.shape[indices] + else: + newshape = tuple(space.shape[i] for i in indices) + + return type(space)(newshape, space.dtype, weighting=space.weighting) + + def __repr__(self): + """Return ``repr(self)``.""" + return repr(space) + '.byaxis' + + return TensorSpacebyaxis() + + @property + def complex_dtype(self): + """The complex dtype corresponding to this space's `dtype`. + + Raises + ------ + NotImplementedError + If `dtype` is not a numeric data type. + """ + if not is_numeric_dtype(self.dtype): + raise NotImplementedError( + '`complex_dtype` not defined for non-numeric `dtype`') + return self.__complex_dtype + + @property + def complex_space(self): + """The space corresponding to this space's `complex_dtype`. + + Raises + ------ + ValueError + If `dtype` is not a numeric data type. + """ + if not is_numeric_dtype(self.dtype): + raise ValueError( + '`complex_space` not defined for non-numeric `dtype`') + return self.astype(self.complex_dtype) + + @property + def device(self): + """Device on which the tensorSpace is implemented. + + This property should be overridden by subclasses. + """ + return self.__device + + @property + def dtype(self): + """Scalar data type of each entry in an element of this space.""" + return self.__dtype + + @property + def dtype_identifier(self): + """Scalar data type of each entry in an element of this space.""" + return self.__dtype_identifier + + @property + def element_type(self): + """Type of elements in this space: `Tensor`.""" + raise NotImplementedError + + @property + def examples(self): + """Return example random vectors.""" + # Always return the same numbers + rand_state = np.random.get_state() + np.random.seed(1337) + + if is_numeric_dtype(self.dtype): + yield ('Linearly spaced samples', self.element( + np.linspace(0, 1, self.size).reshape(self.shape))) + yield ('Normally distributed noise', + self.element(np.random.standard_normal(self.shape))) + + if self.is_real: + yield ('Uniformly distributed noise', + self.element(np.random.uniform(size=self.shape))) + elif self.is_complex: + yield ('Uniformly distributed noise', + self.element(np.random.uniform(size=self.shape) + + np.random.uniform(size=self.shape) * 1j)) + else: + # TODO: return something that always works, like zeros or ones? + raise NotImplementedError('no examples available for non-numeric' + 'data type') + + np.random.set_state(rand_state) + + @property + def exponent(self): + """Exponent of the norm and the distance.""" + return self.weighting.exponent + + @property + def impl(self): + """Name of the implementation back-end of this tensor set. + + This property should be overridden by subclasses. + """ + raise NotImplementedError('abstract method') + + @property + def itemsize(self): + """Size in bytes of one entry in an element of this space.""" + return int(self.array_backend.array_constructor([], dtype=self.dtype).itemsize) + + @property + def is_complex(self): + """True if this is a space of complex tensors.""" + return is_complex_dtype(self.dtype_identifier) + + @property + def is_real(self): + """True if this is a space of real tensors.""" + return is_real_floating_dtype(self.dtype_identifier) + + @property + def is_weighted(self): + """Return ``True`` if the space is not weighted by constant 1.0.""" + return not ( + isinstance(self.weighting, ConstWeighting) and + self.weighting.const == 1.0 ) + + + @property + def nbytes(self): + """Total number of bytes in memory used by an element of this space.""" + return self.size * self.itemsize + + @property + def ndim(self): + """Number of axes (=dimensions) of this space, also called "rank".""" + return len(self.shape) + + @property + def real_dtype(self): + """The real dtype corresponding to this space's `dtype`. + + Raises + ------ + NotImplementedError + If `dtype` is not a numeric data type. + """ + if not is_numeric_dtype(self.dtype): + raise NotImplementedError( + '`real_dtype` not defined for non-numeric `dtype`') + return self.__real_dtype + + @property + def real_space(self): + """The space corresponding to this space's `real_dtype`. + + Raises + ------ + ValueError + If `dtype` is not a numeric data type. + """ + if not is_numeric_dtype(self.dtype): + raise ValueError( + '`real_space` not defined for non-numeric `dtype`') + return self.astype(self.real_dtype) + + @property + def supported_num_operation_paradigms(self) -> NumOperationParadigmSupport: + """NumPy has full support for in-place operation, which is usually + advantageous to reduce memory allocations. + This can be deactivated, mostly for testing purposes, by setting + `use_in_place_ops = False` when constructing the space.""" + if self.__use_in_place_ops: + return SupportedNumOperationParadigms( + in_place = NumOperationParadigmSupport.PREFERRED, + out_of_place = NumOperationParadigmSupport.SUPPORTED) + else: + return SupportedNumOperationParadigms( + in_place = NumOperationParadigmSupport.NOT_SUPPORTED, + out_of_place = NumOperationParadigmSupport.PREFERRED) + + @property + def shape(self): + """Number of scalar elements per axis. + + .. note:: + If `dtype` has a shape, we add it to the **left** of the given + ``shape`` in the class creation. This is in contrast to NumPy, + which adds extra axes to the **right**. We do this since we + usually want to represent discretizations of vector- or + tensor-valued functions by this, i.e., if + ``dtype.shape == (3,)`` we expect ``f[0]`` to have shape + ``shape``. + """ + return self.__shape + + @property + def size(self): + """Total number of entries in an element of this space.""" + return (0 if self.shape == () else + int(np.prod(self.shape, dtype='int64'))) + + @property + def weighting(self): + """This space's weighting scheme.""" + return self.__weighting + + ########## public methods ########## + def astype(self, dtype): + """Return a copy of this space with new ``dtype``. + + Parameters + ---------- + dtype : + Scalar data type of the returned space. Can be provided + in any way the `numpy.dtype` constructor understands, e.g. + as built-in type or as a string. Data types with non-trivial + shapes are not allowed. + + Returns + ------- + newspace : `TensorSpace` + Version of this space with given data type. + """ + if dtype is None: + # Need to filter this out since Numpy iterprets it as 'float' + raise ValueError('`None` is not a valid data type') + + available_dtypes = self.array_backend.available_dtypes + dtype_identifier = _universal_dtype_identifier(dtype, array_backend_selection=[self.array_backend]) + if dtype_identifier in available_dtypes: + dtype = available_dtypes[dtype_identifier] + else: + raise ValueError( + f"Tried to convert space to {dtype}, but this cannot be interpreted as any of" + + f" {available_dtypes.keys()}, which are all that are available for backend '{self.impl}'." + ) + + if dtype == self.dtype: + return self + + if is_real_floating_dtype(dtype_identifier) or is_complex_dtype(dtype_identifier): + if self.dtype_identifier == 'bool': + return self._astype(dtype_identifier) + # Caching for real and complex versions (exact dtype mappings) + elif dtype == self.real_dtype: + if self.__real_space is None: + self.__real_space = self._astype(dtype_identifier) + return self.__real_space + elif dtype == self.complex_dtype: + if self.__complex_space is None: + self.__complex_space = self._astype(dtype_identifier) + return self.__complex_space + else: + return self._astype(dtype_identifier) + else: + return self._astype(dtype_identifier) + + def to_device(self, device: str): + """Return a copy of this space with storage on a different computational device. + Mathematically this is the same space. It also uses the same backend for + array operations. + + Parameters + ---------- + device : + Where elements of this space store their arrays. The default spaces + store on `'cpu'`. Which alternatives are possible depends on the + backend (`impl`) and hardware availability. + + Returns + ------- + newspace : `TensorSpace` + Version of this space with selected device.""" + _ = check_device(self.impl, device) + return self._to_device(device) + + def to_impl(self, impl): + """Return a copy of this space using a different array-backend. + Mathematically this is the same space, but the computational performance + can be very different. + + Parameters + ---------- + impl : + Identifier of the target backend. Must correspond to a registered + `ArrayBackend`. See `odl.core.space.entry_points.tensor_space_impl_names` + for available options. + Both `impl` and the implementation of the original space must support + the same device, most typically `'cpu'`. If you want to use GPU storage, + use a separate call to `TensorSpace.to_device`. + + Returns + ------- + newspace : `TensorSpace` + Version of this space with selected backend.""" + _ = check_device(impl, self.device) + return self._to_impl(impl) + + def element(self, inp=None, device=None, copy=None): + + # Most of the cases further below deal with conversions from various array types. + # This only makes sense for plain arrays and ODL objects based on a single plain + # array (i.e. `odl.Tensor` subclasses). For other ODL objects, such as product + # space element, it would result in confusing errors, so we stop this eventuality + # right here. + if isinstance(inp, LinearSpaceElement) and not isinstance(inp, Tensor): + raise TypeError("Trying to generated a `Tensor` from an ODL object with more structure, {type(inp)=}") + + def wrapped_array(arr): + if arr.shape != self.shape: + raise ValueError( + "shape of `inp` not equal to space shape: " + "{} != {}".format(arr.shape, self.shape) + ) + if (is_real_dtype(self.dtype_identifier) and not + is_real_dtype(self.array_backend.get_dtype_identifier(array=arr))): + raise TypeError(f"A real space cannot have complex elements. Got {arr.dtype}") + elif (is_int_dtype(self.dtype_identifier) and not + is_int_dtype(self.array_backend.get_dtype_identifier(array=arr))): + raise TypeError(f"An integer space can only have integer elements. Got {arr.dtype}") + + return self.element_type(self, arr) + + + def dlpack_transfer(arr): + # We check that the object implements the dlpack protocol: + # assert hasattr(inp, "__dlpack_device__") and hasattr( + # arr, "__dlpack__" + # ), """The input does not support the DLpack framework. + # Please convert it to an object that supports it first. + # (cf:https://data-apis.org/array-api/latest/purpose_and_scope.html)""" + # We begin by checking that the transfer is actually needed: + if arr.device == self.device and arr.dtype == self.dtype: + return self.array_backend.array_constructor(arr, copy=copy) + return self.array_backend.from_dlpack(arr, device=self.device, copy=copy) +# try: +# # from_dlpack(inp, device=device, copy=copy) +# # As of Pytorch 2.7, the pytorch API from_dlpack does not implement the +# # keywords that specify the device and copy arguments +# print("in try") +# return self.array_namespace.from_dlpack(arr, device=self.device) +# except BufferError as e: +# print("in BufferError") +# print(f"{self.device=}") +# if hasattr(arr, 'device'): +# print(f"{arr.device=}") +# raise e # BufferError( +# # "The data cannot be exported as DLPack (e.g., incompatible dtype, strides, or device). " +# # "It may also be that the export fails for other reasons " +# # "(e.g., not enough memory available to materialize the data)." +# # "" +# # ) +# except ValueError: +# print("in ValueError") +# raise ValueError( +# "The data exchange is possible via an explicit copy but copy is set to False." +# ) +# ### This is a temporary fix, until pytorch provides the right API for dlpack with args!! +# # The RuntimeError should be raised only when using a GPU device +# except RuntimeError: +# return self.array_backend.array_constructor( +# arr, dtype=self.dtype, device=self.device, copy=copy) + + # Case 1: no input provided + if inp is None: + arr = self.array_namespace.empty( + self.shape, dtype=self.dtype, device=self.device + ) + # Case 2: input is provided + # Case 2.1: the input is an ODL OBJECT + # ---> The data of the input is transferred to the space's device and data type AND wrapped into the space. + elif isinstance(inp, Tensor): + if inp.space == self and copy != True: + # If it is already element of the exact space, nothing needs to be done. + return inp + arr = dlpack_transfer(inp.data) + # Case 2.2: the input is an object that implements the python array aPI (np.ndarray, torch.Tensor...) + # ---> The input is transferred to the space's device and data type AND wrapped into the space. + elif hasattr(inp, '__array__'): + arr = dlpack_transfer(inp) + # Case 2.3: the input is an array like object [[1,2,3],[4,5,6],...] + # ---> The input is transferred to the space's device and data type AND wrapped into the space. + elif isinstance(inp, (list, tuple)): + arr = self.array_backend.array_constructor(inp, dtype=self.dtype, device=self.device) + # Case 2.4: the input is a Python Number + # ---> The input is broadcasted to the space's shape and transferred to the space's device and data type AND wrapped into the space. + elif isinstance(inp, (int, float, complex)): + arr = self.broadcast_to(inp) + + else: + raise ValueError(f'The input {inp} with dtype {type(inp)} is not supported by the `element` method. The only supported types are int, float, complex, list, tuples, objects with an __array__ attribute of a supported backend (e.g np.ndarray and torch.Tensor) and ODL Tensors.') + + return wrapped_array(arr) + + def finfo(self): + "Machine limits for floating-point data types." + return self.array_namespace.finfo(self.dtype) + + def iinfo(self): + "Machine limits for integer data types." + return self.array_namespace.iinfo(self.dtype) + + def divide(self, x1, x2, out=None): + return self._divide(x1, x2, out) + + def multiply(self, x1, x2, out=None): + return self._multiply(x1, x2, out) + + def one(self): + """Return a tensor of all ones. + + This method should be overridden by subclasses. + + Returns + ------- + one : `Tensor` + A tensor of all one. + """ + return self.element( + self.array_namespace.ones(self.shape, dtype=self.dtype, device=self.device) + ) + + def zero(self): + """Return a tensor of all zeros. + + This method should be overridden by subclasses. + + Returns + ------- + zero : `Tensor` + A tensor of all zeros. + """ + return self.element( + self.array_namespace.zeros(self.shape, dtype=self.dtype, device=self.device) + ) + + ######### magic methods ######### + def __contains__(self, other): + """Return ``other in self``. + + Returns + ------- + contains : bool + ``True`` if ``other`` has a ``space`` attribute that is equal + to this space, ``False`` otherwise. + + Examples + -------- + Elements created with the `TensorSpace.element` method are + guaranteed to be contained in the same space: + + >>> spc = odl.tensor_space((2, 3), dtype='uint64') + >>> spc.element() in spc + True + >>> x = spc.element([[0, 1, 2], + ... [3, 4, 5]]) + >>> x in spc + True + + Sizes, data types and other essential properties characterize + spaces and decide about membership: + + >>> smaller_spc = odl.tensor_space((2, 2), dtype='uint64') + >>> y = smaller_spc.element([[0, 1], + ... [2, 3]]) + >>> y in spc + False + >>> x in smaller_spc + False + >>> other_dtype_spc = odl.tensor_space((2, 3), dtype='uint32') + >>> z = other_dtype_spc.element([[0, 1, 2], + ... [3, 4, 5]]) + >>> z in spc + False + >>> x in other_dtype_spc + False + + On the other hand, spaces are not unique: + + >>> spc2 = odl.tensor_space((2, 3), dtype='uint64') + >>> spc2 == spc + True + >>> x2 = spc2.element([[5, 4, 3], + ... [2, 1, 0]]) + >>> x2 in spc + True + >>> x in spc2 + True + + Of course, random garbage is not in the space: + + >>> spc = odl.tensor_space((2, 3), dtype='uint64') + >>> None in spc + False + >>> object in spc + False + >>> False in spc + False + """ + return getattr(other, 'space', None) == self + + def __eq__(self, other): + """Return ``self == other``. + + Returns + ------- + equals : bool + True if ``self`` and ``other`` have the same type, `shape` + and `dtype`, otherwise ``False``. + + Examples + -------- + Sizes, data types and other essential properties characterize + spaces and decide about equality: + + >>> spc = odl.tensor_space(3, dtype='uint64') + >>> spc == spc + True + >>> spc2 = odl.tensor_space(3, dtype='uint64') + >>> spc2 == spc + True + >>> smaller_spc = odl.tensor_space(2, dtype='uint64') + >>> spc == smaller_spc + False + >>> other_dtype_spc = odl.tensor_space(3, dtype='uint32') + >>> spc == other_dtype_spc + False + >>> other_shape_spc = odl.tensor_space((3, 1), dtype='uint64') + >>> spc == other_shape_spc + False + """ + if other is self: + return True + + return (type(other) is type(self) and + self.shape == other.shape and + self.dtype == other.dtype and + self.impl == other.impl and + self.weighting == other.weighting and + self.device == other.device + ) + + def __hash__(self): + """Return ``hash(self)``.""" + return hash((type(self), self.shape, self.dtype, self.device, self.impl, self.weighting)) + + def __len__(self): + """Number of tensor entries along the first axis.""" + return int(self.shape[0]) + + def __repr__(self): + """Return ``repr(self)``.""" + if self.ndim == 1: + posargs = [self.size] + else: + posargs = [self.shape] + + if self.is_real: + ctor_name = 'rn' + elif self.is_complex: + ctor_name = 'cn' + else: + ctor_name = 'tensor_space' + + optmod = '' + + if self.device == 'cpu': + if self.impl == 'numpy': + if ( ctor_name == 'tensor_space' + or not is_numeric_dtype(self.dtype_identifier) + or self.dtype != default_dtype(self.array_backend, self.field) ): + posargs += [self.dtype_identifier] + if is_available_dtype(self.dtype_identifier): + optmod = '!s' + else: + posargs += [self.dtype_identifier, self.impl] + else: + posargs += [self.dtype_identifier, self.impl, self.device] + + inner_str = signature_string(posargs, optargs=[], mod=['', optmod]) + weight_str = self.weighting.repr_part + if weight_str: + inner_str += ', ' + weight_str + + return '{}({})'.format(ctor_name, inner_str) + + def __str__(self): + """Return ``str(self)``.""" + return repr(self) + + ########## _underscore methods ########## + def _astype(self, dtype:str): + """Internal helper for `astype`. + + Subclasses with differing init parameters should overload this + method. + """ + kwargs = {} + if is_real_dtype(dtype) or is_complex_dtype(dtype): + # Use weighting only for floating-point types, otherwise, e.g., + # `space.astype(bool)` would fail + weighting = getattr(self, "weighting", None) + if weighting is not None: + kwargs["weighting"] = weighting + + return type(self)(self.shape, dtype=dtype, device=self.device, **kwargs) + + def _to_device(self, device:str): + """Internal helper for `to_device`. + + Subclasses with differing init parameters should overload this + method. + """ + kwargs = {} + weighting = getattr(self, "weighting", None) + if weighting is not None: + kwargs["weighting"] = weighting.to_device(device) + + return type(self)(self.shape, dtype=self.dtype, device=device, **kwargs) + + def _to_impl(self, impl:str): + """Internal helper for `to_impl`. + + Subclasses with structure other than just backend-specific ℝⁿ spaces should + overload this method. + """ + # Lazy import to avoid cyclic dependency + from odl.core.space.space_utils import tensor_space + + kwargs = {} + weighting = getattr(self, "weighting", None) + if weighting is not None: + kwargs["weighting"] = weighting.to_impl(impl) + + return tensor_space(shape=self.shape, dtype=self.dtype_identifier, impl=impl, device=self.device, **kwargs) + + def _dist(self, x1, x2): + """Return the distance between ``x1`` and ``x2``. + + This function is part of the subclassing API. Do not + call it directly. + + Parameters + ---------- + x1, x2 : `NumpyTensor` + Elements whose mutual distance is calculated. + + Returns + ------- + dist : `float` + Distance between the elements. + + Examples + -------- + Different exponents result in difference metrics: + + >>> space_2 = odl.rn(3, exponent=2) + >>> x = space_2.element([-1, -1, 2]) + >>> y = space_2.one() + >>> space_2.dist(x, y) + 3.0 + + >>> space_1 = odl.rn(3, exponent=1) + >>> x = space_1.element([-1, -1, 2]) + >>> y = space_1.one() + >>> space_1.dist(x, y) + 5.0 + + Weighting is supported, too: + + >>> space_1_w = odl.rn(3, exponent=1, weighting=[2, 1, 1]) + >>> x = space_1_w.element([-1, -1, 2]) + >>> y = space_1_w.one() + >>> space_1_w.dist(x, y) + 7.0 + """ + return self.weighting.dist(x1.data, x2.data) + + def _divide(self, x1, x2, out): + """Compute the entry-wise quotient ``x1 / x2``. + + This function is part of the subclassing API. Do not + call it directly. + + Parameters + ---------- + x1, x2 : `NumpyTensor` + Dividend and divisor in the quotient. + out : `NumpyTensor` + Element to which the result is written. + + Examples + -------- + >>> space = odl.rn(3) + >>> x = space.element([2, 0, 4]) + >>> y = space.element([1, 1, 2]) + >>> space.divide(x, y) + rn(3).element([ 2., 0., 2.]) + >>> out = space.element() + >>> result = space.divide(x, y, out=out) + >>> result + rn(3).element([ 2., 0., 2.]) + >>> out + rn(3).element([ 2., 0., 2.]) + >>> out.data is result.data + True + >>> out = np.zeros((3)) + >>> result = np.divide([2,0,4], [1,1,2], out=out) + >>> result is out + True + + """ + return odl.divide(x1, x2, out) + + def _inner(self, x1, x2): + """Return the inner product of ``x1`` and ``x2``. + + This function is part of the subclassing API. Do not + call it directly. + + Parameters + ---------- + x1, x2 : `NumpyTensor` + Elements whose inner product is calculated. + + Returns + ------- + inner : `field` `element` + Inner product of the elements. + + Examples + -------- + >>> space = odl.rn(3) + >>> x = space.element([1, 0, 3]) + >>> y = space.one() + >>> space.inner(x, y) + 4.0 + + Weighting is supported, too: + + >>> space_w = odl.rn(3, weighting=[2, 1, 1]) + >>> x = space_w.element([1, 0, 3]) + >>> y = space_w.one() + >>> space_w.inner(x, y) + 5.0 + """ + return self.weighting.inner(x1.data, x2.data) + + def _lincomb(self, a, x1, b, x2, out): + """Implement the linear combination of ``x1`` and ``x2``. + + Compute ``out = a*x1 + b*x2`` using optimized + BLAS routines if possible. + + This function is part of the subclassing API. Do not + call it directly. + + Parameters + ---------- + a, b : `TensorSpace.field` element + Scalars to multiply ``x1`` and ``x2`` with. + x1, x2 : `NumpyTensor` + Summands in the linear combination. + out : `NumpyTensor` + Tensor to which the result is written. + + Examples + -------- + >>> space = odl.rn(3) + >>> x = space.element([0, 1, 1]) + >>> y = space.element([0, 0, 1]) + >>> out = space.element() + >>> result = space.lincomb(1, x, 2, y, out) + >>> result + rn(3).element([ 0., 1., 3.]) + >>> result is out + True + """ + return odl.add(a*x1, b*x2, out) + + def _multiply(self, x1, x2, out): + """Compute the entry-wise product ``out = x1 * x2``. + + This function is part of the subclassing API. Do not + call it directly. + + Parameters + ---------- + x1, x2 : `NumpyTensor` + Factors in the product. + out : `NumpyTensor` + Element to which the result is written. + + Examples + -------- + >>> space = odl.rn(3) + >>> x = space.element([1, 0, 3]) + >>> y = space.element([-1, 1, -1]) + >>> space.multiply(x, y) + rn(3).element([-1., 0., -3.]) + >>> out = space.element() + >>> result = space.multiply(x, y, out=out) + >>> result + rn(3).element([-1., 0., -3.]) + >>> result.data is out.data + True + """ + return odl.multiply(x1, x2, out) + + def _norm(self, x): + """Return the norm of ``x``. + + This function is part of the subclassing API. Do not + call it directly. + + Parameters + ---------- + x : `NumpyTensor` + Element whose norm is calculated. + + Returns + ------- + norm : `float` + Norm of the element. + + Examples + -------- + Different exponents result in difference norms: + + >>> space_2 = odl.rn(3, exponent=2) + >>> x = space_2.element([3, 0, 4]) + >>> space_2.norm(x) + 5.0 + >>> space_1 = odl.rn(3, exponent=1) + >>> x = space_1.element([3, 0, 4]) + >>> space_1.norm(x) + 7.0 + + Weighting is supported, too: + + >>> space_1_w = odl.rn(3, exponent=1, weighting=[2, 1, 1]) + >>> x = space_1_w.element([3, 0, 4]) + >>> space_1_w.norm(x) + 10.0 + """ + return self.weighting.norm(x.data) + + def _elementwise_num_operation(self, operation:str + , x1: LinearSpaceElement | Number + , x2: None | LinearSpaceElement | Number = None + , out=None + , namespace=None + , **kwargs ): + """ + Internal helper function to implement the __magic_functions__ (such as __add__). + + Parameters + ---------- + x1 : LinearSpaceElement, Number + Left operand + x2 : LinearSpaceElement, Number + Right operand + operation: str + Attribute of the array namespace + out : TensorSpaceElement, Optional + LinearSpaceElement for out-of-place operations + + Returns + ------- + TensorSpaceElement + The result of the operation `operation` wrapped in a space with the right datatype. + + Notes: + The dtype of the returned TensorSpaceElement (and the space that wraps it) is infered + from the dtype of the array returned by the backend in which the TensorSpaceElement is + implemented. \n + In order to minimise the expensive operations performed under the hood, i.e clearly + unspecified by the user, cross-backend AND cross-devices operations are NOT allowed. \n + -> 1j + TensorSpaceElement(dtype='float32') IS supported \n + -> TensorSpaceElement(device=device1) + TensorSpaceElement(device=device2) IS NOT supported \n + -> TensorSpaceElement(impl=impl1) + TensorSpaceElement(impl=imp2) IS NOT supported \n + + The logic is as follows: + 1) if either of the operands are Python numeric types (int, float complex) + -> the operation is performed on the backend of the TensorSpaceElement and the dtype infered from it. + 2) if the two operands are TensorSpaceElements + -> the operation is delegated to the general odl.operation which performs the checks on space shape and + device consistency. + + """ + if namespace is None: + arr_operation = self.array_backend.lookup_array_operation(operation) + fn = arr_operation.operation_call + if arr_operation.supports_out_argument: + fn_in_place = arr_operation.operation_call + else: + # If there is no native `out` argument of the low-level call, an + # in-place update needs to be emulated in the relevant branches. + fn_in_place = None + else: + fn = getattr(namespace, operation) + # If an explicit namespace was provided, we have to assume it contains + # the function in whichever form appropriate for performing the call + # as requested. + fn_in_place = fn + + if out is not None: + assert isinstance(out, Tensor), f"The out argument must be an ODL Tensor, got {type(out)}." + assert self.shape == out.space.shape, f"The shapes of {self} and out {out.space.shape} differ, cannot perform {operation}" + assert self.device == out.space.device, f"The devices of {self} and out {out.space.device} differ, cannot perform {operation}" + + if x1 is None: + raise TypeError("The left-hand argument always needs to be provided") + + if x2 is None: + assert x1 in self, f"The left operand is not an element of the space." + if out is None: + result_data = fn(x1.data, **kwargs) + elif fn_in_place is None: + result_data = fn(x1.data, **kwargs) + out[:] = result_data + else: + result_data = fn_in_place(x1.data, out=out.data, **kwargs) + return self.astype(self.array_backend.get_dtype_identifier(array=result_data)).element(result_data) + + from odl.core.operator import Operator + if not isinstance(x1, (int, float, complex, Tensor, ProductSpaceElement, Operator)): + raise TypeError(f'The type of the left operand {type(x1)} is not supported.') + + if not isinstance(x2, (int, float, complex, Tensor, ProductSpaceElement, Operator)): + raise TypeError(f'The type of the right operand {type(x2)} is not supported.') + + def _dtype_helper_python_number(x: Tensor, y:int|float|complex): + # We return the backend-specific dtype + if isinstance(y, int): + # Here, we are sure that upcasting y to float will not be a problem + return x.dtype + elif isinstance(y, float): + if is_int_dtype(x.dtype): + return type(y) + elif is_floating_dtype(x.dtype): + return x.dtype + else: + raise ValueError(f'The dtype of x {type(x)} is not supported.') + elif isinstance(y, complex): + if is_int_dtype(x.dtype) or is_real_dtype(x.dtype): + return complex_dtype(x.dtype, backend=x.array_backend) + elif is_complex_dtype(x.dtype): + return x.dtype + else: + raise ValueError(f'The dtype of x {type(x)} is not supported.') + else: + raise ValueError(f'The dtype of y {type(y)} is not supported.') + + if isinstance(x1, (int, float, complex)) or isinstance(x2, (int, float, complex)): + if out is None: + if isinstance(x1, (int, float, complex)): + dtype = _dtype_helper_python_number(x2, x1) + x1 = self.array_backend.array_constructor(x1, dtype=dtype) + result_data = fn(x1, x2.data, **kwargs) + + elif isinstance(x2, (int, float, complex)): + dtype = _dtype_helper_python_number(x1, x2) + x2 = self.array_backend.array_constructor(x2, dtype=dtype) + result_data = fn(x1.data, x2, **kwargs) + + else: + if isinstance(x1, (int, float, complex)): + dtype = _dtype_helper_python_number(x2, x1) + x1 = self.array_backend.array_constructor(x1, dtype=dtype) + if fn_in_place is None: + result_data = fn(x1, x2.data, **kwargs) + out[:] = result_data + else: + result_data = fn_in_place(x1, x2.data, out=out.data, **kwargs) + + elif isinstance(x2, (int, float, complex)): + dtype = _dtype_helper_python_number(x1, x2) + x2 = self.array_backend.array_constructor(x2, dtype=dtype) + if fn_in_place is None: + result_data = fn(x1.data, x2, **kwargs) + out[:] = result_data + else: + result_data = fn_in_place(x1.data, x2, out=out.data, **kwargs) + + return self.astype(self.array_backend.get_dtype_identifier(array=result_data)).element(result_data) + + # if isinstance(x1, self.array_backend.array_type) or isinstance(x2, self.array_backend.array_type): + # if out is None: + # if isinstance(x1, self.array_backend.array_type): + # assert x1.shape == self.shape, f"The shape of self {self.shape} and x1 {x1.shape} differ, cannot perform {operation}" + # assert str(x1.device) == self.device, f"The device of self {self.device} and x1 {x1.device} differ, cannot perform {operation}" + # result_data = fn(x1, x2.data, **kwargs) + # elif isinstance(x2, self.array_backend.array_type): + # assert x2.shape == self.shape, f"The shape of self {self.shape} and x2 {x2.shape} differ, cannot perform {operation}" + # assert str(x2.device) == self.device, f"The device of self {self.device} and x2 {x2.device} differ, cannot perform {operation}" + # result_data = fn(x1.data, x2, **kwargs) + + # else: + # if isinstance(x1, self.array_backend.array_type): + # assert x1.shape == self.shape, f"The shape of self {self.shape} and x1 {x1.shape} differ, cannot perform {operation}" + # assert str(x1.device) == self.device, f"The device of self {self.device} and x1 {x1.device} differ, cannot perform {operation}" + # result_data = fn(x1, x2.data, out=out.data, **kwargs) + # elif isinstance(x2, self.array_backend.array_type): + # assert x2.shape == self.shape, f"The shape of self {self.shape} and x2 {x2.shape} differ, cannot perform {operation}" + # assert str(x2.device) == self.device, f"The device of self {self.device} and x2 {x2.device} differ, cannot perform {operation}" + # result_data = fn(x1.data, x2, out=out.data, **kwargs) + # return self.astype(self.array_backend.get_dtype_identifier(array=result_data)).element(result_data) + + if isinstance(x1, ProductSpaceElement): + if not isinstance(x2, Tensor): + raise TypeError(f'The right operand is not an ODL Tensor. {type(x2)=}') + return x1.space._elementwise_num_operation(operation, x1, x2, out, namespace=namespace, **kwargs) + + elif isinstance(x2, ProductSpaceElement): + if not isinstance(x1, Tensor): + raise TypeError(f'The left operand is not an ODL Tensor. {type(x1)=}') + return x2.space._elementwise_num_operation(operation, x1, x2, out, namespace=namespace, **kwargs) + + if isinstance(x2, Operator): + if operation=='multiply': + warnings.warn("The composition of a LinearSpaceElement and an Operator using the * operator is deprecated and will be removed in future ODL versions. Please replace * with @.") + return x2.__rmul__(x1) + elif operation =='add': + return x2.__radd__(x1) + elif operation =='subtract': + return x2.__rsub__(x1) + else: + raise TypeError(f"Attempted numerical operation {operation} between two incompatible objects ({type(x1)=}, {type(x2)=})") + + if isinstance(x1, Tensor) and isinstance(x2, Tensor): + assert self.array_backend.array_type == x2.array_backend.array_type, f"The types of {self.array_backend.array_type} and x2 {x2.array_backend.array_type} differ, cannot perform {operation}" + assert self.shape == x2.space.shape, f"The shapes of {self} and x2 {x2.space.shape} differ, cannot perform {operation}" + assert self.device == x2.space.device, f"The devices of {self} and x2 {x2.space.device} differ, cannot perform {operation}" + + if out is None: + result = fn(x1.data, x2.data) + elif fn_in_place is None: + result = fn(x1.data, x2.data) + out.data[:] = result + else: + result = fn(x1.data, x2.data, out=out.data) + + # We make sure to return an element of the right type: + # for instance, if two spaces have a int dtype, the result of the division + # of one of their element by another return should be of float dtype + return x1.space.astype(x1.space.array_backend.get_dtype_identifier(array=result)).element(result) + else: + raise TypeError(f"Neither x1 nor x2 are odl ODL Tensors. Got {type(x1)} and {type(x2)}") + + + + def _element_reduction(self, operation:str + , x: "Tensor" + , **kwargs + ): + fn = getattr(self.array_namespace, operation) + result = fn(x.data, **kwargs) + try: + return result.item() + except AttributeError: + assert result.shape == () + return result[0] + except (ValueError, RuntimeError): + # Arises when we are performing the 'reductions' along certains axis only. We can't take the item of an array with several dimensions. + # TODO: We should handle that differently than with try and excepts. + return result + + + +class Tensor(LinearSpaceElement): + + """Abstract class for representation of `TensorSpace` elements.""" + ######### static methods ######### + + ######### Attributes ######### + @property + def array_backend(self) -> ArrayBackend: + return self.space.array_backend + + @property + def array_namespace(self) -> ModuleType: + """Name of the array_namespace of this tensor. + + This relates to the python array api + """ + return self.space.array_namespace + + @property + def data(self): + """The backend-specific array representing the data of ``self``.""" + raise NotImplementedError("abstract method") + + @property + def device(self): + """Device on which the space lives.""" + return self.space.device + + @property + def dtype(self): + """Data type of each entry.""" + return self.space.dtype + + @property + def dtype_identifier(self): + """Data type as a string of each entry.""" + return self.space.dtype_identifier + + @property + def imag(self): + """Imaginary part of ``self``. + + Returns + ------- + imag : `NumpyTensor` + Imaginary part this element as an element of a + `NumpyTensorSpace` with real data type. + + Examples + -------- + Get the imaginary part: + + >>> space = odl.cn(3) + >>> x = space.element([1 + 1j, 2, 3 - 3j]) + >>> x.imag + rn(3).element([ 1., 0., -3.]) + + Set the imaginary part: + + >>> space = odl.cn(3) + >>> x = space.element([1 + 1j, 2, 3 - 3j]) + >>> zero = odl.rn(3).zero() + >>> x.imag = zero + >>> x + cn(3).element([ 1.+0.j, 2.+0.j, 3.+0.j]) + + Other array-like types and broadcasting: + + >>> x.imag = 1.0 + >>> x + cn(3).element([ 1.+1.j, 2.+1.j, 3.+1.j]) + >>> x.imag = [2, 3, 4] + >>> x + cn(3).element([ 1.+2.j, 2.+3.j, 3.+4.j]) + """ + if self.space.is_real: + return self.space.zero() + elif self.space.is_complex: + real_space = self.space.astype(self.space.real_dtype) + return real_space.element(self.data.imag, copy=False) + else: + raise NotImplementedError('`imag` not defined for non-numeric ' + 'dtype {}'.format(self.dtype)) + + @property + def impl(self): + """Name of the implementation back-end of this tensor.""" + return self.space.impl + + @property + def itemsize(self): + """Size in bytes of one tensor entry.""" + return self.space.itemsize + + @property + def nbytes(self): + """Total number of bytes in memory occupied by this tensor.""" + return self.space.nbytes + + @property + def ndim(self): + """Number of axes (=dimensions) of this tensor.""" + return self.space.ndim + + @property + def odl_tensor(self): + """Number of axes (=dimensions) of this tensor.""" + return True + + @property + def real(self): + """Real part of ``self``. + + Returns + ------- + real : `NumpyTensor` + Real part of this element as a member of a + `NumpyTensorSpace` with corresponding real data type. + + Examples + -------- + Get the real part: + + >>> space = odl.cn(3) + >>> x = space.element([1 + 1j, 2, 3 - 3j]) + >>> x.real + rn(3).element([ 1., 2., 3.]) + + Set the real part: + + >>> space = odl.cn(3) + >>> x = space.element([1 + 1j, 2, 3 - 3j]) + >>> zero = odl.rn(3).zero() + >>> x.real = zero + >>> x + cn(3).element([ 0.+1.j, 0.+0.j, 0.-3.j]) + + Other array-like types and broadcasting: + + >>> x.real = 1.0 + >>> x + cn(3).element([ 1.+1.j, 1.+0.j, 1.-3.j]) + >>> x.real = [2, 3, 4] + >>> x + cn(3).element([ 2.+1.j, 3.+0.j, 4.-3.j]) + """ + if self.space.is_real: + return self + elif self.space.is_complex: + real_space = self.space.astype(self.space.real_dtype) + return real_space.element(self.data.real, copy=False) + else: + raise NotImplementedError('`real` not defined for non-numeric ' + 'dtype {}'.format(self.dtype)) + + @property + def shape(self): + """Number of elements per axis.""" + return self.space.shape + + @property + def size(self): + """Total number of entries.""" + return self.space.size + + ######### public methods ######### + def asarray(self, out=None, must_be_contiguous: bool =False): + """Extract the data of this array as a backend-specific array/tensor. + + This method is invoked when calling `numpy.asarray` on this + tensor. + + Parameters + ---------- + out : array_like, optional + Array in which the result should be written in-place. + Has to be contiguous and of the correct backend, dtype and device. + must_be_contiguous: `bool` + If this is `True`, then the returned array must occupy + a single block of memory and the axes be ordered + (in C order). Cf. `numpy.ascontiguousarray`. + This may require making a copy. + If `False` is given, the returned array may be a view + or have transposed axes, if this allows avoiding a copy. + If an `out` argument is provided, `must_be_contiguous` + is irrelevant. + + Returns + ------- + asarray : array_like + Numpy array, pytorch tensor or similar with the same data type as ``self``. + If ``out`` was given, the returned object is a reference to it. + + Examples + -------- + >>> space = odl.rn(3, dtype='float32') + >>> x = space.element([1, 2, 3]) + >>> x.asarray() + array([ 1., 2., 3.], dtype=float32) + >>> out = np.empty(3, dtype='float32') + >>> result = x.asarray(out=out) + >>> out + array([ 1., 2., 3.], dtype=float32) + >>> result is out + True + >>> space = odl.rn((2, 3)) + >>> space.one().asarray() + array([[ 1., 1., 1.], + [ 1., 1., 1.]]) + """ + if out is None: + if must_be_contiguous: + return self.array_backend.make_contiguous(self.data) + else: + return self.data + else: + out[:] = self.data + return out + + @contextmanager + def writable_array(self, must_be_contiguous: bool =False): + """Context manager that casts `self` to a backend-specific array and saves changes + made to that array back in `self`. + + Parameters + ---------- + must_be_contiguous : bool + Whether the writable array should guarantee standard C order. + See documentation to `asarray` for the semantics. + + Examples + -------- + + >>> space = odl.uniform_discr(0, 1, 3) + >>> x = space.element([1, 2, 3]) + >>> with x.writable_array() as arr: + ... arr += [1, 1, 1] + >>> x + uniform_discr(0.0, 1.0, 3).element([ 2., 3., 4.]) + + Note that the changes are in general only saved upon exiting the + context manager. Before, the input object may remain unchanged. + """ + arr = None + try: + # TODO(Justus) it should be possible to avoid making a copy here, + # and actually just modify `data` in place. + arr = self.asarray(must_be_contiguous=must_be_contiguous) + yield arr + finally: + if arr is not None: + self.data[:] = arr + + def astype(self, dtype): + """Return a copy of this element with new ``dtype``. + + Parameters + ---------- + dtype : + Scalar data type of the returned space. Can be provided + in any way the `numpy.dtype` constructor understands, e.g. + as built-in type or as a string. Data types with non-trivial + shapes are not allowed. + + Returns + ------- + newelem : `Tensor` + Version of this element with given data type. + """ + return self.space.astype(dtype).element(self.data.astype(dtype)) + + def to_device(self, device: str): + """Return a copy of this element with the same values stored on + a different computational device. + + Parameters + ---------- + device : + Identifier of the desired storage location. Which ones are + supported depends on the array backend (`impl`). Always + allowed is `'cpu'`, but GPU alternatives like `'cuda:0'` + can offer better performance if available. + + Returns + ------- + newelem : `Tensor` + Version of this element with its data array on the desired device. + """ + return self.space.to_device(device).element( + self.array_backend.to_device(self.data, device)) + + def to_impl(self, impl: str): + """Return a copy of this element with the same values stored using + a different array backend. + + Parameters + ---------- + impl : + Identifier of the target backend. Must correspond to a registered + `ArrayBackend`. See `odl.core.space.entry_points.tensor_space_impl_names` + for available options. + Both `impl` and the implementation of the original space must support + the same device, most typically `'cpu'`. If you want to use GPU storage, + use a separate call to `Tensor.to_device`. + + Returns + ------- + newelem : `Tensor` + Version of this element with its data array using the desired backend. + """ + new_backend = lookup_array_backend(impl) + new_data = new_backend.array_namespace.from_dlpack(self.data) + + # TODO (Justus) this is a workaround for inconsistent behaviour by + # DLPack / the array backends. DLPack tries to avoid a copy and makes + # the result readonly, which is not fully supported and causes various problems. + # Making an explicit copy avoids this, but is not ideal from a performance + # perspective. It might make sense to add a `copy` argument that controls + # this, and/or exception handling. + # Perhaps in the future it will also just work by leaving it up to DLPack. + new_data = new_backend.array_constructor(new_data, copy=True) + + assert str(new_data.device) == self.device, f"Error when transferring array from {self.impl} to {impl}: device changed from {self.device} to {new_data.device}. Ensure to use a device supported by both backends." + assert _universal_dtype_identifier(new_data.dtype) == self.dtype_identifier, f"Error when transferring array from {self.impl} to {impl}: dtype changed from {self.dtype} to {new_data.dtype}. Ensure to use a dtype supported by both backends." + return self.space.to_impl(impl).element(new_data) + + def set_zero(self): + """Set this element to zero. + + See Also + -------- + LinearSpace.zero + """ + self.data[:] = 0 + return self + + def conj(self, out=None): + """Return the complex conjugate of ``self``. + + Parameters + ---------- + out : `NumpyTensor`, optional + Element to which the complex conjugate is written. + Must be an element of ``self.space``. + + Returns + ------- + out : `NumpyTensor` + The complex conjugate element. If ``out`` was provided, + the returned object is a reference to it. + + Examples + -------- + >>> space = odl.cn(3) + >>> x = space.element([1 + 1j, 2, 3 - 3j]) + >>> x.conj() + cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j]) + >>> out = space.element() + >>> result = x.conj(out=out) + >>> result + cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j]) + >>> result is out + True + + In-place conjugation: + + >>> result = x.conj(out=x) + >>> x + cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j]) + >>> result is x + True + """ + if self.space.is_real: + if out is None: + return self + else: + out[:] = self + return out + + if not is_numeric_dtype(self.space.dtype): + raise NotImplementedError('`conj` not defined for non-numeric ' + 'dtype {}'.format(self.dtype)) + + if out is None: + return self.space.element(self.data.conj()) + else: + if out not in self.space: + raise LinearSpaceTypeError('`out` {!r} not in space {!r}' + ''.format(out, self.space)) + # self.data.conj(out.data) + out.data = self.array_namespace.conj(self.data) + return out + + @imag.setter + def imag(self, newimag): + """Setter for the imaginary part. + + This method is invoked by ``x.imag = other``. + + Parameters + ---------- + newimag : `Tensor`, array-like, or scalar + Values to be assigned to the imaginary part of this element. + + Raises + ------ + ValueError + If the space is real, i.e., no imagninary part can be set. + """ + if self.space.is_real: + raise ValueError('cannot set imaginary part in real spaces') + if isinstance(newimag, Tensor): + assert(newimag in self.space.real_space) + else: + newimag = self.space.real_space.element(newimag) + self.data.imag = newimag.data + + @real.setter + def real(self, newreal): + """Setter for the real part. + + This method is invoked by ``x.real = other``. + + Parameters + ---------- + newreal : `Tensor`, array-like, or scalar + Values to be assigned to the real part of this element. + """ + if isinstance(newreal, Tensor): + assert(newreal in self.space.real_space) + else: + newreal = self.space.real_space.element(newreal) + self.data.real = newreal.data + + def show(self, title=None, method='', indices=None, force_show=False, + fig=None, **kwargs): + """Display the function graphically. + + Parameters + ---------- + title : string, optional + Set the title of the figure + + method : string, optional + 1d methods: + + ``'plot'`` : graph plot + + ``'scatter'`` : scattered 2d points (2nd axis <-> value) + + 2d methods: + + ``'imshow'`` : image plot with coloring according to + value, including a colorbar. + + ``'scatter'`` : cloud of scattered 3d points + (3rd axis <-> value) + + indices : index expression, optional + Display a slice of the array instead of the full array. The + index expression is most easily created with the `numpy.s_` + constructor, i.e. supply ``np.s_[:, 1, :]`` to display the + first slice along the second axis. + For data with 3 or more dimensions, the 2d slice in the first + two axes at the "middle" along the remaining axes is shown + (semantically ``[:, :, shape[2:] // 2]``). + This option is mutually exclusive to ``coords``. + + force_show : bool, optional + Whether the plot should be forced to be shown now or deferred until + later. Note that some backends always displays the plot, regardless + of this value. + + fig : `matplotlib.figure.Figure`, optional + The figure to show in. Expected to be of same "style", as + the figure given by this function. The most common use case + is that ``fig`` is the return value of an earlier call to + this function. + + kwargs : {'figsize', 'saveto', 'clim', ...}, optional + Extra keyword arguments passed on to the display method. + See the Matplotlib functions for documentation of extra + options. + + Returns + ------- + fig : `matplotlib.figure.Figure` + The resulting figure. It is also shown to the user. + + See Also + -------- + odl.core.util.graphics.show_discrete_data : Underlying implementation + """ + from odl.core.discr import uniform_grid + from odl.core.util.graphics import show_discrete_data + + # Default to showing x-y slice "in the middle" + if indices is None and self.ndim >= 3: + indices = tuple( + [slice(None)] * 2 + [n // 2 for n in self.space.shape[2:]] + ) + + if isinstance(indices, (Integral, slice)): + indices = (indices,) + elif indices is None or indices == Ellipsis: + indices = (slice(None),) * self.ndim + else: + indices = tuple(indices) + + # Replace None by slice(None) + indices = tuple(slice(None) if idx is None else idx for idx in indices) + + if Ellipsis in indices: + # Replace Ellipsis with the correct number of [:] expressions + pos = indices.index(Ellipsis) + indices = (indices[:pos] + + (np.s_[:], ) * (self.ndim - len(indices) + 1) + + indices[pos + 1:]) + + if len(indices) < self.ndim: + raise ValueError('too few axes ({} < {})'.format(len(indices), + self.ndim)) + if len(indices) > self.ndim: + raise ValueError('too many axes ({} > {})'.format(len(indices), + self.ndim)) + + # Squeeze grid and values according to the index expression + full_grid = uniform_grid([0] * self.ndim, np.array(self.shape) - 1, + self.shape) + grid = full_grid[indices].squeeze() + values = self.asarray()[indices].squeeze() + + return show_discrete_data(values, grid, title=title, method=method, + force_show=force_show, fig=fig, **kwargs) + + ######### magic methods ######### + def __bool__(self): + """Return ``bool(self)``.""" + if self.size > 1: + raise ValueError('The truth value of an array with more than one ' + 'element is ambiguous. ' + 'Use np.any(a) or np.all(a)') + else: + return bool(self.asarray()) + + def __complex__(self): + """Return ``complex(self)``.""" + assert len(self.data) == 1 + return complex(self.data.item()) + + def __float__(self): + """Return ``float(self)``.""" + assert len(self.data) == 1 + return float(self.data.item()) + + def __int__(self): + """Return ``int(self)``.""" + assert len(self.data) == 1 + return int(self.data.item()) + + def __copy__(self): + """Return ``copy(self)``. + + This implements the (shallow) copy interface of the ``copy`` + module of the Python standard library. + + See Also + -------- + copy + + Examples + -------- + >>> from copy import copy + >>> space = odl.rn(3) + >>> x = space.element([1, 2, 3]) + >>> y = copy(x) + >>> y == x + True + >>> y is x + False + """ + return self.copy() + + def __getitem__(self, indices): + """Return ``self[indices]``. + + This method should be overridden by subclasses. + + Parameters + ---------- + indices : index expression + Integer, slice or sequence of these, defining the positions + of the data array which should be accessed. + + Returns + ------- + values : `TensorSpace.dtype` or `Tensor` + The value(s) at the given indices. Note that depending on + the implementation, the returned object may be a (writable) + view into the original array. + """ + raise NotImplementedError('abstract method') + + def __len__(self): + """Return ``len(self)``. + + The length is equal to the number of entries along axis 0. + """ + return len(self.space) + + def __repr__(self): + """Return ``repr(self)``.""" + maxsize_full_print = 2 * np.get_printoptions()['edgeitems'] + self_str = array_str(self, nprint=maxsize_full_print) + if self.ndim == 1 and self.size <= maxsize_full_print: + return '{!r}.element({})'.format(self.space, self_str) + else: + return '{!r}.element(\n{}\n)'.format(self.space, indent(self_str)) + + def __setitem__(self, indices, values): + """Implement ``self[indices] = values``. + + This method should be overridden by subclasses. + + Parameters + ---------- + indices : index expression + Integer, slice or sequence of these, defining the positions + of the data array which should be written to. + values : scalar, `array-like` or `Tensor` + The value(s) that are to be assigned. + + If ``index`` is an integer, ``value`` must be a scalar. + + If ``index`` is a slice or a sequence of slices, ``value`` + must be broadcastable to the shape of the slice. + """ + raise NotImplementedError('abstract method') + + def __str__(self): + """Return ``str(self)``.""" + return array_str(self) + + """ + [+] = implemented + [-] = not implemented yet + [X] = Will not be implemented + The Python array API expects the following operators: + ##################################################### + ################# Arithmetic Operators ################# + [+] +x: array.__pos__() + [+] -x: array.__neg__() + [+] x1 + x2: array.__add__() + [+] x1 - x2: array.__sub__() + [+] x1 * x2: array.__mul__() + [+] x1 / x2: array.__truediv__() + [+] x1 // x2: array.__floordiv__() + [+] x1 % x2: array.__mod__() + [+] x1 ** x2: array.__pow__() + ################# Array Operators ################# + [X] x1 @ x2: array.__matmul__() -> In ODL, a matmul should be implemented as composition of operators + ################# Bitwise Operators ################# + [X] ~x: array.__invert__() + [X] x1 & x2: array.__and__() + [X] x1 | x2: array.__or__() + [X] x1 ^ x2: array.__xor__() + [X] x1 << x2: array.__lshift__() + [X] x1 >> x2: array.__rshift__() + ################# Comparison Operators ################# + [X] x1 < x2: array.__lt__() ONLY DEFINED FOR REAL-VALUED DATA TYPES + [X] x1 <= x2: array.__le__() ONLY DEFINED FOR REAL-VALUED DATA TYPES + [X] x1 > x2: array.__gt__() ONLY DEFINED FOR REAL-VALUED DATA TYPES + [X] x1 >= x2: array.__ge__() ONLY DEFINED FOR REAL-VALUED DATA TYPES + [+] x1 == x2: array.__eq__() + [+] x1 != x2: array.__ne__() + ##################################################### + ################# In-place Arithmetic Operators ################# + [+] x1 += x2: array.__iadd__() + [+] x1 -= x2: array.__isub__() + [+] x1 *= x2: array.__imul__() + [+] x1 /= x2: array.__itruediv__() + [+] x1 //= x2: array.__ifloordiv__() + [+] x1 %= x2: array.__imod__() + [+] x1 **= x2: array.__ipow__() + ################# In-place Array Operators ################# + [X] x1 @= x2: array.__imatmul__() -> In ODL, a matmul should be implemented as composition of operators + ################# In-place Bitwise Operators ################# + [X] x1 &= x2: array.__iand__() + [X] x1 |= x2: array.__ior__() + [X] x1 ^= x2: array.__ixor__() + [X] x1 <<= x2: array.__ilshift__() + [X] x1 >>= x2: array.__irshift__() + ################# Reflected Arithmetic Operators ################# + [+] x2 + x1: array.__radd__() + [+] x2 - x1: array.__rsub__() + [+] x2 * x1: array.__rmul__() + [+] x2 / x1: array.__rtruediv__() + [+] x2 // x1: array.__rfloordiv__() + [+] x2 % x1: array.__rmod__() + [+] x2 ** x1: array.__rpow__() + ################# Reflected Array Operators ################# + [X] x2 @ x1: array.__rmatmul__() -> In ODL, a matmul should be implemented as composition of operators + ################# Reflected Bitwise Operators ################# + [X] x2 & x1: array.__rand__() + [X] x2 | x1: array.__ror__() + [X] x2 ^ x1: array.__rxor__() + [X] x2 << x1: array.__rlshift__() + [X] x2 >> x1: array.__rrshift__() + """ + ####### Arithmetic Operators ####### + ################# Array Operators ################# + + ################# Bitwise Operators ################# + def __invert__(self): + """Implement ``self.invert``.""" + raise NotImplementedError + + def __and__(self, other): + """Implement ``self.bitwise_and``.""" + raise NotImplementedError + + def __or__(self, other): + """Implement ``self.bitwise_or``.""" + raise NotImplementedError + + def __xor__(self, other): + """Implement ``self.bitwise_xor``.""" + raise NotImplementedError + + def __lshift__(self, other): + """Implement ``self.bitwise_lshift``.""" + raise NotImplementedError + + def __rshift__(self, other): + """Implement ``self.bitwise_rshift``.""" + raise NotImplementedError + + ################# Comparison Operators ################# + def __eq__(self, other): + """Implement ``self == other``.""" + bool_space = self.space.astype(bool) + if other is self: + return True + elif other not in self.space: + return False + else: + return bool(self.array_namespace.all(self.data == other.data)) + + def __ne__(self, other): + """Return ``self != other``.""" + return not self.__eq__(other) + + ################# In-place Array Operators ################# + + ################# In-place Bitwise Operators ################# + def __iand__(self, other): + """Implement ``self.ibitwise_and``.""" + raise NotImplementedError + + def __ior__(self, other): + """Implement ``self.ibitwise_or``.""" + raise NotImplementedError + + def __ixor__(self, other): + """Implement ``self.ibitwise_xor``.""" + raise NotImplementedError + + def __lshift__(self, other): + """Implement ``self.ibitwise_lshift``.""" + raise NotImplementedError + + def __irshift__(self, other): + """Implement ``self.ibitwise_rshift``.""" + raise NotImplementedError + + ################# Reflected Array Operators ################# + + ################# Reflected Bitwise Operators ################# + def __rand__(self, other): + """Implement ``self.ibitwise_and``.""" + raise NotImplementedError + + def __ror__(self, other): + """Implement ``self.ibitwise_or``.""" + raise NotImplementedError + + def __rxor__(self, other): + """Implement ``self.ibitwise_xor``.""" + raise NotImplementedError + + def __rshift__(self, other): + """Implement ``self.ibitwise_lshift``.""" + raise NotImplementedError + + def __rrshift__(self, other): + """Implement ``self.ibitwise_rshift``.""" + raise NotImplementedError + + ######### private methods ######### + def _assign(self, other, avoid_deep_copy): + """Assign the values of ``other``, which is assumed to be in the + same space, to ``self``.""" + raise NotImplementedError("abstract method") + +if __name__ == '__main__': + from odl.core.util.testutils import run_doctests + run_doctests() diff --git a/odl/space/entry_points.py b/odl/core/space/entry_points.py similarity index 72% rename from odl/space/entry_points.py rename to odl/core/space/entry_points.py index fe1fc7644f8..ee665e63fdb 100644 --- a/odl/space/entry_points.py +++ b/odl/core/space/entry_points.py @@ -9,7 +9,7 @@ """Entry points for adding more spaces to ODL using external packages. External packages can add an implementation of `TensorSpace` by hooking -into the setuptools entry point ``'odl.space'`` and exposing the methods +into the setuptools entry point ``'odl.core.space'`` and exposing the methods ``tensor_space_impl`` and ``tensor_space_impl_names``. This is used with functions such as `rn`, `cn`, `tensor_space` or @@ -22,28 +22,28 @@ from __future__ import print_function, division, absolute_import -from odl.space.npy_tensors import NumpyTensorSpace +from odl.backends.arrays.npy_tensors import NumpyTensorSpace -# We don't expose anything to odl.space +# We don't expose anything to odl.core.space __all__ = () IS_INITIALIZED = False -TENSOR_SPACE_IMPLS = {'numpy': NumpyTensorSpace} - +TENSOR_SPACE_IMPLS = { + 'numpy': NumpyTensorSpace + } def _initialize_if_needed(): """Initialize ``TENSOR_SPACE_IMPLS`` if not already done.""" global IS_INITIALIZED, TENSOR_SPACE_IMPLS if not IS_INITIALIZED: - # pkg_resources has long import time - from pkg_resources import iter_entry_points - for entry_point in iter_entry_points(group='odl.space', name=None): + import importlib.util + torch_module = importlib.util.find_spec("torch") + if torch_module is not None: try: - module = entry_point.load() - except ImportError: + from odl.backends.arrays.pytorch_tensors import PyTorchTensorSpace + TENSOR_SPACE_IMPLS['pytorch'] = PyTorchTensorSpace + except ModuleNotFoundError: pass - else: - TENSOR_SPACE_IMPLS.update(module.tensor_space_impls()) IS_INITIALIZED = True @@ -72,13 +72,10 @@ def tensor_space_impl(impl): ValueError If ``impl`` is not a valid name of a tensor space imlementation. """ - if impl != 'numpy': - # Shortcut to improve "import odl" times since most users do not use - # non-numpy backends - _initialize_if_needed() - try: return TENSOR_SPACE_IMPLS[impl] except KeyError: raise ValueError("`impl` {!r} does not correspond to a valid tensor " "space implmentation".format(impl)) + +_initialize_if_needed() diff --git a/odl/space/pspace.py b/odl/core/space/pspace.py similarity index 87% rename from odl/space/pspace.py rename to odl/core/space/pspace.py index 6273e19532a..b6fc2c539e2 100644 --- a/odl/space/pspace.py +++ b/odl/core/space/pspace.py @@ -11,18 +11,20 @@ from __future__ import absolute_import, division, print_function from itertools import product -from numbers import Integral - +from numbers import Integral, Number import numpy as np -from odl.set import LinearSpace -from odl.set.space import (LinearSpaceElement, +import warnings +from contextlib import contextmanager + +from odl.core.set import LinearSpace +from odl.core.set.space import (LinearSpaceElement, SupportedNumOperationParadigms, NumOperationParadigmSupport) -from odl.space.weighting import ( +from .weightings.weighting import ( ArrayWeighting, ConstWeighting, CustomDist, CustomInner, CustomNorm, Weighting) -from odl.util import indent, is_real_dtype, signature_string -from odl.util.ufuncs import ProductSpaceUfuncs +from odl.core.array_API_support.utils import get_array_and_backend +from odl.core.util import indent, is_real_dtype, signature_string __all__ = ('ProductSpace',) @@ -283,6 +285,109 @@ def __len__(self): """ return len(self.spaces) + def _elementwise_num_operation(self, operation:str + , x1: LinearSpaceElement | Number + , x2: None | LinearSpaceElement | Number = None + , out=None + , namespace=None + , **kwargs ): + """ + Internal helper function to implement the __magic_functions__ (such as __add__). + + Parameters + ---------- + x1 : ProductSpaceElement, TensorSpaceElement, int, float, complex + Left operand + x2 : ProductSpaceElement, TensorSpaceElement, int, float, complex + Right operand + operation: str + Attribute of the array namespace + out : ProductSpaceElement, Optional + ProductSpaceElement for out-of-place operations + + Returns + ------- + ProductSpaceElement + The result of the operation `operation` wrapped in a space with the right datatype. + + """ + if self.field is None: + raise NotImplementedError(f"The space has no field.") + + if out is not None: + if not isinstance(out, ProductSpaceElement): + raise TypeError(f"Output argument for ProductSpace arithmetic must be a product space. {type(out)=}") + assert len(out.parts) == len(self) + + def _dtype_adaptive_wrapper(new_parts): + if all([xln.space == spc for xln, spc in zip(new_parts, self)]): + return self.element(new_parts) + else: + # The `xl.space._elementwise_num_operation` may change the dtype, and thus the + # part-space. For example, the `isfinite` function has boolean results. + # In this case, the resulting product space also has the new dtype, which we + # accomplish by creating the new space on the spot. + new_space = ProductSpace(*[xln.space for xln in new_parts]) + return new_space.element(new_parts) + + if x2 is None: + if out is None: + return _dtype_adaptive_wrapper([ + xl.space._elementwise_num_operation(operation=operation, x1=xl, namespace=namespace, **kwargs) + for xl in x1.parts ]) + else: + for i, xl in enumerate(x1.parts): + xl.space._elementwise_num_operation(operation=operation, x1=xl, out=out.parts[i], namespace=namespace, **kwargs) + return out + + from odl.core.operator import Operator + if isinstance(x2, Operator): + warnings.warn("The composition of a LinearSpaceElement and an Operator using the * operator is deprecated and will be removed in future ODL versions. Please replace * with @.") + return x2.__rmul__(x1) + + if isinstance(x1, ProductSpaceElement) and isinstance(x2, ProductSpaceElement): + assert len(x1.parts) == len(x2.parts) + if out is None: + return _dtype_adaptive_wrapper([ + xl.space._elementwise_num_operation(operation=operation, x1=xl, x2=xr, namespace=namespace, **kwargs) + for xl, xr in zip(x1.parts, x2.parts) ]) + else: + for i, xl in enumerate(x1.parts): + xr = x2.parts[i] + xl.space._elementwise_num_operation(operation=operation, x1=xl, x2=xr, out=out.parts[i], namespace=namespace, **kwargs) + return out + + elif isinstance(x1, ProductSpaceElement): + if out is None: + return _dtype_adaptive_wrapper([ + x.space._elementwise_num_operation(operation=operation, x1=x, x2=x2, namespace=namespace, **kwargs) + for x in x1.parts ]) + else: + for i, x in enumerate(x1.parts): + x.space._elementwise_num_operation(operation=operation, x1=x, x2=x2, out=out.parts[i], namespace=namespace, **kwargs) + return out + + elif isinstance(x2, ProductSpaceElement): + if out is None: + return _dtype_adaptive_wrapper([ + x.space._elementwise_num_operation(operation=operation, x1=x1, x2=x, namespace=namespace, **kwargs) + for x in x2.parts ]) + else: + for i, x in enumerate(x2.parts): + x.space._elementwise_num_operation(operation=operation, x1=x1, x2=x, out=out.parts[i], namespace=namespace, **kwargs) + return out + + else: + raise TypeError(f"At least one of the arguments to `ProductSpace._elementwise_num_operation` should be a `ProductSpaceElement`, but got {type(x1)=}, {type(x2)=}") + + def _element_reduction(self, operation:str + , x: "ProductSpaceElement" + , **kwargs + ): + assert(x in self) + part_results = np.array([ xp.space._element_reduction(operation, xp, **kwargs) for xp in x.parts ]) + return getattr(np, operation)(part_results).item() + @property def nbytes(self): """Total number of bytes in memory used by an element of this space.""" @@ -541,18 +646,27 @@ def element(self, inp=None, cast=True): if inp in self: return inp + + if isinstance(inp, Number): + inp = [space.element(inp) for space in self.spaces] if len(inp) != len(self): - raise ValueError('length of `inp` {} does not match length of ' + # Here, we handle the case where the user provides an input with a single element that we will try to broadcast to all of the parts of the ProductSpace. + if len(inp) == 1 and cast: + parts = [space.element(inp[0]) for space in self.spaces] + else: + raise ValueError('length of `inp` {} does not match length of ' 'space {}'.format(len(inp), len(self))) - if (all(isinstance(v, LinearSpaceElement) and v.space == space + elif (all(isinstance(v, LinearSpaceElement) and v.space == space for v, space in zip(inp, self.spaces))): parts = list(inp) - elif cast: + + elif cast and len(inp) == len(self): # Delegate constructors parts = [space.element(arg) for arg, space in zip(inp, self.spaces)] + else: raise TypeError('input {!r} not a sequence of elements of the ' 'component spaces'.format(inp)) @@ -944,6 +1058,17 @@ def _assign(self, other, avoid_deep_copy): for tgt, src in zip(self.parts, other.parts): tgt.assign(src, avoid_deep_copy=avoid_deep_copy) + def set_zero(self): + """Set this element to zero. + + See Also + -------- + LinearSpace.zero + """ + for tgt in self.parts: + tgt.set_zero() + return self + def __len__(self): """Return ``len(self)``.""" return len(self.space) @@ -1070,8 +1195,8 @@ def __setitem__(self, indices, values): for p, v in zip(indexed_parts, values): p[:] = v - def asarray(self, out=None): - """Extract the data of this vector as a numpy array. + def asarray(self, out=None, must_be_contiguous=False): + """Extract the data of this vector as a backend-specific array. Only available if `is_power_space` is True. @@ -1081,10 +1206,10 @@ def asarray(self, out=None): Parameters ---------- - out : `numpy.ndarray`, optional + out : Arraylike, optional Array in which the result should be written in-place. - Has to be contiguous and of the correct dtype and - shape. + Has to be contiguous and of the correct backend, + dtype and shape. Raises ------ @@ -1104,120 +1229,33 @@ def asarray(self, out=None): raise ValueError('cannot use `asarray` if `space.is_power_space` ' 'is `False`') else: - if out is None: - out = np.empty(self.shape, self.dtype) - - for i in range(len(self)): - out[i] = np.asarray(self[i]) - return out - - def __array__(self): - """An array representation of ``self``. - - Only available if `is_power_space` is True. - - The ordering is such that it commutes with indexing:: - - np.array(self[ind]) == np.array(self)[ind] - - Raises - ------ - ValueError - If `is_power_space` is false. - - Examples - -------- - >>> spc = odl.ProductSpace(odl.rn(3), 2) - >>> x = spc.element([[ 1., 2., 3.], - ... [ 4., 5., 6.]]) - >>> np.asarray(x) - array([[ 1., 2., 3.], - [ 4., 5., 6.]]) - """ - return self.asarray() - - def __array_wrap__(self, array): - """Return a new product space element wrapping the ``array``. - - Only available if `is_power_space` is ``True``. - - Parameters - ---------- - array : `numpy.ndarray` - Array to be wrapped. - - Returns - ------- - wrapper : `ProductSpaceElement` - Product space element wrapping ``array``. - """ - # HACK(kohr-h): This is to support (full) reductions like - # `np.sum(x)` for numpy>=1.16, where many such reductions - # moved from plain functions to `ufunc.reduce.*`, thus - # invoking the `__array__` and `__array_wrap__` machinery. - if array.shape == (): - return array.item() - - return self.space.element(array) - - @property - def ufuncs(self): - """`ProductSpaceUfuncs`, access to Numpy style ufuncs. + representative_array, representative_backend = get_array_and_backend(self.parts[0]) - These are always available if the underlying spaces are - `TensorSpace`. - - Examples - -------- - >>> r22 = odl.ProductSpace(odl.rn(2), 2) - >>> x = r22.element([[1, -2], [-3, 4]]) - >>> x.ufuncs.absolute() - ProductSpace(rn(2), 2).element([ - [ 1., 2.], - [ 3., 4.] - ]) - - These functions can also be used with non-vector arguments and - support broadcasting, per component and even recursively: - - >>> x.ufuncs.add([1, 2]) - ProductSpace(rn(2), 2).element([ - [ 2., 0.], - [-2., 6.] - ]) - >>> x.ufuncs.subtract(1) - ProductSpace(rn(2), 2).element([ - [ 0., -3.], - [-4., 3.] - ]) - - There is also support for various reductions (sum, prod, min, max): - - >>> x.ufuncs.sum() - 0.0 + if out is None: + # We are assuming that `empty` always produces a contiguous array, + # so no need to ensure it separately. + out = representative_backend.array_namespace.empty( + shape=self.shape, + dtype=self.dtype, + device=representative_array.device) - Writing to ``out`` is also supported: + out[0] = representative_array - >>> y = r22.element() - >>> result = x.ufuncs.absolute(out=y) - >>> result - ProductSpace(rn(2), 2).element([ - [ 1., 2.], - [ 3., 4.] - ]) - >>> result is y - True + for i in range(1, len(self)): + self.parts[i].asarray(out = out[i]) - See Also - -------- - odl.util.ufuncs.TensorSpaceUfuncs - Base class for ufuncs in `TensorSpace` spaces, subspaces may - override this for greater efficiency. - odl.util.ufuncs.ProductSpaceUfuncs - For a list of available ufuncs. - """ - return ProductSpaceUfuncs(self) + return out + @contextmanager + def writable_array(self, must_be_contiguous: bool =False): + arr = None + try: + arr = self.asarray(must_be_contiguous=must_be_contiguous) + yield arr + finally: + if arr is not None: + for i in range(1, len(self)): + self.parts[i]._assign(self.parts[i].space.element(arr[i])) @property def real(self): """Real part of the element. @@ -1287,7 +1325,7 @@ def real(self, newreal): # Set same value in all parts for part in self.parts: part.real = newreal - except (ValueError, TypeError): + except (AttributeError, ValueError, TypeError): # Iterate over all parts and set them separately for part, new_re in zip(self.parts, newreal): part.real = new_re @@ -1370,7 +1408,7 @@ def imag(self, newimag): # Set same value in all parts for part in self.parts: part.imag = newimag - except (ValueError, TypeError): + except (AttributeError, ValueError, TypeError): # Iterate over all parts and set them separately for part, new_im in zip(self.parts, newimag): part.imag = new_im @@ -1499,11 +1537,11 @@ def show(self, title=None, indices=None, **kwargs): See Also -------- - odl.discr.discr_space.DiscretizedSpaceElement.show : + odl.core.discr.discr_space.DiscretizedSpaceElement.show : Display of a discretized function - odl.space.base_tensors.Tensor.show : + odl.core.space.base_tensors.Tensor.show : Display of sequence type data - odl.util.graphics.show_discrete_data : + odl.core.util.graphics.show_discrete_data : Underlying implementation """ if title is None: @@ -1556,63 +1594,6 @@ def show(self, title=None, indices=None, **kwargs): return tuple(figs) - -# --- Add arithmetic operators that broadcast --- # - - -def _broadcast_arithmetic(op): - """Return ``op(self, other)`` with broadcasting. - - Parameters - ---------- - op : string - Name of the operator, e.g. ``'__add__'``. - - Returns - ------- - broadcast_arithmetic_op : function - Function intended to be used as a method for `ProductSpaceVector` - which performs broadcasting if possible. - - Notes - ----- - Broadcasting is the operation of "applying an operator multiple times" in - some sense. For example: - - .. math:: - (1, 2) + 1 = (2, 3) - - is a form of broadcasting. In this implementation, we only allow "single - layer" broadcasting, i.e., we do not support broadcasting over several - product spaces at once. - """ - def _broadcast_arithmetic_impl(self, other): - if (self.space.is_power_space and other in self.space[0]): - results = [] - for xi in self: - res = getattr(xi, op)(other) - if res is NotImplemented: - return NotImplemented - else: - results.append(res) - - return self.space.element(results) - else: - return getattr(LinearSpaceElement, op)(self, other) - - # Set docstring - docstring = """Broadcasted {op}.""".format(op=op) - _broadcast_arithmetic_impl.__doc__ = docstring - - return _broadcast_arithmetic_impl - - -for op in ['add', 'sub', 'mul', 'div', 'truediv']: - for modifier in ['', 'r', 'i']: - name = '__{}{}__'.format(modifier, op) - setattr(ProductSpaceElement, name, _broadcast_arithmetic(name)) - - class ProductSpaceArrayWeighting(ArrayWeighting): """Array weighting for `ProductSpace`. @@ -1669,7 +1650,7 @@ def __init__(self, array, exponent=2.0): during initialization. """ super(ProductSpaceArrayWeighting, self).__init__( - array, impl='numpy', exponent=exponent) + array, impl=None, device=None, exponent=exponent) def inner(self, x1, x2): """Calculate the array-weighted inner product of two elements. @@ -1691,7 +1672,7 @@ def inner(self, x1, x2): inners = np.fromiter( (x1i.inner(x2i) for x1i, x2i in zip(x1, x2)), - dtype=x1[0].space.dtype, count=len(x1)) + dtype=x1[0].space.dtype_identifier, count=len(x1)) inner = np.dot(inners, self.array) if is_real_dtype(x1[0].dtype): @@ -1777,7 +1758,7 @@ def __init__(self, constant, exponent=2.0): inner product or norm, respectively. """ super(ProductSpaceConstWeighting, self).__init__( - constant, impl='numpy', exponent=exponent) + constant, impl=None, device=None, exponent=exponent) def inner(self, x1, x2): """Calculate the constant-weighted inner product of two elements. @@ -1797,12 +1778,13 @@ def inner(self, x1, x2): 'exponent != 2 (got {})' ''.format(self.exponent)) - inners = np.fromiter( - (x1i.inner(x2i) for x1i, x2i in zip(x1, x2)), - dtype=x1[0].space.dtype, count=len(x1)) + accumulator = 0.0 + # Manual loop, to avoid having to select a universally-applicable dtype + for x1i, x2i in zip(x1, x2): + accumulator = accumulator + x1i.inner(x2i) - inner = self.const * np.sum(inners) - return x1.space.field.element(inner) + result = self.const * accumulator + return x1.space.field.element(result) def norm(self, x): """Calculate the constant-weighted norm of an element. @@ -1876,7 +1858,7 @@ def __init__(self, inner): - `` = 0`` if and only if ``x = 0`` """ super(ProductSpaceCustomInner, self).__init__( - impl='numpy', inner=inner) + impl=None, inner=inner, device=None) class ProductSpaceCustomNorm(CustomNorm): @@ -1902,7 +1884,7 @@ def __init__(self, norm): - ``||s * x|| = |s| * ||x||`` - ``||x + y|| <= ||x|| + ||y||`` """ - super(ProductSpaceCustomNorm, self).__init__(norm, impl='numpy') + super(ProductSpaceCustomNorm, self).__init__(norm, impl=None, device=None) class ProductSpaceCustomDist(CustomDist): @@ -1928,7 +1910,7 @@ def __init__(self, dist): - ``dist(x, y) = dist(y, x)`` - ``dist(x, y) <= dist(x, z) + dist(z, y)`` """ - super(ProductSpaceCustomDist, self).__init__(dist, impl='numpy') + super(ProductSpaceCustomDist, self).__init__(dist, impl=None, device=None) def _strip_space(x): @@ -1949,5 +1931,5 @@ def _indent(x): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/space/space_utils.py b/odl/core/space/space_utils.py similarity index 58% rename from odl/space/space_utils.py rename to odl/core/space/space_utils.py index ead66cf4773..534dd0e524a 100644 --- a/odl/space/space_utils.py +++ b/odl/core/space/space_utils.py @@ -11,16 +11,18 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.array_API_support import lookup_array_backend -from odl.set import RealNumbers, ComplexNumbers -from odl.space.entry_points import tensor_space_impl +from odl.core.space.base_tensors import default_dtype +from odl.core.util.dtype_utils import is_available_dtype, is_complex_dtype, is_floating_dtype +from odl.core.space.entry_points import tensor_space_impl, tensor_space_impl_names __all__ = ('vector', 'tensor_space', 'cn', 'rn') -def vector(array, dtype=None, order=None, impl='numpy'): +def vector(array, dtype=None, impl='numpy', device = 'cpu'): """Create a vector from an array-like object. Parameters @@ -31,12 +33,9 @@ def vector(array, dtype=None, order=None, impl='numpy'): dtype : optional Set the data type of the vector manually with this option. By default, the space type is inferred from the input data. - order : {None, 'C', 'F'}, optional - Axis ordering of the data storage. For the default ``None``, - no contiguousness is enforced, avoiding a copy if possible. impl : str, optional Impmlementation back-end for the space. See - `odl.space.entry_points.tensor_space_impl_names` for available + `odl.core.space.entry_points.tensor_space_impl_names` for available options. Returns @@ -55,7 +54,7 @@ def vector(array, dtype=None, order=None, impl='numpy'): Create one-dimensional vectors: >>> odl.vector([1, 2, 3]) # No automatic cast to float - tensor_space(3, dtype=int).element([1, 2, 3]) + tensor_space(3, 'int64').element([1, 2, 3]) >>> odl.vector([1, 2, 3], dtype=float) rn(3).element([ 1., 2., 3.]) >>> odl.vector([1, 2 - 1j, 3]) @@ -64,19 +63,22 @@ def vector(array, dtype=None, order=None, impl='numpy'): Non-scalar types are also supported: >>> odl.vector([True, True, False]) - tensor_space(3, dtype=bool).element([ True, True, False]) + tensor_space(3, 'bool').element([ True, True, False]) The function also supports multi-dimensional input: >>> odl.vector([[1, 2, 3], ... [4, 5, 6]]) - tensor_space((2, 3), dtype=int).element( + tensor_space((2, 3), 'int64').element( [[1, 2, 3], [4, 5, 6]] ) """ + backend = lookup_array_backend(impl) # Sanitize input - arr = np.array(array, copy=AVOID_UNNECESSARY_COPY, order=order, ndmin=1) + # I don't understand was a ndim prepended to the array dimension + arr = backend.array_constructor(array, copy=AVOID_UNNECESSARY_COPY, device = device) + if arr.dtype is object: raise ValueError('invalid input data resulting in `dtype==object`') @@ -86,11 +88,11 @@ def vector(array, dtype=None, order=None, impl='numpy'): else: space_dtype = arr.dtype - space = tensor_space(arr.shape, dtype=space_dtype, impl=impl) + space = tensor_space(arr.shape, dtype=space_dtype, impl=impl, device=device) return space.element(arr) -def tensor_space(shape, dtype=None, impl='numpy', **kwargs): +def tensor_space(shape, dtype='float64', impl='numpy', device = 'cpu', **kwargs): """Return a tensor space with arbitrary scalar data type. Parameters @@ -98,15 +100,11 @@ def tensor_space(shape, dtype=None, impl='numpy', **kwargs): shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. - dtype : optional - Data type of each element. Can be provided in any way the - `numpy.dtype` function understands, e.g. as built-in type or - as a string. - For ``None``, the `TensorSpace.default_dtype` of the - created space is used. + dtype (str) : optional + Data type of each element. Defaults to float64 impl : str, optional Impmlementation back-end for the space. See - `odl.space.entry_points.tensor_space_impl_names` for available + `odl.core.space.entry_points.tensor_space_impl_names` for available options. kwargs : Extra keyword arguments passed to the space constructor. @@ -121,15 +119,15 @@ def tensor_space(shape, dtype=None, impl='numpy', **kwargs): vector space): >>> odl.tensor_space(3, dtype='uint64') - tensor_space(3, dtype='uint64') + tensor_space(3, 'uint64') 2x3 tensors with same data type: >>> odl.tensor_space((2, 3), dtype='uint64') - tensor_space((2, 3), dtype='uint64') + tensor_space((2, 3), 'uint64') - The default data type depends on the implementation. For - ``impl='numpy'``, it is ``'float64'``: + The default data type is ``'float64'``. How that is represented as a dtype-object + depends on the backend. >>> ts = odl.tensor_space((2, 3)) >>> ts @@ -141,17 +139,19 @@ def tensor_space(shape, dtype=None, impl='numpy', **kwargs): -------- rn, cn : Constructors for real and complex spaces """ - tspace_cls = tensor_space_impl(impl) - - if dtype is None: - dtype = tspace_cls.default_dtype() + # Check the dtype argument + is_available_dtype(dtype) + # Check the impl argument + assert ( + impl in tensor_space_impl_names() + ), f"The only supported impls are {tensor_space_impl_names()}, but {impl} was provided" # Use args by keyword since the constructor may take other arguments # by position - return tspace_cls(shape=shape, dtype=dtype, **kwargs) + return tensor_space_impl(impl)(shape=shape, dtype=dtype, device=device, **kwargs) -def cn(shape, dtype=None, impl='numpy', **kwargs): +def cn(shape, dtype='complex128', impl='numpy', device='cpu', **kwargs): """Return a space of complex tensors. Parameters @@ -159,16 +159,12 @@ def cn(shape, dtype=None, impl='numpy', **kwargs): shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. - dtype : optional - Data type of each element. Can be provided in any way the - `numpy.dtype` function understands, e.g. as built-in type or - as a string. Only complex floating-point data types are allowed. - For ``None``, the `TensorSpace.default_dtype` of the - created space is used in the form - ``default_dtype(ComplexNumbers())``. - impl : str, optional + dtype (str) : optional + Data type of each element. Must be provided as a string or Python complex type. + Defaults to complex128 + impl (str) : str, optional Impmlementation back-end for the space. See - `odl.space.entry_points.tensor_space_impl_names` for available + `odl.core.space.entry_points.tensor_space_impl_names` for available options. kwargs : Extra keyword arguments passed to the space constructor. @@ -182,12 +178,12 @@ def cn(shape, dtype=None, impl='numpy', **kwargs): Space of complex 3-tuples with ``complex64`` entries: >>> odl.cn(3, dtype='complex64') - cn(3, dtype='complex64') + cn(3, 'complex64') Complex 2x3 tensors with ``complex64`` entries: >>> odl.cn((2, 3), dtype='complex64') - cn((2, 3), dtype='complex64') + cn((2, 3), 'complex64') The default data type depends on the implementation. For ``impl='numpy'``, it is ``'complex128'``: @@ -203,21 +199,11 @@ def cn(shape, dtype=None, impl='numpy', **kwargs): tensor_space : Space of tensors with arbitrary scalar data type. rn : Real tensor space. """ - cn_cls = tensor_space_impl(impl) + is_complex_dtype(dtype) + return tensor_space(shape, dtype=dtype, impl=impl, device=device, **kwargs) - if dtype is None: - dtype = cn_cls.default_dtype(ComplexNumbers()) - # Use args by keyword since the constructor may take other arguments - # by position - cn = cn_cls(shape=shape, dtype=dtype, **kwargs) - if not cn.is_complex: - raise ValueError('data type {!r} not a complex floating-point type.' - ''.format(dtype)) - return cn - - -def rn(shape, dtype=None, impl='numpy', **kwargs): +def rn(shape, dtype=None, impl='numpy', device ='cpu', **kwargs): """Return a space of real tensors. Parameters @@ -225,16 +211,12 @@ def rn(shape, dtype=None, impl='numpy', **kwargs): shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. - dtype : optional - Data type of each element. Can be provided in any way the - `numpy.dtype` function understands, e.g. as built-in type or - as a string. Only real floating-point data types are allowed. - For ``None``, the `TensorSpace.default_dtype` of the - created space is used in the form - ``default_dtype(RealNumbers())``. - impl : str, optional - Impmlementation back-end for the space. See - `odl.space.entry_points.tensor_space_impl_names` for available + dtype (str) : optional + Data type of each element. See REAL_DTYPES in + `odl.core.util.utility.py` for available options. Defaults to float64 + impl (str) : str, optional + Impmlementation back-end for the space. See the constant + TENSOR_SPACE_IMPLS for available backends options. kwargs : Extra keyword arguments passed to the space constructor. @@ -248,15 +230,14 @@ def rn(shape, dtype=None, impl='numpy', **kwargs): Space of real 3-tuples with ``float32`` entries: >>> odl.rn(3, dtype='float32') - rn(3, dtype='float32') + rn(3, 'float32') Real 2x3 tensors with ``float32`` entries: >>> odl.rn((2, 3), dtype='float32') - rn((2, 3), dtype='float32') + rn((2, 3), 'float32') - The default data type depends on the implementation. For - ``impl='numpy'``, it is ``'float64'``: + The default data type is float64 >>> ts = odl.rn((2, 3)) >>> ts @@ -269,20 +250,13 @@ def rn(shape, dtype=None, impl='numpy', **kwargs): tensor_space : Space of tensors with arbitrary scalar data type. cn : Complex tensor space. """ - rn_cls = tensor_space_impl(impl) - if dtype is None: - dtype = rn_cls.default_dtype(RealNumbers()) + dtype = default_dtype(lookup_array_backend(str(impl).lower())) + is_floating_dtype(dtype) + return tensor_space(shape, dtype=dtype, impl=impl, device=device, **kwargs) - # Use args by keyword since the constructor may take other arguments - # by position - rn = rn_cls(shape=shape, dtype=dtype, **kwargs) - if not rn.is_real: - raise ValueError('data type {!r} not a real floating-point type.' - ''.format(dtype)) - return rn if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/core/space/weightings/__init__.py b/odl/core/space/weightings/__init__.py new file mode 100644 index 00000000000..ea0f24438b8 --- /dev/null +++ b/odl/core/space/weightings/__init__.py @@ -0,0 +1,6 @@ +from __future__ import absolute_import + +from .entry_points import space_weighting +from .weighting import * + +__all__ = ('space_weighting',) \ No newline at end of file diff --git a/odl/core/space/weightings/entry_points.py b/odl/core/space/weightings/entry_points.py new file mode 100644 index 00000000000..4c2f590d0d0 --- /dev/null +++ b/odl/core/space/weightings/entry_points.py @@ -0,0 +1,119 @@ +from odl.core.array_API_support import get_array_and_backend, lookup_array_backend +from .weighting import ConstWeighting, ArrayWeighting, CustomInner, CustomNorm, CustomDist + +def space_weighting( + impl : str, + device = 'cpu', + **kwargs + ): + """ + Notes: + To instantiate a weigthing, one can use a variety of mutually exclusive parameters + 1) inner (callable): the inner product between two elements of the space + 2) norm (callable): the norm of an element of the space + -> sqrt(inner(x,x).real) + 3) dist (callable): the distance between two elements of the space + -> norm(x1-x2) + 4) weight (float | ArrayLike): Scalar or element-wise weighting of the space elements + + In case a weight was provided, additionally the following is supported: + 4A) exponent (float): exponent of the summands in the norm, used for Banach spaces like L¹ + If the exponent is 2, the weight is then used for defining an inner product and the + other operations, whereas for other exponents only the norm and distance are enabled. + + For a custom inner-product space, the exponent must be 2 (the default). The inner product + also implies a norm and distance then. + A custom norm defines a distance but will disable the inner product. A custom distance + disables all other operations. + """ + + if 'exponent' in kwargs: + # Pop the kwarg + exponent = kwargs['exponent'] + assert not set(['norm', 'dist']).issubset(kwargs) + # Assign the attribute + if exponent != 2: + assert 'inner' not in kwargs + else: + exponent = 2 + + if 'inner' in kwargs: + # Pop the kwarg + inner = kwargs.pop('inner') + # check the kwarg + assert callable(inner) + # Check the consistency + assert exponent == 2 + + for arg in ['norm', 'dist', 'weight']: + if arg in kwargs: + raise ValueError(f"If a custom inner product is specified, the weighting cannot also have custom {arg}={kwargs[arg]}.") + + return CustomInner(inner, device=device, impl=impl) + + elif 'norm' in kwargs: + # Pop the kwarg + array_norm = kwargs.pop('norm') + # check the kwarg + assert callable(array_norm) + # Check the consistency + for arg in ['exponent', 'inner', 'dist', 'weight']: + if arg in kwargs: + raise ValueError(f"If a custom norm is specified, the weighting cannot also have custom {arg}={kwargs[arg]}.") + + return CustomNorm(array_norm, device=device, impl=impl) + + elif 'dist' in kwargs: + # Pop the kwarg + dist = kwargs.pop('dist') + # check the kwarg + assert callable(dist) + # Check the consistency + for arg in ['exponent', 'inner', 'norm', 'weight']: + if arg in kwargs: + raise ValueError(f"If a custom distance is specified, the weighting cannot also have custom {arg}={kwargs[arg]}.") + + + return CustomDist(dist, device=device, impl=impl) + + elif 'weight' in kwargs: + # Pop the kwarg + weight = kwargs.pop('weight') + # Check the consistency + for arg in ['inner', 'norm', 'dist']: + if arg in kwargs: + raise ValueError(f"If a custom weight is specified, the weighting cannot also have custom {arg}={kwargs[arg]}.") + + if isinstance(weight, (int, float)): + if 0 < weight and weight != float('inf'): + weight = float(weight) + else: + raise ValueError("If the weight is a scalar, it must be positive") + return ConstWeighting(const=weight, impl=impl, device=device, exponent=exponent) + + elif isinstance(weight, (tuple, list)): + array_backend = lookup_array_backend(impl) + weight = array_backend.array_constructor(weight, device=device) + if array_backend.array_namespace.any(weight < 0): + raise ValueError("If the weight is an array, all its elements must be positive") + + elif hasattr(weight, '__array__'): + weight, backend = get_array_and_backend(weight) + if backend.array_namespace.all(0 < weight): + assert device == weight.device.__str__(), f"The weighing is expecting the device {device}, but the array provided for the weight has a device {weight.device}. Please make sure that the two devices are consistent" + else: + raise ValueError("If the weight is an array, all its elements must be positive") + + else: + raise ValueError(f"A weight can only be a positive __array__, a positive float.") + + return ArrayWeighting(array=weight, impl=impl, device=device, exponent=exponent) + + elif kwargs == {}: + # TODO handle boolean case + return ConstWeighting(const=1.0, impl=impl, device=device) + + elif kwargs == {'exponent': exponent}: + return ConstWeighting(const=1.0, exponent=exponent, impl=impl, device=device) + + raise TypeError('got unknown keyword arguments {}'.format(kwargs)) diff --git a/odl/space/weighting.py b/odl/core/space/weightings/weighting.py similarity index 70% rename from odl/space/weighting.py rename to odl/core/space/weightings/weighting.py index 0c236548fea..a659ca1931f 100644 --- a/odl/space/weighting.py +++ b/odl/core/space/weightings/weighting.py @@ -10,11 +10,12 @@ from __future__ import print_function, division, absolute_import from builtins import object +import math import numpy as np -from odl.space.base_tensors import TensorSpace -from odl.util import array_str, signature_string, indent - +from odl.core.util import array_str, signature_string, indent, is_real_dtype +from odl.core.array_API_support.utils import get_array_and_backend, lookup_array_backend +from odl.core.array_API_support.comparisons import odl_all_equal __all__ = ('MatrixWeighting', 'ArrayWeighting', 'ConstWeighting', 'CustomInner', 'CustomNorm', 'CustomDist') @@ -34,19 +35,22 @@ class Weighting(object): functions are being used. """ - def __init__(self, impl, exponent=2.0): + def __init__(self, impl, device, exponent=2.0): """Initialize a new instance. Parameters ---------- impl : string Specifier for the implementation backend + device : + device identifier, compatible with the backend associated with `impl` exponent : positive float, optional Exponent of the norm. For values other than 2.0, the inner product is not defined. """ self.__impl = str(impl).lower() self.__exponent = float(exponent) + self.__device = device if self.exponent <= 0: raise ValueError('only positive exponents or inf supported, ' 'got {}'.format(exponent)) @@ -56,6 +60,28 @@ def impl(self): """Implementation backend of this weighting.""" return self.__impl + @property + def device(self): + """Backend-specific device identifier. Arrays this weighting should measure + must be stored on that device.""" + return self.__device + + def to_device(self, device): + """Return a version of the same weighting, but with any internal arrays stored + on a different device.""" + raise NotImplementedError("Abstract method") + + def to_impl(self, impl): + """Return a version of the same weighting, but with any internal arrays stored + with a different array backend (e.g. `'pytorch'` instead of `'numpy'`).""" + raise NotImplementedError("Abstract method") + + @property + def shape(self): + """A tuple of numbers, denoting the shape that arrays need to have to be + used with this weighting. An empty shape means any shape of array is supported.""" + raise NotImplementedError("Abstract method") + @property def exponent(self): """Exponent of this weighting.""" @@ -103,7 +129,7 @@ def inner(self, x1, x2): Parameters ---------- - x1, x2 : `LinearSpaceElement` + x1, x2 : ArrayLike Elements whose inner product is calculated. Returns @@ -121,7 +147,7 @@ def norm(self, x): Parameters ---------- - x1 : `LinearSpaceElement` + x1 : ArrayLike Element whose norm is calculated. Returns @@ -129,7 +155,7 @@ def norm(self, x): norm : float The norm of the element. """ - return float(np.sqrt(self.inner(x, x).real)) + return float(math.sqrt(self.inner(x, x).real)) def dist(self, x1, x2): """Calculate the distance between two elements. @@ -139,7 +165,7 @@ def dist(self, x1, x2): Parameters ---------- - x1, x2 : `LinearSpaceElement` + x1, x2 : ArrayLike Elements whose mutual distance is calculated. Returns @@ -162,7 +188,7 @@ class MatrixWeighting(Weighting): checked during initialization. """ - def __init__(self, matrix, impl, exponent=2.0, **kwargs): + def __init__(self, matrix, impl, device, exponent=2.0, **kwargs): """Initialize a new instance. Parameters @@ -171,6 +197,8 @@ def __init__(self, matrix, impl, exponent=2.0, **kwargs): Square weighting matrix of the inner product impl : string Specifier for the implementation backend + device : + device identifier, compatible with the backend associated with `impl` exponent : positive float, optional Exponent of the norm. For values other than 2.0, the inner product is not defined. @@ -216,7 +244,7 @@ def __init__(self, matrix, impl, exponent=2.0, **kwargs): precomp_mat_pow = kwargs.pop('precomp_mat_pow', False) self._cache_mat_pow = bool(kwargs.pop('cache_mat_pow', True)) self._cache_mat_decomp = bool(kwargs.pop('cache_mat_decomp', False)) - super(MatrixWeighting, self).__init__(impl=impl, exponent=exponent) + super(MatrixWeighting, self).__init__(impl=impl, device=device, exponent=exponent) # Check and set matrix if scipy.sparse.isspmatrix(matrix): @@ -455,6 +483,53 @@ def __str__(self): return repr(self) +def _pnorm_diagweight(x, p, w): + """Diagonally weighted p-norm implementation.""" + x, array_backend = get_array_and_backend(x) + ns = array_backend.array_namespace + xp = ns.abs(x.data) + if p == float('inf'): + xp *= w + return ns.max(xp) + else: + # Believe it or not, Pytorch and Numpy implement power in a *different* way + try: + xp = ns.power(xp, p, out=xp) + except AttributeError: + xp = ns.pow(xp, p, out=xp) + xp *= w + return ns.sum(xp) ** (1 / p) + +def _norm_default(x): + """Default Euclidean norm implementation.""" + x, array_backend = get_array_and_backend(x) + ns = array_backend.array_namespace + return ns.linalg.vector_norm(x.data) + +def _pnorm_default(x, p): + """Default p-norm implementation.""" + x, array_backend = get_array_and_backend(x) + ns = array_backend.array_namespace + return ns.linalg.vector_norm(x.data, ord=p) + +def _inner_default(x1, x2): + """Default Euclidean inner product implementation.""" + x1, array_backend_1 = get_array_and_backend(x1) + x2, array_backend_2 = get_array_and_backend(x2) + assert array_backend_1 == array_backend_2, f"{array_backend_1=} and {array_backend_2=} do not match" + ns = array_backend_1.array_namespace + if is_real_dtype(x2.dtype): + return ns.vecdot(x1.ravel(), x2.ravel()) + else: + # `vecdot` has the complex conjugate on the left argument, + # whereas ODL convention is that the inner product should + # be linear in the left argument (conjugate in the right). + return ns.vecdot(x2.ravel(), x1.ravel()) + + +# TODO: implement intermediate weighting schemes with arrays that are +# broadcast, i.e. between scalar and full-blown in dimensionality? + class ArrayWeighting(Weighting): """Weighting of a space by an array. @@ -467,7 +542,7 @@ class ArrayWeighting(Weighting): during initialization. """ - def __init__(self, array, impl, exponent=2.0): + def __init__(self, array, impl, device, exponent=2.0): """Initialize a new instance. Parameters @@ -475,20 +550,25 @@ def __init__(self, array, impl, exponent=2.0): array : `array-like` Weighting array of inner product, norm and distance. Native `Tensor` instances are stored as-is without copying. + Do not pass an ODL-space-element here. If you want to use such + an element, use its contained `data` instead. impl : string Specifier for the implementation backend. + device : + device identifier, compatible with the backend associated with `impl` exponent : positive float, optional Exponent of the norm. For values other than 2.0, the inner product is not defined. """ - super(ArrayWeighting, self).__init__(impl=impl, exponent=exponent) + super(ArrayWeighting, self).__init__(impl=impl, device=device, exponent=exponent) # We apply array duck-typing to allow all kinds of Numpy-array-like # data structures without change array_attrs = ('shape', 'dtype', 'itemsize') - if (all(hasattr(array, attr) for attr in array_attrs) and - not isinstance(array, TensorSpace)): + if (all(hasattr(array, attr) for attr in array_attrs)): self.__array = array + # TODO add a check that the array is compatible with the `impl`, and if not either + # convert it or raise an error. This should be done using Python Array API features. else: raise TypeError('`array` {!r} does not look like a valid array' ''.format(array)) @@ -498,6 +578,33 @@ def array(self): """Weighting array of this instance.""" return self.__array + @property + def weight(self): + """Weighting array of this instance.""" + return self.array + + @property + def shape(self): + """Arrays measured by this weighting must have the same shape as the + weighting array itself.""" + return self.array.shape + + def to_device(self, device): + _, backend = get_array_and_backend(self.array) + return ArrayWeighting(array = backend.to_device(self.array, device=device), impl=self.impl, device=device, exponent=self.exponent) + + def to_impl(self, impl): + new_backend = lookup_array_backend(impl) + new_array = new_backend.array_namespace.from_dlpack(self.array) + + # TODO the following is likely to fail in case e.g. torch-cuda is sent to 'numpy'. + # It is required to first use `to_device('cpu')`, then `to_impl`. + # It would be useful to add a device argument that allows changing backend and device in + # one step. This is currently hampered by missing `device` argument to `from_dlpack` in Torch. + assert(new_array.device == self.device) + + return ArrayWeighting(array=new_array, impl=impl, device=self.device, exponent=self.exponent) + def is_valid(self): """Return True if the array is a valid weight, i.e. positive.""" return np.all(np.greater(self.array, 0)) @@ -519,7 +626,7 @@ def __eq__(self, other): return True return (super(ArrayWeighting, self).__eq__(other) and - self.array is getattr(other, 'array', None)) + odl_all_equal(self.array, other.array)) def __hash__(self): """Return ``hash(self)``.""" @@ -539,17 +646,17 @@ def equiv(self, other): by entry-wise comparison of arrays/constants. """ # Optimization for equality - if self == other: - return True - elif (not isinstance(other, Weighting) or + if (not isinstance(other, Weighting) or self.exponent != other.exponent): return False elif isinstance(other, MatrixWeighting): return other.equiv(self) elif isinstance(other, ConstWeighting): - return np.array_equiv(self.array, other.const) + # return np.array_equiv(self.array, other.const) + return odl_all_equal(self.array, other.const) else: - return np.array_equal(self.array, other.array) + # return np.array_equal(self.array, other.array) + return odl_all_equal(self.array, other.array) @property def repr_part(self): @@ -571,12 +678,57 @@ def __str__(self): """Return ``str(self)``.""" return repr(self) + def norm(self, x): + """Return the weighted norm of ``x``. + + Parameters + ---------- + x : ArrayLike + Tensor whose norm is calculated. + + Returns + ------- + norm : float + The norm of the provided tensor. + """ + if self.exponent == 2.0: + norm_squared = self.inner(x, x).real # TODO: optimize?! + if norm_squared < 0: + norm_squared = 0.0 # Compensate for numerical error + return float(np.sqrt(norm_squared)) + else: + return float(_pnorm_diagweight(x, self.exponent, self.array)) + + def inner(self, x1, x2): + """Return the weighted inner product of ``x1`` and ``x2``. + + Parameters + ---------- + x1, x2 : ArrayLike + Tensors whose inner product is calculated. + + Returns + ------- + inner : float or complex + The inner product of the two provided vectors. + """ + if self.exponent != 2.0: + raise NotImplementedError('no inner product defined for ' + 'exponent != 2 (got {})' + ''.format(self.exponent)) + else: + inner = _inner_default(x1 * self.array, x2) + if is_real_dtype(x1.dtype): + return float(inner) + else: + return complex(inner) + class ConstWeighting(Weighting): """Weighting of a space by a constant.""" - def __init__(self, const, impl, exponent=2.0): + def __init__(self, const, impl, device, exponent=2.0): """Initialize a new instance. Parameters @@ -585,11 +737,13 @@ def __init__(self, const, impl, exponent=2.0): Weighting constant of the inner product. impl : string Specifier for the implementation backend. + device : + device identifier, compatible with the backend associated with `impl` exponent : positive float, optional Exponent of the norm. For values other than 2.0, the inner product is not defined. """ - super(ConstWeighting, self).__init__(impl=impl, exponent=exponent) + super(ConstWeighting, self).__init__(impl=impl, device=device, exponent=exponent) self.__const = float(const) if self.const <= 0: @@ -603,6 +757,22 @@ def const(self): """Weighting constant of this inner product.""" return self.__const + @property + def weight(self): + """Weighting constant of this instance.""" + return self.const + + @property + def shape(self): + """A constant weight can be applied to any shape.""" + return () + + def to_device(self, device): + return ConstWeighting(const = self.const, impl=self.impl, device=device, exponent=self.exponent) + + def to_impl(self, impl): + return ConstWeighting(const = self.const, impl=impl, device=self.device, exponent=self.exponent) + def __eq__(self, other): """Return ``self == other``. @@ -658,12 +828,74 @@ def __str__(self): """Return ``str(self)``.""" return repr(self) + def inner(self, x1, x2): + """Return the weighted inner product of ``x1`` and ``x2``. + + Parameters + ---------- + x1, x2 : ArrayLike + Tensors whose inner product is calculated. + + Returns + ------- + inner : float or complex + The inner product of the two provided tensors. + """ + if self.exponent != 2.0: + raise NotImplementedError('no inner product defined for ' + 'exponent != 2 (got {})' + ''.format(self.exponent)) + else: + return self.const * _inner_default(x1, x2) + + def norm(self, x): + """Return the weighted norm of ``x``. + + Parameters + ---------- + x1 : ArrayLike + Tensor whose norm is calculated. + + Returns + ------- + norm : float + The norm of the tensor. + """ + if self.exponent == 2.0: + return float(np.sqrt(self.const) * _norm_default(x)) + elif self.exponent == float('inf'): + return float(self.const * _pnorm_default(x, self.exponent)) + else: + return float((self.const ** (1 / self.exponent) * + _pnorm_default(x, self.exponent))) + + def dist(self, x1, x2): + """Return the weighted distance between ``x1`` and ``x2``. + + Parameters + ---------- + x1, x2 : `NumpyTensor` + Tensors whose mutual distance is calculated. + + Returns + ------- + dist : float + The distance between the tensors. + """ + if self.exponent == 2.0: + return float(np.sqrt(self.const) * _norm_default(x1 - x2)) + elif self.exponent == float('inf'): + return float(self.const * _pnorm_default(x1 - x2, self.exponent)) + else: + return float((self.const ** (1 / self.exponent) * + _pnorm_default(x1 - x2, self.exponent))) + class CustomInner(Weighting): """Class for handling a user-specified inner product.""" - def __init__(self, inner, impl): + def __init__(self, inner, impl, device, shape=()): """Initialize a new instance. Parameters @@ -681,14 +913,27 @@ def __init__(self, inner, impl): impl : string Specifier for the implementation backend. + device : + device identifier, compatible with the backend associated with `impl` + shape : + what shape array need to have to be processed by this weighting. + The `inner` callable can assume that the shape has already been checked. + If an empty shape is specified (the default), `inner` should be able to + handle arrays of arbitrary shape. """ - super(CustomInner, self).__init__(impl=impl, exponent=2.0) + super(CustomInner, self).__init__(impl=impl, device=device, exponent=2.0) + + self.__shape = shape if not callable(inner): raise TypeError('`inner` {!r} is not callable' ''.format(inner)) self.__inner = inner + @property + def shape(self): + return self.__shape + @property def inner(self): """Custom inner product of this instance..""" @@ -731,7 +976,7 @@ class CustomNorm(Weighting): Note that this removes ``inner``. """ - def __init__(self, norm, impl): + def __init__(self, norm, impl, device, shape=()): """Initialize a new instance. Parameters @@ -748,14 +993,27 @@ def __init__(self, norm, impl): - ``||x + y|| <= ||x|| + ||y||`` impl : string Specifier for the implementation backend + device : + device identifier, compatible with the backend associated with `impl` + shape : + what shape array need to have to be processed by this weighting. + The `norm` callable can assume that the shape has already been checked. + If an empty shape is specified (the default), `norm` should be able to + handle arrays of arbitrary shape. """ - super(CustomNorm, self).__init__(impl=impl, exponent=1.0) + super(CustomNorm, self).__init__(impl=impl, device=device, exponent=1.0) + + self.__shape = shape if not callable(norm): raise TypeError('`norm` {!r} is not callable' ''.format(norm)) self.__norm = norm + @property + def shape(self): + return self.__shape + def inner(self, x1, x2): """Inner product is not defined for custom distance.""" raise NotImplementedError('`inner` not defined for custom norm') @@ -803,7 +1061,7 @@ class CustomDist(Weighting): Note that this removes ``inner`` and ``norm``. """ - def __init__(self, dist, impl): + def __init__(self, dist, impl, device, shape=()): """Initialize a new instance. Parameters @@ -820,14 +1078,27 @@ def __init__(self, dist, impl): - ``dist(x, y) <= dist(x, z) + dist(z, y)`` impl : string Specifier for the implementation backend + device : + device identifier, compatible with the backend associated with `impl` + shape : + what shape array need to have to be processed by this weighting. + The `dist` callable can assume that the shape has already been checked. + If an empty shape is specified (the default), `dist` should be able to + handle arrays of arbitrary shape. """ - super(CustomDist, self).__init__(impl=impl, exponent=1.0) + super(CustomDist, self).__init__(impl=impl, device=device, exponent=1.0) + + self.__shape = shape if not callable(dist): raise TypeError('`dist` {!r} is not callable' ''.format(dist)) self.__dist = dist + @property + def shape(self): + return self.__shape + @property def dist(self): """Custom distance of this instance..""" @@ -872,5 +1143,5 @@ def __repr__(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/core/sparse/__init__.py b/odl/core/sparse/__init__.py new file mode 100644 index 00000000000..a169d994406 --- /dev/null +++ b/odl/core/sparse/__init__.py @@ -0,0 +1 @@ +from .sparse_matrix import * diff --git a/odl/core/sparse/sparse_matrix.py b/odl/core/sparse/sparse_matrix.py new file mode 100644 index 00000000000..0d3a049f8b9 --- /dev/null +++ b/odl/core/sparse/sparse_matrix.py @@ -0,0 +1,75 @@ + +from odl.core.sparse.sparse_template import SparseMatrixFormat, _registered_sparse_formats + + +from typing import Optional + + +IS_INITIALIZED = False + +def _initialize_if_needed(): + """Initialize ``_registered_sparse_formats`` if not already done.""" + global IS_INITIALIZED + if not IS_INITIALIZED: + import odl.backends.sparse.scipy_backend + import importlib.util + torch_module = importlib.util.find_spec("torch") + if torch_module is not None: + try: + import odl.backends.sparse.pytorch_backend + except ModuleNotFoundError: + pass + IS_INITIALIZED = True + +class SparseMatrix(): + """ + SparseMatrix is the ODL interface to the sparse Matrix supports in different backends. + + Note: + The user is responsible for using the *args and **kwargs expected by the respective backends: + Pytorch: + -> COO: https://docs.pytorch.org/docs/stable/generated/torch.sparse_coo_tensor.html + Scipy: + -> COO: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix. + + Examples: + SparseMatrix('COO', 'pytorch', + [[0, 1, 1],[2, 0, 2]], [3, 4, 5], + device='cuda:0') + SparseMatrix('COO', 'scipy', + (3, 4)) + """ + def __new__(cls, format:str, impl:str, *args, **kwargs): + + _initialize_if_needed() + + sparse_impl = _registered_sparse_formats[impl][format] + + return sparse_impl.constructor(*args, **kwargs) + +def lookup_sparse_format(matrix: object) -> Optional[SparseMatrixFormat]: + _initialize_if_needed() + for sp_bkend in _registered_sparse_formats.values(): + for sp_fmt in sp_bkend.values(): + if sp_fmt.is_of_this_sparse_format(matrix): + return sp_fmt + return None + +def is_sparse(matrix): + return (lookup_sparse_format(matrix) is not None) + +def get_sparse_matrix_impl(matrix): + instance = lookup_sparse_format(matrix) + assert instance is not None, 'The matrix is not a supported sparse matrix' + return instance.impl + +def get_sparse_matrix_format(matrix): + instance = lookup_sparse_format(matrix) + assert instance is not None, 'The matrix is not a supported sparse matrix' + return instance.sparse_format + +if __name__ == '__main__': + print(SparseMatrix('COO', 'pytorch', + [[0, 1, 1],[2, 0, 2]], [3, 4, 5], + device='cuda:0')) + print(SparseMatrix('COO', 'scipy', (3, 4))) diff --git a/odl/core/sparse/sparse_template.py b/odl/core/sparse/sparse_template.py new file mode 100644 index 00000000000..39575559f90 --- /dev/null +++ b/odl/core/sparse/sparse_template.py @@ -0,0 +1,19 @@ +from dataclasses import dataclass +from typing import Callable + +_registered_sparse_formats = {} + +@dataclass +class SparseMatrixFormat: + sparse_format : str + impl : str + constructor : Callable + is_of_this_sparse_format : Callable[[object], bool] + to_dense : Callable + matmul_spmatrix_with_vector : Callable + def __post_init__(self): + if self.impl not in _registered_sparse_formats: + _registered_sparse_formats[self.impl] = {} + if self.sparse_format in _registered_sparse_formats[self.impl]: + raise KeyError(f"A {self.sparse_format} sparse format for backend {self.impl} is already registered. Every sparse format needs to have a unique identifier combination.") + _registered_sparse_formats[self.impl][self.sparse_format] = self diff --git a/odl/util/__init__.py b/odl/core/util/__init__.py similarity index 80% rename from odl/util/__init__.py rename to odl/core/util/__init__.py index 2f261bfab28..b7068fbb15c 100644 --- a/odl/util/__init__.py +++ b/odl/core/util/__init__.py @@ -18,6 +18,9 @@ from .utility import * from .vectorization import * from .sparse import * +from .scipy_compatibility import * +from .dtype_utils import * +from .print_utils import * __all__ = () __all__ += graphics.__all__ @@ -28,3 +31,6 @@ __all__ += utility.__all__ __all__ += vectorization.__all__ __all__ += sparse.__all__ +__all__ += scipy_compatibility.__all__ +__all__ += dtype_utils.__all__ +__all__ += print_utils.__all__ diff --git a/odl/core/util/dtype_utils.py b/odl/core/util/dtype_utils.py new file mode 100644 index 00000000000..d8e1a905a31 --- /dev/null +++ b/odl/core/util/dtype_utils.py @@ -0,0 +1,238 @@ +# This is an attempt to progressively tidy the 'utility.py' module, which is little more than a heap of unstable/unsupported code waiting to crumble. + +# Python imports +from numbers import Number +from functools import lru_cache +# Third-Party import +import array_api_compat as xp +# ODL imports +from odl.core.array_API_support import ArrayBackend, lookup_array_backend +from odl.core.array_API_support.utils import _registered_array_backends +from typing import Optional + +__all__ = ( + 'is_available_dtype', + 'is_numeric_dtype', + 'is_boolean_dtype', + 'is_int_dtype', + 'is_signed_int_dtype', + 'is_unsigned_int_dtype', + 'is_floating_dtype', + 'is_real_floating_dtype', + 'is_complex_dtype', + 'is_real_dtype', + 'real_dtype', + 'complex_dtype' +) + +############################# DATA TYPES ############################# +# We store all the data types expected by the python array API as lists, and the maps for conversion as dicts +BOOLEAN_DTYPES = [ + "bool" + ] + +SIGNED_INTEGER_DTYPES = [ + "int8", + "int16", + "int32", + "int64", +] +UNSIGNED_INTEGER_DTYPES = [ + "uint8", + "uint16", + "uint32", + "uint64" +] + +INTEGER_DTYPES = SIGNED_INTEGER_DTYPES + UNSIGNED_INTEGER_DTYPES + +FLOAT_DTYPES = [ + "float32", + "float64" +] + +COMPLEX_DTYPES = [ + "complex64", + "complex128" +] + +REAL_DTYPES = INTEGER_DTYPES + FLOAT_DTYPES +SCALAR_DTYPES = REAL_DTYPES + COMPLEX_DTYPES +AVAILABLE_DTYPES = BOOLEAN_DTYPES + REAL_DTYPES + COMPLEX_DTYPES + +""" +See type promotion rules https://data-apis.org/array-api/latest/API_specification/type_promotion.html#type-promotion +""" + +TYPE_PROMOTION_REAL_TO_COMPLEX = { + "int8" : "complex64", + "int16" : "complex64", + "int32" : "complex64", + "int64" : "complex64", + "uint8" : "complex64", + "uint16" : "complex64", + "uint32" : "complex128", + "uint64" : "complex128", + "float32" : "complex64", + "float64" : "complex128" +} + +TYPE_PROMOTION_COMPLEX_TO_REAL = { + "complex64" : "float32", + "complex128" : "float64" +} + +DTYPE_SHORTHANDS = { + bool: 'bool', + int: 'int32', + float: 'float64', + complex: 'complex128' +} + +# These dicts should not be exposed to the users/developpers outside of the module. We rather provide functions that rely on the available array_backends present +def _universal_dtype_identifier(dtype: "str | Number |xp.dtype", array_backend_selection: list[ArrayBackend]=None) -> str : + """ + Internal helper function to convert a dtype to a backend-agnostic string identifying it semantically. + (E.g. `'int32'` and `'int64'` and `'float64'` are all possible distinct results, but `np.float64` and + `torch.float64` and `float` all map to the unique identifier `'float64'`.) + ambiguity + The dtype can be provided as a string, a python Number or as an xp.dtype. + Returns: + dtype_as_str (str), dtype identifier + Note: + xp is written here for type hinting, it refers to the fact that the dtype can be provided as a np.float32 or as a torchfloat32, for instance. + What concrete types of dtype are allowed is determined by `array_backend_selection`. + If that argument is not provided, all registered backends are taken into consideration. + """ + # Lazy import + from odl.core.space.entry_points import TENSOR_SPACE_IMPLS + + original_dtype = dtype + shorthand_elaboration = "" + if dtype in DTYPE_SHORTHANDS: + dtype = DTYPE_SHORTHANDS[dtype] + shorthand_elaboration = " (shorthand for {dtype})" + + if isinstance(dtype, (str, Number, type)): + if dtype in AVAILABLE_DTYPES: + return dtype + else: + raise TypeError(f'The provided dtype {original_dtype}{shorthand_elaboration} is not available. Please use a dtype in {AVAILABLE_DTYPES}') + if array_backend_selection is None: + array_backends = _registered_array_backends.values() + else: + array_backends = array_backend_selection + for array_backend in array_backends: + if dtype in array_backend.available_dtypes.values(): + return array_backend.identifier_of_dtype(dtype) + raise ValueError(f'The provided dtype {dtype} is not a string, a python Number or a backend-specific dtype of {[be.impl for be in array_backends]}. Please provide either of these.') + +@lru_cache +def is_available_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is available.""" + try: + _universal_dtype_identifier(dtype) + return True + except ValueError or AssertionError: + return False + +@lru_cache +def is_numeric_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is a numeric type.""" + return _universal_dtype_identifier(dtype) in SCALAR_DTYPES + +@lru_cache +def is_boolean_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is an boolean type.""" + return _universal_dtype_identifier(dtype) in BOOLEAN_DTYPES + +@lru_cache +def is_signed_int_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is an integer type.""" + return _universal_dtype_identifier(dtype) in SIGNED_INTEGER_DTYPES + +@lru_cache +def is_unsigned_int_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is an integer type.""" + return _universal_dtype_identifier(dtype) in UNSIGNED_INTEGER_DTYPES + +@lru_cache +def is_int_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is an integer type.""" + return _universal_dtype_identifier(dtype) in INTEGER_DTYPES + +@lru_cache +def is_floating_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is a floating point type.""" + return _universal_dtype_identifier(dtype) in FLOAT_DTYPES + COMPLEX_DTYPES + +@lru_cache +def is_real_floating_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is a floating point type.""" + return _universal_dtype_identifier(dtype) in FLOAT_DTYPES + +@lru_cache +def is_complex_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is a complex type.""" + return _universal_dtype_identifier(dtype) in COMPLEX_DTYPES + +@lru_cache +def is_real_dtype(dtype: "str | Number |xp.dtype") -> bool: + """Return ``True`` if ``dtype`` is a real (including integer) type.""" + return _universal_dtype_identifier(dtype) in REAL_DTYPES + +def real_dtype(dtype: "str | Number |xp.dtype", default=None, backend: Optional[ArrayBackend] =None) -> str: + """ + Returns the real counterpart of ``dtype`` if it exists + Parameters + ---------- + dtype : + Input dtype + default : + Object to be returned if no real counterpart is found for + ``dtype``, except for ``None``, in which case an error is raised. + backend : + If given, the result dtype will be returned in its version + specific to that backend (e.g. `torch.float32`), otherwise as a plain string. + """ + dtype = _universal_dtype_identifier(dtype) + def for_backend(dt): + if backend is None: + return dt + else: + try: + return backend.available_dtypes[dt] + except KeyError: + raise ValueError(f"Real version of {dtype} not available on {backend}.") + if dtype in REAL_DTYPES: + return for_backend(dtype) + elif dtype in COMPLEX_DTYPES: + return for_backend(TYPE_PROMOTION_COMPLEX_TO_REAL[dtype]) + else: + if default is None: + raise ValueError( + f"no real counterpart exists for `dtype` {dtype}") + else: + return default + +def complex_dtype(dtype: "str | Number |xp.dtype", default=None, backend: Optional[ArrayBackend] =None) -> str: + dtype = _universal_dtype_identifier(dtype) + def for_backend(dt): + if backend is None: + return dt + else: + try: + return backend.available_dtypes[dt] + except KeyError: + raise ValueError(f"Complex version of {dtype} not available on {backend}.") + if dtype in COMPLEX_DTYPES: + return for_backend(dtype) + elif dtype in REAL_DTYPES: + return for_backend(TYPE_PROMOTION_REAL_TO_COMPLEX[dtype]) + else: + if default is None: + raise ValueError( + f"no complex counterpart exists for `dtype` {dtype}") + else: + return default + diff --git a/odl/util/graphics.py b/odl/core/util/graphics.py similarity index 98% rename from odl/util/graphics.py rename to odl/core/util/graphics.py index 98b70c884f3..738b3b44b81 100644 --- a/odl/util/graphics.py +++ b/odl/core/util/graphics.py @@ -12,9 +12,9 @@ import numpy as np import warnings -from odl.util.testutils import run_doctests -from odl.util.utility import is_real_dtype - +from odl.core.util.testutils import run_doctests +from odl.core.util.dtype_utils import is_real_dtype +from odl.core.array_API_support import get_array_and_backend __all__ = ('show_discrete_data',) @@ -236,6 +236,8 @@ def show_discrete_data(values, grid, title=None, method='', method = 'imshow' if method == 'imshow': + values, array_backend = get_array_and_backend(values) + values = array_backend.to_numpy(values) args_re = [np.rot90(values.real)] args_im = [np.rot90(values.imag)] if values_are_complex else [] diff --git a/odl/util/normalize.py b/odl/core/util/normalize.py similarity index 99% rename from odl/util/normalize.py rename to odl/core/util/normalize.py index 27984f0640a..a087d15403c 100644 --- a/odl/util/normalize.py +++ b/odl/core/util/normalize.py @@ -372,5 +372,5 @@ def safe_int_conv(number): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/util/npy_compat.py b/odl/core/util/npy_compat.py similarity index 96% rename from odl/util/npy_compat.py rename to odl/core/util/npy_compat.py index f9cfdceaa3f..3922d2d4f9a 100644 --- a/odl/util/npy_compat.py +++ b/odl/core/util/npy_compat.py @@ -28,5 +28,5 @@ __all__ = ("AVOID_UNNECESSARY_COPY",) if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/util/numerics.py b/odl/core/util/numerics.py similarity index 81% rename from odl/util/numerics.py rename to odl/core/util/numerics.py index d5f59fbb67b..383e7ce2c32 100644 --- a/odl/util/numerics.py +++ b/odl/core/util/numerics.py @@ -11,7 +11,9 @@ from __future__ import absolute_import, division, print_function import numpy as np -from odl.util.normalize import normalized_scalar_param_list, safe_int_conv +from odl.core.util.normalize import normalized_scalar_param_list, safe_int_conv +from odl.core.util.dtype_utils import real_dtype +from odl.core.array_API_support.utils import ArrayBackend, get_array_and_backend __all__ = ( 'apply_on_boundary', @@ -100,7 +102,7 @@ def apply_on_boundary(array, func, only_once=True, which_boundaries=None, >>> result is out True """ - array = np.asarray(array) + array, backend = get_array_and_backend(array) if callable(func): func = [func] * array.ndim @@ -121,7 +123,9 @@ def apply_on_boundary(array, func, only_once=True, which_boundaries=None, ''.format(len(axis_order), array.ndim)) if out is None: - out = array.copy() + out = backend.array_constructor( + array, copy=True + ) else: out[:] = array # Self assignment is free, in case out is array @@ -227,9 +231,12 @@ def fast_1d_tensor_mult(ndarr, onedim_arrs, axes=None, out=None): Result of the modification. If ``out`` was given, the returned object is a reference to it. """ + ndarr, backend = get_array_and_backend(ndarr) + device = ndarr.device if out is None: - out = np.array(ndarr, copy=True) + out = backend.array_constructor(ndarr, copy=True, device=device) else: + assert out.device == device, f'The input and out arguments are on different devices : {out.device} and {device}' out[:] = ndarr # Self-assignment is free if out is ndarr if not onedim_arrs: @@ -256,17 +263,20 @@ def fast_1d_tensor_mult(ndarr, onedim_arrs, axes=None, out=None): if any(a.ndim != 1 for a in alist): raise ValueError('only 1d arrays allowed') - if len(axes) < out.ndim: + if True:#len(axes) < out.ndim: # Make big factor array (start with 0d) - factor = np.array(1.0) + factor = backend.array_constructor(1.0, device=device) for ax, arr in zip(axes, alist): # Meshgrid-style slice slc = [None] * out.ndim slc[ax] = slice(None) - factor = factor * arr[tuple(slc)] + factor = factor * backend.array_constructor( + arr[tuple(slc)], device=device + ) out *= factor + # this seems to be for performance, we have disabled it to make progress and will adress it later :-) else: # Hybrid approach @@ -276,21 +286,25 @@ def fast_1d_tensor_mult(ndarr, onedim_arrs, axes=None, out=None): last_arr = alist[axes.index(last_ax)] # Build the semi-big array and multiply - factor = np.array(1.0) + factor = backend.array_constructor(1.0, device=device) for ax, arr in zip(axes, alist): if ax == last_ax: continue slc = [None] * out.ndim slc[ax] = slice(None) - factor = factor * arr[tuple(slc)] + factor = factor * backend.array_constructor( + arr[tuple(slc)], device=device + ) out *= factor # Finally multiply by the remaining 1d array slc = [None] * out.ndim slc[last_ax] = slice(None) - out *= last_arr[tuple(slc)] + out *= backend.array_constructor( + last_arr[tuple(slc)], device=device + ) return out @@ -367,36 +381,36 @@ def resize_array(arr, newshp, offset=None, pad_mode='constant', pad_const=0, the right side. That behavior can be changed with the ``offset`` parameter: - >>> from odl.util.numerics import resize_array - >>> resize_array([1, 2, 3], (1,)) + >>> from odl.core.util.numerics import resize_array + >>> resize_array(np.array([1, 2, 3]), (1,)) array([1]) - >>> resize_array([1, 2, 3], (1,), offset=2) + >>> resize_array(np.array([1, 2, 3]), (1,), offset=2) array([3]) - >>> resize_array([1, 2, 3], (6,)) + >>> resize_array(np.array([1, 2, 3]), (6,)) array([1, 2, 3, 0, 0, 0]) - >>> resize_array([1, 2, 3], (7,), offset=2) + >>> resize_array(np.array([1, 2, 3]), (7,), offset=2) array([0, 0, 1, 2, 3, 0, 0]) The padding constant can be changed, as well as the padding mode: - >>> resize_array([1, 2, 3], (7,), pad_const=-1, offset=2) + >>> resize_array(np.array([1, 2, 3]), (7,), pad_const=-1, offset=2) array([-1, -1, 1, 2, 3, -1, -1]) - >>> resize_array([1, 2, 3], (7,), pad_mode='periodic', offset=2) + >>> resize_array(np.array([1, 2, 3]), (7,), pad_mode='periodic', offset=2) array([2, 3, 1, 2, 3, 1, 2]) - >>> resize_array([1, 2, 3], (7,), pad_mode='symmetric', offset=2) + >>> resize_array(np.array([1, 2, 3]), (7,), pad_mode='symmetric', offset=2) array([3, 2, 1, 2, 3, 2, 1]) - >>> resize_array([1, 2, 3], (7,), pad_mode='order0', offset=2) + >>> resize_array(np.array([1, 2, 3]), (7,), pad_mode='order0', offset=2) array([1, 1, 1, 2, 3, 3, 3]) - >>> resize_array([1, 2, 3], (7,), pad_mode='order1', offset=2) + >>> resize_array(np.array([1, 2, 3]), (7,), pad_mode='order1', offset=2) array([-1, 0, 1, 2, 3, 4, 5]) Everything works for arbitrary number of dimensions: >>> # Take the middle two columns and extend rows symmetrically - >>> resize_array([[1, 2, 3, 4], - ... [5, 6, 7, 8], - ... [9, 10, 11, 12]], + >>> resize_array(np.array([[1, 2, 3, 4], + ... [5, 6, 7, 8], + ... [9, 10, 11, 12]]), ... (5, 2), pad_mode='symmetric', offset=[1, 1]) array([[ 6, 7], [ 2, 3], @@ -405,9 +419,10 @@ def resize_array(arr, newshp, offset=None, pad_mode='constant', pad_const=0, [ 6, 7]]) >>> # Take the rightmost two columns and extend rows symmetrically >>> # downwards - >>> resize_array([[1, 2, 3, 4], - ... [5, 6, 7, 8], - ... [9, 10, 11, 12]], (5, 2), pad_mode='symmetric', + >>> resize_array(np.array([[1, 2, 3, 4], + ... [5, 6, 7, 8], + ... [9, 10, 11, 12]]), + ... (5, 2), pad_mode='symmetric', ... offset=[0, 2]) array([[ 3, 4], [ 7, 8], @@ -422,22 +437,19 @@ def resize_array(arr, newshp, offset=None, pad_mode='constant', pad_const=0, raise TypeError('`newshp` must be a sequence, got {!r}'.format(newshp)) if out is not None: - if not isinstance(out, np.ndarray): - raise TypeError('`out` must be a `numpy.ndarray` instance, got ' - '{!r}'.format(out)) if out.shape != newshp: raise ValueError('`out` must have shape {}, got {}' ''.format(newshp, out.shape)) + out, backend = get_array_and_backend(out) - order = 'C' if out.flags.c_contiguous else 'F' - arr = np.asarray(arr, dtype=out.dtype, order=order) + arr = backend.array_constructor(arr, dtype=out.dtype) if arr.ndim != out.ndim: raise ValueError('number of axes of `arr` and `out` do not match ' '({} != {})'.format(arr.ndim, out.ndim)) else: - arr = np.asarray(arr) - order = 'C' if arr.flags.c_contiguous else 'F' - out = np.empty(newshp, dtype=arr.dtype, order=order) + arr, backend = get_array_and_backend(arr) + out = backend.array_namespace.empty(newshp, dtype=arr.dtype) + if len(newshp) != arr.ndim: raise ValueError('number of axes of `arr` and `len(newshp)` do ' 'not match ({} != {})' @@ -458,13 +470,13 @@ def resize_array(arr, newshp, offset=None, pad_mode='constant', pad_const=0, if (pad_mode == 'constant' and any(n_new > n_orig for n_orig, n_new in zip(arr.shape, out.shape))): - try: - pad_const_scl = np.array([pad_const], out.dtype) - assert(pad_const_scl == np.array([pad_const])) - except Exception as e: - raise ValueError('`pad_const` {} cannot be safely cast to the data ' - 'type {} of the output array' - ''.format(pad_const, out.dtype)) + + if isinstance(pad_const, backend.array_type): + pad_const_scl = pad_const.reshape([]) + else: + pad_const_scl = backend.array_constructor([pad_const], dtype=out.dtype) + if pad_const_scl != pad_const: + raise ValueError(f"Padding constant {pad_const} cannot be safely converted to {out.dtype}.") # Handle direction direction, direction_in = str(direction).lower(), direction @@ -477,9 +489,9 @@ def resize_array(arr, newshp, offset=None, pad_mode='constant', pad_const=0, "got {}".format(pad_const)) if direction == 'forward' and pad_mode == 'constant' and pad_const != 0: - out.fill(pad_const) + out.fill(pad_const) if backend.impl in ['numpy'] else out.fill_(pad_const) else: - out.fill(0) + out.fill(0) if backend.impl in ['numpy'] else out.fill_(0) # Perform the resizing if direction == 'forward': @@ -497,7 +509,11 @@ def resize_array(arr, newshp, offset=None, pad_mode='constant', pad_const=0, else: # Apply adjoint padding to a copy of the input and copy the inner # part when finished - tmp = arr.copy() + # TODO (Justus) copying to a temporary is inefficient and largely + # defeats the point of using in-place updates. This could be avoided + # by changin `_apply_padding` to read data from its RHS, and writing + # directly to `out`. + tmp = backend.array_constructor(arr, copy=True) _apply_padding(tmp, out, offset, pad_mode, 'adjoint') _assign_intersection(out, tmp, offset) @@ -610,6 +626,59 @@ def _padding_slices_inner(lhs_arr, rhs_arr, axis, offset, pad_mode): return pad_slc_l, pad_slc_r +def _flip_slice(slc: slice) -> slice: + """Turn around a slice, so that `arr[_flip_slice(slc)] == arr[slc].flip`. + Only confirmed to work correctly for slices with step +1 or -1 (which is + the case for all the slices used in this module). Probably would not work + for general step sizes.""" + step = -1 if slc.step is None else -slc.step + if slc.start is None: + assert(step < 0) + slc = slice(0, slc.stop, slc.step) + if slc.start == -1 and slc.stop != -1: + stop = None + else: + stop = slc.start+step if slc.start>=-step or slc.start==slc.stop else None + if slc.stop is None: + if step < 0: + return slice(-1, stop, step) + else: + return slice(0, stop, step) + return slice(slc.stop+step, stop, step) + +def _slice_array_anystep(arr, slices: list[slice], backend: ArrayBackend): + """Workaround for PyTorch's current inability (https://github.com/pytorch/pytorch/issues/59786) + to perform slices with a negative step size.""" + if backend.impl in ['numpy','pytorch']: + posstep_slices = [] + flip_dims = [] + for i,slc in enumerate(slices): + if slc.step is not None and slc.step < 0: + posstep_slices.append(_flip_slice(slc)) + if slc.stop != slc.start: + flip_dims.append(i) + else: + posstep_slices.append(slc) + return backend.array_namespace.flip(arr[tuple(posstep_slices)], axis=flip_dims) + else: + return arr[slices] + +def _make_left_slice_positivestepped(lslc: slice, rslc: slice) -> tuple[slice, slice]: + """Flip the steps in both slices so that `lslc` has positive step. If that + is already the case, leave both as they are.""" + if lslc.step is not None and lslc.step < 0: + return (_flip_slice(lslc), _flip_slice(rslc)) + else: + return (lslc, rslc) + +def _make_left_slices_positivestepped(lslcs: tuple[slice, ...], rslcs: tuple[slice, ...] + ) -> tuple[tuple[slice, ...], tuple[slice, ...]]: + """Multi-slice version of `_make_left_slice_positivestepped`.""" + tweaked_slices = [_make_left_slice_positivestepped(lslc, rslc) + for lslc, rslc in zip(lslcs, rslcs)] + return ( tuple(lslc for lslc, _ in tweaked_slices) + , tuple(rslc for _, rslc in tweaked_slices) ) + def _apply_padding(lhs_arr, rhs_arr, offset, pad_mode, direction): """Apply padding to ``lhs_arr`` according to ``pad_mode``. @@ -625,6 +694,16 @@ def _apply_padding(lhs_arr, rhs_arr, offset, pad_mode, direction): """ if pad_mode not in ('periodic', 'symmetric', 'order0', 'order1'): return + + lhs_arr, lhs_backend = get_array_and_backend(lhs_arr) + rhs_arr, rhs_backend = get_array_and_backend(rhs_arr) + + assert lhs_backend == rhs_backend + backend = lhs_backend + + assert lhs_arr.device == rhs_arr.device + + ns = backend.array_namespace full_slc = [slice(None)] * lhs_arr.ndim intersec_slc, _ = _intersection_slice_tuples(lhs_arr, rhs_arr, offset) @@ -696,19 +775,26 @@ def _apply_padding(lhs_arr, rhs_arr, offset, pad_mode, direction): if direction == 'forward': rhs_slc_l[axis] = pad_slc_inner_l rhs_slc_r[axis] = pad_slc_inner_r + lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) - lhs_arr[lhs_slc_l] = lhs_arr[rhs_slc_l] - lhs_arr[lhs_slc_r] = lhs_arr[rhs_slc_r] + try: + lhs_arr[lhs_slc_l] = _slice_array_anystep(lhs_arr, rhs_slc_l, backend=backend) + lhs_arr[lhs_slc_r] = _slice_array_anystep(lhs_arr, rhs_slc_r, backend=backend) + except ValueError: + raise ValueError(f"Problem with slices {rhs_slc_l=}, {rhs_slc_r=} for {pad_mode=}") else: lhs_slc_l[axis] = pad_slc_inner_l lhs_slc_r[axis] = pad_slc_inner_r lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) - lhs_arr[lhs_slc_l] += lhs_arr[rhs_slc_l] - lhs_arr[lhs_slc_r] += lhs_arr[rhs_slc_r] + lhs_slc_pos, rhs_slc_adp = _make_left_slices_positivestepped(lhs_slc_l, rhs_slc_l) + lhs_arr[lhs_slc_pos] += _slice_array_anystep(lhs_arr, rhs_slc_adp, backend=backend) + + lhs_slc_pos, rhs_slc_adp = _make_left_slices_positivestepped(lhs_slc_r, rhs_slc_r) + lhs_arr[lhs_slc_pos] += _slice_array_anystep(lhs_arr, rhs_slc_adp, backend=backend) elif pad_mode == 'order0': # The `_padding_slices_inner` helper returns the slices for the @@ -730,10 +816,10 @@ def _apply_padding(lhs_arr, rhs_arr, offset, pad_mode, direction): lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) - lhs_arr[lhs_slc_l] += np.sum( + lhs_arr[lhs_slc_l] += ns.sum( lhs_arr[rhs_slc_l], axis=axis, keepdims=True, dtype=lhs_arr.dtype) - lhs_arr[lhs_slc_r] += np.sum( + lhs_arr[lhs_slc_r] += ns.sum( lhs_arr[rhs_slc_r], axis=axis, keepdims=True, dtype=lhs_arr.dtype) @@ -767,13 +853,17 @@ def _apply_padding(lhs_arr, rhs_arr, offset, pad_mode, direction): slope_slc_r[axis] = slice(right_slc.start - 1, right_slc.stop) slope_slc_r = tuple(slope_slc_r) + slope_dtype = backend.available_dtypes[real_dtype(lhs_arr.dtype)] + # The `np.arange`s, broadcast along `axis`, are used to create the # constant-slope continuation (forward) or to calculate the # first order moments (adjoint). - arange_l = np.arange(-n_pad_l, 0, - dtype=lhs_arr.dtype)[bcast_slc] - arange_r = np.arange(1, n_pad_r + 1, - dtype=lhs_arr.dtype)[bcast_slc] + arange_l = ns.arange(-n_pad_l, 0, + dtype=slope_dtype, + device=lhs_arr.device)[bcast_slc] + arange_r = ns.arange(1, n_pad_r + 1, + dtype=slope_dtype, + device=lhs_arr.device)[bcast_slc] lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r = map( tuple, [lhs_slc_l, rhs_slc_l, lhs_slc_r, rhs_slc_r]) @@ -781,33 +871,33 @@ def _apply_padding(lhs_arr, rhs_arr, offset, pad_mode, direction): if direction == 'forward': # Take first order difference to get the derivative # along `axis`. - slope_l = np.diff(lhs_arr[slope_slc_l], n=1, axis=axis) - slope_r = np.diff(lhs_arr[slope_slc_r], n=1, axis=axis) + slope_l = ns.diff(lhs_arr[slope_slc_l], n=1, axis=axis) + slope_r = ns.diff(lhs_arr[slope_slc_r], n=1, axis=axis) # Finally assign the constant slope values lhs_arr[lhs_slc_l] = lhs_arr[bdry_slc_l] + arange_l * slope_l lhs_arr[lhs_slc_r] = lhs_arr[bdry_slc_r] + arange_r * slope_r else: # Same as in 'order0' - lhs_arr[bdry_slc_l] += np.sum(lhs_arr[rhs_slc_l], + lhs_arr[bdry_slc_l] += ns.sum(lhs_arr[rhs_slc_l], axis=axis, keepdims=True, dtype=lhs_arr.dtype) - lhs_arr[bdry_slc_r] += np.sum(lhs_arr[rhs_slc_r], + lhs_arr[bdry_slc_r] += ns.sum(lhs_arr[rhs_slc_r], axis=axis, keepdims=True, dtype=lhs_arr.dtype) # Calculate the order 1 moments - moment1_l = np.sum(arange_l * lhs_arr[rhs_slc_l], + moment1_l = ns.sum(arange_l * lhs_arr[rhs_slc_l], axis=axis, keepdims=True, dtype=lhs_arr.dtype) - moment1_r = np.sum(arange_r * lhs_arr[rhs_slc_r], + moment1_r = ns.sum(arange_r * lhs_arr[rhs_slc_r], axis=axis, keepdims=True, dtype=lhs_arr.dtype) # Add moment1 at the "width-2 boundary layers", with the sign # corresponding to the sign in the derivative calculation # of the forward padding. - sign = np.array([-1, 1])[bcast_slc] + sign = backend.array_constructor([-1, 1], device=lhs_arr.device)[bcast_slc] lhs_arr[slope_slc_l] += moment1_l * sign lhs_arr[slope_slc_r] += moment1_r * sign @@ -967,5 +1057,5 @@ def binning(arr, bin_size, reduction=np.sum): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/util/utility.py b/odl/core/util/print_utils.py similarity index 67% rename from odl/util/utility.py rename to odl/core/util/print_utils.py index 65bf79b17a2..0fa8cfe7c5a 100644 --- a/odl/util/utility.py +++ b/odl/core/util/print_utils.py @@ -1,74 +1,40 @@ -# Copyright 2014-2020 The ODL contributors -# -# This file is part of ODL. -# -# This Source Code Form is subject to the terms of the Mozilla Public License, -# v. 2.0. If a copy of the MPL was not distributed with this file, You can -# obtain one at https://mozilla.org/MPL/2.0/. - -"""Utilities mainly for internal use.""" - -from __future__ import absolute_import, division, print_function -from future.moves.itertools import zip_longest - -import contextlib -from collections import OrderedDict +# Python imports +from itertools import zip_longest from contextlib import contextmanager -from itertools import product -from functools import lru_cache - +# ODL import +from odl.core.array_API_support.array_creation import asarray +from odl.core.array_API_support.utils import get_array_and_backend +from odl.core.util.dtype_utils import _universal_dtype_identifier +# Third-party import import numpy as np -__all__ = ( +__all__ = ( + 'is_string', + 'dtype_repr', + 'dtype_str', 'REPR_PRECISION', 'indent', 'dedent', 'npy_printoptions', 'array_str', - 'dtype_repr', - 'dtype_str', - 'is_numeric_dtype', - 'is_int_dtype', - 'is_floating_dtype', - 'is_real_dtype', - 'is_real_floating_dtype', - 'is_complex_floating_dtype', - 'real_dtype', - 'complex_dtype', - 'is_string', - 'nd_iterator', - 'conj_exponent', - 'nullcontext', - 'writable_array', 'signature_string', 'signature_string_parts', 'repr_string', 'attribute_repr_string', 'method_repr_string', - 'run_from_ipython', - 'npy_random_seed', - 'unique', ) -try: - SCTYPES = np.core.sctypes - assert isinstance(SCTYPES, dict) -except AttributeError: - # As of 29/04/25, we are aware that the module - # np.core might be removed in future versions. If that happens, the - # npy types will have to be queried in a different way. We advise to - # install the npy version listed in the odl/conda/meta.yaml - raise ImportError('You are using a numpy version that was not tested with ' \ - 'ODL. Please install the npy version listed in the odl/conda/meta.yaml') +def is_string(obj): + """Return ``True`` if ``obj`` behaves like a string, ``False`` else.""" + return isinstance(obj, str) -REPR_PRECISION = 4 # For printing scalars and array entries -TYPE_MAP_R2C = {np.dtype(dtype): np.result_type(dtype, 1j) - for dtype in SCTYPES['float']} +def dtype_repr(dtype): + return f"'{dtype_str(dtype)}'" -TYPE_MAP_C2R = {cdt: np.empty(0, dtype=cdt).real.dtype - for rdt, cdt in TYPE_MAP_R2C.items()} -TYPE_MAP_C2R.update({k: k for k in TYPE_MAP_R2C.keys()}) +def dtype_str(dtype): + return f"{_universal_dtype_identifier(dtype)}" +REPR_PRECISION = 4 def indent(string, indent_str=' '): """Return a copy of ``string`` indented by ``indent_str``. @@ -182,7 +148,6 @@ def num_indents(line): dedent_len = num_levels * len(indent_str) return '\n'.join(line[dedent_len:] for line in lines) - @contextmanager def npy_printoptions(**extra_opts): """Context manager to temporarily set NumPy print options. @@ -280,8 +245,9 @@ def array_str(a, nprint=6): >>> print(array_str((np.array([2.0]) ** 0.5) ** 2)) [ 2.] """ - a = np.asarray(a) - + a = asarray(a) + a, backend = get_array_and_backend(a) + a = backend.to_numpy(a) max_shape = tuple(n if n < nprint else nprint for n in a.shape) with npy_printoptions(threshold=int(np.prod(max_shape)), edgeitems=nprint // 2, @@ -290,335 +256,6 @@ def array_str(a, nprint=6): return a_str -def dtype_repr(dtype): - """Stringify ``dtype`` for ``repr`` with default for int and float.""" - dtype = np.dtype(dtype) - if dtype == np.dtype(int): - return "'int'" - elif dtype == np.dtype(float): - return "'float'" - elif dtype == np.dtype(complex): - return "'complex'" - elif dtype.shape: - return "('{}', {})".format(dtype.base, dtype.shape) - else: - return "'{}'".format(dtype) - - -def dtype_str(dtype): - """Stringify ``dtype`` for ``str`` with default for int and float.""" - dtype = np.dtype(dtype) - if dtype == np.dtype(int): - return 'int' - elif dtype == np.dtype(float): - return 'float' - elif dtype == np.dtype(complex): - return 'complex' - elif dtype.shape: - return "('{}', {})".format(dtype.base, dtype.shape) - else: - return '{}'.format(dtype) - - - -@lru_cache -def is_numeric_dtype(dtype): - """Return ``True`` if ``dtype`` is a numeric type.""" - dtype = np.dtype(dtype) - return np.issubdtype(getattr(dtype, 'base', None), np.number) - - -@lru_cache -def is_int_dtype(dtype): - """Return ``True`` if ``dtype`` is an integer type.""" - dtype = np.dtype(dtype) - return np.issubdtype(getattr(dtype, 'base', None), np.integer) - - -@lru_cache -def is_floating_dtype(dtype): - """Return ``True`` if ``dtype`` is a floating point type.""" - return is_real_floating_dtype(dtype) or is_complex_floating_dtype(dtype) - - -@lru_cache -def is_real_dtype(dtype): - """Return ``True`` if ``dtype`` is a real (including integer) type.""" - return is_numeric_dtype(dtype) and not is_complex_floating_dtype(dtype) - - -@lru_cache -def is_real_floating_dtype(dtype): - """Return ``True`` if ``dtype`` is a real floating point type.""" - dtype = np.dtype(dtype) - return np.issubdtype(getattr(dtype, 'base', None), np.floating) - - -@lru_cache -def is_complex_floating_dtype(dtype): - """Return ``True`` if ``dtype`` is a complex floating point type.""" - dtype = np.dtype(dtype) - return np.issubdtype(getattr(dtype, 'base', None), np.complexfloating) - - -def real_dtype(dtype, default=None): - """Return the real counterpart of ``dtype`` if existing. - - Parameters - ---------- - dtype : - Real or complex floating point data type. It can be given in any - way the `numpy.dtype` constructor understands. - default : - Object to be returned if no real counterpart is found for - ``dtype``, except for ``None``, in which case an error is raised. - - Returns - ------- - real_dtype : `numpy.dtype` - The real counterpart of ``dtype``. - - Raises - ------ - ValueError - if there is no real counterpart to the given data type and - ``default == None``. - - See Also - -------- - complex_dtype - - Examples - -------- - Convert scalar dtypes: - - >>> real_dtype(complex) - dtype('float64') - >>> real_dtype('complex64') - dtype('float32') - >>> real_dtype(float) - dtype('float64') - - Dtypes with shape are also supported: - - >>> real_dtype(np.dtype((complex, (3,)))) - dtype(('>> real_dtype(('complex64', (3,))) - dtype(('>> complex_dtype(float) - dtype('complex128') - >>> complex_dtype('float32') - dtype('complex64') - >>> complex_dtype(complex) - dtype('complex128') - - Dtypes with shape are also supported: - - >>> complex_dtype(np.dtype((float, (3,)))) - dtype(('>> complex_dtype(('float32', (3,))) - dtype(('>> for pt in nd_iterator([2, 2]): - ... print(pt) - (0, 0) - (0, 1) - (1, 0) - (1, 1) - """ - return product(*map(range, shape)) - - -def conj_exponent(exp): - """Conjugate exponent ``exp / (exp - 1)``. - - Parameters - ---------- - exp : positive float or inf - Exponent for which to calculate the conjugate. Must be - at least 1.0. - - Returns - ------- - conj : positive float or inf - Conjugate exponent. For ``exp=1``, return ``float('inf')``, - for ``exp=float('inf')`` return 1. In all other cases, return - ``exp / (exp - 1)``. - """ - if exp == 1.0: - return float('inf') - elif exp == float('inf'): - return 1.0 - else: - return exp / (exp - 1.0) - - -@contextmanager -def nullcontext(enter_result=None): - """Backport of the Python >=3.7 trivial context manager. - - See `the Python documentation - `_ - for details. - """ - try: - yield enter_result - finally: - pass - - -try: - nullcontext = contextlib.nullcontext -except AttributeError: - pass - - -@contextmanager -def writable_array(obj, **kwargs): - """Context manager that casts obj to a `numpy.array` and saves changes. - - Parameters - ---------- - obj : `array-like` - Object that should be made available as writable array. - It must be valid as input to `numpy.asarray` and needs to - support the syntax ``obj[:] = arr``. - kwargs : - Keyword arguments that should be passed to `numpy.asarray`. - - Examples - -------- - Convert list to array and use with numpy: - - >>> lst = [1, 2, 3] - >>> with writable_array(lst) as arr: - ... arr *= 2 - >>> lst - [2, 4, 6] - - Usage with ODL vectors: - - >>> space = odl.uniform_discr(0, 1, 3) - >>> x = space.element([1, 2, 3]) - >>> with writable_array(x) as arr: - ... arr += [1, 1, 1] - >>> x - uniform_discr(0.0, 1.0, 3).element([ 2., 3., 4.]) - - Additional keyword arguments are passed to `numpy.asarray`: - - >>> lst = [1, 2, 3] - >>> with writable_array(lst, dtype='complex') as arr: - ... print(arr) - [ 1.+0.j 2.+0.j 3.+0.j] - - Note that the changes are only saved upon exiting the context - manger exits. Before, the input object is unchanged: - - >>> lst = [1, 2, 3] - >>> with writable_array(lst) as arr: - ... arr *= 2 - ... print(lst) - [1, 2, 3] - >>> print(lst) - [2, 4, 6] - """ - arr = None - try: - arr = np.asarray(obj, **kwargs) - yield arr - finally: - if arr is not None: - obj[:] = arr - - def signature_string(posargs, optargs, sep=', ', mod='!r'): """Return a stringified signature from given arguments. @@ -1317,187 +954,4 @@ class that is created through a method, for instance :: meth_call_str = '(\n' + indent(full_arg_str) + '\n)' - return '.'.join(init_parts) + meth_call_str - - -def run_from_ipython(): - """If the process is run from IPython.""" - return '__IPYTHON__' in globals() - - -def pkg_supports(feature, pkg_version, pkg_feat_dict): - """Return bool indicating whether a package supports ``feature``. - - Parameters - ---------- - feature : str - Name of a potential feature of a package. - pkg_version : str - Version of the package that should be checked for presence of the - feature. - pkg_feat_dict : dict - Specification of features of a package. Each item has the - following form:: - - feature_name: version_specification - - Here, ``feature_name`` is a string that is matched against - ``feature``, and ``version_specification`` is a string or a - sequence of strings that specifies version sets. These - specifications are the same as for ``setuptools`` requirements, - just without the package name. - A ``None`` entry signals "no support in any version", i.e., - always ``False``. - If a sequence of requirements are given, they are OR-ed together. - See ``Examples`` for details. - - Returns - ------- - supports : bool - ``True`` if ``pkg_version`` of the package in question supports - ``feature``, ``False`` otherwise. - - Examples - -------- - >>> feat_dict = { - ... 'feat1': '==0.5.1', - ... 'feat2': '>0.6, <=0.9', # both required simultaneously - ... 'feat3': ['>0.6', '<=0.9'], # only one required, i.e. always True - ... 'feat4': ['==0.5.1', '>0.6, <=0.9'], - ... 'feat5': None - ... } - >>> pkg_supports('feat1', '0.5.1', feat_dict) - True - >>> pkg_supports('feat1', '0.4', feat_dict) - False - >>> pkg_supports('feat2', '0.5.1', feat_dict) - False - >>> pkg_supports('feat2', '0.6.1', feat_dict) - True - >>> pkg_supports('feat2', '0.9', feat_dict) - True - >>> pkg_supports('feat2', '1.0', feat_dict) - False - >>> pkg_supports('feat3', '0.4', feat_dict) - True - >>> pkg_supports('feat3', '1.0', feat_dict) - True - >>> pkg_supports('feat4', '0.5.1', feat_dict) - True - >>> pkg_supports('feat4', '0.6', feat_dict) - False - >>> pkg_supports('feat4', '0.6.1', feat_dict) - True - >>> pkg_supports('feat4', '1.0', feat_dict) - False - >>> pkg_supports('feat5', '0.6.1', feat_dict) - False - >>> pkg_supports('feat5', '1.0', feat_dict) - False - """ - from pkg_resources import parse_requirements - - feature = str(feature) - pkg_version = str(pkg_version) - supp_versions = pkg_feat_dict.get(feature, None) - if supp_versions is None: - return False - - # Make sequence from single string - if is_string(supp_versions): - supp_versions = [supp_versions] - - # Make valid package requirements - ver_specs = ['pkg' + supp_ver for supp_ver in supp_versions] - # Each parse_requirements list contains only one entry since we specify - # only one package - ver_reqs = [list(parse_requirements(ver_spec))[0] - for ver_spec in ver_specs] - - # If one of the requirements in the list is met, return True - for req in ver_reqs: - if req.specifier.contains(pkg_version, prereleases=True): - return True - - # No match - return False - - -@contextmanager -def npy_random_seed(seed): - """Context manager to temporarily set the NumPy random generator seed. - - Parameters - ---------- - seed : int or None - Seed value for the random number generator. - ``None`` is interpreted as keeping the current seed. - - Examples - -------- - Use this to make drawing pseudo-random numbers repeatable: - - >>> with npy_random_seed(42): - ... rand_int = np.random.randint(10) - >>> with npy_random_seed(42): - ... same_rand_int = np.random.randint(10) - >>> rand_int == same_rand_int - True - """ - do_seed = seed is not None - orig_rng_state = None - try: - if do_seed: - orig_rng_state = np.random.get_state() - np.random.seed(seed) - yield - - finally: - if do_seed and orig_rng_state is not None: - np.random.set_state(orig_rng_state) - - -def unique(seq): - """Return the unique values in a sequence. - - Parameters - ---------- - seq : sequence - Sequence with (possibly duplicate) elements. - - Returns - ------- - unique : list - Unique elements of ``seq``. - Order is guaranteed to be the same as in seq. - - Examples - -------- - Determine unique elements in list - - >>> unique([1, 2, 3, 3]) - [1, 2, 3] - - >>> unique((1, 'str', 'str')) - [1, 'str'] - - The utility also works with unhashable types: - - >>> unique((1, [1], [1])) - [1, [1]] - """ - # First check if all elements are hashable, if so O(n) can be done - try: - return list(OrderedDict.fromkeys(seq)) - except TypeError: - # Non-hashable, resort to O(n^2) - unique_values = [] - for i in seq: - if i not in unique_values: - unique_values.append(i) - return unique_values - - -if __name__ == '__main__': - from odl.util.testutils import run_doctests - run_doctests() + return '.'.join(init_parts) + meth_call_str \ No newline at end of file diff --git a/odl/util/pytest_config.py b/odl/core/util/pytest_config.py similarity index 75% rename from odl/util/pytest_config.py rename to odl/core/util/pytest_config.py index c02289ef2c6..77b035d1481 100644 --- a/odl/util/pytest_config.py +++ b/odl/core/util/pytest_config.py @@ -17,10 +17,11 @@ import numpy as np import odl -from odl.space.entry_points import tensor_space_impl_names +from odl.core.array_API_support import lookup_array_backend +from odl.core.space.entry_points import tensor_space_impl_names from odl.trafos.backends import PYFFTW_AVAILABLE, PYWT_AVAILABLE -from odl.util.testutils import simple_fixture -from odl.util.utility import SCTYPES +from odl.core.util.testutils import simple_fixture +from odl.core.util.dtype_utils import INTEGER_DTYPES, FLOAT_DTYPES, COMPLEX_DTYPES try: import pytest @@ -133,27 +134,41 @@ def pytest_ignore_collect(path, config): odl_tspace_impl = simple_fixture(name='tspace_impl', params=tensor_space_impl_names()) -real_floating_dtypes = SCTYPES['float'] -real_floating_dtype_params = [np.dtype(dt) for dt in real_floating_dtypes] +real_floating_dtypes = FLOAT_DTYPES odl_real_floating_dtype = simple_fixture(name='dtype', - params=real_floating_dtype_params, - fmt=' {name} = np.{value.name} ') + params=real_floating_dtypes) -floating_dtypes = SCTYPES['float'] + SCTYPES['complex'] -floating_dtype_params = [np.dtype(dt) for dt in floating_dtypes] +complex_floating_dtypes = COMPLEX_DTYPES +odl_complex_floating_dtype = simple_fixture(name='dtype', + params=complex_floating_dtypes) + +floating_dtypes = FLOAT_DTYPES + COMPLEX_DTYPES odl_floating_dtype = simple_fixture(name='dtype', - params=floating_dtype_params, - fmt=' {name} = np.{value.name} ') + params=floating_dtypes) -scalar_dtypes = floating_dtype_params + SCTYPES['int'] + SCTYPES['uint'] -scalar_dtype_params = [np.dtype(dt) for dt in floating_dtypes] +scalar_dtypes = INTEGER_DTYPES + FLOAT_DTYPES + COMPLEX_DTYPES odl_scalar_dtype = simple_fixture(name='dtype', - params=scalar_dtype_params, - fmt=' {name} = np.{value.name} ') + params=scalar_dtypes) + + +IMPL_DEVICE_PAIRS = [] + +for impl in tensor_space_impl_names(): + array_backend = lookup_array_backend(impl) + for device in array_backend.available_devices: + IMPL_DEVICE_PAIRS.append((impl, device)) + +odl_impl_device_pairs = simple_fixture(name='impl_device', params=IMPL_DEVICE_PAIRS) + +if 'pytorch' in tensor_space_impl_names(): + CUDA_DEVICES = [] + for device in lookup_array_backend('pytorch').available_devices: + CUDA_DEVICES.append(device) + + cuda_device = simple_fixture(name='cuda_device', params=CUDA_DEVICES) -odl_elem_order = simple_fixture(name='order', params=[None, 'C', 'F']) +odl_elem_order = simple_fixture(name='order', params=['C']) -odl_ufunc = simple_fixture('ufunc', [p[0] for p in odl.util.ufuncs.UFUNCS]) odl_reduction = simple_fixture('reduction', ['sum', 'prod', 'min', 'max']) # More complicated ones with non-trivial documentation diff --git a/odl/core/util/scipy_compatibility.py b/odl/core/util/scipy_compatibility.py new file mode 100644 index 00000000000..be34e1d6827 --- /dev/null +++ b/odl/core/util/scipy_compatibility.py @@ -0,0 +1,31 @@ +# check if environ['SCIPY_ARRAY_API']='1' +import warnings +from os import environ +if 'SCIPY_ARRAY_API' in environ and environ['SCIPY_ARRAY_API']=='1': + pass +else: + warnings.warn('The environment variable SCIPY_ARRAY_API must be set to 1. It should be by default when importing odl, but it seems that scipy was imported before odl. If not set, the array API support of scipy will be disabled, meaning that function calls such as ``xlogy`` on GPU will error and throw back pytorch Type errors. Please add the following lines before your first scipy import. \n' \ + 'from os import environ \n' \ + 'environ["SCIPY_ARRAY_API"]=="1" \n ' \ + '********End of Warning********', stacklevel=2) + +import scipy + +__all__ = ( + 'lambertw', + 'scipy_lambertw', + 'xlogy', + ) + +def _helper(operation:str, x1, x2=None, out=None, namespace=scipy.special, **kwargs): + return x1.space._elementwise_num_operation( + operation=operation, x1=x1, x2=x2, out=out, namespace=namespace, **kwargs) + +def lambertw(x, k=0, tol=1e-8): + return _helper('lambertw', x, k=k, tol=tol) + +def scipy_lambertw(x, k=0, tol=1e-8): + return scipy.special.lambertw(x, k, tol) + +def xlogy(x1, x2, out=None): + return _helper('xlogy', x1=x1, x2=x2, out=out) \ No newline at end of file diff --git a/odl/util/sparse.py b/odl/core/util/sparse.py similarity index 100% rename from odl/util/sparse.py rename to odl/core/util/sparse.py diff --git a/odl/util/testutils.py b/odl/core/util/testutils.py similarity index 86% rename from odl/util/testutils.py rename to odl/core/util/testutils.py index 553c9927414..956d75f59f0 100644 --- a/odl/util/testutils.py +++ b/odl/core/util/testutils.py @@ -16,14 +16,21 @@ from builtins import object from contextlib import contextmanager from time import time - +from odl.core.array_API_support.comparisons import allclose, isclose, odl_all_equal import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +import pytest -from future.moves.itertools import zip_longest +from odl.core.util.utility import is_string, run_from_ipython +from odl.core.util.dtype_utils import ( + is_boolean_dtype, is_signed_int_dtype, is_unsigned_int_dtype, + is_floating_dtype, is_complex_dtype) -from odl.util.utility import is_string, run_from_ipython +skip_if_no_pytorch = pytest.mark.skipif( + "not 'pytorch' in odl.core.space.entry_points.TENSOR_SPACE_IMPLS", + reason='pytorch not available not available', + ) +from itertools import zip_longest __all__ = ( 'dtype_ndigits', 'dtype_tol', @@ -108,13 +115,34 @@ def dtype_tol(dtype, default=None): def all_equal(iter1, iter2): - """Return ``True`` if all elements in ``a`` and ``b`` are equal.""" + """Return ``True`` if all elements in ``a`` and ``b`` are equal. + This is a more forgiving version of `odl_all_equal`, allowing also comparisons + between e.g. a list and a `LinearSpaceElement` rather than requiring both sides + to be compatible.""" + # Direct comparison for scalars, tuples or lists + + from odl.core.set.space import LinearSpaceElement + + if isinstance(iter1, LinearSpaceElement) and isinstance(iter2, LinearSpaceElement): + return iter1 == iter2 + elif isinstance(iter1, LinearSpaceElement): + try: + return iter1 == iter1.space.element(iter2) + except ValueError as e: + pass + except TypeError as e: + pass + elif isinstance(iter2, LinearSpaceElement): + return iter2.space.element(iter1) == iter2 + try: if iter1 == iter2: return True except ValueError: # Raised by NumPy when comparing arrays pass + except RuntimeError: # Raised by PyTorch when comparing tensors + pass # Special case for None if iter1 is None and iter2 is None: @@ -156,7 +184,7 @@ def all_almost_equal_array(v1, v2, ndigits): return False return True else: - return np.allclose(v1, v2, + return allclose(v1, v2, rtol=10 ** -ndigits, atol=10 ** -ndigits, equal_nan=True) @@ -164,10 +192,12 @@ def all_almost_equal_array(v1, v2, ndigits): def all_almost_equal(iter1, iter2, ndigits=None): """Return ``True`` if all elements in ``a`` and ``b`` are almost equal.""" try: - if iter1 is iter2 or iter1 == iter2: + if iter1 is iter2 or all_equal(iter1, iter2): return True except ValueError: pass + except RuntimeError: + pass if iter1 is None and iter2 is None: return True @@ -185,7 +215,7 @@ def all_almost_equal(iter1, iter2, ndigits=None): except TypeError: if ndigits is None: ndigits = _ndigits(iter1, iter2, None) - return np.isclose(iter1, iter2, + return isclose(iter1, iter2, atol=10 ** -ndigits, rtol=10 ** -ndigits, equal_nan=True) @@ -304,7 +334,7 @@ def noise_array(space): Notes ----- This method is intended for internal testing purposes. For more explicit - example elements see ``odl.phantoms`` and ``LinearSpaceElement.examples``. + example elements see ``odl.core.phantoms`` and ``LinearSpaceElement.examples``. Parameters ---------- @@ -328,14 +358,14 @@ def noise_array(space): -------- noise_element noise_elements - odl.set.space.LinearSpace.examples : Examples of elements + odl.core.set.space.LinearSpace.examples : Examples of elements typical to the space. """ - from odl.space import ProductSpace + from odl.core.space import ProductSpace if isinstance(space, ProductSpace): if space.is_power_space: - return np.array([noise_array(si) for si in space]) + return [noise_array(si) for si in space] # Non-power–product-space elements are represented as arrays of arrays, # each in general with a different shape. These cannot be monolithic @@ -345,29 +375,29 @@ def noise_array(space): # outer array with dtype=object but store the inner elements as for the # constituent spaces. The resulting ragged arrays support some, but not # all numerical operations. - result = np.array([None for si in space], dtype=object) + result = [None for si in space] for i, si in enumerate(space): result[i] = noise_array(si) return result else: - if space.dtype == bool: - arr = np.random.randint(0, 2, size=space.shape, dtype=bool) - elif np.issubdtype(space.dtype, np.unsignedinteger): + dtype = space.dtype_identifier + if is_boolean_dtype(dtype): + arr = np.random.randint(0, 2, size=space.shape, dtype=dtype) + elif is_unsigned_int_dtype(dtype): arr = np.random.randint(0, 10, space.shape) - elif np.issubdtype(space.dtype, np.signedinteger): + elif is_signed_int_dtype(dtype): arr = np.random.randint(-10, 10, space.shape) - elif np.issubdtype(space.dtype, np.floating): + elif is_floating_dtype(dtype): arr = np.random.randn(*space.shape) - elif np.issubdtype(space.dtype, np.complexfloating): + elif is_complex_dtype(dtype): arr = ( np.random.randn(*space.shape) + 1j * np.random.randn(*space.shape) ) / np.sqrt(2.0) else: raise ValueError('bad dtype {}'.format(space.dtype)) - - return arr.astype(space.dtype, copy=AVOID_UNNECESSARY_COPY) + return space.element(arr).data def noise_element(space): @@ -382,13 +412,13 @@ def noise_element(space): Notes ----- This method is intended for internal testing purposes. For more explicit - example elements see ``odl.phantoms`` and ``LinearSpaceElement.examples``. + example elements see ``odl.core.phantoms`` and ``LinearSpaceElement.examples``. Parameters ---------- space : `LinearSpace` Space in which to create an element. The - `odl.set.space.LinearSpace.element` method of the space needs to + `odl.core.set.space.LinearSpace.element` method of the space needs to accept input of `numpy.ndarray` type. Returns @@ -406,7 +436,7 @@ def noise_element(space): -------- noise_array noise_elements - odl.set.space.LinearSpace.examples : Examples of elements typical + odl.core.set.space.LinearSpace.examples : Examples of elements typical to the space. """ return space.element(noise_array(space)) @@ -426,13 +456,13 @@ def noise_elements(space, n=1): Notes ----- This method is intended for internal testing purposes. For more explicit - example elements see ``odl.phantoms`` and ``LinearSpaceElement.examples``. + example elements see ``odl.core.phantoms`` and ``LinearSpaceElement.examples``. Parameters ---------- space : `LinearSpace` Space in which to create an element. The - `odl.set.space.LinearSpace.element` method of the space needs to + `odl.core.set.space.LinearSpace.element` method of the space needs to accept input of `numpy.ndarray` type. n : int, optional Number of elements to create. @@ -463,7 +493,7 @@ def noise_elements(space, n=1): arrs = tuple(noise_array(space) for _ in range(n)) # Make space elements from arrays - elems = tuple(space.element(arr.copy()) for arr in arrs) + elems = tuple(space.element(arr) for arr in arrs) if n == 1: return tuple(arrs + elems) @@ -750,5 +780,24 @@ def test_file(file, args=None): pytest.main(args) +# What types will auto-chosen in expressions like `odl.vector([1.0,2.0])` is +# backend-dependent, with NumPy prioritizing precision and PyTorch speed. This +# follows what the underlying `np.array` / `torch.tensor` constructors choose. +# Note that this differs from what happens when `float` is explicitly specified +# as the `dtype` - this will always be interpreted as double precision +# (see `DTYPE_SHORTHANDS`). +default_precision_dict = { + 'pytorch':{ + 'integer' : 'int32', + 'float' : 'float32', + 'complex' : 'complex64' + }, + 'numpy':{ + 'integer' : 'int64', + 'float' : 'float64', + 'complex' : 'complex128' + } +} + if __name__ == '__main__': run_doctests() diff --git a/odl/core/util/utility.py b/odl/core/util/utility.py new file mode 100644 index 00000000000..ae2237b68bc --- /dev/null +++ b/odl/core/util/utility.py @@ -0,0 +1,324 @@ +# Copyright 2014-2020 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Utilities mainly for internal use.""" + +from __future__ import absolute_import, division, print_function + +import contextlib +from collections import OrderedDict +from contextlib import contextmanager +from itertools import product +from odl.core.util.print_utils import is_string +import numpy as np + +__all__ = ( + 'nd_iterator', + 'conj_exponent', + 'nullcontext', + 'writable_array', + 'run_from_ipython', + 'npy_random_seed', + 'unique', +) + +def nd_iterator(shape): + """Iterator over n-d cube with shape. + + Parameters + ---------- + shape : sequence of int + The number of points per axis + + Returns + ------- + nd_iterator : generator + Generator returning tuples of integers of length ``len(shape)``. + + Examples + -------- + >>> for pt in nd_iterator([2, 2]): + ... print(pt) + (0, 0) + (0, 1) + (1, 0) + (1, 1) + """ + return product(*map(range, shape)) + + +def conj_exponent(exp): + """Conjugate exponent ``exp / (exp - 1)``. + + Parameters + ---------- + exp : positive float or inf + Exponent for which to calculate the conjugate. Must be + at least 1.0. + + Returns + ------- + conj : positive float or inf + Conjugate exponent. For ``exp=1``, return ``float('inf')``, + for ``exp=float('inf')`` return 1. In all other cases, return + ``exp / (exp - 1)``. + """ + if exp == 1.0: + return float('inf') + elif exp == float('inf'): + return 1.0 + else: + return exp / (exp - 1.0) + + +@contextmanager +def nullcontext(enter_result=None): + """Backport of the Python >=3.7 trivial context manager. + + See `the Python documentation + `_ + for details. + """ + try: + yield enter_result + finally: + pass + + +try: + nullcontext = contextlib.nullcontext +except AttributeError: + pass + + +@contextmanager +def writable_array(obj, must_be_contiguous: bool =False): + """Context manager that casts `obj` to a backend-specific array and saves changes + made on that array back into `obj`. + + Parameters + ---------- + obj : `array-like` + Object that should be made available as writable array. + It must be valid as input to `numpy.asarray` and needs to + support the syntax ``obj[:] = arr``. + must_be_contiguous : bool + Whether the writable array should guarantee standard C order. + + Examples + -------- + Usage with ODL vectors: + + >>> space = odl.uniform_discr(0, 1, 3) + >>> x = space.element([1, 2, 3]) + >>> with writable_array(x) as arr: + ... arr += [1, 1, 1] + >>> x + uniform_discr(0.0, 1.0, 3).element([ 2., 3., 4.]) + + Note that the changes are in general only saved upon exiting the + context manager. Before, the input object may remain unchanged. + """ + if isinstance(obj, np.ndarray): + if must_be_contiguous and not obj.data.c_contiguous: + # Needs to convert to contiguous array + arr = np.ascontiguousarray(obj) + try: + yield arr + finally: + obj[:] = arr + else: + try: + yield obj + finally: + pass + else: + with obj.writable_array(must_be_contiguous=must_be_contiguous) as arr: + yield arr + +def run_from_ipython(): + """If the process is run from IPython.""" + return '__IPYTHON__' in globals() + + +def pkg_supports(feature, pkg_version, pkg_feat_dict): + """Return bool indicating whether a package supports ``feature``. + + Parameters + ---------- + feature : str + Name of a potential feature of a package. + pkg_version : str + Version of the package that should be checked for presence of the + feature. + pkg_feat_dict : dict + Specification of features of a package. Each item has the + following form:: + + feature_name: version_specification + + Here, ``feature_name`` is a string that is matched against + ``feature``, and ``version_specification`` is a string or a + sequence of strings that specifies version sets. These + specifications are the same as for ``setuptools`` requirements, + just without the package name. + A ``None`` entry signals "no support in any version", i.e., + always ``False``. + If a sequence of requirements are given, they are OR-ed together. + See ``Examples`` for details. + + Returns + ------- + supports : bool + ``True`` if ``pkg_version`` of the package in question supports + ``feature``, ``False`` otherwise. + + Examples + -------- + >>> feat_dict = { + ... 'feat1': '==0.5.1', + ... 'feat2': '>0.6, <=0.9', # both required simultaneously + ... 'feat3': ['>0.6', '<=0.9'], # only one required, i.e. always True + ... 'feat4': ['==0.5.1', '>0.6, <=0.9'], + ... 'feat5': None + ... } + >>> pkg_supports('feat1', '0.5.1', feat_dict) + True + >>> pkg_supports('feat1', '0.4', feat_dict) + False + >>> pkg_supports('feat2', '0.5.1', feat_dict) + False + >>> pkg_supports('feat2', '0.6.1', feat_dict) + True + >>> pkg_supports('feat2', '0.9', feat_dict) + True + >>> pkg_supports('feat2', '1.0', feat_dict) + False + >>> pkg_supports('feat3', '0.4', feat_dict) + True + >>> pkg_supports('feat3', '1.0', feat_dict) + True + >>> pkg_supports('feat4', '0.5.1', feat_dict) + True + >>> pkg_supports('feat4', '0.6', feat_dict) + False + >>> pkg_supports('feat4', '0.6.1', feat_dict) + True + >>> pkg_supports('feat4', '1.0', feat_dict) + False + >>> pkg_supports('feat5', '0.6.1', feat_dict) + False + >>> pkg_supports('feat5', '1.0', feat_dict) + False + """ + from pkg_resources import parse_requirements + + feature = str(feature) + pkg_version = str(pkg_version) + supp_versions = pkg_feat_dict.get(feature, None) + if supp_versions is None: + return False + + # Make sequence from single string + if is_string(supp_versions): + supp_versions = [supp_versions] + + # Make valid package requirements + ver_specs = ['pkg' + supp_ver for supp_ver in supp_versions] + # Each parse_requirements list contains only one entry since we specify + # only one package + ver_reqs = [list(parse_requirements(ver_spec))[0] + for ver_spec in ver_specs] + + # If one of the requirements in the list is met, return True + for req in ver_reqs: + if req.specifier.contains(pkg_version, prereleases=True): + return True + + # No match + return False + + +@contextmanager +def npy_random_seed(seed): + """Context manager to temporarily set the NumPy random generator seed. + + Parameters + ---------- + seed : int or None + Seed value for the random number generator. + ``None`` is interpreted as keeping the current seed. + + Examples + -------- + Use this to make drawing pseudo-random numbers repeatable: + + >>> with npy_random_seed(42): + ... rand_int = np.random.randint(10) + >>> with npy_random_seed(42): + ... same_rand_int = np.random.randint(10) + >>> rand_int == same_rand_int + True + """ + do_seed = seed is not None + orig_rng_state = None + try: + if do_seed: + orig_rng_state = np.random.get_state() + np.random.seed(seed) + yield + + finally: + if do_seed and orig_rng_state is not None: + np.random.set_state(orig_rng_state) + + +def unique(seq): + """Return the unique values in a sequence. + + Parameters + ---------- + seq : sequence + Sequence with (possibly duplicate) elements. + + Returns + ------- + unique : list + Unique elements of ``seq``. + Order is guaranteed to be the same as in seq. + + Examples + -------- + Determine unique elements in list + + >>> unique([1, 2, 3, 3]) + [1, 2, 3] + + >>> unique((1, 'str', 'str')) + [1, 'str'] + + The utility also works with unhashable types: + + >>> unique((1, [1], [1])) + [1, [1]] + """ + # First check if all elements are hashable, if so O(n) can be done + try: + return list(OrderedDict.fromkeys(seq)) + except TypeError: + # Non-hashable, resort to O(n^2) + unique_values = [] + for i in seq: + if i not in unique_values: + unique_values.append(i) + return unique_values + + +if __name__ == '__main__': + from odl.core.util.testutils import run_doctests + run_doctests() diff --git a/odl/util/vectorization.py b/odl/core/util/vectorization.py similarity index 89% rename from odl/util/vectorization.py rename to odl/core/util/vectorization.py index 460fa62e81c..ba9ee6c7f1c 100644 --- a/odl/util/vectorization.py +++ b/odl/core/util/vectorization.py @@ -13,6 +13,8 @@ from functools import wraps import numpy as np +from odl.core.array_API_support import get_array_and_backend + __all__ = ('is_valid_input_array', 'is_valid_input_meshgrid', 'out_shape_from_meshgrid', 'out_shape_from_array', @@ -22,9 +24,13 @@ def is_valid_input_array(x, ndim=None): """Test if ``x`` is a correctly shaped point array in R^d.""" try: - x = np.asarray(x) + x, backend = get_array_and_backend(x) except ValueError: - return False + # raising a ValueError here will be problematic when cheking lists/tuple. + if isinstance(x, (list, tuple)): + x = np.asarray(x) + else: + return False if ndim is None or ndim == 1: return x.ndim == 1 and x.size > 1 or x.ndim == 2 and x.shape[0] == 1 @@ -59,12 +65,24 @@ def out_shape_from_meshgrid(mesh): if len(mesh) == 1: return (len(mesh[0]),) else: - return np.broadcast(*mesh).shape - - + # Ragged arrays are a liability in the current implementation + _, backend = get_array_and_backend(mesh[0]) + namespace = backend.array_namespace + if backend.impl == 'numpy': + return namespace.broadcast(*mesh).shape + elif backend.impl == 'pytorch': + mesh_size = namespace.broadcast_shapes( + *(t.shape for t in mesh)) + return list(mesh_size) + else: + raise NotImplementedError(f'Not implemented for impl {backend.impl}') + def out_shape_from_array(arr): """Get the output shape from an array.""" - arr = np.asarray(arr) + if isinstance(arr, (float, int, complex, list, tuple)): + arr = np.asarray(arr) + else: + arr,_ = get_array_and_backend(arr) if arr.ndim == 1: return arr.shape else: @@ -292,6 +310,7 @@ def _func(*x, **kw): out[:] = self.vfunc(*x, **kwargs) + if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/functional/__init__.py b/odl/functional/__init__.py similarity index 100% rename from odl/solvers/functional/__init__.py rename to odl/functional/__init__.py diff --git a/odl/solvers/functional/default_functionals.py b/odl/functional/default_functionals.py similarity index 95% rename from odl/solvers/functional/default_functionals.py rename to odl/functional/default_functionals.py index dbafcfb93b9..4c9cf249f95 100644 --- a/odl/solvers/functional/default_functionals.py +++ b/odl/functional/default_functionals.py @@ -14,10 +14,10 @@ import numpy as np -from odl.operator import ( +from odl.core.operator import ( ConstantOperator, DiagonalOperator, Operator, PointwiseNorm, ScalingOperator, ZeroOperator) -from odl.solvers.functional.functional import ( +from odl.functional.functional import ( Functional, FunctionalQuadraticPerturb) from odl.solvers.nonsmooth.proximal_operators import ( combine_proximals, proj_simplex, proximal_box_constraint, @@ -26,8 +26,13 @@ proximal_convex_conj_l1_l2, proximal_convex_conj_l2, proximal_convex_conj_linfty, proximal_huber, proximal_l1, proximal_l1_l2, proximal_l2, proximal_l2_squared, proximal_linfty) -from odl.space import ProductSpace -from odl.util import conj_exponent +from odl.core.space import ProductSpace +from odl.core.util import conj_exponent + +from odl.core.array_API_support import (all as odl_all, +abs as odl_abs, sign, pow, square, log, isfinite, exp, +max, min, sum as odl_sum) +from odl.core.util.scipy_compatibility import xlogy __all__ = ('ZeroFunctional', 'ConstantFunctional', 'ScalingFunctional', 'IdentityFunctional', @@ -82,17 +87,17 @@ def _call(self, x): if self.exponent == 0: return self.domain.one().inner(np.not_equal(x, 0)) elif self.exponent == 1: - return x.ufuncs.absolute().inner(self.domain.one()) + return odl_abs(x).inner(self.domain.one()) elif self.exponent == 2: return np.sqrt(x.inner(x)) elif np.isfinite(self.exponent): - tmp = x.ufuncs.absolute() - tmp.ufuncs.power(self.exponent, out=tmp) + tmp = odl_abs(x) + pow(tmp, self.exponent, out=tmp) return np.power(tmp.inner(self.domain.one()), 1 / self.exponent) elif self.exponent == np.inf: - return x.ufuncs.absolute().ufuncs.max() + return max(odl_abs(x)) elif self.exponent == -np.inf: - return x.ufuncs.absolute().ufuncs.min() + return min(odl_abs(x)) else: raise RuntimeError('unknown exponent') @@ -144,7 +149,7 @@ def __init__(self): def _call(self, x): """Apply the gradient operator to the given point.""" - return x.ufuncs.sign() + return sign(x) def derivative(self, x): """Derivative is a.e. zero.""" @@ -297,7 +302,7 @@ def __init__(self): def _call(self, x, out): """Return ``self(x)``.""" pwnorm_x = functional.pointwise_norm(x) - pwnorm_x.ufuncs.sign(out=pwnorm_x) + sign(pwnorm_x, out=pwnorm_x) functional.pointwise_norm.derivative(x).adjoint(pwnorm_x, out=out) @@ -384,7 +389,7 @@ def __init__(self, vfspace, exponent=None): def _call(self, x): """Return ``self(x)``.""" - x_norm = self.pointwise_norm(x).ufuncs.max() + x_norm = max(self.pointwise_norm(x)) if x_norm > 1: return np.inf @@ -565,7 +570,7 @@ class L1Norm(LpNorm): The `proximal` factory allows using vector-valued stepsizes: >>> space = odl.rn(3) - >>> f = odl.solvers.L1Norm(space) + >>> f = odl.functional.L1Norm(space) >>> x = space.one() >>> f.proximal([0.5, 1.0, 1.5])(x) rn(3).element([ 0.5, 0. , 0. ]) @@ -649,7 +654,7 @@ class L2NormSquared(Functional): The `proximal` factory allows using vector-valued stepsizes: >>> space = odl.rn(3) - >>> f = odl.solvers.L2NormSquared(space) + >>> f = odl.functional.L2NormSquared(space) >>> x = space.one() >>> f.proximal([0.5, 1.5, 2.0])(x) rn(3).element([ 0.5 , 0.25, 0.2 ]) @@ -824,7 +829,7 @@ class IdentityFunctional(ScalingFunctional): See Also -------- - odl.operator.default_ops.IdentityOperator + odl.core.operator.default_ops.IdentityOperator """ def __init__(self, field): @@ -1087,7 +1092,7 @@ def __init__(self, space, prior=None): >>> space = odl.rn(3) >>> prior = 3 * space.one() - >>> func = odl.solvers.KullbackLeibler(space, prior=prior) + >>> func = odl.functional.KullbackLeibler(space, prior=prior) >>> func(prior) 0.0 @@ -1095,7 +1100,7 @@ def __init__(self, space, prior=None): Test that zeros in the prior are handled correctly >>> prior = space.zero() - >>> func = odl.solvers.KullbackLeibler(space, prior=prior) + >>> func = odl.functional.KullbackLeibler(space, prior=prior) >>> x = space.one() >>> func(x) 3.0 @@ -1121,15 +1126,13 @@ def _call(self, x): If any components of ``x`` is non-positive, the value is positive infinity. """ - # Lazy import to improve `import odl` time - import scipy.special with np.errstate(invalid='ignore', divide='ignore'): if self.prior is None: - res = (x - 1 - np.log(x)).inner(self.domain.one()) + res = (x - 1 - log(x)).inner(self.domain.one()) else: - xlogy = scipy.special.xlogy(self.prior, self.prior / x) - res = (x - self.prior + xlogy).inner(self.domain.one()) + plogpx = xlogy(self.prior, self.prior / x) + res = (x - self.prior + plogpx).inner(self.domain.one()) if not np.isfinite(res): # In this case, some element was less than or equal to zero @@ -1255,14 +1258,12 @@ def _call(self, x): If any components of ``x`` is larger than or equal to 1, the value is positive infinity. """ - # Lazy import to improve `import odl` time - import scipy.special with np.errstate(invalid='ignore'): if self.prior is None: - res = -(np.log(1 - x)).inner(self.domain.one()) + res = -(log(1 - x)).inner(self.domain.one()) else: - xlogy = scipy.special.xlogy(self.prior, 1 - x) + xlogy = self.prior * log(1-x) res = -self.domain.element(xlogy).inner(self.domain.one()) if not np.isfinite(res): @@ -1402,16 +1403,15 @@ def _call(self, x): If any components of ``x`` is non-positive, the value is positive infinity. """ - # Lazy import to improve `import odl` time - import scipy.special - + with np.errstate(invalid='ignore', divide='ignore'): if self.prior is None: - xlogx = scipy.special.xlogy(x, x) + xlogx = xlogy(x, x) res = (1 - x + xlogx).inner(self.domain.one()) else: - xlogy = scipy.special.xlogy(x, x / self.prior) - res = (self.prior - x + xlogy).inner(self.domain.one()) + # xlogy = scipy.special.xlogy(x, x / self.prior) + xlogx = xlogy(x, x/self.prior) + res = (self.prior - x + xlogx).inner(self.domain.one()) if not np.isfinite(res): # In this case, some element was less than or equal to zero @@ -1444,11 +1444,11 @@ def _call(self, x): than or equal to zero. """ if functional.prior is None: - tmp = np.log(x) + tmp = log(x) else: - tmp = np.log(x / functional.prior) + tmp = log(x / functional.prior) - if np.all(np.isfinite(tmp)): + if odl_all(isfinite(tmp)): return tmp else: # The derivative is not defined. @@ -1530,9 +1530,9 @@ def prior(self): def _call(self, x): """Return the value in the point ``x``.""" if self.prior is None: - tmp = self.domain.element((np.exp(x) - 1)).inner(self.domain.one()) + tmp = self.domain.element((exp(x) - 1)).inner(self.domain.one()) else: - tmp = (self.prior * (np.exp(x) - 1)).inner(self.domain.one()) + tmp = (self.prior * (exp(x) - 1)).inner(self.domain.one()) return tmp # TODO: replace this when UFuncOperators is in place: PL #576 @@ -1553,9 +1553,9 @@ def __init__(self): def _call(self, x): """Apply the gradient operator to the given point.""" if functional.prior is None: - return self.domain.element(np.exp(x)) + return self.domain.element(exp(x)) else: - return functional.prior * np.exp(x) + return functional.prior * exp(x) return KLCrossEntCCGradient() @@ -1648,9 +1648,9 @@ def __init__(self, *functionals): Create functional ``f([x1, x2]) = ||x1||_1 + ||x2||_2``: >>> space = odl.rn(3) - >>> l1 = odl.solvers.L1Norm(space) - >>> l2 = odl.solvers.L2Norm(space) - >>> f_sum = odl.solvers.SeparableSum(l1, l2) + >>> l1 = odl.functional.L1Norm(space) + >>> l2 = odl.functional.L2Norm(space) + >>> f_sum = odl.functional.SeparableSum(l1, l2) The `proximal` factory allows using vector-valued stepsizes: @@ -1663,19 +1663,19 @@ def __init__(self, *functionals): Create functional ``f([x1, ... ,xn]) = \sum_i ||xi||_1``: - >>> f_sum = odl.solvers.SeparableSum(l1, 5) + >>> f_sum = odl.functional.SeparableSum(l1, 5) """ # Make a power space if the second argument is an integer if (len(functionals) == 2 and isinstance(functionals[1], Integral)): functionals = [functionals[0]] * functionals[1] - if not all(isinstance(op, Functional) for op in functionals): + if not np.all(isinstance(op, Functional) for op in functionals): raise TypeError('all arguments must be `Functional` instances') domains = [func.domain for func in functionals] domain = ProductSpace(*domains) - linear = all(func.is_linear for func in functionals) + linear = np.all(func.is_linear for func in functionals) super(SeparableSum, self).__init__(space=domain, linear=linear) self.__functionals = tuple(functionals) @@ -1705,9 +1705,9 @@ def __getitem__(self, indices): Examples -------- >>> space = odl.rn(3) - >>> l1 = odl.solvers.L1Norm(space) - >>> l2 = odl.solvers.L2Norm(space) - >>> f_sum = odl.solvers.SeparableSum(l1, l2, 2*l2) + >>> l1 = odl.functional.L1Norm(space) + >>> l2 = odl.functional.L2Norm(space) + >>> f_sum = odl.functional.SeparableSum(l1, l2, 2*l2) Extract single sub-functional via integer index: @@ -2262,7 +2262,7 @@ def __init__(self, space, diameter=1, sum_rtol=None): ... and one where it lies inside the unit simplex. - >>> x /= x.ufuncs.sum() + >>> x /= odl.sum(x) >>> ind_simplex(x) 0 """ @@ -2280,9 +2280,9 @@ def __init__(self, space, diameter=1, sum_rtol=None): def _call(self, x): """Return ``self(x)``.""" - sum_constr = abs(x.ufuncs.sum() / self.diameter - 1) <= self.sum_rtol - - nonneq_constr = x.ufuncs.greater_equal(0).asarray().all() + sum_constr = abs(odl_sum(x) / self.diameter - 1) <= self.sum_rtol + nonneq_constr = all(0 <= x) + # nonneq_constr = x.ufuncs.greater_equal(0).asarray().all() if sum_constr and nonneq_constr: return 0 @@ -2380,7 +2380,7 @@ def __init__(self, space, sum_value=1, sum_rtol=None): ... and one where it does. - >>> x /= x.ufuncs.sum() + >>> x /= odl.sum(x) >>> ind_sum(x) 0 """ @@ -2398,7 +2398,7 @@ def __init__(self, space, sum_value=1, sum_rtol=None): def _call(self, x): """Return ``self(x)``.""" - if abs(x.ufuncs.sum() / self.sum_value - 1) <= self.sum_rtol: + if abs(odl_sum(x) / self.sum_value - 1) <= self.sum_rtol: return 0 else: return np.inf @@ -2428,7 +2428,7 @@ def __init__(self, sigma): def _call(self, x, out): - offset = 1 / x.size * (self.sum_value - x.ufuncs.sum()) + offset = 1 / x.size * (self.sum_value - odl_sum(x)) out.assign(x) out += offset @@ -2510,7 +2510,7 @@ def __init__(self, functional, sigma=1.0): Create smoothed l1 norm: >>> space = odl.rn(3) - >>> l1_norm = odl.solvers.L1Norm(space) + >>> l1_norm = odl.functional.L1Norm(space) >>> smoothed_l1 = MoreauEnvelope(l1_norm) """ super(MoreauEnvelope, self).__init__( @@ -2578,7 +2578,7 @@ def __init__(self, space, gamma): >>> space = odl.uniform_discr(0, 1, 14) >>> gamma = 0.1 - >>> huber_norm = odl.solvers.Huber(space, gamma=0.1) + >>> huber_norm = odl.functional.Huber(space, gamma=0.1) Check that if all elements are > ``gamma`` we get the L1-norm up to a constant: @@ -2586,7 +2586,7 @@ def __init__(self, space, gamma): >>> x = 2 * gamma * space.one() >>> tol = 1e-5 >>> constant = gamma / 2 * space.one().inner(space.one()) - >>> f = odl.solvers.L1Norm(space) - constant + >>> f = odl.functional.L1Norm(space) - constant >>> abs(huber_norm(x) - f(x)) < tol True @@ -2594,15 +2594,15 @@ def __init__(self, space, gamma): times the weight ``1/(2*gamma)``: >>> x = gamma / 2 * space.one() - >>> f = 1 / (2 * gamma) * odl.solvers.L2NormSquared(space) + >>> f = 1 / (2 * gamma) * odl.functional.L2NormSquared(space) >>> abs(huber_norm(x) - f(x)) < tol True Compare Huber- and L1-norm for vanishing smoothing ``gamma=0``: - >>> x = odl.phantom.white_noise(space) - >>> huber_norm = odl.solvers.Huber(space, gamma=0) - >>> l1_norm = odl.solvers.L1Norm(space) + >>> x = odl.core.phantom.white_noise(space) + >>> huber_norm = odl.functional.Huber(space, gamma=0) + >>> l1_norm = odl.functional.L1Norm(space) >>> abs(huber_norm(x) - l1_norm(x)) < tol True @@ -2610,9 +2610,9 @@ def __init__(self, space, gamma): >>> domain = odl.uniform_discr([0, 0], [1, 1], [5, 5]) >>> space = odl.ProductSpace(domain, 2) - >>> x = odl.phantom.white_noise(space) - >>> huber_norm = odl.solvers.Huber(space, gamma=0) - >>> l1_norm = odl.solvers.GroupL1Norm(space, 2) + >>> x = odl.core.phantom.white_noise(space) + >>> huber_norm = odl.functional.Huber(space, gamma=0) + >>> l1_norm = odl.functional.GroupL1Norm(space, 2) >>> abs(huber_norm(x) - l1_norm(x)) < tol True """ @@ -2636,13 +2636,14 @@ def _call(self, x): if isinstance(self.domain, ProductSpace): norm = PointwiseNorm(self.domain, 2)(x) else: - norm = x.ufuncs.absolute() + norm = odl_abs(x) if self.gamma > 0: - tmp = norm.ufuncs.square() + tmp = square(norm) tmp *= 1 / (2 * self.gamma) - index = norm.ufuncs.greater_equal(self.gamma) + # index = norm.ufuncs.greater_equal(self.gamma) + index = self.gamma <= norm tmp[index] = norm[index] - self.gamma / 2 else: tmp = norm @@ -2690,8 +2691,8 @@ def gradient(self): >>> space = odl.uniform_discr(0, 1, 14) >>> norm_one = space.one().norm() - >>> x = odl.phantom.white_noise(space) - >>> huber_norm = odl.solvers.Huber(space, gamma=0.1) + >>> x = odl.core.phantom.white_noise(space) + >>> huber_norm = odl.functional.Huber(space, gamma=0.1) >>> grad = huber_norm.gradient(x) >>> tol = 1e-5 >>> grad.norm() <= norm_one + tol @@ -2702,8 +2703,8 @@ def gradient(self): >>> domain = odl.uniform_discr([0, 0], [1, 1], [5, 5]) >>> space = odl.ProductSpace(domain, 2) >>> norm_one = space.one().norm() - >>> x = odl.phantom.white_noise(space) - >>> huber_norm = odl.solvers.Huber(space, gamma=0.2) + >>> x = odl.core.phantom.white_noise(space) + >>> huber_norm = odl.functional.Huber(space, gamma=0.2) >>> grad = huber_norm.gradient(x) >>> tol = 1e-5 >>> grad.norm() <= norm_one + tol @@ -2726,11 +2727,12 @@ def _call(self, x): if isinstance(self.domain, ProductSpace): norm = PointwiseNorm(self.domain, 2)(x) else: - norm = x.ufuncs.absolute() + norm = odl_abs(x) grad = x / functional.gamma - index = norm.ufuncs.greater_equal(functional.gamma) + # index = norm.ufuncs.greater_equal(functional.gamma) + index = functional.gamma <= norm if isinstance(self.domain, ProductSpace): for xi, gi in zip(x, grad): gi[index] = xi[index] / norm[index] @@ -2748,5 +2750,5 @@ def __repr__(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/functional/derivatives.py b/odl/functional/derivatives.py similarity index 94% rename from odl/solvers/functional/derivatives.py rename to odl/functional/derivatives.py index 428c01a1540..64fc62a0e75 100644 --- a/odl/solvers/functional/derivatives.py +++ b/odl/functional/derivatives.py @@ -11,9 +11,9 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.solvers.functional.functional import Functional -from odl.operator import Operator -from odl.space.base_tensors import TensorSpace +from odl.functional.functional import Functional +from odl.core.operator import Operator +from odl.core.space.base_tensors import TensorSpace __all__ = ('NumericalDerivative', 'NumericalGradient',) @@ -50,7 +50,7 @@ def __init__(self, operator, point, method='forward', step=None): L2 norm: >>> space = odl.rn(3) - >>> func = odl.solvers.L2NormSquared(space) + >>> func = odl.functional.L2NormSquared(space) >>> hess = NumericalDerivative(func.gradient, [1, 1, 1]) >>> hess([0, 0, 1]) rn(3).element([ 0., 0., 2.]) @@ -108,9 +108,9 @@ def __init__(self, operator, point, method='forward', step=None): # Use half of the number of digits as machine epsilon, this # "usually" gives a good balance between precision and numerical # stability. - self.step = np.sqrt(np.finfo(operator.domain.dtype).eps) - else: - self.step = float(step) + step = np.sqrt(np.finfo(operator.domain.dtype).eps) + + self.step = float(step) self.method, method_in = str(method).lower(), method if self.method not in ('backward', 'forward', 'central'): @@ -168,7 +168,7 @@ def __init__(self, functional, method='forward', step=None): Examples -------- >>> space = odl.rn(3) - >>> func = odl.solvers.L2NormSquared(space) + >>> func = odl.functional.L2NormSquared(space) >>> grad = NumericalGradient(func) >>> grad([1, 1, 1]) rn(3).element([ 2., 2., 2.]) @@ -227,9 +227,9 @@ def __init__(self, functional, method='forward', step=None): # Use half of the number of digits as machine epsilon, this # "usually" gives a good balance between precision and numerical # stability. - self.step = np.sqrt(np.finfo(functional.domain.dtype).eps) - else: - self.step = float(step) + step = np.sqrt(np.finfo(functional.domain.dtype).eps) + + self.step = float(step) self.method, method_in = str(method).lower(), method if self.method not in ('backward', 'forward', 'central'): @@ -290,7 +290,7 @@ def derivative(self, point): Compute a numerical estimate of the derivative of the squared L2 norm: >>> space = odl.rn(3) - >>> func = odl.solvers.L2NormSquared(space) + >>> func = odl.functional.L2NormSquared(space) >>> grad = NumericalGradient(func) >>> hess = grad.derivative([1, 1, 1]) >>> hess([1, 0, 0]) @@ -307,5 +307,5 @@ def derivative(self, point): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/functional/example_funcs.py b/odl/functional/example_funcs.py similarity index 96% rename from odl/solvers/functional/example_funcs.py rename to odl/functional/example_funcs.py index c66ef364f7e..8d3be05acc2 100644 --- a/odl/solvers/functional/example_funcs.py +++ b/odl/functional/example_funcs.py @@ -11,9 +11,9 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.solvers.functional.functional import Functional -from odl.operator import Operator, MatrixOperator -from odl.space.base_tensors import TensorSpace +from odl.functional.functional import Functional +from odl.core.operator import Operator, MatrixOperator +from odl.core.space.base_tensors import TensorSpace __all__ = ('RosenbrockFunctional',) @@ -158,5 +158,5 @@ def derivative(self, x): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/functional/functional.py b/odl/functional/functional.py similarity index 97% rename from odl/solvers/functional/functional.py rename to odl/functional/functional.py index 25622c50e98..7aae92a1fcc 100644 --- a/odl/solvers/functional/functional.py +++ b/odl/functional/functional.py @@ -11,14 +11,14 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.operator.operator import ( +from odl.core.operator.operator import ( Operator, OperatorComp, OperatorLeftScalarMult, OperatorRightScalarMult, OperatorRightVectorMult, OperatorSum, OperatorPointwiseProduct) -from odl.operator.default_ops import (IdentityOperator, ConstantOperator) +from odl.core.operator.default_ops import (IdentityOperator, ConstantOperator) from odl.solvers.nonsmooth import (proximal_arg_scaling, proximal_translation, proximal_quadratic_perturbation, proximal_const_func, proximal_convex_conj) -from odl.util import signature_string, indent +from odl.core.util import signature_string, indent __all__ = ('Functional', 'FunctionalLeftScalarMult', @@ -320,7 +320,7 @@ def __mul__(self, other): # Left multiplication is more efficient, so we can use this in the # case of linear functional. if other == 0: - from odl.solvers.functional.default_functionals import ( + from odl.functional.default_functionals import ( ConstantFunctional) return ConstantFunctional(self.domain, self(self.domain.zero())) @@ -385,7 +385,7 @@ def __rmul__(self, other): """ if other in self.range: if other == 0: - from odl.solvers.functional.default_functionals import ( + from odl.functional.default_functionals import ( ZeroFunctional) return ZeroFunctional(self.domain) else: @@ -691,7 +691,7 @@ def functional(self): @property def gradient(self): """Gradient operator of the functional.""" - return self.vector * self.operator.gradient * self.vector + return self.vector @ self.operator.gradient @ self.vector @property def convex_conj(self): @@ -755,7 +755,7 @@ def __init__(self, func, scalar): The scalar to be added to the functional. The `field` of the ``domain`` is the range of the functional. """ - from odl.solvers.functional.default_functionals import ( + from odl.functional.default_functionals import ( ConstantFunctional) if not isinstance(func, Functional): @@ -910,9 +910,9 @@ def __init__(self, left, right): Examples -------- >>> space = odl.rn(3) - >>> l1 = odl.solvers.L1Norm(space) - >>> l2 = odl.solvers.L2Norm(space) - >>> f = odl.solvers.InfimalConvolution(l1.convex_conj, l2.convex_conj) + >>> l1 = odl.functional.L1Norm(space) + >>> l2 = odl.functional.L2Norm(space) + >>> f = odl.functional.InfimalConvolution(l1.convex_conj, l2.convex_conj) >>> x = f.domain.one() >>> f.convex_conj(x) - (l1(x) + l2(x)) 0.0 @@ -1137,9 +1137,9 @@ def __init__(self, left, right): Construct the functional || . ||_2^2 * 3 >>> space = odl.rn(2) - >>> func1 = odl.solvers.L2NormSquared(space) - >>> func2 = odl.solvers.ConstantFunctional(space, 3) - >>> prod = odl.solvers.FunctionalProduct(func1, func2) + >>> func1 = odl.functional.L2NormSquared(space) + >>> func2 = odl.functional.ConstantFunctional(space, 3) + >>> prod = odl.functional.FunctionalProduct(func1, func2) >>> prod([2, 3]) # expect (2**2 + 3**2) * 3 = 39 39.0 """ @@ -1196,9 +1196,9 @@ def __init__(self, dividend, divisor): Construct the functional || . ||_2 / 5 >>> space = odl.rn(2) - >>> func1 = odl.solvers.L2Norm(space) - >>> func2 = odl.solvers.ConstantFunctional(space, 5) - >>> prod = odl.solvers.FunctionalQuotient(func1, func2) + >>> func1 = odl.functional.L2Norm(space) + >>> func2 = odl.functional.ConstantFunctional(space, 5) + >>> prod = odl.functional.FunctionalQuotient(func1, func2) >>> prod([3, 4]) # expect sqrt(3**2 + 4**2) / 5 = 1 1.0 """ @@ -1378,10 +1378,10 @@ def __init__(self, functional, point, subgrad): Example of initializing the Bregman distance functional: >>> space = odl.uniform_discr(0, 1, 10) - >>> l2_squared = odl.solvers.L2NormSquared(space) + >>> l2_squared = odl.functional.L2NormSquared(space) >>> point = space.one() >>> subgrad = l2_squared.gradient(point) - >>> bregman_dist = odl.solvers.BregmanDistance( + >>> bregman_dist = odl.functional.BregmanDistance( ... l2_squared, point, subgrad) This is gives squared L2 distance to the given point, ||x - 1||^2: @@ -1593,5 +1593,5 @@ def convex_conj(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/__init__.py b/odl/solvers/__init__.py index 5b4d497ede4..27ced0c3c2d 100644 --- a/odl/solvers/__init__.py +++ b/odl/solvers/__init__.py @@ -10,7 +10,6 @@ from __future__ import absolute_import -from .functional import * from .iterative import * from .nonsmooth import * from .smooth import * @@ -18,7 +17,6 @@ __all__ = () -__all__ += functional.__all__ __all__ += iterative.__all__ __all__ += nonsmooth.__all__ __all__ += smooth.__all__ diff --git a/odl/solvers/iterative/iterative.py b/odl/solvers/iterative/iterative.py index b9d7a996f47..57ee985fdca 100644 --- a/odl/solvers/iterative/iterative.py +++ b/odl/solvers/iterative/iterative.py @@ -12,8 +12,8 @@ from builtins import next import numpy as np -from odl.operator import IdentityOperator, OperatorComp, OperatorSum -from odl.util import normalized_scalar_param_list +from odl.core.operator import IdentityOperator, OperatorComp, OperatorSum +from odl.core.util import normalized_scalar_param_list __all__ = ('landweber', 'conjugate_gradient', 'conjugate_gradient_normal', @@ -523,5 +523,5 @@ def kaczmarz(ops, x, rhs, niter, omega=1, projection=None, random=False, if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/iterative/statistical.py b/odl/solvers/iterative/statistical.py index 88ca62ad853..1d420309d1d 100644 --- a/odl/solvers/iterative/statistical.py +++ b/odl/solvers/iterative/statistical.py @@ -10,6 +10,7 @@ from __future__ import print_function, division, absolute_import import numpy as np +from odl.core.array_API_support import maximum, any, log, sum __all__ = ('mlem', 'osmlem', 'poisson_log_likelihood') @@ -156,13 +157,13 @@ def osmlem(op, x, data, niter, callback=None, **kwargs): # TODO: let users give this. eps = 1e-8 - if np.any(np.less(x, 0)): + if any(x < 0): raise ValueError('`x` must be non-negative') # Extract the sensitivites parameter sensitivities = kwargs.pop('sensitivities', None) if sensitivities is None: - sensitivities = [np.maximum(opi.adjoint(opi.range.one()), eps) + sensitivities = [maximum(opi.adjoint(opi.range.one()), eps) for opi in op] else: # Make sure the sensitivities is a list of the correct size. @@ -177,7 +178,8 @@ def osmlem(op, x, data, niter, callback=None, **kwargs): for _ in range(niter): for i in range(n_ops): op[i](x, out=tmp_ran[i]) - tmp_ran[i].ufuncs.maximum(eps, out=tmp_ran[i]) + maximum(tmp_ran[i], eps, out=tmp_ran[i]) + data[i].divide(tmp_ran[i], out=tmp_ran[i]) op[i].adjoint(tmp_ran[i], out=tmp_dom) @@ -199,7 +201,7 @@ def poisson_log_likelihood(x, data): data : ``op.range`` element Data whose log-likelihood given ``x`` shall be calculated. """ - if np.any(np.less(x, 0)): + if any(x < 0): raise ValueError('`x` must be non-negative') - return np.sum(data * np.log(x + 1e-8) - x) + return sum(data * log(x + 1e-8) - x) diff --git a/odl/solvers/nonsmooth/admm.py b/odl/solvers/nonsmooth/admm.py index 3a76428d016..ea4e49e6607 100644 --- a/odl/solvers/nonsmooth/admm.py +++ b/odl/solvers/nonsmooth/admm.py @@ -11,7 +11,7 @@ from __future__ import division from builtins import range -from odl.operator import Operator, OpDomainError +from odl.core.operator import Operator, OpDomainError __all__ = ('admm_linearized',) diff --git a/odl/solvers/nonsmooth/douglas_rachford.py b/odl/solvers/nonsmooth/douglas_rachford.py index c512cc2b159..1195a3a03cf 100644 --- a/odl/solvers/nonsmooth/douglas_rachford.py +++ b/odl/solvers/nonsmooth/douglas_rachford.py @@ -12,7 +12,7 @@ import numpy as np -from odl.operator import Operator +from odl.core.operator import Operator __all__ = ('douglas_rachford_pd', 'douglas_rachford_pd_stepsize') diff --git a/odl/solvers/nonsmooth/forward_backward.py b/odl/solvers/nonsmooth/forward_backward.py index 4f1f26376bb..f4a4bc3e58d 100644 --- a/odl/solvers/nonsmooth/forward_backward.py +++ b/odl/solvers/nonsmooth/forward_backward.py @@ -10,7 +10,7 @@ from __future__ import print_function, division, absolute_import -from odl.operator import Operator +from odl.core.operator import Operator __all__ = ('forward_backward_pd',) diff --git a/odl/solvers/nonsmooth/primal_dual_hybrid_gradient.py b/odl/solvers/nonsmooth/primal_dual_hybrid_gradient.py index ae7aea3cdd9..c986a7c2dfd 100644 --- a/odl/solvers/nonsmooth/primal_dual_hybrid_gradient.py +++ b/odl/solvers/nonsmooth/primal_dual_hybrid_gradient.py @@ -15,7 +15,7 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.operator import Operator +from odl.core.operator import Operator __all__ = ('pdhg', 'pdhg_stepsize') @@ -372,5 +372,5 @@ def pdhg_stepsize(L, tau=None, sigma=None): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/nonsmooth/proximal_gradient_solvers.py b/odl/solvers/nonsmooth/proximal_gradient_solvers.py index 88291196893..37baf2790bf 100644 --- a/odl/solvers/nonsmooth/proximal_gradient_solvers.py +++ b/odl/solvers/nonsmooth/proximal_gradient_solvers.py @@ -214,5 +214,5 @@ def accelerated_proximal_gradient(x, f, g, gamma, niter, callback=None, if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/nonsmooth/proximal_operators.py b/odl/solvers/nonsmooth/proximal_operators.py index 0d83472ff27..625e43f96ca 100644 --- a/odl/solvers/nonsmooth/proximal_operators.py +++ b/odl/solvers/nonsmooth/proximal_operators.py @@ -22,13 +22,21 @@ """ from __future__ import print_function, division, absolute_import -import numpy as np -from odl.operator import ( +import warnings + +import numpy as np +import math +from odl.core.operator import ( Operator, IdentityOperator, ConstantOperator, DiagonalOperator, PointwiseNorm, MultiplyOperator) -from odl.space import ProductSpace -from odl.set.space import LinearSpaceElement +from odl.core.space.pspace import ProductSpace, ProductSpaceElement +from odl.core.space.base_tensors import Tensor +from odl.core.set.space import LinearSpace, LinearSpaceElement +from odl.core.array_API_support.element_wise import maximum, minimum, abs, divide, sign, square, sqrt, less_equal, logical_not, exp +from odl.core.array_API_support.statistical import sum +from odl.core.util.scipy_compatibility import lambertw, scipy_lambertw +from odl.core.util.dtype_utils import is_complex_dtype __all__ = ('combine_proximals', 'proximal_convex_conj', 'proximal_translation', @@ -44,6 +52,15 @@ 'proximal_convex_conj_kl', 'proximal_convex_conj_kl_cross_entropy', 'proximal_huber') +def _numerical_epsilon(space: LinearSpace): + """Determine numerical precision, preferably from the data type of the space, + else defaulting to double-precision float in case there is no single dtype + (e.g. `ProductSpaces`).""" + dtype_id = getattr(space, 'dtype_identifier', 'float64') + # Always use NumPy for resolution, assuming that different backends would offer + # the same precision for the corresponding types. + eps = np.finfo(dtype_id).resolution * 10 + return eps def combine_proximals(*factory_list): r"""Combine proximal operators into a diagonal product space operator. @@ -274,15 +291,17 @@ def proximal_arg_scaling(prox_factory, scaling): # the others. # Since these checks are computationally expensive, we do not execute them # unconditionally, but only if the scaling factor is a scalar: - if np.isscalar(scaling): + domain = prox_factory(1.0).domain + if isinstance(scaling, (int, float, complex)): if scaling == 0: - return proximal_const_func(prox_factory(1.0).domain) + return proximal_const_func(domain) elif scaling.imag != 0: raise ValueError("Complex scaling not supported.") else: scaling = float(scaling.real) + else: - scaling = np.asarray(scaling) + assert scaling in domain, f"The scaling {scaling} was passed as a {type(scaling)}, which is not supported. Please pass it either as a float or as an element of the domain of the prox_factory." def arg_scaling_prox_factory(sigma): """Create proximal for the translation with a given sigma. @@ -380,22 +399,19 @@ def quadratic_perturbation_prox_factory(sigma): The proximal operator of ``sigma * (F(x) + a * \|x\|^2 + )``, where ``sigma`` is the step size """ - if np.isscalar(sigma): - sigma = float(sigma) - else: - sigma = np.asarray(sigma) - const = 1.0 / np.sqrt(sigma * 2.0 * a + 1) + const = 1.0 / math.sqrt(sigma * 2.0 * a + 1) prox = proximal_arg_scaling(prox_factory, const)(sigma) + if u is not None: - return (MultiplyOperator(const, domain=u.space, range=u.space) * - prox * + return (MultiplyOperator(const, domain=u.space, range=u.space) @ + prox @ (MultiplyOperator(const, domain=u.space, range=u.space) - sigma * const * u)) else: space = prox.domain - return (MultiplyOperator(const, domain=space, range=space) * - prox * MultiplyOperator(const, domain=space, range=space)) + return (MultiplyOperator(const, domain=space, range=space) @ + prox @ MultiplyOperator(const, domain=space, range=space)) return quadratic_perturbation_prox_factory @@ -608,12 +624,12 @@ def __init__(self, sigma): def _call(self, x, out): """Apply the operator to ``x`` and store the result in ``out``.""" if lower is not None and upper is None: - x.ufuncs.maximum(lower, out=out) + maximum(x, lower, out=out) elif lower is None and upper is not None: - x.ufuncs.minimum(upper, out=out) + minimum(x, upper, out=out) elif lower is not None and upper is not None: - x.ufuncs.maximum(lower, out=out) - out.ufuncs.minimum(upper, out=out) + maximum(x, lower, out=out) + minimum(out, upper, out=out) else: out.assign(x) @@ -781,16 +797,16 @@ def __init__(self, sigma): def _call(self, x, out): """Apply the operator to ``x`` and stores the result in ``out``.""" - dtype = getattr(self.domain, 'dtype', float) - eps = np.finfo(dtype).resolution * 10 + + eps = _numerical_epsilon(self.domain) if g is None: x_norm = x.norm() * (1 + eps) if x_norm > 0: step = self.sigma * lam / x_norm else: - step = np.infty - + step = np.inf + if step < 1.0: out.lincomb(1.0 - step, x) else: @@ -801,7 +817,7 @@ def _call(self, x, out): if x_norm > 0: step = self.sigma * lam / x_norm else: - step = np.infty + step = np.inf if step < 1.0: out.lincomb(1.0 - step, x, step, g) @@ -992,7 +1008,7 @@ def _call(self, x, out): 2 * sig * lam / (1 + 2 * sig * lam), g) else: # sig in space if g is None: - x.divide(1 + 2 * sig * lam, out=out) + divide(x, 1 + 2 * sig * lam, out=out) else: if x is out: # Can't write to `out` since old `x` is still needed @@ -1079,8 +1095,7 @@ def proximal_convex_conj_l1(space, lam=1, g=None): proximal_l1 : proximal without convex conjugate """ # Fix for rounding errors - dtype = getattr(space, 'dtype', float) - eps = np.finfo(dtype).resolution * 10 + eps = _numerical_epsilon(space) lam = float(lam * (1 - eps)) if g is not None and g not in space: @@ -1124,12 +1139,12 @@ def _call(self, x, out): diff = x # out = max( |x-sig*g|, lam ) / lam - diff.ufuncs.absolute(out=out) - out.ufuncs.maximum(lam, out=out) + abs(diff, out=out) + maximum(out, lam, out=out) out /= lam # out = diff / ... - diff.divide(out, out=out) + divide(diff, out, out=out) return ProximalConvexConjL1 @@ -1191,8 +1206,7 @@ def proximal_convex_conj_l1_l2(space, lam=1, g=None): proximal_convex_conj_l1 : Scalar or non-isotropic vectorial variant """ # Fix for rounding errors - dtype = getattr(space, 'dtype', float) - eps = np.finfo(dtype).resolution * 10 + eps = _numerical_epsilon(space) lam = float(lam * (1 - eps)) if g is not None and g not in space: @@ -1228,7 +1242,7 @@ def _call(self, x, out): # denom = max( |x-sig*g|_2, lam ) / lam (|.|_2 pointwise) pwnorm = PointwiseNorm(self.domain, exponent=2) denom = pwnorm(diff) - denom.ufuncs.maximum(lam, out=denom) + maximum(denom, lam, out=denom) denom /= lam # Pointwise division @@ -1336,12 +1350,12 @@ def _call(self, x, out): # We write the operator as # x - (x - g) / max(|x - g| / sig*lam, 1) - denom = diff.ufuncs.absolute() + denom = abs(diff) denom /= self.sigma * lam - denom.ufuncs.maximum(1, out=denom) + maximum(denom, 1, out=denom) # out = (x - g) / denom - diff.ufuncs.divide(denom, out=out) + divide(diff, denom, out=out) # out = x - ... out.lincomb(1, x, -1, out) @@ -1436,11 +1450,11 @@ def _call(self, x, out): pwnorm = PointwiseNorm(self.domain, exponent=2) denom = pwnorm(diff) denom /= self.sigma * lam - denom.ufuncs.maximum(1, out=denom) + maximum(denom, 1, out=denom) # out = (x - g) / denom for out_i, diff_i in zip(out, diff): - diff_i.divide(denom, out=out_i) + divide(diff_i, denom, out=out_i) # out = x - ... out.lincomb(1, x, -1, out) @@ -1607,11 +1621,11 @@ def proj_l1(x, radius=1, out=None): if out is None: out = x.space.element() - u = x.ufuncs.absolute() - if u.ufuncs.sum() <= radius: + u = abs(x) + if sum(u) <= radius: out[:] = x else: - v = x.ufuncs.sign() + v = sign(u) proj_simplex(u, radius, out) out *= v @@ -1787,7 +1801,7 @@ def _call(self, x, out): else: out.assign(x) out -= lam - out.ufuncs.square(out=out) + square(out, out=out) # out = ... + 4*lam*sigma*g # If g is None, it is taken as the one element @@ -1797,7 +1811,7 @@ def _call(self, x, out): out.lincomb(1, out, 4.0 * lam * self.sigma, g) # out = x - sqrt(...) + lam - out.ufuncs.sqrt(out=out) + sqrt(out, out=out) out.lincomb(1, x, -1, out) out += lam @@ -1906,26 +1920,51 @@ def __init__(self, sigma): sigma : positive float """ self.sigma = float(sigma) + nonlocal g + self.g = g super(ProximalConvexConjKLCrossEntropy, self).__init__( domain=space, range=space, linear=False) def _call(self, x, out): """Return ``self(x, out=out)``.""" # Lazy import to improve `import odl` time - import scipy.special - - if g is None: - # If g is None, it is taken as the one element - # Different branches of lambertw is not an issue, see Notes - lambw = scipy.special.lambertw( - (self.sigma / lam) * np.exp(x / lam)) + if isinstance(x, ProductSpaceElement) and x[0].space.device!= 'cpu': + warnings.warn(f'The function ``_call`` of ``ProximalConvexConjKLCrossEntropy`` involves a ``lambertw`` call. At present, ODL relies on scipy to perform it and it does not support GPU inputs for that specific function. As such, the input will be moved to the cpu, which will slow down the algorithm.', stacklevel=2) + # FML + namespace = x[0].space.array_namespace + if g is None: + lambw = [scipy_lambertw( + (self.sigma / lam) * namespace.exp(sub_x.to('cpu') / lam)) for sub_x in x.asarray()] + else: + lambw = [scipy_lambertw( + (self.sigma / lam) * sub_g.to('cpu')* namespace.exp(sub_x.to('cpu') / lam)) for (sub_g, sub_x) in zip(self.g.asarray(), x.asarray())] + if not is_complex_dtype(self.domain.dtype): + lambw = [lambw_.real for lambw_ in lambw] + elif isinstance(x, Tensor) and x.space.device!= 'cpu': + namespace = x.space.array_namespace + if g is None: + lambw = scipy_lambertw( + (self.sigma / lam) * namespace.exp(x.asarray().to('cpu') / lam)) + else: + lambw = scipy_lambertw( + (self.sigma / lam) * self.g.asarray().to('cpu')* namespace.exp(x.asarray().to('cpu') / lam)) + if not is_complex_dtype(self.domain.dtype): + lambw = [lambw_.real for lambw_ in lambw] else: - # Different branches of lambertw is not an issue, see Notes - lambw = scipy.special.lambertw( - (self.sigma / lam) * g * np.exp(x / lam)) + print('ELSE branch') + print(type(x)) + if g is None: + # If g is None, it is taken as the one element + # Different branches of lambertw is not an issue, see Notes + lambw = lambertw( + (self.sigma / lam) * exp(x / lam)) + else: + # Different branches of lambertw is not an issue, see Notes + lambw = lambertw( + (self.sigma / lam) * self.g * exp(x / lam)) - if not np.issubdtype(self.domain.dtype, np.complexfloating): - lambw = lambw.real + if not is_complex_dtype(self.domain.dtype): + lambw = lambw.real lambw = x.space.element(lambw) @@ -1982,13 +2021,13 @@ def _call(self, x, out): if isinstance(self.domain, ProductSpace): norm = PointwiseNorm(self.domain, 2)(x) else: - norm = x.ufuncs.absolute() + norm = abs(x) - mask = norm.ufuncs.less_equal(gamma + self.sigma) + mask = less_equal(norm, gamma + self.sigma) out[mask] = gamma / (gamma + self.sigma) * x[mask] - mask.ufuncs.logical_not(out=mask) - sign_x = x.ufuncs.sign() + logical_not(mask, out=mask) + sign_x = sign(x) out[mask] = x[mask] - self.sigma * sign_x[mask] return out @@ -1997,5 +2036,5 @@ def _call(self, x, out): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/smooth/gradient.py b/odl/solvers/smooth/gradient.py index 79e5c4504c0..739cca34d3e 100644 --- a/odl/solvers/smooth/gradient.py +++ b/odl/solvers/smooth/gradient.py @@ -9,10 +9,11 @@ """Gradient-based optimization schemes.""" from __future__ import print_function, division, absolute_import -import numpy as np +import math from odl.solvers.util import ConstantLineSearch +from odl.core.array_API_support import sqrt __all__ = ('steepest_descent', 'adam') @@ -92,7 +93,7 @@ def steepest_descent(f, x, line_search=1.0, maxiter=1000, tol=1e-16, grad(x, out=grad_x) dir_derivative = -grad_x.norm() ** 2 - if np.abs(dir_derivative) < tol: + if abs(dir_derivative) < tol: return # we have converged step = line_search(x, -grad_x, dir_derivative) @@ -172,14 +173,14 @@ def adam(f, x, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, m.lincomb(beta1, m, 1 - beta1, grad_x) v.lincomb(beta2, v, 1 - beta2, grad_x ** 2) - step = learning_rate * np.sqrt(1 - beta2) / (1 - beta1) + step = learning_rate * math.sqrt(1 - beta2) / (1 - beta1) - x.lincomb(1, x, -step, m / (np.sqrt(v) + eps)) + x.lincomb(1, x, -step, m / (sqrt(v) + eps)) if callback is not None: callback(x) if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/smooth/newton.py b/odl/solvers/smooth/newton.py index b963ed29a98..0a985c86ea8 100644 --- a/odl/solvers/smooth/newton.py +++ b/odl/solvers/smooth/newton.py @@ -491,5 +491,5 @@ def broydens_method(f, x, line_search=1.0, impl='first', maxiter=1000, if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/util/callback.py b/odl/solvers/util/callback.py index bdb80aa55b2..c41537f13ee 100644 --- a/odl/solvers/util/callback.py +++ b/odl/solvers/util/callback.py @@ -19,7 +19,7 @@ import numpy as np -from odl.util import signature_string +from odl.core.util import signature_string __all__ = ('Callback', 'CallbackStore', 'CallbackApply', 'CallbackPrintTiming', 'CallbackPrintIteration', 'CallbackPrint', 'CallbackPrintNorm', @@ -290,7 +290,7 @@ def __init__(self, function, step=1): By default, the function is called on each iterate: >>> def func(x): - ... print(np.max(x)) + ... print(odl.max(x)) >>> callback = CallbackApply(func) >>> x = odl.rn(3).element([1, 2, 3]) >>> callback(x) @@ -571,8 +571,8 @@ class CallbackShow(Callback): See Also -------- - odl.discr.discr_space.DiscretizedSpaceElement.show - odl.space.base_tensors.Tensor.show + odl.core.discr.discr_space.DiscretizedSpaceElement.show + odl.core.space.base_tensors.Tensor.show """ def __init__(self, title=None, step=1, saveto=None, **kwargs): @@ -860,9 +860,9 @@ def __init__(self, functional, title='convergence', logx=False, logy=False, self.ax.set_ylabel('function value') self.ax.set_title(title) if logx: - self.ax.set_xscale("log", nonposx='clip') + self.ax.set_xscale("log") if logy: - self.ax.set_yscale("log", nonposy='clip') + self.ax.set_yscale("log") def __call__(self, x): """Implement ``self(x)``.""" @@ -1138,5 +1138,5 @@ def __call__(self, x): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/solvers/util/steplen.py b/odl/solvers/util/steplen.py index 95e87d0d748..256f5aea820 100644 --- a/odl/solvers/util/steplen.py +++ b/odl/solvers/util/steplen.py @@ -96,7 +96,7 @@ def __init__(self, function, tau=0.5, discount=0.01, alpha=1.0, Create line search >>> r3 = odl.rn(3) - >>> func = odl.solvers.L2NormSquared(r3) + >>> func = odl.functional.L2NormSquared(r3) >>> line_search = BacktrackingLineSearch(func) Find step in point x and direction d that decreases the function value. @@ -135,7 +135,7 @@ def __init__(self, function, tau=0.5, discount=0.01, alpha=1.0, # machine epsilon. if max_num_iter is None: try: - dtype = self.function.domain.dtype + dtype = self.function.domain.dtype_identifier except AttributeError: dtype = float eps = 10 * np.finfo(dtype).resolution @@ -290,5 +290,5 @@ def __call__(self, x, direction, dir_derivative): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/space/base_tensors.py b/odl/space/base_tensors.py deleted file mode 100644 index d2b5b9f858f..00000000000 --- a/odl/space/base_tensors.py +++ /dev/null @@ -1,1009 +0,0 @@ -# Copyright 2014-2020 The ODL contributors -# -# This file is part of ODL. -# -# This Source Code Form is subject to the terms of the Mozilla Public License, -# v. 2.0. If a copy of the MPL was not distributed with this file, You can -# obtain one at https://mozilla.org/MPL/2.0/. - -"""Base classes for implementations of tensor spaces.""" - -from __future__ import absolute_import, division, print_function - -from numbers import Integral - -import numpy as np - -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY - -from odl.set.sets import ComplexNumbers, RealNumbers -from odl.set.space import LinearSpace, LinearSpaceElement -from odl.util import ( - array_str, dtype_str, indent, is_complex_floating_dtype, is_floating_dtype, - is_numeric_dtype, is_real_dtype, is_real_floating_dtype, safe_int_conv, - signature_string, writable_array) -from odl.util.ufuncs import TensorSpaceUfuncs -from odl.util.utility import TYPE_MAP_C2R, TYPE_MAP_R2C, nullcontext - -__all__ = ('TensorSpace',) - - -class TensorSpace(LinearSpace): - - """Base class for sets of tensors of arbitrary data type. - - A tensor is, in the most general sense, a multi-dimensional array - that allows operations per entry (keep the rank constant), - reductions / contractions (reduce the rank) and broadcasting - (raises the rank). - For non-numeric data type like ``object``, the range of valid - operations is rather limited since such a set of tensors does not - define a vector space. - Any numeric data type, on the other hand, is considered valid for - a tensor space, although certain operations - like division with - integer dtype - are not guaranteed to yield reasonable results. - - Under these restrictions, all basic vector space operations are - supported by this class, along with reductions based on arithmetic - or comparison, and element-wise mathematical functions ("ufuncs"). - - See the `Wikipedia article on tensors`_ for further details. - See also [Hac2012] "Part I Algebraic Tensors" for a rigorous - treatment of tensors with a definition close to this one. - - Note also that this notion of tensors is the same as in popular - Deep Learning frameworks. - - References - ---------- - [Hac2012] Hackbusch, W. *Tensor Spaces and Numerical Tensor Calculus*. - Springer, 2012. - - .. _Wikipedia article on tensors: https://en.wikipedia.org/wiki/Tensor - """ - - def __init__(self, shape, dtype): - """Initialize a new instance. - - Parameters - ---------- - shape : nonnegative int or sequence of nonnegative ints - Number of entries of type ``dtype`` per axis in this space. A - single integer results in a space with rank 1, i.e., 1 axis. - dtype : - Data type of elements in this space. Can be provided - in any way the `numpy.dtype` constructor understands, e.g. - as built-in type or as a string. - For a data type with a ``dtype.shape``, these extra dimensions - are added *to the left* of ``shape``. - """ - # Handle shape and dtype, taking care also of dtypes with shape - try: - shape, shape_in = tuple(safe_int_conv(s) for s in shape), shape - except TypeError: - shape, shape_in = (safe_int_conv(shape),), shape - if any(s < 0 for s in shape): - raise ValueError('`shape` must have only nonnegative entries, got ' - '{}'.format(shape_in)) - dtype = np.dtype(dtype) - - # We choose this order in contrast to Numpy, since we usually want - # to represent discretizations of vector- or tensor-valued functions, - # i.e., if dtype.shape == (3,) we expect f[0] to have shape `shape`. - self.__shape = dtype.shape + shape - self.__dtype = dtype.base - - if is_real_dtype(self.dtype): - # real includes non-floating-point like integers - field = RealNumbers() - self.__real_dtype = self.dtype - self.__real_space = self - self.__complex_dtype = TYPE_MAP_R2C.get(self.dtype, None) - self.__complex_space = None # Set in first call of astype - elif is_complex_floating_dtype(self.dtype): - field = ComplexNumbers() - self.__real_dtype = TYPE_MAP_C2R[self.dtype] - self.__real_space = None # Set in first call of astype - self.__complex_dtype = self.dtype - self.__complex_space = self - else: - field = None - - LinearSpace.__init__(self, field) - - ########## static methods ########## - @staticmethod - def available_dtypes(): - """Return the set of data types available in this implementation. - - This method should be overridden by subclasses. - """ - raise NotImplementedError('abstract method') - - @staticmethod - def default_dtype(field=None): - """Return the default data type for a given field. - - This method should be overridden by subclasses. - - Parameters - ---------- - field : `Field`, optional - Set of numbers to be represented by a data type. - - Returns - ------- - dtype : - Numpy data type specifier. - """ - raise NotImplementedError('abstract method') - - ########## Attributes ########## - @property - def complex_dtype(self): - """The complex dtype corresponding to this space's `dtype`. - - Raises - ------ - NotImplementedError - If `dtype` is not a numeric data type. - """ - if not is_numeric_dtype(self.dtype): - raise NotImplementedError( - '`complex_dtype` not defined for non-numeric `dtype`') - return self.__complex_dtype - - @property - def complex_space(self): - """The space corresponding to this space's `complex_dtype`. - - Raises - ------ - ValueError - If `dtype` is not a numeric data type. - """ - if not is_numeric_dtype(self.dtype): - raise ValueError( - '`complex_space` not defined for non-numeric `dtype`') - return self.astype(self.complex_dtype) - - @property - def default_order(self): - """Default storage order for new elements in this space. - - This property should be overridden by subclasses. - """ - raise NotImplementedError('abstract method') - - @property - def dtype(self): - """Scalar data type of each entry in an element of this space.""" - return self.__dtype - - @property - def element_type(self): - """Type of elements in this space: `Tensor`.""" - return Tensor - - @property - def examples(self): - """Return example random vectors.""" - # Always return the same numbers - rand_state = np.random.get_state() - np.random.seed(1337) - - if is_numeric_dtype(self.dtype): - yield ('Linearly spaced samples', self.element( - np.linspace(0, 1, self.size).reshape(self.shape))) - yield ('Normally distributed noise', - self.element(np.random.standard_normal(self.shape))) - - if self.is_real: - yield ('Uniformly distributed noise', - self.element(np.random.uniform(size=self.shape))) - elif self.is_complex: - yield ('Uniformly distributed noise', - self.element(np.random.uniform(size=self.shape) + - np.random.uniform(size=self.shape) * 1j)) - else: - # TODO: return something that always works, like zeros or ones? - raise NotImplementedError('no examples available for non-numeric' - 'data type') - - np.random.set_state(rand_state) - - @property - def impl(self): - """Name of the implementation back-end of this tensor set. - - This property should be overridden by subclasses. - """ - raise NotImplementedError('abstract method') - - @property - def itemsize(self): - """Size in bytes of one entry in an element of this space.""" - return int(self.dtype.itemsize) - - @property - def is_complex(self): - """True if this is a space of complex tensors.""" - return is_complex_floating_dtype(self.dtype) - - @property - def is_real(self): - """True if this is a space of real tensors.""" - return is_real_floating_dtype(self.dtype) - - @property - def nbytes(self): - """Total number of bytes in memory used by an element of this space.""" - return self.size * self.itemsize - - @property - def ndim(self): - """Number of axes (=dimensions) of this space, also called "rank".""" - return len(self.shape) - - @property - def real_dtype(self): - """The real dtype corresponding to this space's `dtype`. - - Raises - ------ - NotImplementedError - If `dtype` is not a numeric data type. - """ - if not is_numeric_dtype(self.dtype): - raise NotImplementedError( - '`real_dtype` not defined for non-numeric `dtype`') - return self.__real_dtype - - @property - def real_space(self): - """The space corresponding to this space's `real_dtype`. - - Raises - ------ - ValueError - If `dtype` is not a numeric data type. - """ - if not is_numeric_dtype(self.dtype): - raise ValueError( - '`real_space` not defined for non-numeric `dtype`') - return self.astype(self.real_dtype) - - @property - def shape(self): - """Number of scalar elements per axis. - - .. note:: - If `dtype` has a shape, we add it to the **left** of the given - ``shape`` in the class creation. This is in contrast to NumPy, - which adds extra axes to the **right**. We do this since we - usually want to represent discretizations of vector- or - tensor-valued functions by this, i.e., if - ``dtype.shape == (3,)`` we expect ``f[0]`` to have shape - ``shape``. - """ - return self.__shape - - @property - def size(self): - """Total number of entries in an element of this space.""" - return (0 if self.shape == () else - int(np.prod(self.shape, dtype='int64'))) - - ########## public methods ########## - def astype(self, dtype): - """Return a copy of this space with new ``dtype``. - - Parameters - ---------- - dtype : - Scalar data type of the returned space. Can be provided - in any way the `numpy.dtype` constructor understands, e.g. - as built-in type or as a string. Data types with non-trivial - shapes are not allowed. - - Returns - ------- - newspace : `TensorSpace` - Version of this space with given data type. - """ - if dtype is None: - # Need to filter this out since Numpy iterprets it as 'float' - raise ValueError('`None` is not a valid data type') - - dtype = np.dtype(dtype) - if dtype == self.dtype: - return self - - if is_numeric_dtype(self.dtype): - # Caching for real and complex versions (exact dtype mappings) - if dtype == self.__real_dtype: - if self.__real_space is None: - self.__real_space = self._astype(dtype) - return self.__real_space - elif dtype == self.__complex_dtype: - if self.__complex_space is None: - self.__complex_space = self._astype(dtype) - return self.__complex_space - else: - return self._astype(dtype) - else: - return self._astype(dtype) - - def one(self): - """Return a tensor of all ones. - - This method should be overridden by subclasses. - - Returns - ------- - one : `Tensor` - A tensor of all one. - """ - raise NotImplementedError('abstract method') - - def zero(self): - """Return a tensor of all zeros. - - This method should be overridden by subclasses. - - Returns - ------- - zero : `Tensor` - A tensor of all zeros. - """ - raise NotImplementedError('abstract method') - - ######### magic methods ######### - def __contains__(self, other): - """Return ``other in self``. - - Returns - ------- - contains : bool - ``True`` if ``other`` has a ``space`` attribute that is equal - to this space, ``False`` otherwise. - - Examples - -------- - Elements created with the `TensorSpace.element` method are - guaranteed to be contained in the same space: - - >>> spc = odl.tensor_space((2, 3), dtype='uint64') - >>> spc.element() in spc - True - >>> x = spc.element([[0, 1, 2], - ... [3, 4, 5]]) - >>> x in spc - True - - Sizes, data types and other essential properties characterize - spaces and decide about membership: - - >>> smaller_spc = odl.tensor_space((2, 2), dtype='uint64') - >>> y = smaller_spc.element([[0, 1], - ... [2, 3]]) - >>> y in spc - False - >>> x in smaller_spc - False - >>> other_dtype_spc = odl.tensor_space((2, 3), dtype='uint32') - >>> z = other_dtype_spc.element([[0, 1, 2], - ... [3, 4, 5]]) - >>> z in spc - False - >>> x in other_dtype_spc - False - - On the other hand, spaces are not unique: - - >>> spc2 = odl.tensor_space((2, 3), dtype='uint64') - >>> spc2 == spc - True - >>> x2 = spc2.element([[5, 4, 3], - ... [2, 1, 0]]) - >>> x2 in spc - True - >>> x in spc2 - True - - Of course, random garbage is not in the space: - - >>> spc = odl.tensor_space((2, 3), dtype='uint64') - >>> None in spc - False - >>> object in spc - False - >>> False in spc - False - """ - return getattr(other, 'space', None) == self - - def __eq__(self, other): - """Return ``self == other``. - - Returns - ------- - equals : bool - True if ``self`` and ``other`` have the same type, `shape` - and `dtype`, otherwise ``False``. - - Examples - -------- - Sizes, data types and other essential properties characterize - spaces and decide about equality: - - >>> spc = odl.tensor_space(3, dtype='uint64') - >>> spc == spc - True - >>> spc2 = odl.tensor_space(3, dtype='uint64') - >>> spc2 == spc - True - >>> smaller_spc = odl.tensor_space(2, dtype='uint64') - >>> spc == smaller_spc - False - >>> other_dtype_spc = odl.tensor_space(3, dtype='uint32') - >>> spc == other_dtype_spc - False - >>> other_shape_spc = odl.tensor_space((3, 1), dtype='uint64') - >>> spc == other_shape_spc - False - """ - if other is self: - return True - - return (type(other) is type(self) and - self.shape == other.shape and - self.dtype == other.dtype) - - def __hash__(self): - """Return ``hash(self)``.""" - return hash((type(self), self.shape, self.dtype)) - - def __len__(self): - """Number of tensor entries along the first axis.""" - return int(self.shape[0]) - - def __repr__(self): - """Return ``repr(self)``.""" - posargs = [self.shape, dtype_str(self.dtype)] - return "{}({})".format(self.__class__.__name__, - signature_string(posargs, [])) - - def __str__(self): - """Return ``str(self)``.""" - return repr(self) - - ########## _underscore methods ########## - def _astype(self, dtype): - """Internal helper for `astype`. - - Subclasses with differing init parameters should overload this - method. - """ - kwargs = {} - if is_floating_dtype(dtype): - # Use weighting only for floating-point types, otherwise, e.g., - # `space.astype(bool)` would fail - weighting = getattr(self, 'weighting', None) - if weighting is not None: - kwargs['weighting'] = weighting - - return type(self)(self.shape, dtype=dtype, **kwargs) - - def _divide(self, x1, x2, out): - """The entry-wise quotient of two tensors, assigned to ``out``. - - This method should be overridden by subclasses. - """ - raise NotImplementedError('abstract method') - - def _multiply(self, x1, x2, out): - """The entry-wise product of two tensors, assigned to ``out``. - - This method should be overridden by subclasses. - """ - raise NotImplementedError('abstract method') - -class Tensor(LinearSpaceElement): - - """Abstract class for representation of `TensorSpace` elements.""" - - ######### static methods ######### - - ######### Attributes ######### - @property - def itemsize(self): - """Size in bytes of one tensor entry.""" - return self.space.itemsize - - @property - def nbytes(self): - """Total number of bytes in memory occupied by this tensor.""" - return self.space.nbytes - - @property - def impl(self): - """Name of the implementation back-end of this tensor.""" - return self.space.impl - - @property - def shape(self): - """Number of elements per axis.""" - return self.space.shape - - @property - def dtype(self): - """Data type of each entry.""" - return self.space.dtype - - @property - def size(self): - """Total number of entries.""" - return self.space.size - - @property - def ndim(self): - """Number of axes (=dimensions) of this tensor.""" - return self.space.ndim - - @property - def ufuncs(self): - """Access to Numpy style universal functions. - - These default ufuncs are always available, but may or may not be - optimized for the specific space in use. - - .. note:: - This interface is will be deprecated when Numpy 1.13 becomes - the minimum required version. Use Numpy ufuncs directly, e.g., - ``np.sqrt(x)`` instead of ``x.ufuncs.sqrt()``. - """ - return TensorSpaceUfuncs(self) - - - ######### public methods ######### - def asarray(self, out=None): - """Extract the data of this tensor as a Numpy array. - - This method should be overridden by subclasses. - - Parameters - ---------- - out : `numpy.ndarray`, optional - Array to write the result to. - - Returns - ------- - asarray : `numpy.ndarray` - Numpy array of the same data type and shape as the space. - If ``out`` was given, the returned object is a reference - to it. - """ - raise NotImplementedError('abstract method') - - def astype(self, dtype): - """Return a copy of this element with new ``dtype``. - - Parameters - ---------- - dtype : - Scalar data type of the returned space. Can be provided - in any way the `numpy.dtype` constructor understands, e.g. - as built-in type or as a string. Data types with non-trivial - shapes are not allowed. - - Returns - ------- - newelem : `Tensor` - Version of this element with given data type. - """ - raise NotImplementedError('abstract method') - - def show(self, title=None, method='', indices=None, force_show=False, - fig=None, **kwargs): - """Display the function graphically. - - Parameters - ---------- - title : string, optional - Set the title of the figure - - method : string, optional - 1d methods: - - ``'plot'`` : graph plot - - ``'scatter'`` : scattered 2d points (2nd axis <-> value) - - 2d methods: - - ``'imshow'`` : image plot with coloring according to - value, including a colorbar. - - ``'scatter'`` : cloud of scattered 3d points - (3rd axis <-> value) - - indices : index expression, optional - Display a slice of the array instead of the full array. The - index expression is most easily created with the `numpy.s_` - constructor, i.e. supply ``np.s_[:, 1, :]`` to display the - first slice along the second axis. - For data with 3 or more dimensions, the 2d slice in the first - two axes at the "middle" along the remaining axes is shown - (semantically ``[:, :, shape[2:] // 2]``). - This option is mutually exclusive to ``coords``. - - force_show : bool, optional - Whether the plot should be forced to be shown now or deferred until - later. Note that some backends always displays the plot, regardless - of this value. - - fig : `matplotlib.figure.Figure`, optional - The figure to show in. Expected to be of same "style", as - the figure given by this function. The most common use case - is that ``fig`` is the return value of an earlier call to - this function. - - kwargs : {'figsize', 'saveto', 'clim', ...}, optional - Extra keyword arguments passed on to the display method. - See the Matplotlib functions for documentation of extra - options. - - Returns - ------- - fig : `matplotlib.figure.Figure` - The resulting figure. It is also shown to the user. - - See Also - -------- - odl.util.graphics.show_discrete_data : Underlying implementation - """ - from odl.discr import uniform_grid - from odl.util.graphics import show_discrete_data - - # Default to showing x-y slice "in the middle" - if indices is None and self.ndim >= 3: - indices = tuple( - [slice(None)] * 2 + [n // 2 for n in self.space.shape[2:]] - ) - - if isinstance(indices, (Integral, slice)): - indices = (indices,) - elif indices is None or indices == Ellipsis: - indices = (slice(None),) * self.ndim - else: - indices = tuple(indices) - - # Replace None by slice(None) - indices = tuple(slice(None) if idx is None else idx for idx in indices) - - if Ellipsis in indices: - # Replace Ellipsis with the correct number of [:] expressions - pos = indices.index(Ellipsis) - indices = (indices[:pos] + - (np.s_[:], ) * (self.ndim - len(indices) + 1) + - indices[pos + 1:]) - - if len(indices) < self.ndim: - raise ValueError('too few axes ({} < {})'.format(len(indices), - self.ndim)) - if len(indices) > self.ndim: - raise ValueError('too many axes ({} > {})'.format(len(indices), - self.ndim)) - - # Squeeze grid and values according to the index expression - full_grid = uniform_grid([0] * self.ndim, np.array(self.shape) - 1, - self.shape) - grid = full_grid[indices].squeeze() - values = self.asarray()[indices].squeeze() - - return show_discrete_data(values, grid, title=title, method=method, - force_show=force_show, fig=fig, **kwargs) - - ######### magic methods ######### - - def __array__(self, dtype=None): - """Return a Numpy array from this tensor. - - Parameters - ---------- - dtype : - Specifier for the data type of the output array. - - Returns - ------- - array : `numpy.ndarray` - """ - if dtype is None: - return self.asarray() - else: - return self.asarray().astype(dtype, copy=AVOID_UNNECESSARY_COPY) - - def __array_wrap__(self, array): - """Return a new tensor wrapping the ``array``. - - Parameters - ---------- - array : `numpy.ndarray` - Array to be wrapped. - - Returns - ------- - wrapper : `Tensor` - Tensor wrapping ``array``. - """ - if array.ndim == 0: - return self.space.field.element(array) - else: - return self.space.element(array) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - """Interface to Numpy's ufunc machinery. - - This method is called by Numpy version 1.13 and higher as a single - point for the ufunc dispatch logic. An object implementing - ``__array_ufunc__`` takes over control when a `numpy.ufunc` is - called on it, allowing it to use custom implementations and - output types. - - This includes handling of in-place arithmetic like - ``npy_array += custom_obj``. In this case, the custom object's - ``__array_ufunc__`` takes precedence over the baseline - `numpy.ndarray` implementation. It will be called with - ``npy_array`` as ``out`` argument, which ensures that the - returned object is a Numpy array. For this to work properly, - ``__array_ufunc__`` has to accept Numpy arrays as ``out`` arguments. - - See the `corresponding NEP`_ and the `interface documentation`_ - for further details. See also the `general documentation on - Numpy ufuncs`_. - - .. note:: - This basic implementation casts inputs and - outputs to Numpy arrays and evaluates ``ufunc`` on those. - For `numpy.ndarray` based data storage, this incurs no - significant overhead compared to direct usage of Numpy arrays. - - For other (in particular non-local) implementations, e.g., - GPU arrays or distributed memory, overhead is significant due - to copies to CPU main memory. In those classes, the - ``__array_ufunc__`` mechanism should be overridden in favor of - a native implementations if possible. - - .. note:: - If no ``out`` parameter is provided, this implementation - just returns the raw array and does not attempt to wrap the - result in any kind of space. - - Parameters - ---------- - ufunc : `numpy.ufunc` - Ufunc that should be called on ``self``. - method : str - Method on ``ufunc`` that should be called on ``self``. - Possible values: - - ``'__call__'``, ``'accumulate'``, ``'at'``, ``'outer'``, - ``'reduce'``, ``'reduceat'`` - - input1, ..., inputN: - Positional arguments to ``ufunc.method``. - kwargs: - Keyword arguments to ``ufunc.method``. - - Returns - ------- - ufunc_result : `Tensor`, `numpy.ndarray` or tuple - Result of the ufunc evaluation. If no ``out`` keyword argument - was given, the result is a `Tensor` or a tuple - of such, depending on the number of outputs of ``ufunc``. - If ``out`` was provided, the returned object or tuple entries - refer(s) to ``out``. - - References - ---------- - .. _corresponding NEP: - https://docs.scipy.org/doc/numpy/neps/ufunc-overrides.html - - .. _interface documentation: - https://docs.scipy.org/doc/numpy/reference/arrays.classes.html\ - #numpy.class.__array_ufunc__ - - .. _general documentation on Numpy ufuncs: - https://docs.scipy.org/doc/numpy/reference/ufuncs.html - - .. _reduceat documentation: - https://docs.scipy.org/doc/numpy/reference/generated/\ - numpy.ufunc.reduceat.html - """ - # --- Process `out` --- # - - # Unwrap out if provided. The output parameters are all wrapped - # in one tuple, even if there is only one. - out_tuple = kwargs.pop('out', ()) - - # Check number of `out` args, depending on `method` - if method == '__call__' and len(out_tuple) not in (0, ufunc.nout): - raise ValueError( - "ufunc {}: need 0 or {} `out` arguments for " - "`method='__call__'`, got {}" - ''.format(ufunc.__name__, ufunc.nout, len(out_tuple))) - elif method != '__call__' and len(out_tuple) not in (0, 1): - raise ValueError( - 'ufunc {}: need 0 or 1 `out` arguments for `method={!r}`, ' - 'got {}'.format(ufunc.__name__, method, len(out_tuple))) - - # We allow our own tensors, the data container type and - # `numpy.ndarray` objects as `out` (see docs for reason for the - # latter) - valid_types = (type(self), type(self.data), np.ndarray) - if not all(isinstance(o, valid_types) or o is None - for o in out_tuple): - return NotImplemented - - # Assign to `out` or `out1` and `out2`, respectively - out = out1 = out2 = None - if len(out_tuple) == 1: - out = out_tuple[0] - elif len(out_tuple) == 2: - out1 = out_tuple[0] - out2 = out_tuple[1] - - # --- Process `inputs` --- # - - # Convert inputs that are ODL tensors or their data containers to - # Numpy arrays so that the native Numpy ufunc is called later - inputs = tuple( - np.asarray(inp) if isinstance(inp, (type(self), type(self.data))) - else inp - for inp in inputs) - - # --- Get some parameters for later --- # - - # Arguments for `writable_array` and/or space constructors - out_dtype = kwargs.get('dtype', None) - if out_dtype is None: - array_kwargs = {} - else: - array_kwargs = {'dtype': out_dtype} - - # --- Evaluate ufunc --- # - - if method == '__call__': - if ufunc.nout == 1: - # Make context for output (trivial one returns `None`) - if out is None: - out_ctx = nullcontext() - else: - out_ctx = writable_array(out, **array_kwargs) - - # Evaluate ufunc - with out_ctx as out_arr: - kwargs['out'] = out_arr - res = ufunc(*inputs, **kwargs) - - # Return result (may be a raw array or a space element) - return res - - elif ufunc.nout == 2: - # Make contexts for outputs (trivial ones return `None`) - if out1 is not None: - out1_ctx = writable_array(out1, **array_kwargs) - else: - out1_ctx = nullcontext() - if out2 is not None: - out2_ctx = writable_array(out2, **array_kwargs) - else: - out2_ctx = nullcontext() - - # Evaluate ufunc - with out1_ctx as out1_arr, out2_ctx as out2_arr: - kwargs['out'] = (out1_arr, out2_arr) - res1, res2 = ufunc(*inputs, **kwargs) - - # Return results (may be raw arrays or space elements) - return res1, res2 - - else: - raise NotImplementedError('nout = {} not supported' - ''.format(ufunc.nout)) - - else: # method != '__call__' - # Make context for output (trivial one returns `None`) - if out is None: - out_ctx = nullcontext() - else: - out_ctx = writable_array(out, **array_kwargs) - - # Evaluate ufunc method - if method == 'at': - with writable_array(inputs[0]) as inp_arr: - res = ufunc.at(inp_arr, *inputs[1:], **kwargs) - else: - with out_ctx as out_arr: - kwargs['out'] = out_arr - res = getattr(ufunc, method)(*inputs, **kwargs) - - # Return result (may be scalar, raw array or space element) - return res - - def __bool__(self): - """Return ``bool(self)``.""" - if self.size > 1: - raise ValueError('The truth value of an array with more than one ' - 'element is ambiguous. ' - 'Use np.any(a) or np.all(a)') - else: - return bool(self.asarray()) - - def __getitem__(self, indices): - """Return ``self[indices]``. - - This method should be overridden by subclasses. - - Parameters - ---------- - indices : index expression - Integer, slice or sequence of these, defining the positions - of the data array which should be accessed. - - Returns - ------- - values : `TensorSpace.dtype` or `Tensor` - The value(s) at the given indices. Note that depending on - the implementation, the returned object may be a (writable) - view into the original array. - """ - raise NotImplementedError('abstract method') - - def __len__(self): - """Return ``len(self)``. - - The length is equal to the number of entries along axis 0. - """ - return len(self.space) - - def __repr__(self): - """Return ``repr(self)``.""" - maxsize_full_print = 2 * np.get_printoptions()['edgeitems'] - self_str = array_str(self, nprint=maxsize_full_print) - if self.ndim == 1 and self.size <= maxsize_full_print: - return '{!r}.element({})'.format(self.space, self_str) - else: - return '{!r}.element(\n{}\n)'.format(self.space, indent(self_str)) - - def __setitem__(self, indices, values): - """Implement ``self[indices] = values``. - - This method should be overridden by subclasses. - - Parameters - ---------- - indices : index expression - Integer, slice or sequence of these, defining the positions - of the data array which should be written to. - values : scalar, `array-like` or `Tensor` - The value(s) that are to be assigned. - - If ``index`` is an integer, ``value`` must be a scalar. - - If ``index`` is a slice or a sequence of slices, ``value`` - must be broadcastable to the shape of the slice. - """ - raise NotImplementedError('abstract method') - - def __str__(self): - """Return ``str(self)``.""" - return array_str(self) - - ######### private methods ######### - - -if __name__ == '__main__': - from odl.util.testutils import run_doctests - run_doctests() diff --git a/odl/space/npy_tensors.py b/odl/space/npy_tensors.py deleted file mode 100644 index c3b7ba31197..00000000000 --- a/odl/space/npy_tensors.py +++ /dev/null @@ -1,2403 +0,0 @@ -# Copyright 2014-2020 The ODL contributors -# -# This file is part of ODL. -# -# This Source Code Form is subject to the terms of the Mozilla Public License, -# v. 2.0. If a copy of the MPL was not distributed with this file, You can -# obtain one at https://mozilla.org/MPL/2.0/. - -"""NumPy implementation of tensor spaces.""" - -from __future__ import absolute_import, division, print_function -from future.utils import native - -import ctypes -from builtins import object -from functools import partial - -import numpy as np - -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY - -from odl.set.sets import ComplexNumbers, RealNumbers -from odl.set.space import (LinearSpaceTypeError, - SupportedNumOperationParadigms, NumOperationParadigmSupport) -from odl.space.base_tensors import Tensor, TensorSpace -from odl.space.weighting import ( - ArrayWeighting, ConstWeighting, CustomDist, CustomInner, CustomNorm, - Weighting) -from odl.util import ( - dtype_str, is_floating_dtype, is_numeric_dtype, is_real_dtype, nullcontext, - signature_string, writable_array) - -__all__ = ('NumpyTensorSpace',) - - -_BLAS_DTYPES = (np.dtype('float32'), np.dtype('float64'), - np.dtype('complex64'), np.dtype('complex128')) - -# Define size thresholds to switch implementations -THRESHOLD_SMALL = 100 -THRESHOLD_MEDIUM = 50000 - - -class NumpyTensorSpace(TensorSpace): - - """Set of tensors of arbitrary data type, implemented with NumPy. - - A tensor is, in the most general sense, a multi-dimensional array - that allows operations per entry (keep the rank constant), - reductions / contractions (reduce the rank) and broadcasting - (raises the rank). - For non-numeric data type like ``object``, the range of valid - operations is rather limited since such a set of tensors does not - define a vector space. - Any numeric data type, on the other hand, is considered valid for - a tensor space, although certain operations - like division with - integer dtype - are not guaranteed to yield reasonable results. - - Under these restrictions, all basic vector space operations are - supported by this class, along with reductions based on arithmetic - or comparison, and element-wise mathematical functions ("ufuncs"). - - This class is implemented using `numpy.ndarray`'s as back-end. - - See the `Wikipedia article on tensors`_ for further details. - See also [Hac2012] "Part I Algebraic Tensors" for a rigorous - treatment of tensors with a definition close to this one. - - Note also that this notion of tensors is the same as in popular - Deep Learning frameworks. - - References - ---------- - [Hac2012] Hackbusch, W. *Tensor Spaces and Numerical Tensor Calculus*. - Springer, 2012. - - .. _Wikipedia article on tensors: https://en.wikipedia.org/wiki/Tensor - """ - - def __init__(self, shape, dtype=None, **kwargs): - r"""Initialize a new instance. - - Parameters - ---------- - shape : positive int or sequence of positive ints - Number of entries per axis for elements in this space. A - single integer results in a space with rank 1, i.e., 1 axis. - dtype : - Data type of each element. Can be provided in any - way the `numpy.dtype` function understands, e.g. - as built-in type or as a string. For ``None``, - the `default_dtype` of this space (``float64``) is used. - exponent : positive float, optional - Exponent of the norm. For values other than 2.0, no - inner product is defined. - - This option has no impact if either ``dist``, ``norm`` or - ``inner`` is given, or if ``dtype`` is non-numeric. - - Default: 2.0 - - Other Parameters - ---------------- - weighting : optional - Use weighted inner product, norm, and dist. The following - types are supported as ``weighting``: - - ``None``: no weighting, i.e. weighting with ``1.0`` (default). - - `Weighting`: Use this weighting as-is. Compatibility - with this space's elements is not checked during init. - - ``float``: Weighting by a constant. - - array-like: Pointwise weighting by an array. - - This option cannot be combined with ``dist``, - ``norm`` or ``inner``. It also cannot be used in case of - non-numeric ``dtype``. - - dist : callable, optional - Distance function defining a metric on the space. - It must accept two `NumpyTensor` arguments and return - a non-negative real number. See ``Notes`` for - mathematical requirements. - - By default, ``dist(x, y)`` is calculated as ``norm(x - y)``. - - This option cannot be combined with ``weight``, - ``norm`` or ``inner``. It also cannot be used in case of - non-numeric ``dtype``. - - norm : callable, optional - The norm implementation. It must accept a - `NumpyTensor` argument, return a non-negative real number. - See ``Notes`` for mathematical requirements. - - By default, ``norm(x)`` is calculated as ``inner(x, x)``. - - This option cannot be combined with ``weight``, - ``dist`` or ``inner``. It also cannot be used in case of - non-numeric ``dtype``. - - inner : callable, optional - The inner product implementation. It must accept two - `NumpyTensor` arguments and return an element of the field - of the space (usually real or complex number). - See ``Notes`` for mathematical requirements. - - This option cannot be combined with ``weight``, - ``dist`` or ``norm``. It also cannot be used in case of - non-numeric ``dtype``. - - kwargs : - Further keyword arguments are passed to the weighting - classes. - - See Also - -------- - odl.space.space_utils.rn : constructor for real tensor spaces - odl.space.space_utils.cn : constructor for complex tensor spaces - odl.space.space_utils.tensor_space : - constructor for tensor spaces of arbitrary scalar data type - - Notes - ----- - - A distance function or metric on a space :math:`\mathcal{X}` - is a mapping - :math:`d:\mathcal{X} \times \mathcal{X} \to \mathbb{R}` - satisfying the following conditions for all space elements - :math:`x, y, z`: - - * :math:`d(x, y) \geq 0`, - * :math:`d(x, y) = 0 \Leftrightarrow x = y`, - * :math:`d(x, y) = d(y, x)`, - * :math:`d(x, y) \leq d(x, z) + d(z, y)`. - - - A norm on a space :math:`\mathcal{X}` is a mapping - :math:`\| \cdot \|:\mathcal{X} \to \mathbb{R}` - satisfying the following conditions for all - space elements :math:`x, y`: and scalars :math:`s`: - - * :math:`\| x\| \geq 0`, - * :math:`\| x\| = 0 \Leftrightarrow x = 0`, - * :math:`\| sx\| = |s| \cdot \| x \|`, - * :math:`\| x+y\| \leq \| x\| + - \| y\|`. - - - An inner product on a space :math:`\mathcal{X}` over a field - :math:`\mathbb{F} = \mathbb{R}` or :math:`\mathbb{C}` is a - mapping - :math:`\langle\cdot, \cdot\rangle: \mathcal{X} \times - \mathcal{X} \to \mathbb{F}` - satisfying the following conditions for all - space elements :math:`x, y, z`: and scalars :math:`s`: - - * :math:`\langle x, y\rangle = - \overline{\langle y, x\rangle}`, - * :math:`\langle sx + y, z\rangle = s \langle x, z\rangle + - \langle y, z\rangle`, - * :math:`\langle x, x\rangle = 0 \Leftrightarrow x = 0`. - - Examples - -------- - Explicit initialization with the class constructor: - - >>> space = NumpyTensorSpace(3, float) - >>> space - rn(3) - >>> space.shape - (3,) - >>> space.dtype - dtype('float64') - - A more convenient way is to use factory functions: - - >>> space = odl.rn(3, weighting=[1, 2, 3]) - >>> space - rn(3, weighting=[1, 2, 3]) - >>> space = odl.tensor_space((2, 3), dtype=int) - >>> space - tensor_space((2, 3), dtype=int) - """ - super(NumpyTensorSpace, self).__init__(shape, dtype) - if self.dtype.char not in self.available_dtypes(): - raise ValueError('`dtype` {!r} not supported' - ''.format(dtype_str(dtype))) - - dist = kwargs.pop('dist', None) - norm = kwargs.pop('norm', None) - inner = kwargs.pop('inner', None) - weighting = kwargs.pop('weighting', None) - exponent = kwargs.pop('exponent', getattr(weighting, 'exponent', 2.0)) - - if (not is_numeric_dtype(self.dtype) and - any(x is not None for x in (dist, norm, inner, weighting))): - raise ValueError('cannot use any of `weighting`, `dist`, `norm` ' - 'or `inner` for non-numeric `dtype` {}' - ''.format(dtype)) - if exponent != 2.0 and any(x is not None for x in (dist, norm, inner)): - raise ValueError('cannot use any of `dist`, `norm` or `inner` ' - 'for exponent != 2') - # Check validity of option combination (0 or 1 may be provided) - num_extra_args = sum(a is not None - for a in (dist, norm, inner, weighting)) - if num_extra_args > 1: - raise ValueError('invalid combination of options `weighting`, ' - '`dist`, `norm` and `inner`') - - # Set the weighting - if weighting is not None: - if isinstance(weighting, Weighting): - if weighting.impl != 'numpy': - raise ValueError("`weighting.impl` must be 'numpy', " - '`got {!r}'.format(weighting.impl)) - if weighting.exponent != exponent: - raise ValueError('`weighting.exponent` conflicts with ' - '`exponent`: {} != {}' - ''.format(weighting.exponent, exponent)) - self.__weighting = weighting - else: - self.__weighting = _weighting(weighting, exponent) - - # Check (afterwards) that the weighting input was sane - if isinstance(self.weighting, NumpyTensorSpaceArrayWeighting): - if self.weighting.array.dtype == object: - raise ValueError('invalid `weighting` argument: {}' - ''.format(weighting)) - elif not np.can_cast(self.weighting.array.dtype, self.dtype): - raise ValueError( - 'cannot cast from `weighting` data type {} to ' - 'the space `dtype` {}' - ''.format(dtype_str(self.weighting.array.dtype), - dtype_str(self.dtype))) - if self.weighting.array.shape != self.shape: - raise ValueError('array-like weights must have same ' - 'shape {} as this space, got {}' - ''.format(self.shape, - self.weighting.array.shape)) - - elif dist is not None: - self.__weighting = NumpyTensorSpaceCustomDist(dist) - elif norm is not None: - self.__weighting = NumpyTensorSpaceCustomNorm(norm) - elif inner is not None: - self.__weighting = NumpyTensorSpaceCustomInner(inner) - else: - # No weighting, i.e., weighting with constant 1.0 - self.__weighting = NumpyTensorSpaceConstWeighting(1.0, exponent) - - self.__use_in_place_ops = kwargs.pop('use_in_place_ops', True) - - # Make sure there are no leftover kwargs - if kwargs: - raise TypeError('got unknown keyword arguments {}'.format(kwargs)) - - ########## static methods ########## - @staticmethod - def available_dtypes(): - """Return the set of data types available in this implementation. - - Notes - ----- - This is all dtypes available in Numpy. See ``numpy.sctypeDict`` - for more information. - - The available dtypes may depend on the specific system used. - """ - all_dtypes = [] - for dtype in np.sctypeDict.values(): - if dtype not in (object, np.void): - all_dtypes.append(np.dtype(dtype)) - # Need to add these manually since they are not contained - # in np.sctypeDict. - all_dtypes.extend([np.dtype('S'), np.dtype('U')]) - return tuple(sorted(set(all_dtypes))) - - @staticmethod - def default_dtype(field=None): - """Return the default data type of this class for a given field. - - Parameters - ---------- - field : `Field`, optional - Set of numbers to be represented by a data type. - Currently supported : `RealNumbers`, `ComplexNumbers` - The default ``None`` means `RealNumbers` - - Returns - ------- - dtype : `numpy.dtype` - Numpy data type specifier. The returned defaults are: - - ``RealNumbers()`` : ``np.dtype('float64')`` - - ``ComplexNumbers()`` : ``np.dtype('complex128')`` - """ - if field is None or field == RealNumbers(): - return np.dtype('float64') - elif field == ComplexNumbers(): - return np.dtype('complex128') - else: - raise ValueError('no default data type defined for field {}' - ''.format(field)) - - ########## Attributes ########## - @property - def byaxis(self): - """Return the subspace defined along one or several dimensions. - - Examples - -------- - Indexing with integers or slices: - - >>> space = odl.rn((2, 3, 4)) - >>> space.byaxis[0] - rn(2) - >>> space.byaxis[1:] - rn((3, 4)) - - Lists can be used to stack spaces arbitrarily: - - >>> space.byaxis[[2, 1, 2]] - rn((4, 3, 4)) - """ - space = self - - class NpyTensorSpacebyaxis(object): - - """Helper class for indexing by axis.""" - - def __getitem__(self, indices): - """Return ``self[indices]``.""" - try: - iter(indices) - except TypeError: - newshape = space.shape[indices] - else: - newshape = tuple(space.shape[i] for i in indices) - - if isinstance(space.weighting, ArrayWeighting): - new_array = np.asarray(space.weighting.array[indices]) - weighting = NumpyTensorSpaceArrayWeighting( - new_array, space.weighting.exponent) - else: - weighting = space.weighting - - return type(space)(newshape, space.dtype, weighting=weighting) - - def __repr__(self): - """Return ``repr(self)``.""" - return repr(space) + '.byaxis' - - return NpyTensorSpacebyaxis() - - @property - def default_order(self): - """Default storage order for new elements in this space: ``'C'``.""" - return 'C' - - @property - def element_type(self): - """Type of elements in this space: `NumpyTensor`.""" - return NumpyTensor - - @property - def exponent(self): - """Exponent of the norm and the distance.""" - return self.weighting.exponent - - @property - def impl(self): - """Name of the implementation back-end: ``'numpy'``.""" - return 'numpy' - - @property - def is_weighted(self): - """Return ``True`` if the space is not weighted by constant 1.0.""" - return not ( - isinstance(self.weighting, NumpyTensorSpaceConstWeighting) and - self.weighting.const == 1.0) - - @property - def supported_num_operation_paradigms(self) -> NumOperationParadigmSupport: - """NumPy has full support for in-place operation, which is usually - advantageous to reduce memory allocations. - This can be deactivated, mostly for testing purposes, by setting - `use_in_place_ops = False` when constructing the space.""" - if self.__use_in_place_ops: - return SupportedNumOperationParadigms( - in_place = NumOperationParadigmSupport.PREFERRED, - out_of_place = NumOperationParadigmSupport.SUPPORTED) - else: - return SupportedNumOperationParadigms( - in_place = NumOperationParadigmSupport.NOT_SUPPORTED, - out_of_place = NumOperationParadigmSupport.PREFERRED) - - @property - def weighting(self): - """This space's weighting scheme.""" - return self.__weighting - - ######### public methods ######### - def element(self, inp=None, data_ptr=None, order=None): - """Create a new element. - - Parameters - ---------- - inp : `array-like`, optional - Input used to initialize the new element. - - If ``inp`` is `None`, an empty element is created with no - guarantee of its state (memory allocation only). - The new element will use ``order`` as storage order if - provided, otherwise `default_order`. - - Otherwise, a copy is avoided whenever possible. This requires - correct `shape` and `dtype`, and if ``order`` is provided, - also contiguousness in that ordering. If any of these - conditions is not met, a copy is made. - - data_ptr : int, optional - Pointer to the start memory address of a contiguous Numpy array - or an equivalent raw container with the same total number of - bytes. For this option, ``order`` must be either ``'C'`` or - ``'F'``. - The option is also mutually exclusive with ``inp``. - order : {None, 'C', 'F'}, optional - Storage order of the returned element. For ``'C'`` and ``'F'``, - contiguous memory in the respective ordering is enforced. - The default ``None`` enforces no contiguousness. - - Returns - ------- - element : `NumpyTensor` - The new element, created from ``inp`` or from scratch. - - Examples - -------- - Without arguments, an uninitialized element is created. With an - array-like input, the element can be initialized: - - >>> space = odl.rn(3) - >>> empty = space.element() - >>> empty.shape - (3,) - >>> empty.space - rn(3) - >>> x = space.element([1, 2, 3]) - >>> x - rn(3).element([ 1., 2., 3.]) - - If the input already is a `numpy.ndarray` of correct `dtype`, it - will merely be wrapped, i.e., both array and space element access - the same memory, such that mutations will affect both: - - >>> arr = np.array([1, 2, 3], dtype=float) - >>> elem = odl.rn(3).element(arr) - >>> elem[0] = 0 - >>> elem - rn(3).element([ 0., 2., 3.]) - >>> arr - array([ 0., 2., 3.]) - - Elements can also be constructed from a data pointer, resulting - again in shared memory: - - >>> int_space = odl.tensor_space((2, 3), dtype=int) - >>> arr = np.array([[1, 2, 3], - ... [4, 5, 6]], dtype=int, order='F') - >>> ptr = arr.ctypes.data - >>> y = int_space.element(data_ptr=ptr, order='F') - >>> y - tensor_space((2, 3), dtype=int).element( - [[1, 2, 3], - [4, 5, 6]] - ) - >>> y[0, 1] = -1 - >>> arr - array([[ 1, -1, 3], - [ 4, 5, 6]]) - """ - if order is not None and str(order).upper() not in ('C', 'F'): - raise ValueError("`order` {!r} not understood".format(order)) - - if inp is None and data_ptr is None: - if order is None: - arr = np.empty(self.shape, dtype=self.dtype, - order=self.default_order) - else: - arr = np.empty(self.shape, dtype=self.dtype, order=order) - - return self.element_type(self, arr) - - elif inp is None and data_ptr is not None: - if order is None: - raise ValueError('`order` cannot be None for element ' - 'creation from pointer') - - ctype_array_def = ctypes.c_byte * self.nbytes - as_ctype_array = ctype_array_def.from_address(data_ptr) - as_numpy_array = np.ctypeslib.as_array(as_ctype_array) - arr = as_numpy_array.view(dtype=self.dtype) - arr = arr.reshape(self.shape, order=order) - return self.element_type(self, arr) - - elif inp is not None and data_ptr is None: - if inp in self and order is None: - # Short-circuit for space elements and no enforced ordering - return inp - - # Try to not copy but require dtype and order if given - # (`order=None` is ok as np.array argument) - arr = np.array(inp, copy=AVOID_UNNECESSARY_COPY, dtype=self.dtype, ndmin=self.ndim, - order=order) - # Make sure the result is writeable, if not make copy. - # This happens for e.g. results of `np.broadcast_to()`. - if not arr.flags.writeable: - arr = arr.copy() - if arr.shape != self.shape: - raise ValueError('shape of `inp` not equal to space shape: ' - '{} != {}'.format(arr.shape, self.shape)) - return self.element_type(self, arr) - - else: - raise TypeError('cannot provide both `inp` and `data_ptr`') - - def one(self): - """Return a tensor of all ones. - - Examples - -------- - >>> space = odl.rn(3) - >>> x = space.one() - >>> x - rn(3).element([ 1., 1., 1.]) - """ - return self.element(np.ones(self.shape, dtype=self.dtype, - order=self.default_order)) - - def zero(self): - """Return a tensor of all zeros. - - Examples - -------- - >>> space = odl.rn(3) - >>> x = space.zero() - >>> x - rn(3).element([ 0., 0., 0.]) - """ - return self.element(np.zeros(self.shape, dtype=self.dtype, - order=self.default_order)) - - ######### magic methods ######### - def __eq__(self, other): - """Return ``self == other``. - - Returns - ------- - equals : bool - True if ``other`` is an instance of ``type(self)`` - with the same `NumpyTensorSpace.shape`, `NumpyTensorSpace.dtype` - and `NumpyTensorSpace.weighting`, otherwise False. - - Examples - -------- - >>> space = odl.rn(3) - >>> same_space = odl.rn(3, exponent=2) - >>> same_space == space - True - - Different `shape`, `exponent` or `dtype` all result in different - spaces: - - >>> diff_space = odl.rn((3, 4)) - >>> diff_space == space - False - >>> diff_space = odl.rn(3, exponent=1) - >>> diff_space == space - False - >>> diff_space = odl.rn(3, dtype='float32') - >>> diff_space == space - False - >>> space == object - False - """ - if other is self: - return True - - return (super(NumpyTensorSpace, self).__eq__(other) and - self.weighting == other.weighting) - - def __hash__(self): - """Return ``hash(self)``.""" - return hash((super(NumpyTensorSpace, self).__hash__(), - self.weighting)) - - ######### private methods ######### - def _dist(self, x1, x2): - """Return the distance between ``x1`` and ``x2``. - - This function is part of the subclassing API. Do not - call it directly. - - Parameters - ---------- - x1, x2 : `NumpyTensor` - Elements whose mutual distance is calculated. - - Returns - ------- - dist : `float` - Distance between the elements. - - Examples - -------- - Different exponents result in difference metrics: - - >>> space_2 = odl.rn(3, exponent=2) - >>> x = space_2.element([-1, -1, 2]) - >>> y = space_2.one() - >>> space_2.dist(x, y) - 3.0 - - >>> space_1 = odl.rn(3, exponent=1) - >>> x = space_1.element([-1, -1, 2]) - >>> y = space_1.one() - >>> space_1.dist(x, y) - 5.0 - - Weighting is supported, too: - - >>> space_1_w = odl.rn(3, exponent=1, weighting=[2, 1, 1]) - >>> x = space_1_w.element([-1, -1, 2]) - >>> y = space_1_w.one() - >>> space_1_w.dist(x, y) - 7.0 - """ - return self.weighting.dist(x1, x2) - - def _divide(self, x1, x2, out): - """Compute the entry-wise quotient ``x1 / x2``. - - This function is part of the subclassing API. Do not - call it directly. - - Parameters - ---------- - x1, x2 : `NumpyTensor` - Dividend and divisor in the quotient. - out : `NumpyTensor` - Element to which the result is written. - - Examples - -------- - >>> space = odl.rn(3) - >>> x = space.element([2, 0, 4]) - >>> y = space.element([1, 1, 2]) - >>> space.divide(x, y) - rn(3).element([ 2., 0., 2.]) - >>> out = space.element() - >>> result = space.divide(x, y, out=out) - >>> result - rn(3).element([ 2., 0., 2.]) - >>> result is out - True - """ - if out is None: - return np.divide(x1.data, x2.data) - else: - np.divide(x1.data, x2.data, out=out.data) - - def _inner(self, x1, x2): - """Return the inner product of ``x1`` and ``x2``. - - This function is part of the subclassing API. Do not - call it directly. - - Parameters - ---------- - x1, x2 : `NumpyTensor` - Elements whose inner product is calculated. - - Returns - ------- - inner : `field` `element` - Inner product of the elements. - - Examples - -------- - >>> space = odl.rn(3) - >>> x = space.element([1, 0, 3]) - >>> y = space.one() - >>> space.inner(x, y) - 4.0 - - Weighting is supported, too: - - >>> space_w = odl.rn(3, weighting=[2, 1, 1]) - >>> x = space_w.element([1, 0, 3]) - >>> y = space_w.one() - >>> space_w.inner(x, y) - 5.0 - """ - return self.weighting.inner(x1, x2) - - def _lincomb(self, a, x1, b, x2, out): - """Implement the linear combination of ``x1`` and ``x2``. - - Compute ``out = a*x1 + b*x2`` using optimized - BLAS routines if possible. - - This function is part of the subclassing API. Do not - call it directly. - - Parameters - ---------- - a, b : `TensorSpace.field` element - Scalars to multiply ``x1`` and ``x2`` with. - x1, x2 : `NumpyTensor` - Summands in the linear combination. - out : `NumpyTensor` - Tensor to which the result is written. - - Examples - -------- - >>> space = odl.rn(3) - >>> x = space.element([0, 1, 1]) - >>> y = space.element([0, 0, 1]) - >>> out = space.element() - >>> result = space.lincomb(1, x, 2, y, out) - >>> result - rn(3).element([ 0., 1., 3.]) - >>> result is out - True - """ - if self.__use_in_place_ops: - assert(out is not None) - _lincomb_impl(a, x1, b, x2, out) - else: - assert(out is None) - return self.element(a * x1.data + b * x2.data) - - def _multiply(self, x1, x2, out): - """Compute the entry-wise product ``out = x1 * x2``. - - This function is part of the subclassing API. Do not - call it directly. - - Parameters - ---------- - x1, x2 : `NumpyTensor` - Factors in the product. - out : `NumpyTensor` - Element to which the result is written. - - Examples - -------- - >>> space = odl.rn(3) - >>> x = space.element([1, 0, 3]) - >>> y = space.element([-1, 1, -1]) - >>> space.multiply(x, y) - rn(3).element([-1., 0., -3.]) - >>> out = space.element() - >>> result = space.multiply(x, y, out=out) - >>> result - rn(3).element([-1., 0., -3.]) - >>> result is out - True - """ - if out is None: - return np.multiply(x1.data, x2.data) - else: - np.multiply(x1.data, x2.data, out=out.data) - - def _norm(self, x): - """Return the norm of ``x``. - - This function is part of the subclassing API. Do not - call it directly. - - Parameters - ---------- - x : `NumpyTensor` - Element whose norm is calculated. - - Returns - ------- - norm : `float` - Norm of the element. - - Examples - -------- - Different exponents result in difference norms: - - >>> space_2 = odl.rn(3, exponent=2) - >>> x = space_2.element([3, 0, 4]) - >>> space_2.norm(x) - 5.0 - >>> space_1 = odl.rn(3, exponent=1) - >>> x = space_1.element([3, 0, 4]) - >>> space_1.norm(x) - 7.0 - - Weighting is supported, too: - - >>> space_1_w = odl.rn(3, exponent=1, weighting=[2, 1, 1]) - >>> x = space_1_w.element([3, 0, 4]) - >>> space_1_w.norm(x) - 10.0 - """ - return self.weighting.norm(x) - - def __repr__(self): - """Return ``repr(self)``.""" - if self.ndim == 1: - posargs = [self.size] - else: - posargs = [self.shape] - - if self.is_real: - ctor_name = 'rn' - elif self.is_complex: - ctor_name = 'cn' - else: - ctor_name = 'tensor_space' - - if (ctor_name == 'tensor_space' or - not is_numeric_dtype(self.dtype) or - self.dtype != self.default_dtype(self.field)): - optargs = [('dtype', dtype_str(self.dtype), '')] - if self.dtype in (float, complex, int, bool): - optmod = '!s' - else: - optmod = '' - else: - optargs = [] - optmod = '' - - inner_str = signature_string(posargs, optargs, mod=['', optmod]) - weight_str = self.weighting.repr_part - if weight_str: - inner_str += ', ' + weight_str - - return '{}({})'.format(ctor_name, inner_str) - -class NumpyTensor(Tensor): - - """Representation of a `NumpyTensorSpace` element.""" - - def __init__(self, space, data): - """Initialize a new instance.""" - Tensor.__init__(self, space) - self.__data = data - - ######### static methods ######### - - ######### Attributes ######### - @property - def data(self): - """The `numpy.ndarray` representing the data of ``self``.""" - return self.__data - - @property - def data_ptr(self): - """A raw pointer to the data container of ``self``. - - Examples - -------- - >>> import ctypes - >>> space = odl.tensor_space(3, dtype='uint16') - >>> x = space.element([1, 2, 3]) - >>> arr_type = ctypes.c_uint16 * 3 # C type "array of 3 uint16" - >>> buffer = arr_type.from_address(x.data_ptr) - >>> arr = np.frombuffer(buffer, dtype='uint16') - >>> arr - array([1, 2, 3], dtype=uint16) - - In-place modification via pointer: - - >>> arr[0] = 42 - >>> x - tensor_space(3, dtype='uint16').element([42, 2, 3]) - """ - return self.data.ctypes.data - - @property - def imag(self): - """Imaginary part of ``self``. - - Returns - ------- - imag : `NumpyTensor` - Imaginary part this element as an element of a - `NumpyTensorSpace` with real data type. - - Examples - -------- - Get the imaginary part: - - >>> space = odl.cn(3) - >>> x = space.element([1 + 1j, 2, 3 - 3j]) - >>> x.imag - rn(3).element([ 1., 0., -3.]) - - Set the imaginary part: - - >>> space = odl.cn(3) - >>> x = space.element([1 + 1j, 2, 3 - 3j]) - >>> zero = odl.rn(3).zero() - >>> x.imag = zero - >>> x - cn(3).element([ 1.+0.j, 2.+0.j, 3.+0.j]) - - Other array-like types and broadcasting: - - >>> x.imag = 1.0 - >>> x - cn(3).element([ 1.+1.j, 2.+1.j, 3.+1.j]) - >>> x.imag = [2, 3, 4] - >>> x - cn(3).element([ 1.+2.j, 2.+3.j, 3.+4.j]) - """ - if self.space.is_real: - return self.space.zero() - elif self.space.is_complex: - real_space = self.space.astype(self.space.real_dtype) - return real_space.element(self.data.imag) - else: - raise NotImplementedError('`imag` not defined for non-numeric ' - 'dtype {}'.format(self.dtype)) - - @property - def real(self): - """Real part of ``self``. - - Returns - ------- - real : `NumpyTensor` - Real part of this element as a member of a - `NumpyTensorSpace` with corresponding real data type. - - Examples - -------- - Get the real part: - - >>> space = odl.cn(3) - >>> x = space.element([1 + 1j, 2, 3 - 3j]) - >>> x.real - rn(3).element([ 1., 2., 3.]) - - Set the real part: - - >>> space = odl.cn(3) - >>> x = space.element([1 + 1j, 2, 3 - 3j]) - >>> zero = odl.rn(3).zero() - >>> x.real = zero - >>> x - cn(3).element([ 0.+1.j, 0.+0.j, 0.-3.j]) - - Other array-like types and broadcasting: - - >>> x.real = 1.0 - >>> x - cn(3).element([ 1.+1.j, 1.+0.j, 1.-3.j]) - >>> x.real = [2, 3, 4] - >>> x - cn(3).element([ 2.+1.j, 3.+0.j, 4.-3.j]) - """ - if self.space.is_real: - return self - elif self.space.is_complex: - real_space = self.space.astype(self.space.real_dtype) - return real_space.element(self.data.real) - else: - raise NotImplementedError('`real` not defined for non-numeric ' - 'dtype {}'.format(self.dtype)) - - - ######### Public methods ######### - def asarray(self, out=None): - """Extract the data of this array as a ``numpy.ndarray``. - - This method is invoked when calling `numpy.asarray` on this - tensor. - - Parameters - ---------- - out : `numpy.ndarray`, optional - Array in which the result should be written in-place. - Has to be contiguous and of the correct dtype. - - Returns - ------- - asarray : `numpy.ndarray` - Numpy array with the same data type as ``self``. If - ``out`` was given, the returned object is a reference - to it. - - Examples - -------- - >>> space = odl.rn(3, dtype='float32') - >>> x = space.element([1, 2, 3]) - >>> x.asarray() - array([ 1., 2., 3.], dtype=float32) - >>> np.asarray(x) is x.asarray() - True - >>> out = np.empty(3, dtype='float32') - >>> result = x.asarray(out=out) - >>> out - array([ 1., 2., 3.], dtype=float32) - >>> result is out - True - >>> space = odl.rn((2, 3)) - >>> space.one().asarray() - array([[ 1., 1., 1.], - [ 1., 1., 1.]]) - """ - if out is None: - return self.data - else: - out[:] = self.data - return out - - def astype(self, dtype): - """Return a copy of this element with new ``dtype``. - - Parameters - ---------- - dtype : - Scalar data type of the returned space. Can be provided - in any way the `numpy.dtype` constructor understands, e.g. - as built-in type or as a string. Data types with non-trivial - shapes are not allowed. - - Returns - ------- - newelem : `NumpyTensor` - Version of this element with given data type. - """ - return self.space.astype(dtype).element(self.data.astype(dtype)) - - def conj(self, out=None): - """Return the complex conjugate of ``self``. - - Parameters - ---------- - out : `NumpyTensor`, optional - Element to which the complex conjugate is written. - Must be an element of ``self.space``. - - Returns - ------- - out : `NumpyTensor` - The complex conjugate element. If ``out`` was provided, - the returned object is a reference to it. - - Examples - -------- - >>> space = odl.cn(3) - >>> x = space.element([1 + 1j, 2, 3 - 3j]) - >>> x.conj() - cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j]) - >>> out = space.element() - >>> result = x.conj(out=out) - >>> result - cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j]) - >>> result is out - True - - In-place conjugation: - - >>> result = x.conj(out=x) - >>> x - cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j]) - >>> result is x - True - """ - if self.space.is_real: - if out is None: - return self - else: - out[:] = self - return out - - if not is_numeric_dtype(self.space.dtype): - raise NotImplementedError('`conj` not defined for non-numeric ' - 'dtype {}'.format(self.dtype)) - - if out is None: - return self.space.element(self.data.conj()) - else: - if out not in self.space: - raise LinearSpaceTypeError('`out` {!r} not in space {!r}' - ''.format(out, self.space)) - self.data.conj(out.data) - return out - - def copy(self): - """Return an identical (deep) copy of this tensor. - - Parameters - ---------- - None - - Returns - ------- - copy : `NumpyTensor` - The deep copy - - Examples - -------- - >>> space = odl.rn(3) - >>> x = space.element([1, 2, 3]) - >>> y = x.copy() - >>> y == x - True - >>> y is x - False - """ - return self.space.element(self.data.copy()) - - @imag.setter - def imag(self, newimag): - """Setter for the imaginary part. - - This method is invoked by ``x.imag = other``. - - Parameters - ---------- - newimag : array-like or scalar - Values to be assigned to the imaginary part of this element. - - Raises - ------ - ValueError - If the space is real, i.e., no imagninary part can be set. - """ - if self.space.is_real: - raise ValueError('cannot set imaginary part in real spaces') - self.imag.data[:] = newimag - - @real.setter - def real(self, newreal): - """Setter for the real part. - - This method is invoked by ``x.real = other``. - - Parameters - ---------- - newreal : array-like or scalar - Values to be assigned to the real part of this element. - """ - self.real.data[:] = newreal - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - """Interface to Numpy's ufunc machinery. - - This method is called by Numpy version 1.13 and higher as a single - point for the ufunc dispatch logic. An object implementing - ``__array_ufunc__`` takes over control when a `numpy.ufunc` is - called on it, allowing it to use custom implementations and - output types. - - This includes handling of in-place arithmetic like - ``npy_array += custom_obj``. In this case, the custom object's - ``__array_ufunc__`` takes precedence over the baseline - `numpy.ndarray` implementation. It will be called with - ``npy_array`` as ``out`` argument, which ensures that the - returned object is a Numpy array. For this to work properly, - ``__array_ufunc__`` has to accept Numpy arrays as ``out`` arguments. - - See the `corresponding NEP`_ and the `interface documentation`_ - for further details. See also the `general documentation on - Numpy ufuncs`_. - - .. note:: - This basic implementation casts inputs and - outputs to Numpy arrays and evaluates ``ufunc`` on those. - For `numpy.ndarray` based data storage, this incurs no - significant overhead compared to direct usage of Numpy arrays. - - For other (in particular non-local) implementations, e.g., - GPU arrays or distributed memory, overhead is significant due - to copies to CPU main memory. In those classes, the - ``__array_ufunc__`` mechanism should be overridden to use - native implementations if possible. - - .. note:: - When using operations that alter the shape (like ``reduce``), - or the data type (can be any of the methods), - the resulting array is wrapped in a space of the same - type as ``self.space``, propagating space properties like - `exponent` or `weighting` as closely as possible. - - Parameters - ---------- - ufunc : `numpy.ufunc` - Ufunc that should be called on ``self``. - method : str - Method on ``ufunc`` that should be called on ``self``. - Possible values: - - ``'__call__'``, ``'accumulate'``, ``'at'``, ``'outer'``, - ``'reduce'``, ``'reduceat'`` - - input1, ..., inputN : - Positional arguments to ``ufunc.method``. - kwargs : - Keyword arguments to ``ufunc.method``. - - Returns - ------- - ufunc_result : `Tensor`, `numpy.ndarray` or tuple - Result of the ufunc evaluation. If no ``out`` keyword argument - was given, the result is a `Tensor` or a tuple - of such, depending on the number of outputs of ``ufunc``. - If ``out`` was provided, the returned object or tuple entries - refer(s) to ``out``. - - Examples - -------- - We apply `numpy.add` to ODL tensors: - - >>> r3 = odl.rn(3) - >>> x = r3.element([1, 2, 3]) - >>> y = r3.element([-1, -2, -3]) - >>> x.__array_ufunc__(np.add, '__call__', x, y) - rn(3).element([ 0., 0., 0.]) - >>> np.add(x, y) # same mechanism for Numpy >= 1.13 - rn(3).element([ 0., 0., 0.]) - - As ``out``, a Numpy array or an ODL tensor can be given (wrapped - in a sequence): - - >>> out = r3.element() - >>> res = x.__array_ufunc__(np.add, '__call__', x, y, out=(out,)) - >>> out - rn(3).element([ 0., 0., 0.]) - >>> res is out - True - >>> out_arr = np.empty(3) - >>> res = x.__array_ufunc__(np.add, '__call__', x, y, out=(out_arr,)) - >>> out_arr - array([ 0., 0., 0.]) - >>> res is out_arr - True - - With multiple dimensions: - - >>> r23 = odl.rn((2, 3)) - >>> x = y = r23.one() - >>> x.__array_ufunc__(np.add, '__call__', x, y) - rn((2, 3)).element( - [[ 2., 2., 2.], - [ 2., 2., 2.]] - ) - - The ``ufunc.accumulate`` method retains the original `shape` and - `dtype`. The latter can be changed with the ``dtype`` parameter: - - >>> x = r3.element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'accumulate', x) - rn(3).element([ 1., 3., 6.]) - >>> np.add.accumulate(x) # same mechanism for Numpy >= 1.13 - rn(3).element([ 1., 3., 6.]) - >>> x.__array_ufunc__(np.add, 'accumulate', x, dtype=complex) - cn(3).element([ 1.+0.j, 3.+0.j, 6.+0.j]) - - For multi-dimensional tensors, an optional ``axis`` parameter - can be provided: - - >>> z = r23.one() - >>> z.__array_ufunc__(np.add, 'accumulate', z, axis=1) - rn((2, 3)).element( - [[ 1., 2., 3.], - [ 1., 2., 3.]] - ) - - The ``ufunc.at`` method operates in-place. Here we add the second - operand ``[5, 10]`` to ``x`` at indices ``[0, 2]``: - - >>> x = r3.element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'at', x, [0, 2], [5, 10]) - >>> x - rn(3).element([ 6., 2., 13.]) - - For outer-product-type operations, i.e., operations where the result - shape is the sum of the individual shapes, the ``ufunc.outer`` - method can be used: - - >>> x = odl.rn(2).element([0, 3]) - >>> y = odl.rn(3).element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'outer', x, y) - rn((2, 3)).element( - [[ 1., 2., 3.], - [ 4., 5., 6.]] - ) - >>> y.__array_ufunc__(np.add, 'outer', y, x) - rn((3, 2)).element( - [[ 1., 4.], - [ 2., 5.], - [ 3., 6.]] - ) - - Using ``ufunc.reduce`` produces a scalar, which can be avoided with - ``keepdims=True``: - - >>> x = r3.element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'reduce', x) - 6.0 - >>> x.__array_ufunc__(np.add, 'reduce', x, keepdims=True) - rn(1).element([ 6.]) - - In multiple dimensions, ``axis`` can be provided for reduction over - selected axes: - - >>> z = r23.element([[1, 2, 3], - ... [4, 5, 6]]) - >>> z.__array_ufunc__(np.add, 'reduce', z, axis=1) - rn(2).element([ 6., 15.]) - - Finally, ``add.reduceat`` is a combination of ``reduce`` and - ``at`` with rather flexible and complex semantics (see the - `reduceat documentation`_ for details): - - >>> x = r3.element([1, 2, 3]) - >>> x.__array_ufunc__(np.add, 'reduceat', x, [0, 1]) - rn(2).element([ 1., 5.]) - - References - ---------- - .. _corresponding NEP: - https://docs.scipy.org/doc/numpy/neps/ufunc-overrides.html - - .. _interface documentation: - https://docs.scipy.org/doc/numpy/reference/arrays.classes.html\ -#numpy.class.__array_ufunc__ - - .. _general documentation on Numpy ufuncs: - https://docs.scipy.org/doc/numpy/reference/ufuncs.html - - .. _reduceat documentation: - https://docs.scipy.org/doc/numpy/reference/generated/\ -numpy.ufunc.reduceat.html - """ - # Remark: this method differs from the parent implementation only - # in the propagation of additional space properties. - - # --- Process `out` --- # - - # Unwrap out if provided. The output parameters are all wrapped - # in one tuple, even if there is only one. - out_tuple = kwargs.pop('out', ()) - - # Check number of `out` args, depending on `method` - if method == '__call__' and len(out_tuple) not in (0, ufunc.nout): - raise ValueError( - "ufunc {}: need 0 or {} `out` arguments for " - "`method='__call__'`, got {}" - ''.format(ufunc.__name__, ufunc.nout, len(out_tuple))) - elif method != '__call__' and len(out_tuple) not in (0, 1): - raise ValueError( - 'ufunc {}: need 0 or 1 `out` arguments for `method={!r}`, ' - 'got {}'.format(ufunc.__name__, method, len(out_tuple))) - - # We allow our own tensors, the data container type and - # `numpy.ndarray` objects as `out` (see docs for reason for the - # latter) - valid_types = (type(self), type(self.data), np.ndarray) - if not all(isinstance(o, valid_types) or o is None - for o in out_tuple): - return NotImplemented - - # Assign to `out` or `out1` and `out2`, respectively - out = out1 = out2 = None - if len(out_tuple) == 1: - out = out_tuple[0] - elif len(out_tuple) == 2: - out1 = out_tuple[0] - out2 = out_tuple[1] - - # --- Process `inputs` --- # - - # Convert inputs that are ODL tensors to Numpy arrays so that the - # native Numpy ufunc is called later - inputs = tuple( - inp.asarray() if isinstance(inp, type(self)) else inp - for inp in inputs) - - # --- Get some parameters for later --- # - - # Arguments for `writable_array` and/or space constructors - out_dtype = kwargs.get('dtype', None) - if out_dtype is None: - array_kwargs = {} - else: - array_kwargs = {'dtype': out_dtype} - - exponent = self.space.exponent - weighting = self.space.weighting - - # --- Evaluate ufunc --- # - - if method == '__call__': - if ufunc.nout == 1: - # Make context for output (trivial one returns `None`) - if out is None: - out_ctx = nullcontext() - else: - out_ctx = writable_array(out, **array_kwargs) - - # Evaluate ufunc - with out_ctx as out_arr: - kwargs['out'] = out_arr - res = ufunc(*inputs, **kwargs) - - # Wrap result if necessary (lazily) - if out is None: - if is_floating_dtype(res.dtype): - # Weighting contains exponent - spc_kwargs = {'weighting': weighting} - else: - # No `exponent` or `weighting` applicable - spc_kwargs = {} - out_space = type(self.space)(self.shape, res.dtype, - **spc_kwargs) - out = out_space.element(res) - - return out - - elif ufunc.nout == 2: - # Make contexts for outputs (trivial ones return `None`) - if out1 is not None: - out1_ctx = writable_array(out1, **array_kwargs) - else: - out1_ctx = nullcontext() - if out2 is not None: - out2_ctx = writable_array(out2, **array_kwargs) - else: - out2_ctx = nullcontext() - - # Evaluate ufunc - with out1_ctx as out1_arr, out2_ctx as out2_arr: - kwargs['out'] = (out1_arr, out2_arr) - res1, res2 = ufunc(*inputs, **kwargs) - - # Wrap results if necessary (lazily) - # We don't use exponents or weightings since we don't know - # how to map them to the spaces - if out1 is None: - out1_space = type(self.space)(self.shape, res1.dtype) - out1 = out1_space.element(res1) - if out2 is None: - out2_space = type(self.space)(self.shape, res2.dtype) - out2 = out2_space.element(res2) - - return out1, out2 - - else: - raise NotImplementedError('nout = {} not supported' - ''.format(ufunc.nout)) - - else: # method != '__call__' - # Make context for output (trivial one returns `None`) - if out is None: - out_ctx = nullcontext() - else: - out_ctx = writable_array(out, **array_kwargs) - - # Evaluate ufunc method - with out_ctx as out_arr: - if method != 'at': - # No kwargs allowed for 'at' - kwargs['out'] = out_arr - res = getattr(ufunc, method)(*inputs, **kwargs) - - # Shortcut for scalar or no return value - if np.isscalar(res) or res is None: - # The first occurs for `reduce` with all axes, - # the second for in-place stuff (`at` currently) - return res - - # Wrap result if necessary (lazily) - if out is None: - if is_floating_dtype(res.dtype): - if res.shape != self.shape: - # Don't propagate weighting if shape changes - weighting = NumpyTensorSpaceConstWeighting(1.0, - exponent) - spc_kwargs = {'weighting': weighting} - else: - spc_kwargs = {} - - out_space = type(self.space)(res.shape, res.dtype, - **spc_kwargs) - out = out_space.element(res) - - return out - - def __complex__(self): - """Return ``complex(self)``.""" - if self.size != 1: - raise TypeError('only size-1 tensors can be converted to ' - 'Python scalars') - return complex(self.data.ravel()[0]) - - def __copy__(self): - """Return ``copy(self)``. - - This implements the (shallow) copy interface of the ``copy`` - module of the Python standard library. - - See Also - -------- - copy - - Examples - -------- - >>> from copy import copy - >>> space = odl.rn(3) - >>> x = space.element([1, 2, 3]) - >>> y = copy(x) - >>> y == x - True - >>> y is x - False - """ - return self.copy() - - def __eq__(self, other): - """Return ``self == other``. - - Returns - ------- - equals : bool - True if all entries of ``other`` are equal to this - the entries of ``self``, False otherwise. - - Examples - -------- - >>> space = odl.rn(3) - >>> x = space.element([1, 2, 3]) - >>> y = space.element([1, 2, 3]) - >>> x == y - True - - >>> y = space.element([-1, 2, 3]) - >>> x == y - False - >>> x == object - False - - Space membership matters: - - >>> space2 = odl.tensor_space(3, dtype='int64') - >>> y = space2.element([1, 2, 3]) - >>> x == y or y == x - False - """ - if other is self: - return True - elif other not in self.space: - return False - else: - return np.array_equal(self.data, other.data) - - def __float__(self): - """Return ``float(self)``.""" - return float(self.data) - - def __int__(self): - """Return ``int(self)``.""" - return int(self.data) - - def __ipow__(self, other): - """Return ``self **= other``.""" - try: - if other == int(other): - return super(NumpyTensor, self).__ipow__(other) - except TypeError: - pass - - np.power(self.data, other, out=self.data) - return self - - def __getitem__(self, indices): - """Return ``self[indices]``. - - Parameters - ---------- - indices : index expression - Integer, slice or sequence of these, defining the positions - of the data array which should be accessed. - - Returns - ------- - values : `NumpyTensorSpace.dtype` or `NumpyTensor` - The value(s) at the given indices. Note that the returned - object is a writable view into the original tensor, except - for the case when ``indices`` is a list. - - Examples - -------- - For one-dimensional spaces, indexing is as in linear arrays: - - >>> space = odl.rn(3) - >>> x = space.element([1, 2, 3]) - >>> x[0] - 1.0 - >>> x[1:] - rn(2).element([ 2., 3.]) - - In higher dimensions, the i-th index expression accesses the - i-th axis: - - >>> space = odl.rn((2, 3)) - >>> x = space.element([[1, 2, 3], - ... [4, 5, 6]]) - >>> x[0, 1] - 2.0 - >>> x[:, 1:] - rn((2, 2)).element( - [[ 2., 3.], - [ 5., 6.]] - ) - - Slices can be assigned to, except if lists are used for indexing: - - >>> y = x[:, ::2] # view into x - >>> y[:] = -9 - >>> x - rn((2, 3)).element( - [[-9., 2., -9.], - [-9., 5., -9.]] - ) - >>> y = x[[0, 1], [1, 2]] # not a view, won't modify x - >>> y - rn(2).element([ 2., -9.]) - >>> y[:] = 0 - >>> x - rn((2, 3)).element( - [[-9., 2., -9.], - [-9., 5., -9.]] - ) - """ - # Lazy implementation: index the array and deal with it - if isinstance(indices, NumpyTensor): - indices = indices.data - arr = self.data[indices] - - if np.isscalar(arr): - if self.space.field is not None: - return self.space.field.element(arr) - else: - return arr - else: - if is_numeric_dtype(self.dtype): - weighting = self.space.weighting - else: - weighting = None - space = type(self.space)( - arr.shape, dtype=self.dtype, exponent=self.space.exponent, - weighting=weighting) - return space.element(arr) - - def __setitem__(self, indices, values): - """Implement ``self[indices] = values``. - - Parameters - ---------- - indices : index expression - Integer, slice or sequence of these, defining the positions - of the data array which should be written to. - values : scalar, array-like or `NumpyTensor` - The value(s) that are to be assigned. - - If ``index`` is an integer, ``value`` must be a scalar. - - If ``index`` is a slice or a sequence of slices, ``value`` - must be broadcastable to the shape of the slice. - - Examples - -------- - For 1d spaces, entries can be set with scalars or sequences of - correct shape: - - >>> space = odl.rn(3) - >>> x = space.element([1, 2, 3]) - >>> x[0] = -1 - >>> x[1:] = (0, 1) - >>> x - rn(3).element([-1., 0., 1.]) - - It is also possible to use tensors of other spaces for - casting and assignment: - - >>> space = odl.rn((2, 3)) - >>> x = space.element([[1, 2, 3], - ... [4, 5, 6]]) - >>> x[0, 1] = -1 - >>> x - rn((2, 3)).element( - [[ 1., -1., 3.], - [ 4., 5., 6.]] - ) - >>> short_space = odl.tensor_space((2, 2), dtype='short') - >>> y = short_space.element([[-1, 2], - ... [0, 0]]) - >>> x[:, :2] = y - >>> x - rn((2, 3)).element( - [[-1., 2., 3.], - [ 0., 0., 6.]] - ) - - The Numpy assignment and broadcasting rules apply: - - >>> x[:] = np.array([[0, 0, 0], - ... [1, 1, 1]]) - >>> x - rn((2, 3)).element( - [[ 0., 0., 0.], - [ 1., 1., 1.]] - ) - >>> x[:, 1:] = [7, 8] - >>> x - rn((2, 3)).element( - [[ 0., 7., 8.], - [ 1., 7., 8.]] - ) - >>> x[:, ::2] = -2. - >>> x - rn((2, 3)).element( - [[-2., 7., -2.], - [-2., 7., -2.]] - ) - """ - if isinstance(indices, type(self)): - indices = indices.data - if isinstance(values, type(self)): - values = values.data - - self.data[indices] = values - - def _assign(self, other, avoid_deep_copy): - """Assign the values of ``other``, which is assumed to be in the - same space, to ``self``.""" - if avoid_deep_copy: - self.__data = other.__data - else: - self.__data[:] = other.__data - - -def _blas_is_applicable(*args): - """Whether BLAS routines can be applied or not. - - BLAS routines are available for single and double precision - float or complex data only. If the arrays are non-contiguous, - BLAS methods are usually slower, and array-writing routines do - not work at all. Hence, only contiguous arrays are allowed. - - Parameters - ---------- - x1,...,xN : `NumpyTensor` - The tensors to be tested for BLAS conformity. - - Returns - ------- - blas_is_applicable : bool - ``True`` if all mentioned requirements are met, ``False`` otherwise. - """ - if any(x.dtype != args[0].dtype for x in args[1:]): - return False - elif any(x.dtype not in _BLAS_DTYPES for x in args): - return False - elif not (all(x.flags.f_contiguous for x in args) or - all(x.flags.c_contiguous for x in args)): - return False - elif any(x.size > np.iinfo('int32').max for x in args): - # Temporary fix for 32 bit int overflow in BLAS - # TODO: use chunking instead - return False - else: - return True - - -def _lincomb_impl(a, x1, b, x2, out): - """Optimized implementation of ``out[:] = a * x1 + b * x2``.""" - # Lazy import to improve `import odl` time - import scipy.linalg - - size = native(x1.size) - - if size < THRESHOLD_SMALL: - # Faster for small arrays - out.data[:] = a * x1.data + b * x2.data - return - - elif (size < THRESHOLD_MEDIUM or - not _blas_is_applicable(x1.data, x2.data, out.data)): - - def fallback_axpy(x1, x2, n, a): - """Fallback axpy implementation avoiding copy.""" - if a != 0: - x2 /= a - x2 += x1 - x2 *= a - return x2 - - def fallback_scal(a, x, n): - """Fallback scal implementation.""" - x *= a - return x - - def fallback_copy(x1, x2, n): - """Fallback copy implementation.""" - x2[...] = x1[...] - return x2 - - axpy, scal, copy = (fallback_axpy, fallback_scal, fallback_copy) - x1_arr = x1.data - x2_arr = x2.data - out_arr = out.data - - else: - # Need flat data for BLAS, otherwise in-place does not work. - # Raveling must happen in fixed order for non-contiguous out, - # otherwise 'A' is applied to arrays, which makes the outcome - # dependent on their respective contiguousness. - if out.data.flags.f_contiguous: - ravel_order = 'F' - else: - ravel_order = 'C' - - x1_arr = x1.data.ravel(order=ravel_order) - x2_arr = x2.data.ravel(order=ravel_order) - out_arr = out.data.ravel(order=ravel_order) - axpy, scal, copy = scipy.linalg.blas.get_blas_funcs( - ['axpy', 'scal', 'copy'], arrays=(x1_arr, x2_arr, out_arr)) - - if x1 is x2 and b != 0: - # x1 is aligned with x2 -> out = (a+b)*x1 - _lincomb_impl(a + b, x1, 0, x1, out) - elif out is x1 and out is x2: - # All the vectors are aligned -> out = (a+b)*out - if (a + b) != 0: - scal(a + b, out_arr, size) - else: - out_arr[:] = 0 - elif out is x1: - # out is aligned with x1 -> out = a*out + b*x2 - if a != 1: - scal(a, out_arr, size) - if b != 0: - axpy(x2_arr, out_arr, size, b) - elif out is x2: - # out is aligned with x2 -> out = a*x1 + b*out - if b != 1: - scal(b, out_arr, size) - if a != 0: - axpy(x1_arr, out_arr, size, a) - else: - # We have exhausted all alignment options, so x1 is not x2 is not out - # We now optimize for various values of a and b - if b == 0: - if a == 0: # Zero assignment -> out = 0 - out_arr[:] = 0 - else: # Scaled copy -> out = a*x1 - copy(x1_arr, out_arr, size) - if a != 1: - scal(a, out_arr, size) - - else: # b != 0 - if a == 0: # Scaled copy -> out = b*x2 - copy(x2_arr, out_arr, size) - if b != 1: - scal(b, out_arr, size) - - elif a == 1: # No scaling in x1 -> out = x1 + b*x2 - copy(x1_arr, out_arr, size) - axpy(x2_arr, out_arr, size, b) - else: # Generic case -> out = a*x1 + b*x2 - copy(x2_arr, out_arr, size) - if b != 1: - scal(b, out_arr, size) - axpy(x1_arr, out_arr, size, a) - - -def _weighting(weights, exponent): - """Return a weighting whose type is inferred from the arguments.""" - if np.isscalar(weights): - weighting = NumpyTensorSpaceConstWeighting(weights, exponent) - elif weights is None: - weighting = NumpyTensorSpaceConstWeighting(1.0, exponent) - else: # last possibility: make an array - arr = np.asarray(weights) - weighting = NumpyTensorSpaceArrayWeighting(arr, exponent) - return weighting - - -def npy_weighted_inner(weights): - """Weighted inner product on `TensorSpace`'s as free function. - - Parameters - ---------- - weights : scalar or `array-like` - Weights of the inner product. A scalar is interpreted as a - constant weight, a 1-dim. array as a weighting vector. - - Returns - ------- - inner : `callable` - Inner product function with given weight. Constant weightings - are applicable to spaces of any size, for arrays the sizes - of the weighting and the space must match. - - See Also - -------- - NumpyTensorSpaceConstWeighting - NumpyTensorSpaceArrayWeighting - """ - return _weighting(weights, exponent=2.0).inner - - -def npy_weighted_norm(weights, exponent=2.0): - """Weighted norm on `TensorSpace`'s as free function. - - Parameters - ---------- - weights : scalar or `array-like` - Weights of the norm. A scalar is interpreted as a - constant weight, a 1-dim. array as a weighting vector. - exponent : positive `float` - Exponent of the norm. - - Returns - ------- - norm : `callable` - Norm function with given weight. Constant weightings - are applicable to spaces of any size, for arrays the sizes - of the weighting and the space must match. - - See Also - -------- - NumpyTensorSpaceConstWeighting - NumpyTensorSpaceArrayWeighting - """ - return _weighting(weights, exponent=exponent).norm - - -def npy_weighted_dist(weights, exponent=2.0): - """Weighted distance on `TensorSpace`'s as free function. - - Parameters - ---------- - weights : scalar or `array-like` - Weights of the distance. A scalar is interpreted as a - constant weight, a 1-dim. array as a weighting vector. - exponent : positive `float` - Exponent of the norm. - - Returns - ------- - dist : `callable` - Distance function with given weight. Constant weightings - are applicable to spaces of any size, for arrays the sizes - of the weighting and the space must match. - - See Also - -------- - NumpyTensorSpaceConstWeighting - NumpyTensorSpaceArrayWeighting - """ - return _weighting(weights, exponent=exponent).dist - - -def _norm_default(x): - """Default Euclidean norm implementation.""" - # Lazy import to improve `import odl` time - import scipy.linalg - - if _blas_is_applicable(x.data): - nrm2 = scipy.linalg.blas.get_blas_funcs('nrm2', dtype=x.dtype) - norm = partial(nrm2, n=native(x.size)) - else: - norm = np.linalg.norm - return norm(x.data.ravel()) - - -def _pnorm_default(x, p): - """Default p-norm implementation.""" - return np.linalg.norm(x.data.ravel(), ord=p) - - -def _pnorm_diagweight(x, p, w): - """Diagonally weighted p-norm implementation.""" - # Ravel both in the same order (w is a numpy array) - order = 'F' if all(a.flags.f_contiguous for a in (x.data, w)) else 'C' - - # This is faster than first applying the weights and then summing with - # BLAS dot or nrm2 - xp = np.abs(x.data.ravel(order)) - if p == float('inf'): - xp *= w.ravel(order) - return np.max(xp) - else: - xp = np.power(xp, p, out=xp) - xp *= w.ravel(order) - return np.sum(xp) ** (1 / p) - - -def _inner_default(x1, x2): - """Default Euclidean inner product implementation.""" - # Ravel both in the same order - order = 'F' if all(a.data.flags.f_contiguous for a in (x1, x2)) else 'C' - - if is_real_dtype(x1.dtype): - if x1.size > THRESHOLD_MEDIUM: - # This is as fast as BLAS dotc - return np.tensordot(x1, x2, [range(x1.ndim)] * 2) - else: - # Several times faster for small arrays - return np.dot(x1.data.ravel(order), - x2.data.ravel(order)) - else: - # x2 as first argument because we want linearity in x1 - return np.vdot(x2.data.ravel(order), - x1.data.ravel(order)) - - -# TODO: implement intermediate weighting schemes with arrays that are -# broadcast, i.e. between scalar and full-blown in dimensionality? - - -class NumpyTensorSpaceArrayWeighting(ArrayWeighting): - - """Weighting of a `NumpyTensorSpace` by an array. - - This class defines a weighting by an array that has the same shape - as the tensor space. Since the space is not known to this class, - no checks of shape or data type are performed. - See ``Notes`` for mathematical details. - """ - - def __init__(self, array, exponent=2.0): - r"""Initialize a new instance. - - Parameters - ---------- - array : `array-like`, one-dim. - Weighting array of the inner product, norm and distance. - All its entries must be positive, however this is not - verified during initialization. - exponent : positive `float` - Exponent of the norm. For values other than 2.0, no inner - product is defined. - - Notes - ----- - - For exponent 2.0, a new weighted inner product with array - :math:`W` is defined as - - .. math:: - \langle A, B\rangle_W := - \langle W \odot A, B\rangle = - \langle w \odot a, b\rangle = - b^{\mathrm{H}} (w \odot a), - - where :math:`a, b, w` are the "flattened" counterparts of - tensors :math:`A, B, W`, respectively, :math:`b^{\mathrm{H}}` - stands for transposed complex conjugate and :math:`w \odot a` - for element-wise multiplication. - - - For other exponents, only norm and dist are defined. In the - case of exponent :math:`\infty`, the weighted norm is - - .. math:: - \| A\|_{W, \infty} := - \| W \odot A\|_{\infty} = - \| w \odot a\|_{\infty}, - - otherwise it is (using point-wise exponentiation) - - .. math:: - \| A\|_{W, p} := - \| W^{1/p} \odot A\|_{p} = - \| w^{1/p} \odot a\|_{\infty}. - - - Note that this definition does **not** fulfill the limit - property in :math:`p`, i.e. - - .. math:: - \| A\|_{W, p} \not\to - \| A\|_{W, \infty} \quad (p \to \infty) - - unless all weights are equal to 1. - - - The array :math:`W` may only have positive entries, otherwise - it does not define an inner product or norm, respectively. This - is not checked during initialization. - """ - if isinstance(array, NumpyTensor): - array = array.data - elif not isinstance(array, np.ndarray): - array = np.asarray(array) - super(NumpyTensorSpaceArrayWeighting, self).__init__( - array, impl='numpy', exponent=exponent) - - def __hash__(self): - """Return ``hash(self)``.""" - return hash((type(self), self.array.tobytes(), self.exponent)) - - def inner(self, x1, x2): - """Return the weighted inner product of ``x1`` and ``x2``. - - Parameters - ---------- - x1, x2 : `NumpyTensor` - Tensors whose inner product is calculated. - - Returns - ------- - inner : float or complex - The inner product of the two provided vectors. - """ - if self.exponent != 2.0: - raise NotImplementedError('no inner product defined for ' - 'exponent != 2 (got {})' - ''.format(self.exponent)) - else: - inner = _inner_default(x1 * self.array, x2) - if is_real_dtype(x1.dtype): - return float(inner) - else: - return complex(inner) - - def norm(self, x): - """Return the weighted norm of ``x``. - - Parameters - ---------- - x : `NumpyTensor` - Tensor whose norm is calculated. - - Returns - ------- - norm : float - The norm of the provided tensor. - """ - if self.exponent == 2.0: - norm_squared = self.inner(x, x).real # TODO: optimize?! - if norm_squared < 0: - norm_squared = 0.0 # Compensate for numerical error - return float(np.sqrt(norm_squared)) - else: - return float(_pnorm_diagweight(x, self.exponent, self.array)) - - -class NumpyTensorSpaceConstWeighting(ConstWeighting): - - """Weighting of a `NumpyTensorSpace` by a constant. - - See ``Notes`` for mathematical details. - """ - - def __init__(self, const, exponent=2.0): - r"""Initialize a new instance. - - Parameters - ---------- - const : positive float - Weighting constant of the inner product, norm and distance. - exponent : positive float - Exponent of the norm. For values other than 2.0, the inner - product is not defined. - - Notes - ----- - - For exponent 2.0, a new weighted inner product with constant - :math:`c` is defined as - - .. math:: - \langle a, b\rangle_c := - c \, \langle a, b\rangle_c = - c \, b^{\mathrm{H}} a, - - where :math:`b^{\mathrm{H}}` standing for transposed complex - conjugate. - - - For other exponents, only norm and dist are defined. In the - case of exponent :math:`\infty`, the weighted norm is defined - as - - .. math:: - \| a \|_{c, \infty} := - c\, \| a \|_{\infty}, - - otherwise it is - - .. math:: - \| a \|_{c, p} := - c^{1/p}\, \| a \|_{p}. - - - Note that this definition does **not** fulfill the limit - property in :math:`p`, i.e. - - .. math:: - \| a\|_{c, p} \not\to - \| a \|_{c, \infty} \quad (p \to \infty) - - unless :math:`c = 1`. - - - The constant must be positive, otherwise it does not define an - inner product or norm, respectively. - """ - super(NumpyTensorSpaceConstWeighting, self).__init__( - const, impl='numpy', exponent=exponent) - - def inner(self, x1, x2): - """Return the weighted inner product of ``x1`` and ``x2``. - - Parameters - ---------- - x1, x2 : `NumpyTensor` - Tensors whose inner product is calculated. - - Returns - ------- - inner : float or complex - The inner product of the two provided tensors. - """ - if self.exponent != 2.0: - raise NotImplementedError('no inner product defined for ' - 'exponent != 2 (got {})' - ''.format(self.exponent)) - else: - inner = self.const * _inner_default(x1, x2) - if x1.space.field is None: - return inner - else: - return x1.space.field.element(inner) - - def norm(self, x): - """Return the weighted norm of ``x``. - - Parameters - ---------- - x1 : `NumpyTensor` - Tensor whose norm is calculated. - - Returns - ------- - norm : float - The norm of the tensor. - """ - if self.exponent == 2.0: - return float(np.sqrt(self.const) * _norm_default(x)) - elif self.exponent == float('inf'): - return float(self.const * _pnorm_default(x, self.exponent)) - else: - return float((self.const ** (1 / self.exponent) * - _pnorm_default(x, self.exponent))) - - def dist(self, x1, x2): - """Return the weighted distance between ``x1`` and ``x2``. - - Parameters - ---------- - x1, x2 : `NumpyTensor` - Tensors whose mutual distance is calculated. - - Returns - ------- - dist : float - The distance between the tensors. - """ - if self.exponent == 2.0: - return float(np.sqrt(self.const) * _norm_default(x1 - x2)) - elif self.exponent == float('inf'): - return float(self.const * _pnorm_default(x1 - x2, self.exponent)) - else: - return float((self.const ** (1 / self.exponent) * - _pnorm_default(x1 - x2, self.exponent))) - - -class NumpyTensorSpaceCustomInner(CustomInner): - - """Class for handling a user-specified inner product.""" - - def __init__(self, inner): - """Initialize a new instance. - - Parameters - ---------- - inner : `callable` - The inner product implementation. It must accept two - `Tensor` arguments, return an element from their space's - field (real or complex number) and satisfy the following - conditions for all vectors ``x, y, z`` and scalars ``s``: - - - `` = conj()`` - - `` = s * + `` - - `` = 0`` if and only if ``x = 0`` - """ - super(NumpyTensorSpaceCustomInner, self).__init__(inner, impl='numpy') - - -class NumpyTensorSpaceCustomNorm(CustomNorm): - - """Class for handling a user-specified norm. - - Note that this removes ``inner``. - """ - - def __init__(self, norm): - """Initialize a new instance. - - Parameters - ---------- - norm : `callable` - The norm implementation. It must accept a `Tensor` - argument, return a `float` and satisfy the following - conditions for all any two elements ``x, y`` and scalars - ``s``: - - - ``||x|| >= 0`` - - ``||x|| = 0`` if and only if ``x = 0`` - - ``||s * x|| = |s| * ||x||`` - - ``||x + y|| <= ||x|| + ||y||`` - """ - super(NumpyTensorSpaceCustomNorm, self).__init__(norm, impl='numpy') - - -class NumpyTensorSpaceCustomDist(CustomDist): - - """Class for handling a user-specified distance in `TensorSpace`. - - Note that this removes ``inner`` and ``norm``. - """ - - def __init__(self, dist): - """Initialize a new instance. - - Parameters - ---------- - dist : `callable` - The distance function defining a metric on `TensorSpace`. It - must accept two `Tensor` arguments, return a `float` and - fulfill the following mathematical conditions for any three - elements ``x, y, z``: - - - ``dist(x, y) >= 0`` - - ``dist(x, y) = 0`` if and only if ``x = y`` - - ``dist(x, y) = dist(y, x)`` - - ``dist(x, y) <= dist(x, z) + dist(z, y)`` - """ - super(NumpyTensorSpaceCustomDist, self).__init__(dist, impl='numpy') - - -if __name__ == '__main__': - from odl.util.testutils import run_doctests - run_doctests() diff --git a/odl/test/tomo/backends/astra_cpu_test.py b/odl/test/applications/tomo/backends/astra_cpu_test.py similarity index 59% rename from odl/test/tomo/backends/astra_cpu_test.py rename to odl/test/applications/tomo/backends/astra_cpu_test.py index 5726ea23a35..ff6dca1bb15 100644 --- a/odl/test/tomo/backends/astra_cpu_test.py +++ b/odl/test/applications/tomo/backends/astra_cpu_test.py @@ -14,9 +14,9 @@ import sys import odl -from odl.tomo.backends.astra_cpu import ( - astra_cpu_forward_projector, astra_cpu_back_projector) -from odl.tomo.util.testutils import skip_if_no_astra +from odl.applications.tomo.backends.astra_cpu import ( + astra_cpu_projector) +from odl.applications.tomo.util.testutils import skip_if_no_astra # TODO: clean up and improve tests @@ -24,62 +24,66 @@ @pytest.mark.xfail(sys.platform == 'win32', run=False, reason="Crashes on windows") @skip_if_no_astra -def test_astra_cpu_projector_parallel2d(): +def test_astra_cpu_projector_parallel2d(odl_impl_device_pairs): """ASTRA CPU forward and back projection for 2d parallel geometry.""" - + impl, device = odl_impl_device_pairs # Create reco space and a phantom - reco_space = odl.uniform_discr([-4, -5], [4, 5], (4, 5), dtype='float32') - phantom = odl.phantom.cuboid(reco_space, min_pt=[0, 0], max_pt=[4, 5]) + reco_space = odl.uniform_discr([-4, -5], [4, 5], (4, 5), dtype='float32', impl=impl, device=device) + phantom = odl.core.phantom.cuboid(reco_space, min_pt=[0, 0], max_pt=[4, 5]) # Create parallel geometry angle_part = odl.uniform_partition(0, 2 * np.pi, 8) det_part = odl.uniform_partition(-6, 6, 6) - geom = odl.tomo.Parallel2dGeometry(angle_part, det_part) + geom = odl.applications.tomo.Parallel2dGeometry(angle_part, det_part) # Make projection space - proj_space = odl.uniform_discr_frompartition(geom.partition, - dtype='float32') + proj_space = odl.uniform_discr_frompartition( + geom.partition, + dtype='float32', + impl=impl, + device=device + ) # Forward evaluation - proj_data = astra_cpu_forward_projector(phantom, geom, proj_space) + proj_data = astra_cpu_projector('forward', phantom, geom, proj_space) assert proj_data.shape == proj_space.shape assert proj_data.norm() > 0 # Backward evaluation - backproj = astra_cpu_back_projector(proj_data, geom, reco_space) + backproj = astra_cpu_projector('backward', proj_data, geom, reco_space) assert backproj.shape == reco_space.shape assert backproj.norm() > 0 @skip_if_no_astra -def test_astra_cpu_projector_fanflat(): +def test_astra_cpu_projector_fanflat(odl_impl_device_pairs): """ASTRA CPU forward and back projection for fanflat geometry.""" - # Create reco space and a phantom - reco_space = odl.uniform_discr([-4, -5], [4, 5], (4, 5), dtype='float32') - phantom = odl.phantom.cuboid(reco_space, min_pt=[0, 0], max_pt=[4, 5]) + impl, device = odl_impl_device_pairs + reco_space = odl.uniform_discr([-4, -5], [4, 5], (4, 5), dtype='float32', impl=impl, device=device) + phantom = odl.core.phantom.cuboid(reco_space, min_pt=[0, 0], max_pt=[4, 5]) # Create fan beam geometry with flat detector angle_part = odl.uniform_partition(0, 2 * np.pi, 8) det_part = odl.uniform_partition(-6, 6, 6) src_rad = 100 det_rad = 10 - geom = odl.tomo.FanBeamGeometry(angle_part, det_part, src_rad, det_rad) + geom = odl.applications.tomo.FanBeamGeometry(angle_part, det_part, src_rad, det_rad) # Make projection space proj_space = odl.uniform_discr_frompartition(geom.partition, - dtype='float32') + dtype='float32', impl=impl, device=device) # Forward evaluation - proj_data = astra_cpu_forward_projector(phantom, geom, proj_space) + proj_data = astra_cpu_projector('forward', phantom, geom, proj_space) assert proj_data.shape == proj_space.shape assert proj_data.norm() > 0 # Backward evaluation - backproj = astra_cpu_back_projector(proj_data, geom, reco_space) + backproj = astra_cpu_projector('backward', proj_data, geom, reco_space) assert backproj.shape == reco_space.shape assert backproj.norm() > 0 if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/tomo/backends/astra_cuda_test.py b/odl/test/applications/tomo/backends/astra_cuda_test.py similarity index 65% rename from odl/test/tomo/backends/astra_cuda_test.py rename to odl/test/applications/tomo/backends/astra_cuda_test.py index 83c1fd69ac8..d37c10961d4 100644 --- a/odl/test/tomo/backends/astra_cuda_test.py +++ b/odl/test/applications/tomo/backends/astra_cuda_test.py @@ -14,8 +14,8 @@ import pytest import odl -from odl.tomo.backends.astra_cuda import AstraCudaImpl -from odl.tomo.util.testutils import skip_if_no_astra_cuda +from odl.applications.tomo.backends.astra_cuda import AstraCudaImpl +from odl.applications.tomo.util.testutils import skip_if_no_astra_cuda # --- pytest fixtures --- # @@ -33,7 +33,8 @@ @pytest.fixture(scope="module", params=projectors, ids=space_and_geometry_ids) -def space_and_geometry(request): +def space_and_geometry(request, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs dtype = 'float32' geom = request.param @@ -41,35 +42,35 @@ def space_and_geometry(request): if geom == 'par2d': reco_space = odl.uniform_discr([-4, -5], [4, 5], (4, 5), - dtype=dtype) + dtype=dtype, impl=impl, device=device) dpart = odl.uniform_partition(-6, 6, 6) - geom = odl.tomo.Parallel2dGeometry(apart, dpart) + geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart) elif geom == 'par3d': reco_space = odl.uniform_discr([-4, -5, -6], [4, 5, 6], (4, 5, 6), - dtype=dtype) + dtype=dtype, impl=impl, device=device) dpart = odl.uniform_partition([-7, -8], [7, 8], (7, 8)) - geom = odl.tomo.Parallel3dAxisGeometry(apart, dpart) + geom = odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart) elif geom == 'cone2d': reco_space = odl.uniform_discr([-4, -5], [4, 5], (4, 5), - dtype=dtype) + dtype=dtype, impl=impl, device=device) dpart = odl.uniform_partition(-6, 6, 6) - geom = odl.tomo.FanBeamGeometry(apart, dpart, src_radius=100, + geom = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_radius=100, det_radius=10) elif geom == 'cone3d': reco_space = odl.uniform_discr([-4, -5, -6], [4, 5, 6], (4, 5, 6), - dtype=dtype) + dtype=dtype, impl=impl, device=device) dpart = odl.uniform_partition([-7, -8], [7, 8], (7, 8)) - geom = odl.tomo.ConeBeamGeometry(apart, dpart, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_radius=200, det_radius=100) elif geom == 'helical': reco_space = odl.uniform_discr([-4, -5, -6], [4, 5, 6], (4, 5, 6), - dtype=dtype) + dtype=dtype, impl=impl, device=device) # overwrite angle apart = odl.uniform_partition(0, 2 * 2 * np.pi, 18) dpart = odl.uniform_partition([-7, -8], [7, 8], (7, 8)) - geom = odl.tomo.ConeBeamGeometry(apart, dpart, pitch=1.0, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, pitch=1.0, src_radius=200, det_radius=100) else: raise ValueError('geom not valid') @@ -85,27 +86,31 @@ def test_astra_cuda_projector(space_and_geometry): # Create reco space and a phantom vol_space, geom = space_and_geometry - phantom = odl.phantom.cuboid(vol_space) + phantom = odl.core.phantom.cuboid(vol_space) # Make projection space - proj_space = odl.uniform_discr_frompartition(geom.partition, - dtype=vol_space.dtype) + proj_space = odl.uniform_discr_frompartition( + geom.partition, + dtype=vol_space.dtype_identifier, + impl=vol_space.impl, + device=vol_space.device) # create RayTransform implementation astra_cuda = AstraCudaImpl(geom, vol_space, proj_space) + out = astra_cuda.proj_space.zero() # Forward evaluation proj_data = astra_cuda.call_forward(phantom) assert proj_data in proj_space assert proj_data.norm() > 0 - assert np.all(proj_data.asarray() >= 0) + assert odl.all(0 <= proj_data) # Backward evaluation backproj = astra_cuda.call_backward(proj_data) assert backproj in vol_space assert backproj.norm() > 0 - assert np.all(proj_data.asarray() >= 0) + assert odl.all(0 <= backproj) if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/tomo/backends/astra_setup_test.py b/odl/test/applications/tomo/backends/astra_setup_test.py similarity index 61% rename from odl/test/tomo/backends/astra_setup_test.py rename to odl/test/applications/tomo/backends/astra_setup_test.py index d3502cf5bc2..7ca4bb6f461 100644 --- a/odl/test/tomo/backends/astra_setup_test.py +++ b/odl/test/applications/tomo/backends/astra_setup_test.py @@ -14,20 +14,22 @@ import pytest import odl -from odl.tomo.backends.astra_setup import ( +from odl.applications.tomo.backends.astra_setup import ( astra_algorithm, astra_data, astra_projection_geometry, astra_projector, astra_supports, astra_volume_geometry) -from odl.util.testutils import is_subdict - +from odl.core.util.testutils import is_subdict +from odl.core.util.testutils import simple_fixture try: import astra except ImportError: pass -pytestmark = pytest.mark.skipif("not odl.tomo.ASTRA_AVAILABLE") +pytestmark = pytest.mark.skipif("not odl.applications.tomo.ASTRA_AVAILABLE") + +astra_impl = simple_fixture('astra_impl', params=['cpu', 'cuda']) -def _discrete_domain(ndim): +def _discrete_domain(ndim, impl, device): """Create `DiscretizedSpace` space with isotropic grid stride. Parameters @@ -44,10 +46,10 @@ def _discrete_domain(ndim): min_pt = -max_pt shape = np.arange(1, ndim + 1) * 10 - return odl.uniform_discr(min_pt, max_pt, shape=shape, dtype='float32') + return odl.uniform_discr(min_pt, max_pt, impl=impl, device=device, shape=shape, dtype='float32') -def _discrete_domain_anisotropic(ndim): +def _discrete_domain_anisotropic(ndim, impl, device): """Create `DiscretizedSpace` space with anisotropic grid stride. Parameters @@ -64,55 +66,90 @@ def _discrete_domain_anisotropic(ndim): max_pt = [1] * ndim shape = np.arange(1, ndim + 1) * 10 - return odl.uniform_discr(min_pt, max_pt, shape=shape, dtype='float32') + return odl.uniform_discr(min_pt, max_pt, impl=impl, device=device, shape=shape, dtype='float32') + -def test_vol_geom_2d(): +def test_vol_geom_2d(astra_impl, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Check correctness of ASTRA 2D volume geometries.""" x_pts = 10 # x_pts = Rows y_pts = 20 # y_pts = Columns # Isotropic voxel case - discr_dom = _discrete_domain(2) + discr_dom = _discrete_domain(2, impl=impl, device=device) correct_dict = { - 'GridColCount': y_pts, - 'GridRowCount': x_pts, - 'option': { - 'WindowMinX': -2.0, # y_min - 'WindowMaxX': 2.0, # y_max - 'WindowMinY': -1.0, # x_min - 'WindowMaxY': 1.0}} # x_amx - - vol_geom = astra_volume_geometry(discr_dom) - assert vol_geom == correct_dict + 'cpu': { + 'GridColCount': y_pts, + 'GridRowCount': x_pts, + 'option': { + 'WindowMinX': -2.0, # y_min + 'WindowMaxX': 2.0, # y_max + 'WindowMinY': -1.0, # x_min + 'WindowMaxY': 1.0} # x_amx + }, + 'cuda': { + 'GridColCount': y_pts, + 'GridRowCount': x_pts, + 'GridSliceCount': 1, + 'option': { + 'WindowMinX': -2.0, # y_min + 'WindowMaxX': 2.0, # y_max + 'WindowMinY': -1.0, # x_min + 'WindowMaxY': 1.0, # x_amx + 'WindowMinZ': -1, # z_min + 'WindowMaxZ': 1, # z_max + } + } + } + + vol_geom = astra_volume_geometry(discr_dom, astra_impl) + assert vol_geom == correct_dict[astra_impl] # Anisotropic voxel case - discr_dom = _discrete_domain_anisotropic(2) + discr_dom = _discrete_domain_anisotropic(2, impl=impl, device=device) correct_dict = { + 'cpu': { 'GridColCount': y_pts, 'GridRowCount': x_pts, 'option': { 'WindowMinX': -1.0, # y_min 'WindowMaxX': 1.0, # y_max 'WindowMinY': -1.0, # x_min - 'WindowMaxY': 1.0}} # x_amx + 'WindowMaxY': 1.0} # x_max + }, + 'cuda': { + 'GridColCount': y_pts, + 'GridRowCount': x_pts, + 'GridSliceCount': 1, + 'option': { + 'WindowMinX': -1.0, # y_min + 'WindowMaxX': 1.0, # y_max + 'WindowMinY': -1.0, # x_min + 'WindowMaxY': 1.0, # x_amx + 'WindowMinZ': -1, # z_min + 'WindowMaxZ': 1, # z_max + } + } + } if astra_supports('anisotropic_voxels_2d'): - vol_geom = astra_volume_geometry(discr_dom) - assert vol_geom == correct_dict + vol_geom = astra_volume_geometry(discr_dom,astra_impl) + assert vol_geom == correct_dict[astra_impl] else: with pytest.raises(NotImplementedError): - astra_volume_geometry(discr_dom) + astra_volume_geometry(discr_dom, astra_impl) -def test_vol_geom_3d(): +def test_vol_geom_3d(astra_impl, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Check correctness of ASTRA 3D volume geometies.""" x_pts = 10 y_pts = 20 z_pts = 30 # Isotropic voxel case - discr_dom = _discrete_domain(3) + discr_dom = _discrete_domain(3, impl=impl, device=device) # x = columns, y = rows, z = slices correct_dict = { 'GridColCount': z_pts, @@ -126,10 +163,10 @@ def test_vol_geom_3d(): 'WindowMinZ': -1.0, # x_min 'WindowMaxZ': 1.0}} # x_amx - vol_geom = astra_volume_geometry(discr_dom) + vol_geom = astra_volume_geometry(discr_dom, astra_impl) assert vol_geom == correct_dict - discr_dom = _discrete_domain_anisotropic(3) + discr_dom = _discrete_domain_anisotropic(3, impl=impl, device=device) # x = columns, y = rows, z = slices correct_dict = { 'GridColCount': z_pts, @@ -144,35 +181,47 @@ def test_vol_geom_3d(): 'WindowMaxZ': 1.0}} # x_amx if astra_supports('anisotropic_voxels_3d'): - vol_geom = astra_volume_geometry(discr_dom) + vol_geom = astra_volume_geometry(discr_dom, astra_impl) assert vol_geom == correct_dict else: with pytest.raises(NotImplementedError): - astra_volume_geometry(discr_dom) + astra_volume_geometry(discr_dom, astra_impl) -def test_proj_geom_parallel_2d(): +def test_proj_geom_parallel_2d(astra_impl): """Create ASTRA 2D projection geometry.""" apart = odl.uniform_partition(0, 2, 5) dpart = odl.uniform_partition(-1, 1, 10) - geom = odl.tomo.Parallel2dGeometry(apart, dpart) + geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart) - proj_geom = astra_projection_geometry(geom) + proj_geom = astra_projection_geometry(geom, astra_impl) correct_subdict = { - 'type': 'parallel', - 'DetectorCount': 10, 'DetectorWidth': 0.2} - - assert is_subdict(correct_subdict, proj_geom) - assert 'ProjectionAngles' in proj_geom + 'cpu' : { + 'type': 'parallel', + 'DetectorCount': 10, + 'DetectorWidth': 0.2 + }, + 'cuda' : { + 'type': 'parallel3d_vec', + 'DetectorRowCount': 1, + 'DetectorColCount': 10 + }, + } + + assert is_subdict(correct_subdict[astra_impl], proj_geom) + if astra_impl == 'cpu': + assert 'ProjectionAngles' in proj_geom + else: + assert 'Vectors' in proj_geom -def test_astra_projection_geometry(): +def test_astra_projection_geometry(astra_impl): """Create ASTRA projection geometry from geometry objects.""" with pytest.raises(TypeError): - astra_projection_geometry(None) + astra_projection_geometry(None, astra_impl=astra_impl) apart = odl.uniform_partition(0, 2 * np.pi, 5) dpart = odl.uniform_partition(-40, 40, 10) @@ -180,44 +229,49 @@ def test_astra_projection_geometry(): # motion sampling grid, detector sampling grid but not uniform dpart_0 = odl.RectPartition(odl.IntervalProd(0, 3), odl.RectGrid([0, 1, 3])) - geom_p2d = odl.tomo.Parallel2dGeometry(apart, dpart=dpart_0) + geom_p2d = odl.applications.tomo.Parallel2dGeometry(apart, dpart=dpart_0) with pytest.raises(ValueError): - astra_projection_geometry(geom_p2d) + astra_projection_geometry(geom_p2d, astra_impl) # detector sampling grid, motion sampling grid - geom_p2d = odl.tomo.Parallel2dGeometry(apart, dpart) - astra_projection_geometry(geom_p2d) + geom_p2d = odl.applications.tomo.Parallel2dGeometry(apart, dpart) + astra_projection_geometry(geom_p2d, astra_impl) # Parallel 2D geometry - geom_p2d = odl.tomo.Parallel2dGeometry(apart, dpart) - astra_geom = astra_projection_geometry(geom_p2d) - assert astra_geom['type'] == 'parallel' - + geom_p2d = odl.applications.tomo.Parallel2dGeometry(apart, dpart) + astra_geom = astra_projection_geometry(geom_p2d, astra_impl) + if astra_impl == 'cpu': + assert astra_geom['type'] == 'parallel' + else: + assert astra_geom['type'] == 'parallel3d_vec' # Fan flat src_rad = 10 det_rad = 5 - geom_ff = odl.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad) - astra_geom = astra_projection_geometry(geom_ff) - assert astra_geom['type'] == 'fanflat_vec' + geom_ff = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad) + astra_geom = astra_projection_geometry(geom_ff, astra_impl) + if astra_impl == 'cpu': + assert astra_geom['type'] == 'fanflat_vec' + else: + assert astra_geom['type'] == 'cone_vec' dpart = odl.uniform_partition([-40, -3], [40, 3], (10, 5)) # Parallel 3D geometry - geom_p3d = odl.tomo.Parallel3dAxisGeometry(apart, dpart) - astra_projection_geometry(geom_p3d) - astra_geom = astra_projection_geometry(geom_p3d) + geom_p3d = odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart) + astra_projection_geometry(geom_p3d,astra_impl) + astra_geom = astra_projection_geometry(geom_p3d, astra_impl) assert astra_geom['type'] == 'parallel3d_vec' # Circular conebeam flat - geom_ccf = odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad) - astra_geom = astra_projection_geometry(geom_ccf) + geom_ccf = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad) + astra_geom = astra_projection_geometry(geom_ccf, astra_impl) assert astra_geom['type'] == 'cone_vec' # Helical conebeam flat pitch = 1 - geom_hcf = odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, + geom_hcf = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, pitch=pitch) - astra_geom = astra_projection_geometry(geom_hcf) + astra_geom = astra_projection_geometry(geom_hcf, astra_impl) assert astra_geom['type'] == 'cone_vec' @@ -235,7 +289,7 @@ def test_volume_data_2d(): assert data_out.shape == (10, 20) # From existing - discr_dom = _discrete_domain(2) + discr_dom = _discrete_domain(2, impl='numpy', device='cpu') data_in = discr_dom.element(np.ones((10, 20), dtype='float32')) data_id = astra_data(VOL_GEOM_2D, 'volume', data=data_in) data_out = astra.data2d.get_shared(data_id) @@ -256,7 +310,7 @@ def test_volume_data_3d(): assert data_out.shape == (10, 20, 30) # From existing - discr_dom = _discrete_domain(3) + discr_dom = _discrete_domain(3, impl='numpy', device='cpu') data_in = discr_dom.element(np.ones((10, 20, 30), dtype='float32')) data_id = astra_data(VOL_GEOM_3D, 'volume', data=data_in) data_out = astra.data3d.get_shared(data_id) @@ -294,7 +348,7 @@ def test_parallel_3d_projector(): astra_projector('linear3d', VOL_GEOM_3D, PROJ_GEOM_3D, ndim=3) -@pytest.mark.skipif(not odl.tomo.ASTRA_CUDA_AVAILABLE, +@pytest.mark.skipif(not odl.applications.tomo.ASTRA_CUDA_AVAILABLE, reason="ASTRA CUDA not available") def test_parallel_3d_projector_gpu(): """Create ASTRA 3D projectors on GPU.""" @@ -319,7 +373,7 @@ def test_astra_algorithm(): astra_algorithm('none', ndim, vol_id, sino_id, proj_id, 'none') with pytest.raises(ValueError): astra_algorithm( - 'backward', ndim, vol_id, sino_id, proj_id=None, impl='cpu' + 'backward', ndim, vol_id, sino_id, proj_id=None, astra_impl='cpu' ) alg_id = astra_algorithm(direction, ndim, vol_id, sino_id, proj_id, impl) astra.algorithm.delete(alg_id) @@ -334,7 +388,7 @@ def test_astra_algorithm(): astra.algorithm.delete(alg_id) -@pytest.mark.skipif(not odl.tomo.ASTRA_CUDA_AVAILABLE, +@pytest.mark.skipif(not odl.applications.tomo.ASTRA_CUDA_AVAILABLE, reason="ASTRA cuda not available") def test_astra_algorithm_gpu(): """Create ASTRA algorithm object on GPU.""" @@ -350,13 +404,13 @@ def test_astra_algorithm_gpu(): # 2D CUDA FP alg_id = astra_algorithm( - 'forward', ndim, vol_id, sino_id, proj_id=proj_id, impl='cuda' + 'forward', ndim, vol_id, sino_id, proj_id=proj_id, astra_impl='cuda' ) astra.algorithm.delete(alg_id) # 2D CUDA BP alg_id = astra_algorithm( - 'backward', ndim, rec_id, sino_id, proj_id=proj_id, impl='cuda' + 'backward', ndim, rec_id, sino_id, proj_id=proj_id, astra_impl='cuda' ) astra.algorithm.delete(alg_id) @@ -367,12 +421,12 @@ def test_astra_algorithm_gpu(): with pytest.raises(NotImplementedError): astra_algorithm( - direction, ndim, vol_id, sino_id, proj_id=proj_id, impl='cpu' + direction, ndim, vol_id, sino_id, proj_id=proj_id, astra_impl='cpu' ) for direction in {'forward', 'backward'}: astra_algorithm( - direction, ndim, vol_id, sino_id, proj_id=proj_id, impl='cuda' + direction, ndim, vol_id, sino_id, proj_id=proj_id, astra_impl='cuda' ) @@ -385,24 +439,24 @@ def test_geom_to_vec(): # Fanbeam flat src_rad = 10 det_rad = 5 - geom_ff = odl.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad) - vec = odl.tomo.astra_conebeam_2d_geom_to_vec(geom_ff) + geom_ff = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad) + vec = odl.applications.tomo.astra_conebeam_2d_geom_to_vec(geom_ff) assert vec.shape == (apart.size, 6) # Circular cone flat dpart = odl.uniform_partition([-40, -3], [40, 3], (10, 5)) - geom_ccf = odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad) - vec = odl.tomo.astra_conebeam_3d_geom_to_vec(geom_ccf) + geom_ccf = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad) + vec = odl.applications.tomo.astra_conebeam_3d_geom_to_vec(geom_ccf) assert vec.shape == (apart.size, 12) # Helical cone flat pitch = 1 - geom_hcf = odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, + geom_hcf = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, pitch=pitch) - vec = odl.tomo.astra_conebeam_3d_geom_to_vec(geom_hcf) + vec = odl.applications.tomo.astra_conebeam_3d_geom_to_vec(geom_hcf) assert vec.shape == (apart.size, 12) if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/tomo/backends/skimage_test.py b/odl/test/applications/tomo/backends/skimage_test.py similarity index 80% rename from odl/test/tomo/backends/skimage_test.py rename to odl/test/applications/tomo/backends/skimage_test.py index 44622a0659f..28bb5452008 100644 --- a/odl/test/tomo/backends/skimage_test.py +++ b/odl/test/applications/tomo/backends/skimage_test.py @@ -12,9 +12,9 @@ import numpy as np import odl -from odl.tomo.backends.skimage_radon import ( +from odl.applications.tomo.backends.skimage_radon import ( skimage_radon_forward_projector, skimage_radon_back_projector) -from odl.tomo.util.testutils import skip_if_no_skimage +from odl.applications.tomo.util.testutils import skip_if_no_skimage @skip_if_no_skimage @@ -23,12 +23,12 @@ def test_skimage_radon_projector_parallel2d(): # Create reco space and a phantom reco_space = odl.uniform_discr([-5, -5], [5, 5], (5, 5)) - phantom = odl.phantom.cuboid(reco_space, min_pt=[0, 0], max_pt=[5, 5]) + phantom = odl.core.phantom.cuboid(reco_space, min_pt=[0, 0], max_pt=[5, 5]) # Create parallel geometry angle_part = odl.uniform_partition(0, np.pi, 5) det_part = odl.uniform_partition(-6, 6, 6) - geom = odl.tomo.Parallel2dGeometry(angle_part, det_part) + geom = odl.applications.tomo.Parallel2dGeometry(angle_part, det_part) # Make projection space proj_space = odl.uniform_discr_frompartition(geom.partition) @@ -45,4 +45,4 @@ def test_skimage_radon_projector_parallel2d(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/tomo/geometry/geometry_test.py b/odl/test/applications/tomo/geometry/geometry_test.py similarity index 87% rename from odl/test/tomo/geometry/geometry_test.py rename to odl/test/applications/tomo/geometry/geometry_test.py index 9390b7d35e0..48b2524f89c 100644 --- a/odl/test/tomo/geometry/geometry_test.py +++ b/odl/test/applications/tomo/geometry/geometry_test.py @@ -15,7 +15,7 @@ import numpy as np import odl -from odl.util.testutils import all_almost_equal, all_equal, simple_fixture +from odl.core.util.testutils import all_almost_equal, all_equal, simple_fixture # --- pytest fixtures --- # @@ -48,10 +48,10 @@ def test_parallel_2d_props(shift): apart = odl.uniform_partition(0, full_angle, 10) dpart = odl.uniform_partition(0, 1, 10) translation = np.array([shift, shift], dtype=float) - geom = odl.tomo.Parallel2dGeometry(apart, dpart, translation=translation) + geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart, translation=translation) assert geom.ndim == 2 - assert isinstance(geom.detector, odl.tomo.Flat1dDetector) + assert isinstance(geom.detector, odl.applications.tomo.Flat1dDetector) # Check defaults assert all_almost_equal(geom.det_pos_init, translation + [0, 1]) @@ -99,7 +99,7 @@ def test_parallel_2d_orientation(det_pos_init_2d): apart = odl.uniform_partition(0, full_angle, 10) dpart = odl.uniform_partition(0, 1, 10) det_pos_init = det_pos_init_2d - geom = odl.tomo.Parallel2dGeometry(apart, dpart, det_pos_init=det_pos_init) + geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart, det_pos_init=det_pos_init) assert all_almost_equal(geom.det_pos_init, det_pos_init) assert all_almost_equal(geom.det_refpoint(0), det_pos_init) @@ -128,7 +128,7 @@ def test_parallel_2d_slanted_detector(): # Detector forms a 45 degree angle with the x axis at initial position, # with positive direction upwards init_axis = [1, 1] - geom = odl.tomo.Parallel2dGeometry(apart, dpart, det_pos_init=[0, 1], + geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart, det_pos_init=[0, 1], det_axis_init=init_axis) assert all_almost_equal(geom.det_pos_init, [0, 1]) @@ -160,7 +160,7 @@ def test_parallel_2d_frommatrix(): # Start at [0, 1] with extra rotation by 135 degrees, making 225 degrees # in total for the initial position (at the bisector in the 3rd quardant) - geom = odl.tomo.Parallel2dGeometry.frommatrix(apart, dpart, rot_matrix) + geom = odl.applications.tomo.Parallel2dGeometry.frommatrix(apart, dpart, rot_matrix) init_pos = np.array([-1, -1], dtype=float) init_pos /= np.linalg.norm(init_pos) @@ -172,7 +172,7 @@ def test_parallel_2d_frommatrix(): # With translation (1, 1) matrix = np.hstack([rot_matrix, [[1], [1]]]) - geom = odl.tomo.Parallel2dGeometry.frommatrix(apart, dpart, matrix) + geom = odl.applications.tomo.Parallel2dGeometry.frommatrix(apart, dpart, matrix) assert all_almost_equal(geom.translation, [1, 1]) @@ -185,7 +185,7 @@ def test_parallel_2d_frommatrix(): sing_mat = [[1, 1], [1, 1]] with pytest.raises(np.linalg.LinAlgError): - geom = odl.tomo.Parallel2dGeometry.frommatrix(apart, dpart, sing_mat) + geom = odl.applications.tomo.Parallel2dGeometry.frommatrix(apart, dpart, sing_mat) def test_parallel_3d_props(shift): @@ -194,11 +194,11 @@ def test_parallel_3d_props(shift): apart = odl.uniform_partition(0, full_angle, 10) dpart = odl.uniform_partition([0, 0], [1, 1], (10, 10)) translation = np.array([shift, shift, shift], dtype=float) - geom = odl.tomo.Parallel3dAxisGeometry(apart, dpart, + geom = odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart, translation=translation) assert geom.ndim == 3 - assert isinstance(geom.detector, odl.tomo.Flat2dDetector) + assert isinstance(geom.detector, odl.applications.tomo.Flat2dDetector) # Check defaults assert all_almost_equal(geom.axis, [0, 0, 1]) @@ -242,15 +242,15 @@ def test_parallel_3d_props(shift): # Zero not allowed as axis with pytest.raises(ValueError): - odl.tomo.Parallel3dAxisGeometry(apart, dpart, axis=[0, 0, 0]) + odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart, axis=[0, 0, 0]) # Detector axex should not be parallel or otherwise result in a # linear dependent triplet with pytest.raises(ValueError): - odl.tomo.Parallel3dAxisGeometry( + odl.applications.tomo.Parallel3dAxisGeometry( apart, dpart, det_axes_init=([0, 1, 0], [0, 1, 0])) with pytest.raises(ValueError): - odl.tomo.Parallel3dAxisGeometry( + odl.applications.tomo.Parallel3dAxisGeometry( apart, dpart, det_axes_init=([0, 0, 0], [0, 1, 0])) # Check that str and repr work without crashing and return something @@ -263,7 +263,7 @@ def test_parallel_3d_orientation(axis): full_angle = np.pi apart = odl.uniform_partition(0, full_angle, 10) dpart = odl.uniform_partition([0, 0], [1, 1], (10, 10)) - geom = odl.tomo.Parallel3dAxisGeometry(apart, dpart, axis=axis) + geom = odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart, axis=axis) norm_axis = np.array(axis, dtype=float) / np.linalg.norm(axis) assert all_almost_equal(geom.axis, norm_axis) @@ -302,7 +302,7 @@ def test_parallel_3d_slanted_detector(): # angle with the x-y plane. init_axis_0 = [1, 1, 0] init_axis_1 = [-1, 1, 1] - geom = odl.tomo.Parallel3dAxisGeometry( + geom = odl.applications.tomo.Parallel3dAxisGeometry( apart, dpart, det_axes_init=[init_axis_0, init_axis_1]) assert all_almost_equal(geom.det_pos_init, [0, 1, 0]) @@ -345,7 +345,7 @@ def test_parallel_3d_frommatrix(): rot_matrix = np.array([[0, 0, -1], [1, 0, 0], [0, -1, 0]], dtype=float) - geom = odl.tomo.Parallel3dAxisGeometry.frommatrix(apart, dpart, rot_matrix) + geom = odl.applications.tomo.Parallel3dAxisGeometry.frommatrix(apart, dpart, rot_matrix) # Axis was [0, 0, 1], gets mapped to [-1, 0, 0] assert all_almost_equal(geom.axis, [-1, 0, 0]) @@ -359,7 +359,7 @@ def test_parallel_3d_frommatrix(): # With translation (1, 1, 1) matrix = np.hstack([rot_matrix, [[1], [1], [1]]]) - geom = odl.tomo.Parallel3dAxisGeometry.frommatrix(apart, dpart, matrix) + geom = odl.applications.tomo.Parallel3dAxisGeometry.frommatrix(apart, dpart, matrix) assert all_almost_equal(geom.translation, (1, 1, 1)) assert all_almost_equal(geom.det_pos_init, geom.translation + [0, 0, -1]) @@ -372,7 +372,7 @@ def test_parallel_beam_geometry_helper(): """ # --- 2d case --- space = odl.uniform_discr([-1, -1], [1, 1], [20, 20]) - geometry = odl.tomo.parallel_beam_geometry(space) + geometry = odl.applications.tomo.parallel_beam_geometry(space) rho = np.sqrt(2) omega = np.pi * 10.0 @@ -389,7 +389,7 @@ def test_parallel_beam_geometry_helper(): # --- 3d case --- space = odl.uniform_discr([-1, -1, 0], [1, 1, 2], [20, 20, 40]) - geometry = odl.tomo.parallel_beam_geometry(space) + geometry = odl.applications.tomo.parallel_beam_geometry(space) # Validate angles assert geometry.motion_partition.is_uniform @@ -410,7 +410,7 @@ def test_parallel_beam_geometry_helper(): # --- offset geometry --- space = odl.uniform_discr([0, 0], [2, 2], [20, 20]) - geometry = odl.tomo.parallel_beam_geometry(space) + geometry = odl.applications.tomo.parallel_beam_geometry(space) rho = np.sqrt(2) * 2 omega = np.pi * 10.0 @@ -434,15 +434,15 @@ def test_fanbeam_props(detector_type, shift): det_rad = 5 curve_rad = src_rad + det_rad + 1 if detector_type != "flat" else None translation = np.array([shift, shift], dtype=float) - geom = odl.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad, + geom = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad, det_curvature_radius=curve_rad, translation=translation) assert geom.ndim == 2 if detector_type != 'flat': - assert isinstance(geom.detector, odl.tomo.CircularDetector) + assert isinstance(geom.detector, odl.applications.tomo.CircularDetector) else: - assert isinstance(geom.detector, odl.tomo.Flat1dDetector) + assert isinstance(geom.detector, odl.applications.tomo.Flat1dDetector) # Check defaults assert all_almost_equal(geom.src_to_det_init, [0, 1]) @@ -498,7 +498,7 @@ def test_fanbeam_props(detector_type, shift): # Both radii zero with pytest.raises(ValueError): - odl.tomo.FanBeamGeometry(apart, dpart, src_radius=0, det_radius=0) + odl.applications.tomo.FanBeamGeometry(apart, dpart, src_radius=0, det_radius=0) # Check that str and repr work without crashing and return something assert str(geom) @@ -518,7 +518,7 @@ def test_fanbeam_frommatrix(): # Start at [0, 1] with extra rotation by 135 degrees, making 225 degrees # in total for the initial position (at the bisector in the 3rd quardant) - geom = odl.tomo.FanBeamGeometry.frommatrix(apart, dpart, src_rad, det_rad, + geom = odl.applications.tomo.FanBeamGeometry.frommatrix(apart, dpart, src_rad, det_rad, rot_matrix) init_src_to_det = np.array([-1, -1], dtype=float) @@ -531,7 +531,7 @@ def test_fanbeam_frommatrix(): # With translation (1, 1) matrix = np.hstack([rot_matrix, [[1], [1]]]) - geom = odl.tomo.FanBeamGeometry.frommatrix(apart, dpart, src_rad, det_rad, + geom = odl.applications.tomo.FanBeamGeometry.frommatrix(apart, dpart, src_rad, det_rad, matrix) assert all_almost_equal(geom.translation, [1, 1]) @@ -546,7 +546,7 @@ def test_fanbeam_frommatrix(): sing_mat = [[1, 1], [1, 1]] with pytest.raises(np.linalg.LinAlgError): - geom = odl.tomo.FanBeamGeometry.frommatrix(apart, dpart, src_rad, + geom = odl.applications.tomo.FanBeamGeometry.frommatrix(apart, dpart, src_rad, det_rad, sing_mat) @@ -564,10 +564,10 @@ def test_fanbeam_src_det_shifts(init1=None): shift2 = np.array([-2.0, 3.0]) init = np.array([1, 0], dtype=np.float32) - ffs = partial(odl.tomo.flying_focal_spot, + ffs = partial(odl.applications.tomo.flying_focal_spot, apart=apart, shifts=[shift1, shift2]) - geom_ffs = odl.tomo.FanBeamGeometry(apart, dpart, + geom_ffs = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad, src_to_det_init=init, src_shift_func=ffs) @@ -582,9 +582,9 @@ def test_fanbeam_src_det_shifts(init1=None): # radius also changes when a shift is applied src_rad1 = np.linalg.norm(np.array([src_rad, 0]) + shift1) src_rad2 = np.linalg.norm(np.array([src_rad, 0]) + shift2) - geom1 = odl.tomo.FanBeamGeometry(apart1, dpart, src_rad1, det_rad, + geom1 = odl.applications.tomo.FanBeamGeometry(apart1, dpart, src_rad1, det_rad, src_to_det_init=init1) - geom2 = odl.tomo.FanBeamGeometry(apart2, dpart, src_rad2, det_rad, + geom2 = odl.applications.tomo.FanBeamGeometry(apart2, dpart, src_rad2, det_rad, src_to_det_init=init2) sp1 = geom1.src_position(geom1.angles) @@ -594,7 +594,7 @@ def test_fanbeam_src_det_shifts(init1=None): assert all_almost_equal(sp[1::2], sp2) # detector positions are not affected by flying focal spot - geom = odl.tomo.FanBeamGeometry(apart, dpart, + geom = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad, src_to_det_init=init) assert all_almost_equal(geom.det_refpoint(geom.angles), @@ -603,16 +603,16 @@ def test_fanbeam_src_det_shifts(init1=None): # However, detector can be shifted similarly as the source def det_shift(angle): return ffs(angle) / src_rad * det_rad - geom_ds = odl.tomo.FanBeamGeometry( + geom_ds = odl.applications.tomo.FanBeamGeometry( apart, dpart, src_rad, det_rad, src_to_det_init=init, det_shift_func=det_shift) det_rad1 = src_rad1 / src_rad * det_rad det_rad2 = src_rad2 / src_rad * det_rad - geom1 = odl.tomo.FanBeamGeometry(apart1, dpart, src_rad, det_rad1, + geom1 = odl.applications.tomo.FanBeamGeometry(apart1, dpart, src_rad, det_rad1, src_to_det_init=init1) - geom2 = odl.tomo.FanBeamGeometry(apart2, dpart, src_rad, det_rad2, + geom2 = odl.applications.tomo.FanBeamGeometry(apart2, dpart, src_rad, det_rad2, src_to_det_init=init2) dr1 = geom1.det_refpoint(geom1.angles) dr2 = geom2.det_refpoint(geom2.angles) @@ -624,6 +624,26 @@ def det_shift(angle): assert all_almost_equal(geom.src_position(geom.angles), geom_ds.src_position(geom_ds.angles)) +# def test_helical_pitch_interface(detector_type, shift): +# full_angle = 2 * np.pi +# apart = odl.uniform_partition(0, full_angle, 13) +# dpart = odl.uniform_partition([0, 0], [1, 1], (10, 10)) +# src_rad = 10 +# det_rad = 5 +# pitch = 2.0 +# translation = np.array([shift, shift, shift], dtype=float) +# if detector_type == 'spherical': +# curve_rad = [src_rad + det_rad + 1] * 2 +# elif detector_type == 'cylindrical': +# curve_rad = [src_rad + det_rad + 1, None] +# else: +# curve_rad = None +# for pitch in [2.0, np.linspace(0,2,13), list(np.linspace(0,2,13))]: +# geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, +# det_curvature_radius=curve_rad, +# pitch=pitch, translation=translation) +# geom.det_refpoint(np.linspace(0,2,13)) + def test_helical_cone_beam_props(detector_type, shift): """Test basic properties of 3D helical cone beam geometries.""" @@ -640,17 +660,17 @@ def test_helical_cone_beam_props(detector_type, shift): curve_rad = [src_rad + det_rad + 1, None] else: curve_rad = None - geom = odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, det_curvature_radius=curve_rad, pitch=pitch, translation=translation) assert geom.ndim == 3 if detector_type == 'spherical': - assert isinstance(geom.detector, odl.tomo.SphericalDetector) + assert isinstance(geom.detector, odl.applications.tomo.SphericalDetector) elif detector_type == 'cylindrical': - assert isinstance(geom.detector, odl.tomo.CylindricalDetector) + assert isinstance(geom.detector, odl.applications.tomo.CylindricalDetector) else: - assert isinstance(geom.detector, odl.tomo.Flat2dDetector) + assert isinstance(geom.detector, odl.applications.tomo.Flat2dDetector) # Check defaults assert all_almost_equal(geom.axis, [0, 0, 1]) @@ -724,7 +744,7 @@ def test_helical_cone_beam_props(detector_type, shift): [0, 0, 1]]) # offset_along_axis - geom = odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, pitch=pitch, offset_along_axis=0.5) assert all_almost_equal(geom.det_refpoint(0), [0, det_rad, 0.5]) @@ -738,23 +758,23 @@ def test_helical_cone_beam_props(detector_type, shift): # Zero not allowed as axis with pytest.raises(ValueError): - odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, + odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, pitch=pitch, axis=[0, 0, 0]) # Detector axex should not be parallel or otherwise result in a # linear dependent triplet with pytest.raises(ValueError): - odl.tomo.ConeBeamGeometry( + odl.applications.tomo.ConeBeamGeometry( apart, dpart, src_rad, det_rad, pitch=pitch, det_axes_init=([0, 1, 0], [0, 1, 0])) with pytest.raises(ValueError): - odl.tomo.ConeBeamGeometry( + odl.applications.tomo.ConeBeamGeometry( apart, dpart, src_rad, det_rad, pitch=pitch, det_axes_init=([0, 0, 0], [0, 1, 0])) # Both radii zero with pytest.raises(ValueError): - odl.tomo.ConeBeamGeometry(apart, dpart, src_radius=0, det_radius=0, + odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_radius=0, det_radius=0, pitch=pitch) # Check that str and repr work without crashing and return something @@ -776,10 +796,10 @@ def test_conebeam_source_detector_shifts(): shift1 = np.array([2.0, -3.0, 1.0]) shift2 = np.array([-2.0, 3.0, -1.0]) init = np.array([1, 0, 0], dtype=np.float32) - ffs = partial(odl.tomo.flying_focal_spot, + ffs = partial(odl.applications.tomo.flying_focal_spot, apart=apart, shifts=[shift1, shift2]) - geom_ffs = odl.tomo.ConeBeamGeometry(apart, dpart, + geom_ffs = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, src_to_det_init=init, src_shift_func=ffs, @@ -795,11 +815,11 @@ def test_conebeam_source_detector_shifts(): # radius also changes when a shift is applied src_rad1 = np.linalg.norm(np.array([src_rad + shift1[0], shift1[1], 0])) src_rad2 = np.linalg.norm(np.array([src_rad + shift2[0], shift2[1], 0])) - geom1 = odl.tomo.ConeBeamGeometry(apart1, dpart, src_rad1, det_rad, + geom1 = odl.applications.tomo.ConeBeamGeometry(apart1, dpart, src_rad1, det_rad, src_to_det_init=init1, offset_along_axis=shift1[2], pitch=pitch) - geom2 = odl.tomo.ConeBeamGeometry(apart2, dpart, src_rad2, det_rad, + geom2 = odl.applications.tomo.ConeBeamGeometry(apart2, dpart, src_rad2, det_rad, src_to_det_init=init2, offset_along_axis=shift2[2], pitch=pitch) @@ -811,7 +831,7 @@ def test_conebeam_source_detector_shifts(): assert all_almost_equal(sp[1::2], sp2) # detector positions are not affected by flying focal spot - geom = odl.tomo.ConeBeamGeometry(apart, dpart, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, src_to_det_init=init, pitch=pitch) @@ -822,18 +842,18 @@ def test_conebeam_source_detector_shifts(): coef = det_rad / src_rad def det_shift(angle): return ffs(angle) * coef - geom_ds = odl.tomo.ConeBeamGeometry(apart, dpart, + geom_ds = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, src_to_det_init=init, det_shift_func=det_shift, pitch=pitch) det_rad1 = src_rad1 / src_rad * det_rad det_rad2 = src_rad2 / src_rad * det_rad - geom1 = odl.tomo.ConeBeamGeometry(apart1, dpart, src_rad, det_rad1, + geom1 = odl.applications.tomo.ConeBeamGeometry(apart1, dpart, src_rad, det_rad1, src_to_det_init=init1, offset_along_axis=shift1[2] * coef, pitch=pitch) - geom2 = odl.tomo.ConeBeamGeometry(apart2, dpart, src_rad, det_rad2, + geom2 = odl.applications.tomo.ConeBeamGeometry(apart2, dpart, src_rad, det_rad2, src_to_det_init=init2, offset_along_axis=shift2[2] * coef, pitch=pitch) @@ -859,7 +879,7 @@ def test_cone_beam_slanted_detector(): # angle with the x-y plane. init_axis_0 = [1, 0, 1] init_axis_1 = [-1, 0, 1] - geom = odl.tomo.ConeBeamGeometry(apart, dpart, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_radius=1, det_radius=1, det_curvature_radius=(1, None), det_axes_init=[init_axis_0, init_axis_1]) @@ -887,7 +907,7 @@ def test_cone_beam_slanted_detector(): # axes are not perpendicular with pytest.raises(ValueError): - odl.tomo.ConeBeamGeometry(apart, dpart, + odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_radius=5, det_radius=10, det_curvature_radius=(1, None), det_axes_init=[init_axis_0, [-2, 0, 1]]) @@ -907,7 +927,7 @@ def test_cone_beam_geometry_helper(): src_radius = 3 det_radius = 9 magnification = (src_radius + det_radius) / src_radius - geometry = odl.tomo.cone_beam_geometry(space, src_radius, det_radius) + geometry = odl.applications.tomo.cone_beam_geometry(space, src_radius, det_radius) rho = np.sqrt(2) omega = np.pi * 10.0 @@ -927,14 +947,14 @@ def test_cone_beam_geometry_helper(): # Short scan option fan_angle = 2 * np.arctan(det_width / (2 * r)) - geometry = odl.tomo.cone_beam_geometry(space, src_radius, det_radius, + geometry = odl.applications.tomo.cone_beam_geometry(space, src_radius, det_radius, short_scan=True) assert geometry.motion_params.extent == pytest.approx(np.pi + fan_angle) # --- 3d case --- space = odl.uniform_discr([-1, -1, 0], [1, 1, 2], [20, 20, 40]) - geometry = odl.tomo.cone_beam_geometry(space, src_radius, det_radius) + geometry = odl.applications.tomo.cone_beam_geometry(space, src_radius, det_radius) # Validate angles assert geometry.motion_partition.is_uniform @@ -954,7 +974,7 @@ def test_cone_beam_geometry_helper(): # --- offset geometry (2d) --- space = odl.uniform_discr([0, 0], [2, 2], [20, 20]) - geometry = odl.tomo.cone_beam_geometry(space, src_radius, det_radius) + geometry = odl.applications.tomo.cone_beam_geometry(space, src_radius, det_radius) rho = np.sqrt(2) * 2 omega = np.pi * 10.0 @@ -989,7 +1009,7 @@ def test_helical_geometry_helper(): # Create object space = odl.uniform_discr([-1, -1, -2], [1, 1, 2], [20, 20, 40]) - geometry = odl.tomo.helical_geometry(space, src_radius, det_radius, + geometry = odl.applications.tomo.helical_geometry(space, src_radius, det_radius, num_turns=num_turns) # Validate angles @@ -1029,20 +1049,20 @@ def check_shifts(ffs, shifts): n_shifts = np.random.randint(1, n_angles+1) shift_dim = 3 shifts = np.random.uniform(size=(n_shifts, shift_dim)) - ffs = odl.tomo.flying_focal_spot(part_angles, apart, shifts) + ffs = odl.applications.tomo.flying_focal_spot(part_angles, apart, shifts) check_shifts(ffs, shifts) shift_dim = 2 shifts = np.random.uniform(size=(n_shifts, shift_dim)) - ffs = odl.tomo.flying_focal_spot(part_angles, apart, shifts) + ffs = odl.applications.tomo.flying_focal_spot(part_angles, apart, shifts) check_shifts(ffs, shifts) # shifts at other angles ar defined by nearest neighbor interpolation d = np.random.uniform(-0.49, 0.49) * apart.cell_volume shifts = np.random.uniform(size=(n_shifts, shift_dim)) - ffs = odl.tomo.flying_focal_spot(part_angles + d, apart, shifts) + ffs = odl.applications.tomo.flying_focal_spot(part_angles + d, apart, shifts) check_shifts(ffs, shifts) if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/tomo/geometry/spect_geometry_test.py b/odl/test/applications/tomo/geometry/spect_geometry_test.py similarity index 79% rename from odl/test/tomo/geometry/spect_geometry_test.py rename to odl/test/applications/tomo/geometry/spect_geometry_test.py index eb37d20bb32..24a0b9a446d 100644 --- a/odl/test/tomo/geometry/spect_geometry_test.py +++ b/odl/test/applications/tomo/geometry/spect_geometry_test.py @@ -12,8 +12,8 @@ import numpy as np import odl -from odl.util.testutils import all_equal -from odl.tomo.geometry.spect import ParallelHoleCollimatorGeometry +from odl.core.util.testutils import all_equal +from odl.applications.tomo.geometry.spect import ParallelHoleCollimatorGeometry def test_spect(): @@ -29,9 +29,9 @@ def test_spect(): apart = odl.uniform_partition(0, 2 * np.pi, n_proj) geom = ParallelHoleCollimatorGeometry(apart, dpart, det_radius) - assert isinstance(geom.detector, odl.tomo.Flat2dDetector) + assert isinstance(geom.detector, odl.applications.tomo.Flat2dDetector) assert all_equal(geom.det_radius, det_radius) if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/tomo/operators/ray_trafo_test.py b/odl/test/applications/tomo/operators/ray_trafo_test.py similarity index 63% rename from odl/test/tomo/operators/ray_trafo_test.py rename to odl/test/applications/tomo/operators/ray_trafo_test.py index 179438af11e..6ae9dd1d906 100644 --- a/odl/test/tomo/operators/ray_trafo_test.py +++ b/odl/test/applications/tomo/operators/ray_trafo_test.py @@ -11,16 +11,17 @@ from __future__ import division import numpy as np +import math import pytest from packaging.version import parse as parse_version from functools import partial import odl -from odl.tomo.backends import ASTRA_AVAILABLE, ASTRA_VERSION -from odl.tomo.util.testutils import ( - skip_if_no_astra, skip_if_no_astra_cuda, skip_if_no_skimage) -from odl.util.testutils import all_almost_equal, simple_fixture - +from odl.applications.tomo.backends import ASTRA_AVAILABLE, ASTRA_VERSION +from odl.applications.tomo.util.testutils import ( + skip_if_no_astra, skip_if_no_astra_cuda, skip_if_no_skimage, skip_if_no_pytorch) +from odl.core.util.testutils import all_equal, all_almost_equal, simple_fixture +from odl.core.array_API_support.comparisons import odl_all_equal # --- pytest fixtures --- # @@ -44,25 +45,25 @@ def geometry(request): if geom == 'par2d': apart = odl.uniform_partition(0, np.pi, n_angles) dpart = odl.uniform_partition(-30, 30, m) - return odl.tomo.Parallel2dGeometry(apart, dpart) + return odl.applications.tomo.Parallel2dGeometry(apart, dpart) elif geom == 'par3d': apart = odl.uniform_partition(0, np.pi, n_angles) dpart = odl.uniform_partition([-30, -30], [30, 30], (m, m)) - return odl.tomo.Parallel3dAxisGeometry(apart, dpart) + return odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart) elif geom == 'cone2d': apart = odl.uniform_partition(0, 2 * np.pi, n_angles) dpart = odl.uniform_partition(-30, 30, m) - return odl.tomo.FanBeamGeometry(apart, dpart, src_radius=200, + return odl.applications.tomo.FanBeamGeometry(apart, dpart, src_radius=200, det_radius=100) elif geom == 'cone3d': apart = odl.uniform_partition(0, 2 * np.pi, n_angles) dpart = odl.uniform_partition([-60, -60], [60, 60], (m, m)) - return odl.tomo.ConeBeamGeometry(apart, dpart, + return odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_radius=200, det_radius=100) elif geom == 'helical': apart = odl.uniform_partition(0, 8 * 2 * np.pi, n_angles) dpart = odl.uniform_partition([-30, -3], [30, 3], (m, m)) - return odl.tomo.ConeBeamGeometry(apart, dpart, pitch=5.0, + return odl.applications.tomo.ConeBeamGeometry(apart, dpart, pitch=5.0, src_radius=200, det_radius=100) else: raise ValueError('geom not valid') @@ -76,50 +77,81 @@ def geometry(request): projectors = [] projectors.extend( (pytest.param(proj_cfg, marks=skip_if_no_astra) - for proj_cfg in ['par2d astra_cpu uniform', - 'par2d astra_cpu nonuniform', - 'par2d astra_cpu random', - 'cone2d astra_cpu uniform', - 'cone2d astra_cpu nonuniform', - 'cone2d astra_cpu random']) + for proj_cfg in ['par2d astra_cpu uniform numpy cpu', + 'par2d astra_cpu nonuniform numpy cpu', + 'par2d astra_cpu random numpy cpu', + 'cone2d astra_cpu uniform numpy cpu', + 'cone2d astra_cpu nonuniform numpy cpu', + 'cone2d astra_cpu random numpy cpu', + ]) +) +projectors.extend( + (pytest.param(proj_cfg, marks=[skip_if_no_astra, skip_if_no_pytorch]) + for proj_cfg in [ + 'par2d astra_cpu uniform pytorch cpu', + 'par2d astra_cpu nonuniform pytorch cpu', + 'par2d astra_cpu random pytorch cpu', + 'cone2d astra_cpu uniform pytorch cpu', + 'cone2d astra_cpu nonuniform pytorch cpu', + 'cone2d astra_cpu random pytorch cpu' + ]) + ) projectors.extend( (pytest.param(proj_cfg, marks=skip_if_no_astra_cuda) - for proj_cfg in ['par2d astra_cuda uniform', - 'par2d astra_cuda half_uniform', - 'par2d astra_cuda nonuniform', - 'par2d astra_cuda random', - 'cone2d astra_cuda uniform', - 'cone2d astra_cuda nonuniform', - 'cone2d astra_cuda random', - 'par3d astra_cuda uniform', - 'par3d astra_cuda nonuniform', - 'par3d astra_cuda random', - 'cone3d astra_cuda uniform', - 'cone3d astra_cuda nonuniform', - 'cone3d astra_cuda random', - 'helical astra_cuda uniform']) + for proj_cfg in ['par2d astra_cuda uniform numpy cpu', + 'par2d astra_cuda half_uniform numpy cpu', + 'par2d astra_cuda nonuniform numpy cpu', + 'par2d astra_cuda random numpy cpu', + 'cone2d astra_cuda uniform numpy cpu', + 'cone2d astra_cuda nonuniform numpy cpu', + 'cone2d astra_cuda random numpy cpu', + 'par3d astra_cuda uniform numpy cpu', + 'par3d astra_cuda nonuniform numpy cpu', + 'par3d astra_cuda random numpy cpu', + 'cone3d astra_cuda uniform numpy cpu', + 'cone3d astra_cuda nonuniform numpy cpu', + 'cone3d astra_cuda random numpy cpu', + ]) ) + +projectors.extend( + (pytest.param(proj_cfg, marks=[skip_if_no_astra, skip_if_no_pytorch]) + for proj_cfg in [ + 'par2d astra_cuda uniform pytorch cuda:0', + 'par2d astra_cuda half_uniform pytorch cuda:0', + 'par2d astra_cuda nonuniform pytorch cuda:0', + 'par2d astra_cuda random pytorch cuda:0', + 'cone2d astra_cuda uniform pytorch cuda:0', + 'cone2d astra_cuda nonuniform pytorch cuda:0', + 'cone2d astra_cuda random pytorch cuda:0', + 'par3d astra_cuda uniform pytorch cuda:0', + 'par3d astra_cuda nonuniform pytorch cuda:0', + 'par3d astra_cuda random pytorch cuda:0', + 'cone3d astra_cuda uniform pytorch cuda:0', + 'cone3d astra_cuda nonuniform pytorch cuda:0', + 'cone3d astra_cuda random pytorch cuda:0', + 'helical astra_cuda uniform pytorch cuda:0']) +) + projectors.extend( (pytest.param(proj_cfg, marks=skip_if_no_skimage) - for proj_cfg in ['par2d skimage uniform', - 'par2d skimage half_uniform']) + for proj_cfg in ['par2d skimage uniform numpy cpu', + 'par2d skimage half_uniform numpy cpu']) ) projector_ids = [ - " geom='{}' - impl='{}' - angles='{}' ".format(*p.values[0].split()) + " geom='{}' - astra_impl='{}' - angles='{}' - tspace_impl='{}' - tspace_device='{}'".format(*p.values[0].split()) for p in projectors ] - @pytest.fixture(scope='module', params=projectors, ids=projector_ids) def projector(request): n = 100 m = 100 n_angles = 100 dtype = 'float32' - - geom, impl, angle = request.param.split() + geom, astra_impl, angle, tspace_impl, tspace_device = request.param.split() if angle == 'uniform': apart = odl.uniform_partition(0, 2 * np.pi, n_angles) @@ -144,59 +176,52 @@ def projector(request): if geom == 'par2d': # Reconstruction space reco_space = odl.uniform_discr([-20] * 2, [20] * 2, [n] * 2, - dtype=dtype) + dtype=dtype, impl=tspace_impl, device=tspace_device) # Geometry dpart = odl.uniform_partition(-30, 30, m) - geom = odl.tomo.Parallel2dGeometry(apart, dpart) - # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) + geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart) elif geom == 'par3d': # Reconstruction space reco_space = odl.uniform_discr([-20] * 3, [20] * 3, [n] * 3, - dtype=dtype) + dtype=dtype, impl=tspace_impl, device=tspace_device) # Geometry dpart = odl.uniform_partition([-30] * 2, [30] * 2, [m] * 2) - geom = odl.tomo.Parallel3dAxisGeometry(apart, dpart) - # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) + geom = odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart) elif geom == 'cone2d': # Reconstruction space reco_space = odl.uniform_discr([-20] * 2, [20] * 2, [n] * 2, - dtype=dtype) + dtype=dtype, impl=tspace_impl, device=tspace_device) # Geometry dpart = odl.uniform_partition(-30, 30, m) - geom = odl.tomo.FanBeamGeometry(apart, dpart, src_radius=200, + geom = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_radius=200, det_radius=100) - # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) elif geom == 'cone3d': # Reconstruction space reco_space = odl.uniform_discr([-20] * 3, [20] * 3, [n] * 3, - dtype=dtype) + dtype=dtype, impl=tspace_impl, device=tspace_device) # Geometry dpart = odl.uniform_partition([-60] * 2, [60] * 2, [m] * 2) - geom = odl.tomo.ConeBeamGeometry(apart, dpart, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_radius=200, det_radius=100) - # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) elif geom == 'helical': # Reconstruction space reco_space = odl.uniform_discr([-20, -20, 0], [20, 20, 40], - [n] * 3, dtype=dtype) + [n] * 3, dtype=dtype, impl=tspace_impl, device=tspace_device) # Geometry, overwriting angle partition apart = odl.uniform_partition(0, 8 * 2 * np.pi, n_angles) dpart = odl.uniform_partition([-30, -3], [30, 3], [m] * 2) - geom = odl.tomo.ConeBeamGeometry(apart, dpart, pitch=5.0, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, pitch=5.0, src_radius=200, det_radius=100) - # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) else: raise ValueError('geom not valid') + + # Ray transform + return odl.applications.tomo.RayTransform(reco_space, geom, impl=astra_impl, use_cache=False) @pytest.fixture(scope='module', @@ -226,8 +251,8 @@ def test_projector(projector, in_place): proj = projector(vol) # We expect maximum value to be along diagonal - expected_max = projector.domain.partition.extent[0] * np.sqrt(2) - assert proj.ufuncs.max() == pytest.approx(expected_max, rel=rtol) + expected_max = projector.domain.partition.extent[0] * math.sqrt(2) + assert odl.max(proj) == pytest.approx(expected_max, rel=rtol) def test_adjoint(projector): @@ -237,14 +262,14 @@ def test_adjoint(projector): if ( ASTRA_AVAILABLE and parse_version(ASTRA_VERSION) < parse_version('1.8rc1') - and isinstance(projector.geometry, odl.tomo.ConeBeamGeometry) + and isinstance(projector.geometry, odl.applications.tomo.ConeBeamGeometry) ): rtol = 0.1 else: rtol = 0.05 # Create Shepp-Logan phantom - vol = odl.phantom.shepp_logan(projector.domain, modified=True) + vol = odl.core.phantom.shepp_logan(projector.domain, modified=True) # Calculate projection proj = projector(vol) @@ -260,7 +285,7 @@ def test_adjoint_of_adjoint(projector): """Test Ray transform adjoint of adjoint.""" # Create Shepp-Logan phantom - vol = odl.phantom.shepp_logan(projector.domain, modified=True) + vol = odl.core.phantom.shepp_logan(projector.domain, modified=True) # Calculate projection proj = projector(vol) @@ -280,17 +305,19 @@ def test_adjoint_of_adjoint(projector): def test_angles(projector): """Test Ray transform angle conventions.""" + ns = projector.domain.tspace.array_namespace + # Smoothed line/hyperplane with offset vol = projector.domain.element( - lambda x: np.exp(-(2 * x[0] - 10 + x[1]) ** 2)) + lambda x: ns.exp(-(2 * x[0] - 10 + x[1]) ** 2)) # Create projection - result = projector(vol).asarray() + result = projector(vol).asarray() # Find the angle where the projection has a maximum (along the line). # TODO: center of mass would be more robust axes = 1 if projector.domain.ndim == 2 else (1, 2) - ind_angle = np.argmax(np.max(result, axis=axes)) + ind_angle = ns.argmax(ns.max(result, axis=axes)) # Restrict to [0, 2 * pi) for helical maximum_angle = np.fmod(projector.geometry.angles[ind_angle], 2 * np.pi) @@ -302,16 +329,21 @@ def test_angles(projector): # Find the pixel where the projection has a maximum at that angle axes = () if projector.domain.ndim == 2 else 1 - ind_pixel = np.argmax(np.max(result[ind_angle], axis=axes)) + ind_pixel = ns.argmax(ns.max(result[ind_angle], axis=axes)) + + # We must convert the ind_pixel back to a float on the cpu + if projector.domain.tspace.impl == 'pytorch': + ind_pixel = int(ind_pixel.detach().cpu()) + max_pixel = projector.geometry.det_partition[ind_pixel, ...].mid_pt[0] # The line is at distance 2 * sqrt(5) from the origin, which translates # to the same distance from the detector midpoint, with positive sign # if the angle is smaller than pi and negative sign otherwise. - expected = 2 * np.sqrt(5) if maximum_angle < np.pi else -2 * np.sqrt(5) + expected = 2 * math.sqrt(5) if maximum_angle < np.pi else -2 * math.sqrt(5) # We need to scale with the magnification factor if applicable - if isinstance(projector.geometry, odl.tomo.DivergentBeamGeometry): + if isinstance(projector.geometry, odl.applications.tomo.DivergentBeamGeometry): src_to_det = ( projector.geometry.src_radius + projector.geometry.det_radius @@ -322,22 +354,27 @@ def test_angles(projector): assert max_pixel == pytest.approx(expected, abs=0.2) -def test_complex(impl): +def test_complex(impl, odl_impl_device_pairs): + tspace_impl, device = odl_impl_device_pairs """Test transform of complex input for parallel 2d geometry.""" - space_c = odl.uniform_discr([-1, -1], [1, 1], (10, 10), dtype='complex64') + space_c = odl.uniform_discr([-1, -1], [1, 1], (10, 10), dtype='complex64', impl=tspace_impl, device=device) space_r = space_c.real_space - geom = odl.tomo.parallel_beam_geometry(space_c) - ray_trafo_c = odl.tomo.RayTransform(space_c, geom, impl=impl) - ray_trafo_r = odl.tomo.RayTransform(space_r, geom, impl=impl) - vol = odl.phantom.shepp_logan(space_c) - vol.imag = odl.phantom.cuboid(space_r) + geom = odl.applications.tomo.parallel_beam_geometry(space_c) + + if tspace_impl == 'pytorch' and impl == 'skimage': + pytest.skip(f'Skimage backend not available with pytorch') + + ray_trafo_c = odl.applications.tomo.RayTransform(space_c, geom, impl=impl) + ray_trafo_r = odl.applications.tomo.RayTransform(space_r, geom, impl=impl) + vol = odl.core.phantom.shepp_logan(space_c) + vol.imag = odl.core.phantom.cuboid(space_r) data = ray_trafo_c(vol) true_data_re = ray_trafo_r(vol.real) true_data_im = ray_trafo_r(vol.imag) - assert all_almost_equal(data.real, true_data_re) - assert all_almost_equal(data.imag, true_data_im) + assert all_equal(data.real, true_data_re) + assert all_equal(data.imag, true_data_im) # test adjoint for complex data backproj_r = ray_trafo_r.adjoint @@ -346,24 +383,25 @@ def test_complex(impl): true_vol_im = backproj_r(data.imag) backproj_vol = backproj_c(data) - assert all_almost_equal(backproj_vol.real, true_vol_re) - assert all_almost_equal(backproj_vol.imag, true_vol_im) + assert all_equal(backproj_vol.real, true_vol_re) + assert all_equal(backproj_vol.imag, true_vol_im) -def test_anisotropic_voxels(geometry): +def test_anisotropic_voxels(geometry, odl_impl_device_pairs): + tspace_impl, device = odl_impl_device_pairs """Test projection and backprojection with anisotropic voxels.""" ndim = geometry.ndim shape = [10] * (ndim - 1) + [5] space = odl.uniform_discr([-1] * ndim, [1] * ndim, shape=shape, - dtype='float32') + dtype='float32', impl=tspace_impl, device=device) # If no implementation is available, skip - if ndim == 2 and not odl.tomo.ASTRA_AVAILABLE: + if ndim == 2 and not odl.applications.tomo.ASTRA_AVAILABLE: pytest.skip(reason='ASTRA not available, skipping 2d test') - elif ndim == 3 and not odl.tomo.ASTRA_CUDA_AVAILABLE: + elif ndim == 3 and not odl.applications.tomo.ASTRA_CUDA_AVAILABLE: pytest.skip(reason='ASTRA_CUDA not available, skipping 3d test') - ray_trafo = odl.tomo.RayTransform(space, geometry) + ray_trafo = odl.applications.tomo.RayTransform(space, geometry) vol_one = ray_trafo.domain.one() data_one = ray_trafo.range.one() @@ -383,7 +421,7 @@ def test_anisotropic_voxels(geometry): assert False -def test_shifted_volume(geometry_type): +def test_shifted_volume(impl, geometry_type, odl_impl_device_pairs): """Check that geometry shifts are handled correctly. We forward project a square/cube of all ones and check that the @@ -398,27 +436,29 @@ def test_shifted_volume(geometry_type): effect. """ apart = odl.nonuniform_partition([0, np.pi / 2, np.pi, 3 * np.pi / 2]) - if geometry_type == 'par2d' and odl.tomo.ASTRA_AVAILABLE: + if geometry_type == 'par2d' and odl.applications.tomo.ASTRA_AVAILABLE: ndim = 2 dpart = odl.uniform_partition(-30, 30, 30) - geometry = odl.tomo.Parallel2dGeometry(apart, dpart) - elif geometry_type == 'par3d' and odl.tomo.ASTRA_CUDA_AVAILABLE: + geometry = odl.applications.tomo.Parallel2dGeometry(apart, dpart) + elif geometry_type == 'par3d' and odl.applications.tomo.ASTRA_CUDA_AVAILABLE: ndim = 3 dpart = odl.uniform_partition([-30, -30], [30, 30], (30, 30)) - geometry = odl.tomo.Parallel3dAxisGeometry(apart, dpart) - if geometry_type == 'cone2d' and odl.tomo.ASTRA_AVAILABLE: + geometry = odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart) + if geometry_type == 'cone2d' and odl.applications.tomo.ASTRA_AVAILABLE: ndim = 2 dpart = odl.uniform_partition(-30, 30, 30) - geometry = odl.tomo.FanBeamGeometry(apart, dpart, + geometry = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_radius=200, det_radius=100) - elif geometry_type == 'cone3d' and odl.tomo.ASTRA_CUDA_AVAILABLE: + elif geometry_type == 'cone3d' and odl.applications.tomo.ASTRA_CUDA_AVAILABLE: ndim = 3 dpart = odl.uniform_partition([-30, -30], [30, 30], (30, 30)) - geometry = odl.tomo.ConeBeamGeometry(apart, dpart, + geometry = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_radius=200, det_radius=100) else: pytest.skip('no projector available for geometry type') + impl, device = odl_impl_device_pairs + min_pt = np.array([-5.0] * ndim) max_pt = np.array([5.0] * ndim) shift_len = 6 # enough to move the projection to one side of the detector @@ -428,8 +468,8 @@ def test_shifted_volume(geometry_type): shift[0] = -shift_len # Generate 4 projections with 90 degrees increment - space = odl.uniform_discr(min_pt + shift, max_pt + shift, [10] * ndim) - ray_trafo = odl.tomo.RayTransform(space, geometry) + space = odl.uniform_discr(min_pt + shift, max_pt + shift, [10] * ndim, dtype='float32', impl=impl, device=device) + ray_trafo = odl.applications.tomo.RayTransform(space, geometry) proj = ray_trafo(space.one()) # Check that the object is projected to the correct place. With the @@ -437,60 +477,68 @@ def test_shifted_volume(geometry_type): # part of the volume, yielding a value around 10 (=side length). # 0 degrees: All on the left - assert np.max(proj[0, :15]) > 5 - assert np.max(proj[0, 15:]) == 0 + assert odl.max(proj[0, :15]) > 5 + assert odl.max(proj[0, 15:]) == 0 # 90 degrees: Left and right - assert np.max(proj[1, :15]) > 5 - assert np.max(proj[1, 15:]) > 5 + assert odl.max(proj[1, :15]) > 5 + assert odl.max(proj[1, 15:]) > 5 # 180 degrees: All on the right - assert np.max(proj[2, :15]) == 0 - assert np.max(proj[2, 15:]) > 5 + assert odl.max(proj[2, :15]) == 0 + assert odl.max(proj[2, 15:]) > 5 # 270 degrees: Left and right - assert np.max(proj[3, :15]) > 5 - assert np.max(proj[3, 15:]) > 5 + assert odl.max(proj[3, :15]) > 5 + assert odl.max(proj[3, 15:]) > 5 # Do the same for axis 1 shift = np.zeros(ndim) shift[1] = -shift_len - space = odl.uniform_discr(min_pt + shift, max_pt + shift, [10] * ndim) - ray_trafo = odl.tomo.RayTransform(space, geometry) + space = odl.uniform_discr(min_pt + shift, max_pt + shift, [10] * ndim, dtype='float32', impl=impl, device=device) + ray_trafo = odl.applications.tomo.RayTransform(space, geometry) proj = ray_trafo(space.one()) # 0 degrees: Left and right - assert np.max(proj[0, :15]) > 5 - assert np.max(proj[0, 15:]) > 5 + assert odl.max(proj[0, :15]) > 5 + assert odl.max(proj[0, 15:]) > 5 # 90 degrees: All on the left - assert np.max(proj[1, :15]) > 5 - assert np.max(proj[1, 15:]) == 0 + assert odl.max(proj[1, :15]) > 5 + assert odl.max(proj[1, 15:]) == 0 # 180 degrees: Left and right - assert np.max(proj[2, :15]) > 5 - assert np.max(proj[2, 15:]) > 5 + assert odl.max(proj[2, :15]) > 5 + assert odl.max(proj[2, 15:]) > 5 # 270 degrees: All on the right - assert np.max(proj[3, :15]) == 0 - assert np.max(proj[3, 15:]) > 5 + assert odl.max(proj[3, :15]) == 0 + assert odl.max(proj[3, 15:]) > 5 -def test_detector_shifts_2d(): +def test_detector_shifts_2d(impl, odl_impl_device_pairs): """Check that detector shifts are handled correctly. We forward project a cubic phantom and check that ray transform and back-projection with and without detector shifts are numerically close (the error depends on domain discretization). """ - - if not odl.tomo.ASTRA_AVAILABLE: + astra_impl = impl + tspace_impl, device = odl_impl_device_pairs + if not odl.applications.tomo.ASTRA_AVAILABLE: pytest.skip(reason='ASTRA not available, skipping 2d test') + if astra_impl == 'astra_cuda': + pytest.skip(reason='This test produces a known error for astra_cuda, passing') + + if impl == 'skimage': + pytest.skip(f'Skimage backend not available with pytofor Fan-Beam Geometry') + d = 10 - space = odl.uniform_discr([-1] * 2, [1] * 2, [d] * 2) - phantom = odl.phantom.cuboid(space, [-1 / 3] * 2, [1 / 3] * 2) + space = odl.uniform_discr([-1] * 2, [1] * 2, [d] * 2, dtype='float32', impl=tspace_impl, device=device) + ns = space.array_namespace + phantom = odl.core.phantom.cuboid(space, [-1 / 3] * 2, [1 / 3] * 2) full_angle = 2 * np.pi n_angles = 2 * 10 @@ -498,10 +546,10 @@ def test_detector_shifts_2d(): det_rad = 2 apart = odl.uniform_partition(0, full_angle, n_angles) dpart = odl.uniform_partition(-4, 4, 8 * d) - geom = odl.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad) + geom = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad) k = 3 shift = k * dpart.cell_sides[0] - geom_shift = odl.tomo.FanBeamGeometry( + geom_shift = odl.applications.tomo.FanBeamGeometry( apart, dpart, src_rad, det_rad, det_shift_func=lambda angle: [0.0, shift] ) @@ -517,23 +565,23 @@ def test_detector_shifts_2d(): + shift * geom_shift.det_axis(angles)) # check ray transform - op = odl.tomo.RayTransform(space, geom) - op_shift = odl.tomo.RayTransform(space, geom_shift) + op = odl.applications.tomo.RayTransform(space, geom, impl=impl) + op_shift = odl.applications.tomo.RayTransform(space, geom_shift, impl=astra_impl) y = op(phantom).asarray() y_shift = op_shift(phantom).asarray() # projection on the shifted detector is shifted regular projection - data_error = np.max(np.abs(y[:, :-k] - y_shift[:, k:])) + data_error = ns.max(ns.abs(y[:, :-k] - y_shift[:, k:])) assert data_error < space.cell_volume # check back-projection im = op.adjoint(y).asarray() im_shift = op_shift.adjoint(y_shift).asarray() - error = np.abs(im_shift - im) - rel_error = np.max(error[im > 0] / im[im > 0]) + error = ns.abs(im_shift - im) + rel_error = ns.max(error[im > 0] / im[im > 0]) assert rel_error < space.cell_volume -def test_source_shifts_2d(): +def test_source_shifts_2d(odl_impl_device_pairs): """Check that source shifts are handled correctly. We forward project a Shepp-Logan phantom and check that reconstruction @@ -541,13 +589,18 @@ def test_source_shifts_2d(): geometries which mimic ffs by using initial angular offsets and detector shifts """ + tspace_impl, device = odl_impl_device_pairs - if not odl.tomo.ASTRA_AVAILABLE: + if not odl.applications.tomo.ASTRA_AVAILABLE: pytest.skip(reason='ASTRA required but not available') + if tspace_impl == 'pytorch' and impl == 'skimage': + pytest.skip(f'Skimage backend not available with pytorch') + d = 10 - space = odl.uniform_discr([-1] * 2, [1] * 2, [d] * 2) - phantom = odl.phantom.cuboid(space, [-1 / 3] * 2, [1 / 3] * 2) + space = odl.uniform_discr([-1] * 2, [1] * 2, [d] * 2, dtype='float32', impl=tspace_impl, device=device) + ns = space.array_namespace + phantom = odl.core.phantom.cuboid(space, [-1 / 3] * 2, [1 / 3] * 2) full_angle = 2 * np.pi n_angles = 2 * 10 @@ -562,10 +615,10 @@ def test_source_shifts_2d(): init = np.array([1, 0], dtype=np.float32) det_init = np.array([0, -1], dtype=np.float32) - ffs = partial(odl.tomo.flying_focal_spot, + ffs = partial(odl.applications.tomo.flying_focal_spot, apart=apart, shifts=[shift1, shift2]) - geom_ffs = odl.tomo.FanBeamGeometry(apart, dpart, + geom_ffs = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_rad, det_rad, src_to_det_init=init, det_axis_init=det_init, @@ -586,19 +639,19 @@ def test_source_shifts_2d(): np.array([det_rad, shift1[1] / src_rad * det_rad])) det_rad2 = np.linalg.norm( np.array([det_rad, shift2[1] / src_rad * det_rad])) - geom1 = odl.tomo.FanBeamGeometry(apart1, dpart, + geom1 = odl.applications.tomo.FanBeamGeometry(apart1, dpart, src_rad1, det_rad1, src_to_det_init=init1, det_axis_init=det_init) - geom2 = odl.tomo.FanBeamGeometry(apart2, dpart, + geom2 = odl.applications.tomo.FanBeamGeometry(apart2, dpart, src_rad2, det_rad2, src_to_det_init=init2, det_axis_init=det_init) # check ray transform - op_ffs = odl.tomo.RayTransform(space, geom_ffs) - op1 = odl.tomo.RayTransform(space, geom1) - op2 = odl.tomo.RayTransform(space, geom2) + op_ffs = odl.applications.tomo.RayTransform(space, geom_ffs) + op1 = odl.applications.tomo.RayTransform(space, geom1) + op2 = odl.applications.tomo.RayTransform(space, geom2) y_ffs = op_ffs(phantom) y1 = op1(phantom).asarray() y2 = op2(phantom).asarray() @@ -610,23 +663,25 @@ def test_source_shifts_2d(): im1 = op1.adjoint(y1).asarray() im2 = op2.adjoint(y2).asarray() im_combined = (im1 + im2) / 2 - rel_error = np.abs((im - im_combined)[im > 0] / im[im > 0]) - assert np.max(rel_error) < 1e-6 + rel_error = ns.abs((im - im_combined)[im > 0] / im[im > 0]) + assert ns.max(rel_error) < 1e-6 -def test_detector_shifts_3d(): +def test_detector_shifts_3d(impl, odl_impl_device_pairs): """Check that detector shifts are handled correctly. We forward project a cubic phantom and check that ray transform and back-projection with and without detector shifts are numerically close (the error depends on domain discretization). """ - if not odl.tomo.ASTRA_CUDA_AVAILABLE: + tspace_impl, device = odl_impl_device_pairs + if not odl.applications.tomo.ASTRA_CUDA_AVAILABLE: pytest.skip(reason='ASTRA CUDA required but not available') d = 100 - space = odl.uniform_discr([-1] * 3, [1] * 3, [d] * 3) - phantom = odl.phantom.cuboid(space, [-1 / 3] * 3, [1 / 3] * 3) + space = odl.uniform_discr([-1] * 3, [1] * 3, [d] * 3, dtype='float32', impl=tspace_impl, device=device) + ns = space.array_namespace + phantom = odl.core.phantom.cuboid(space, [-1 / 3] * 3, [1 / 3] * 3) full_angle = 2 * np.pi n_angles = 2 * 100 @@ -634,11 +689,11 @@ def test_detector_shifts_3d(): det_rad = 2 apart = odl.uniform_partition(0, full_angle, n_angles) dpart = odl.uniform_partition([-4] * 2, [4] * 2, [8 * d] * 2) - geom = odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad) + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad) k = 3 l = 2 shift = np.array([0, k, l]) * dpart.cell_sides[0] - geom_shift = odl.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, + geom_shift = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, det_shift_func=lambda angle: shift) angles = geom.angles @@ -654,21 +709,21 @@ def test_detector_shifts_3d(): - geom_shift.det_axes(angles)[:, 1] * shift[2]) # check forward pass - op = odl.tomo.RayTransform(space, geom) - op_shift = odl.tomo.RayTransform(space, geom_shift) + op = odl.applications.tomo.RayTransform(space, geom) + op_shift = odl.applications.tomo.RayTransform(space, geom_shift) y = op(phantom).asarray() y_shift = op_shift(phantom).asarray() - data_error = np.max(np.abs(y[:, :-k, l:] - y_shift[:, k:, :-l])) + data_error = ns.max(ns.abs(y[:, :-k, l:] - y_shift[:, k:, :-l])) assert data_error < 1e-3 # check back-projection im = op.adjoint(y).asarray() im_shift = op_shift.adjoint(y_shift).asarray() - error = np.max(np.abs(im_shift - im)) + error = ns.max(ns.abs(im_shift - im)) assert error < 1e-3 -def test_source_shifts_3d(): +def test_source_shifts_3d(odl_impl_device_pairs): """Check that source shifts are handled correctly. We forward project a Shepp-Logan phantom and check that reconstruction @@ -676,12 +731,14 @@ def test_source_shifts_3d(): geometries which mimic ffs by using initial angular offsets and detector shifts """ - if not odl.tomo.ASTRA_CUDA_AVAILABLE: + impl, device = odl_impl_device_pairs + if not odl.applications.tomo.ASTRA_CUDA_AVAILABLE: pytest.skip(reason='ASTRA_CUDA not available, skipping 3d test') d = 10 - space = odl.uniform_discr([-1] * 3, [1] * 3, [d] * 3) - phantom = odl.phantom.cuboid(space, [-1 / 3] * 3, [1 / 3] * 3) + space = odl.uniform_discr([-1] * 3, [1] * 3, [d] * 3, dtype='float32', impl=impl, device=device) + ns = space.array_namespace + phantom = odl.core.phantom.cuboid(space, [-1 / 3] * 3, [1 / 3] * 3) full_angle = 2 * np.pi n_angles = 2 * 10 @@ -696,10 +753,10 @@ def test_source_shifts_3d(): shift2 = np.array([0.0, 0.2, -0.1]) init = np.array([1, 0, 0], dtype=np.float32) det_init = np.array([[0, -1, 0], [0, 0, 1]], dtype=np.float32) - ffs = partial(odl.tomo.flying_focal_spot, + ffs = partial(odl.applications.tomo.flying_focal_spot, apart=apart, shifts=[shift1, shift2]) - geom_ffs = odl.tomo.ConeBeamGeometry(apart, dpart, + geom_ffs = odl.applications.tomo.ConeBeamGeometry(apart, dpart, src_rad, det_rad, src_to_det_init=init, det_axes_init=det_init, @@ -721,12 +778,12 @@ def test_source_shifts_3d(): np.array([det_rad, det_rad / src_rad * shift1[1], 0])) det_rad2 = np.linalg.norm( np.array([det_rad, det_rad / src_rad * shift2[1], 0])) - geom1 = odl.tomo.ConeBeamGeometry(apart1, dpart, src_rad1, det_rad1, + geom1 = odl.applications.tomo.ConeBeamGeometry(apart1, dpart, src_rad1, det_rad1, src_to_det_init=init1, det_axes_init=det_init, offset_along_axis=shift1[2], pitch=pitch) - geom2 = odl.tomo.ConeBeamGeometry(apart2, dpart, src_rad2, det_rad2, + geom2 = odl.applications.tomo.ConeBeamGeometry(apart2, dpart, src_rad2, det_rad2, src_to_det_init=init2, det_axes_init=det_init, offset_along_axis=shift2[2], @@ -747,23 +804,23 @@ def test_source_shifts_3d(): assert all_almost_equal(geom_ffs.det_axes(geom_ffs.angles)[1::2], geom2.det_axes(geom2.angles)) - op_ffs = odl.tomo.RayTransform(space, geom_ffs) - op1 = odl.tomo.RayTransform(space, geom1) - op2 = odl.tomo.RayTransform(space, geom2) + op_ffs = odl.applications.tomo.RayTransform(space, geom_ffs) + op1 = odl.applications.tomo.RayTransform(space, geom1) + op2 = odl.applications.tomo.RayTransform(space, geom2) y_ffs = op_ffs(phantom) y1 = op1(phantom) y2 = op2(phantom) - assert all_almost_equal(np.mean(y_ffs[::2], axis=(1, 2)), - np.mean(y1, axis=(1, 2))) - assert all_almost_equal(np.mean(y_ffs[1::2], axis=(1, 2)), - np.mean(y2, axis=(1, 2))) + assert all_almost_equal(odl.mean(y_ffs[::2], axis=(1, 2)), + odl.mean(y1, axis=(1, 2))) + assert all_almost_equal(odl.mean(y_ffs[1::2], axis=(1, 2)), + odl.mean(y2, axis=(1, 2))) im = op_ffs.adjoint(y_ffs).asarray() im_combined = (op1.adjoint(y1).asarray() + op2.adjoint(y2).asarray()) # the scaling is a bit off for older versions of astra - im_combined = im_combined / np.sum(im_combined) * np.sum(im) - rel_error = np.abs((im - im_combined)[im > 0] / im[im > 0]) - assert np.max(rel_error) < 1e-6 + im_combined = im_combined / ns.sum(im_combined) * ns.sum(im) + rel_error = ns.abs((im - im_combined)[im > 0] / im[im > 0]) + assert ns.max(rel_error) < 1e-6 if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/core/array_API_support/test_array_creation.py b/odl/test/core/array_API_support/test_array_creation.py new file mode 100644 index 00000000000..e7b8812eed2 --- /dev/null +++ b/odl/test/core/array_API_support/test_array_creation.py @@ -0,0 +1,93 @@ +import pytest + +import odl + +from odl.core.array_API_support import odl_all_equal + +from odl.core.util.pytest_config import IMPL_DEVICE_PAIRS +from odl.core.util.testutils import ( + noise_elements, simple_fixture) + +DEFAULT_SHAPE = (4,4) + +DEFAULT_FILL = 5 + +from_array = simple_fixture( + 'from_array', ["asarray", "empty_like", "full_like", 'ones_like', 'tril', 'triu', 'zeros_like'] + ) + +from_impl = simple_fixture( + 'from_impl', ['arange', 'empty', 'eye', "full", 'linspace', 'meshgrid', 'ones', 'zeros'] + ) + + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def float_tspace(request, odl_real_floating_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_real_floating_dtype, + impl=impl, + device=device + ) + +def test_from_array(float_tspace, from_array): + ns = float_tspace.array_namespace + arr_fn = getattr(ns, from_array) + odl_fn = getattr(odl, from_array) + + x_arr, x = noise_elements(float_tspace, 1) + + if from_array == 'full_like': + y_arr = arr_fn(x_arr, fill_value=DEFAULT_FILL) + y = odl_fn(x, fill_value=DEFAULT_FILL) + else: + y_arr = arr_fn(x_arr) + y = odl_fn(x) + if from_array == 'empty_like': + pytest.skip("Skipping equality check for empty_like") + + else: + assert odl_all_equal(y_arr, y) + +# Pytorch and Numpy API still vary, making the systematic testing of these functions premature +# def test_from_impl(float_tspace, from_impl): +# ns = float_tspace.array_namespace +# arr_fn = getattr(ns, from_impl) +# odl_fn = getattr(odl, from_impl) + +# # x_arr, x = noise_elements(float_tspace, 1) +# args = () +# kwargs = { +# 'shape' : (4,4), +# 'dtype' : float_tspace.dtype_identifier, +# 'device' : float_tspace.device +# } +# if from_impl == 'arange': +# args = [1] +# kwargs['start'] = 1 +# kwargs['stop'] = 10 +# kwargs['step'] = 1 + +# elif from_impl == 'eye': +# kwargs['n_rows'] = 4 +# kwargs['n_cols'] = 4 +# kwargs['k'] = 0 + +# elif from_impl == 'meshgrid': +# args = [ +# float_tspace.array_backend.array_constructor([0,1,2,3], +# device = float_tspace.device, +# dtype = float_tspace.dtype), +# float_tspace.array_backend.array_constructor([0,1,2,3], +# device = float_tspace.device, +# dtype = float_tspace.dtype) +# ] + +# elif from_impl == 'tril' or from_impl == 'triu': +# kwargs['k'] = 2 + +# print(args, kwargs) +# assert odl_all_equal( +# arr_fn(*args, **kwargs), odl_fn(*args, **kwargs) + # ) diff --git a/odl/test/core/array_API_support/test_comparisons.py b/odl/test/core/array_API_support/test_comparisons.py new file mode 100644 index 00000000000..954fdcb760c --- /dev/null +++ b/odl/test/core/array_API_support/test_comparisons.py @@ -0,0 +1,85 @@ +import pytest + +import odl + +from odl.core.util.pytest_config import IMPL_DEVICE_PAIRS +from odl.core.util.testutils import ( + noise_elements, simple_fixture) + +DEFAULT_SHAPE = (4,4) + +elementwise_comparison = simple_fixture( + 'elementwise', ["isclose" ] + ) + +reduction_comparison = simple_fixture( + 'reduction', ["allclose", "odl_all_equal"] + ) + +truth_value_comparison = simple_fixture( + 'truth_value', ["all", "any",] + ) + + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def float_tspace(request, odl_real_floating_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_real_floating_dtype, + impl=impl, + device=device + ) + +def test_elementwise(float_tspace, elementwise_comparison): + ns = float_tspace.array_namespace + arr_fn = getattr(ns, elementwise_comparison) + odl_fn = getattr(odl, elementwise_comparison) + + xarr0, x0 = noise_elements(float_tspace, 1) + xarr1, x1 = noise_elements(float_tspace, 1) + + assert (arr_fn(xarr0, xarr0) == odl_fn(x0, x0)).all() + assert (arr_fn(xarr0, xarr1) == odl_fn(x0, x1)).all() + assert (arr_fn(xarr1, xarr0) == odl_fn(x1, x0)).all() + +def test_reduction(float_tspace, reduction_comparison): + ns = float_tspace.array_namespace + xarr0, x0 = noise_elements(float_tspace, 1) + xarr1, x1 = noise_elements(float_tspace, 1) + odl_fn = getattr(odl, reduction_comparison) + + if reduction_comparison == 'allclose': + arr_fn = getattr(ns, reduction_comparison) + + elif reduction_comparison == 'odl_all_equal': + all_fn = getattr(ns, 'all') + equal_fn = getattr(ns, 'equal') + def arr_fn(x, y): + return all_fn(equal_fn(x, y)) + + else: + raise ValueError + + assert arr_fn(xarr0, xarr0) == odl_fn(x0, x0) + assert arr_fn(xarr0, xarr1) == odl_fn(x0, x1) + assert arr_fn(xarr1, xarr0) == odl_fn(x1, x0) + +def test_array_truth_value(float_tspace, truth_value_comparison): + ns = float_tspace.array_namespace + arr_fn = getattr(ns, truth_value_comparison) + odl_fn = getattr(odl, truth_value_comparison) + + xarr0, x0 = noise_elements(float_tspace, 1) + xarr1, x1 = noise_elements(float_tspace, 1) + + arr_isclose = getattr(ns, 'isclose') + odl_isclose = getattr(odl, 'isclose') + + expr_0 = arr_isclose(xarr0, xarr0) == odl_isclose(x0, x0) + expr_1 = arr_isclose(xarr0, xarr1) == odl_isclose(x0, x1) + expr_2 = arr_isclose(xarr1, xarr0) == odl_isclose(x1, x0) + assert arr_fn(expr_0) == odl_fn(expr_0) + assert arr_fn(expr_1) == odl_fn(expr_1) + assert arr_fn(expr_2) == odl_fn(expr_2) + diff --git a/odl/test/core/array_API_support/test_element_wise.py b/odl/test/core/array_API_support/test_element_wise.py new file mode 100644 index 00000000000..42d721a3d77 --- /dev/null +++ b/odl/test/core/array_API_support/test_element_wise.py @@ -0,0 +1,198 @@ +import pytest + +import odl +from odl.core.util.pytest_config import IMPL_DEVICE_PAIRS +from odl.core.util.testutils import ( + all_almost_equal, all_equal, noise_array, noise_element, noise_elements, + isclose, simple_fixture) + + + +DEFAULT_SHAPE = (4,4) + +one_operand_op = simple_fixture( + 'one_operand_op', + ['abs', 'asinh', 'atan', 'conj', 'cos', 'cosh', 'exp', 'expm1', 'floor', 'imag', 'isfinite', 'isinf', 'isnan', 'log', 'log1p', 'log2', 'log10', 'logical_not', 'positive', 'real', 'reciprocal', 'round', 'sign', 'signbit', 'sin', 'sinh', 'sqrt', 'square', 'tan', 'tanh', 'trunc'] + ) + +domain_restricted_op = simple_fixture( + 'domain_restricted_op', + ['acos', 'acosh', 'asin', 'atanh'] + ) + +integer_op = simple_fixture( + 'integer_op', + ['bitwise_invert',] + ) + +two_operands_op = simple_fixture( + 'two_operands_op', + ['add', 'atan2', 'copysign', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'hypot', 'less', 'less_equal', 'logaddexp', 'logical_and', 'logical_or', 'logical_xor', 'maximum', 'minimum', 'multiply', 'nextafter', 'not_equal', 'pow', 'remainder', 'subtract'] + ) + +two_operands_op_integer = simple_fixture( + 'two_operands_op_integer', + ['bitwise_and', 'bitwise_left_shift', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor'] + ) + +kwargs_op = simple_fixture( + 'kwargs_op', + ['clip'] + ) + +inplace = simple_fixture( + 'inplace', + [True, False] + ) + + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def float_tspace(request, odl_real_floating_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_real_floating_dtype, + impl=impl, + device=device + ) + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def integer_tspace(request): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype='int64', + impl=impl, + device=device + ) + +def test_one_operand_op_real(float_tspace, one_operand_op, inplace): + if one_operand_op == 'imag' and float_tspace.impl == 'pytorch': + pytest.skip(f'imag is not implemented for tensors with non-complex dtypes in Pytorch.') + ns = float_tspace.array_namespace + arr_fn = getattr(ns, one_operand_op) + odl_fn = getattr(odl, one_operand_op) + + x_arr, x = noise_elements(float_tspace, 1) + x_arr = ns.abs(x_arr) + 0.1 + x = odl.abs(x) + 0.1 + + if inplace: + if one_operand_op in ['imag', 'sign', 'real', 'positive', 'isnan', 'isinf', 'isfinite']: + pytest.skip(f'{one_operand_op} is not supported for inplace updates') + if one_operand_op == 'signbit': + out = odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=bool, + impl=float_tspace.impl, + device=float_tspace.device + ).element() + else: + out = float_tspace.element() + out_arr = out.data + y = odl_fn(x, out=out) + y_arr = arr_fn(x_arr, out=out_arr) + assert all_equal(y, y_arr) + assert all_equal(y, out) + + else: + y = odl_fn(x) + y_arr = arr_fn(x_arr) + assert all_equal(y, y_arr) + +def test_one_operand_op_real_kwargs(float_tspace, kwargs_op, inplace): + ns = float_tspace.array_namespace + arr_fn = getattr(ns, kwargs_op) + odl_fn = getattr(odl, kwargs_op) + + x_arr, x = noise_elements(float_tspace, 1) + if inplace: + out = float_tspace.element() + out_arr = out.data + y = odl_fn(x, out=out) + y_arr = arr_fn(x_arr, out=out_arr) + assert all_equal(y, y_arr) + assert all_equal(y, out) + else: + y = odl_fn(x, min=0, max=1) + y_arr = arr_fn(x_arr, min=0, max=1) + assert all_equal(y, y_arr) + +def test_one_operand_op_integer(integer_tspace, integer_op, inplace): + ns = integer_tspace.array_namespace + arr_fn = getattr(ns, integer_op) + odl_fn = getattr(odl, integer_op) + + x_arr, x = noise_elements(integer_tspace, 1) + ### ODL operation + if inplace: + out = integer_tspace.element() + out_arr = out.data + y = odl_fn(x, out=out) + y_arr = arr_fn(x_arr, out=out_arr) + assert all_equal(y, y_arr) + assert all_equal(y, out) + + else: + y = odl_fn(x) + y_arr = arr_fn(x_arr) + + assert all_equal(y, y_arr) + +def test_domain_restricted_op(float_tspace, domain_restricted_op): + ns = float_tspace.array_namespace + arr_fn = getattr(ns, domain_restricted_op) + odl_fn = getattr(odl, domain_restricted_op) + + x = 0.5 * float_tspace.one() + x_arr = x.data + if inplace: + out = float_tspace.element() + out_arr = out.data + y = odl_fn(x, out=out) + y_arr = arr_fn(x_arr, out=out_arr) + assert all_almost_equal(y, y_arr) + assert all_almost_equal(y, out) + assert all_almost_equal(y_arr, out_arr) + else: + y = odl_fn(x) + y_arr = arr_fn(x_arr) + assert all_almost_equal(y, y_arr) + +def test_two_operands_op_real(float_tspace, two_operands_op): + ns = float_tspace.array_namespace + + arr_fn = getattr(ns, two_operands_op) + odl_fn = getattr(odl, two_operands_op) + + [x_arr, y_arr], [x, y] = noise_elements(float_tspace, 2) + if inplace: + out = float_tspace.element() + out_arr = out.data + z = odl_fn(x, y, out=out) + z_arr = arr_fn(x_arr, y_arr, out=out_arr) + assert all_almost_equal(z, z_arr) + assert all_almost_equal(z, out) + assert all_almost_equal(z_arr, out_arr) + else: + z = odl_fn(x, y) + z_arr = arr_fn(x_arr, y_arr) + assert all_almost_equal(z, z_arr) + +def test_two_operands_op_integer(integer_tspace, two_operands_op_integer): + ns = integer_tspace.array_namespace + arr_fn = getattr(ns, two_operands_op_integer) + odl_fn = getattr(odl, two_operands_op_integer) + + [x_arr, y_arr], [x, y] = noise_elements(integer_tspace, 2) + if inplace: + out = integer_tspace.element() + out_arr = out.data + z = odl_fn(x, y, out=out) + z_arr = arr_fn(x_arr, y_arr, out=out_arr) + assert all_equal(z, z_arr) + assert all_equal(z, out) + else: + z = odl_fn(x, y) + z_arr = arr_fn(x_arr, y_arr) + assert all_almost_equal(z, z_arr) diff --git a/odl/test/core/array_API_support/test_multi_backends.py b/odl/test/core/array_API_support/test_multi_backends.py new file mode 100644 index 00000000000..c5c46f04c9b --- /dev/null +++ b/odl/test/core/array_API_support/test_multi_backends.py @@ -0,0 +1,112 @@ +import pytest + +import odl +from odl.core.util.pytest_config import IMPL_DEVICE_PAIRS +from odl.core.util.testutils import all_almost_equal + +try: + import torch +except ImportError: + pass + +skip_if_no_pytorch = pytest.mark.skipif( + "'pytorch' not in odl.core.space.entry_points.TENSOR_SPACE_IMPLS", + reason='PYTORCH not available', + ) + +IMPLS = [pytest.param(value, marks=skip_if_no_pytorch) for value in IMPL_DEVICE_PAIRS] + +DEFAULT_SHAPE = (4,4) + +@pytest.fixture(scope='module', params=IMPLS) +def tspace(request, odl_floating_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_floating_dtype, + impl=impl, + device=device + ) + +@pytest.fixture(scope='module') +def numpy_tspace(odl_floating_dtype): + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_floating_dtype, + impl='numpy', + device='cpu' + ) + +@pytest.fixture(scope='module') +def pytorch_tspace_cpu(odl_floating_dtype): + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_floating_dtype, + impl='pytorch', + device='cpu' + ) + +@pytest.fixture(scope='module') +def pytorch_tspace_gpu(odl_floating_dtype): + if torch.cuda.device_count() == 0: + pytest.skip(reason="No Cuda-capable GPU available") + + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_floating_dtype, + impl='pytorch', + device='cuda:0' + ) + +def test_same_backend_same_device(tspace, odl_arithmetic_op): + """Test that operations between two elements on separate spaces with the same backend are possible""" + x = next(tspace.examples)[1] + y = next(tspace.examples)[1] + op = odl_arithmetic_op + z_arr = op(x.data, y.data) + z = op(x, y) + assert all_almost_equal([x, y, z], [x.data, y.data, z_arr]) + +@skip_if_no_pytorch +def test_different_backends( + numpy_tspace, pytorch_tspace_cpu, pytorch_tspace_gpu, + odl_arithmetic_op + ): + """Test that operations between two elements on separate spaces with different device or impl are not possible""" + x_np = next(numpy_tspace.examples)[1] + x_pt_cpu = next(pytorch_tspace_cpu.examples)[1] + x_pt_gpu = next(pytorch_tspace_gpu.examples)[1] + op = odl_arithmetic_op + + # Same device, different backend + with pytest.raises(AssertionError): + res = op(x_np, x_pt_cpu) + + with pytest.raises(TypeError): + res = op(x_np, x_pt_cpu.data) + + with pytest.raises(TypeError): + res = op(x_np.data, x_pt_cpu) + + # Same backend, different device + with pytest.raises(AssertionError): + res = op(x_pt_gpu, x_pt_cpu) + + with pytest.raises(TypeError): + res = op(x_pt_gpu.data, x_pt_cpu) + + with pytest.raises(TypeError): + res = op(x_pt_gpu, x_pt_cpu.data) + + # Different device, different backend + with pytest.raises(AssertionError): + res = op(x_np, x_pt_gpu) + + with pytest.raises(TypeError): + res = op(x_np, x_pt_gpu.data) + + with pytest.raises(TypeError): + res = op(x_np.data, x_pt_gpu) + + + \ No newline at end of file diff --git a/odl/test/core/array_API_support/test_statistical.py b/odl/test/core/array_API_support/test_statistical.py new file mode 100644 index 00000000000..dfb8ea16233 --- /dev/null +++ b/odl/test/core/array_API_support/test_statistical.py @@ -0,0 +1,69 @@ +import pytest + +import odl + +from odl.core.array_API_support.comparisons import odl_all_equal + +from odl.core.util.pytest_config import IMPL_DEVICE_PAIRS +from odl.core.util.testutils import ( + noise_elements, simple_fixture) + +DEFAULT_SHAPE = (4,4) + +keepdims_function = simple_fixture( + 'keepdims', + ['max', + 'mean', + 'min', + 'prod', + 'std', + 'sum', + 'var' ] + ) + +cumulative_function = simple_fixture( + 'cumulative', + ['cumulative_prod', + 'cumulative_sum'] + ) + +keepdims = simple_fixture( + 'keepdims', + [True, False] + ) + +axis = simple_fixture( + 'axis', + [0, 1] + ) + + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def float_tspace(request, odl_real_floating_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_real_floating_dtype, + impl=impl, + device=device + ) + +def test_keepdims_function(float_tspace, keepdims_function, keepdims): + ns = float_tspace.array_namespace + arr_fn = getattr(ns, keepdims_function) + odl_fn = getattr(odl, keepdims_function) + + x_arr, x = noise_elements(float_tspace, 1) + y = odl_fn(x, keepdims=keepdims) + y_arr = arr_fn(x_arr, keepdims=keepdims) + assert odl_all_equal(y, y_arr) + +def test_cumulative_function(float_tspace, cumulative_function, axis): + ns = float_tspace.array_namespace + arr_fn = getattr(ns, cumulative_function) + odl_fn = getattr(odl, cumulative_function) + + x_arr, x = noise_elements(float_tspace, 1) + y = odl_fn(x, axis=axis) + y_arr = arr_fn(x_arr, axis=axis) + assert odl_all_equal(y, y_arr) diff --git a/odl/test/discr/diff_ops_test.py b/odl/test/core/discr/diff_ops_test.py similarity index 72% rename from odl/test/discr/diff_ops_test.py rename to odl/test/core/discr/diff_ops_test.py index d8c6caab752..d1f37a3c1a1 100644 --- a/odl/test/discr/diff_ops_test.py +++ b/odl/test/core/discr/diff_ops_test.py @@ -10,14 +10,14 @@ from __future__ import division -import numpy as np import pytest import odl -from odl.discr.diff_ops import ( +from odl.core.discr.diff_ops import ( Divergence, Gradient, Laplacian, PartialDerivative, finite_diff) -from odl.util.testutils import ( +from odl.core.util.testutils import ( all_almost_equal, all_equal, dtype_tol, noise_element, simple_fixture) +from odl.core.array_API_support import get_array_and_backend, odl_all_equal # --- pytest fixtures --- # @@ -29,55 +29,56 @@ @pytest.fixture(scope="module", params=[1, 2, 3], ids=['1d', '2d', '3d']) -def space(request, odl_tspace_impl): - impl = odl_tspace_impl +def space(request, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs ndim = request.param - return odl.uniform_discr([0] * ndim, [1] * ndim, [5] * ndim, impl=impl) - - -# Test data -DATA_1D = np.array([0.5, 1, 3.5, 2, -.5, 3, -1, -1, 0, 3]) - + return odl.uniform_discr([0] * ndim, [1] * ndim, [5] * ndim, impl=impl, device=device) +@pytest.fixture(scope="module") +def data(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + return odl.uniform_discr(0, 1, 10, impl=impl, device=device).element([0.5, 1, 3.5, 2, -.5, 3, -1, -1, 0, 3]) # --- finite_diff --- # -def test_finite_diff_invalid_args(): +def test_finite_diff_invalid_args(data): """Test finite difference function for invalid arguments.""" - + arr, backend = get_array_and_backend(data) # Test that old "edge order" argument fails. with pytest.raises(TypeError): - finite_diff(DATA_1D, axis=0, edge_order=0) + finite_diff(data, axis=0, edge_order=0) # at least a two-element array is required with pytest.raises(ValueError): - finite_diff(np.array([0.0]), axis=0) + finite_diff(backend.array_constructor([0.0]), axis=0) # axis with pytest.raises(IndexError): - finite_diff(DATA_1D, axis=2) + finite_diff(data, axis=2) # in-place argument - out = np.zeros(DATA_1D.size + 1) + # size is an attribute of numpy arrays but of method of pytorch tensors + out = backend.array_namespace.zeros(len(arr) + 1) with pytest.raises(ValueError): - finite_diff(DATA_1D, axis=0, out=out) + finite_diff(data, axis=0, out=out) with pytest.raises(ValueError): - finite_diff(DATA_1D, axis=0, dx=0) + finite_diff(data, axis=0, dx=0) # wrong method with pytest.raises(ValueError): - finite_diff(DATA_1D, axis=0, method='non-method') + finite_diff(data, axis=0, method='non-method') -def test_finite_diff_explicit(): +def test_finite_diff_explicit(data): """Compare finite differences function to explicit computation.""" # phantom data - arr = DATA_1D + arr, backend = get_array_and_backend(data) + ns = backend.array_namespace # explicitly calculated finite difference - diff_ex = np.zeros_like(arr) + diff_ex = ns.zeros_like(arr) # interior: second-order accurate differences diff_ex[1:-1] = (arr[2:] - arr[:-2]) / 2.0 @@ -85,128 +86,128 @@ def test_finite_diff_explicit(): # default: out=None, axis=0, dx=1.0, zero_padding=None, method='forward' diff = finite_diff(arr, axis=0, dx=1.0, out=None, pad_mode='constant') - assert all_equal(diff, finite_diff(arr, axis=0)) + assert odl_all_equal(diff, finite_diff(arr, axis=0)) # boundary: one-sided second-order accurate forward/backward difference diff = finite_diff(arr, axis=0, dx=1.0, out=None, method='central', pad_mode='order2') diff_ex[0] = -(3 * arr[0] - 4 * arr[1] + arr[2]) / 2.0 diff_ex[-1] = (3 * arr[-1] - 4 * arr[-2] + arr[-3]) / 2.0 - assert all_equal(diff, diff_ex) + assert odl_all_equal(diff, diff_ex) # non-unit step length dx = 0.5 diff = finite_diff(arr, axis=0, dx=dx, method='central', out=None, pad_mode='order2') - assert all_equal(diff, diff_ex / dx) + assert odl_all_equal(diff, diff_ex / dx) # boundary: second-order accurate central differences with zero padding diff = finite_diff(arr, axis=0, method='central', pad_mode='constant', pad_const=0) diff_ex[0] = arr[1] / 2.0 diff_ex[-1] = -arr[-2] / 2.0 - assert all_equal(diff, diff_ex) + assert odl_all_equal(diff, diff_ex) # boundary: one-sided first-order forward/backward difference without zero # padding diff = finite_diff(arr, axis=0, method='central', pad_mode='order1') diff_ex[0] = arr[1] - arr[0] # 1st-order accurate forward difference diff_ex[-1] = arr[-1] - arr[-2] # 1st-order accurate backward diff. - assert all_equal(diff, diff_ex) + assert odl_all_equal(diff, diff_ex) # different edge order really differ df1 = finite_diff(arr, axis=0, method='central', pad_mode='order1') df2 = finite_diff(arr, axis=0, method='central', pad_mode='order2') - assert all_equal(df1[1:-1], diff_ex[1:-1]) - assert all_equal(df2[1:-1], diff_ex[1:-1]) + assert odl_all_equal(df1[1:-1], diff_ex[1:-1]) + assert odl_all_equal(df2[1:-1], diff_ex[1:-1]) assert df1[0] != df2[0] assert df1[-1] != df2[-1] # in-place evaluation - out = np.zeros_like(arr) + out = ns.zeros_like(arr) assert out is finite_diff(arr, axis=0, out=out) - assert all_equal(out, finite_diff(arr, axis=0)) + assert odl_all_equal(out, finite_diff(arr, axis=0)) assert out is not finite_diff(arr, axis=0) # axis - arr = np.array([[0., 2., 4., 6., 8.], + arr = backend.array_constructor([[0., 2., 4., 6., 8.], [1., 3., 5., 7., 9.]]) df0 = finite_diff(arr, axis=0, pad_mode='order1') - darr0 = 1 * np.ones(arr.shape) - assert all_equal(df0, darr0) - darr1 = 2 * np.ones(arr.shape) + darr0 = 1 * ns.ones(arr.shape) + assert odl_all_equal(df0, darr0) + darr1 = 2 * ns.ones(arr.shape) df1 = finite_diff(arr, axis=1, pad_mode='order1') - assert all_equal(df1, darr1) + assert odl_all_equal(df1, darr1) # complex arrays - arr = np.array([0., 1., 2., 3., 4.]) + 1j * np.array([10., 9., 8., 7., + arr = backend.array_constructor([0., 1., 2., 3., 4.]) + 1j * backend.array_constructor([10., 9., 8., 7., 6.]) diff = finite_diff(arr, axis=0, pad_mode='order1') assert all(diff.real == 1) assert all(diff.imag == -1) -def test_finite_diff_symmetric_padding(): +def test_finite_diff_symmetric_padding(data): """Finite difference using replicate padding.""" # Using replicate padding forward and backward differences have zero # derivative at the upper or lower endpoint, respectively - assert finite_diff(DATA_1D, axis=0, method='forward', + assert finite_diff(data, axis=0, method='forward', pad_mode='symmetric')[-1] == 0 - assert finite_diff(DATA_1D, axis=0, method='backward', + assert finite_diff(data, axis=0, method='backward', pad_mode='symmetric')[0] == 0 - diff = finite_diff(DATA_1D, axis=0, method='central', pad_mode='symmetric') - assert diff[0] == (DATA_1D[1] - DATA_1D[0]) / 2 - assert diff[-1] == (DATA_1D[-1] - DATA_1D[-2]) / 2 + diff = finite_diff(data, axis=0, method='central', pad_mode='symmetric') + assert diff[0] == (data[1] - data[0]) / 2 + assert diff[-1] == (data[-1] - data[-2]) / 2 -def test_finite_diff_constant_padding(): +def test_finite_diff_constant_padding(data): """Finite difference using constant padding.""" for pad_const in [-1, 0, 1]: - diff_forward = finite_diff(DATA_1D, axis=0, method='forward', + diff_forward = finite_diff(data, axis=0, method='forward', pad_mode='constant', pad_const=pad_const) - assert diff_forward[0] == DATA_1D[1] - DATA_1D[0] - assert diff_forward[-1] == pad_const - DATA_1D[-1] + assert diff_forward[0] == data[1] - data[0] + assert diff_forward[-1] == pad_const - data[-1] - diff_backward = finite_diff(DATA_1D, axis=0, method='backward', + diff_backward = finite_diff(data, axis=0, method='backward', pad_mode='constant', pad_const=pad_const) - assert diff_backward[0] == DATA_1D[0] - pad_const - assert diff_backward[-1] == DATA_1D[-1] - DATA_1D[-2] + assert diff_backward[0] == data[0] - pad_const + assert diff_backward[-1] == data[-1] - data[-2] - diff_central = finite_diff(DATA_1D, axis=0, method='central', + diff_central = finite_diff(data, axis=0, method='central', pad_mode='constant', pad_const=pad_const) - assert diff_central[0] == (DATA_1D[1] - pad_const) / 2 - assert diff_central[-1] == (pad_const - DATA_1D[-2]) / 2 + assert diff_central[0] == (data[1] - pad_const) / 2 + assert diff_central[-1] == (pad_const - data[-2]) / 2 -def test_finite_diff_periodic_padding(): +def test_finite_diff_periodic_padding(data): """Finite difference using periodic padding.""" - diff_forward = finite_diff(DATA_1D, axis=0, method='forward', + diff_forward = finite_diff(data, axis=0, method='forward', pad_mode='periodic') - assert diff_forward[0] == DATA_1D[1] - DATA_1D[0] - assert diff_forward[-1] == DATA_1D[0] - DATA_1D[-1] + assert diff_forward[0] == data[1] - data[0] + assert diff_forward[-1] == data[0] - data[-1] - diff_backward = finite_diff(DATA_1D, axis=0, method='backward', + diff_backward = finite_diff(data, axis=0, method='backward', pad_mode='periodic') - assert diff_backward[0] == DATA_1D[0] - DATA_1D[-1] - assert diff_backward[-1] == DATA_1D[-1] - DATA_1D[-2] + assert diff_backward[0] == data[0] - data[-1] + assert diff_backward[-1] == data[-1] - data[-2] - diff_central = finite_diff(DATA_1D, axis=0, method='central', + diff_central = finite_diff(data, axis=0, method='central', pad_mode='periodic') - assert diff_central[0] == (DATA_1D[1] - DATA_1D[-1]) / 2 - assert diff_central[-1] == (DATA_1D[0] - DATA_1D[-2]) / 2 + assert diff_central[0] == (data[1] - data[-1]) / 2 + assert diff_central[-1] == (data[0] - data[-2]) / 2 # --- PartialDerivative --- # @@ -251,8 +252,8 @@ def test_part_deriv(space, method, padding): # Compare to helper function dx = space.cell_sides[axis] diff = finite_diff(dom_vec_arr, axis=axis, dx=dx, method=method, - pad_mode=pad_mode, - pad_const=pad_const) + pad_mode=pad_mode, + pad_const=pad_const) partial_vec = partial(dom_vec) assert all_almost_equal(partial_vec, diff) @@ -274,9 +275,10 @@ def test_part_deriv(space, method, padding): # --- Gradient --- # -def test_gradient_init(): +def test_gradient_init(odl_impl_device_pairs): """Check initialization of ``Gradient``.""" - space = odl.uniform_discr([0, 0], [1, 1], (4, 5)) + impl, device = odl_impl_device_pairs + space = odl.uniform_discr([0, 0], [1, 1], (4, 5), impl=impl, device=device) vspace = space ** 2 op = Gradient(space) @@ -326,8 +328,8 @@ def test_gradient(space, method, padding): # computation of gradient components with helper function for axis, dx in enumerate(space.cell_sides): diff = finite_diff(dom_vec_arr, axis=axis, dx=dx, method=method, - pad_mode=pad_mode, - pad_const=pad_const) + pad_mode=pad_mode, + pad_const=pad_const) assert all_almost_equal(grad_vec[axis].asarray(), diff) @@ -348,7 +350,7 @@ def test_gradient(space, method, padding): lin_size = 3 for ndim in [1, 3, 6]: space = odl.uniform_discr([0.] * ndim, [1.] * ndim, [lin_size] * ndim) - dom_vec = odl.phantom.cuboid(space, [0.2] * ndim, [0.8] * ndim) + dom_vec = odl.core.phantom.cuboid(space, [0.2] * ndim, [0.8] * ndim) grad = Gradient(space, method=method, pad_mode=pad_mode, pad_const=pad_const) @@ -357,9 +359,10 @@ def test_gradient(space, method, padding): # --- Divergence --- # -def test_divergence_init(): +def test_divergence_init(odl_impl_device_pairs): """Check initialization of ``Divergence``.""" - space = odl.uniform_discr([0, 0], [1, 1], (4, 5)) + impl, device = odl_impl_device_pairs + space = odl.uniform_discr([0, 0], [1, 1], (4, 5), impl=impl, device=device) vspace = space ** 2 op = Divergence(vspace) @@ -398,19 +401,19 @@ def test_divergence(space, method, padding): # Operator instance div = Divergence(range=space, method=method, - pad_mode=pad_mode, - pad_const=pad_const) + pad_mode=pad_mode, + pad_const=pad_const) # Apply operator dom_vec = noise_element(div.domain) div_dom_vec = div(dom_vec) # computation of divergence with helper function - expected_result = np.zeros(space.shape) + expected_result = space.array_namespace.zeros(space.shape, dtype=space.dtype, device=space.device) for axis, dx in enumerate(space.cell_sides): expected_result += finite_diff(dom_vec[axis], axis=axis, dx=dx, - method=method, pad_mode=pad_mode, - pad_const=pad_const) + method=method, pad_mode=pad_mode, + pad_const=pad_const) assert all_almost_equal(expected_result, div_dom_vec.asarray()) @@ -431,9 +434,10 @@ def test_divergence(space, method, padding): # --- Laplacian --- # -def test_laplacian_init(): +def test_laplacian_init(odl_impl_device_pairs): """Check initialization of ``Laplacian``.""" - space = odl.uniform_discr([0, 0], [1, 1], (4, 5)) + impl, device = odl_impl_device_pairs + space = odl.uniform_discr([0, 0], [1, 1], (4, 5), impl=impl, device=device) op = Laplacian(space) assert repr(op) != '' @@ -466,14 +470,14 @@ def test_laplacian(space, padding): div_dom_vec = lap(dom_vec) # computation of divergence with helper function - expected_result = np.zeros(space.shape) + expected_result = space.array_namespace.zeros(space.shape, device=space.device, dtype=space.dtype) for axis, dx in enumerate(space.cell_sides): diff_f = finite_diff(dom_vec.asarray(), axis=axis, dx=dx ** 2, - method='forward', pad_mode=pad_mode, - pad_const=pad_const) + method='forward', pad_mode=pad_mode, + pad_const=pad_const) diff_b = finite_diff(dom_vec.asarray(), axis=axis, dx=dx ** 2, - method='backward', pad_mode=pad_mode, - pad_const=pad_const) + method='backward', pad_mode=pad_mode, + pad_const=pad_const) expected_result += diff_f - diff_b assert all_almost_equal(expected_result, div_dom_vec.asarray()) @@ -495,4 +499,4 @@ def test_laplacian(space, padding): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/discr/discr_ops_test.py b/odl/test/core/discr/discr_ops_test.py similarity index 70% rename from odl/test/discr/discr_ops_test.py rename to odl/test/core/discr/discr_ops_test.py index 8841ffe734f..910caa3ded9 100644 --- a/odl/test/discr/discr_ops_test.py +++ b/odl/test/core/discr/discr_ops_test.py @@ -14,11 +14,10 @@ import pytest import odl -from odl.discr.discr_ops import _SUPPORTED_RESIZE_PAD_MODES -from odl.space.entry_points import tensor_space_impl -from odl.util import is_numeric_dtype, is_real_floating_dtype -from odl.util.testutils import dtype_tol, noise_element +from odl.core.discr.discr_ops import _SUPPORTED_RESIZE_PAD_MODES +from odl.core.util.testutils import dtype_tol, noise_element, all_equal +from odl.core.util.dtype_utils import AVAILABLE_DTYPES, SCALAR_DTYPES, FLOAT_DTYPES, REAL_DTYPES # --- pytest fixtures --- # @@ -45,13 +44,17 @@ def padding(request): # --- ResizingOperator tests --- # -def test_resizing_op_init(odl_tspace_impl, padding): +def test_resizing_op_init(odl_impl_device_pairs, padding): # Test if the different init patterns run - impl = odl_tspace_impl + impl, device = odl_impl_device_pairs pad_mode, pad_const = padding - space = odl.uniform_discr([0, -1], [1, 1], (10, 5), impl=impl) - res_space = odl.uniform_discr([0, -3], [2, 3], (20, 15), impl=impl) + space = odl.uniform_discr( + [0, -1], [1, 1], (10, 5), impl=impl, device=device + ) + res_space = odl.uniform_discr( + [0, -3], [2, 3], (20, 15), impl=impl, device=device + ) odl.ResizingOperator(space, res_space) odl.ResizingOperator(space, ran_shp=(20, 15)) @@ -63,7 +66,7 @@ def test_resizing_op_init(odl_tspace_impl, padding): discr_kwargs={'nodes_on_bdry': True}) -def test_resizing_op_raise(): +def test_resizing_op_raise(odl_impl_device_pairs): """Validate error checking in ResizingOperator.""" # Domain not a uniformly discretized Lp with pytest.raises(TypeError): @@ -71,21 +74,24 @@ def test_resizing_op_raise(): grid = odl.RectGrid([0, 2, 3]) part = odl.RectPartition(odl.IntervalProd(0, 3), grid) - tspace = odl.rn(3) + + impl, device = odl_impl_device_pairs + + tspace = odl.rn(3, impl=impl, device=device) space = odl.DiscretizedSpace(part, tspace) with pytest.raises(ValueError): odl.ResizingOperator(space, ran_shp=(10,)) # Different cell sides in domain and range - space = odl.uniform_discr(0, 1, 10) - res_space = odl.uniform_discr(0, 1, 15) + space = odl.uniform_discr(0, 1, 10, impl=impl, device=device) + res_space = odl.uniform_discr(0, 1, 15, impl=impl, device=device) with pytest.raises(ValueError): odl.ResizingOperator(space, res_space) # Non-integer multiple of cell sides used as shift (grid of the # resized space shifted) - space = odl.uniform_discr(0, 1, 5) - res_space = odl.uniform_discr(-0.5, 1.5, 10) + space = odl.uniform_discr(0, 1, 5, impl=impl, device=device) + res_space = odl.uniform_discr(-0.5, 1.5, 10, impl=impl, device=device) with pytest.raises(ValueError): odl.ResizingOperator(space, res_space) @@ -94,8 +100,8 @@ def test_resizing_op_raise(): odl.ResizingOperator(space) # Offset cannot be combined with range - space = odl.uniform_discr([0, -1], [1, 1], (10, 5)) - res_space = odl.uniform_discr([0, -3], [2, 3], (20, 15)) + space = odl.uniform_discr([0, -1], [1, 1], (10, 5), impl=impl, device=device) + res_space = odl.uniform_discr([0, -3], [2, 3], (20, 15), impl=impl, device=device) with pytest.raises(ValueError): odl.ResizingOperator(space, res_space, offset=(0, 0)) @@ -104,18 +110,16 @@ def test_resizing_op_raise(): odl.ResizingOperator(space, res_space, pad_mode='something') -def test_resizing_op_properties(odl_tspace_impl, padding): +def test_resizing_op_properties(odl_impl_device_pairs, padding): - impl = odl_tspace_impl - dtypes = [dt for dt in tensor_space_impl(impl).available_dtypes() - if is_numeric_dtype(dt)] + impl, device = odl_impl_device_pairs pad_mode, pad_const = padding - for dtype in dtypes: + for dtype in SCALAR_DTYPES: # Explicit range - space = odl.uniform_discr([0, -1], [1, 1], (10, 5), dtype=dtype) - res_space = odl.uniform_discr([0, -3], [2, 3], (20, 15), dtype=dtype) + space = odl.uniform_discr([0, -1], [1, 1], (10, 5), dtype=dtype, impl=impl, device=device) + res_space = odl.uniform_discr([0, -3], [2, 3], (20, 15), dtype=dtype, impl=impl, device=device) res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode, pad_const=pad_const) @@ -145,53 +149,58 @@ def test_resizing_op_properties(odl_tspace_impl, padding): assert res_op.is_linear -def test_resizing_op_call(odl_tspace_impl): - - impl = odl_tspace_impl - dtypes = [dt for dt in tensor_space_impl(impl).available_dtypes() - if is_numeric_dtype(dt)] +def test_resizing_op_call(odl_impl_device_pairs): - for dtype in dtypes: + impl, device = odl_impl_device_pairs + + for dtype in AVAILABLE_DTYPES: # Minimal test since this operator only wraps resize_array space = odl.uniform_discr( - [0, -1], [1, 1], (4, 5), dtype=dtype, impl=impl + [0, -1], [1, 1], (4, 5), dtype=dtype, impl=impl, device=device ) res_space = odl.uniform_discr( - [0, -0.6], [2, 0.2], (8, 2), dtype=dtype, impl=impl + [0, -0.6], [2, 0.2], (8, 2), dtype=dtype, impl=impl, device=device ) res_op = odl.ResizingOperator(space, res_space) out = res_op(space.one()) true_res = np.zeros((8, 2), dtype=dtype) true_res[:4, :] = 1 - assert np.array_equal(out, true_res) + assert all_equal(out, true_res) out = res_space.element() res_op(space.one(), out=out) - assert np.array_equal(out, true_res) + assert all_equal(out, true_res) # Test also mapping to default impl for other 'impl' - if impl != 'numpy': - space = odl.uniform_discr( - [0, -1], [1, 1], (4, 5), dtype=dtype, impl=impl - ) - res_space = odl.uniform_discr( - [0, -0.6], [2, 0.2], (8, 2), dtype=dtype - ) - res_op = odl.ResizingOperator(space, res_space) - out = res_op(space.one()) - true_res = np.zeros((8, 2), dtype=dtype) - true_res[:4, :] = 1 - assert np.array_equal(out, true_res) - - out = res_space.element() - res_op(space.one(), out=out) - assert np.array_equal(out, true_res) - - -def test_resizing_op_deriv(padding): + # if impl != 'numpy': + # space = odl.uniform_discr( + # [0, -1], [1, 1], (4, 5), dtype=dtype, impl=impl + # ) + # res_space = odl.uniform_discr( + # [0, -0.6], [2, 0.2], (8, 2), dtype=dtype + # ) + # res_op = odl.ResizingOperator(space, res_space) + # out = res_op(space.one()) + # true_res = np.zeros((8, 2), dtype=dtype) + # true_res[:4, :] = 1 + # assert all_equal(out, true_res) + + # out = res_space.element() + # res_op(space.one(), out=out) + # assert all_equal(out, true_res) + + +def test_resizing_op_deriv(padding, odl_impl_device_pairs): + + impl, device = odl_impl_device_pairs + pad_mode, pad_const = padding - space = odl.uniform_discr([0, -1], [1, 1], (4, 5)) - res_space = odl.uniform_discr([0, -0.6], [2, 0.2], (8, 2)) + space = odl.uniform_discr( + [0, -1], [1, 1], (4, 5), impl=impl, device=device + ) + res_space = odl.uniform_discr( + [0, -0.6], [2, 0.2], (8, 2), impl=impl, device=device + ) res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode, pad_const=pad_const) res_op_deriv = res_op.derivative(space.one()) @@ -204,14 +213,12 @@ def test_resizing_op_deriv(padding): assert res_op_deriv is res_op -def test_resizing_op_inverse(padding, odl_tspace_impl): +def test_resizing_op_inverse(padding, odl_impl_device_pairs): - impl = odl_tspace_impl + impl, device = odl_impl_device_pairs pad_mode, pad_const = padding - dtypes = [dt for dt in tensor_space_impl(impl).available_dtypes() - if is_numeric_dtype(dt)] - for dtype in dtypes: + for dtype in SCALAR_DTYPES: if pad_mode == 'order1' and ( np.issubdtype(dtype, np.unsignedinteger) @@ -223,9 +230,9 @@ def test_resizing_op_inverse(padding, odl_tspace_impl): continue space = odl.uniform_discr([0, -1], [1, 1], (4, 5), dtype=dtype, - impl=impl) + impl=impl, device=device) res_space = odl.uniform_discr([0, -1.4], [1.5, 1.4], (6, 7), - dtype=dtype, impl=impl) + dtype=dtype, impl=impl, device=device) res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode, pad_const=pad_const) @@ -234,18 +241,15 @@ def test_resizing_op_inverse(padding, odl_tspace_impl): assert res_op.inverse(res_op(x)) == x -def test_resizing_op_adjoint(padding, odl_tspace_impl): +def test_resizing_op_adjoint(padding, odl_impl_device_pairs): - impl = odl_tspace_impl + impl, device = odl_impl_device_pairs pad_mode, pad_const = padding - dtypes = [dt for dt in tensor_space_impl(impl).available_dtypes() - if is_real_floating_dtype(dt)] - - for dtype in dtypes: + for dtype in FLOAT_DTYPES: space = odl.uniform_discr([0, -1], [1, 1], (4, 5), dtype=dtype, - impl=impl) + impl=impl, device=device) res_space = odl.uniform_discr([0, -1.4], [1.5, 1.4], (6, 7), - dtype=dtype, impl=impl) + dtype=dtype, impl=impl, device=device) res_op = odl.ResizingOperator(space, res_space, pad_mode=pad_mode, pad_const=pad_const) @@ -263,12 +267,15 @@ def test_resizing_op_adjoint(padding, odl_tspace_impl): abs = 1e-2 * space.size * dtype_tol(dtype) * elem.norm() * res_elem.norm()) -def test_resizing_op_mixed_uni_nonuni(): +def test_resizing_op_mixed_uni_nonuni(odl_impl_device_pairs): """Check if resizing along uniform axes in mixed discretizations works.""" + + impl, device = odl_impl_device_pairs + nonuni_part = odl.nonuniform_partition([0, 1, 4]) uni_part = odl.uniform_partition(-1, 1, 4) part = uni_part.append(nonuni_part, uni_part, nonuni_part) - tspace = odl.rn(part.shape) + tspace = odl.rn(part.shape, impl=impl, device=device) space = odl.DiscretizedSpace(part, tspace) # Keep non-uniform axes fixed @@ -279,7 +286,7 @@ def test_resizing_op_mixed_uni_nonuni(): # Evaluation test with a simpler case part = uni_part.append(nonuni_part) - tspace = odl.rn(part.shape) + tspace = odl.rn(part.shape, impl=impl, device=device) space = odl.DiscretizedSpace(part, tspace) res_op = odl.ResizingOperator(space, ran_shp=(6, 3)) result = res_op(space.one()) @@ -289,7 +296,7 @@ def test_resizing_op_mixed_uni_nonuni(): [1, 1, 1], [1, 1, 1], [0, 0, 0]] - assert np.array_equal(result, true_result) + assert all_equal(result, true_result) # Test adjoint elem = noise_element(space) @@ -300,4 +307,4 @@ def test_resizing_op_mixed_uni_nonuni(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/discr/discr_space_test.py b/odl/test/core/discr/discr_space_test.py similarity index 56% rename from odl/test/discr/discr_space_test.py rename to odl/test/core/discr/discr_space_test.py index eac02b9740a..06b0fa626ba 100644 --- a/odl/test/discr/discr_space_test.py +++ b/odl/test/core/discr/discr_space_test.py @@ -14,13 +14,13 @@ import odl import pytest -from odl.discr.discr_space import DiscretizedSpace, DiscretizedSpaceElement -from odl.space.base_tensors import TensorSpace -from odl.space.npy_tensors import NumpyTensor -from odl.space.weighting import ConstWeighting -from odl.util.testutils import ( - all_almost_equal, all_equal, noise_elements, simple_fixture) - +from odl.core.discr.discr_space import DiscretizedSpace, DiscretizedSpaceElement +from odl.core.space.base_tensors import TensorSpace, default_dtype +from odl.backends.arrays.npy_tensors import NumpyTensor +from odl.core.util.dtype_utils import COMPLEX_DTYPES, DTYPE_SHORTHANDS +from odl.core.util.testutils import ( + all_almost_equal, all_equal, noise_elements, simple_fixture, default_precision_dict) +from odl.core.array_API_support import lookup_array_backend # --- Pytest fixtures --- # @@ -33,11 +33,12 @@ # --- DiscretizedSpace --- # -def test_discretizedspace_init(): +def test_discretizedspace_init(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test initialization and basic properties of DiscretizedSpace.""" # Real space part = odl.uniform_partition([0, 0], [1, 1], (2, 4)) - tspace = odl.rn(part.shape) + tspace = odl.rn(part.shape, impl=impl, device=device) discr = DiscretizedSpace(part, tspace) assert discr.tspace == tspace @@ -47,7 +48,7 @@ def test_discretizedspace_init(): assert discr.is_real # Complex space - tspace_c = odl.cn(part.shape) + tspace_c = odl.cn(part.shape, impl=impl, device=device) discr = DiscretizedSpace(part, tspace_c) assert discr.is_complex @@ -64,37 +65,38 @@ def test_discretizedspace_init(): DiscretizedSpace(part_diffshp, tspace) # shape mismatch -def test_empty(): +def test_empty(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Check if empty spaces behave as expected and all methods work.""" - discr = odl.uniform_discr([], [], ()) + discr = odl.uniform_discr([], [], (), impl=impl, device=device) assert discr.axis_labels == () assert discr.tangent_bundle == odl.ProductSpace(field=odl.RealNumbers()) - assert discr.complex_space == odl.uniform_discr([], [], (), dtype=complex) + assert discr.complex_space == odl.uniform_discr([], [], (), dtype=complex, impl=impl, device=device) hash(discr) assert repr(discr) != '' elem = discr.element(1.0) - assert np.array_equal(elem.asarray(), 1.0) - assert np.array_equal(elem.real, 1.0) - assert np.array_equal(elem.imag, 0.0) - assert np.array_equal(elem.conj(), 1.0) + assert all_equal(elem.asarray(), 1.0) + assert all_equal(elem.real, 1.0) + assert all_equal(elem.imag, 0.0) + assert all_equal(elem.conj(), 1.0) # --- uniform_discr --- # -def test_factory_dtypes(odl_tspace_impl): +def test_factory_dtypes(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Check dtypes of spaces from factory function.""" - impl = odl_tspace_impl - real_float_dtypes = [np.float32, np.float64] - nonfloat_dtypes = [np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64] - complex_float_dtypes = [np.complex64, np.complex128] + real_float_dtypes = ["float32", "float64"] + nonfloat_dtypes = ["int8", "int16", "int32", "int64", + "uint8", "uint16", "uint32", "uint64"] + complex_float_dtypes = ["complex64", "complex128"] for dtype in real_float_dtypes: try: - discr = odl.uniform_discr(0, 1, 10, impl=impl, dtype=dtype) + discr = odl.uniform_discr(0, 1, 10, impl=impl, dtype=dtype, device=device) except TypeError: continue else: @@ -104,38 +106,38 @@ def test_factory_dtypes(odl_tspace_impl): for dtype in nonfloat_dtypes: try: - discr = odl.uniform_discr(0, 1, 10, impl=impl, dtype=dtype) + discr = odl.uniform_discr(0, 1, 10, impl=impl, dtype=dtype, device=device) except TypeError: continue else: assert isinstance(discr.tspace, TensorSpace) assert discr.tspace.impl == impl - assert discr.tspace.element().space.dtype == dtype + assert discr.tspace.element().space.dtype_identifier == dtype for dtype in complex_float_dtypes: try: - discr = odl.uniform_discr(0, 1, 10, impl=impl, dtype=dtype) + discr = odl.uniform_discr(0, 1, 10, impl=impl, dtype=dtype, device=device) except TypeError: continue else: assert isinstance(discr.tspace, TensorSpace) assert discr.tspace.impl == impl assert discr.is_complex - assert discr.tspace.element().space.dtype == dtype + assert discr.tspace.element().space.dtype_identifier == dtype -def test_uniform_discr_init_real(odl_tspace_impl): +def test_uniform_discr_init_real(odl_impl_device_pairs): """Test initialization and basic properties with uniform_discr, real.""" - impl = odl_tspace_impl + impl, device = odl_impl_device_pairs # 1D - discr = odl.uniform_discr(0, 1, 10, impl=impl) + discr = odl.uniform_discr(0, 1, 10, impl=impl, device=device) assert isinstance(discr, DiscretizedSpace) assert isinstance(discr.tspace, TensorSpace) assert discr.impl == impl assert discr.is_real assert discr.tspace.exponent == 2.0 - assert discr.dtype == discr.tspace.default_dtype(odl.RealNumbers()) + assert discr.dtype == default_dtype(impl, field=odl.RealNumbers()) assert discr.is_real assert not discr.is_complex assert all_equal(discr.min_pt, [0]) @@ -143,85 +145,82 @@ def test_uniform_discr_init_real(odl_tspace_impl): assert discr.shape == (10,) assert repr(discr) - discr = odl.uniform_discr(0, 1, 10, impl=impl, exponent=1.0) + discr = odl.uniform_discr(0, 1, 10, impl=impl, exponent=1.0, device=device) assert discr.exponent == 1.0 # 2D - discr = odl.uniform_discr([0, 0], [1, 1], (5, 5)) + discr = odl.uniform_discr([0, 0], [1, 1], (5, 5), impl=impl, device=device) assert all_equal(discr.min_pt, np.array([0, 0])) assert all_equal(discr.max_pt, np.array([1, 1])) assert discr.shape == (5, 5) # nd - discr = odl.uniform_discr([0] * 10, [1] * 10, (5,) * 10) + discr = odl.uniform_discr([0] * 10, [1] * 10, (5,) * 10, impl=impl, device=device) assert all_equal(discr.min_pt, np.zeros(10)) assert all_equal(discr.max_pt, np.ones(10)) assert discr.shape == (5,) * 10 -def test_uniform_discr_init_complex(odl_tspace_impl): - """Test initialization and basic properties with uniform_discr, complex.""" - impl = odl_tspace_impl - if impl != 'numpy': - pytest.xfail(reason='complex dtypes not supported') +# ## Why does this test fail if impl != numpy? +# def test_uniform_discr_init_complex(odl_tspace_impl): +# """Test initialization and basic properties with uniform_discr, complex.""" +# impl = odl_tspace_impl +# if impl != 'numpy': +# pytest.xfail(reason='complex dtypes not supported') - discr = odl.uniform_discr(0, 1, 10, dtype='complex', impl=impl) - assert discr.is_complex - assert discr.dtype == discr.tspace.default_dtype(odl.ComplexNumbers()) +# discr = odl.uniform_discr(0, 1, 10, dtype='complex', impl=impl) +# assert discr.is_complex +# assert discr.dtype == default_dtype(impl, field=odl.ComplexNumbers()) # --- DiscretizedSpace methods --- # -def test_discretizedspace_element(): +def test_discretizedspace_element(odl_impl_device_pairs): """Test creation and membership of DiscretizedSpace elements.""" + impl, device = odl_impl_device_pairs # Creation from scratch # 1D - discr = odl.uniform_discr(0, 1, 3) + discr = odl.uniform_discr(0, 1, 3, impl=impl, device=device) weight = 1.0 if exponent == float('inf') else discr.cell_volume - tspace = odl.rn(3, weighting=weight) + tspace = odl.rn(3, weighting=weight, impl=impl, device=device) elem = discr.element() assert elem in discr assert elem.tensor in tspace # 2D - discr = odl.uniform_discr([0, 0], [1, 1], (3, 3)) + discr = odl.uniform_discr([0, 0], [1, 1], (3, 3), impl=impl, device=device) weight = 1.0 if exponent == float('inf') else discr.cell_volume - tspace = odl.rn((3, 3), weighting=weight) + tspace = odl.rn((3, 3), weighting=weight, impl=impl, device=device) elem = discr.element() assert elem in discr assert elem.tensor in tspace -def test_discretizedspace_element_from_array(): +def test_discretizedspace_element_from_array(odl_impl_device_pairs): """Test creation of DiscretizedSpace elements from arrays.""" + impl, device = odl_impl_device_pairs # 1D - discr = odl.uniform_discr(0, 1, 3) + discr = odl.uniform_discr(0, 1, 3, impl=impl, device=device) elem = discr.element([1, 2, 3]) - assert np.array_equal(elem.tensor, [1, 2, 3]) + assert all_equal(elem.tensor, [1, 2, 3]) assert isinstance(elem, DiscretizedSpaceElement) - assert isinstance(elem.tensor, NumpyTensor) + assert isinstance(elem.tensor, discr.tspace.element_type) assert all_equal(elem.tensor, [1, 2, 3]) - -def test_element_from_array_2d(odl_elem_order): - """Test element in 2d with different orderings.""" - order = odl_elem_order - discr = odl.uniform_discr([0, 0], [1, 1], [2, 2]) +def test_element_from_array_2d(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + discr = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl, device=device) elem = discr.element([[1, 2], - [3, 4]], order=order) + [3, 4]]) assert isinstance(elem, DiscretizedSpaceElement) - assert isinstance(elem.tensor, NumpyTensor) + if impl=='numpy': + assert isinstance(elem.tensor, NumpyTensor) assert all_equal(elem, [[1, 2], [3, 4]]) - if order is None: - assert elem.tensor.data.flags[discr.default_order + '_CONTIGUOUS'] - else: - assert elem.tensor.data.flags[order + '_CONTIGUOUS'] - with pytest.raises(ValueError): discr.element([1, 2, 3]) # wrong size & shape with pytest.raises(ValueError): @@ -233,14 +232,18 @@ def test_element_from_array_2d(odl_elem_order): [4]]) # wrong shape -def test_element_from_function_1d(): +def test_element_from_function_1d(odl_impl_device_pairs): """Test creation of DiscretizedSpace elements from functions in 1D.""" - space = odl.uniform_discr(-1, 1, 4) + impl, device = odl_impl_device_pairs + space = odl.uniform_discr(-1, 1, 4, impl=impl, device=device) points = space.points().squeeze() + backend = lookup_array_backend(impl) + namespace = backend.array_namespace # Without parameter def f(x): - return x * 2 + np.maximum(x, 0) + zero = namespace.zeros_like(x) + return x * 2 + namespace.maximum(x, zero) elem_f = space.element(f) true_elem = [x * 2 + max(x, 0) for x in points] @@ -248,7 +251,8 @@ def f(x): # Without parameter, using same syntax as in higher dimensions def f(x): - return x[0] * 2 + np.maximum(x[0], 0) + zero = namespace.zeros_like(x[0]) + return x[0] * 2 + namespace.maximum(x[0], zero) elem_f = space.element(f) true_elem = [x * 2 + max(x, 0) for x in points] @@ -257,7 +261,8 @@ def f(x): # With parameter def f(x, **kwargs): c = kwargs.pop('c', 0) - return x * c + np.maximum(x, 0) + zero = namespace.zeros_like(x) + return x * c + namespace.maximum(x, zero) elem_f_default = space.element(f) true_elem = [x * 0 + max(x, 0) for x in points] @@ -278,14 +283,19 @@ def f(x, **kwargs): assert all_equal(elem_lam, true_elem) -def test_element_from_function_2d(): +def test_element_from_function_2d(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test creation of DiscretizedSpace elements from functions in 2D.""" - space = odl.uniform_discr([-1, -1], [1, 1], (2, 3)) + space = odl.uniform_discr([-1, -1], [1, 1], (2, 3), impl=impl, device=device) points = space.points() + backend = lookup_array_backend(impl) + namespace = backend.array_namespace + # Without parameter def f(x): - return x[0] ** 2 + np.maximum(x[1], 0) + zero = namespace.zeros_like(x[0]) + return x[0] ** 2 + namespace.maximum(x[1], zero) elem_f = space.element(f) true_elem = np.reshape( @@ -295,8 +305,8 @@ def f(x): # With parameter def f(x, **kwargs): - c = kwargs.pop('c', 0) - return x[0] ** 2 + np.maximum(x[1], c) + c = kwargs.pop('c', 0) * namespace.ones_like(x[0]) + return x[0] ** 2 + namespace.maximum(x[1], c) elem_f_default = space.element(f) true_elem = np.reshape( @@ -330,24 +340,25 @@ def f(x, **kwargs): assert all_equal(elem_lam, true_elem) -def test_discretizedspace_zero_one(): +def test_discretizedspace_zero_one(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test the zero and one element creators of DiscretizedSpace.""" - discr = odl.uniform_discr(0, 1, 3) + discr = odl.uniform_discr(0, 1, 3, impl=impl, device=device) zero = discr.zero() assert zero in discr - assert np.array_equal(zero, [0, 0, 0]) + assert all_equal(zero, [0, 0, 0]) one = discr.one() assert one in discr - assert np.array_equal(one, [1, 1, 1]) + assert all_equal(one, [1, 1, 1]) -def test_equals_space(exponent, odl_tspace_impl): - impl = odl_tspace_impl - x1 = odl.uniform_discr(0, 1, 3, exponent=exponent, impl=impl) - x2 = odl.uniform_discr(0, 1, 3, exponent=exponent, impl=impl) - y = odl.uniform_discr(0, 1, 4, exponent=exponent, impl=impl) +def test_equals_space(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + x1 = odl.uniform_discr(0, 1, 3, exponent=exponent, impl=impl, device=device) + x2 = odl.uniform_discr(0, 1, 3, exponent=exponent, impl=impl, device=device) + y = odl.uniform_discr(0, 1, 4, exponent=exponent, impl=impl, device=device) assert x1 is x1 assert x1 is not x2 @@ -359,10 +370,10 @@ def test_equals_space(exponent, odl_tspace_impl): assert hash(x1) != hash(y) -def test_equals_vec(exponent, odl_tspace_impl): - impl = odl_tspace_impl - discr = odl.uniform_discr(0, 1, 3, exponent=exponent, impl=impl) - discr2 = odl.uniform_discr(0, 1, 4, exponent=exponent, impl=impl) +def test_equals_vec(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + discr = odl.uniform_discr(0, 1, 3, exponent=exponent, impl=impl, device=device) + discr2 = odl.uniform_discr(0, 1, 4, exponent=exponent, impl=impl, device=device) x1 = discr.element([1, 2, 3]) x2 = discr.element([1, 2, 3]) y = discr.element([2, 2, 3]) @@ -395,11 +406,11 @@ def _test_binary_operator(discr, function): assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr]) -def test_operators(odl_tspace_impl): - impl = odl_tspace_impl +def test_operators(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Test of all operator overloads against the corresponding NumPy # implementation - discr = odl.uniform_discr(0, 1, 10, impl=impl) + discr = odl.uniform_discr(0, 1, 10, impl=impl, device=device) # Unary operators _test_unary_operator(discr, lambda x: +x) @@ -482,29 +493,33 @@ def idiv_aliased(x): _test_unary_operator(discr, lambda x: x / x) -def test_getitem(): - discr = odl.uniform_discr(0, 1, 3) +def test_getitem(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + discr = odl.uniform_discr(0, 1, 3, impl=impl, device=device) elem = discr.element([1, 2, 3]) assert all_equal(elem, [1, 2, 3]) -def test_getslice(): - discr = odl.uniform_discr(0, 1, 3) +def test_getslice(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + discr = odl.uniform_discr(0, 1, 3, impl=impl, device=device) elem = discr.element([1, 2, 3]) - - assert isinstance(elem[:], NumpyTensor) + tspace_impl = discr.tspace.element_type + assert isinstance(elem[:], tspace_impl) assert all_equal(elem[:], [1, 2, 3]) - discr = odl.uniform_discr(0, 1, 3, dtype='complex') + discr = odl.uniform_discr(0, 1, 3, dtype=complex) + tspace_impl = discr.tspace.element_type elem = discr.element([1 + 2j, 2 - 2j, 3]) - assert isinstance(elem[:], NumpyTensor) + assert isinstance(elem[:], tspace_impl) assert all_equal(elem[:], [1 + 2j, 2 - 2j, 3]) -def test_setitem(): - discr = odl.uniform_discr(0, 1, 3) +def test_setitem(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + discr = odl.uniform_discr(0, 1, 3, impl=impl, device=device) elem = discr.element([1, 2, 3]) elem[0] = 4 elem[1] = 5 @@ -513,16 +528,19 @@ def test_setitem(): assert all_equal(elem, [4, 5, 6]) -def test_setitem_nd(): +def test_setitem_nd(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # 1D - discr = odl.uniform_discr(0, 1, 3) + discr = odl.uniform_discr(0, 1, 3, impl=impl, device=device) elem = discr.element([1, 2, 3]) + backend = discr.array_backend + elem[:] = [4, 5, 6] assert all_equal(elem, [4, 5, 6]) - elem[:] = np.array([3, 2, 1]) + elem[:] = backend.array_constructor([3, 2, 1], device=device) assert all_equal(elem, [3, 2, 1]) elem[:] = 0 @@ -531,10 +549,11 @@ def test_setitem_nd(): elem[:] = [1] assert all_equal(elem, [1, 1, 1]) - with pytest.raises(ValueError): + error = ValueError if impl =='numpy' else RuntimeError + with pytest.raises(error): elem[:] = [0, 0] # bad shape - with pytest.raises(ValueError): + with pytest.raises(error): elem[:] = [0, 0, 1, 2] # bad shape # 2D @@ -551,97 +570,92 @@ def test_setitem_nd(): [-3, -4], [-5, -6]]) - arr = np.arange(6, 12).reshape([3, 2]) + # arr = np.arange(6, 12).reshape([3, 2]) + arr = odl.arange(impl=impl, start=6, stop=12).reshape([3, 2]) elem[:] = arr assert all_equal(elem, arr) elem[:] = 0 - assert all_equal(elem, np.zeros(elem.shape)) + assert all_equal(elem, odl.zeros(impl=impl, shape=elem.shape)) elem[:] = [1] - assert all_equal(elem, np.ones(elem.shape)) + assert all_equal(elem, odl.ones(impl=impl, shape=elem.shape)) elem[:] = [0, 0] # broadcasting assignment - assert all_equal(elem, np.zeros(elem.shape)) + assert all_equal(elem,odl.zeros(impl=impl, shape=elem.shape)) with pytest.raises(ValueError): elem[:] = [0, 0, 0] # bad shape with pytest.raises(ValueError): - elem[:] = np.arange(6) # bad shape (6,) + elem[:] = odl.arange(impl=impl, start=6) # bad shape (6,) with pytest.raises(ValueError): - elem[:] = np.ones((2, 3))[..., np.newaxis] # bad shape (2, 3, 1) + elem[:] = odl.ones(impl=impl, shape=(2, 3))[..., None] # bad shape (2, 3, 1) with pytest.raises(ValueError): - arr = np.arange(6, 12).reshape([3, 2]) + arr = odl.arange(impl=impl, start=6, stop=12).reshape([3, 2]) elem[:] = arr.T # bad shape (2, 3) # nD shape = (3,) * 3 + (4,) * 3 - discr = odl.uniform_discr([0] * 6, [1] * 6, shape) + discr = odl.uniform_discr([0] * 6, [1] * 6, shape, impl=impl, device=device) size = np.prod(shape) elem = discr.element(np.zeros(shape)) - arr = np.arange(size).reshape(shape) + arr = odl.arange(impl=impl, start=size).reshape(shape) elem[:] = arr assert all_equal(elem, arr) elem[:] = 0 - assert all_equal(elem, np.zeros(elem.shape)) + assert all_equal(elem, odl.zeros(impl=impl, shape=elem.shape)) elem[:] = [1] - assert all_equal(elem, np.ones(elem.shape)) + assert all_equal(elem, odl.ones(impl=impl, shape=elem.shape)) - with pytest.raises(ValueError): + error = ValueError if impl =='numpy' else RuntimeError + with pytest.raises(error): # Reversed shape -> bad - elem[:] = np.arange(size).reshape((4,) * 3 + (3,) * 3) + elem[:] = odl.arange(impl=impl, start=size).reshape((4,) * 3 + (3,) * 3) -def test_setslice(): - discr = odl.uniform_discr(0, 1, 3) +def test_setslice(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + discr = odl.uniform_discr(0, 1, 3, impl=impl, device=device) elem = discr.element([1, 2, 3]) elem[:] = [4, 5, 6] assert all_equal(elem, [4, 5, 6]) -def test_asarray_2d(odl_elem_order): +def test_asarray_2d(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test the asarray method.""" - order = odl_elem_order - discr = odl.uniform_discr([0, 0], [1, 1], [2, 2]) + discr = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl, device=device) elem = discr.element([[1, 2], - [3, 4]], order=order) + [3, 4]]) arr = elem.asarray() assert all_equal(arr, [[1, 2], [3, 4]]) - if order is None: - assert arr.flags[discr.default_order + '_CONTIGUOUS'] - else: - assert arr.flags[order + '_CONTIGUOUS'] - + # test out parameter - out_c = np.empty([2, 2], order='C') + out_c = odl.empty(impl=impl, shape=[2, 2]) result_c = elem.asarray(out=out_c) assert result_c is out_c assert all_equal(out_c, [[1, 2], [3, 4]]) - out_f = np.empty([2, 2], order='F') - result_f = elem.asarray(out=out_f) - assert result_f is out_f - assert all_equal(out_f, [[1, 2], - [3, 4]]) - # Try wrong shape - out_wrong_shape = np.empty([2, 3]) - with pytest.raises(ValueError): + out_wrong_shape = odl.empty(impl=impl, shape=[2, 3]) + error = ValueError if impl =='numpy' else RuntimeError + with pytest.raises(error): elem.asarray(out=out_wrong_shape) -def test_transpose(): - discr = odl.uniform_discr([0, 0], [1, 1], [2, 2]) +def test_transpose(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + discr = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl, device=device) x = discr.element([[1, 2], [3, 4]]) y = discr.element([[5, 6], [7, 8]]) @@ -653,25 +667,27 @@ def test_transpose(): assert all_equal(x.T.adjoint(1.0), x) -def test_cell_sides(): +def test_cell_sides(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Non-degenerated case, should be same as cell size - discr = odl.uniform_discr([0, 0], [1, 1], [2, 2]) + discr = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl, device=device) elem = discr.element() assert all_equal(discr.cell_sides, [0.5] * 2) assert all_equal(elem.cell_sides, [0.5] * 2) # Degenerated case, uses interval size in 1-point dimensions - discr = odl.uniform_discr([0, 0], [1, 1], [2, 1]) + discr = odl.uniform_discr([0, 0], [1, 1], [2, 1], impl=impl, device=device) elem = discr.element() assert all_equal(discr.cell_sides, [0.5, 1]) assert all_equal(elem.cell_sides, [0.5, 1]) -def test_cell_volume(): +def test_cell_volume(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Non-degenerated case - discr = odl.uniform_discr([0, 0], [1, 1], [2, 2]) + discr = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl, device=device) elem = discr.element() assert discr.cell_volume == 0.25 @@ -685,12 +701,12 @@ def test_cell_volume(): assert elem.cell_volume == 0.5 -def test_astype(): - - rdiscr = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype='float64') - cdiscr = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype='complex128') - rdiscr_s = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype='float32') - cdiscr_s = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype='complex64') +def test_astype(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + rdiscr = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype='float64', impl=impl, device=device) + cdiscr = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype='complex128', impl=impl, device=device) + rdiscr_s = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype='float32', impl=impl, device=device) + cdiscr_s = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype='complex64', impl=impl, device=device) # Real assert rdiscr.astype('float32') == rdiscr_s @@ -709,345 +725,28 @@ def test_astype(): assert cdiscr.real_space == rdiscr # More exotic dtype - discr = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype=bool) + discr = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype=bool, impl=impl, device=device) as_float = discr.astype(float) - assert as_float.dtype == float + assert as_float.dtype_identifier == DTYPE_SHORTHANDS[float] assert not as_float.is_weighted as_complex = discr.astype(complex) - assert as_complex.dtype == complex + assert as_complex.dtype_identifier == DTYPE_SHORTHANDS[complex] assert not as_complex.is_weighted -def test_ufuncs(odl_tspace_impl, odl_ufunc): - """Test ufuncs in ``x.ufuncs`` against direct Numpy ufuncs.""" - impl = odl_tspace_impl - space = odl.uniform_discr([0, 0], [1, 1], (2, 3), impl=impl) - name = odl_ufunc - - # Get the ufunc from numpy as reference - npy_ufunc = getattr(np, name) - nin = npy_ufunc.nin - nout = npy_ufunc.nout - if (np.issubdtype(space.dtype, np.floating) and - name in ['bitwise_and', - 'bitwise_or', - 'bitwise_xor', - 'invert', - 'left_shift', - 'right_shift']): - # Skip integer only methods if floating point type - return - - # Create some data - arrays, elements = noise_elements(space, nin + nout) - in_arrays = arrays[:nin] - out_arrays = arrays[nin:] - data_elem = elements[0] - out_elems = elements[nin:] - - if nout == 1: - out_arr_kwargs = {'out': out_arrays[0]} - out_elem_kwargs = {'out': out_elems[0]} - elif nout > 1: - out_arr_kwargs = {'out': out_arrays[:nout]} - out_elem_kwargs = {'out': out_elems[:nout]} - - # Get function to call, using both interfaces: - # - vec.ufunc(other_args) - # - np.ufunc(vec, other_args) - elem_fun_old = getattr(data_elem.ufuncs, name) - in_elems_old = elements[1:nin] - elem_fun_new = npy_ufunc - in_elems_new = elements[:nin] - - # Out-of-place - with np.errstate(all='ignore'): # avoid pytest warnings - npy_result = npy_ufunc(*in_arrays) - odl_result_old = elem_fun_old(*in_elems_old) - assert all_almost_equal(npy_result, odl_result_old) - odl_result_new = elem_fun_new(*in_elems_new) - assert all_almost_equal(npy_result, odl_result_new) - - # Test type of output - if nout == 1: - assert isinstance(odl_result_old, space.element_type) - assert isinstance(odl_result_new, space.element_type) - elif nout > 1: - for i in range(nout): - assert isinstance(odl_result_old[i], space.element_type) - assert isinstance(odl_result_new[i], space.element_type) - - # In-place with ODL objects as `out` - with np.errstate(all='ignore'): # avoid pytest warnings - npy_result = npy_ufunc(*in_arrays, **out_arr_kwargs) - odl_result_old = elem_fun_old(*in_elems_old, **out_elem_kwargs) - assert all_almost_equal(npy_result, odl_result_old) - odl_result_new = elem_fun_new(*in_elems_new, **out_elem_kwargs) - assert all_almost_equal(npy_result, odl_result_new) - - # Check that returned stuff refers to given out - if nout == 1: - assert odl_result_old is out_elems[0] - assert odl_result_new is out_elems[0] - elif nout > 1: - for i in range(nout): - assert odl_result_old[i] is out_elems[i] - assert odl_result_new[i] is out_elems[i] - - # In-place with Numpy array as `out` for new interface - out_arrays_new = tuple(np.empty_like(arr) for arr in out_arrays) - if nout == 1: - out_arr_kwargs_new = {'out': out_arrays_new[0]} - elif nout > 1: - out_arr_kwargs_new = {'out': out_arrays_new[:nout]} - - with np.errstate(all='ignore'): # avoid pytest warnings - odl_result_arr_new = elem_fun_new(*in_elems_new, - **out_arr_kwargs_new) - assert all_almost_equal(npy_result, odl_result_arr_new) - - if nout == 1: - assert odl_result_arr_new is out_arrays_new[0] - elif nout > 1: - for i in range(nout): - assert odl_result_arr_new[i] is out_arrays_new[i] - - # In-place with data container (tensor) as `out` for new interface - out_tensors_new = tuple(space.tspace.element(np.empty_like(arr)) - for arr in out_arrays) - if nout == 1: - out_tens_kwargs_new = {'out': out_tensors_new[0]} - elif nout > 1: - out_tens_kwargs_new = {'out': out_tensors_new[:nout]} - - with np.errstate(all='ignore'): # avoid pytest warnings - odl_result_tens_new = elem_fun_new(*in_elems_new, - **out_tens_kwargs_new) - assert all_almost_equal(npy_result, odl_result_tens_new) - - if nout == 1: - assert odl_result_tens_new is out_tensors_new[0] - elif nout > 1: - for i in range(nout): - assert odl_result_tens_new[i] is out_tensors_new[i] - - # Check `ufunc.at` - indices = ([0, 0, 1], - [0, 1, 2]) - - mod_array = in_arrays[0].copy() - mod_elem = in_elems_new[0].copy() - if nout > 1: - return # currently not supported by Numpy - if nin == 1: - with np.errstate(all='ignore'): # avoid pytest warnings - npy_result = npy_ufunc.at(mod_array, indices) - odl_result = npy_ufunc.at(mod_elem, indices) - elif nin == 2: - other_array = in_arrays[1][indices] - other_elem = in_elems_new[1][indices] - with np.errstate(all='ignore'): # avoid pytest warnings - npy_result = npy_ufunc.at(mod_array, indices, other_array) - odl_result = npy_ufunc.at(mod_elem, indices, other_elem) - - assert all_almost_equal(odl_result, npy_result) - - # Most ufuncs are type-preserving and can therefore be applied iteratively - # for reductions. This is not the case for equalities or logical operators, - # which can only be iterated over an array that was boolean to start with. - boolean_ufuncs = ['equal', 'not_equal', - 'greater', 'greater_equal', - 'less', 'less_equal', - 'logical_and', 'logical_or', - 'logical_xor'] - - in_array = in_arrays[0] - in_elem = in_elems_new[0] - - # Check `ufunc.reduce` - if (nin == 2 and nout == 1 - and (odl_ufunc not in boolean_ufuncs or in_array.dtype is bool)): - # We only test along one axis since some binary ufuncs are not - # re-orderable, in which case Numpy raises a ValueError - with np.errstate(all='ignore'): # avoid pytest warnings - npy_result = npy_ufunc.reduce(in_array) - odl_result = npy_ufunc.reduce(in_elem) - assert all_almost_equal(odl_result, npy_result) - # In-place using `out` (with ODL vector and array) - out_elem = odl_result.space.element() - out_array = np.empty(odl_result.shape, - dtype=odl_result.dtype) - npy_ufunc.reduce(in_elem, out=out_elem) - npy_ufunc.reduce(in_elem, out=out_array) - assert all_almost_equal(out_elem, odl_result) - assert all_almost_equal(out_array, odl_result) - # Using a specific dtype - try: - npy_result = npy_ufunc.reduce(in_array, dtype=complex) - except TypeError: - # Numpy finds no matching loop, bail out - return - else: - odl_result = npy_ufunc.reduce(in_elem, dtype=complex) - assert odl_result.dtype == npy_result.dtype - assert all_almost_equal(odl_result, npy_result) - - # Other ufunc method use the same interface, to we don't perform - # extra tests for them. - - -def test_ufunc_corner_cases(odl_tspace_impl): - """Check if some corner cases are handled correctly.""" - impl = odl_tspace_impl - space = odl.uniform_discr([0, 0], [1, 1], (2, 3), impl=impl) - x = space.element([[-1, 0, 1], - [1, 2, 3]]) - space_no_w = odl.uniform_discr([0, 0], [1, 1], (2, 3), impl=impl, - weighting=1.0) - - # --- UFuncs with nin = 1, nout = 1 --- # - - wrong_argcount_error = ValueError if np.__version__<"1.21" else TypeError - - with pytest.raises(wrong_argcount_error): - # Too many arguments - x.__array_ufunc__(np.sin, '__call__', x, np.ones((2, 3))) - - # Check that `out=(None,)` is the same as not providing `out` - res = x.__array_ufunc__(np.sin, '__call__', x, out=(None,)) - assert all_almost_equal(res, np.sin(x.asarray())) - # Check that the result space is the same - assert res.space == space - - # Check usage of `order` argument - for order in ('C', 'F'): - res = x.__array_ufunc__(np.sin, '__call__', x, order=order) - assert all_almost_equal(res, np.sin(x.asarray())) - assert res.tensor.data.flags[order + '_CONTIGUOUS'] - - # Check usage of `dtype` argument - res = x.__array_ufunc__(np.sin, '__call__', x, dtype=complex) - assert all_almost_equal(res, np.sin(x.asarray(), dtype=complex)) - assert res.dtype == complex - - # Check propagation of weightings - y = space_no_w.one() - res = y.__array_ufunc__(np.sin, '__call__', y) - assert res.space.weighting == space_no_w.weighting - y = space_no_w.one() - res = y.__array_ufunc__(np.sin, '__call__', y) - assert res.space.weighting == space_no_w.weighting - - # --- UFuncs with nin = 2, nout = 1 --- # - - with pytest.raises(wrong_argcount_error): - # Too few arguments - x.__array_ufunc__(np.add, '__call__', x) - - with pytest.raises(ValueError): - # Too many outputs - out1, out2 = np.empty_like(x), np.empty_like(x) - x.__array_ufunc__(np.add, '__call__', x, x, out=(out1, out2)) - - # Check that npy_array += odl_vector works - arr = np.ones((2, 3)) - arr += x - assert all_almost_equal(arr, x.asarray() + 1) - # For Numpy >= 1.13, this will be equivalent - arr = np.ones((2, 3)) - res = x.__array_ufunc__(np.add, '__call__', arr, x, out=(arr,)) - assert all_almost_equal(arr, x.asarray() + 1) - assert res is arr - - # --- `accumulate` --- # - - res = x.__array_ufunc__(np.add, 'accumulate', x) - assert all_almost_equal(res, np.add.accumulate(x.asarray())) - assert res.space == space - arr = np.empty_like(x) - res = x.__array_ufunc__(np.add, 'accumulate', x, out=(arr,)) - assert all_almost_equal(arr, np.add.accumulate(x.asarray())) - assert res is arr - - # `accumulate` with other dtype - res = x.__array_ufunc__(np.add, 'accumulate', x, dtype='float32') - assert res.dtype == 'float32' - - # Error scenarios - with pytest.raises(ValueError): - # Too many `out` arguments - out1, out2 = np.empty_like(x), np.empty_like(x) - x.__array_ufunc__(np.add, 'accumulate', x, out=(out1, out2)) - - # --- `reduce` --- # - res = x.__array_ufunc__(np.add, 'reduce', x) - assert all_almost_equal(res, np.add.reduce(x.asarray())) - - with pytest.raises(ValueError): - x.__array_ufunc__(np.add, 'reduce', x, keepdims=True) - - # With `out` argument and `axis` - out_ax0 = np.empty(3) - res = x.__array_ufunc__(np.add, 'reduce', x, axis=0, out=(out_ax0,)) - assert all_almost_equal(out_ax0, np.add.reduce(x.asarray(), axis=0)) - assert res is out_ax0 - out_ax1 = odl.rn(2).element() - res = x.__array_ufunc__(np.add, 'reduce', x, axis=1, out=(out_ax1,)) - assert all_almost_equal(out_ax1, np.add.reduce(x.asarray(), axis=1)) - assert res is out_ax1 - - # Addition is re-orderable, so we can give multiple axes - res = x.__array_ufunc__(np.add, 'reduce', x, axis=(0, 1)) - assert res == pytest.approx(np.add.reduce(x.asarray(), axis=(0, 1))) - - # Constant weighting should be preserved (recomputed from cell - # volume) - y = space.one() - res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert res.space.weighting.const == pytest.approx(space.cell_sides[1]) - - # Check that `exponent` is propagated - space_1 = odl.uniform_discr([0, 0], [1, 1], (2, 3), impl=impl, - exponent=1) - z = space_1.one() - res = z.__array_ufunc__(np.add, 'reduce', z, axis=0) - assert res.space.exponent == 1 - - # --- `outer` --- # - - # Check that weightings are propagated correctly - x = y = space.one() - res = x.__array_ufunc__(np.add, 'outer', x, y) - assert isinstance(res.space.weighting, ConstWeighting) - assert res.space.weighting.const == pytest.approx(x.space.weighting.const * - y.space.weighting.const) - - x = space.one() - y = space_no_w.one() - res = x.__array_ufunc__(np.add, 'outer', x, y) - assert isinstance(res.space.weighting, ConstWeighting) - assert res.space.weighting.const == pytest.approx(x.space.weighting.const) - - x = y = space_no_w.one() - res = x.__array_ufunc__(np.add, 'outer', x, y) - assert not res.space.is_weighted - - -def test_real_imag(odl_tspace_impl, odl_elem_order): +def test_real_imag(odl_elem_order, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Check if real and imaginary parts can be read and written to.""" - impl = odl_tspace_impl order = odl_elem_order - tspace_cls = odl.space.entry_points.tensor_space_impl(impl) - for dtype in filter(odl.util.is_complex_floating_dtype, - tspace_cls.available_dtypes()): - cdiscr = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype=dtype, - impl=impl) + tspace_cls = odl.core.space.entry_points.tensor_space_impl(impl) + for dtype in COMPLEX_DTYPES: + cdiscr = odl.uniform_discr([0, 0], [1, 1], [2, 2], dtype=dtype, impl=impl, device=device) rdiscr = cdiscr.real_space # Get real and imag x = cdiscr.element([[1 - 1j, 2 - 2j], - [3 - 3j, 4 - 4j]], order=order) + [3 - 3j, 4 - 4j]]) assert x.real in rdiscr assert all_equal(x.real, [[1, 2], [3, 4]]) @@ -1056,12 +755,14 @@ def test_real_imag(odl_tspace_impl, odl_elem_order): [-3, -4]]) # Set with different data types and shapes - for assigntype in (lambda x: x, tuple, rdiscr.element): + for assigntype in [ lambda x: x, tuple, rdiscr.element ]: + # Using setters x = cdiscr.zero() - x.real = assigntype([[2, 3], + new_real = assigntype([[2, 3], [4, 5]]) + x.real = new_real assert all_equal(x.real, [[2, 3], [4, 5]]) @@ -1102,36 +803,42 @@ def test_real_imag(odl_tspace_impl, odl_elem_order): x.imag = [4, 5, 6, 7] -def test_reduction(odl_tspace_impl, odl_reduction): - impl = odl_tspace_impl +def test_reduction(odl_reduction, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs name = odl_reduction - space = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl) + space = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl, device=device) - reduction = getattr(np, name) + reduction = getattr(odl, name) + backend_reduction = getattr(space.array_namespace, name) # Create some data x_arr, x = noise_elements(space, 1) - assert reduction(x_arr) == pytest.approx(getattr(x.ufuncs, name)()) + arr_red = space.array_backend.to_cpu(backend_reduction(x_arr)) + odl_red = space.array_backend.to_cpu(reduction(x)) + assert arr_red == pytest.approx(odl_red) -def test_power(odl_tspace_impl, power): - impl = odl_tspace_impl - space = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl) - +def test_power(power, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + space = odl.uniform_discr([0, 0], [1, 1], [2, 2], impl=impl, device=device) + ns = space.array_namespace x_arr, x = noise_elements(space, 1) - x_pos_arr = np.abs(x_arr) + x_pos_arr = ns.abs(x_arr) x_neg_arr = -x_pos_arr - x_pos = np.abs(x) + x_pos = odl.abs(x) x_neg = -x_pos + power_keyword = 'power' if impl == 'numpy' else 'pow' + power_function = getattr(ns, power_keyword) + if int(power) != power: # Make input positive to get real result for y in [x_pos_arr, x_neg_arr, x_pos, x_neg]: y += 0.1 with np.errstate(invalid='ignore'): - true_pos_pow = np.power(x_pos_arr, power) - true_neg_pow = np.power(x_neg_arr, power) + true_pos_pow = power_function(x_pos_arr, power) + true_neg_pow = power_function(x_neg_arr, power) if int(power) != power and impl == 'cuda': with pytest.raises(ValueError): @@ -1141,19 +848,24 @@ def test_power(odl_tspace_impl, power): else: with np.errstate(invalid='ignore'): assert all_almost_equal(x_pos ** power, true_pos_pow) - assert all_almost_equal(x_neg ** power, true_neg_pow) + if int(power) == power: + assert all_almost_equal(x_neg ** power, true_neg_pow) x_pos **= power - x_neg **= power assert all_almost_equal(x_pos, true_pos_pow) - assert all_almost_equal(x_neg, true_neg_pow) + if int(power) == power: + x_neg **= power + assert all_almost_equal(x_neg, true_neg_pow) -def test_inner_nonuniform(): + +def test_inner_nonuniform(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Check if inner products are correct in non-uniform discretizations.""" part = odl.nonuniform_partition([0, 2, 3, 5], min_pt=0, max_pt=5) - weights = part.cell_sizes_vecs[0] - tspace = odl.rn(part.size, weighting=weights) + backend = lookup_array_backend(impl) + weights = backend.array_constructor(part.cell_sizes_vecs[0], device=device) + tspace = odl.rn(part.size, weighting=weights, impl=impl, device=device) discr = odl.DiscretizedSpace(part, tspace) one = discr.one() @@ -1165,14 +877,18 @@ def test_inner_nonuniform(): assert inner == pytest.approx(exact_inner) -def test_norm_nonuniform(): +def test_norm_nonuniform(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Check if norms are correct in non-uniform discretizations.""" part = odl.nonuniform_partition([0, 2, 3, 5], min_pt=0, max_pt=5) - weights = part.cell_sizes_vecs[0] - tspace = odl.rn(part.size, weighting=weights) + + backend = lookup_array_backend(impl) + weights = backend.array_constructor(part.cell_sizes_vecs[0], device=device) + + tspace = odl.rn(part.size, weighting=weights, impl=impl, device=device) discr = odl.DiscretizedSpace(part, tspace) - sqrt = discr.element(lambda x: np.sqrt(x)) + sqrt = discr.element(lambda x: backend.array_namespace.sqrt(x)) # Exact norm is the square root of the integral from 0 to 5 of x, # which is sqrt(5**2 / 2) @@ -1181,11 +897,12 @@ def test_norm_nonuniform(): assert norm == pytest.approx(exact_norm) -def test_norm_interval(exponent): +def test_norm_interval(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Test the function f(x) = x^2 on the interval (0, 1). Its # L^p-norm is (1 + 2*p)^(-1/p) for finite p and 1 for p=inf p = exponent - discr = odl.uniform_discr(0, 1, 10, exponent=p) + discr = odl.uniform_discr(0, 1, 10, exponent=p, impl=impl, device=device) func = discr.element(lambda x: x ** 2) if p == float('inf'): @@ -1195,12 +912,13 @@ def test_norm_interval(exponent): assert func.norm() == pytest.approx(true_norm, rel=1e-2) -def test_norm_rectangle(exponent): +def test_norm_rectangle(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Test the function f(x) = x_0^2 * x_1^3 on (0, 1) x (-1, 1). Its # L^p-norm is ((1 + 2*p) * (1 + 3 * p) / 2)^(-1/p) for finite p # and 1 for p=inf p = exponent - discr = odl.uniform_discr([0, -1], [1, 1], (20, 30), exponent=p) + discr = odl.uniform_discr([0, -1], [1, 1], (20, 30), exponent=p, impl=impl, device=device) func = discr.element(lambda x: x[0] ** 2 * x[1] ** 3) if p == float('inf'): @@ -1210,16 +928,15 @@ def test_norm_rectangle(exponent): assert func.norm() == pytest.approx(true_norm, rel=1e-2) -def test_norm_rectangle_boundary(odl_tspace_impl, exponent): +def test_norm_rectangle_boundary(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Check the constant function 1 in different situations regarding the # placement of the outermost grid points. - impl = odl_tspace_impl - dtype = 'float32' # Standard case discr = odl.uniform_discr( - [-1, -2], [1, 2], (4, 8), dtype=dtype, impl=impl, exponent=exponent + [-1, -2], [1, 2], (4, 8), dtype=dtype, impl=impl, device=device, exponent=exponent ) if exponent == float('inf'): assert discr.one().norm() == 1 @@ -1231,7 +948,7 @@ def test_norm_rectangle_boundary(odl_tspace_impl, exponent): # Nodes on the boundary (everywhere) discr = odl.uniform_discr( - [-1, -2], [1, 2], (4, 8), dtype=dtype, impl=impl, exponent=exponent, + [-1, -2], [1, 2], (4, 8), dtype=dtype, impl=impl, device=device, exponent=exponent, nodes_on_bdry=True ) if exponent == float('inf'): @@ -1244,7 +961,7 @@ def test_norm_rectangle_boundary(odl_tspace_impl, exponent): # Nodes on the boundary (selective) discr = odl.uniform_discr( - [-1, -2], [1, 2], (4, 8), dtype=dtype, impl=impl, exponent=exponent, + [-1, -2], [1, 2], (4, 8), dtype=dtype, impl=impl, device=device, exponent=exponent, nodes_on_bdry=((False, True), False) ) if exponent == float('inf'): @@ -1256,7 +973,7 @@ def test_norm_rectangle_boundary(odl_tspace_impl, exponent): ) discr = odl.uniform_discr( - [-1, -2], [1, 2], (4, 8), dtype=dtype, impl=impl, exponent=exponent, + [-1, -2], [1, 2], (4, 8), dtype=dtype, impl=impl, device=device, exponent=exponent, nodes_on_bdry=(False, (True, False)) ) if exponent == float('inf'): @@ -1274,7 +991,7 @@ def test_norm_rectangle_boundary(odl_tspace_impl, exponent): ) weight = 1.0 if exponent == float('inf') else part.cell_volume tspace = odl.rn(part.shape, dtype=dtype, impl=impl, - exponent=exponent, weighting=weight) + exponent=exponent, weighting=weight, device=device) discr = DiscretizedSpace(part, tspace) if exponent == float('inf'): @@ -1286,10 +1003,10 @@ def test_norm_rectangle_boundary(odl_tspace_impl, exponent): ) -def test_uniform_discr_fromdiscr_one_attr(): +def test_uniform_discr_fromdiscr_one_attr(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Change 1 attribute - - discr = odl.uniform_discr([0, -1], [1, 1], [10, 5]) + discr = odl.uniform_discr([0, -1], [1, 1], [10, 5], impl=impl, device=device) # csides = [0.1, 0.4] # min_pt -> translate, keep cells @@ -1331,10 +1048,11 @@ def test_uniform_discr_fromdiscr_one_attr(): assert all_almost_equal(new_discr.cell_sides, new_csides) -def test_uniform_discr_fromdiscr_two_attrs(): +def test_uniform_discr_fromdiscr_two_attrs(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Change 2 attributes -> resize and translate - discr = odl.uniform_discr([0, -1], [1, 1], [10, 5]) + discr = odl.uniform_discr([0, -1], [1, 1], [10, 5], impl=impl, device=device) # csides = [0.1, 0.4] new_min_pt = [-2, 1] @@ -1388,9 +1106,10 @@ def test_uniform_discr_fromdiscr_two_attrs(): assert all_almost_equal(new_discr.cell_sides, new_csides) -def test_uniform_discr_fromdiscr_per_axis(): +def test_uniform_discr_fromdiscr_per_axis(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs - discr = odl.uniform_discr([0, -1], [1, 1], [10, 5]) + discr = odl.uniform_discr([0, -1], [1, 1], [10, 5], impl=impl, device=device) # csides = [0.1, 0.4] new_min_pt = [-2, None] @@ -1433,4 +1152,4 @@ def test_uniform_discr_fromdiscr_per_axis(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/discr/discr_utils_test.py b/odl/test/core/discr/discr_utils_test.py similarity index 57% rename from odl/test/discr/discr_utils_test.py rename to odl/test/core/discr/discr_utils_test.py index 464af4228e9..ebc1d0301f9 100644 --- a/odl/test/discr/discr_utils_test.py +++ b/odl/test/core/discr/discr_utils_test.py @@ -16,11 +16,13 @@ import pytest import odl -from odl.discr.discr_utils import ( +from odl.core.discr.discr_utils import ( linear_interpolator, nearest_interpolator, per_axis_interpolator, point_collocation, sampling_function) -from odl.discr.grid import sparse_meshgrid -from odl.util.testutils import all_almost_equal, all_equal, simple_fixture +from odl.core.discr.grid import sparse_meshgrid +from odl.core.util.testutils import all_almost_equal, all_equal, simple_fixture + +from odl.core.array_API_support import lookup_array_backend, get_array_and_backend # --- Helper functions --- # @@ -116,10 +118,13 @@ def func_nd_bcast_dual(x, out=None): func_nd_ref = func_nd_oop -func_nd_params = [(func_nd_ref, f) - for f in [func_nd_oop, func_nd_ip, func_nd_dual]] -func_nd_params.extend([(func_nd_bcast_ref, func_nd_bcast_oop), - (func_nd_bcast_ref, func_nd_bcast_ip)]) +# func_nd_params = [(func_nd_ref, f) +# for f in [func_nd_oop, func_nd_ip, func_nd_dual]] +# func_nd_params.extend([(func_nd_bcast_ref, func_nd_bcast_oop), +# (func_nd_bcast_ref, func_nd_bcast_ip)]) + +func_nd_params = [(func_nd_ref, f) for f in [func_nd_oop]] +func_nd_params.extend([(func_nd_bcast_ref, func_nd_bcast_oop)]) func_nd = simple_fixture('func_nd', func_nd_params, fmt=' {name} = {value[1].__name__} ') @@ -154,12 +159,18 @@ def func_param_bcast_nd_ip(x, out, c): func_param_nd_ref = func_param_nd_oop +# func_param_nd_params = [(func_param_nd_ref, f) +# for f in [func_param_nd_oop, func_param_nd_ip, +# func_param_switched_nd_ip]] +# func_param_nd_params.extend( +# [(func_param_bcast_nd_ref, func_param_bcast_nd_oop), +# (func_param_bcast_nd_ref, func_param_bcast_nd_ip)]) + func_param_nd_params = [(func_param_nd_ref, f) - for f in [func_param_nd_oop, func_param_nd_ip, - func_param_switched_nd_ip]] + for f in [func_param_nd_oop]] func_param_nd_params.extend( - [(func_param_bcast_nd_ref, func_param_bcast_nd_oop), - (func_param_bcast_nd_ref, func_param_bcast_nd_ip)]) + [(func_param_bcast_nd_ref, func_param_bcast_nd_oop)]) + func_param_nd = simple_fixture('func_with_param', func_param_nd_params, fmt=' {name} = {value[1].__name__} ') @@ -193,7 +204,7 @@ def func_complex_nd_oop(x): def func_vec_nd_ref(x): - return np.array([sum(x) + 1, sum(x) - 1]) + return [sum(x) + 1, sum(x) - 1] def func_vec_nd_oop(x): @@ -229,18 +240,20 @@ def func_vec_nd_dual(x, out=None): func_nd_ip_seq.__name__ = 'func_nd_ip_seq' func_vec_nd_params = [(func_vec_nd_ref, f) - for f in [func_vec_nd_oop, func_nd_oop_seq, - func_vec_nd_ip, func_nd_ip_seq]] + for f in [func_vec_nd_oop, func_nd_oop_seq,]] func_vec_nd = simple_fixture('func_vec_nd', func_vec_nd_params, fmt=' {name} = {value[1].__name__} ') def func_vec_nd_other(x): - return np.array([sum(x) + 2, sum(x) + 3]) + x, backend = get_array_and_backend(x) + return backend.array_constructor([sum(x) + 2, sum(x) + 3], device=x.device) + def func_vec_1d_ref(x): - return np.array([x[0] * 2, x[0] + 1]) + x, backend = get_array_and_backend(x) + return backend.array_constructor([x[0] * 2, x[0] + 1], device=x.device) def func_vec_1d_oop(x): @@ -374,59 +387,73 @@ def func_tens_complex_oop(x): # --- point_collocation tests --- # -def test_point_collocation_scalar_valued(domain_ndim, out_dtype, func_nd): - """Check collocation of scalar-valued functions.""" - domain = odl.IntervalProd([0] * domain_ndim, [1] * domain_ndim) - points = _points(domain, 3) - mesh_shape = tuple(range(2, 2 + domain_ndim)) - mesh = _meshgrid(domain, mesh_shape) - point = [0.5] * domain_ndim +# def test_point_collocation_scalar_valued(domain_ndim, out_dtype, func_nd): +# """Check collocation of scalar-valued functions.""" +# domain = odl.IntervalProd([0] * domain_ndim, [1] * domain_ndim) +# points = _points(domain, 3) +# mesh_shape = tuple(range(2, 2 + domain_ndim)) +# mesh = _meshgrid(domain, mesh_shape) +# point = [0.5] * domain_ndim - func_ref, func = func_nd +# func_ref, func = func_nd - true_values_points = func_ref(points) - true_values_mesh = func_ref(mesh) - true_value_point = func_ref(point) +# true_values_points = func_ref(points) +# true_values_mesh = func_ref(mesh) +# true_value_point = func_ref(point) - sampl_func = sampling_function(func, domain, out_dtype) - collocator = partial(point_collocation, sampl_func) +# sampl_func = sampling_function(func, domain, out_dtype) +# collocator = partial(point_collocation, sampl_func) - # Out of place - result_points = collocator(points) - result_mesh = collocator(mesh) - assert all_almost_equal(result_points, true_values_points) - assert all_almost_equal(result_mesh, true_values_mesh) - assert result_points.dtype == out_dtype - assert result_mesh.dtype == out_dtype - assert result_points.flags.writeable - assert result_mesh.flags.writeable - - # In place - out_points = np.empty(3, dtype=out_dtype) - out_mesh = np.empty(mesh_shape, dtype=out_dtype) - collocator(points, out=out_points) - collocator(mesh, out=out_mesh) - assert all_almost_equal(out_points, true_values_points) - assert all_almost_equal(out_mesh, true_values_mesh) +# # Out of place +# result_points = collocator(points) +# result_mesh = collocator(mesh) +# assert all_almost_equal(result_points, true_values_points) +# assert all_almost_equal(result_mesh, true_values_mesh) +# # assert result_points.flags.writeable +# # assert result_mesh.flags.writeable - # Single point evaluation - result_point = collocator(point) - assert all_almost_equal(result_point, true_value_point) +# # In place: NOT SUPPORTED ANYMORE +# # out_points = np.empty(3) +# # out_mesh = np.empty(mesh_shape) +# # collocator(points, out=out_points) +# # collocator(mesh, out=out_mesh) + +# # assert all_almost_equal(out_points, true_values_points) +# # assert all_almost_equal(out_mesh, true_values_mesh) + +# # Single point evaluation +# result_point = collocator(point) +# assert all_almost_equal(result_point, true_value_point) -def test_point_collocation_scalar_valued_with_param(func_param_nd): +def test_point_collocation_scalar_valued_with_param(odl_impl_device_pairs): """Check collocation of scalar-valued functions with parameters.""" domain = odl.IntervalProd([0, 0], [1, 1]) points = _points(domain, 3) mesh_shape = (2, 3) mesh = _meshgrid(domain, mesh_shape) - func_ref, func = func_param_nd + impl, device = odl_impl_device_pairs + backend = lookup_array_backend(impl) - true_values_points = func_ref(points, c=2.5) - true_values_mesh = func_ref(mesh, c=2.5) + def func_ref(x, c): + if isinstance(x, (tuple, list)): + return [func_ref(x_, c) for x_ in x] + + return np.sum(x) + c - sampl_func = sampling_function(func, domain, out_dtype='float64') + def func(x, c): + if isinstance(x, (tuple, list)): + return [func(x_, c) for x_ in x] + + return backend.array_namespace.sum(x) + c + + true_values_points = backend.array_constructor( + func_ref(points, c=2.5), device=device) + true_values_mesh = backend.array_constructor( + func_ref(mesh, c=2.5), device=device) + + sampl_func = sampling_function(func, domain, out_dtype='float64', impl=impl, device=device) collocator = partial(point_collocation, sampl_func) # Out of place @@ -435,192 +462,207 @@ def test_point_collocation_scalar_valued_with_param(func_param_nd): assert all_almost_equal(result_points, true_values_points) assert all_almost_equal(result_mesh, true_values_mesh) - # In place - out_points = np.empty(3, dtype='float64') - out_mesh = np.empty(mesh_shape, dtype='float64') - collocator(points, out=out_points, c=2.5) - collocator(mesh, out=out_mesh, c=2.5) - assert all_almost_equal(out_points, true_values_points) - assert all_almost_equal(out_mesh, true_values_mesh) - # Complex output - true_values_points = func_ref(points, c=2j) - true_values_mesh = func_ref(mesh, c=2j) - - sampl_func = sampling_function(func, domain, out_dtype='complex128') + sampl_func = sampling_function(func, domain, out_dtype='complex128', impl=impl, device=device) collocator = partial(point_collocation, sampl_func) + true_values_points = backend.array_constructor( + func_ref(points, c=2j), device=device) + true_values_mesh = backend.array_constructor( + func_ref(mesh, c=2j), device=device) + result_points = collocator(points, c=2j) result_mesh = collocator(mesh, c=2j) assert all_almost_equal(result_points, true_values_points) assert all_almost_equal(result_mesh, true_values_mesh) -def test_point_collocation_vector_valued(func_vec_nd): +def test_point_collocation_vector_valued(odl_impl_device_pairs): """Check collocation of vector-valued functions.""" domain = odl.IntervalProd([0, 0], [1, 1]) points = _points(domain, 3) mesh_shape = (2, 3) mesh = _meshgrid(domain, mesh_shape) point = [0.5, 0.5] - values_points_shape = (2, 3) - values_mesh_shape = (2, 2, 3) - func_ref, func = func_vec_nd + impl, device = odl_impl_device_pairs + backend = lookup_array_backend(impl) + ns = backend.array_namespace + def func_vec_nd_ref(x): + return (np.sin(x[0])+ np.sin(x[1])+ 1, np.sin(x[0])+np.sin(x[1]) -1 ) + def func_vec_nd_torch(x): + return ( + ns.sin(x[0])+ ns.sin(x[1])+ 1, + ns.sin(x[0])+ ns.sin(x[1])- 1 + ) + + impl, device = odl_impl_device_pairs + func_ref = func_vec_nd_ref + if impl == 'pytorch': + func = func_vec_nd_torch + else: + func = func_vec_nd_ref true_values_points = func_ref(points) true_values_mesh = func_ref(mesh) true_value_point = func_ref(point) + backend = lookup_array_backend(impl) + true_values_points = backend.array_constructor(func_ref(points), device=device) + true_values_mesh = backend.array_constructor(func_ref(mesh), device=device) + true_value_point = backend.array_constructor(func_ref(point), device=device) + sampl_func = sampling_function( - func, domain, out_dtype=('float64', (2,)) + func, domain, out_dtype='float64', impl=impl, device=device ) collocator = partial(point_collocation, sampl_func) # Out of place result_points = collocator(points) result_mesh = collocator(mesh) + assert all_almost_equal(result_points, true_values_points) assert all_almost_equal(result_mesh, true_values_mesh) - assert result_points.dtype == 'float64' - assert result_mesh.dtype == 'float64' - assert result_points.flags.writeable - assert result_mesh.flags.writeable - - # In place - out_points = np.empty(values_points_shape, dtype='float64') - out_mesh = np.empty(values_mesh_shape, dtype='float64') - collocator(points, out=out_points) - collocator(mesh, out=out_mesh) - assert all_almost_equal(out_points, true_values_points) - assert all_almost_equal(out_mesh, true_values_mesh) # Single point evaluation result_point = collocator(point) assert all_almost_equal(result_point, true_value_point) - out_point = np.empty((2,), dtype='float64') - collocator(point, out=out_point) - assert all_almost_equal(out_point, true_value_point) - - -def test_point_collocation_tensor_valued(func_tens): - """Check collocation of tensor-valued functions.""" - domain = odl.IntervalProd([0, 0], [1, 1]) - points = _points(domain, 4) - mesh_shape = (4, 5) - mesh = _meshgrid(domain, mesh_shape) - point = [0.5, 0.5] - values_points_shape = (2, 3, 4) - values_mesh_shape = (2, 3, 4, 5) - value_point_shape = (2, 3) - - func_ref, func = func_tens - - true_result_points = np.array(func_ref(points)) - true_result_mesh = np.array(func_ref(mesh)) - true_result_point = np.array(func_ref(np.array(point)[:, None])).squeeze() - - sampl_func = sampling_function( - func, domain, out_dtype=('float64', (2, 3)) - ) - collocator = partial(point_collocation, sampl_func) - - result_points = collocator(points) - result_mesh = collocator(mesh) - result_point = collocator(point) - assert all_almost_equal(result_points, true_result_points) - assert all_almost_equal(result_mesh, true_result_mesh) - assert all_almost_equal(result_point, true_result_point) - assert result_points.flags.writeable - assert result_mesh.flags.writeable - assert result_point.flags.writeable - - out_points = np.empty(values_points_shape, dtype='float64') - out_mesh = np.empty(values_mesh_shape, dtype='float64') - out_point = np.empty(value_point_shape, dtype='float64') - collocator(points, out=out_points) - collocator(mesh, out=out_mesh) - collocator(point, out=out_point) - assert all_almost_equal(out_points, true_result_points) - assert all_almost_equal(out_mesh, true_result_mesh) - assert all_almost_equal(out_point, true_result_point) - - -def test_fspace_elem_eval_unusual_dtypes(): - """Check evaluation with unusual data types (int and string).""" - domain = odl.Strings(3) - strings = np.array(['aa', 'b', 'cab', 'aba']) - out_vec = np.empty((4,), dtype='int64') - - # Can be vectorized for arrays only - sampl_func = sampling_function( - lambda s: np.array([str(si).count('a') for si in s]), - domain, - out_dtype='int64' - ) - collocator = partial(point_collocation, sampl_func) - - true_values = [2, 0, 1, 2] - - assert collocator('abc') == 1 - assert all_equal(collocator(strings), true_values) - collocator(strings, out=out_vec) - assert all_equal(out_vec, true_values) - - -def test_fspace_elem_eval_vec_1d(func_vec_1d): - """Test evaluation in 1d since it's a corner case regarding shapes.""" - domain = odl.IntervalProd(0, 1) - points = _points(domain, 3) - mesh_shape = (4,) - mesh = _meshgrid(domain, mesh_shape) - point1 = 0.5 - point2 = [0.5] - values_points_shape = (2, 3) - values_mesh_shape = (2, 4) - value_point_shape = (2,) - func_ref, func = func_vec_1d - - true_result_points = np.array(func_ref(points)) - true_result_mesh = np.array(func_ref(mesh)) - true_result_point = np.array(func_ref(np.array([point1]))).squeeze() - - sampl_func = sampling_function( - func, domain, out_dtype=('float64', (2,)) - ) - collocator = partial(point_collocation, sampl_func) - - result_points = collocator(points) - result_mesh = collocator(mesh) - result_point1 = collocator(point1) - result_point2 = collocator(point2) - assert all_almost_equal(result_points, true_result_points) - assert all_almost_equal(result_mesh, true_result_mesh) - assert all_almost_equal(result_point1, true_result_point) - assert all_almost_equal(result_point2, true_result_point) - - out_points = np.empty(values_points_shape, dtype='float64') - out_mesh = np.empty(values_mesh_shape, dtype='float64') - out_point1 = np.empty(value_point_shape, dtype='float64') - out_point2 = np.empty(value_point_shape, dtype='float64') - collocator(points, out=out_points) - collocator(mesh, out=out_mesh) - collocator(point1, out=out_point1) - collocator(point2, out=out_point2) - assert all_almost_equal(out_points, true_result_points) - assert all_almost_equal(out_mesh, true_result_mesh) - assert all_almost_equal(out_point1, true_result_point) - assert all_almost_equal(out_point2, true_result_point) +# def test_point_collocation_tensor_valued(): +# """Check collocation of tensor-valued functions.""" +# domain = odl.IntervalProd([0, 0], [1, 1]) +# points = _points(domain, 4) +# mesh_shape = (4, 5) +# mesh = _meshgrid(domain, mesh_shape) +# point = [0.5, 0.5] +# values_points_shape = (2, 3, 4) +# values_mesh_shape = (2, 3, 4, 5) +# value_point_shape = (2, 3) + +# def func_tens_oop(x): +# # Output shape 2x3, input 2-dimensional. Broadcasting supported. +# return [[x[0] - x[1], 0, x[1]], +# [1, x[0], sum(x)]] + +# func_ref = func_tens_oop +# func = func_tens_oop + +# true_result_points = np.array(func_ref(points)) +# true_result_mesh = np.array(func_ref(mesh)) +# true_result_point = np.array(func_ref(np.array(point)[:, None])).squeeze() + +# sampl_func = sampling_function( +# func, domain, out_dtype='float64' +# ) +# collocator = partial(point_collocation, sampl_func) + +# result_points = collocator(points) +# result_mesh = collocator(mesh) +# result_point = collocator(point) +# assert all_almost_equal(result_points, true_result_points) +# print(result_mesh) +# print(true_result_mesh) +# assert all_almost_equal(result_mesh, true_result_mesh) +# assert all_almost_equal(result_point, true_result_point) + # assert result_points.flags.writeable + # assert result_mesh.flags.writeable + # assert result_point.flags.writeable + + # out_points = np.empty(values_points_shape, dtype='float64') + # out_mesh = np.empty(values_mesh_shape, dtype='float64') + # out_point = np.empty(value_point_shape, dtype='float64') + # collocator(points, out=out_points) + # collocator(mesh, out=out_mesh) + # collocator(point, out=out_point) + # assert all_almost_equal(out_points, true_result_points) + # assert all_almost_equal(out_mesh, true_result_mesh) + # assert all_almost_equal(out_point, true_result_point) + + +# def test_fspace_elem_eval_unusual_dtypes(): +# """Check evaluation with unusual data types (int and string).""" +# domain = odl.Strings(3) +# strings = np.array(['aa', 'b', 'cab', 'aba']) +# out_vec = np.empty((4,), dtype='int64') + +# # Can be vectorized for arrays only +# sampl_func = sampling_function( +# lambda s: np.array([str(si).count('a') for si in s]), +# domain, +# out_dtype='int64' +# ) +# collocator = partial(point_collocation, sampl_func) + +# true_values = [2, 0, 1, 2] + +# assert collocator('abc') == 1 +# assert all_equal(collocator(strings), true_values) +# collocator(strings, out=out_vec) +# assert all_equal(out_vec, true_values) + + +# def test_fspace_elem_eval_vec_1d(func_vec_1d): +# """Test evaluation in 1d since it's a corner case regarding shapes.""" +# domain = odl.IntervalProd(0, 1) +# points = _points(domain, 3) +# mesh_shape = (4,) +# mesh = _meshgrid(domain, mesh_shape) +# point1 = 0.5 +# point2 = [0.5] +# values_points_shape = (2, 3) +# values_mesh_shape = (2, 4) +# value_point_shape = (2,) + +# func_ref, func = func_vec_1d + +# true_result_points = np.array(func_ref(points)) +# true_result_mesh = np.array(func_ref(mesh)) +# true_result_point = np.array(func_ref(np.array([point1]))).squeeze() + +# sampl_func = sampling_function( +# func, domain, out_dtype=('float64', (2,)) +# ) +# collocator = partial(point_collocation, sampl_func) + +# result_points = collocator(points) +# result_mesh = collocator(mesh) +# result_point1 = collocator(point1) +# result_point2 = collocator(point2) +# assert all_almost_equal(result_points, true_result_points) +# assert all_almost_equal(result_mesh, true_result_mesh) +# assert all_almost_equal(result_point1, true_result_point) +# assert all_almost_equal(result_point2, true_result_point) + +# out_points = np.empty(values_points_shape, dtype='float64') +# out_mesh = np.empty(values_mesh_shape, dtype='float64') +# out_point1 = np.empty(value_point_shape, dtype='float64') +# out_point2 = np.empty(value_point_shape, dtype='float64') +# collocator(points, out=out_points) +# collocator(mesh, out=out_mesh) +# collocator(point1, out=out_point1) +# collocator(point2, out=out_point2) +# assert all_almost_equal(out_points, true_result_points) +# assert all_almost_equal(out_mesh, true_result_mesh) +# assert all_almost_equal(out_point1, true_result_point) +# assert all_almost_equal(out_point2, true_result_point) # --- interpolation tests --- # - -def test_nearest_interpolation_1d_complex(): +def test_nearest_interpolation_1d_complex(odl_impl_device_pairs): """Test nearest neighbor interpolation in 1d with complex values.""" coord_vecs = [[0.1, 0.3, 0.5, 0.7, 0.9]] - f = np.array([0 + 1j, 1 + 2j, 2 + 3j, 3 + 4j, 4 + 5j], dtype="complex128") + + impl, device = odl_impl_device_pairs + backend = lookup_array_backend(impl) + if impl == 'pytorch': + pytest.skip('Interpolator class not implemented for pytorch complex dtypes') + dtype = backend.available_dtypes["complex128"] + f = backend.array_constructor( + [0 + 1j, 1 + 2j, 2 + 3j, 3 + 4j, 4 + 5j], + dtype=dtype, device=device + ) + interpolator = nearest_interpolator(f, coord_vecs) # Evaluate at single point @@ -633,7 +675,7 @@ def test_nearest_interpolation_1d_complex(): # Should also work with a (1, N) array pts = pts[None, :] assert all_equal(interpolator(pts), true_arr) - out = np.empty(4, dtype='complex128') + out = backend.array_namespace.empty(4, dtype=dtype, device=device) interpolator(pts, out=out) assert all_equal(out, true_arr) # Input meshgrid, with and without output array @@ -645,13 +687,18 @@ def test_nearest_interpolation_1d_complex(): assert all_equal(out, true_mg) -def test_nearest_interpolation_2d(): +def test_nearest_interpolation_2d(odl_impl_device_pairs): """Test nearest neighbor interpolation in 2d.""" coord_vecs = [[0.125, 0.375, 0.625, 0.875], [0.25, 0.75]] - f = np.array([[0, 1], + + impl, device = odl_impl_device_pairs + backend = lookup_array_backend(impl) + dtype = backend.available_dtypes["float64"] + + f = backend.array_constructor([[0, 1], [2, 3], [4, 5], - [6, 7]], dtype="float64") + [6, 7]], dtype=dtype, device=device) interpolator = nearest_interpolator(f, coord_vecs) # Evaluate at single point @@ -662,7 +709,7 @@ def test_nearest_interpolation_2d(): [1.0, 1.0]]) true_arr = [3, 7] assert all_equal(interpolator(pts.T), true_arr) - out = np.empty(2, dtype='float64') + out = backend.array_namespace.empty(2, dtype=dtype, device=device) interpolator(pts.T, out=out) assert all_equal(out, true_arr) # Input meshgrid, with and without output array @@ -671,46 +718,22 @@ def test_nearest_interpolation_2d(): true_mg = [[2, 3], [6, 7]] assert all_equal(interpolator(mg), true_mg) - out = np.empty((2, 2), dtype='float64') + out = backend.array_namespace.empty((2, 2), dtype=dtype, device=device) interpolator(mg, out=out) assert all_equal(out, true_mg) -def test_nearest_interpolation_2d_string(): - """Test nearest neighbor interpolation in 2d with string values.""" - coord_vecs = [[0.125, 0.375, 0.625, 0.875], [0.25, 0.75]] - f = np.array([['m', 'y'], - ['s', 't'], - ['r', 'i'], - ['n', 'g']], dtype='U1') - interpolator = nearest_interpolator(f, coord_vecs) - - # Evaluate at single point - val = interpolator([0.3, 0.6]) # closest to index (1, 1) -> 3 - assert val == u't' - # Input array, with and without output array - pts = np.array([[0.3, 0.6], - [1.0, 1.0]]) - true_arr = np.array(['t', 'g'], dtype='U1') - assert all_equal(interpolator(pts.T), true_arr) - out = np.empty(2, dtype='U1') - interpolator(pts.T, out=out) - assert all_equal(out, true_arr) - # Input meshgrid, with and without output array - mg = sparse_meshgrid([0.3, 1.0], [0.4, 1.0]) - # Indices: (1, 3) x (0, 1) - true_mg = np.array([['s', 't'], - ['n', 'g']], dtype='U1') - assert all_equal(interpolator(mg), true_mg) - out = np.empty((2, 2), dtype='U1') - interpolator(mg, out=out) - assert all_equal(out, true_mg) - - -def test_linear_interpolation_1d(): +def test_linear_interpolation_1d(odl_impl_device_pairs): """Test linear interpolation in 1d.""" coord_vecs = [[0.1, 0.3, 0.5, 0.7, 0.9]] - f = np.array([1, 2, 3, 4, 5], dtype="float64") + impl, device = odl_impl_device_pairs + backend = lookup_array_backend(impl) + dtype = backend.available_dtypes["float64"] + + f = backend.array_constructor( + [1, 2, 3, 4, 5], dtype=dtype, device=device + ) + interpolator = linear_interpolator(f, coord_vecs) # Evaluate at single point @@ -720,17 +743,25 @@ def test_linear_interpolation_1d(): # Input array, with and without output array pts = np.array([0.4, 0.0, 0.65, 0.95]) - true_arr = [2.5, 0.5, 3.75, 3.75] + true_arr = backend.array_constructor([2.5, 0.5, 3.75, 3.75], dtype=dtype, device=device) assert all_almost_equal(interpolator(pts), true_arr) -def test_linear_interpolation_2d(): +def test_linear_interpolation_2d(odl_impl_device_pairs): """Test linear interpolation in 2d.""" coord_vecs = [[0.125, 0.375, 0.625, 0.875], [0.25, 0.75]] - f = np.array([[1, 2], - [3, 4], - [5, 6], - [7, 8]], dtype='float64') + + impl, device = odl_impl_device_pairs + backend = lookup_array_backend(impl) + dtype = backend.available_dtypes["float64"] + + f = backend.array_constructor( + [[1, 2], + [3, 4], + [5, 6], + [7, 8]], dtype=dtype, device=device + ) + interpolator = linear_interpolator(f, coord_vecs) # Evaluate at single point @@ -743,6 +774,8 @@ def test_linear_interpolation_2d(): + l1 * (1 - l2) * f[1, 0] + l1 * l2 * f[1, 1] ) + if impl == 'pytorch': + true_val = true_val.detach().cpu() assert val == pytest.approx(true_val) # Input array, with and without output array @@ -766,7 +799,7 @@ def test_linear_interpolation_2d(): true_arr = [true_val_1, true_val_2, true_val_3] assert all_equal(interpolator(pts.T), true_arr) - out = np.empty(3, dtype='float64') + out = backend.array_namespace.empty(3, dtype=dtype, device=device) interpolator(pts.T, out=out) assert all_equal(out, true_arr) @@ -795,19 +828,25 @@ def test_linear_interpolation_2d(): true_mg = [[true_val_11, true_val_12], [true_val_21, true_val_22]] assert all_equal(interpolator(mg), true_mg) - out = np.empty((2, 2), dtype='float64') + out = backend.array_namespace.empty((2, 2), dtype=dtype, device=device) interpolator(mg, out=out) assert all_equal(out, true_mg) -def test_per_axis_interpolation(): +def test_per_axis_interpolation(odl_impl_device_pairs): """Test different interpolation schemes per axis.""" coord_vecs = [[0.125, 0.375, 0.625, 0.875], [0.25, 0.75]] interp = ['linear', 'nearest'] - f = np.array([[1, 2], - [3, 4], - [5, 6], - [7, 8]], dtype='float64') + impl, device = odl_impl_device_pairs + backend = lookup_array_backend(impl) + dtype = backend.available_dtypes["float64"] + + f = backend.array_constructor( + [[1, 2], + [3, 4], + [5, 6], + [7, 8]], dtype=dtype, device=device + ) interpolator = per_axis_interpolator(f, coord_vecs, interp) # Evaluate at single point @@ -815,6 +854,8 @@ def test_per_axis_interpolation(): l1 = (0.3 - 0.125) / (0.375 - 0.125) # 0.5 equally far from both neighbors -> NN chooses 0.75 true_val = (1 - l1) * f[0, 1] + l1 * f[1, 1] + if impl == 'pytorch': + true_val = true_val.detach().cpu() assert val == pytest.approx(true_val) # Input array, with and without output array @@ -830,7 +871,7 @@ def test_per_axis_interpolation(): true_arr = [true_val_1, true_val_2, true_val_3] assert all_equal(interpolator(pts.T), true_arr) - out = np.empty(3, dtype='float64') + out = backend.array_namespace.empty(3, dtype=dtype, device=device) interpolator(pts.T, out=out) assert all_equal(out, true_arr) @@ -846,20 +887,26 @@ def test_per_axis_interpolation(): true_mg = [[true_val_11, true_val_12], [true_val_21, true_val_22]] assert all_equal(interpolator(mg), true_mg) - out = np.empty((2, 2), dtype='float64') + out = backend.array_namespace.empty((2, 2), dtype=dtype, device=device) interpolator(mg, out=out) assert all_equal(out, true_mg) -def test_collocation_interpolation_identity(): +def test_collocation_interpolation_identity(odl_impl_device_pairs): """Check if collocation is left-inverse to interpolation.""" # Interpolation followed by collocation on the same grid should be # the identity coord_vecs = [[0.125, 0.375, 0.625, 0.875], [0.25, 0.75]] - f = np.array([[1, 2], - [3, 4], - [5, 6], - [7, 8]], dtype='float64') + impl, device = odl_impl_device_pairs + backend = lookup_array_backend(impl) + dtype = backend.available_dtypes["float64"] + + f = backend.array_constructor( + [[1, 2], + [3, 4], + [5, 6], + [7, 8]], dtype=dtype, device=device + ) interpolators = [ nearest_interpolator(f, coord_vecs), linear_interpolator(f, coord_vecs), @@ -873,4 +920,4 @@ def test_collocation_interpolation_identity(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/discr/grid_test.py b/odl/test/core/discr/grid_test.py similarity index 99% rename from odl/test/discr/grid_test.py rename to odl/test/core/discr/grid_test.py index e91827038ba..68cb67ab90d 100644 --- a/odl/test/discr/grid_test.py +++ b/odl/test/core/discr/grid_test.py @@ -11,8 +11,8 @@ import numpy as np import odl -from odl.discr.grid import RectGrid, uniform_grid, sparse_meshgrid -from odl.util.testutils import all_equal +from odl.core.discr.grid import RectGrid, uniform_grid, sparse_meshgrid +from odl.core.util.testutils import all_equal # ---- RectGrid ---- # @@ -944,4 +944,4 @@ def test_sparse_meshgrid(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/discr/partition_test.py b/odl/test/core/discr/partition_test.py similarity index 99% rename from odl/test/discr/partition_test.py rename to odl/test/core/discr/partition_test.py index 333c272fb15..4c4360060de 100644 --- a/odl/test/discr/partition_test.py +++ b/odl/test/core/discr/partition_test.py @@ -11,7 +11,7 @@ import numpy as np import odl -from odl.util.testutils import all_equal, all_almost_equal +from odl.core.util.testutils import all_equal, all_almost_equal # ---- RectPartition ---- # @@ -498,4 +498,4 @@ def test_uniform_partition(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/operator/operator_test.py b/odl/test/core/operator/operator_test.py similarity index 68% rename from odl/test/operator/operator_test.py rename to odl/test/core/operator/operator_test.py index df7c771a97d..5f2f9f90c26 100644 --- a/odl/test/operator/operator_test.py +++ b/odl/test/core/operator/operator_test.py @@ -20,10 +20,12 @@ OperatorComp, OperatorLeftScalarMult, OperatorLeftVectorMult, OperatorRightScalarMult, OperatorRightVectorMult, OperatorSum, OpRangeError, OpTypeError) -from odl.operator.operator import _dispatch_call_args, _function_signature -from odl.util.testutils import ( +from odl.core.operator.operator import _dispatch_call_args, _function_signature +from odl.core.util.testutils import ( all_almost_equal, noise_element, noise_elements, simple_fixture) +from odl.core.array_API_support.utils import get_array_and_backend, lookup_array_backend + try: getargspec = inspect.getfullargspec except AttributeError: @@ -45,19 +47,26 @@ class MultiplyAndSquareOp(Operator): """Example of a nonlinear operator, x --> (mat*x)**2.""" def __init__(self, matrix, domain=None, range=None): - dom = (odl.rn(matrix.shape[1]) + matrix, backend = get_array_and_backend(matrix) + + dom = (odl.rn(matrix.shape[1], impl=backend.impl, device=str(matrix.device), dtype=matrix.dtype) if domain is None else domain) - ran = (odl.rn(matrix.shape[0]) + + ran = (odl.rn(matrix.shape[0], impl=backend.impl, device=str(matrix.device), dtype=matrix.dtype) if range is None else range) super(MultiplyAndSquareOp, self).__init__(dom, ran) self.matrix = matrix def _call(self, x, out=None): + out_of_place = False if out is None: + out_of_place = True out = self.range.element() - out[:] = np.dot(self.matrix, x.data) + out[:] = self.matrix @ x.data out **= 2 + if out_of_place: + return out def derivative(self, x): return 2 * odl.MatrixOperator(self.matrix) @@ -65,10 +74,12 @@ def derivative(self, x): def __str__(self): return "MaS: " + str(self.matrix) + " ** 2" - def mult_sq_np(mat, x): """NumPy reference implementation of MultiplyAndSquareOp.""" - return np.dot(mat, x) ** 2 + mat, backend_mat = get_array_and_backend(mat) + x, backend_x = get_array_and_backend(x) + assert backend_mat == backend_x + return (x @ mat.T) ** 2 def check_call(operator, x, expected): @@ -90,30 +101,34 @@ def check_call(operator, x, expected): # --- Unit tests --- # - -def test_operator_call(dom_eq_ran): - """Check operator evaluation against NumPy reference.""" +@pytest.fixture(scope="module", ids=['True', 'False'], params=[True, False]) +def dom_eq_ran_mat(request, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + dom_eq_ran = request if dom_eq_ran: - mat = np.random.rand(3, 3) - op = MultiplyAndSquareOp(mat) - assert op.domain == op.range + shape = (3,3) else: - mat = np.random.rand(4, 3) - op = MultiplyAndSquareOp(mat) + shape = (3,4) + space = odl.rn(shape, impl=impl, device=device) + mat, _ = noise_elements(space) + return mat +def test_operator_call__(dom_eq_ran_mat): + """Check operator evaluation against NumPy reference.""" + op = MultiplyAndSquareOp(dom_eq_ran_mat) xarr, x = noise_elements(op.domain) - assert all_almost_equal(op(x), mult_sq_np(mat, xarr)) - + assert all_almost_equal(op(x), mult_sq_np(dom_eq_ran_mat, xarr)) -def test_operator_call_in_place_wrong_return(): +def test_operator_call_in_place_wrong_return(odl_impl_device_pairs): """Test that operator with out parameter actually returns out.""" + impl, device = odl_impl_device_pairs class BadInplaceOperator(odl.Operator): def _call(self, x, out): # badly implemented operator out = 42 return out - space = odl.rn(3) + space = odl.rn(3, impl=impl, device=device) op = BadInplaceOperator(space, space) with pytest.raises(ValueError): @@ -124,15 +139,10 @@ def _call(self, x, out): op(space.zero(), out=out) -def test_operator_sum(dom_eq_ran): +def test_operator_sum(dom_eq_ran_mat): """Check operator sum against NumPy reference.""" - if dom_eq_ran: - mat1 = np.random.rand(3, 3) - mat2 = np.random.rand(3, 3) - else: - mat1 = np.random.rand(4, 3) - mat2 = np.random.rand(4, 3) - + mat1 = dom_eq_ran_mat + mat2 = dom_eq_ran_mat + 0.5 op1 = MultiplyAndSquareOp(mat1) op2 = MultiplyAndSquareOp(mat2) xarr, x = noise_elements(op1.domain) @@ -156,12 +166,9 @@ def test_operator_sum(dom_eq_ran): OperatorSum(op1, op_wrong_ran) -def test_operator_scaling(dom_eq_ran): +def test_operator_scaling(dom_eq_ran_mat): """Check operator scaling against NumPy reference.""" - if dom_eq_ran: - mat = np.random.rand(3, 3) - else: - mat = np.random.rand(4, 3) + mat = dom_eq_ran_mat op = MultiplyAndSquareOp(mat) xarr, x = noise_elements(op.domain) @@ -200,39 +207,35 @@ def test_operator_scaling(dom_eq_ran): wrongscalar * op -def test_operator_vector_mult(dom_eq_ran): +def test_operator_vector_mult(dom_eq_ran_mat): """Check operator-vector multiplication against NumPy reference.""" - if dom_eq_ran: - mat = np.random.rand(3, 3) - else: - mat = np.random.rand(4, 3) + mat = dom_eq_ran_mat op = MultiplyAndSquareOp(mat) right = op.domain.element(np.arange(op.domain.size)) left = op.range.element(np.arange(op.range.size)) xarr, x = noise_elements(op.domain) + right_as_array = right.asarray() + left_as_array = left.asarray() + rmult_op = OperatorRightVectorMult(op, right) lmult_op = OperatorLeftVectorMult(op, left) assert not rmult_op.is_linear assert not lmult_op.is_linear - check_call(rmult_op, x, mult_sq_np(mat, right * xarr)) - check_call(lmult_op, x, left * mult_sq_np(mat, xarr)) + check_call(rmult_op, x, mult_sq_np(mat, right_as_array * xarr)) + check_call(lmult_op, x, left_as_array * mult_sq_np(mat, xarr)) # Using operator overloading - check_call(op * right, x, mult_sq_np(mat, right * xarr)) - check_call(left * op, x, left * mult_sq_np(mat, xarr)) + check_call(op @ right, x, mult_sq_np(mat, right_as_array * xarr)) + check_call(left @ op, x, left_as_array * mult_sq_np(mat, xarr)) -def test_operator_composition(dom_eq_ran): +def test_operator_composition(dom_eq_ran_mat): """Check operator composition against NumPy reference.""" - if dom_eq_ran: - mat1 = np.random.rand(3, 3) - mat2 = np.random.rand(3, 3) - else: - mat1 = np.random.rand(5, 4) - mat2 = np.random.rand(4, 3) + mat1 = dom_eq_ran_mat + mat2 = dom_eq_ran_mat + 0.5 op1 = MultiplyAndSquareOp(mat1) op2 = MultiplyAndSquareOp(mat2) @@ -248,41 +251,34 @@ def test_operator_composition(dom_eq_ran): OperatorComp(op2, op1) -def test_linear_operator_call(dom_eq_ran): +def test_linear_operator_call(dom_eq_ran_mat): """Check call of a linear operator against NumPy, and ``is_linear``.""" - if dom_eq_ran: - mat = np.random.rand(3, 3) - else: - mat = np.random.rand(4, 3) + mat = dom_eq_ran_mat op = MatrixOperator(mat) + _, backend = get_array_and_backend(mat) assert op.is_linear - xarr, x = noise_elements(op.domain) - check_call(op, x, np.dot(mat, xarr)) + check_call(op, x, backend.array_namespace.matmul(mat, xarr)) -def test_linear_operator_adjoint(dom_eq_ran): + +def test_linear_operator_adjoint(dom_eq_ran_mat): """Check adjoint of a linear operator against NumPy.""" - if dom_eq_ran: - mat = np.random.rand(3, 3) - else: - mat = np.random.rand(4, 3) + mat = dom_eq_ran_mat op = MatrixOperator(mat) + _, backend = get_array_and_backend(mat) xarr, x = noise_elements(op.range) - check_call(op.adjoint, x, np.dot(mat.T, xarr)) + check_call(op.adjoint, x, backend.array_namespace.matmul(mat.T, xarr)) -def test_linear_operator_addition(dom_eq_ran): +def test_linear_operator_addition(dom_eq_ran_mat): """Check call and adjoint of a sum of linear operators.""" - if dom_eq_ran: - mat1 = np.random.rand(3, 3) - mat2 = np.random.rand(3, 3) - else: - mat1 = np.random.rand(4, 3) - mat2 = np.random.rand(4, 3) - + mat1 = dom_eq_ran_mat + mat2 = dom_eq_ran_mat + 0.5 + _, backend = get_array_and_backend(mat1) + ns = backend.array_namespace op1 = MatrixOperator(mat1) op2 = MatrixOperator(mat2) xarr, x = noise_elements(op1.domain) @@ -292,26 +288,26 @@ def test_linear_operator_addition(dom_eq_ran): sum_op = OperatorSum(op1, op2) assert sum_op.is_linear assert sum_op.adjoint.is_linear - check_call(sum_op, x, np.dot(mat1, xarr) + np.dot(mat2, xarr)) - check_call(sum_op.adjoint, y, np.dot(mat1.T, yarr) + np.dot(mat2.T, yarr)) + check_call(sum_op, x, ns.matmul(mat1, xarr) + ns.matmul(mat2, xarr)) + check_call(sum_op.adjoint, y, ns.matmul(mat1.T, yarr) + ns.matmul(mat2.T, yarr)) # Using operator overloading - check_call(op1 + op2, x, np.dot(mat1, xarr) + np.dot(mat2, xarr)) + check_call(op1 + op2, x, ns.matmul(mat1, xarr) + ns.matmul(mat2, xarr)) check_call((op1 + op2).adjoint, - y, np.dot(mat1.T, yarr) + np.dot(mat2.T, yarr)) + y, ns.matmul(mat1.T, yarr) + ns.matmul(mat2.T, yarr)) -def test_linear_operator_scaling(dom_eq_ran): +def test_linear_operator_scaling(dom_eq_ran_mat): """Check call and adjoint of a scaled linear operator.""" - if dom_eq_ran: - mat = np.random.rand(3, 3) - else: - mat = np.random.rand(4, 3) + mat = dom_eq_ran_mat op = MatrixOperator(mat) + _, backend = get_array_and_backend(mat) + ns = backend.array_namespace + xarr, x = noise_elements(op.domain) yarr, y = noise_elements(op.range) - + # Test a range of scalars (scalar multiplication could implement # optimizations for (-1, 0, 1). scalars = [-1.432, -1, 0, 1, 3.14] @@ -320,24 +316,23 @@ def test_linear_operator_scaling(dom_eq_ran): scaled_op = OperatorRightScalarMult(op, scalar) assert scaled_op.is_linear assert scaled_op.adjoint.is_linear - check_call(scaled_op, x, scalar * np.dot(mat, xarr)) - check_call(scaled_op.adjoint, y, scalar * np.dot(mat.T, yarr)) + check_call(scaled_op, x, scalar * ns.matmul(mat, xarr)) + check_call(scaled_op.adjoint, y, scalar * ns.matmul(mat.T, yarr)) # Using operator overloading - check_call(scalar * op, x, scalar * np.dot(mat, xarr)) - check_call(op * scalar, x, scalar * np.dot(mat, xarr)) - check_call((scalar * op).adjoint, y, scalar * np.dot(mat.T, yarr)) - check_call((op * scalar).adjoint, y, scalar * np.dot(mat.T, yarr)) + check_call(scalar * op, x, scalar * ns.matmul(mat, xarr)) + check_call(op * scalar, x, scalar * ns.matmul(mat, xarr)) + check_call((scalar * op).adjoint, y, scalar * ns.matmul(mat.T, yarr)) + check_call((op * scalar).adjoint, y, scalar * ns.matmul(mat.T, yarr)) -def test_linear_right_vector_mult(dom_eq_ran): +def test_linear_right_vector_mult(dom_eq_ran_mat): """Check call and adjoint of linear operator x vector.""" - if dom_eq_ran: - mat = np.random.rand(3, 3) - else: - mat = np.random.rand(4, 3) + mat = dom_eq_ran_mat op = MatrixOperator(mat) + _, backend = get_array_and_backend(mat) + ns = backend.array_namespace (xarr, mul_arr), (x, mul) = noise_elements(op.domain, n=2) yarr, y = noise_elements(op.range) @@ -345,22 +340,21 @@ def test_linear_right_vector_mult(dom_eq_ran): rmult_op = OperatorRightVectorMult(op, mul) assert rmult_op.is_linear assert rmult_op.adjoint.is_linear - check_call(rmult_op, x, np.dot(mat, mul_arr * xarr)) - check_call(rmult_op.adjoint, y, mul_arr * np.dot(mat.T, yarr)) + check_call(rmult_op, x, ns.matmul(mat, mul_arr * xarr)) + check_call(rmult_op.adjoint, y, mul_arr * ns.matmul(mat.T, yarr)) # Using operator overloading - check_call(op * mul, x, np.dot(mat, mul_arr * xarr)) - check_call((op * mul).adjoint, y, mul_arr * np.dot(mat.T, yarr)) + check_call(op * mul, x, ns.matmul(mat, mul_arr * xarr)) + check_call((op * mul).adjoint, y, mul_arr * ns.matmul(mat.T, yarr)) -def test_linear_left_vector_mult(dom_eq_ran): +def test_linear_left_vector_mult(dom_eq_ran_mat): """Check call and adjoint of vector x linear operator.""" - if dom_eq_ran: - mat = np.random.rand(3, 3) - else: - mat = np.random.rand(4, 3) - + mat = dom_eq_ran_mat + op = MatrixOperator(mat) + _, backend = get_array_and_backend(mat) + ns = backend.array_namespace xarr, x = noise_elements(op.domain) (yarr, mul_arr), (y, mul) = noise_elements(op.range, n=2) @@ -368,25 +362,23 @@ def test_linear_left_vector_mult(dom_eq_ran): lmult_op = OperatorLeftVectorMult(op, mul) assert lmult_op.is_linear assert lmult_op.adjoint.is_linear - check_call(lmult_op, x, mul_arr * np.dot(mat, xarr)) - check_call(lmult_op.adjoint, y, np.dot(mat.T, mul_arr * yarr)) + check_call(lmult_op, x, mul_arr * ns.matmul(mat, xarr)) + check_call(lmult_op.adjoint, y, ns.matmul(mat.T, mul_arr * yarr)) # Using operator overloading - check_call(mul * op, x, mul_arr * np.dot(mat, xarr)) - check_call((mul * op).adjoint, y, np.dot(mat.T, mul_arr * yarr)) + check_call(mul @ op, x, mul_arr * ns.matmul(mat, xarr)) + check_call((mul @ op).adjoint, y, ns.matmul(mat.T, mul_arr * yarr)) -def test_linear_operator_composition(dom_eq_ran): +def test_linear_operator_composition(dom_eq_ran_mat): """Check call and adjoint of linear operator composition.""" - if dom_eq_ran: - mat1 = np.random.rand(3, 3) - mat2 = np.random.rand(3, 3) - else: - mat1 = np.random.rand(4, 3) - mat2 = np.random.rand(3, 4) + mat1 = dom_eq_ran_mat + mat2 = dom_eq_ran_mat + 0.5 op1 = MatrixOperator(mat1) op2 = MatrixOperator(mat2) + _, backend = get_array_and_backend(mat1) + ns = backend.array_namespace xarr, x = noise_elements(op2.domain) yarr, y = noise_elements(op1.range) @@ -394,19 +386,21 @@ def test_linear_operator_composition(dom_eq_ran): comp_op = OperatorComp(op1, op2) assert comp_op.is_linear assert comp_op.adjoint.is_linear - check_call(comp_op, x, np.dot(mat1, np.dot(mat2, xarr))) - check_call(comp_op.adjoint, y, np.dot(mat2.T, np.dot(mat1.T, yarr))) + check_call(comp_op, x, ns.matmul(mat1, ns.matmul(mat2, xarr))) + check_call(comp_op.adjoint, y, ns.matmul(mat2.T, ns.matmul(mat1.T, yarr))) # Using operator overloading - check_call(op1 * op2, x, np.dot(mat1, np.dot(mat2, xarr))) - check_call((op1 * op2).adjoint, y, np.dot(mat2.T, np.dot(mat1.T, yarr))) + check_call(op1 * op2, x, ns.matmul(mat1, ns.matmul(mat2, xarr))) + check_call((op1 * op2).adjoint, y, ns.matmul(mat2.T, ns.matmul(mat1.T, yarr))) -def test_type_errors(): - r3 = odl.rn(3) - r4 = odl.rn(4) +def test_type_errors(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device = device) + r4 = odl.rn(4, impl=impl, device = device) + space = odl.rn((3,3), impl=impl, device=device) - op = MatrixOperator(np.random.rand(3, 3)) + op = MatrixOperator(space.element()) r3_elem1 = r3.zero() r3_elem2 = r3.zero() r4_elem1 = r4.zero() @@ -442,18 +436,12 @@ def test_type_errors(): op.adjoint(r4_elem1, r4_elem2) -def test_arithmetic(dom_eq_ran): +def test_arithmetic(dom_eq_ran_mat): """Test that all standard arithmetic works.""" - if dom_eq_ran: - mat1 = np.random.rand(3, 3) - mat2 = np.random.rand(3, 3) - mat3 = np.random.rand(3, 3) - mat4 = np.random.rand(3, 3) - else: - mat1 = np.random.rand(4, 3) - mat2 = np.random.rand(4, 3) - mat3 = np.random.rand(3, 3) - mat4 = np.random.rand(4, 4) + mat1 = dom_eq_ran_mat + mat2 = dom_eq_ran_mat + 1 + mat3 = dom_eq_ran_mat + 2 + mat4 = dom_eq_ran_mat + 3 op = MultiplyAndSquareOp(mat1) op2 = MultiplyAndSquareOp(mat2) @@ -474,10 +462,10 @@ def test_arithmetic(dom_eq_ran): check_call((op * scalar) * scalar, x, op(scalar**2 * x)) check_call(op + op2, x, op(x) + op2(x)) check_call(op - op2, x, op(x) - op2(x)) - check_call(op * op3, x, op(op3(x))) - check_call(op4 * op, x, op4(op(x))) - check_call(z * op, x, z * op(x)) - check_call(z * (z * op), x, (z * z) * op(x)) + check_call(op @ op3, x, op(op3(x))) + check_call(op4 @ op, x, op4(op(x))) + check_call(z @ op, x, z * op(x)) + check_call(z @ (z @ op), x, (z * z)* op(x)) check_call(op * y, x, op(x * y)) check_call((op * y) * y, x, op((y * y) * x)) check_call(op + z, x, op(x) + z) @@ -490,14 +478,10 @@ def test_arithmetic(dom_eq_ran): check_call(scalar - op, x, scalar - op(x)) -def test_operator_pointwise_product(): +def test_operator_pointwise_product(dom_eq_ran_mat): """Check call and adjoint of operator pointwise multiplication.""" - if dom_eq_ran: - mat1 = np.random.rand(3, 3) - mat2 = np.random.rand(3, 3) - else: - mat1 = np.random.rand(4, 3) - mat2 = np.random.rand(4, 3) + mat1 = dom_eq_ran_mat + mat2 = dom_eq_ran_mat + 1 op1 = MultiplyAndSquareOp(mat1) op2 = MultiplyAndSquareOp(mat2) @@ -535,7 +519,7 @@ def __init__(self, domain): super(SumFunctional, self).__init__(domain, domain.field, linear=True) def _call(self, x): - return np.sum(x) + return odl.sum(x) @property def adjoint(self): @@ -557,8 +541,9 @@ def adjoint(self): return SumFunctional(self.range) -def test_functional(): - r3 = odl.rn(3) +def test_functional(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) x = r3.element([1, 2, 3]) op = SumFunctional(r3) @@ -566,8 +551,9 @@ def test_functional(): assert op(x) == 6 -def test_functional_out(): - r3 = odl.rn(3) +def test_functional_out(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) x = r3.element([1, 2, 3]) op = SumFunctional(r3) @@ -578,8 +564,9 @@ def test_functional_out(): op(x, out=out) -def test_functional_adjoint(): - r3 = odl.rn(3) +def test_functional_adjoint(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) op = SumFunctional(r3) @@ -589,8 +576,9 @@ def test_functional_adjoint(): assert op.adjoint.adjoint(x) == op(x) -def test_functional_addition(): - r3 = odl.rn(3) +def test_functional_addition(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) op = SumFunctional(r3) op2 = SumFunctional(r3) @@ -603,19 +591,20 @@ def test_functional_addition(): assert C.is_linear assert C.adjoint.is_linear - assert C(x) == 2 * np.sum(x) + assert C(x) == 2 * odl.sum(x) # Test adjoint - assert all_almost_equal(C.adjoint(y), y * 2 * np.ones(3)) + assert all_almost_equal(C.adjoint(y), y * 2 * r3.one()) assert all_almost_equal(C.adjoint.adjoint(x), C(x)) # Using operator overloading - assert (op + op2)(x) == 2 * np.sum(x) - assert all_almost_equal((op + op2).adjoint(y), y * 2 * np.ones(3)) + assert (op + op2)(x) == 2 * odl.sum(x) + assert all_almost_equal((op + op2).adjoint(y), y * 2 * r3.one()) -def test_functional_scale(): - r3 = odl.rn(3) +def test_functional_scale(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) op = SumFunctional(r3) x = r3.element([1, 2, 3]) @@ -630,22 +619,23 @@ def test_functional_scale(): assert C.is_linear assert C.adjoint.is_linear - assert C(x) == scalar * np.sum(x) - assert all_almost_equal(C.adjoint(y), scalar * y * np.ones(3)) + assert C(x) == scalar * odl.sum(x) + assert all_almost_equal(C.adjoint(y), scalar * y * r3.one()) assert all_almost_equal(C.adjoint.adjoint(x), C(x)) # Using operator overloading - assert (scalar * op)(x) == scalar * np.sum(x) - assert (op * scalar)(x) == scalar * np.sum(x) + assert (scalar * op)(x) == scalar * odl.sum(x) + assert (op * scalar)(x) == scalar * odl.sum(x) assert all_almost_equal((scalar * op).adjoint(y), - scalar * y * np.ones(3)) + scalar * y * r3.one()) assert all_almost_equal((op * scalar).adjoint(y), - scalar * y * np.ones(3)) + scalar * y * r3.one()) -def test_functional_left_vector_mult(): - r3 = odl.rn(3) - r4 = odl.rn(4) +def test_functional_left_vector_mult(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) + r4 = odl.rn(4, impl=impl, device=device) op = SumFunctional(r3) x = r3.element([1, 2, 3]) @@ -658,19 +648,20 @@ def test_functional_left_vector_mult(): assert C.is_linear assert C.adjoint.is_linear - assert all_almost_equal(C(x), y * np.sum(x)) - assert all_almost_equal(C.adjoint(y), y.inner(y) * np.ones(3)) + assert all_almost_equal(C(x), y * odl.sum(x)) + assert all_almost_equal(C.adjoint(y), y.inner(y) * r3.one()) assert all_almost_equal(C.adjoint.adjoint(x), C(x)) # Using operator overloading - assert all_almost_equal((y * op)(x), - y * np.sum(x)) - assert all_almost_equal((y * op).adjoint(y), - y.inner(y) * np.ones(3)) + assert all_almost_equal((y @ op)(x), + y * odl.sum(x)) + assert all_almost_equal((y @ op).adjoint(y), + y.inner(y) * r3.one()) -def test_functional_right_vector_mult(): - r3 = odl.rn(3) +def test_functional_right_vector_mult(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) op = SumFunctional(r3) vec = r3.element([1, 2, 3]) @@ -684,19 +675,20 @@ def test_functional_right_vector_mult(): assert C.is_linear assert C.adjoint.is_linear - assert all_almost_equal(C(x), np.sum(vec * x)) + assert all_almost_equal(C(x), odl.sum(vec * x)) assert all_almost_equal(C.adjoint(y), vec * y) assert all_almost_equal(C.adjoint.adjoint(x), C(x)) # Using operator overloading assert all_almost_equal((op * vec)(x), - np.sum(vec * x)) + odl.sum(vec * x)) assert all_almost_equal((op * vec).adjoint(y), vec * y) -def test_functional_composition(): - r3 = odl.rn(3) +def test_functional_composition(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) op = SumFunctional(r3) op2 = ConstantVector(r3) @@ -708,17 +700,17 @@ def test_functional_composition(): assert C.is_linear assert C.adjoint.is_linear - assert all_almost_equal(C(x), np.sum(x) * np.ones(3)) - assert all_almost_equal(C.adjoint(x), np.sum(x) * np.ones(3)) + assert all_almost_equal(C(x), odl.sum(x) * r3.one()) + assert all_almost_equal(C.adjoint(x), odl.sum(x) * r3.one()) assert all_almost_equal(C.adjoint.adjoint(x), C(x)) # Using operator overloading assert (op * op2)(y) == y * 3 assert (op * op2).adjoint(y) == y * 3 assert all_almost_equal((op2 * op)(x), - np.sum(x) * np.ones(3)) + odl.sum(x) * r3.one()) assert all_almost_equal((op2 * op).adjoint(x), - np.sum(x) * np.ones(3)) + odl.sum(x) * r3.one()) class SumSquaredFunctional(Operator): @@ -730,20 +722,22 @@ def __init__(self, domain): domain, domain.field, linear=False) def _call(self, x): - return np.sum(x ** 2) + return odl.sum(x ** 2) -def test_nonlinear_functional(): - r3 = odl.rn(3) +def test_nonlinear_functional(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) x = r3.element([1, 2, 3]) op = SumSquaredFunctional(r3) - assert op(x) == pytest.approx(np.sum(x ** 2)) + assert op(x) == pytest.approx(odl.sum(x ** 2)) -def test_nonlinear_functional_out(): - r3 = odl.rn(3) +def test_nonlinear_functional_out(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) x = r3.element([1, 2, 3]) op = SumSquaredFunctional(r3) @@ -753,8 +747,9 @@ def test_nonlinear_functional_out(): op(x, out=out) -def test_nonlinear_functional_operators(): - r3 = odl.rn(3) +def test_nonlinear_functional_operators(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, impl=impl, device=device) x = r3.element([1, 2, 3]) mat = SumSquaredFunctional(r3) @@ -956,4 +951,4 @@ def _call(cls, x, out=None): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/operator/oputils_test.py b/odl/test/core/operator/oputils_test.py similarity index 60% rename from odl/test/operator/oputils_test.py rename to odl/test/core/operator/oputils_test.py index 8fa84326a62..4b68bf156eb 100644 --- a/odl/test/operator/oputils_test.py +++ b/odl/test/core/operator/oputils_test.py @@ -11,54 +11,65 @@ import pytest import odl -from odl.operator.oputils import matrix_representation, power_method_opnorm -from odl.operator.pspace_ops import ProductSpaceOperator -from odl.util.testutils import all_almost_equal +from odl.core.operator.oputils import matrix_representation, power_method_opnorm +from odl.core.operator.pspace_ops import ProductSpaceOperator +from odl.core.util.testutils import all_almost_equal, noise_elements +from odl.core.array_API_support.utils import get_array_and_backend -def test_matrix_representation(): - """Verify that the matrix repr returns the correct matrix""" - n = 3 - A = np.random.rand(n, n) +@pytest.fixture(scope="module", ids=['True', 'False'], params=[True, False]) +def dom_eq_ran_mat(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + shape = (3,3) + space = odl.rn(shape, impl=impl, device=device) + mat, _ = noise_elements(space) + return mat - Aop = odl.MatrixOperator(A) + +def test_matrix_representation(dom_eq_ran_mat): + """Verify that the matrix repr returns the correct matrix""" + Aop = odl.MatrixOperator(dom_eq_ran_mat) matrix_repr = matrix_representation(Aop) - assert all_almost_equal(A, matrix_repr) + assert all_almost_equal(dom_eq_ran_mat, matrix_repr) -def test_matrix_representation_product_to_lin_space(): +def test_matrix_representation_product_to_lin_space(dom_eq_ran_mat): """Verify that the matrix repr works for product spaces. Here, since the domain shape ``(2, 3)`` and the range has shape ``(1, 3)``, the shape of the matrix representation will be ``(2, 3, 1, 3)``. """ - n = 3 - A = np.random.rand(n, n) + A = dom_eq_ran_mat Aop = odl.MatrixOperator(A) - B = np.random.rand(n, n) + B = dom_eq_ran_mat+0.1 Bop = odl.MatrixOperator(B) ABop = ProductSpaceOperator([[Aop, Bop]]) matrix_repr = matrix_representation(ABop) - assert matrix_repr.shape == (1, n, 2, n) - assert np.linalg.norm(A - matrix_repr[0, :, 0, :]) == pytest.approx(0) - assert np.linalg.norm(B - matrix_repr[0, :, 1, :]) == pytest.approx(0) + assert matrix_repr.shape == (1, 3, 2, 3) + + _, backend = get_array_and_backend(A) + + assert backend.to_cpu( + backend.array_namespace.linalg.norm(A - matrix_repr[0, :, 0, :])) == pytest.approx(0) + assert backend.to_cpu( + backend.array_namespace.linalg.norm(B - matrix_repr[0, :, 1, :])) == pytest.approx(0) -def test_matrix_representation_lin_space_to_product(): +def test_matrix_representation_lin_space_to_product(dom_eq_ran_mat): """Verify that the matrix repr works for product spaces. Here, since the domain shape ``(1, 3)`` and the range has shape ``(2, 3)``, the shape of the matrix representation will be ``(2, 3, 1, 3)``. """ - n = 3 - A = np.random.rand(n, n) + n=3 + A = dom_eq_ran_mat Aop = odl.MatrixOperator(A) - B = np.random.rand(n, n) + B = dom_eq_ran_mat+0.1 Bop = odl.MatrixOperator(B) ABop = ProductSpaceOperator([[Aop], @@ -66,22 +77,25 @@ def test_matrix_representation_lin_space_to_product(): matrix_repr = matrix_representation(ABop) + _, backend = get_array_and_backend(A) assert matrix_repr.shape == (2, n, 1, n) - assert np.linalg.norm(A - matrix_repr[0, :, 0, :]) == pytest.approx(0) - assert np.linalg.norm(B - matrix_repr[1, :, 0, :]) == pytest.approx(0) + assert backend.to_cpu( + backend.array_namespace.linalg.norm(A - matrix_repr[0, :, 0, :])) == pytest.approx(0) + assert backend.to_cpu( + backend.array_namespace.linalg.norm(B - matrix_repr[1, :, 0, :])) == pytest.approx(0) -def test_matrix_representation_product_to_product(): +def test_matrix_representation_product_to_product(dom_eq_ran_mat): """Verify that the matrix repr works for product spaces. Here, since the domain and range has shape ``(2, 3)``, the shape of the matrix representation will be ``(2, 3, 2, 3)``. """ n = 3 - A = np.random.rand(n, n) + A = dom_eq_ran_mat Aop = odl.MatrixOperator(A) - B = np.random.rand(n, n) + B = dom_eq_ran_mat+0.1 Bop = odl.MatrixOperator(B) ABop = ProductSpaceOperator([[Aop, 0], @@ -89,62 +103,75 @@ def test_matrix_representation_product_to_product(): matrix_repr = matrix_representation(ABop) assert matrix_repr.shape == (2, n, 2, n) - assert np.linalg.norm(A - matrix_repr[0, :, 0, :]) == pytest.approx(0) - assert np.linalg.norm(B - matrix_repr[1, :, 1, :]) == pytest.approx(0) + _, backend = get_array_and_backend(A) + assert matrix_repr.shape == (2, n, 2, n) + assert backend.to_cpu( + backend.array_namespace.linalg.norm(A - matrix_repr[0, :, 0, :])) == pytest.approx(0) + assert backend.to_cpu( + backend.array_namespace.linalg.norm(B - matrix_repr[1, :, 1, :])) == pytest.approx(0) + -def test_matrix_representation_not_linear_op(): +def test_matrix_representation_not_linear_op(odl_impl_device_pairs): """Verify error when operator is non-linear""" + impl, device = odl_impl_device_pairs class MyNonLinOp(odl.Operator): """Small nonlinear test operator.""" def _call(self, x): return x ** 2 - nonlin_op = MyNonLinOp(domain=odl.rn(3), range=odl.rn(3), linear=False) + nonlin_op = MyNonLinOp( + domain=odl.rn(3,impl=impl, device=device), + range=odl.rn(3,impl=impl, device=device), + linear=False) with pytest.raises(ValueError): matrix_representation(nonlin_op) -def test_matrix_representation_wrong_domain(): +def test_matrix_representation_wrong_domain(odl_impl_device_pairs): """Verify that the matrix representation function gives correct error""" + impl, device = odl_impl_device_pairs class MyOp(odl.Operator): """Small test operator.""" def __init__(self): super(MyOp, self).__init__( - domain=odl.rn(3) * odl.rn(3) ** 2, - range=odl.rn(4), + domain=odl.rn(3,impl=impl, device=device) * odl.rn(3,impl=impl, device=device) ** 2, + range=odl.rn(4,impl=impl, device=device), linear=True) def _call(self, x, out): - return odl.rn(np.random.rand(4)) + return odl.rn([4], impl=impl, device=device) nonlin_op = MyOp() with pytest.raises(TypeError): matrix_representation(nonlin_op) -def test_matrix_representation_wrong_range(): +def test_matrix_representation_wrong_range(odl_impl_device_pairs): """Verify that the matrix representation function gives correct error""" + impl, device = odl_impl_device_pairs class MyOp(odl.Operator): """Small test operator.""" def __init__(self): super(MyOp, self).__init__( - domain=odl.rn(3), - range=odl.rn(3) * odl.rn(3) ** 2, + domain=odl.rn(3,impl=impl, device=device), + range=odl.rn(3,impl=impl, device=device) * odl.rn(3,impl=impl, device=device) ** 2, linear=True) def _call(self, x, out): - return odl.rn(np.random.rand(4)) + return odl.rn([4], impl=impl, device=device) nonlin_op = MyOp() with pytest.raises(TypeError): matrix_representation(nonlin_op) -def test_power_method_opnorm_symm(): +def test_power_method_opnorm_symm(odl_impl_device_pairs): """Test the power method on a symmetrix matrix operator""" + impl, device = odl_impl_device_pairs # Test matrix with singular values 1.2 and 1.0 - mat = np.array([[0.9509044, -0.64566614], + space = odl.rn([2,2], impl=impl, device=device) + mat = space.element([[0.9509044, -0.64566614], [-0.44583952, -0.95923051]]) op = odl.MatrixOperator(mat) @@ -168,15 +195,17 @@ def test_power_method_opnorm_symm(): assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2) # Start at a deterministic point. This should _always_ succeed. - xstart = odl.rn(2).element([0.8, 0.5]) + xstart = odl.rn(2, impl=impl, device=device).element([0.8, 0.5]) opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=100) assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2) -def test_power_method_opnorm_nonsymm(): +def test_power_method_opnorm_nonsymm(odl_impl_device_pairs): """Test the power method on a nonsymmetrix matrix operator""" + impl, device = odl_impl_device_pairs # Singular values 5.5 and 6 - mat = np.array([[-1.52441557, 5.04276365], + space = odl.rn([3,2], impl=impl, device=device) + mat = space.element([[-1.52441557, 5.04276365], [1.90246927, 2.54424763], [5.32935411, 0.04573162]]) @@ -184,19 +213,20 @@ def test_power_method_opnorm_nonsymm(): true_opnorm = 6 # Start vector (1, 1) is close to the wrong eigenvector - xstart = odl.rn(2).element([1, 1]) + xstart = odl.rn(2, impl=impl, device=device).element([1, 1]) opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=50) assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2) # Start close to the correct eigenvector, converges very fast - xstart = odl.rn(2).element([-0.8, 0.5]) + xstart = odl.rn(2, impl=impl, device=device).element([-0.8, 0.5]) opnorm_est = power_method_opnorm(op, xstart=xstart, maxiter=6) assert opnorm_est == pytest.approx(true_opnorm, rel=1e-2) -def test_power_method_opnorm_exceptions(): +def test_power_method_opnorm_exceptions(odl_impl_device_pairs): """Test the exceptions""" - space = odl.rn(2) + impl, device = odl_impl_device_pairs + space = odl.rn(2, impl=impl, device=device) op = odl.IdentityOperator(space) with pytest.raises(ValueError): @@ -227,4 +257,4 @@ def test_power_method_opnorm_exceptions(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/operator/pspace_ops_test.py b/odl/test/core/operator/pspace_ops_test.py similarity index 70% rename from odl/test/operator/pspace_ops_test.py rename to odl/test/core/operator/pspace_ops_test.py index 7f7e1f572b7..7283f59ac4d 100644 --- a/odl/test/operator/pspace_ops_test.py +++ b/odl/test/core/operator/pspace_ops_test.py @@ -10,18 +10,33 @@ import pytest import odl -from odl.util.testutils import all_almost_equal, simple_fixture - - -base_op = simple_fixture( - 'base_op', - [odl.IdentityOperator(odl.rn(3)), - odl.BroadcastOperator(odl.IdentityOperator(odl.rn(3)), 2), - odl.ReductionOperator(odl.IdentityOperator(odl.rn(3)), 2), - odl.DiagonalOperator(odl.IdentityOperator(odl.rn(3)), 2), - ], - fmt=' {name}={value.__class__.__name__}') - +from odl.core.util.testutils import all_almost_equal, simple_fixture + + +# base_op = simple_fixture( +# 'base_op', +# [odl.IdentityOperator(odl.rn(3)), +# odl.BroadcastOperator(odl.IdentityOperator(odl.rn(3)), 2), +# odl.ReductionOperator(odl.IdentityOperator(odl.rn(3)), 2), +# odl.DiagonalOperator(odl.IdentityOperator(odl.rn(3)), 2), +# ], +# fmt=' {name}={value.__class__.__name__}') + +op_name = simple_fixture(name='op_name', params=['Identity', 'Broadcast', 'Reduction', 'Diagonal']) + +@pytest.fixture(scope="module", ) +def base_op(odl_impl_device_pairs, op_name): + impl, device = odl_impl_device_pairs + space = odl.rn(3, impl=impl, device=device) + if op_name == 'Identity': + op = odl.IdentityOperator(space) + elif op_name == 'Broadcast': + op = odl.BroadcastOperator(odl.IdentityOperator(space), 2) + elif op_name == 'Reduction': + op = odl.ReductionOperator(odl.IdentityOperator(space), 2) + elif op_name == 'Diagonal': + op = odl.DiagonalOperator(odl.IdentityOperator(space), 2) + return op def test_pspace_op_init(base_op): """Test initialization with different base operators.""" @@ -94,9 +109,9 @@ def test_pspace_op_adjoint(base_op): assert all_almost_equal(adj(y), true_adj(y)) -def test_pspace_op_weighted_init(): - - r3 = odl.rn(3) +def test_pspace_op_weighted_init(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) ran = odl.ProductSpace(r3, 2, weighting=[1, 2]) A = odl.IdentityOperator(r3) @@ -105,8 +120,9 @@ def test_pspace_op_weighted_init(): [0]], range=ran) -def test_pspace_op_sum_call(): - r3 = odl.rn(3) +def test_pspace_op_sum_call(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) A = odl.IdentityOperator(r3) op = odl.ProductSpaceOperator([[A, A]]) @@ -118,8 +134,9 @@ def test_pspace_op_sum_call(): assert all_almost_equal(op(z, out=op.range.element())[0], x + y) -def test_pspace_op_project_call(): - r3 = odl.rn(3) +def test_pspace_op_project_call(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) A = odl.IdentityOperator(r3) op = odl.ProductSpaceOperator([[A], [A]]) @@ -133,8 +150,9 @@ def test_pspace_op_project_call(): assert x == op(z, out=op.range.element())[1] -def test_pspace_op_diagonal_call(): - r3 = odl.rn(3) +def test_pspace_op_diagonal_call(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) A = odl.IdentityOperator(r3) op = odl.ProductSpaceOperator([[A, 0], [0, A]]) @@ -147,8 +165,9 @@ def test_pspace_op_diagonal_call(): assert z == op(z, out=op.range.element()) -def test_pspace_op_swap_call(): - r3 = odl.rn(3) +def test_pspace_op_swap_call(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) A = odl.IdentityOperator(r3) op = odl.ProductSpaceOperator([[0, A], [A, 0]]) @@ -162,8 +181,9 @@ def test_pspace_op_swap_call(): assert result == op(z, out=op.range.element()) -def test_comp_proj(): - r3 = odl.rn(3) +def test_comp_proj(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) r3xr3 = odl.ProductSpace(r3, 2) x = r3xr3.element([[1, 2, 3], @@ -177,8 +197,9 @@ def test_comp_proj(): assert x[1] == proj_1(x, out=proj_1.range.element()) -def test_comp_proj_slice(): - r3 = odl.rn(3) +def test_comp_proj_slice(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) r33 = odl.ProductSpace(r3, 3) x = r33.element([[1, 2, 3], @@ -190,8 +211,9 @@ def test_comp_proj_slice(): assert x[0:2] == proj(x, out=proj.range.element()) -def test_comp_proj_indices(): - r3 = odl.rn(3) +def test_comp_proj_indices(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) r33 = odl.ProductSpace(r3, 3) x = r33.element([[1, 2, 3], @@ -203,8 +225,9 @@ def test_comp_proj_indices(): assert x[[0, 2]] == proj(x, out=proj.range.element()) -def test_comp_proj_adjoint(): - r3 = odl.rn(3) +def test_comp_proj_adjoint(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) r3xr3 = odl.ProductSpace(r3, 2) x = r3.element([1, 2, 3]) @@ -224,8 +247,9 @@ def test_comp_proj_adjoint(): assert result_1 == proj_1.adjoint(x, out=proj_1.domain.element()) -def test_comp_proj_adjoint_slice(): - r3 = odl.rn(3) +def test_comp_proj_adjoint_slice(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r3=odl.rn(3, impl=impl, device=device) r33 = odl.ProductSpace(r3, 3) x = r33[0:2].element([[1, 2, 3], @@ -241,4 +265,4 @@ def test_comp_proj_adjoint_slice(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/operator/tensor_ops_test.py b/odl/test/core/operator/tensor_ops_test.py similarity index 52% rename from odl/test/operator/tensor_ops_test.py rename to odl/test/core/operator/tensor_ops_test.py index 60040e9ec50..108796585c2 100644 --- a/odl/test/operator/tensor_ops_test.py +++ b/odl/test/core/operator/tensor_ops_test.py @@ -15,11 +15,14 @@ import odl import pytest -from odl.operator.tensor_ops import ( +from odl.core.operator.tensor_ops import ( MatrixOperator, PointwiseInner, PointwiseNorm, PointwiseSum) -from odl.space.pspace import ProductSpace -from odl.util.testutils import ( - all_almost_equal, all_equal, noise_element, noise_elements, simple_fixture) +from odl.core.space.pspace import ProductSpace +from odl.core.util.testutils import ( + all_almost_equal, all_equal, noise_element, noise_elements, simple_fixture, skip_if_no_pytorch) +from odl.core.space.entry_points import tensor_space_impl_names +from odl.core.sparse import SparseMatrix +from odl.core.array_API_support import lookup_array_backend, get_array_and_backend matrix_dtype = simple_fixture( name='matrix_dtype', @@ -27,25 +30,26 @@ @pytest.fixture(scope='module') -def matrix(matrix_dtype): - dtype = np.dtype(matrix_dtype) - if np.issubdtype(dtype, np.floating): - return np.ones((3, 4), dtype=dtype) - elif np.issubdtype(dtype, np.complexfloating): - return np.ones((3, 4), dtype=dtype) * (1 + 1j) - else: - assert 0 +def matrix(matrix_dtype, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + space = odl.rn((3, 4), impl=impl, device=device, dtype=matrix_dtype) + return space.one() + # else: + # assert 0 -exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 3.5, 1.5]) +exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 3.5, 1.5]) +sparse_matrix_backend = simple_fixture('backend', ['scipy', 'pytorch']) +sparse_matrix_format = simple_fixture('format', ['COO']) # ---- PointwiseNorm ---- -def test_pointwise_norm_init_properties(): +def test_pointwise_norm_init_properties(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # 1d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 1, exponent=1) # Make sure the code runs and test the properties @@ -64,7 +68,7 @@ def test_pointwise_norm_init_properties(): assert pwnorm.is_weighted # 3d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 3, exponent=1) # Make sure the code runs and test the properties @@ -96,16 +100,17 @@ def test_pointwise_norm_init_properties(): PointwiseNorm(vfspace, weighting=[1, 0, 1]) # 0 invalid -def test_pointwise_norm_real(exponent): +def test_pointwise_norm_real(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # 1d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 1) pwnorm = PointwiseNorm(vfspace, exponent) - testarr = np.array([[[1, 2], - [3, 4]]]) + testarr = fspace.array_backend.array_constructor([[[1, 2], + [3, 4]]], dtype=float, device=device) - true_norm = np.linalg.norm(testarr, ord=exponent, axis=0) + true_norm = fspace.array_namespace.linalg.norm(testarr, ord=exponent, axis=0) func = vfspace.element(testarr) func_pwnorm = pwnorm(func) @@ -116,18 +121,18 @@ def test_pointwise_norm_real(exponent): assert all_almost_equal(out, true_norm) # 3d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 3) pwnorm = PointwiseNorm(vfspace, exponent) - testarr = np.array([[[1, 2], + testarr = fspace.array_backend.array_constructor([[[1, 2], [3, 4]], [[0, -1], [0, 1]], [[1, 1], - [1, 1]]]) + [1, 1]]], dtype=float, device=device) - true_norm = np.linalg.norm(testarr, ord=exponent, axis=0) + true_norm = fspace.array_namespace.linalg.norm(testarr, ord=exponent, axis=0) func = vfspace.element(testarr) func_pwnorm = pwnorm(func) @@ -138,47 +143,53 @@ def test_pointwise_norm_real(exponent): assert all_almost_equal(out, true_norm) -def test_pointwise_norm_complex(exponent): - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex) +def test_pointwise_norm_complex(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex, impl=impl, device=device) vfspace = ProductSpace(fspace, 3) pwnorm = PointwiseNorm(vfspace, exponent) - testarr = np.array([[[1 + 1j, 2], + testarr = fspace.array_backend.array_constructor([[[1 + 1j, 2], [3, 4 - 2j]], [[0, -1], [0, 1]], [[1j, 1j], - [1j, 1j]]]) + [1j, 1j]]], device=device, dtype=complex) - true_norm = np.linalg.norm(testarr, ord=exponent, axis=0) + true_norm = fspace.array_namespace.linalg.norm(testarr, ord=exponent, axis=0) func = vfspace.element(testarr) func_pwnorm = pwnorm(func) assert all_almost_equal(func_pwnorm, true_norm) - out = fspace.element() + out = pwnorm.range.element() pwnorm(func, out=out) - assert all_almost_equal(out, true_norm) + assert all_almost_equal(out.real, true_norm) + +def test_pointwise_norm_weighted(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) + + ns = fspace.array_namespace + backend = fspace.array_backend -def test_pointwise_norm_weighted(exponent): - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) vfspace = ProductSpace(fspace, 3) - weight = np.array([1.0, 2.0, 3.0]) + weight = backend.array_constructor([1.0, 2.0, 3.0], device=device) pwnorm = PointwiseNorm(vfspace, exponent, weighting=weight) - testarr = np.array([[[1, 2], + testarr = backend.array_constructor([[[1, 2], [3, 4]], [[0, -1], [0, 1]], [[1, 1], - [1, 1]]]) + [1, 1]]], device=device, dtype=float) if exponent in (1.0, float('inf')): - true_norm = np.linalg.norm(weight[:, None, None] * testarr, + true_norm = ns.linalg.norm(weight[:, None, None] * testarr, ord=exponent, axis=0) else: - true_norm = np.linalg.norm( + true_norm = ns.linalg.norm( weight[:, None, None] ** (1 / exponent) * testarr, ord=exponent, axis=0) @@ -191,10 +202,11 @@ def test_pointwise_norm_weighted(exponent): assert all_almost_equal(out, true_norm) -def test_pointwise_norm_gradient_real(exponent): +def test_pointwise_norm_gradient_real(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # The operator is not differentiable for exponent 'inf' if exponent == float('inf'): - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 1) pwnorm = PointwiseNorm(vfspace, exponent) point = vfspace.one() @@ -203,7 +215,7 @@ def test_pointwise_norm_gradient_real(exponent): return # 1d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 1) pwnorm = PointwiseNorm(vfspace, exponent) @@ -211,10 +223,10 @@ def test_pointwise_norm_gradient_real(exponent): direction = noise_element(vfspace) # Computing expected result - tmp = pwnorm(point).ufuncs.power(1 - exponent) + tmp = odl.pow(pwnorm(point), 1 - exponent) v_field = vfspace.element() for i in range(len(v_field)): - v_field[i] = tmp * point[i] * np.abs(point[i]) ** (exponent - 2) + v_field[i] = tmp * point[i] * odl.abs(point[i]) ** (exponent - 2) pwinner = odl.PointwiseInner(vfspace, v_field) expected_result = pwinner(direction) @@ -223,7 +235,7 @@ def test_pointwise_norm_gradient_real(exponent): assert all_almost_equal(func_pwnorm(direction), expected_result) # 3d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 3) pwnorm = PointwiseNorm(vfspace, exponent) @@ -231,10 +243,10 @@ def test_pointwise_norm_gradient_real(exponent): direction = noise_element(vfspace) # Computing expected result - tmp = pwnorm(point).ufuncs.power(1 - exponent) + tmp = odl.pow(pwnorm(point), 1 - exponent) v_field = vfspace.element() for i in range(len(v_field)): - v_field[i] = tmp * point[i] * np.abs(point[i]) ** (exponent - 2) + v_field[i] = tmp * point[i] * odl.abs(point[i]) ** (exponent - 2) pwinner = odl.PointwiseInner(vfspace, v_field) expected_result = pwinner(direction) @@ -242,7 +254,10 @@ def test_pointwise_norm_gradient_real(exponent): assert all_almost_equal(func_pwnorm(direction), expected_result) -def test_pointwise_norm_gradient_real_with_zeros(exponent): +def test_pointwise_norm_gradient_real_with_zeros( + exponent, + odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # The gradient is only well-defined in points with zeros if the exponent is # >= 2 and < inf if exponent < 2 or exponent == float('inf'): @@ -250,50 +265,57 @@ def test_pointwise_norm_gradient_real_with_zeros(exponent): 'exponent') # 1d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 1) pwnorm = PointwiseNorm(vfspace, exponent) - test_point = np.array([[[0, 0], # This makes the point singular for p < 2 - [1, 2]]]) - test_direction = np.array([[[1, 2], - [4, 5]]]) + backend = fspace.array_backend + + # This makes the point singular for p < 2 + test_point = backend.array_constructor( + [[[0, 0], [1, 2]]], device=device) + test_direction = backend.array_constructor( + [[[1, 2], [4, 5]]], device=device) point = vfspace.element(test_point) direction = vfspace.element(test_direction) func_pwnorm = pwnorm.derivative(point) - assert not np.any(np.isnan(func_pwnorm(direction))) + assert not odl.any(odl.isnan(func_pwnorm(direction))) # 3d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 3) pwnorm = PointwiseNorm(vfspace, exponent) - test_point = np.array([[[0, 0], # This makes the point singular for p < 2 - [1, 2]], - [[3, 4], - [0, 0]], # This makes the point singular for p < 2 - [[5, 6], - [7, 8]]]) - test_direction = np.array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]], - [[8, 9], - [0, 1]]]) + # This makes the point singular for p < 2 + test_point = backend.array_constructor( + [[[0, 0], + [1, 2]], + [[3, 4], + [0, 0]], + [[5, 6], + [7, 8]]], device=device) + test_direction = backend.array_constructor( + [[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]], + [[8, 9], + [0, 1]]], device=device) point = vfspace.element(test_point) direction = vfspace.element(test_direction) func_pwnorm = pwnorm.derivative(point) - assert not np.any(np.isnan(func_pwnorm(direction))) + assert not odl.any(odl.isnan(func_pwnorm(direction))) # ---- PointwiseInner ---- -def test_pointwise_inner_init_properties(): - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) +def test_pointwise_inner_init_properties(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 3, exponent=2) # Make sure the code runs and test the properties @@ -319,18 +341,23 @@ def test_pointwise_inner_init_properties(): """ -def test_pointwise_inner_real(): +def test_pointwise_inner_real(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # 1d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) + + backend = fspace.array_backend + vfspace = ProductSpace(fspace, 1) - array = np.array([[[-1, -3], - [2, 0]]]) + array = backend.array_constructor( + [[[-1, -3], [2, 0]]], device=device) + pwinner = PointwiseInner(vfspace, vecfield=array) - testarr = np.array([[[1, 2], - [3, 4]]]) + testarr = backend.array_constructor( + [[[1, 2], [3, 4]]], device=device) - true_inner = np.sum(testarr * array, axis=0) + true_inner = backend.array_namespace.sum(testarr * array, axis=0) func = vfspace.element(testarr) func_pwinner = pwinner(func) @@ -341,24 +368,24 @@ def test_pointwise_inner_real(): assert all_almost_equal(out, true_inner) # 3d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 3) - array = np.array([[[-1, -3], + array = backend.array_constructor([[[-1, -3], [2, 0]], [[0, 0], [0, 1]], [[-1, 1], - [1, 1]]]) + [1, 1]]], device=device) pwinner = PointwiseInner(vfspace, vecfield=array) - testarr = np.array([[[1, 2], + testarr = backend.array_constructor([[[1, 2], [3, 4]], [[0, -1], [0, 1]], [[1, 1], - [1, 1]]]) + [1, 1]]], device=device) - true_inner = np.sum(testarr * array, axis=0) + true_inner = backend.array_namespace.sum(testarr * array, axis=0) func = vfspace.element(testarr) func_pwinner = pwinner(func) @@ -369,25 +396,30 @@ def test_pointwise_inner_real(): assert all_almost_equal(out, true_inner) -def test_pointwise_inner_complex(): - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex) +def test_pointwise_inner_complex(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex, impl=impl, device=device) vfspace = ProductSpace(fspace, 3) - array = np.array([[[-1 - 1j, -3], + + backend = fspace.array_backend + + array = backend.array_constructor([[[-1 - 1j, -3], [2, 2j]], [[-1j, 0], [0, 1]], [[-1, 1 + 2j], - [1, 1]]]) + [1, 1]]], device=device) + pwinner = PointwiseInner(vfspace, vecfield=array) - testarr = np.array([[[1 + 1j, 2], + testarr = backend.array_constructor([[[1 + 1j, 2], [3, 4 - 2j]], [[0, -1], [0, 1]], [[1j, 1j], - [1j, 1j]]]) + [1j, 1j]]], device=device) - true_inner = np.sum(testarr * array.conj(), axis=0) + true_inner = backend.array_namespace.sum(testarr * array.conj(), axis=0) func = vfspace.element(testarr) func_pwinner = pwinner(func) @@ -398,27 +430,31 @@ def test_pointwise_inner_complex(): assert all_almost_equal(out, true_inner) -def test_pointwise_inner_weighted(): - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) +def test_pointwise_inner_weighted(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) + + backend = fspace.array_backend + vfspace = ProductSpace(fspace, 3) - array = np.array([[[-1, -3], + array = backend.array_constructor([[[-1, -3], [2, 0]], [[0, 0], [0, 1]], [[-1, 1], - [1, 1]]]) + [1, 1]]], device=device) - weight = np.array([1.0, 2.0, 3.0]) + weight = backend.array_constructor([1.0, 2.0, 3.0], device=device) pwinner = PointwiseInner(vfspace, vecfield=array, weighting=weight) - testarr = np.array([[[1, 2], + testarr = backend.array_constructor([[[1, 2], [3, 4]], [[0, -1], [0, 1]], [[1, 1], - [1, 1]]]) + [1, 1]]], device=device) - true_inner = np.sum(weight[:, None, None] * testarr * array, axis=0) + true_inner = backend.array_namespace.sum(weight[:, None, None] * testarr * array, axis=0) func = vfspace.element(testarr) func_pwinner = pwinner(func) @@ -429,16 +465,20 @@ def test_pointwise_inner_weighted(): assert all_almost_equal(out, true_inner) -def test_pointwise_inner_adjoint(): +def test_pointwise_inner_adjoint(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # 1d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex, impl=impl, device=device) + + backend = fspace.array_backend + vfspace = ProductSpace(fspace, 1) - array = np.array([[[-1, -3], - [2, 0]]]) + array = backend.array_constructor([[[-1, -3], + [2, 0]]], device=device) pwinner = PointwiseInner(vfspace, vecfield=array) - testarr = np.array([[1 + 1j, 2], - [3, 4 - 2j]]) + testarr = backend.array_constructor([[1 + 1j, 2], + [3, 4 - 2j]], device=device) true_inner_adj = testarr[None, :, :] * array @@ -451,18 +491,18 @@ def test_pointwise_inner_adjoint(): assert all_almost_equal(out, true_inner_adj) # 3d - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex, impl=impl, device=device) vfspace = ProductSpace(fspace, 3) - array = np.array([[[-1 - 1j, -3], + array = backend.array_constructor([[[-1 - 1j, -3], [2, 2j]], [[-1j, 0], [0, 1]], [[-1, 1 + 2j], - [1, 1]]]) + [1, 1]]], device=device) pwinner = PointwiseInner(vfspace, vecfield=array) - testarr = np.array([[1 + 1j, 2], - [3, 4 - 2j]]) + testarr = backend.array_constructor([[1 + 1j, 2], + [3, 4 - 2j]], device=device) true_inner_adj = testarr[None, :, :] * array @@ -475,20 +515,22 @@ def test_pointwise_inner_adjoint(): assert all_almost_equal(out, true_inner_adj) -def test_pointwise_inner_adjoint_weighted(): +def test_pointwise_inner_adjoint_weighted(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Weighted product space only - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), dtype=complex, impl=impl, device=device) + backend = fspace.array_backend vfspace = ProductSpace(fspace, 3, weighting=[2, 4, 6]) - array = np.array([[[-1 - 1j, -3], + array = backend.array_constructor([[[-1 - 1j, -3], [2, 2j]], [[-1j, 0], [0, 1]], [[-1, 1 + 2j], - [1, 1]]]) + [1, 1]]], device=device) pwinner = PointwiseInner(vfspace, vecfield=array) - testarr = np.array([[1 + 1j, 2], - [3, 4 - 2j]]) + testarr = backend.array_constructor([[1 + 1j, 2], + [3, 4 - 2j]], device=device) true_inner_adj = testarr[None, :, :] * array # same as unweighted case @@ -503,8 +545,8 @@ def test_pointwise_inner_adjoint_weighted(): # Using different weighting in the inner product pwinner = PointwiseInner(vfspace, vecfield=array, weighting=[4, 8, 12]) - testarr = np.array([[1 + 1j, 2], - [3, 4 - 2j]]) + testarr = backend.array_constructor([[1 + 1j, 2], + [3, 4 - 2j]], device=device) true_inner_adj = 2 * testarr[None, :, :] * array # w / v = (2, 2, 2) @@ -520,10 +562,11 @@ def test_pointwise_inner_adjoint_weighted(): # ---- PointwiseSum ---- -def test_pointwise_sum(): +def test_pointwise_sum(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """PointwiseSum currently depends on PointwiseInner, we verify that.""" - fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2)) + fspace = odl.uniform_discr([0, 0], [1, 1], (2, 2), impl=impl, device=device) vfspace = ProductSpace(fspace, 3, exponent=2) # Make sure the code runs and test the properties @@ -535,32 +578,137 @@ def test_pointwise_sum(): # ---- MatrixOperator ---- # +def sparse_scipy_input(sparse_matrix_format): + dense_matrix = np.ones((3, 4)) + if sparse_matrix_format == 'COO': + sparse_matrix = SparseMatrix('COO', 'scipy', dense_matrix) + else: + raise NotImplementedError + return dense_matrix, sparse_matrix + +def sparse_pytorch_input(sparse_matrix_format, cuda_device): + assert sparse_matrix_format == 'COO', NotImplementedError + indices = [ + #1st row|2nd row|3rd row + [0,0,0,0,1,1,1,1,2,2,2,2], + [0,1,2,3,0,1,2,3,0,1,2,3] + ] + values = [ + 1.0,1.0,1.0,1.0, + 1.0,1.0,1.0,1.0, + 1.0,1.0,1.0,1.0 + ] + array = [ + [1.0,1.0,1.0,1.0], + [1.0,1.0,1.0,1.0], + [1.0,1.0,1.0,1.0] + ] + backend = lookup_array_backend('pytorch') + dense_matrix = backend.array_constructor(array, device=cuda_device) + sparse_matrix = SparseMatrix('COO', 'pytorch', indices, values, device=cuda_device) + return dense_matrix, sparse_matrix + + + +sparse_configs = [] +sparse_configs.extend( + (pytest.param(proj_cfg) + for proj_cfg in ['COO scipy cpu']) +) + +if 'pytorch' in tensor_space_impl_names(): + pytorch_cfgs = [] + for device in lookup_array_backend('pytorch').available_devices: + pytorch_cfgs.append(f'COO pytorch {device}') + + sparse_configs.extend( + (pytest.param(proj_cfg, marks=skip_if_no_pytorch) + for proj_cfg in pytorch_cfgs) + ) + +sparse_ids = [ + " format='{}' - backend='{}' - device='{}' ".format(*s.values[0].split()) + for s in sparse_configs +] + +@pytest.fixture(scope='module', params=sparse_configs, ids=sparse_ids) +def matrix_input(request): + format, backend, device = request.param.split() + if backend == 'scipy': + return sparse_scipy_input(format) + elif backend == 'pytorch': + return sparse_pytorch_input(format, device) + else: + raise ValueError + +def invertible_sparse_scipy_input(sparse_matrix_format): + assert sparse_matrix_format == 'COO', NotImplementedError + dense_matrix = np.ones((3, 3)) + 4.0 * np.eye(3) # invertible + sparse_matrix = SparseMatrix('COO', 'scipy', dense_matrix) + return dense_matrix, sparse_matrix + +def invertible_sparse_pytorch_input(sparse_matrix_format, cuda_device): + assert sparse_matrix_format == 'COO', NotImplementedError + indices = [ + #1st row|2nd row|3rd row + [0,0,0,1,1,1,2,2,2], + [0,1,2,0,1,2,0,1,2] + ] + values = [ + 5.0,1.0,1.0, + 1.0,5.0,1.0, + 1.0,1.0,5.0 + ] + array = [ + [5.0,1.0,1.0], + [1.0,5.0,1.0], + [1.0,1.0,5.0] + ] + backend = lookup_array_backend('pytorch') + dense_matrix = backend.array_constructor(array, device=cuda_device) + sparse_matrix = SparseMatrix('COO', 'pytorch', indices, values, device=cuda_device) + return dense_matrix, sparse_matrix + +@pytest.fixture(scope='module', params=sparse_configs, ids=sparse_ids) +def invertible_matrix_input(request): + format, backend, device = request.param.split() + if backend == 'scipy': + return invertible_sparse_scipy_input(format) + elif backend == 'pytorch': + return invertible_sparse_pytorch_input(format, device) + else: + raise ValueError - -def test_matrix_op_init(matrix): +def test_matrix_op_init(matrix_input): """Test initialization and properties of matrix operators.""" - dense_matrix = matrix - sparse_matrix = scipy.sparse.coo_matrix(dense_matrix) + dense_matrix, sparse_matrix = matrix_input + dense_matrix, backend = get_array_and_backend(dense_matrix) + impl = backend.impl + device = dense_matrix.device # Just check if the code runs MatrixOperator(dense_matrix) MatrixOperator(sparse_matrix) # Test default domain and range mat_op = MatrixOperator(dense_matrix) - assert mat_op.domain == odl.tensor_space(4, matrix.dtype) - assert mat_op.range == odl.tensor_space(3, matrix.dtype) - assert np.all(mat_op.matrix == dense_matrix) + assert mat_op.domain == odl.tensor_space(4, dense_matrix.dtype, impl=impl, device=device) + assert mat_op.range == odl.tensor_space(3, dense_matrix.dtype, impl=impl, device=device) + assert odl.all(mat_op.matrix == dense_matrix) - sparse_matrix = scipy.sparse.coo_matrix(dense_matrix) mat_op = MatrixOperator(sparse_matrix) - assert mat_op.domain == odl.tensor_space(4, matrix.dtype) - assert mat_op.range == odl.tensor_space(3, matrix.dtype) - assert (mat_op.matrix != sparse_matrix).getnnz() == 0 - + assert mat_op.domain == odl.tensor_space(4, dense_matrix.dtype, impl=impl, device=device) + assert mat_op.range == odl.tensor_space(3, dense_matrix.dtype, impl=impl, device=device) + if impl == 'numpy': + assert (mat_op.matrix != sparse_matrix).getnnz() == 0 + # Pytorch does not support == and != betweend sparse tensors + elif impl == 'pytorch': + assert len(mat_op.matrix) == len(sparse_matrix) + else: + raise NotImplementedError # Explicit domain and range - dom = odl.tensor_space(4, matrix.dtype) - ran = odl.tensor_space(3, matrix.dtype) + dom = odl.tensor_space(4, dense_matrix.dtype, impl=impl, device=device) + ran = odl.tensor_space(3, dense_matrix.dtype, impl=impl, device=device) mat_op = MatrixOperator(dense_matrix, domain=dom, range=ran) assert mat_op.domain == dom @@ -572,55 +720,67 @@ def test_matrix_op_init(matrix): # Bad 1d sizes with pytest.raises(ValueError): - MatrixOperator(dense_matrix, domain=odl.cn(4), range=odl.cn(4)) + MatrixOperator(dense_matrix, domain=odl.cn(4, impl=impl, device=device), range=odl.cn(4, impl=impl, device=device)) with pytest.raises(ValueError): - MatrixOperator(dense_matrix, range=odl.cn(4)) + MatrixOperator(dense_matrix, range=odl.cn(4, impl=impl, device=device)) # Invalid range dtype with pytest.raises(ValueError): - MatrixOperator(dense_matrix.astype(complex), range=odl.rn(4)) + if impl == 'numpy': + MatrixOperator(dense_matrix.astype(complex), range=odl.rn(4, impl=impl, device=device)) + elif impl == 'pytorch': + MatrixOperator(dense_matrix.to(complex), range=odl.rn(4, impl=impl, device=device)) + else: + raise NotImplementedError # Data type promotion # real space, complex matrix -> complex space - dom = odl.rn(4) - mat_op = MatrixOperator(dense_matrix.astype(complex), domain=dom) + dom = odl.rn(4, impl=impl, device=device) + if impl == 'numpy': + mat_op = MatrixOperator(dense_matrix.astype(complex), domain=dom, impl=impl, device=device) + + elif impl == 'pytorch': + mat_op = MatrixOperator(dense_matrix.to(complex), domain=dom, + impl=impl, device=device) + else: + raise NotImplementedError assert mat_op.domain == dom - assert mat_op.range == odl.cn(3) + assert mat_op.range == odl.cn(3, impl=impl, device=device) # complex space, real matrix -> complex space - dom = odl.cn(4) + dom = odl.cn(4, impl=impl, device=device) mat_op = MatrixOperator(dense_matrix.real, domain=dom) assert mat_op.domain == dom - assert mat_op.range == odl.cn(3) + assert mat_op.range == odl.cn(3, impl=impl, device=device) # Multi-dimensional spaces - dom = odl.tensor_space((6, 5, 4), matrix.dtype) - ran = odl.tensor_space((6, 5, 3), matrix.dtype) + dom = odl.tensor_space((6, 5, 4), dense_matrix.dtype, impl=impl, device=device) + ran = odl.tensor_space((6, 5, 3), dense_matrix.dtype, impl=impl, device=device) mat_op = MatrixOperator(dense_matrix, domain=dom, axis=2) assert mat_op.range == ran mat_op = MatrixOperator(dense_matrix, domain=dom, range=ran, axis=2) assert mat_op.range == ran with pytest.raises(ValueError): - bad_dom = odl.tensor_space((6, 6, 6), matrix.dtype) # wrong shape + bad_dom = odl.tensor_space((6, 6, 6), dense_matrix.dtype) # wrong shape MatrixOperator(dense_matrix, domain=bad_dom) with pytest.raises(ValueError): - dom = odl.tensor_space((6, 5, 4), matrix.dtype) - bad_ran = odl.tensor_space((6, 6, 6), matrix.dtype) # wrong shape + dom = odl.tensor_space((6, 5, 4), dense_matrix.dtype) + bad_ran = odl.tensor_space((6, 6, 6), dense_matrix.dtype) # wrong shape MatrixOperator(dense_matrix, domain=dom, range=bad_ran) with pytest.raises(ValueError): MatrixOperator(dense_matrix, domain=dom, axis=1) with pytest.raises(ValueError): MatrixOperator(dense_matrix, domain=dom, axis=0) with pytest.raises(ValueError): - bad_ran = odl.tensor_space((6, 3, 4), matrix.dtype) + bad_ran = odl.tensor_space((6, 3, 4), dense_matrix.dtype, impl=impl, device=device) MatrixOperator(dense_matrix, domain=dom, range=bad_ran, axis=2) with pytest.raises(ValueError): - bad_dom_for_sparse = odl.rn((6, 5, 4)) - MatrixOperator(sparse_matrix, domain=bad_dom_for_sparse, axis=2) + bad_dom_for_sparse = odl.rn((6, 5, 4), impl=impl, device=device) + MatrixOperator(sparse_matrix, domain=bad_dom_for_sparse, axis=2, impl=impl, device=device) # Init with uniform_discr space (subclass of TensorSpace) - dom = odl.uniform_discr(0, 1, 4, dtype=dense_matrix.dtype) - ran = odl.uniform_discr(0, 1, 3, dtype=dense_matrix.dtype) + dom = odl.uniform_discr(0, 1, 4, dtype=dense_matrix.dtype, impl=impl, device=device) + ran = odl.uniform_discr(0, 1, 3, dtype=dense_matrix.dtype, impl=impl, device=device) MatrixOperator(dense_matrix, domain=dom, range=ran) # Make sure this runs and returns something string-like @@ -628,17 +788,24 @@ def test_matrix_op_init(matrix): assert repr(mat_op) > '' -def test_matrix_op_call(matrix): +def test_matrix_op_call_implicit(matrix_input): """Validate result from calls to matrix operators against Numpy.""" - dense_matrix = matrix - sparse_matrix = scipy.sparse.coo_matrix(dense_matrix) + dense_matrix, sparse_matrix = matrix_input + + dense_matrix, backend = get_array_and_backend(dense_matrix) + impl = backend.impl + device = dense_matrix.device + ns = backend.array_namespace # Default 1d case dmat_op = MatrixOperator(dense_matrix) smat_op = MatrixOperator(sparse_matrix) xarr, x = noise_elements(dmat_op.domain) - - true_result = dense_matrix.dot(xarr) + # if impl == 'numpy': + # true_result = dense_matrix.dot(xarr) + # elif impl == 'pytorch': + + true_result = ns.tensordot(dense_matrix, xarr, axes=([1], [0])) assert all_almost_equal(dmat_op(x), true_result) assert all_almost_equal(smat_op(x), true_result) out = dmat_op.range.element() @@ -648,43 +815,60 @@ def test_matrix_op_call(matrix): assert all_almost_equal(out, true_result) # Multi-dimensional case - domain = odl.rn((2, 2, 4)) + + + domain = odl.rn((2, 2, 4),impl=impl,device=device) mat_op = MatrixOperator(dense_matrix, domain, axis=2) xarr, x = noise_elements(mat_op.domain) - true_result = np.moveaxis(np.tensordot(dense_matrix, xarr, (1, 2)), 0, 2) + true_result = ns.moveaxis(ns.tensordot(dense_matrix, xarr, axes=([1], [2])), 0, 2) assert all_almost_equal(mat_op(x), true_result) out = mat_op.range.element() mat_op(x, out=out) assert all_almost_equal(out, true_result) -def test_matrix_op_call_explicit(): +def test_matrix_op_call_explicit(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Validate result from call to matrix op against explicit calculation.""" - mat = np.ones((3, 2)) - xarr = np.array([[[0, 1], + + space = odl.rn((3,2), impl=impl, device=device) + mat = space.one().data + + backend = space.array_backend + ns = space.array_namespace + + xarr = backend.array_constructor([[[0, 1], [2, 3]], [[4, 5], - [6, 7]]], dtype=float) + [6, 7]]], dtype=float, device=device) # Multiplication along `axis` with `mat` is the same as summation # along `axis` and stacking 3 times along the same axis for axis in range(3): - mat_op = MatrixOperator(mat, domain=odl.rn(xarr.shape), + mat_op = MatrixOperator(mat, domain=odl.rn(xarr.shape, impl=impl, device=device), axis=axis) result = mat_op(xarr) - true_result = np.repeat(np.sum(xarr, axis=axis, keepdims=True), + if impl == 'numpy': + true_result = ns.repeat(ns.sum(xarr, axis=axis, keepdims=True), + repeats=3, axis=axis) + elif impl == 'pytorch': + true_result = ns.repeat_interleave(ns.sum(xarr, axis=axis, keepdims=True), repeats=3, axis=axis) + else: + raise ValueError(f'Not implemented for impl = {impl}') assert result.shape == true_result.shape - assert np.allclose(result, true_result) + assert odl.allclose(result, true_result) -def test_matrix_op_adjoint(matrix): +def test_matrix_op_adjoint(matrix_input): """Test if the adjoint of matrix operators is correct.""" - dense_matrix = matrix - sparse_matrix = scipy.sparse.coo_matrix(dense_matrix) - - tol = 2 * matrix.size * np.finfo(matrix.dtype).resolution + dense_matrix, sparse_matrix = matrix_input + dense_matrix, backend = get_array_and_backend(dense_matrix) + impl = backend.impl + device = dense_matrix.device + ns = backend.array_namespace + tol = 2 * len(dense_matrix) * ns.finfo(dense_matrix.dtype).resolution # Default 1d case dmat_op = MatrixOperator(dense_matrix) smat_op = MatrixOperator(sparse_matrix) @@ -699,8 +883,8 @@ def test_matrix_op_adjoint(matrix): assert inner_ran == pytest.approx(inner_dom, rel=tol, abs=tol) # Multi-dimensional case - domain = odl.tensor_space((2, 2, 4), matrix.dtype) - mat_op = MatrixOperator(dense_matrix, domain, axis=2) + domain = odl.tensor_space((2, 2, 4), impl=impl, device=device) + mat_op = MatrixOperator(dense_matrix, domain, axis=2, impl=impl, device=device) x = noise_element(mat_op.domain) y = noise_element(mat_op.range) inner_ran = mat_op(x).inner(y) @@ -708,10 +892,9 @@ def test_matrix_op_adjoint(matrix): assert inner_ran == pytest.approx(inner_dom, rel=tol, abs=tol) -def test_matrix_op_inverse(): +def test_matrix_op_inverse(invertible_matrix_input): """Test if the inverse of matrix operators is correct.""" - dense_matrix = np.ones((3, 3)) + 4 * np.eye(3) # invertible - sparse_matrix = scipy.sparse.coo_matrix(dense_matrix) + dense_matrix, sparse_matrix = invertible_matrix_input # Default 1d case dmat_op = MatrixOperator(dense_matrix) @@ -725,7 +908,10 @@ def test_matrix_op_inverse(): assert all_almost_equal(x, msinv_ms_x) # Multi-dimensional case - domain = odl.tensor_space((2, 2, 3), dense_matrix.dtype) + dense_matrix, backend = get_array_and_backend(dense_matrix) + impl = backend.impl + device = dense_matrix.device + domain = odl.tensor_space((2, 2, 3), impl=impl, device=device) mat_op = MatrixOperator(dense_matrix, domain, axis=2) x = noise_element(mat_op.domain) m_x = mat_op(x) @@ -733,10 +919,11 @@ def test_matrix_op_inverse(): assert all_almost_equal(x, minv_m_x) -def test_sampling_operator_adjoint(): +def test_sampling_operator_adjoint(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Validate basic properties of `SamplingOperator.adjoint`.""" # 1d space - space = odl.uniform_discr([-1], [1], shape=(3)) + space = odl.uniform_discr([-1], [1], shape=(3), impl=impl, device=device) sampling_points = [[0, 1, 1, 0]] x = space.element([1, 2, 3]) op = odl.SamplingOperator(space, sampling_points) @@ -746,7 +933,7 @@ def test_sampling_operator_adjoint(): assert op.adjoint(op(x)).inner(x) == pytest.approx(op(x).inner(op(x))) # 2d space - space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3)) + space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3), impl=impl, device=device) x = space.element([[1, 2, 3], [4, 5, 6]]) sampling_points = [[0, 1, 1, 0], @@ -761,4 +948,4 @@ def test_sampling_operator_adjoint(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/set/domain_test.py b/odl/test/core/set/domain_test.py similarity index 98% rename from odl/test/set/domain_test.py rename to odl/test/core/set/domain_test.py index 8de3cf81ab4..fd02f868432 100644 --- a/odl/test/set/domain_test.py +++ b/odl/test/core/set/domain_test.py @@ -12,9 +12,9 @@ import pytest import odl -from odl.discr.grid import sparse_meshgrid -from odl.set.domain import IntervalProd -from odl.util.testutils import all_equal +from odl.core.discr.grid import sparse_meshgrid +from odl.core.set.domain import IntervalProd +from odl.core.util.testutils import all_equal def random_point(set_): @@ -426,4 +426,4 @@ def test_rectangle_area(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/set/sets_test.py b/odl/test/core/set/sets_test.py similarity index 96% rename from odl/test/set/sets_test.py rename to odl/test/core/set/sets_test.py index a8b1288d476..0a58d9b4fe2 100644 --- a/odl/test/set/sets_test.py +++ b/odl/test/core/set/sets_test.py @@ -10,7 +10,7 @@ import pytest import odl -from odl.set.sets import (EmptySet, UniversalSet, Strings, ComplexNumbers, +from odl.core.set.sets import (EmptySet, UniversalSet, Strings, ComplexNumbers, RealNumbers, Integers) @@ -193,4 +193,4 @@ def test_integers(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/set/space_test.py b/odl/test/core/set/space_test.py similarity index 86% rename from odl/test/set/space_test.py rename to odl/test/core/set/space_test.py index 2356c2918b4..4519b41b807 100644 --- a/odl/test/set/space_test.py +++ b/odl/test/core/set/space_test.py @@ -9,7 +9,7 @@ from __future__ import division import pytest import odl -from odl.util.testutils import simple_fixture, noise_element +from odl.core.util.testutils import simple_fixture, noise_element # --- pytest fixtures --- # @@ -65,15 +65,15 @@ def test_comparsion(linear_space): x = noise_element(linear_space) y = noise_element(linear_space) - with pytest.raises(TypeError): - x <= y - with pytest.raises(TypeError): - x < y - with pytest.raises(TypeError): - x >= y - with pytest.raises(TypeError): - x > y + + x <= y + + x < y + + x >= y + + x > y if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/space/pspace_test.py b/odl/test/core/space/pspace_test.py similarity index 67% rename from odl/test/space/pspace_test.py rename to odl/test/core/space/pspace_test.py index daecb89b7d7..f02ee6fba52 100644 --- a/odl/test/space/pspace_test.py +++ b/odl/test/core/space/pspace_test.py @@ -12,9 +12,10 @@ import operator import odl -from odl.util.testutils import ( +from odl.core.set.sets import ComplexNumbers, RealNumbers +from odl.core.util.testutils import ( all_equal, all_almost_equal, noise_elements, noise_element, simple_fixture) - +from odl.core.array_API_support.utils import get_array_and_backend exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 0.5, 1.5]) @@ -27,14 +28,14 @@ @pytest.fixture(scope="module", ids=space_ids, params=space_params) -def space(request): +def space(request, odl_impl_device_pairs): name = request.param.strip() - + impl, device = odl_impl_device_pairs if name == 'product_space': - space = odl.ProductSpace(odl.uniform_discr(0, 1, 3, dtype=complex), - odl.cn(2)) + space = odl.ProductSpace(odl.cn(3, impl=impl, device=device), + odl.cn(2, impl=impl, device=device)) elif name == 'power_space': - space = odl.ProductSpace(odl.uniform_discr(0, 1, 3, dtype=complex), 2) + space = odl.ProductSpace(odl.cn(3, impl=impl, device=device), 2) else: raise ValueError('undefined space') @@ -86,8 +87,9 @@ def test_emptyproduct(): spc[0] -def test_RxR(): - H = odl.rn(2) +def test_RxR(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) HxH = odl.ProductSpace(H, H) # Check the basic properties @@ -111,8 +113,9 @@ def test_RxR(): assert all_equal([v1, v2], u) -def test_equals_space(exponent): - r2 = odl.rn(2) +def test_equals_space(odl_impl_device_pairs, exponent): + impl, device = odl_impl_device_pairs + r2 = odl.rn(2, impl=impl, device=device) r2x3_1 = odl.ProductSpace(r2, 3, exponent=exponent) r2x3_2 = odl.ProductSpace(r2, 3, exponent=exponent) r2x4 = odl.ProductSpace(r2, 4, exponent=exponent) @@ -127,8 +130,9 @@ def test_equals_space(exponent): assert hash(r2x3_1) != hash(r2x4) -def test_equals_vec(exponent): - r2 = odl.rn(2) +def test_equals_vec(odl_impl_device_pairs, exponent): + impl, device = odl_impl_device_pairs + r2 = odl.rn(2, impl=impl, device=device) r2x3 = odl.ProductSpace(r2, 3, exponent=exponent) r2x4 = odl.ProductSpace(r2, 4, exponent=exponent) @@ -146,8 +150,9 @@ def test_equals_vec(exponent): assert x1 != z -def test_is_power_space(): - r2 = odl.rn(2) +def test_is_power_space(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r2 = odl.rn(2, impl=impl, device=device) r2x3 = odl.ProductSpace(r2, 3) assert len(r2x3) == 3 assert r2x3.is_power_space @@ -159,10 +164,11 @@ def test_is_power_space(): assert r2x3 == r2r2r2 -def test_mixed_space(): +def test_mixed_space(odl_impl_device_pairs): """Verify that a mixed productspace is handled properly.""" - r2_1 = odl.rn(2, dtype='float64') - r2_2 = odl.rn(2, dtype='float32') + impl, device = odl_impl_device_pairs + r2_1 = odl.rn(2, dtype='float64', impl=impl, device=device) + r2_2 = odl.rn(2, dtype='float32', impl=impl, device=device) pspace = odl.ProductSpace(r2_1, r2_2) assert not pspace.is_power_space @@ -176,15 +182,17 @@ def test_mixed_space(): pspace.dtype -def test_element(): - H = odl.rn(2) +def test_element(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) HxH = odl.ProductSpace(H, H) HxH.element([[1, 2], [3, 4]]) # wrong length with pytest.raises(ValueError): - HxH.element([[1, 2]]) + # The user tries to input a list of length 1. This would be broadcasted to all parts of the space if cast is True. Hence we need to explicitely set it to False if the strict semantics are desired. + HxH.element([[1, 2]], cast = False) with pytest.raises(ValueError): HxH.element([[1, 2], [3, 4], [5, 6]]) @@ -197,8 +205,9 @@ def test_element(): HxH.element([[1, 2], [3, 4, 5]]) -def test_lincomb(): - H = odl.rn(2) +def test_lincomb(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) HxH = odl.ProductSpace(H, H) v1 = H.element([1, 2]) @@ -219,8 +228,9 @@ def test_lincomb(): assert all_almost_equal(z, expected) -def test_multiply(): - H = odl.rn(2) +def test_multiply(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) HxH = odl.ProductSpace(H, H) v1 = H.element([1, 2]) @@ -233,13 +243,18 @@ def test_multiply(): z = HxH.element() expected = [v1 * u1, v2 * u2] - HxH.multiply(v, u, out=z) + z = v * u + + assert all_almost_equal(z, expected) + odl.multiply(v, u, out=z) assert all_almost_equal(z, expected) -def test_metric(): - H = odl.rn(2) + +def test_metric(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) v11 = H.element([1, 2]) v12 = H.element([5, 3]) @@ -270,7 +285,9 @@ def test_metric(): pytest.approx(max(H.dist(v11, v21), H.dist(v12, v22)))) -def test_norm(): +def test_norm(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) H = odl.rn(2) v1 = H.element([1, 2]) v2 = H.element([5, 3]) @@ -292,8 +309,9 @@ def test_norm(): assert HxH.norm(w) == pytest.approx(max(H.norm(v1), H.norm(v2))) -def test_inner(): - H = odl.rn(2) +def test_inner(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) v1 = H.element([1, 2]) v2 = H.element([5, 3]) @@ -306,13 +324,14 @@ def test_inner(): assert HxH.inner(v, u) == pytest.approx(H.inner(v1, u1) + H.inner(v2, u2)) -def test_vector_weighting(exponent): - r2 = odl.rn(2) +def test_vector_weighting(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r2 = odl.rn(2, impl=impl, device=device) r2x = r2.element([1, -1]) r2y = r2.element([-2, 3]) # inner = -5, dist = 5, norms = (sqrt(2), sqrt(13)) - r3 = odl.rn(3) + r3 = odl.rn(3, impl=impl, device=device) r3x = r3.element([3, 4, 4]) r3y = r3.element([1, -2, 1]) # inner = -1, dist = 7, norms = (sqrt(41), sqrt(6)) @@ -350,13 +369,14 @@ def test_vector_weighting(exponent): assert all_almost_equal(x.dist(y), true_dist) -def test_const_weighting(exponent): - r2 = odl.rn(2) +def test_const_weighting(exponent, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r2 = odl.rn(2, impl=impl, device=device) r2x = r2.element([1, -1]) r2y = r2.element([-2, 3]) # inner = -5, dist = 5, norms = (sqrt(2), sqrt(13)) - r3 = odl.rn(3) + r3 = odl.rn(3, impl=impl, device=device) r3x = r3.element([3, 4, 4]) r3y = r3.element([1, -2, 1]) # inner = -1, dist = 7, norms = (sqrt(41), sqrt(6)) @@ -394,7 +414,7 @@ def test_const_weighting(exponent): def custom_inner(x1, x2): inners = np.fromiter( (x1p.inner(x2p) for x1p, x2p in zip(x1.parts, x2.parts)), - dtype=x1.space[0].dtype, count=len(x1)) + dtype=x1.space[0].dtype_identifier, count=len(x1)) return x1.space.field.element(np.sum(inners)) @@ -402,7 +422,7 @@ def custom_inner(x1, x2): def custom_norm(x): norms = np.fromiter( (xp.norm() for xp in x.parts), - dtype=x.space[0].dtype, count=len(x)) + dtype=x.space[0].dtype_identifier, count=len(x)) return float(np.linalg.norm(norms, ord=1)) @@ -410,21 +430,23 @@ def custom_norm(x): def custom_dist(x1, x2): dists = np.fromiter( (x1p.dist(x2p) for x1p, x2p in zip(x1.parts, x2.parts)), - dtype=x1.space[0].dtype, count=len(x1)) + dtype=x1.space[0].dtype_identifier, count=len(x1)) return float(np.linalg.norm(dists, ord=1)) -def test_custom_funcs(): +def test_custom_funcs(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r2 = odl.rn(2, impl=impl, device=device) # Checking the standard 1-norm and standard inner product, just to # see that the functions are handled correctly. - r2 = odl.rn(2) + r2 = odl.rn(2, impl=impl, device=device) r2x = r2.element([1, -1]) r2y = r2.element([-2, 3]) # inner = -5, dist = 5, norms = (sqrt(2), sqrt(13)) - r3 = odl.rn(3) + r3 = odl.rn(3, impl=impl, device=device) r3x = r3.element([3, 4, 4]) r3y = r3.element([1, -2, 1]) # inner = -1, dist = 7, norms = (sqrt(41), sqrt(6)) @@ -476,8 +498,9 @@ def test_custom_funcs(): odl.ProductSpace(r2, r3, inner=custom_inner, weighting=2.0) -def test_power_RxR(): - H = odl.rn(2) +def test_power_RxR(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) HxH = odl.ProductSpace(H, 2) assert len(HxH) == 2 @@ -508,10 +531,11 @@ def _test_shape(space, expected_shape): assert len(space_el) == expected_shape[0] -def test_power_shape(): +def test_power_shape(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r2 = odl.rn(2, impl=impl, device=device) + r3 = odl.rn(3, impl=impl, device=device) """Check if shape and size are correct for higher-order power spaces.""" - r2 = odl.rn(2) - r3 = odl.rn(3) empty = odl.ProductSpace(field=odl.RealNumbers()) empty2 = odl.ProductSpace(r2, 0) @@ -531,8 +555,9 @@ def test_power_shape(): _test_shape(r2xr3_5_4, (5, 4, 2)) -def test_power_lincomb(): - H = odl.rn(2) +def test_power_lincomb(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) HxH = odl.ProductSpace(H, 2) v1 = H.element([1, 2]) @@ -553,8 +578,9 @@ def test_power_lincomb(): assert all_almost_equal(z, expected) -def test_power_in_place_modify(): - H = odl.rn(2) +def test_power_in_place_modify(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.rn(2, impl=impl, device=device) HxH = odl.ProductSpace(H, 2) v1 = H.element([1, 2]) @@ -577,9 +603,10 @@ def test_power_in_place_modify(): assert all_almost_equal(z, [z1, z2]) -def test_getitem_single(): - r1 = odl.rn(1) - r2 = odl.rn(2) +def test_getitem_single(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r1 = odl.rn(1, impl=impl, device=device) + r2 = odl.rn(2, impl=impl, device=device) H = odl.ProductSpace(r1, r2) assert H[-2] is r1 @@ -595,10 +622,11 @@ def test_getitem_single(): H[0, 1] -def test_getitem_slice(): - r1 = odl.rn(1) - r2 = odl.rn(2) - r3 = odl.rn(3) +def test_getitem_slice(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r1 = odl.rn(1, impl=impl, device=device) + r2 = odl.rn(2, impl=impl, device=device) + r3 = odl.rn(3, impl=impl, device=device) H = odl.ProductSpace(r1, r2, r3) assert H[:2] == odl.ProductSpace(r1, r2) @@ -608,10 +636,11 @@ def test_getitem_slice(): assert H[3:] == odl.ProductSpace(field=r1.field) -def test_getitem_fancy(): - r1 = odl.rn(1) - r2 = odl.rn(2) - r3 = odl.rn(3) +def test_getitem_fancy(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + r1 = odl.rn(1, impl=impl, device=device) + r2 = odl.rn(2, impl=impl, device=device) + r3 = odl.rn(3, impl=impl, device=device) H = odl.ProductSpace(r1, r2, r3) assert H[[0, 2]] == odl.ProductSpace(r1, r3) @@ -619,8 +648,12 @@ def test_getitem_fancy(): assert H[[0, 2]][1] is r3 -def test_element_equals(): - H = odl.ProductSpace(odl.rn(1), odl.rn(2)) +def test_element_equals(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device) + ) x = H.element([[0], [1, 2]]) assert x != 0 # test == not always true @@ -636,9 +669,13 @@ def test_element_equals(): assert x != x_4 -def test_element_getitem_int(): +def test_element_getitem_int(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test indexing of product space elements with one or several integers.""" - pspace = odl.ProductSpace(odl.rn(1), odl.rn(2)) + pspace = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device) + ) # One level of product space x0 = pspace[0].element([0]) @@ -663,10 +700,15 @@ def test_element_getitem_int(): assert z[1, 1, 1] == 2 -def test_element_getitem_slice(): +def test_element_getitem_slice(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test indexing of product space elements with slices.""" # One level of product space - pspace = odl.ProductSpace(odl.rn(1), odl.rn(2), odl.rn(3)) + pspace = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device), + odl.rn(3, impl=impl, device=device) + ) x0 = pspace[0].element([0]) x1 = pspace[1].element([1, 2]) @@ -678,8 +720,13 @@ def test_element_getitem_slice(): assert x[:2][1] is x1 -def test_element_getitem_fancy(): - pspace = odl.ProductSpace(odl.rn(1), odl.rn(2), odl.rn(3)) +def test_element_getitem_fancy(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + pspace = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device), + odl.rn(3, impl=impl, device=device) + ) x0 = pspace[0].element([0]) x1 = pspace[1].element([1, 2]) @@ -691,9 +738,13 @@ def test_element_getitem_fancy(): assert x[[0, 2]][1] is x2 -def test_element_getitem_multi(): +def test_element_getitem_multi(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test element access with multiple indices.""" - pspace = odl.ProductSpace(odl.rn(1), odl.rn(2)) + pspace = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device) + ) pspace2 = odl.ProductSpace(pspace, 3) pspace3 = odl.ProductSpace(pspace2, 2) z = pspace3.element( @@ -728,9 +779,13 @@ def test_element_getitem_multi(): [8]]]) -def test_element_setitem_single(): +def test_element_setitem_single(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test assignment of pspace parts with single indices.""" - pspace = odl.ProductSpace(odl.rn(1), odl.rn(2)) + pspace = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device), + ) x0 = pspace[0].element([0]) x1 = pspace[1].element([1, 2]) @@ -761,9 +816,14 @@ def test_element_setitem_single(): x[2] = x0 -def test_element_setitem_slice(): +def test_element_setitem_slice(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test assignment of pspace parts with slices.""" - pspace = odl.ProductSpace(odl.rn(1), odl.rn(2), odl.rn(3)) + pspace = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device), + odl.rn(3, impl=impl, device=device), + ) x0 = pspace[0].element([0]) x1 = pspace[1].element([1, 2]) @@ -789,9 +849,14 @@ def test_element_setitem_slice(): assert all_equal(x[:2][1], [-2, -2]) -def test_element_setitem_fancy(): +def test_element_setitem_fancy(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test assignment of pspace parts with lists.""" - pspace = odl.ProductSpace(odl.rn(1), odl.rn(2), odl.rn(3)) + pspace = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device), + odl.rn(3, impl=impl, device=device), + ) x0 = pspace[0].element([0]) x1 = pspace[1].element([1, 2]) @@ -817,9 +882,12 @@ def test_element_setitem_fancy(): assert all_equal(x[[0, 2]][1], [-2, -2, -2]) -def test_element_setitem_broadcast(): +def test_element_setitem_broadcast(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Test assignment of power space parts with broadcasting.""" - pspace = odl.ProductSpace(odl.rn(2), 3) + pspace = odl.ProductSpace( + odl.rn(2, impl=impl, device=device), + 3) x0 = pspace[0].element([0, 1]) x1 = pspace[1].element([2, 3]) x2 = pspace[2].element([4, 5]) @@ -835,27 +903,27 @@ def test_element_setitem_broadcast(): assert x[1] is old_x1 assert x[1] == new_x0 - -def test_unary_ops(): +def test_unary_ops(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Verify that the unary operators (`+x` and `-x`) work as expected - - space = odl.rn(3) + space = odl.rn(3, impl=impl, device=device) pspace = odl.ProductSpace(space, 2) for op in [operator.pos, operator.neg]: x_arr, x = noise_elements(pspace) - y_arr = op(x_arr) + y_arr = [op(x_) for x_ in x_arr] y = op(x) assert all_almost_equal([x, y], [x_arr, y_arr]) - -def test_operators(odl_arithmetic_op): +def test_operators(odl_arithmetic_op, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Test of the operators `+`, `-`, etc work as expected by numpy + op = odl_arithmetic_op - space = odl.rn(3) + space = odl.rn(3, impl=impl, device=device) pspace = odl.ProductSpace(space, 2) # Interactions with scalars @@ -869,7 +937,7 @@ def test_operators(odl_arithmetic_op): with pytest.raises(ZeroDivisionError): y = op(x, scalar) else: - y_arr = op(x_arr, scalar) + y_arr = [op(x_, scalar) for x_ in x_arr] y = op(x, scalar) assert all_almost_equal([x, y], [x_arr, y_arr]) @@ -877,7 +945,7 @@ def test_operators(odl_arithmetic_op): # Right op x_arr, x = noise_elements(pspace) - y_arr = op(scalar, x_arr) + y_arr = [op(scalar, x_) for x_ in x_arr] y = op(scalar, x) assert all_almost_equal([x, y], [x_arr, y_arr]) @@ -892,39 +960,42 @@ def test_operators(odl_arithmetic_op): with pytest.raises(TypeError): z = op(x, y) else: - z_arr = op(x_arr, y_arr) + z_arr = [op(x_arr, y_) for y_ in y_arr] z = op(x, y) assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr]) # non-aliased right - z_arr = op(y_arr, x_arr) + z_arr = [op(y_, x_arr) for y_ in y_arr] z = op(y, x) - assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr]) # aliased operation - z_arr = op(y_arr, y_arr) + z_arr = [op(y_arr[i], y_arr[i]) for i in range(len(y_arr))] z = op(y, y) assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr]) -def test_ufuncs(): +def test_ufuncs(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs # Cannot use fixture due to bug in pytest - H = odl.ProductSpace(odl.rn(1), odl.rn(2)) + H = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device), + ) # one arg x = H.element([[-1], [-2, -3]]) - z = x.ufuncs.absolute() + z = odl.abs(x) assert all_almost_equal(z, [[1], [2, 3]]) # one arg with out x = H.element([[-1], [-2, -3]]) y = H.element() - z = x.ufuncs.absolute(out=y) + z = odl.abs(x, out=y) assert y is z assert all_almost_equal(z, [[1], [2, 3]]) @@ -933,7 +1004,7 @@ def test_ufuncs(): y = H.element([[4], [5, 6]]) w = H.element() - z = x.ufuncs.add(y) + z = odl.add(x, y) assert all_almost_equal(z, [[5], [7, 9]]) # Two args with out @@ -941,43 +1012,46 @@ def test_ufuncs(): y = H.element([[4], [5, 6]]) w = H.element() - z = x.ufuncs.add(y, out=w) + z = odl.add(x, y, out=w) assert w is z assert all_almost_equal(z, [[5], [7, 9]]) -def test_reductions(): - H = odl.ProductSpace(odl.rn(1), odl.rn(2)) +def test_reductions(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + H = odl.ProductSpace( + odl.rn(1, impl=impl, device=device), + odl.rn(2, impl=impl, device=device), + ) x = H.element([[1], [2, 3]]) - assert x.ufuncs.sum() == 6.0 - assert x.ufuncs.prod() == 6.0 - assert x.ufuncs.min() == 1.0 - assert x.ufuncs.max() == 3.0 - + assert odl.sum(x) == 6.0 + assert odl.prod(x) == 6.0 + assert odl.min(x) == 1.0 + assert odl.max(x) == 3.0 -def test_np_reductions(): - """Check that reductions via NumPy functions work.""" - H = odl.ProductSpace(odl.rn(2), 3) - x = 2 * H.one() - assert np.sum(x) == 2 * 6 - assert np.prod(x) == 2 ** 6 -def test_array_wrap_method(): +def test_array_wrap_method(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Verify that the __array_wrap__ method for NumPy works.""" - space = odl.ProductSpace(odl.uniform_discr(0, 1, 10), 2) + sub_space = odl.rn(10, impl=impl, device=device) + space = odl.ProductSpace(sub_space, 2) x_arr, x = noise_elements(space) - y_arr = np.sin(x_arr) - y = np.sin(x) # Should yield again an ODL product space element + + y_arr = [sub_space.array_namespace.sin(sub_part) for sub_part in x_arr] + y = odl.sin(x) # Should yield again an ODL product space element assert y in space assert all_equal(y, y_arr) -def test_real_imag_and_conj(): +def test_real_imag_and_conj(odl_impl_device_pairs): + impl, device = odl_impl_device_pairs """Verify that .real .imag and .conj() work for product space elements.""" - space = odl.ProductSpace(odl.uniform_discr(0, 1, 3, dtype=complex), - odl.cn(2)) + space = odl.ProductSpace( + odl.cn(3, impl=impl, device=device), + odl.cn(2, impl=impl, device=device) + ) x = noise_element(space) # Test real @@ -996,72 +1070,81 @@ def test_real_imag_and_conj(): assert x_conj[1] == expected_result[1] -def test_real_setter_product_space(space, newpart): - """Verify that the setter for the real part of an element works. - What setting the real part means depends on the inputs; we perform a - recursive deconstruction to cover the possible cases. - Barring deeply nested products, the recursion will only be shallow - (depth 2 for a simple product space). We limit it to a depth of at - most 4, to avoid that if some bug causes an infinite recursion, - the user would get a cryptic stack-overflow error.""" - - def verify_result(x, expected_result, recursion_limit=4): - if recursion_limit <= 0: - return False - try: - # Catch scalar argument - iter(expected_result) - except TypeError: - return verify_result(x, expected_result * space.one(), - recursion_limit - 1) - if expected_result in space: - return all_equal(x.real, expected_result.real) - elif all_equal(x.real, expected_result): - return True - elif space.is_power_space: - return verify_result(x, [expected_result for _ in space], - recursion_limit - 1) - - x = noise_element(space) - x.real = newpart - - assert x in space - assert(verify_result(x, newpart)) - - return - - -def test_imag_setter_product_space(space, newpart): - """Like test_real_setter_product_space but for imaginary part.""" - - def verify_result(x, expected_result, recursion_limit=4): - if recursion_limit <= 0: - return False - try: - # Catch scalar argument - iter(expected_result) - except TypeError: - return verify_result(x, expected_result * space.one(), - recursion_limit - 1) - if expected_result in space: - # The imaginary part is by definition real, and thus the new - # imaginary part is thus the real part of the element we try to set - # the value to - return all_equal(x.imag, expected_result.real) - elif all_equal(x.imag, expected_result): - return True - elif space.is_power_space: - return verify_result(x, [expected_result for _ in space], - recursion_limit - 1) - - x = noise_element(space) - x.imag = newpart - - assert x in space - assert(verify_result(x, newpart)) - - return +# def test_real_setter_product_space(space, newpart): +# """Verify that the setter for the real part of an element works. +# What setting the real part means depends on the inputs; we perform a +# recursive deconstruction to cover the possible cases. +# Barring deeply nested products, the recursion will only be shallow +# (depth 2 for a simple product space). We limit it to a depth of at +# most 4, to avoid that if some bug causes an infinite recursion, +# the user would get a cryptic stack-overflow error.""" + +# if getattr(newpart, 'space', odl.rn(1)).field == ComplexNumbers(): +# # It is not possible to set a real part to a complex number, skip this case +# return + +# def verify_result(x, expected_result, recursion_limit=4): +# if recursion_limit <= 0: +# return False +# try: +# # Catch scalar argument +# iter(expected_result) +# except TypeError: +# return verify_result(x, expected_result * space.one(), +# recursion_limit - 1) +# if expected_result in space: +# return all_equal(x.real, expected_result.real) +# elif all_equal(x.real, expected_result): +# return True +# elif space.is_power_space: +# return verify_result(x, [expected_result for _ in space], +# recursion_limit - 1) + +# x = noise_element(space) +# x.real = newpart + +# assert x in space +# assert(verify_result(x, newpart)) + +# return + + +# def test_imag_setter_product_space(space, newpart): +# """Like test_real_setter_product_space but for imaginary part.""" + +# if getattr(newpart, 'space', odl.rn(1)).field == ComplexNumbers(): +# # The imaginary part is itself a real quantity, and +# # cannot be set to a complex value. Skip test. +# return + +# def verify_result(x, expected_result, recursion_limit=4): +# if recursion_limit <= 0: +# return False +# try: +# # Catch scalar argument +# iter(expected_result) +# except TypeError: +# return verify_result(x, expected_result * space.one(), +# recursion_limit - 1) +# if expected_result in space: +# # The imaginary part is by definition real, and thus the new +# # imaginary part is thus the real part of the element we try to set +# # the value to +# return all_equal(x.imag, expected_result.real) +# elif all_equal(x.imag, expected_result): +# return True +# elif space.is_power_space: +# return verify_result(x, [expected_result for _ in space], +# recursion_limit - 1) + +# x = noise_element(space) +# x.imag = newpart + +# assert x in space +# assert(verify_result(x, newpart)) + +# return if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/core/space/space_utils_test.py b/odl/test/core/space/space_utils_test.py new file mode 100644 index 00000000000..20aa909fc43 --- /dev/null +++ b/odl/test/core/space/space_utils_test.py @@ -0,0 +1,85 @@ +# Copyright 2014-2019 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +from __future__ import division + +import odl +from odl import vector +from odl.core.space.entry_points import TENSOR_SPACE_IMPLS +from odl.core.util.testutils import all_equal, default_precision_dict +import pytest + +error_dict = { + 'pytorch' : TypeError, + 'numpy' : ValueError +} + +def test_vector_numpy(odl_impl_device_pairs): + + impl, device = odl_impl_device_pairs + tspace = TENSOR_SPACE_IMPLS[impl]((0)) + tspace_element_type = tspace.element_type + + inp = [[1.0, 2.0, 3.0], + [4.0, 5.0, 6.0]] + + x = vector(inp, impl=impl, device=device) + + assert isinstance(x, tspace_element_type) + assert x.dtype_identifier == default_precision_dict[impl]['float'] + assert all_equal(x, inp) + + x = vector([1.0, 2.0, float('inf')], impl=impl, device=device) + assert x.dtype_identifier == default_precision_dict[impl]['float'] + assert isinstance(x, tspace_element_type) + + x = vector([1.0, 2.0, float('nan')], impl=impl, device=device) + assert x.dtype_identifier == default_precision_dict[impl]['float'] + assert isinstance(x, tspace_element_type) + + x = vector([1, 2, 3], dtype='float32', impl=impl, device=device) + assert x.dtype_identifier == 'float32' + assert isinstance(x, tspace_element_type) + + # Cn + inp = [[1 + 1j, 2, 3 - 2j], + [4 + 1j, 5, 6 - 1j]] + + x = vector(inp, impl=impl, device=device) + assert isinstance(x, tspace_element_type) + assert x.dtype_identifier == default_precision_dict[impl]['complex'] + assert all_equal(x, inp) + + x = vector([1, 2, 3], dtype='complex64', impl=impl, device=device) + assert isinstance(x, tspace_element_type) + + # Generic TensorSpace + inp = [1, 2, 3] + x = vector(inp,impl=impl, device=device) + assert isinstance(x, tspace_element_type) + assert x.dtype_identifier == 'int64' + assert all_equal(x, inp) + + inp = ['a', 'b', 'c'] + with pytest.raises(ValueError): + x = vector(inp ,impl=impl, device=device) + + inp = [1, 2, 'inf'] + with pytest.raises(error_dict[impl]): + x = vector(inp,impl=impl, device=device) + + # Scalar or empty input + x = vector(5.0 ,impl=impl, device=device) # becomes 1d, size 1 + assert x.shape == () + + x = vector([]) # becomes 1d, size 0 + assert x.shape == (0,) + + +if __name__ == '__main__': + odl.core.util.test_file(__file__) diff --git a/odl/test/core/space/tensors_test.py b/odl/test/core/space/tensors_test.py new file mode 100644 index 00000000000..b3291ee7135 --- /dev/null +++ b/odl/test/core/space/tensors_test.py @@ -0,0 +1,1336 @@ +# Copyright 2014-2020 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Unit tests for Numpy-based tensors.""" + +from __future__ import division + +import operator +import math +import pytest + +import odl +from odl.core.set.space import LinearSpaceTypeError +from odl.core.space.entry_points import TENSOR_SPACE_IMPLS +from odl.core.util.testutils import ( + all_almost_equal, all_equal, noise_array, noise_element, noise_elements, + isclose, simple_fixture) +from odl.core.array_API_support import lookup_array_backend +from odl.core.util.pytest_config import IMPL_DEVICE_PAIRS + +from odl.core.util.dtype_utils import is_complex_dtype + +# --- Test helpers --- # + +# Functions to return arrays and classes corresponding to impls. Extend +# when a new impl is available. + + +def _pos_array(space): + """Create an array with positive real entries in ``space``.""" + ns = space.array_backend.array_namespace + return ns.abs(noise_array(space)) + 0.1 + +# --- Pytest fixtures --- # + +exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 0.5, 1.5]) + +setitem_indices_params = [ + 0, [1], (1,), (0, 1), (0, 1, 2), slice(None), slice(None, None, 2), + (0, slice(None)), (slice(None), 0, slice(None, None, 2))] +setitem_indices = simple_fixture('indices', setitem_indices_params) + +getitem_indices_params = (setitem_indices_params + + [([0, 1, 1, 0], [0, 1, 1, 2]), (Ellipsis, None)]) +getitem_indices = simple_fixture('indices', getitem_indices_params) + +DEFAULT_SHAPE = (3,4) + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def tspace(request, odl_floating_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_floating_dtype, + impl=impl, + device=device + ) + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def floating_tspace(request, odl_floating_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_floating_dtype, + impl=impl, + device=device + ) + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def real_tspace(request, odl_real_floating_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_real_floating_dtype, + impl=impl, + device=device + ) + +@pytest.fixture(scope='module', params=IMPL_DEVICE_PAIRS) +def scalar_tspace(request, odl_scalar_dtype): + impl, device = request.param + return odl.tensor_space( + shape=DEFAULT_SHAPE, + dtype=odl_scalar_dtype, + impl=impl, + device=device + ) + +# --- Tests --- # +def test_init_tspace(floating_tspace): + shape = floating_tspace.shape + impl = floating_tspace.impl + dtype = floating_tspace.dtype + device = floating_tspace.device + + # Weights + constant_weighting = odl.core.space_weighting( + impl, + weight = 1.5 + ) + array_weighting = odl.core.space_weighting( + impl, + device, + weight = _pos_array(odl.rn( + shape, + impl=impl, dtype=dtype, device=device + ) + )) + + tspace_impl = TENSOR_SPACE_IMPLS[impl] + + for weighting in [constant_weighting, array_weighting, None]: + tspace_impl( + DEFAULT_SHAPE, + dtype=dtype, + device=device, + weighting=weighting + ) + +def test_properties(odl_impl_device_pairs): + """Test that the space and element properties are as expected.""" + impl, device = odl_impl_device_pairs + space = odl.tensor_space(DEFAULT_SHAPE, dtype='float32', exponent=1, weighting=2,impl=impl, device=device) + x = space.element() + + ns = space.array_namespace + assert x.space is space + assert x.ndim == space.ndim == 2 + assert x.dtype == space.dtype == getattr(ns, 'float32') + assert x.size == space.size == 12 + assert x.shape == space.shape == DEFAULT_SHAPE + assert x.itemsize == 4 + assert x.nbytes == 4 * 3 * 4 + assert x.device == device + + +def test_size(odl_tspace_impl, odl_scalar_dtype): + """Test that size handles corner cases appropriately.""" + impl = odl_tspace_impl + space = odl.tensor_space(DEFAULT_SHAPE,dtype=odl_scalar_dtype, impl=impl) + assert space.size == 12 + assert type(space.size) == int + + # Size 0 + space = odl.tensor_space((), impl=impl) + assert space.size == 0 + assert type(space.size) == int + + # Overflow test + large_space = odl.tensor_space((10000,) * 3, impl=impl) + assert large_space.size == 10000 ** 3 + assert type(space.size) == int + +def test_equals_space(tspace): + """Test equality check of spaces.""" + impl = tspace.impl + device = tspace.device + dtype=tspace.dtype + space = odl.tensor_space(3, impl=impl, dtype=dtype, device=device) + same_space = odl.tensor_space(3, impl=impl, dtype=dtype, device=device) + other_space = odl.tensor_space(4, impl=impl, dtype=dtype, device=device) + + assert space == space + assert space == same_space + assert space != other_space + assert hash(space) == hash(same_space) + assert hash(space) != hash(other_space) + + +def test_equals_elem(odl_impl_device_pairs): + """Test equality check of space elements.""" + impl, device = odl_impl_device_pairs + r3 = odl.rn(3, exponent=2, impl=impl, device=device) + r3_1 = odl.rn(3, exponent=1, impl=impl, device=device) + r4 = odl.rn(4, exponent=2, impl=impl, device=device) + r3_elem = r3.element([1, 2, 3]) + r3_same_elem = r3.element([1, 2, 3]) + r3_other_elem = r3.element([2, 2, 3]) + r3_1_elem = r3_1.element([1, 2, 3]) + r4_elem = r4.element([1, 2, 3, 4]) + + assert r3_elem == r3_elem + assert r3_elem == r3_same_elem + assert r3_elem != r3_other_elem + assert r3_elem != r3_1_elem + assert r3_elem != r4_elem + + +def test_tspace_astype(odl_impl_device_pairs): + """Test creation of a space counterpart with new dtype.""" + impl, device = odl_impl_device_pairs + real_space = odl.rn(DEFAULT_SHAPE, impl=impl, device=device) + int_space = odl.tensor_space(DEFAULT_SHAPE, dtype=int, impl=impl, device=device) + assert real_space.astype(int) == int_space + + # Test propagation of weightings and the `[real/complex]_space` properties + real = odl.rn(DEFAULT_SHAPE, weighting=1.5, impl=impl, device=device) + cplx = odl.cn(DEFAULT_SHAPE, weighting=1.5, impl=impl, device=device) + real_s = odl.rn(DEFAULT_SHAPE, weighting=1.5, dtype='float32', impl=impl, device=device) + cplx_s = odl.cn(DEFAULT_SHAPE, weighting=1.5, dtype='complex64', impl=impl, device=device) + + # Real + assert real.astype('float32') == real_s + assert real.astype('float64') is real + assert real.real_space is real + assert real.astype('complex64') == cplx_s + assert real.astype('complex128') == cplx + assert real.complex_space == cplx + + # Complex + assert cplx.astype('complex64') == cplx_s + assert cplx.astype('complex128') is cplx + assert cplx.real_space == real + assert cplx.astype('float32') == real_s + assert cplx.astype('float64') == real + assert cplx.complex_space is cplx + + +def _test_lincomb(space, a, b, discontig): + """Validate lincomb against direct result using arrays.""" + # Set slice for discontiguous arrays and get result space of slicing + # What the actual fuck + if discontig: + slc = tuple( + [slice(None)] * (space.ndim - 1) + [slice(None, None, 2)] + ) + res_space = space.element()[slc].space + else: + res_space = space + + # Unaliased arguments + [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) + if discontig: + x, y, z = x[slc], y[slc], z[slc] + xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] + zarr[:] = a * xarr + b * yarr + res_space.lincomb(a, x, b, y, out=z) + assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) + + # First argument aliased with output + [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) + if discontig: + x, y, z = x[slc], y[slc], z[slc] + xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] + + zarr[:] = a * zarr + b * yarr + res_space.lincomb(a, z, b, y, out=z) + assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) + + # Second argument aliased with output + [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) + if discontig: + x, y, z = x[slc], y[slc], z[slc] + xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] + + zarr[:] = a * xarr + b * zarr + res_space.lincomb(a, x, b, z, out=z) + assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) + + # Both arguments aliased with each other + [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) + if discontig: + x, y, z = x[slc], y[slc], z[slc] + xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] + + zarr[:] = a * xarr + b * xarr + res_space.lincomb(a, x, b, x, out=z) + assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) + + # All aliased + [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) + if discontig: + x, y, z = x[slc], y[slc], z[slc] + xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] + + zarr[:] = a * zarr + b * zarr + res_space.lincomb(a, z, b, z, out=z) + assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) + + +def test_lincomb(tspace): + """Validate lincomb against direct result using arrays and some scalars.""" + scalar_values = [0, 1, -1, 3.41] + for a in scalar_values: + for b in scalar_values: + _test_lincomb(tspace, a, b, discontig=False) + + +def test_lincomb_discontig(odl_impl_device_pairs): + """Test lincomb with discontiguous input.""" + impl, device = odl_impl_device_pairs + + scalar_values = [0, 1, -1, 3.41] + + # Use small size for small array case + tspace = odl.rn(DEFAULT_SHAPE, impl=impl, device=device) + + for a in scalar_values: + for b in scalar_values: + _test_lincomb(tspace, a, b, discontig=True) + + # Use medium size to test fallback impls + tspace = odl.rn((30, 40), impl=impl, device=device) + + for a in scalar_values: + for b in scalar_values: + _test_lincomb(tspace, a, b, discontig=True) + + +def test_lincomb_exceptions(tspace): + """Test whether lincomb raises correctly for bad output element.""" + other_space = odl.rn((4, 3), impl=tspace.impl) + + other_x = other_space.zero() + x, y, z = tspace.zero(), tspace.zero(), tspace.zero() + + with pytest.raises(LinearSpaceTypeError): + tspace.lincomb(1, other_x, 1, y, z) + + with pytest.raises(LinearSpaceTypeError): + tspace.lincomb(1, y, 1, other_x, z) + + with pytest.raises(LinearSpaceTypeError): + tspace.lincomb(1, y, 1, z, other_x) + + with pytest.raises(LinearSpaceTypeError): + tspace.lincomb([], x, 1, y, z) + + with pytest.raises(LinearSpaceTypeError): + tspace.lincomb(1, x, [], y, z) + + +def test_multiply(tspace): + """Test multiply against direct array multiplication.""" + [x_arr, y_arr, out_arr], [x, y, out] = noise_elements(tspace, 3) + out_arr = x_arr * y_arr + + tspace.multiply(x, y, out) + assert all_almost_equal([x_arr, y_arr, out_arr], [x, y, out]) + + # member method + [x_arr, y_arr, out_arr], [x, y, out] = noise_elements(tspace, 3) + out_arr = x_arr * y_arr + + x.multiply(y, out=out) + assert all_almost_equal([x_arr, y_arr, out_arr], [x, y, out]) + + +def test_multiply_exceptions(tspace): + """Test if multiply raises correctly for bad input.""" + other_space = odl.rn((4, 3)) + + other_x = other_space.zero() + x, y = tspace.zero(), tspace.zero() + + with pytest.raises(AssertionError): + tspace.multiply(other_x, x, y) + + with pytest.raises(AssertionError): + tspace.multiply(x, other_x, y) + + with pytest.raises(AssertionError): + tspace.multiply(x, y, other_x) + + +def test_power(tspace): + """Test ``**`` against direct array exponentiation.""" + [x_arr, y_arr], [x, y] = noise_elements(tspace, n=2) + y_pos = tspace.element(odl.abs(y) + 0.1) + ns = tspace.array_namespace + y_pos_arr = ns.abs(y_arr) + 0.1 + + # Testing standard positive integer power out-of-place and in-place + assert all_almost_equal(x ** 2, x_arr ** 2) + y **= 2 + y_arr **= 2 + assert all_almost_equal(y, y_arr) + if tspace.impl == 'pytorch' and is_complex_dtype(tspace.dtype): + pass + else: + # Real number and negative integer power + assert all_almost_equal(y_pos ** 1.3, y_pos_arr ** 1.3) + assert all_almost_equal(y_pos ** (-3), y_pos_arr ** (-3)) + y_pos **= 2.5 + y_pos_arr **= 2.5 + assert all_almost_equal(y_pos, y_pos_arr) + + # Array raised to the power of another array, entry-wise + assert all_almost_equal(y_pos ** x, y_pos_arr ** x_arr) + y_pos **= x.real + y_pos_arr **= x_arr.real + assert all_almost_equal(y_pos, y_pos_arr) + + +def test_unary_ops(tspace): + """Verify that the unary operators (`+x` and `-x`) work as expected.""" + for op in [operator.pos, operator.neg]: + x_arr, x = noise_elements(tspace) + + y_arr = op(x_arr) + y = op(x) + + assert all_almost_equal([x, y], [x_arr, y_arr]) + + +def test_scalar_operator(tspace, odl_arithmetic_op): + """Verify binary operations with scalars. + + Verifies that the statement y = op(x, scalar) gives equivalent results + to NumPy. + """ + op = odl_arithmetic_op + if op in (operator.truediv, operator.itruediv): + ndigits = int(-math.log10(tspace.finfo().resolution) // 2) + else: + ndigits = int(-math.log10(tspace.finfo().resolution)) + + for scalar in [-31.2, -1, 0, 1, 2.13]: + x_arr, x = noise_elements(tspace) + # Left op + if scalar == 0 and op in [operator.truediv, operator.itruediv]: + # Check for correct zero division behaviour + with pytest.raises(ZeroDivisionError): + y = op(x, scalar) + else: + y_arr = op(x_arr, scalar) + y = op(x, scalar) + assert all_almost_equal([x, y], [x_arr, y_arr], ndigits) + + # right op + x_arr, x = noise_elements(tspace) + + y_arr = op(scalar, x_arr) + y = op(scalar, x) + + + assert all_almost_equal([x, y], [x_arr, y_arr], ndigits) + + +def test_binary_operator(tspace, odl_arithmetic_op): + """Verify binary operations with tensors. + + Verifies that the statement z = op(x, y) gives equivalent results + to NumPy. + """ + op = odl_arithmetic_op + if op in (operator.truediv, operator.itruediv): + ndigits = int(-math.log10(tspace.finfo().resolution) // 2) + else: + ndigits = int(-math.log10(tspace.finfo().resolution)) + + [x_arr, y_arr], [x, y] = noise_elements(tspace, 2) + + # non-aliased left + z_arr = op(x_arr, y_arr) + z = op(x, y) + + assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr], ndigits) + + # non-aliased right + z_arr = op(y_arr, x_arr) + z = op(y, x) + + assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr], ndigits) + + # aliased operation + z_arr = op(x_arr, x_arr) + z = op(x, x) + + assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr], ndigits) + + +def test_assign(tspace): + """Test the assign method using ``==`` comparison.""" + x = noise_element(tspace) + x_old = x + y = noise_element(tspace) + + y.assign(x) + + assert y == x + assert y is not x + assert x is x_old + + # test alignment + x *= 2 + assert y != x + + +def test_inner(tspace): + """Test the inner method against numpy.vdot.""" + xarr, xd = noise_elements(tspace) + yarr, yd = noise_elements(tspace) + ns = tspace.array_namespace + # TODO: add weighting + correct_inner = tspace.array_backend.to_cpu( + ns.vdot(yarr.ravel(), xarr.ravel()) + ) + assert tspace.inner(xd, yd) == pytest.approx(correct_inner) + assert xd.inner(yd) == pytest.approx(correct_inner) + + +def test_inner_exceptions(tspace): + """Test if inner raises correctly for bad input.""" + other_space = odl.rn((4, 3)) + other_x = other_space.zero() + x = tspace.zero() + + with pytest.raises(LinearSpaceTypeError): + tspace.inner(other_x, x) + + with pytest.raises(LinearSpaceTypeError): + tspace.inner(x, other_x) + + +def test_norm(tspace): + """Test the norm method against numpy.linalg.norm.""" + xarr, x = noise_elements(tspace) + xarr, x = noise_elements(tspace) + + ns = tspace.array_namespace + correct_norm = tspace.array_backend.to_cpu( + ns.linalg.norm(xarr.ravel()) + ) + + array_backend = tspace.array_backend + real_dtype = array_backend.identifier_of_dtype(tspace.real_dtype) + if real_dtype == "float16": + tolerance = 5e-3 + elif real_dtype == "float32": + tolerance = 5e-7 + elif real_dtype == "float64" or real_dtype == float: + tolerance = 1e-15 + elif real_dtype == "float128": + tolerance = 1e-19 + else: + raise TypeError(f"No known tolerance for dtype {tspace.dtype}") + + assert tspace.norm(x) == pytest.approx(correct_norm, rel=tolerance) + assert x.norm() == pytest.approx(correct_norm, rel=tolerance) + + + correct_norm = ns.linalg.norm(xarr.ravel()) + +def test_norm_exceptions(tspace): + """Test if norm raises correctly for bad input.""" + other_space = odl.rn((4, 3)) + other_x = other_space.zero() + + with pytest.raises(LinearSpaceTypeError): + tspace.norm(other_x) + + +def test_pnorm(exponent, odl_impl_device_pairs): + """Test the norm method with p!=2 against numpy.linalg.norm.""" + impl, device = odl_impl_device_pairs + space_list = [ + odl.rn(DEFAULT_SHAPE, exponent=exponent,device=device, impl=impl), + odl.cn(DEFAULT_SHAPE, exponent=exponent,device=device, impl=impl) + ] + for tspace in space_list: + xarr, x = noise_elements(tspace) + ns = tspace.array_namespace + correct_norm = tspace.array_backend.to_cpu(ns.linalg.norm(xarr.ravel(), ord=exponent)) + + assert tspace.norm(x) == pytest.approx(correct_norm) + assert x.norm() == pytest.approx(correct_norm) + + +def test_dist(tspace): + """Test the dist method against numpy.linalg.norm of the difference.""" + + [xarr, yarr], [x, y] = noise_elements(tspace, n=2) + + [xarr, yarr], [x, y] = noise_elements(tspace, n=2) + + ns = tspace.array_namespace + correct_dist = tspace.array_backend.to_cpu( + ns.linalg.norm((xarr - yarr).ravel()) + ) + + array_backend = tspace.array_backend + real_dtype = array_backend.identifier_of_dtype(tspace.real_dtype) + + if real_dtype == "float16": + tolerance = 5e-3 + elif real_dtype == "float32": + tolerance = 5e-7 + elif real_dtype == "float64" or real_dtype == float: + tolerance = 1e-15 + elif real_dtype == "float128": + tolerance = 1e-19 + + else: + raise TypeError(f"No known tolerance for dtype {tspace.dtype}") + + assert tspace.dist(x, y) == pytest.approx(correct_dist, rel=tolerance) + assert x.dist(y) == pytest.approx(correct_dist, rel=tolerance) + + + +# def test_dist_exceptions(odl_tspace_impl): +# """Test if dist raises correctly for bad input.""" +# for device in AVAILABLE_DEVICES[odl_tspace_impl]: +# tspace = odl.tensor_space(DEFAULT_SHAPE, impl=odl_tspace_impl, device=device) +# other_space = odl.rn((4, 3)) +# other_x = other_space.zero() +# x = tspace.zero() + +# with pytest.raises(LinearSpaceTypeError): +# tspace.dist(other_x, x) + +# with pytest.raises(LinearSpaceTypeError): +# tspace.dist(x, other_x) + + +def test_pdist(odl_impl_device_pairs, exponent): + """Test the dist method with p!=2 against numpy.linalg.norm of diff.""" + impl, device = odl_impl_device_pairs + spaces = [ + odl.rn(DEFAULT_SHAPE, exponent=exponent, impl=impl, device=device), + odl.cn(DEFAULT_SHAPE, exponent=exponent, impl=impl, device=device) + ] + # cls = odl.core.space.entry_points.tensor_space_impl(impl) + + # if complex in cls.available_dtypes: + # spaces.append(odl.cn(DEFAULT_SHAPE, exponent=exponent, impl=impl)) + + for space in spaces: + [xarr, yarr], [x, y] = noise_elements(space, n=2) + ns = space.array_namespace + correct_dist = space.array_backend.to_cpu(ns.linalg.norm((xarr - yarr).ravel(), ord=exponent)) + assert space.dist(x, y) == pytest.approx(correct_dist) + assert x.dist(y) == pytest.approx(correct_dist) + + +def test_element_getitem(odl_impl_device_pairs, getitem_indices): + """Check if getitem produces correct values, shape and other stuff.""" + impl, device = odl_impl_device_pairs + space = odl.tensor_space((2, 3, 4), dtype='float32', exponent=1, + weighting=2, impl=impl, device=device) + x_arr, x = noise_elements(space) + + x_arr_sliced = x_arr[getitem_indices] + sliced_shape = x_arr_sliced.shape + x_sliced = x[getitem_indices] + + if x_arr_sliced.ndim == 0: + try: + assert x_arr_sliced == x_sliced + except IndexError: + assert x_arr_sliced[0] == x_sliced + else: + assert x_sliced.shape == sliced_shape + assert all_equal(x_sliced, x_arr_sliced) + + # Check that the space properties are preserved + sliced_spc = x_sliced.space + assert sliced_spc.shape == sliced_shape + assert sliced_spc.dtype == space.dtype + assert sliced_spc.exponent == space.exponent + assert sliced_spc.weighting == space.weighting + + # Check that we have a view that manipulates the original array + # (or not, depending on indexing style) + x_arr_sliced[:] = 0 + x_sliced[:] = 0 + assert all_equal(x_arr, x) + + +def test_element_setitem(setitem_indices, odl_impl_device_pairs): + """Check if setitem produces the same result as NumPy.""" + impl, device = odl_impl_device_pairs + space = odl.tensor_space((2, 3, 4), dtype='float32', exponent=1, + weighting=2, impl=impl, device=device) + x_arr, x = noise_elements(space) + + + + x_arr_sliced = x_arr[setitem_indices] + sliced_shape = x_arr_sliced.shape + + ns = space.array_namespace + # Setting values with scalars + x_arr[setitem_indices] = 2.3 + x[setitem_indices] = 2.3 + assert all_equal(x, x_arr) + + # Setting values with arrays + rhs_arr = ns.ones(sliced_shape, device=device) + x_arr[setitem_indices] = rhs_arr + x[setitem_indices] = rhs_arr + assert all_equal(x, x_arr) + + # Using a list of lists + rhs_list = (-ns.ones(sliced_shape, device=device)).tolist() + if impl != 'pytorch': + x_arr[setitem_indices] = rhs_list + x[setitem_indices] = rhs_list + assert all_equal(x, x_arr) + + +def test_element_getitem_bool_array(odl_impl_device_pairs): + """Check if getitem with boolean array yields the same result as NumPy.""" + impl, device = odl_impl_device_pairs + space = odl.tensor_space((2, 3, 4), dtype='float32', exponent=1, + weighting=2, impl=impl, device=device) + bool_space = odl.tensor_space((2, 3, 4), dtype=bool, impl=impl, device=device) + x_arr, x = noise_elements(space) + cond_arr, cond = noise_elements(bool_space) + + x_arr_sliced = x_arr[cond_arr] + x_sliced = x[cond] + assert all_equal(x_arr_sliced, x_sliced) + + # Check that the space properties are preserved + sliced_spc = x_sliced.space + assert sliced_spc.shape == x_arr_sliced.shape + assert sliced_spc.dtype == space.dtype + assert sliced_spc.exponent == space.exponent + assert sliced_spc.weighting == space.weighting + + +def test_element_setitem_bool_array(odl_impl_device_pairs): + """Check if setitem produces the same result as NumPy.""" + impl, device = odl_impl_device_pairs + space = odl.tensor_space((2, 3, 4), dtype='float32', exponent=1, + weighting=2, impl=impl, device=device) + bool_space = odl.tensor_space((2, 3, 4), dtype=bool, impl=impl, device=device) + x_arr, x = noise_elements(space) + cond_arr, cond = noise_elements(bool_space) + ns = space.array_namespace + + x_arr_sliced = x_arr[cond_arr] + sliced_shape = x_arr_sliced.shape + + # Setting values with scalars + x_arr[cond_arr] = 2.3 + x[cond] = 2.3 + assert all_equal(x, x_arr) + + # Setting values with arrays + rhs_arr = ns.ones(sliced_shape, device=device) + x_arr[cond_arr] = rhs_arr + x[cond] = rhs_arr + assert all_equal(x, x_arr) + + # Using a list of lists + rhs_list = (-ns.ones(sliced_shape, device=device)).tolist() + if impl == 'pytorch': + cond_arr = bool_space.array_backend.array_constructor(cond_arr, device=device) + rhs_list = bool_space.array_backend.array_constructor(rhs_list, device=device) + else: + x_arr[cond_arr] = rhs_list + x[cond] = rhs_list + assert all_equal(x, x_arr) + + +def test_transpose(odl_impl_device_pairs): + """Test the .T property of tensors against plain inner product.""" + impl, device = odl_impl_device_pairs + spaces = [ + odl.rn(DEFAULT_SHAPE, impl=impl, device=device), + odl.cn(DEFAULT_SHAPE, impl=impl, device=device) + ] + # cls = odl.core.space.entry_points.tensor_space_impl(impl) + # if complex in cls.available_dtypes(): + # spaces.append(odl.cn(DEFAULT_SHAPE, impl=impl)) + + for space in spaces: + x = noise_element(space) + y = noise_element(space) + + # Assert linear operator + assert isinstance(x.T, odl.Operator) + assert x.T.is_linear + + # Check result + assert x.T(y) == pytest.approx(y.inner(x)) + assert all_equal(x.T.adjoint(1.0), x) + + # x.T.T returns self + assert x.T.T == x + +# TODO: SHOULD that be supported??? +def test_multiply_by_scalar(tspace): + """Verify that mult. with NumPy scalars preserves the element type.""" + x = tspace.zero() + + # Simple scalar multiplication, as often performed in user code. + # This invokes the __mul__ and __rmul__ methods of the ODL space classes. + # Strictly speaking this operation loses precision if `tspace.dtype` has + # fewer than 64 bits (Python decimal literals are double precision), but + # it would be too cumbersome to force a change in the space's dtype. + output = x * 1.0 + assert x * 1.0 in tspace + assert 1.0 * x in tspace + + +def test_member_copy(odl_impl_device_pairs): + """Test copy method of elements.""" + impl, device = odl_impl_device_pairs + space = odl.tensor_space(DEFAULT_SHAPE, dtype='float32', exponent=1, weighting=2, impl=impl, device = device) + x = noise_element(space) + + y = x.copy() + assert x == y + assert y is not x + + # Check that result is not aliased + x *= 2 + assert x != y + + +def test_python_copy(odl_impl_device_pairs): + """Test compatibility with the Python copy module.""" + import copy + impl, device = odl_impl_device_pairs + space = odl.tensor_space(DEFAULT_SHAPE, dtype='float32', exponent=1, weighting=2, impl=impl, device = device) + x = noise_element(space) + + # Shallow copy + y = copy.copy(x) + assert x == y + assert y is not x + + # Check that result is not aliased + x *= 2 + assert x != y + + # Deep copy + z = copy.deepcopy(x) + assert x == z + assert z is not x + + # Check that result is not aliased + x *= 2 + assert x != z + +def test_conversion_to_scalar(odl_impl_device_pairs): + """Test conversion of size-1 vectors/tensors to scalars.""" + impl, device = odl_impl_device_pairs + space = odl.rn(1, impl=impl, device=device) + # Size 1 real space + value = 1.5 + element = space.element(value) + + assert int(element) == int(value) + assert float(element) == float(value) + assert complex(element) == complex(value) + + # Size 1 complex space + value = 1.5 + 0.5j + element = odl.cn(1).element(value) + assert complex(element) == complex(value) + + # Size 1 multi-dimensional space + value = 2.1 + element = odl.rn((1, 1, 1)).element(value) + assert float(element) == float(value) + + # Too large space + element = odl.rn(2).one() + + with pytest.raises(AssertionError): + int(element) + with pytest.raises(AssertionError): + float(element) + with pytest.raises(AssertionError): + complex(element) + +def test_bool_conversion(odl_impl_device_pairs): + """Verify that the __bool__ function works.""" + impl, device = odl_impl_device_pairs + space = odl.tensor_space(2, dtype='float32', impl=impl, device = device) + x = space.element([0, 1]) + + with pytest.raises(ValueError): + bool(x) + assert odl.any(x) + assert any(x) + assert not odl.all(x) + assert not all(x) + + space = odl.tensor_space(1, dtype='float32', impl=impl, device = device) + x = space.one() + + assert odl.any(x) + assert any(x) + assert odl.all(x) + assert all(x) + +def test_array_wrap_method(odl_impl_device_pairs): + """Verify that the __array_wrap__ method for NumPy works.""" + impl, device = odl_impl_device_pairs + space = odl.tensor_space(DEFAULT_SHAPE, dtype='float32', exponent=1, weighting=2,impl=impl, device=device) + x_arr, x = noise_elements(space) + y_arr = space.array_namespace.sin(x_arr) + y = odl.sin(x) # Should yield again an ODL tensor + + assert all_equal(y, y_arr) + assert y in space + + +def test_conj(tspace): + """Test complex conjugation of tensors.""" + xarr, x = noise_elements(tspace) + + xconj = x.conj() + assert all_equal(xconj, xarr.conj()) + + y = tspace.element() + xconj = x.conj(out=y) + assert xconj is y + assert all_equal(y, xarr.conj()) + + +# --- Weightings (Numpy) --- # + + +def test_array_weighting_init(real_tspace): + """Test initialization of array weightings.""" + exponent = 2 + array_backend = real_tspace.array_backend + impl = real_tspace.impl + weight_arr = _pos_array(real_tspace) + weight_elem = real_tspace.element(weight_arr) + + weighting_arr = odl.core.space_weighting(impl, device=real_tspace.device, weight=weight_arr, exponent=exponent) + weighting_elem = odl.core.space_weighting(impl, device=real_tspace.device, + weight=weight_elem, exponent=exponent) + + assert isinstance(weighting_arr.weight, array_backend.array_type) + assert isinstance(weighting_elem.weight, array_backend.array_type) + + +def test_array_weighting_array_is_valid(odl_impl_device_pairs): + """Test the is_valid method of array weightings.""" + impl, device = odl_impl_device_pairs + space = odl.rn(DEFAULT_SHAPE, impl=impl, device=device) + weight_arr = _pos_array(space) + + assert odl.core.space_weighting(impl, weight=weight_arr, device=device) + # Invalid + weight_arr[0] = 0 + with pytest.raises(ValueError): + odl.core.space_weighting(impl, weight=weight_arr, device=device) + + +def test_array_weighting_equals(odl_impl_device_pairs): + """Test the equality check method of array weightings.""" + impl, device = odl_impl_device_pairs + space = odl.rn(5, impl=impl, device=device) + weight_arr = _pos_array(space) + weight_elem = space.element(weight_arr) + + weighting_arr = odl.core.space_weighting(impl, weight=weight_arr, device=device) + weighting_arr2 = odl.core.space_weighting(impl, weight=weight_arr, device=device) + weighting_elem = odl.core.space_weighting(impl, weight=weight_elem, device=device) + weighting_elem_copy = odl.core.space_weighting(impl, weight=weight_elem.copy(), device=device) + weighting_elem2 = odl.core.space_weighting(impl, weight=weight_elem, device=device) + weighting_other_arr = odl.core.space_weighting(impl, weight=weight_arr +1 , device=device) + weighting_other_exp = odl.core.space_weighting(impl, weight=weight_arr +1, exponent=1, device=device) + + assert weighting_arr == weighting_arr2 + assert weighting_arr == weighting_elem + assert weighting_arr == weighting_elem_copy + assert weighting_elem == weighting_elem2 + assert weighting_arr != weighting_other_arr + assert weighting_arr != weighting_other_exp + + +def test_array_weighting_equiv(odl_impl_device_pairs): + """Test the equiv method of Numpy array weightings.""" + impl, device = odl_impl_device_pairs + space = odl.rn(5, impl=impl, device=device) + weight_arr = _pos_array(space) + weight_elem = space.element(weight_arr) + different_arr = weight_arr + 1 + w_arr = odl.core.space_weighting(impl, weight=weight_arr, device=device) + w_elem = odl.core.space_weighting(impl, weight=weight_elem, device=device) + w_different_arr = odl.core.space_weighting(impl, weight=different_arr, device=device) + + ns = space.array_namespace + + # Equal -> True + assert w_arr.equiv(w_arr) + assert w_arr.equiv(w_elem) + # Different array -> False + assert not w_arr.equiv(w_different_arr) + + # Test shortcuts in the implementation + const_arr = ns.ones(space.shape, device=device) * 1.5 + w_const_arr = odl.core.space_weighting(impl, weight=const_arr, device=device) + w_const = odl.core.space_weighting(impl, weight=1.5, device=device) + w_wrong_const = odl.core.space_weighting(impl, weight=1, device=device) + w_wrong_exp = odl.core.space_weighting(impl, weight=1.5, exponent=1, device=device) + + assert w_const_arr.equiv(w_const) + assert not w_const_arr.equiv(w_wrong_const) + assert not w_const_arr.equiv(w_wrong_exp) + + # Bogus input + assert not w_const_arr.equiv(True) + assert not w_const_arr.equiv(object) + assert not w_const_arr.equiv(None) + + +def test_array_weighting_inner(tspace): + """Test inner product in a weighted space.""" + [xarr, yarr], [x, y] = noise_elements(tspace, 2) + + weight_arr = _pos_array(tspace) + weighting = odl.core.space_weighting( + impl = tspace.impl, + weight = weight_arr, + device = tspace.device + ) + + ns = tspace.array_namespace + + true_inner = ns.vdot(yarr.ravel(), (xarr * weight_arr).ravel()) + assert weighting.inner(x.data, y.data) == pytest.approx(tspace.array_backend.to_cpu(true_inner)) + + # Exponent != 2 -> no inner product, should raise + with pytest.raises(NotImplementedError): + odl.core.space_weighting(impl = tspace.impl, weight =weight_arr, exponent=1.0, device = tspace.device).inner(x.data, y.data) + + +def test_array_weighting_norm(tspace, exponent): + """Test norm in a weighted space.""" + ns = tspace.array_namespace + rtol = math.sqrt(ns.finfo(tspace.dtype).resolution) + xarr, x = noise_elements(tspace) + + weight_arr = _pos_array(tspace) + weighting = odl.core.space_weighting(impl = tspace.impl, weight=weight_arr, exponent=exponent, device =tspace.device) + + if exponent == float('inf'): + true_norm = ns.linalg.vector_norm( + weight_arr * xarr, + ord=exponent) + else: + true_norm = ns.linalg.norm( + (weight_arr ** (1 / exponent) * xarr).ravel(), + ord=exponent) + + assert weighting.norm(x.data) == pytest.approx( + tspace.array_backend.to_cpu(true_norm), rel=rtol) + + +def test_array_weighting_dist(tspace, exponent): + """Test dist product in a weighted space.""" + ns = tspace.array_namespace + rtol = math.sqrt(ns.finfo(tspace.dtype).resolution) + [xarr, yarr], [x, y] = noise_elements(tspace, n=2) + + weight_arr = _pos_array(tspace) + weighting = odl.core.space_weighting(impl = tspace.impl, weight=weight_arr, exponent=exponent, device=tspace.device) + + if exponent == float('inf'): + true_dist = ns.linalg.norm( + (weight_arr * (xarr - yarr)).ravel(), + ord=float('inf')) + else: + true_dist = ns.linalg.norm( + (weight_arr ** (1 / exponent) * (xarr - yarr)).ravel(), + ord=exponent) + + assert weighting.dist(x.data, y.data) == pytest.approx( + tspace.array_backend.to_cpu(true_dist), rel=rtol) + + +def test_const_weighting_init(odl_impl_device_pairs, exponent): + """Test initialization of constant weightings.""" + impl, device = odl_impl_device_pairs + # Just test if the code runs + odl.core.space_weighting(impl=impl, weight=1.5, exponent=exponent, device=device) + with pytest.raises(ValueError): + odl.core.space_weighting(impl=impl, weight=0, exponent=exponent, device=device) + with pytest.raises(ValueError): + odl.core.space_weighting(impl=impl, weight=-1.5, exponent=exponent, device=device) + with pytest.raises(ValueError): + odl.core.space_weighting(impl=impl, weight=float('inf'), exponent=exponent, device=device) + + +def test_const_weighting_comparison(tspace): + """Test equality to and equivalence with const weightings.""" + odl_tspace_impl = tspace.impl + ns = tspace.array_namespace + constant = 1.5 + + w_const = odl.core.space_weighting(impl=odl_tspace_impl, weight=constant) + w_const2 = odl.core.space_weighting(impl=odl_tspace_impl, weight=constant) + w_other_const = odl.core.space_weighting(impl=odl_tspace_impl, weight=constant+1) + w_other_exp = odl.core.space_weighting(impl=odl_tspace_impl, weight=constant, exponent = 1) + + const_arr = constant * ns.ones(DEFAULT_SHAPE) + + w_const_arr = odl.core.space_weighting(impl=odl_tspace_impl, weight=const_arr) + other_const_arr = (constant + 1) * ns.ones(DEFAULT_SHAPE) + w_other_const_arr = odl.core.space_weighting(impl=odl_tspace_impl, weight=other_const_arr) + + assert w_const == w_const + assert w_const == w_const2 + assert w_const2 == w_const + # Different but equivalent + assert w_const.equiv(w_const_arr) + assert w_const != w_const_arr + + # Not equivalent + assert not w_const.equiv(w_other_exp) + assert w_const != w_other_exp + assert not w_const.equiv(w_other_const) + assert w_const != w_other_const + assert not w_const.equiv(w_other_const_arr) + assert w_const != w_other_const_arr + + # Bogus input + assert not w_const.equiv(True) + assert not w_const.equiv(object) + assert not w_const.equiv(None) + + +def test_const_weighting_inner(tspace): + """Test inner product with const weighting.""" + [xarr, yarr], [x, y] = noise_elements(tspace, 2) + + ns = tspace.array_namespace + + constant = 1.5 + true_result_const = constant * ns.vecdot(yarr.ravel(), xarr.ravel()) + + w_const = odl.core.space_weighting(impl=tspace.impl, weight=constant) + + assert w_const.inner(x, y) == true_result_const + + # Exponent != 2 -> no inner + w_const = odl.core.space_weighting(impl=tspace.impl, weight=constant, exponent=1) + with pytest.raises(NotImplementedError): + w_const.inner(x, y) + + +def test_const_weighting_norm(tspace, exponent): + """Test norm with const weighting.""" + xarr, x = noise_elements(tspace) + + ns = tspace.array_namespace + + constant = 1.5 + if exponent == float('inf'): + factor = constant + else: + factor = constant ** (1 / exponent) + + true_norm = float(factor * ns.linalg.norm(xarr.ravel(), ord=exponent)) + + w_const = odl.core.space_weighting(impl=tspace.impl, weight=constant, exponent=exponent) + + array_backend = tspace.array_backend + real_dtype = array_backend.identifier_of_dtype(tspace.real_dtype) + + if real_dtype == "float16": + tolerance = 5e-2 + elif real_dtype == "float32": + tolerance = 5e-6 + elif real_dtype == "float64" or real_dtype == float: + tolerance = 1e-15 + elif real_dtype == "float128": + tolerance = 1e-19 + else: + raise TypeError(f"No known tolerance for dtype {real_dtype}") + + # assert w_const.norm(x) == pytest.approx(true_norm, rel=tolerance) + assert isclose(w_const.norm(x), true_norm, rtol=tolerance) + + +def test_const_weighting_dist(tspace, exponent): + """Test dist with const weighting.""" + [xarr, yarr], [x, y] = noise_elements(tspace, 2) + + ns = tspace.array_namespace + + constant = 1.5 + if exponent == float('inf'): + factor = constant + else: + factor = constant ** (1 / exponent) + true_dist = float(factor * ns.linalg.norm((xarr - yarr).ravel(), ord=exponent)) + w_const = w_const = odl.core.space_weighting(impl=tspace.impl, weight=constant, exponent=exponent) + + array_backend = tspace.array_backend + real_dtype = array_backend.identifier_of_dtype(tspace.real_dtype) + if real_dtype == "float16": + tolerance = 5e-2 + elif real_dtype == "float32": + tolerance = 5e-7 + elif real_dtype == "float64" or real_dtype == float: + tolerance = 1e-15 + elif real_dtype == "float128": + tolerance = 1e-19 + else: + raise TypeError(f"No known tolerance for dtype {real_dtype}") + + # assert w_const.dist(x, y) == pytest.approx(true_dist, rel=tolerance) + assert isclose(w_const.dist(x,y), true_dist, rtol=tolerance) + + + +def test_custom_inner(tspace): + """Test weighting with a custom inner product.""" + ns = tspace.array_namespace + rtol = math.sqrt(ns.finfo(tspace.dtype).resolution) + + [xarr, yarr], [x, y] = noise_elements(tspace, 2) + + def inner(x, y): + return ns.linalg.vecdot(y.ravel(), x.ravel()) + + def inner_lspacelement(x, y): + return ns.linalg.vecdot(y.data.ravel(), x.data.ravel()) + + def dot(x,y): + return ns.dot(x,y) + + w = odl.core.space_weighting(impl=tspace.impl, inner=inner_lspacelement) + w_same = odl.core.space_weighting(impl=tspace.impl, inner=inner_lspacelement) + w_other = odl.core.space_weighting(impl=tspace.impl, inner=dot) + + assert w == w + assert w == w_same + assert w != w_other + + true_inner = inner(xarr, yarr) + assert isclose(w.inner(x, y), true_inner) + + true_norm = float(ns.linalg.norm(xarr.ravel())) + assert isclose(w.norm(x), true_norm) + + true_dist = float(ns.linalg.norm((xarr - yarr).ravel())) + assert isclose( w.dist(x, y), true_dist, rtol=rtol) + + with pytest.raises(ValueError): + odl.core.space_weighting(impl=tspace.impl, inner=inner, weight = 1) + + +def test_custom_norm(tspace): + """Test weighting with a custom norm.""" + [xarr, yarr], [x, y] = noise_elements(tspace, 2) + ns = tspace.array_namespace + + def norm(x): + return ns.linalg.norm(x) + + def other_norm(x): + return ns.linalg.norm(x, ord=1) + + w = odl.core.space_weighting(impl=tspace.impl, norm=norm) + w_same = odl.core.space_weighting(impl=tspace.impl, norm=norm) + w_other = odl.core.space_weighting(impl=tspace.impl, norm=other_norm) + + assert w == w + assert w == w_same + assert w != w_other + + with pytest.raises(NotImplementedError): + w.inner(x, y) + + true_norm = ns.linalg.norm(xarr.ravel()) + pytest.approx(tspace.norm(x), true_norm) + + true_dist = ns.linalg.norm((xarr - yarr).ravel()) + pytest.approx(tspace.dist(x, y), true_dist) + + with pytest.raises(ValueError): + odl.core.space_weighting(impl=tspace.impl, norm=norm, weight = 1) + + +def test_custom_dist(tspace): + """Test weighting with a custom dist.""" + [xarr, yarr], [x, y] = noise_elements(tspace, 2) + ns = tspace.array_namespace + def dist(x, y): + return ns.linalg.norm(x - y) + + def dist_lspace_element(x, y): + return ns.linalg.norm(x.data - y.data) + + def other_dist(x, y): + return ns.linalg.norm(x - y, ord=1) + + w = odl.core.space_weighting(impl=tspace.impl, dist=dist_lspace_element) + w_same = odl.core.space_weighting(impl=tspace.impl, dist=dist_lspace_element) + w_other = odl.core.space_weighting(impl=tspace.impl, dist=other_dist) + + assert w == w + assert w == w_same + assert w != w_other + + with pytest.raises(NotImplementedError): + w.inner(x, y) + + with pytest.raises(NotImplementedError): + w.norm(x) + + true_dist = ns.linalg.norm((xarr - yarr).ravel()) + pytest.approx(tspace.dist(x, y), true_dist) + + with pytest.raises(ValueError): + odl.core.space_weighting(impl=tspace.impl, dist=dist, weight = 1) + +def test_reduction(tspace): + """Check that the generated docstrings are not empty.""" + ## In Pytorch 2.6, max and min reductions are not implemented for ComplexDouble dtype + # Can randomly raise RuntimeWarning: overflow encountered in reduce + # Can randomly raise AssertionError: assert (nan+8.12708086701316e-308j) == tensor(nan+8.1271e-308j, dtype=torch.complex128) + x = tspace.element() + backend = tspace.array_backend.array_namespace + for name in ['sum', 'prod', 'min', 'max']: + reduction = getattr(odl, name) + reduction_arr = getattr(backend, name) + if name in ['min', 'max'] and is_complex_dtype(tspace.dtype) and tspace.impl == 'pytorch': + with pytest.raises(RuntimeError): + assert reduction(x) == reduction_arr(x.data) + else: + assert reduction(x) == reduction_arr(x.data) + + +if __name__ == '__main__': + odl.core.util.test_file(__file__) + diff --git a/odl/test/largescale/solvers/nonsmooth/default_functionals_slow_test.py b/odl/test/largescale/solvers/nonsmooth/default_functionals_slow_test.py index 865925cbd40..a0fc037916a 100644 --- a/odl/test/largescale/solvers/nonsmooth/default_functionals_slow_test.py +++ b/odl/test/largescale/solvers/nonsmooth/default_functionals_slow_test.py @@ -15,8 +15,8 @@ import scipy.special import odl -from odl.solvers.functional.functional import FunctionalDefaultConvexConjugate -from odl.util.testutils import all_almost_equal, noise_element, simple_fixture +from odl.functional.functional import FunctionalDefaultConvexConjugate +from odl.core.util.testutils import all_almost_equal, noise_element, simple_fixture # --- pytest fixtures --- # @@ -40,46 +40,46 @@ @pytest.fixture(scope="module", ids=func_ids, params=func_params) -def functional(request, linear_offset, quadratic_offset, dual): +def functional(request, linear_offset, quadratic_offset, dual, odl_impl_device_pairs): """Return functional whose proximal should be tested.""" name = request.param.strip() - - space = odl.uniform_discr(0, 1, 2) + impl, device = odl_impl_device_pairs + space = odl.uniform_discr(0, 1, 2, impl=impl, device=device) if name == 'l1': - func = odl.solvers.L1Norm(space) + func = odl.functional.L1Norm(space) elif name == 'l2': - func = odl.solvers.L2Norm(space) + func = odl.functional.L2Norm(space) elif name == 'l2^2': - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) elif name == 'kl': - func = odl.solvers.KullbackLeibler(space) + func = odl.functional.KullbackLeibler(space) elif name == 'kl_cross_ent': - func = odl.solvers.KullbackLeiblerCrossEntropy(space) + func = odl.functional.KullbackLeiblerCrossEntropy(space) elif name == 'const': - func = odl.solvers.ConstantFunctional(space, constant=2) + func = odl.functional.ConstantFunctional(space, constant=2) elif name.startswith('groupl1'): exponent = float(name.split('-')[1]) space = odl.ProductSpace(space, 2) - func = odl.solvers.GroupL1Norm(space, exponent=exponent) + func = odl.functional.GroupL1Norm(space, exponent=exponent) elif name.startswith('nuclearnorm'): outer_exp = float(name.split('-')[1]) singular_vector_exp = float(name.split('-')[2]) space = odl.ProductSpace(odl.ProductSpace(space, 2), 3) - func = odl.solvers.NuclearNorm(space, + func = odl.functional.NuclearNorm(space, outer_exp=outer_exp, singular_vector_exp=singular_vector_exp) elif name == 'quadratic': - func = odl.solvers.QuadraticForm( + func = odl.functional.QuadraticForm( operator=odl.IdentityOperator(space), vector=space.one(), constant=0.623, ) elif name == 'linear': - func = odl.solvers.QuadraticForm(vector=space.one(), constant=0.623) + func = odl.functional.QuadraticForm(vector=space.one(), constant=0.623) elif name == 'huber': - func = odl.solvers.Huber(space, gamma=0.162) + func = odl.functional.Huber(space, gamma=0.162) else: assert False @@ -87,19 +87,19 @@ def functional(request, linear_offset, quadratic_offset, dual): if linear_offset: g = noise_element(space) if name.startswith('kl'): - g = np.abs(g) + g = odl.abs(g) else: g = None quadratic_coeff = 1.32 - func = odl.solvers.FunctionalQuadraticPerturb( + func = odl.functional.FunctionalQuadraticPerturb( func, quadratic_coeff=quadratic_coeff, linear_term=g ) elif linear_offset: g = noise_element(space) if name.startswith('kl'): - g = np.abs(g) + g = odl.abs(g) func = func.translated(g) if dual: @@ -120,8 +120,8 @@ def proximal_objective(functional, x, y): return functional(y) + (1.0 / 2.0) * (x - y).norm() ** 2 -def test_proximal_defintion(functional, stepsize): - """Test the defintion of the proximal: +def test_proximal_definition(functional, stepsize): + """Test the definition of the proximal: prox[f](x) = argmin_y {f(y) + 1/2 ||x-y||^2} @@ -138,21 +138,21 @@ def test_proximal_defintion(functional, stepsize): # No implementation of the proximal for convex conj of # FunctionalQuadraticPerturb unless the quadratic term is 0. if ( - isinstance(functional, odl.solvers.FunctionalQuadraticPerturb) + isinstance(functional, odl.functional.FunctionalQuadraticPerturb) and functional.quadratic_coeff != 0 ): pytest.skip('functional has no proximal') return # No implementation of the proximal for quardartic form - if isinstance(functional, odl.solvers.QuadraticForm): + if isinstance(functional, odl.functional.QuadraticForm): pytest.skip('functional has no proximal') return # No implementation of the proximal for translations of quardartic form if ( - isinstance(functional, odl.solvers.FunctionalTranslation) - and isinstance(functional.functional, odl.solvers.QuadraticForm) + isinstance(functional, odl.functional.FunctionalTranslation) + and isinstance(functional.functional, odl.functional.QuadraticForm) ): pytest.skip('functional has no proximal') return @@ -160,8 +160,8 @@ def test_proximal_defintion(functional, stepsize): # No implementation of the proximal for convex conj of quardartic form, # except if the quadratic part is 0. if ( - isinstance(functional, odl.solvers.FunctionalQuadraticPerturb) - and isinstance(functional.functional, odl.solvers.QuadraticForm) + isinstance(functional, odl.functional.FunctionalQuadraticPerturb) + and isinstance(functional.functional, odl.functional.QuadraticForm) and functional.functional.operator is not None ): pytest.skip('functional has no proximal') @@ -198,7 +198,7 @@ def func_convex_conj_has_call(functional): return False elif ( - isinstance(f_cconj, odl.solvers.FunctionalTranslation) + isinstance(f_cconj, odl.functional.FunctionalTranslation) and isinstance(f_cconj.functional, FunctionalDefaultConvexConjugate) ): return False @@ -264,10 +264,10 @@ def test_proximal_convex_conj_kl_cross_entropy_solving_opt_problem(): id_op = odl.IdentityOperator(space) lin_ops = [id_op, id_op] lam_kl = 2.3 - kl_ce = odl.solvers.KullbackLeiblerCrossEntropy(space, prior=g) + kl_ce = odl.functional.KullbackLeiblerCrossEntropy(space, prior=g) g_funcs = [lam_kl * kl_ce, - 0.5 * odl.solvers.L2NormSquared(space).translated(a)] - f = odl.solvers.ZeroFunctional(space) + 0.5 * odl.functional.L2NormSquared(space).translated(a)] + f = odl.functional.ZeroFunctional(space) # Staring point x = space.zero() @@ -277,9 +277,9 @@ def test_proximal_convex_conj_kl_cross_entropy_solving_opt_problem(): # Explicit solution: x = W(g * exp(a)), where W is the Lambert W function. x_verify = lam_kl * scipy.special.lambertw( - (g / lam_kl) * np.exp(a / lam_kl)) + (g.data / lam_kl) * np.exp(a.data / lam_kl)) assert all_almost_equal(x, x_verify, ndigits=6) if __name__ == '__main__': - odl.util.test_file(__file__, ['-S', 'largescale']) + odl.core.util.test_file(__file__, ['-S', 'largescale']) diff --git a/odl/test/largescale/space/tensor_space_slow_test.py b/odl/test/largescale/space/tensor_space_slow_test.py index ff82ae74b9b..da311b32fc5 100644 --- a/odl/test/largescale/space/tensor_space_slow_test.py +++ b/odl/test/largescale/space/tensor_space_slow_test.py @@ -14,7 +14,8 @@ import pytest import odl -from odl.util.testutils import all_almost_equal, dtype_tol, noise_elements +from odl.core.util.dtype_utils import _universal_dtype_identifier +from odl.core.util.testutils import all_almost_equal, dtype_tol, noise_elements # --- pytest fixtures --- # @@ -28,17 +29,17 @@ @pytest.fixture(scope="module", ids=spc_ids, params=spc_params) -def tspace(odl_tspace_impl, request): +def tspace(odl_impl_device_pairs, request): spc = request.param - impl = odl_tspace_impl + impl, device = odl_impl_device_pairs if spc == 'rn': - return odl.rn(10 ** 5, impl=impl) + return odl.rn(10 ** 5, impl=impl, device=device) elif spc == '1d': - return odl.uniform_discr(0, 1, 10 ** 5, impl=impl) + return odl.uniform_discr(0, 1, 10 ** 5, impl=impl, device=device) elif spc == '3d': return odl.uniform_discr([0, 0, 0], [1, 1, 1], - [100, 100, 100], impl=impl) + [100, 100, 100], impl=impl, device=device) def test_element(tspace): @@ -46,27 +47,26 @@ def test_element(tspace): assert x in tspace # From array-like - y = tspace.element(np.zeros(tspace.shape, dtype=tspace.dtype).tolist()) + y = tspace.element(np.zeros(tspace.shape, dtype=_universal_dtype_identifier(tspace.dtype)).tolist()) assert y in tspace # Rewrap y2 = tspace.element(y) assert y2 is y - w = tspace.element(np.zeros(tspace.shape, dtype=tspace.dtype)) + w = tspace.element(np.zeros(tspace.shape, dtype=_universal_dtype_identifier(tspace.dtype))) assert w in tspace def test_zero(tspace): - assert np.allclose(tspace.zero(), 0) + assert all_almost_equal(tspace.zero(), 0) def test_one(tspace): - assert np.allclose(tspace.one(), 1) - + assert all_almost_equal(tspace.one(), 1) def test_ndarray_init(tspace): - x0 = np.arange(tspace.size).reshape(tspace.shape) + x0 = tspace.array_namespace.arange(tspace.size, device=tspace.device).reshape(tspace.shape) x = tspace.element(x0) assert all_almost_equal(x0, x) @@ -100,15 +100,15 @@ def test_inner(tspace): [xarr, yarr], [x, y] = noise_elements(tspace, 2) - correct_inner = np.vdot(yarr, xarr) * weighting_const + correct_inner = tspace.array_namespace.vdot(yarr.ravel(), xarr.ravel()) * weighting_const assert ( tspace.inner(x, y) - == pytest.approx(correct_inner, rel=dtype_tol(tspace.dtype)) + == pytest.approx(float(correct_inner), rel=dtype_tol(tspace.dtype)) ) assert ( x.inner(y) - == pytest.approx(correct_inner, rel=dtype_tol(tspace.dtype)) + == pytest.approx(float(correct_inner), rel=dtype_tol(tspace.dtype)) ) @@ -117,15 +117,15 @@ def test_norm(tspace): xarr, x = noise_elements(tspace) - correct_norm = np.linalg.norm(xarr) * np.sqrt(weighting_const) + correct_norm = tspace.array_namespace.linalg.norm(xarr) * np.sqrt(weighting_const) assert ( tspace.norm(x) - == pytest.approx(correct_norm, rel=dtype_tol(tspace.dtype)) + == pytest.approx(float(correct_norm), rel=dtype_tol(tspace.dtype)) ) assert ( x.norm() - == pytest.approx(correct_norm, rel=dtype_tol(tspace.dtype)) + == pytest.approx(float(correct_norm), rel=dtype_tol(tspace.dtype)) ) @@ -134,15 +134,15 @@ def test_dist(tspace): [xarr, yarr], [x, y] = noise_elements(tspace, 2) - correct_dist = np.linalg.norm(xarr - yarr) * np.sqrt(weighting_const) + correct_dist = tspace.array_namespace.linalg.norm(xarr - yarr) * np.sqrt(weighting_const) assert ( tspace.dist(x, y) - == pytest.approx(correct_dist, rel=dtype_tol(tspace.dtype)) + == pytest.approx(float(correct_dist), rel=dtype_tol(tspace.dtype)) ) assert ( x.dist(y) - == pytest.approx(correct_dist, rel=dtype_tol(tspace.dtype)) + == pytest.approx(float(correct_dist), rel=dtype_tol(tspace.dtype)) ) @@ -332,4 +332,4 @@ def idiv_aliased(x): if __name__ == '__main__': - odl.util.test_file(__file__, ['-S', 'largescale']) + odl.core.util.test_file(__file__, ['-S', 'largescale']) diff --git a/odl/test/largescale/tomo/analytic_slow_test.py b/odl/test/largescale/tomo/analytic_slow_test.py index 4fc8b7b9202..4e543d5ee63 100644 --- a/odl/test/largescale/tomo/analytic_slow_test.py +++ b/odl/test/largescale/tomo/analytic_slow_test.py @@ -14,10 +14,10 @@ import pytest import odl -import odl.tomo as tomo -from odl.tomo.util.testutils import ( +import odl.applications.tomo as tomo +from odl.applications.tomo.util.testutils import ( skip_if_no_astra, skip_if_no_astra_cuda, skip_if_no_skimage) -from odl.util.testutils import simple_fixture +from odl.core.util.testutils import simple_fixture # --- pytest fixtures --- # @@ -170,20 +170,20 @@ def test_fbp_reconstruction(projector): """Test filtered back-projection with various projectors.""" # Create Shepp-Logan phantom - vol = odl.phantom.shepp_logan(projector.domain, modified=False) + vol = odl.core.phantom.shepp_logan(projector.domain, modified=False) # Project data projections = projector(vol) # Create default FBP operator and apply to projections - fbp_operator = odl.tomo.fbp_op(projector) + fbp_operator = odl.applications.tomo.fbp_op(projector) # Add window if problem is in 3d. if ( - isinstance(projector.geometry, odl.tomo.ConeBeamGeometry) + isinstance(projector.geometry, odl.applications.tomo.ConeBeamGeometry) and projector.geometry.pitch != 0 ): - fbp_operator = fbp_operator * odl.tomo.tam_danielson_window(projector) + fbp_operator = fbp_operator * odl.applications.tomo.tam_danielson_window(projector) # Compute the FBP result fbp_result = fbp_operator(projections) @@ -212,13 +212,13 @@ def test_fbp_reconstruction_filters(filter_type, frequency_scaling, weighting): projector = tomo.RayTransform(discr_reco_space, geom, impl='astra_cuda') # Create Shepp-Logan phantom - vol = odl.phantom.shepp_logan(projector.domain, modified=False) + vol = odl.core.phantom.shepp_logan(projector.domain, modified=False) # Project data projections = projector(vol) # Create FBP operator with filters and apply to projections - fbp_operator = odl.tomo.fbp_op(projector, + fbp_operator = odl.applications.tomo.fbp_op(projector, filter_type=filter_type, frequency_scaling=frequency_scaling) @@ -230,4 +230,4 @@ def test_fbp_reconstruction_filters(filter_type, frequency_scaling, weighting): if __name__ == '__main__': - odl.util.test_file(__file__, ['-S', 'largescale']) + odl.core.util.test_file(__file__, ['-S', 'largescale']) diff --git a/odl/test/largescale/tomo/ray_transform_slow_test.py b/odl/test/largescale/tomo/ray_transform_slow_test.py index 583bedd936e..557f8b95aeb 100644 --- a/odl/test/largescale/tomo/ray_transform_slow_test.py +++ b/odl/test/largescale/tomo/ray_transform_slow_test.py @@ -15,9 +15,10 @@ from packaging.version import parse as parse_version import odl -from odl.tomo.util.testutils import ( +from odl.applications.tomo.util.testutils import ( skip_if_no_astra, skip_if_no_astra_cuda, skip_if_no_skimage) -from odl.util.testutils import all_almost_equal, simple_fixture +from odl.core.util.dtype_utils import _universal_dtype_identifier +from odl.core.util.testutils import all_almost_equal, simple_fixture # --- pytest fixtures --- # @@ -26,7 +27,7 @@ pytestmark = pytest.mark.suite('largescale') -dtype_params = ['float32', 'float64', 'complex64'] +dtype_params = ['float32', 'complex64'] dtype = simple_fixture('dtype', dtype_params) @@ -72,11 +73,13 @@ @pytest.fixture(scope="module", params=projectors, ids=projector_ids) -def projector(request, dtype, weighting): +def projector(request, dtype, weighting, odl_impl_device_pairs): + array_impl, device = odl_impl_device_pairs + print(f"{array_impl=}, {device=}") n_angles = 200 - geom, impl, angles = request.param.split() + geom, ray_impl, angles = request.param.split() if angles == 'uniform': apart = odl.uniform_partition(0, 2 * np.pi, n_angles) @@ -99,69 +102,69 @@ def projector(request, dtype, weighting): if geom == 'par2d': # Reconstruction space reco_space = odl.uniform_discr([-20, -20], [20, 20], [100, 100], - dtype=dtype, weighting=weighting) + dtype=dtype, weighting=weighting, impl=array_impl, device=device) # Geometry dpart = odl.uniform_partition(-30, 30, 200) - geom = odl.tomo.Parallel2dGeometry(apart, dpart) + geom = odl.applications.tomo.Parallel2dGeometry(apart, dpart) # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) + return odl.applications.tomo.RayTransform(reco_space, geom, impl=ray_impl) elif geom == 'par3d': # Reconstruction space reco_space = odl.uniform_discr([-20, -20, -20], [20, 20, 20], [100, 100, 100], - dtype=dtype, weighting=weighting) + dtype=dtype, weighting=weighting, impl=array_impl, device=device) # Geometry dpart = odl.uniform_partition([-30, -30], [30, 30], [200, 200]) - geom = odl.tomo.Parallel3dAxisGeometry(apart, dpart, axis=[1, 0, 0]) + geom = odl.applications.tomo.Parallel3dAxisGeometry(apart, dpart, axis=[1, 0, 0]) # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) + return odl.applications.tomo.RayTransform(reco_space, geom, impl=ray_impl) elif geom == 'cone2d': # Reconstruction space reco_space = odl.uniform_discr([-20, -20], [20, 20], [100, 100], - dtype=dtype) + dtype=dtype, impl=array_impl, device=device) # Geometry dpart = odl.uniform_partition(-30, 30, 200) - geom = odl.tomo.FanBeamGeometry(apart, dpart, src_radius=200, + geom = odl.applications.tomo.FanBeamGeometry(apart, dpart, src_radius=200, det_radius=100) # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) + return odl.applications.tomo.RayTransform(reco_space, geom, impl=ray_impl) elif geom == 'cone3d': # Reconstruction space reco_space = odl.uniform_discr([-20, -20, -20], [20, 20, 20], - [100, 100, 100], dtype=dtype) + [100, 100, 100], dtype=dtype, impl=array_impl, device=device) # Geometry dpart = odl.uniform_partition([-30, -30], [30, 30], [200, 200]) - geom = odl.tomo.ConeBeamGeometry( + geom = odl.applications.tomo.ConeBeamGeometry( apart, dpart, src_radius=200, det_radius=100, axis=[1, 0, 0]) # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) + return odl.applications.tomo.RayTransform(reco_space, geom, impl=ray_impl) elif geom == 'helical': # Reconstruction space reco_space = odl.uniform_discr([-20, -20, 0], [20, 20, 40], - [100, 100, 100], dtype=dtype) + [100, 100, 100], dtype=dtype, impl=array_impl, device=device) # Geometry # TODO: angles n_angles = 700 apart = odl.uniform_partition(0, 8 * 2 * np.pi, n_angles) dpart = odl.uniform_partition([-30, -3], [30, 3], [200, 20]) - geom = odl.tomo.ConeBeamGeometry(apart, dpart, pitch=5.0, + geom = odl.applications.tomo.ConeBeamGeometry(apart, dpart, pitch=5.0, src_radius=200, det_radius=100) # Ray transform - return odl.tomo.RayTransform(reco_space, geom, impl=impl) + return odl.applications.tomo.RayTransform(reco_space, geom, impl=ray_impl) else: raise ValueError('param not valid') @@ -174,15 +177,15 @@ def test_adjoint(projector): # Relative tolerance, still rather high due to imperfectly matched # adjoint in the cone beam case if ( - parse_version(odl.tomo.ASTRA_VERSION) < parse_version('1.8rc1') - and isinstance(projector.geometry, odl.tomo.ConeBeamGeometry) + parse_version(odl.applications.tomo.backends.ASTRA_VERSION) < parse_version('1.8rc1') + and isinstance(projector.geometry, odl.applications.tomo.ConeBeamGeometry) ): rtol = 0.1 else: rtol = 0.05 # Create Shepp-Logan phantom - vol = odl.phantom.shepp_logan(projector.domain, modified=True) + vol = odl.core.phantom.shepp_logan(projector.domain, modified=True) # Calculate projection proj = projector(vol) @@ -198,7 +201,7 @@ def test_adjoint_of_adjoint(projector): """Test RayTransform adjoint of adjoint.""" # Create Shepp-Logan phantom - vol = odl.phantom.shepp_logan(projector.domain, modified=True) + vol = odl.core.phantom.shepp_logan(projector.domain, modified=True) # Calculate projection proj = projector(vol) @@ -218,13 +221,13 @@ def test_adjoint_of_adjoint(projector): def test_reconstruction(projector): """Test RayTransform for reconstruction.""" if ( - isinstance(projector.geometry, odl.tomo.ConeBeamGeometry) + isinstance(projector.geometry, odl.applications.tomo.ConeBeamGeometry) and projector.geometry.pitch != 0 ): pytest.skip('reconstruction with CG is hopeless with so few angles') # Create Shepp-Logan phantom - vol = odl.phantom.shepp_logan(projector.domain, modified=True) + vol = odl.core.phantom.shepp_logan(projector.domain, modified=True) # Project data projections = projector(vol) @@ -236,11 +239,11 @@ def test_reconstruction(projector): # Make sure the result is somewhat close to the actual result maxerr = vol.norm() * 0.5 - if np.issubdtype(projector.domain.dtype, np.complexfloating): + if np.issubdtype(_universal_dtype_identifier(projector.domain.dtype), np.complexfloating): # Error has double the amount of components practically maxerr *= np.sqrt(2) assert recon.dist(vol) < maxerr if __name__ == '__main__': - odl.util.test_file(__file__, ['-S', 'largescale']) + odl.core.util.test_file(__file__, ['-S', 'largescale']) diff --git a/odl/test/largescale/trafos/fourier_slow_test.py b/odl/test/largescale/trafos/fourier_slow_test.py index 8515dfd11c1..3a1a2fb79f4 100644 --- a/odl/test/largescale/trafos/fourier_slow_test.py +++ b/odl/test/largescale/trafos/fourier_slow_test.py @@ -18,7 +18,7 @@ import pytest import odl -from odl.util.testutils import simple_fixture, skip_if_no_pyfftw +from odl.core.util.testutils import simple_fixture, skip_if_no_pyfftw # --- pytest fixtures --- # @@ -37,9 +37,9 @@ name='domain', params=[odl.uniform_discr(-2, 2, 10 ** 5), odl.uniform_discr([-2, -2, -2], [2, 2, 2], [200, 200, 200]), - odl.uniform_discr(-2, 2, 10 ** 5, dtype='complex'), + odl.uniform_discr(-2, 2, 10 ** 5, dtype=complex), odl.uniform_discr([-2, -2, -2], [2, 2, 2], [200, 200, 200], - dtype='complex')]) + dtype=complex)]) # --- FourierTransform tests --- # @@ -84,4 +84,4 @@ def charfun_freq_ball(x): if __name__ == '__main__': - odl.util.test_file(__file__, ['-S', 'largescale']) + odl.core.util.test_file(__file__, ['-S', 'largescale']) diff --git a/odl/test/solvers/functional/default_functionals_test.py b/odl/test/solvers/functional/default_functionals_test.py index 0b2f0a6cf8b..c1ea05f9785 100644 --- a/odl/test/solvers/functional/default_functionals_test.py +++ b/odl/test/solvers/functional/default_functionals_test.py @@ -10,12 +10,14 @@ from __future__ import division import numpy as np +import os +os.environ['SCIPY_ARRAY_API']='1' import scipy.special import pytest import odl -from odl.util.testutils import all_almost_equal, noise_element, simple_fixture -from odl.solvers.functional.default_functionals import ( +from odl.core.util.testutils import all_almost_equal, noise_element, simple_fixture +from odl.functional.default_functionals import ( KullbackLeiblerConvexConj, KullbackLeiblerCrossEntropyConvexConj) @@ -33,17 +35,17 @@ @pytest.fixture(scope="module", ids=space_ids, params=space_params) -def space(request, odl_tspace_impl): +def space(request, odl_impl_device_pairs): name = request.param.strip() - impl = odl_tspace_impl + impl, device = odl_impl_device_pairs if name == 'r10': - return odl.rn(10, impl=impl) + return odl.rn(10, impl=impl, device=device) elif name == 'uniform_discr': - return odl.uniform_discr(0, 1, 7, impl=impl) + return odl.uniform_discr(0, 1, 7, impl=impl, device=device) elif name == 'power_space_unif_discr': # Discretization parameters - space = odl.uniform_discr(0, 1, 7, impl=impl) + space = odl.uniform_discr(0, 1, 7, impl=impl, device=device) return odl.ProductSpace(space, 2) # --- functional tests --- # @@ -52,22 +54,22 @@ def space(request, odl_tspace_impl): def test_L1_norm(space, sigma): """Test the L1-norm.""" sigma = float(sigma) - func = odl.solvers.L1Norm(space) + func = odl.functional.L1Norm(space) x = noise_element(space) # Test functional evaluation - expected_result = np.abs(x).inner(space.one()) + expected_result = odl.abs(x).inner(space.one()) assert func(x) == pytest.approx(expected_result) # Test gradient - expecting sign function - expected_result = func.domain.element(np.sign(x)) + expected_result = func.domain.element(odl.sign(x)) assert all_almost_equal(func.gradient(x), expected_result) # Test proximal - expecting the following: # | x_i + sigma, if x_i < -sigma # z_i = { 0, if -sigma <= x_i <= sigma # | x_i - sigma, if x_i > sigma - tmp = np.zeros(space.shape) + tmp = space.zero().asarray() orig = x.asarray() tmp[orig > sigma] = orig[orig > sigma] - sigma tmp[orig < -sigma] = orig[orig < -sigma] + sigma @@ -76,13 +78,13 @@ def test_L1_norm(space, sigma): # Test convex conjugate - expecting 0 if |x|_inf <= 1, infty else func_cc = func.convex_conj - norm_larger_than_one = 1.1 * x / np.max(np.abs(x)) - assert func_cc(norm_larger_than_one) == np.inf + norm_larger_than_one = 1.1 * x / odl.max(odl.abs(x)) + assert func_cc(norm_larger_than_one) == float('inf') - norm_less_than_one = 0.9 * x / np.max(np.abs(x)) + norm_less_than_one = 0.9 * x / odl.max(odl.abs(x)) assert func_cc(norm_less_than_one) == 0 - norm_equal_to_one = x / np.max(np.abs(x)) + norm_equal_to_one = x / odl.max(odl.abs(x)) assert func_cc(norm_equal_to_one) == 0 # Gradient of the convex conjugate (not implemeted) @@ -90,12 +92,12 @@ def test_L1_norm(space, sigma): func_cc.gradient # Test proximal of the convex conjugate - expecting x / max(1, |x|) - expected_result = x / np.maximum(1, np.abs(x)) + expected_result = x / odl.maximum(1, odl.abs(x)) assert all_almost_equal(func_cc.proximal(sigma)(x), expected_result) # Verify that the biconjugate is the functional itself func_cc_cc = func_cc.convex_conj - assert isinstance(func_cc_cc, odl.solvers.L1Norm) + assert isinstance(func_cc_cc, odl.functional.L1Norm) def test_indicator_lp_unit_ball(space, sigma, exponent): @@ -103,11 +105,11 @@ def test_indicator_lp_unit_ball(space, sigma, exponent): x = noise_element(space) one_elem = space.one() - func = odl.solvers.IndicatorLpUnitBall(space, exponent) + func = odl.functional.IndicatorLpUnitBall(space, exponent) # Test functional evaluation p_norm_x = np.power( - func.domain.element(np.power(np.abs(x), exponent)).inner(one_elem), + func.domain.element(odl.pow(odl.abs(x), exponent)).inner(one_elem), 1.0 / exponent) norm_larger_than_one = 1.01 * x / p_norm_x @@ -119,7 +121,7 @@ def test_indicator_lp_unit_ball(space, sigma, exponent): def test_L2_norm(space, sigma): """Test the L2-norm.""" - func = odl.solvers.L2Norm(space) + func = odl.functional.L2Norm(space) x = noise_element(space) x_norm = x.norm() @@ -177,7 +179,7 @@ def test_L2_norm(space, sigma): def test_L2_norm_squared(space, sigma): """Test the squared L2-norm.""" - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) x = noise_element(space) x_norm = x.norm() @@ -221,7 +223,7 @@ def test_L2_norm_squared(space, sigma): def test_constant_functional(space, scalar): """Test the constant functional.""" constant = float(scalar) - func = odl.solvers.ConstantFunctional(space, constant=scalar) + func = odl.functional.ConstantFunctional(space, constant=scalar) x = noise_element(space) assert func.constant == constant @@ -253,31 +255,31 @@ def test_constant_functional(space, scalar): # Verify that the biconjugate is the functional itself func_cc_cc = func_cc.convex_conj - assert isinstance(func_cc_cc, odl.solvers.ConstantFunctional) + assert isinstance(func_cc_cc, odl.functional.ConstantFunctional) assert func_cc_cc.constant == constant def test_zero_functional(space): """Test the zero functional.""" - zero_func = odl.solvers.ZeroFunctional(space) - assert isinstance(zero_func, odl.solvers.ConstantFunctional) + zero_func = odl.functional.ZeroFunctional(space) + assert isinstance(zero_func, odl.functional.ConstantFunctional) assert zero_func.constant == 0 def test_kullback_leibler(space): """Test the kullback leibler functional and its convex conjugate.""" # The prior needs to be positive - prior = np.abs(noise_element(space)) + 0.1 + prior = odl.abs(noise_element(space)) + 0.1 - func = odl.solvers.KullbackLeibler(space, prior) + func = odl.functional.KullbackLeibler(space, prior) # The fucntional is only defined for positive elements - x = np.abs(noise_element(space)) + 0.1 + x = odl.abs(noise_element(space)) + 0.1 one_elem = space.one() # Evaluation of the functional expected_result = ( - x - prior + prior * np.log(prior / x) + x - prior + prior * odl.log(prior / x) ).inner(one_elem) assert func(x) == pytest.approx(expected_result) @@ -286,7 +288,7 @@ def test_kullback_leibler(space): # For elements with (a) negative components it should return inf x_neg = noise_element(space) - x_neg = x_neg - x_neg.ufuncs.max() + x_neg = x_neg - odl.max(x_neg) assert func(x_neg) == np.inf # The gradient @@ -307,14 +309,14 @@ def test_kullback_leibler(space): # The convex conjugate functional is only finite for elements with all # components smaller than 1. x = noise_element(space) - x = x - x.ufuncs.max() + 0.99 + x = x - odl.max(x) + 0.99 # Evaluation of convex conjugate - expected_result = - (prior * np.log(1 - x)).inner(one_elem) + expected_result = - (prior * odl.log(1 - x)).inner(one_elem) assert cc_func(x) == pytest.approx(expected_result) x_wrong = noise_element(space) - x_wrong = x_wrong - x_wrong.ufuncs.max() + 1.01 + x_wrong = x_wrong - odl.max(x_wrong) + 1.01 assert cc_func(x_wrong) == np.inf # The gradient of the convex conjugate @@ -322,7 +324,7 @@ def test_kullback_leibler(space): assert all_almost_equal(cc_func.gradient(x), expected_result) # The proximal of the convex conjugate - expected_result = 0.5 * (1 + x - np.sqrt((x - 1) ** 2 + 4 * sigma * prior)) + expected_result = 0.5 * (1 + x - odl.sqrt((x - 1) ** 2 + 4 * sigma * prior)) assert all_almost_equal(cc_func.proximal(sigma)(x), expected_result) # The biconjugate, which is the functional itself since it is proper, @@ -333,21 +335,21 @@ def test_kullback_leibler(space): assert cc_cc_func(x) == pytest.approx(func(x)) -def test_kullback_leibler_cross_entorpy(space): +def test_kullback_leibler_cross_entropy(space): """Test the kullback leibler cross entropy and its convex conjugate.""" # The prior needs to be positive prior = noise_element(space) - prior = space.element(np.abs(prior)) + prior = space.element(odl.abs(prior)) - func = odl.solvers.KullbackLeiblerCrossEntropy(space, prior) + func = odl.functional.KullbackLeiblerCrossEntropy(space, prior) # The fucntional is only defined for positive elements x = noise_element(space) - x = func.domain.element(np.abs(x)) + x = func.domain.element(odl.abs(x)) one_elem = space.one() # Evaluation of the functional - expected_result = ((prior - x + x * np.log(x / prior)) + expected_result = ((prior - x + x * odl.log(x / prior)) .inner(one_elem)) assert func(x) == pytest.approx(expected_result) @@ -356,11 +358,11 @@ def test_kullback_leibler_cross_entorpy(space): # For elements with (a) negative components it should return inf x_neg = noise_element(space) - x_neg = x_neg - x_neg.ufuncs.max() + x_neg = x_neg - odl.max(x_neg) assert func(x_neg) == np.inf # The gradient - expected_result = np.log(x / prior) + expected_result = odl.log(x / prior) assert all_almost_equal(func.gradient(x), expected_result) # The proximal operator @@ -379,17 +381,31 @@ def test_kullback_leibler_cross_entorpy(space): x = noise_element(space) # Evaluation of convex conjugate - expected_result = (prior * (np.exp(x) - 1)).inner(one_elem) + expected_result = (prior * (odl.exp(x) - 1)).inner(one_elem) assert cc_func(x) == pytest.approx(expected_result) # The gradient of the convex conjugate - expected_result = prior * np.exp(x) + expected_result = prior * odl.exp(x) assert all_almost_equal(cc_func.gradient(x), expected_result) # The proximal of the convex conjugate - expected_result = (x - - scipy.special.lambertw(sigma * prior * np.exp(x)).real) - assert all_almost_equal(cc_func.proximal(sigma)(x), expected_result) + if isinstance(space, odl.ProductSpace): + device = space[0].device + backend = space[0].array_backend + else: + device = space.device + backend = space.array_backend + arr = backend.to_cpu((prior * odl.exp(x)).asarray()) + x_arr = backend.to_cpu(x.asarray()) + + + expected_result = x_arr - scipy.special.lambertw(sigma * arr).real + if device != 'cpu': + expected_result = expected_result.to(device) + if not all_almost_equal(cc_func.proximal(sigma)(x), expected_result): + print(f'{cc_func.proximal(sigma)(x)=}') + print(f'{expected_result=}') + assert False # The biconjugate, which is the functional itself since it is proper, # convex and lower-semicontinuous @@ -404,7 +420,7 @@ def test_quadratic_form(space): operator = odl.IdentityOperator(space) vector = space.one() constant = 0.363 - func = odl.solvers.QuadraticForm(operator, vector, constant) + func = odl.functional.QuadraticForm(operator, vector, constant) x = noise_element(space) @@ -422,10 +438,10 @@ def test_quadratic_form(space): assert all_almost_equal(func.gradient(x), expected_gradient) # The convex conjugate - assert isinstance(func.convex_conj, odl.solvers.QuadraticForm) + assert isinstance(func.convex_conj, odl.functional.QuadraticForm) # Test for linear functional - func_no_operator = odl.solvers.QuadraticForm(vector=vector, + func_no_operator = odl.functional.QuadraticForm(vector=vector, constant=constant) expected_result = vector.inner(x) + constant assert func_no_operator(x) == pytest.approx(expected_result) @@ -436,31 +452,31 @@ def test_quadratic_form(space): # The convex conjugate is a translation of the IndicatorZero func_no_operator_cc = func_no_operator.convex_conj assert isinstance(func_no_operator_cc, - odl.solvers.FunctionalTranslation) + odl.functional.FunctionalTranslation) assert isinstance(func_no_operator_cc.functional, - odl.solvers.IndicatorZero) + odl.functional.IndicatorZero) assert func_no_operator_cc(vector) == -constant - assert np.isinf(func_no_operator_cc(vector + 2.463)) + assert odl.isinf(func_no_operator_cc(vector + 2.463)) # Test with no offset - func_no_offset = odl.solvers.QuadraticForm(operator, constant=constant) + func_no_offset = odl.functional.QuadraticForm(operator, constant=constant) expected_result = x.inner(operator(x)) + constant assert func_no_offset(x) == pytest.approx(expected_result) def test_separable_sum(space): """Test for the separable sum.""" - l1 = odl.solvers.L1Norm(space) - l2 = odl.solvers.L2Norm(space) + l1 = odl.functional.L1Norm(space) + l2 = odl.functional.L2Norm(space) x = noise_element(space) y = noise_element(space) # Initialization and calling - func = odl.solvers.SeparableSum(l1, l2) + func = odl.functional.SeparableSum(l1, l2) assert func([x, y]) == pytest.approx(l1(x) + l2(y)) - power_func = odl.solvers.SeparableSum(l1, 5) + power_func = odl.functional.SeparableSum(l1, 5) assert power_func([x, x, x, x, x]) == pytest.approx(5 * l1(x)) # Gradient @@ -482,17 +498,17 @@ def test_moreau_envelope_l1(): """Test for the Moreau envelope with L1 norm.""" space = odl.rn(3) - l1 = odl.solvers.L1Norm(space) + l1 = odl.functional.L1Norm(space) # Test l1 norm, gives "Huber norm" - smoothed_l1 = odl.solvers.MoreauEnvelope(l1) + smoothed_l1 = odl.functional.MoreauEnvelope(l1) assert all_almost_equal(smoothed_l1.gradient([0, -0.2, 0.7]), [0, -0.2, 0.7]) assert all_almost_equal(smoothed_l1.gradient([-3, 2, 10]), [-1, 1, 1]) # Test with different sigma - smoothed_l1 = odl.solvers.MoreauEnvelope(l1, sigma=0.5) + smoothed_l1 = odl.functional.MoreauEnvelope(l1, sigma=0.5) assert all_almost_equal(smoothed_l1.gradient([0, 0.2, 0.7]), [0, 0.4, 1.0]) @@ -502,9 +518,9 @@ def test_moreau_envelope_l2_sq(space, sigma): # Result is ||x||_2^2 / (1 + 2 sigma) # Gradient is x * 2 / (1 + 2 * sigma) - l2_sq = odl.solvers.L2NormSquared(space) + l2_sq = odl.functional.L2NormSquared(space) - smoothed_l2_sq = odl.solvers.MoreauEnvelope(l2_sq, sigma=sigma) + smoothed_l2_sq = odl.functional.MoreauEnvelope(l2_sq, sigma=sigma) x = noise_element(space) assert all_almost_equal(smoothed_l2_sq.gradient(x), x * 2 / (1 + 2 * sigma)) @@ -513,9 +529,9 @@ def test_moreau_envelope_l2_sq(space, sigma): def test_weighted_separablesum(space): """Test for the weighted proximal of a SeparableSum functional.""" - l1 = odl.solvers.L1Norm(space) - l2 = odl.solvers.L2Norm(space) - func = odl.solvers.SeparableSum(l1, l2) + l1 = odl.functional.L1Norm(space) + l2 = odl.functional.L2Norm(space) + func = odl.functional.SeparableSum(l1, l2) x = func.domain.one() @@ -530,11 +546,11 @@ def test_weighted_proximal_L2_norm_squared(space): """Test for the weighted proximal of the squared L2 norm""" # Define the functional on the space. - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) # Set the stepsize as a random element of the spaces # with elements between 1 and 10. - sigma = odl.phantom.uniform_noise(space, 1, 10) + sigma = odl.core.phantom.uniform_noise(space, 1, 10) # Start at the one vector. x = space.one() @@ -557,11 +573,11 @@ def test_weighted_proximal_L1_norm_far(space): """Test for the weighted proximal of the L1 norm away from zero""" # Define the functional on the space. - func = odl.solvers.L1Norm(space) + func = odl.functional.L1Norm(space) # Set the stepsize as a random element of the spaces # with elements between 1 and 10. - sigma = odl.phantom.noise.uniform_noise(space, 1, 10) + sigma = odl.core.phantom.noise.uniform_noise(space, 1, 10) # Start far away from zero so that the L1 norm will be differentiable # at the result. @@ -587,7 +603,7 @@ def test_weighted_proximal_L1_norm_close(space): space = odl.rn(5) # Define the functional on the space. - func = odl.solvers.L1Norm(space) + func = odl.functional.L1Norm(space) # Set the stepsize. sigma = [0.1, 0.2, 0.5, 1.0, 2.0] @@ -611,12 +627,12 @@ def test_weighted_proximal_L1_norm_close(space): def test_bregman_functional_no_gradient(space): """Test Bregman distance for functional without gradient.""" - ind_func = odl.solvers.IndicatorNonnegativity(space) - point = np.abs(noise_element(space)) + ind_func = odl.functional.IndicatorNonnegativity(space) + point = odl.abs(noise_element(space)) subgrad = noise_element(space) # Any element in the domain is ok - bregman_dist = odl.solvers.BregmanDistance(ind_func, point, subgrad) + bregman_dist = odl.functional.BregmanDistance(ind_func, point, subgrad) - x = np.abs(noise_element(space)) + x = odl.abs(noise_element(space)) expected_result = -subgrad.inner(x - point) assert all_almost_equal(bregman_dist(x), expected_result) @@ -631,12 +647,12 @@ def test_bregman_functional_l2_squared(space, sigma): """Test Bregman distance using l2 norm squared as underlying functional.""" sigma = float(sigma) - l2_sq = odl.solvers.L2NormSquared(space) + l2_sq = odl.functional.L2NormSquared(space) point = noise_element(space) subgrad = l2_sq.gradient(point) - bregman_dist = odl.solvers.BregmanDistance(l2_sq, point, subgrad) + bregman_dist = odl.functional.BregmanDistance(l2_sq, point, subgrad) - expected_func = odl.solvers.L2NormSquared(space).translated(point) + expected_func = odl.functional.L2NormSquared(space).translated(point) x = noise_element(space) @@ -659,4 +675,4 @@ def test_bregman_functional_l2_squared(space, sigma): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/functional/functional_test.py b/odl/test/solvers/functional/functional_test.py index 3616677ea85..e0c1c146325 100644 --- a/odl/test/solvers/functional/functional_test.py +++ b/odl/test/solvers/functional/functional_test.py @@ -13,11 +13,12 @@ import pytest import odl -from odl.operator import OpTypeError -from odl.util.testutils import ( +from odl.core.operator import OpTypeError +from odl.core.util.testutils import ( all_almost_equal, dtype_ndigits, dtype_tol, noise_element, simple_fixture) -from odl.solvers.functional.default_functionals import ( +from odl.functional.default_functionals import ( KullbackLeiblerConvexConj) +from odl.solvers.nonsmooth.proximal_operators import _numerical_epsilon # TODO: maybe add tests for if translations etc. belongs to the wrong space. @@ -36,18 +37,18 @@ @pytest.fixture(scope="module", ids=space_ids, params=space_params) -def space(request, odl_tspace_impl): +def space(request, odl_impl_device_pairs): name = request.param.strip() - impl = odl_tspace_impl + impl, device = odl_impl_device_pairs if name == 'r10': - return odl.rn(10, impl=impl) + return odl.rn(10, impl=impl, device=device) elif name == 'uniform_discr': # Discretization parameters - return odl.uniform_discr(0, 1, 7, impl=impl) + return odl.uniform_discr(0, 1, 7, impl=impl, device=device) elif name == 'power_space_unif_discr': # Discretization parameters - space = odl.uniform_discr(0, 1, 7, impl=impl) + space = odl.uniform_discr(0, 1, 7, impl=impl, device=device) return odl.ProductSpace(space, 2) @@ -60,9 +61,9 @@ def space(request, odl_tspace_impl): func_ids = [" functional='{}' ".format(p) for p in func_params] FUNCTIONALS_WITHOUT_DERIVATIVE = ( - odl.solvers.functional.IndicatorLpUnitBall, - odl.solvers.functional.IndicatorSimplex, - odl.solvers.functional.IndicatorSumConstraint) + odl.functional.IndicatorLpUnitBall, + odl.functional.IndicatorSimplex, + odl.functional.IndicatorSumConstraint) @pytest.fixture(scope="module", ids=func_ids, params=func_params) @@ -70,62 +71,62 @@ def functional(request, space): name = request.param.strip() if name == 'l1': - func = odl.solvers.functional.L1Norm(space) + func = odl.functional.L1Norm(space) elif name == 'l2': - func = odl.solvers.functional.L2Norm(space) + func = odl.functional.L2Norm(space) elif name == 'l2^2': - func = odl.solvers.functional.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) elif name == 'constant': - func = odl.solvers.functional.ConstantFunctional(space, 2) + func = odl.functional.ConstantFunctional(space, 2) elif name == 'zero': - func = odl.solvers.functional.ZeroFunctional(space) + func = odl.functional.ZeroFunctional(space) elif name == 'ind_unit_ball_1': - func = odl.solvers.functional.IndicatorLpUnitBall(space, 1) + func = odl.functional.IndicatorLpUnitBall(space, 1) elif name == 'ind_unit_ball_2': - func = odl.solvers.functional.IndicatorLpUnitBall(space, 2) + func = odl.functional.IndicatorLpUnitBall(space, 2) elif name == 'ind_unit_ball_pi': - func = odl.solvers.functional.IndicatorLpUnitBall(space, np.pi) + func = odl.functional.IndicatorLpUnitBall(space, np.pi) elif name == 'ind_unit_ball_inf': - func = odl.solvers.functional.IndicatorLpUnitBall(space, np.inf) + func = odl.functional.IndicatorLpUnitBall(space, np.inf) elif name == 'product': - left = odl.solvers.functional.L2Norm(space) - right = odl.solvers.functional.ConstantFunctional(space, 2) - func = odl.solvers.functional.FunctionalProduct(left, right) + left = odl.functional.L2Norm(space) + right = odl.functional.ConstantFunctional(space, 2) + func = odl.functional.FunctionalProduct(left, right) elif name == 'quotient': - dividend = odl.solvers.functional.L2Norm(space) - divisor = odl.solvers.functional.ConstantFunctional(space, 2) - func = odl.solvers.functional.FunctionalQuotient(dividend, divisor) + dividend = odl.functional.L2Norm(space) + divisor = odl.functional.ConstantFunctional(space, 2) + func = odl.functional.FunctionalQuotient(dividend, divisor) elif name == 'kl': - func = odl.solvers.functional.KullbackLeibler(space) + func = odl.functional.KullbackLeibler(space) elif name == 'kl_cc': - func = odl.solvers.KullbackLeibler(space).convex_conj + func = odl.functional.KullbackLeibler(space).convex_conj elif name == 'kl_cross_ent': - func = odl.solvers.functional.KullbackLeiblerCrossEntropy(space) + func = odl.functional.KullbackLeiblerCrossEntropy(space) elif name == 'kl_cc_cross_ent': - func = odl.solvers.KullbackLeiblerCrossEntropy(space).convex_conj + func = odl.functional.KullbackLeiblerCrossEntropy(space).convex_conj elif name == 'huber': - func = odl.solvers.Huber(space, gamma=0.1) + func = odl.functional.Huber(space, gamma=0.1) elif name == 'groupl1': if isinstance(space, odl.ProductSpace): pytest.skip("The `GroupL1Norm` is not supported on `ProductSpace`") space = odl.ProductSpace(space, 3) - func = odl.solvers.GroupL1Norm(space) + func = odl.functional.GroupL1Norm(space) elif name == 'bregman_l2squared': point = noise_element(space) - l2_squared = odl.solvers.L2NormSquared(space) + l2_squared = odl.functional.L2NormSquared(space) subgrad = l2_squared.gradient(point) - func = odl.solvers.BregmanDistance(l2_squared, point, subgrad) + func = odl.functional.BregmanDistance(l2_squared, point, subgrad) elif name == 'bregman_l1': point = noise_element(space) - l1 = odl.solvers.L1Norm(space) + l1 = odl.functional.L1Norm(space) subgrad = l1.gradient(point) - func = odl.solvers.BregmanDistance(l1, point, subgrad) + func = odl.functional.BregmanDistance(l1, point, subgrad) elif name == 'indicator_simplex': diameter = 1.23 - func = odl.solvers.IndicatorSimplex(space, diameter) + func = odl.functional.IndicatorSimplex(space, diameter) elif name == 'indicator_sum_constraint': sum_value = 1.23 - func = odl.solvers.IndicatorSumConstraint(space, sum_value) + func = odl.functional.IndicatorSumConstraint(space, sum_value) else: assert False @@ -151,19 +152,19 @@ def test_derivative(functional): x = noise_element(functional.domain) y = noise_element(functional.domain) - if (isinstance(functional, odl.solvers.KullbackLeibler) or - isinstance(functional, odl.solvers.KullbackLeiblerCrossEntropy)): + if (isinstance(functional, odl.functional.KullbackLeibler) or + isinstance(functional, odl.functional.KullbackLeiblerCrossEntropy)): # The functional is not defined for values <= 0 - x = x.ufuncs.absolute() - y = y.ufuncs.absolute() + x = odl.abs(x) + y = odl.abs(y) if isinstance(functional, KullbackLeiblerConvexConj): # The functional is not defined for values >= 1 - x = x - x.ufuncs.max() + 0.99 - y = y - y.ufuncs.max() + 0.99 + x = x - odl.max(x) + 0.99 + y = y - odl.max(y) + 0.99 # Compute a "small" step size according to dtype of space - step = float(np.sqrt(np.finfo(functional.domain.dtype).eps)) + step = float(np.sqrt(_numerical_epsilon(functional.domain.dtype))) # Numerical test of gradient, only low accuracy can be guaranteed. assert all_almost_equal((functional(x + step * y) - functional(x)) / step, @@ -180,8 +181,8 @@ def test_arithmetic(): space = odl.rn(3) # Create elements needed for later - functional = odl.solvers.L2Norm(space).translated([1, 2, 3]) - functional2 = odl.solvers.L2NormSquared(space) + functional = odl.functional.L2Norm(space).translated([1, 2, 3]) + functional2 = odl.functional.L2NormSquared(space) operator = odl.IdentityOperator(space) - space.element([4, 5, 6]) x = noise_element(functional.domain) y = noise_element(functional.domain) @@ -197,10 +198,10 @@ def test_arithmetic(): assert (functional + functional2)(x) == functional(x) + functional2(x) assert (functional - functional2)(x) == functional(x) - functional2(x) assert (functional * operator)(x) == functional(operator(x)) - assert all_almost_equal((y * functional)(x), y * functional(x)) - assert all_almost_equal((y * (y * functional))(x), (y * y) * functional(x)) - assert all_almost_equal((functional * y)(x), functional(y * x)) - assert all_almost_equal(((functional * y) * y)(x), functional((y * y) * x)) + assert all_almost_equal((y @ functional)(x), y * functional(x)) + assert all_almost_equal((y @ (y @ functional))(x), (y * y) * functional(x)) + assert all_almost_equal((functional @ y)(x), functional(y * x)) + assert all_almost_equal(((functional @ y) * y)(x), functional((y * y) * x)) def test_left_scalar_mult(space, scalar): @@ -210,11 +211,11 @@ def test_left_scalar_mult(space, scalar): rtol = dtype_tol(space.dtype) x = noise_element(space) - func = odl.solvers.functional.L2Norm(space) + func = odl.functional.L2Norm(space) lmul_func = scalar * func if scalar == 0: - assert isinstance(scalar * func, odl.solvers.ZeroFunctional) + assert isinstance(scalar * func, odl.functional.ZeroFunctional) return # Test functional evaluation @@ -257,12 +258,12 @@ def test_right_scalar_mult(space, scalar): rtol = dtype_tol(space.dtype) x = noise_element(space) - func = odl.solvers.functional.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) rmul_func = func * scalar if scalar == 0: # expecting the constant functional x -> func(0) - assert isinstance(rmul_func, odl.solvers.ConstantFunctional) + assert isinstance(rmul_func, odl.functional.ConstantFunctional) assert all_almost_equal(rmul_func(x), func(space.zero()), ndigits) # Nothing more to do, rest is part of ConstantFunctional test @@ -295,8 +296,8 @@ def test_right_scalar_mult(space, scalar): ndigits) # Verify that for linear functionals, left multiplication is used. - func = odl.solvers.ZeroFunctional(space) - assert isinstance(func * scalar, odl.solvers.FunctionalLeftScalarMult) + func = odl.functional.ZeroFunctional(space) + assert isinstance(func * scalar, odl.functional.FunctionalLeftScalarMult) def test_functional_composition(space): @@ -305,21 +306,21 @@ def test_functional_composition(space): ndigits = dtype_ndigits(space.dtype) rtol = dtype_tol(space.dtype) - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) # Verify that an error is raised if an invalid operator is used # (e.g. wrong range) scalar = 2.1 wrong_space = odl.uniform_discr(1, 2, 10) - op_wrong = odl.operator.ScalingOperator(wrong_space, scalar) + op_wrong = odl.core.operator.ScalingOperator(wrong_space, scalar) with pytest.raises(OpTypeError): func * op_wrong # Test composition with operator from the right - op = odl.operator.ScalingOperator(space, scalar) + op = odl.core.operator.ScalingOperator(space, scalar) func_op_comp = func * op - assert isinstance(func_op_comp, odl.solvers.Functional) + assert isinstance(func_op_comp, odl.functional.Functional) x = noise_element(space) assert func_op_comp(x) == pytest.approx(func(op(x)), rel=rtol) @@ -341,16 +342,16 @@ def test_functional_sum(space): ndigits = dtype_ndigits(space.dtype) rtol = dtype_tol(space.dtype) - func1 = odl.solvers.L2NormSquared(space) - func2 = odl.solvers.L2Norm(space) + func1 = odl.functional.L2NormSquared(space) + func2 = odl.functional.L2Norm(space) # Verify that an error is raised if one operand is "wrong" - op = odl.operator.IdentityOperator(space) + op = odl.core.operator.IdentityOperator(space) with pytest.raises(OpTypeError): func1 + op wrong_space = odl.uniform_discr(1, 2, 10) - func_wrong_domain = odl.solvers.L2Norm(wrong_space) + func_wrong_domain = odl.functional.L2Norm(wrong_space) with pytest.raises(OpTypeError): func1 + func_wrong_domain @@ -388,7 +389,7 @@ def test_functional_plus_scalar(space): ndigits = dtype_ndigits(space.dtype) rtol = dtype_tol(space.dtype) - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) scalar = -1.3 # Test for scalar not in the field (field of unifor_discr is RealNumbers) @@ -438,7 +439,7 @@ def test_translation_of_functional(space): # The translation; an element in the domain translation = noise_element(space) - test_functional = odl.solvers.L2NormSquared(space) + test_functional = odl.functional.L2NormSquared(space) translated_functional = test_functional.translated(translation) x = noise_element(space) @@ -463,7 +464,7 @@ def test_translation_of_functional(space): # Test for conjugate functional # The helper function below is tested explicitly further down in this file - expected_result = odl.solvers.FunctionalQuadraticPerturb( + expected_result = odl.functional.FunctionalQuadraticPerturb( test_functional.convex_conj, linear_term=translation)(x) assert all_almost_equal(translated_functional.convex_conj(x), expected_result, ndigits) @@ -490,29 +491,29 @@ def test_translation_of_functional(space): ) -def test_translation_proximal_stepsizes(): - """Test for stepsize types for proximal of a translated functional.""" - # Set up space, functional and a point where to evaluate the proximal. - space = odl.rn(2) - functional = odl.solvers.L2NormSquared(space) - translation = functional.translated([0.5, 0.5]) - x = space.one() +# def test_translation_proximal_stepsizes(): +# """Test for stepsize types for proximal of a translated functional.""" +# # Set up space, functional and a point where to evaluate the proximal. +# space = odl.rn(2) +# functional = odl.functional.L2NormSquared(space) +# translation = functional.translated([0.5, 0.5]) +# x = space.one() - # Define different forms of the same stepsize. - stepsize = space.element([0.5, 2.0]) - stepsize_list = [0.5, 2.0] - stepsize_array = np.asarray([0.5, 2.0]) +# # Define different forms of the same stepsize. +# stepsize = space.element([0.5, 2.0]) +# stepsize_list = [0.5, 2.0] +# stepsize_array = np.asarray([0.5, 2.0]) - # Calculate the proximals for each of the stepsizes. - y = translation.convex_conj.proximal(stepsize)(x) - y_list = translation.convex_conj.proximal(stepsize_list)(x) - y_array = translation.convex_conj.proximal(stepsize_array)(x) - expected_result = [0.6, 0.0] +# # Calculate the proximals for each of the stepsizes. +# y = translation.convex_conj.proximal(stepsize)(x) +# y_list = translation.convex_conj.proximal(stepsize_list)(x) +# y_array = translation.convex_conj.proximal(stepsize_array)(x) +# expected_result = [0.6, 0.0] - # Now, all the results should be equal to the expected result. - assert all_almost_equal(y, expected_result) - assert all_almost_equal(y_list, expected_result) - assert all_almost_equal(y_array, expected_result) +# # Now, all the results should be equal to the expected result. +# assert all_almost_equal(y, expected_result) +# assert all_almost_equal(y_list, expected_result) +# assert all_almost_equal(y_array, expected_result) def test_multiplication_with_vector(space): @@ -523,7 +524,7 @@ def test_multiplication_with_vector(space): x = noise_element(space) y = noise_element(space) - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) wrong_space = odl.uniform_discr(1, 2, 10) y_other_space = noise_element(wrong_space) @@ -531,7 +532,7 @@ def test_multiplication_with_vector(space): # Multiplication from the right. Make sure it is a # FunctionalRightVectorMult func_times_y = func * y - assert isinstance(func_times_y, odl.solvers.FunctionalRightVectorMult) + assert isinstance(func_times_y, odl.functional.FunctionalRightVectorMult) expected_result = func(y * x) assert func_times_y(x) == pytest.approx(expected_result, rel=rtol) @@ -554,7 +555,7 @@ def test_multiplication_with_vector(space): func * y_other_space # Multiplication from the left. Make sure it is a FunctionalLeftVectorMult - y_times_func = y * func + y_times_func = y @ func assert isinstance(y_times_func, odl.FunctionalLeftVectorMult) expected_result = y * func(x) @@ -562,7 +563,7 @@ def test_multiplication_with_vector(space): # Now, multiplication with vector from another space is ok (since it is the # same as scaling that vector with the scalar returned by the functional). - y_other_times_func = y_other_space * func + y_other_times_func = y_other_space @ func assert isinstance(y_other_times_func, odl.FunctionalLeftVectorMult) expected_result = y_other_space * func(x) @@ -581,7 +582,7 @@ def test_functional_quadratic_perturb(space, linear_term, quadratic_coeff): ndigits = dtype_ndigits(space.dtype) rtol = dtype_tol(space.dtype) - orig_func = odl.solvers.L2NormSquared(space) + orig_func = odl.functional.L2NormSquared(space) if linear_term: linear_term_arg = None @@ -590,7 +591,7 @@ def test_functional_quadratic_perturb(space, linear_term, quadratic_coeff): linear_term_arg = linear_term = noise_element(space) # Creating the functional ||x||_2^2 and add the quadratic perturbation - functional = odl.solvers.FunctionalQuadraticPerturb( + functional = odl.functional.FunctionalQuadraticPerturb( orig_func, quadratic_coeff=quadratic_coeff, linear_term=linear_term_arg) @@ -650,19 +651,19 @@ def test_bregman(functional): y = noise_element(functional.domain) x = noise_element(functional.domain) - if (isinstance(functional, odl.solvers.KullbackLeibler) or - isinstance(functional, odl.solvers.KullbackLeiblerCrossEntropy)): + if (isinstance(functional, odl.functional.KullbackLeibler) or + isinstance(functional, odl.functional.KullbackLeiblerCrossEntropy)): # The functional is not defined for values <= 0 - x = x.ufuncs.absolute() - y = y.ufuncs.absolute() + x = odl.abs(x) + y = odl.abs(y) if isinstance(functional, KullbackLeiblerConvexConj): # The functional is not defined for values >= 1 - x = x - x.ufuncs.max() + 0.99 - y = y - y.ufuncs.max() + 0.99 + x = x - odl.max(x) + 0.99 + y = y - odl.max(y) + 0.99 grad = functional.gradient(y) - quadratic_func = odl.solvers.QuadraticForm( + quadratic_func = odl.functional.QuadraticForm( vector=-grad, constant=-functional(y) + grad.inner(y)) expected_func = functional + quadratic_func @@ -673,4 +674,4 @@ def test_bregman(functional): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/iterative/iterative_test.py b/odl/test/solvers/iterative/iterative_test.py index c3f75c533a4..c7528598bad 100644 --- a/odl/test/solvers/iterative/iterative_test.py +++ b/odl/test/solvers/iterative/iterative_test.py @@ -10,7 +10,7 @@ from __future__ import division import odl -from odl.util.testutils import all_almost_equal +from odl.core.util.testutils import all_almost_equal import pytest import numpy as np @@ -32,13 +32,13 @@ def iterative_solver(request): if solver_name == 'steepest_descent': def solver(op, x, rhs): norm2 = op.adjoint(op(x)).norm() / x.norm() - func = odl.solvers.L2NormSquared(op.domain) * (op - rhs) + func = odl.functional.L2NormSquared(op.domain) * (op - rhs) odl.solvers.steepest_descent(func, x, line_search=0.5 / norm2) elif solver_name == 'adam': def solver(op, x, rhs): norm2 = op.adjoint(op(x)).norm() / x.norm() - func = odl.solvers.L2NormSquared(op.domain) * (op - rhs) + func = odl.functional.L2NormSquared(op.domain) * (op - rhs) odl.solvers.adam(func, x, learning_rate=4.0 / norm2, maxiter=150) elif solver_name == 'landweber': @@ -72,12 +72,13 @@ def solver(op, x, rhs): @pytest.fixture(scope="module", params=['MatVec', 'Identity']) -def optimization_problem(request): +def optimization_problem(request, odl_impl_device_pairs): problem_name = request.param - + impl, device = odl_impl_device_pairs if problem_name == 'MatVec': # Define problem op_arr = np.eye(5) * 5 + np.ones([5, 5]) + space = odl.tensor_space((5,5), impl=impl, device=device) op = odl.MatrixOperator(op_arr) # Simple right hand side @@ -89,7 +90,7 @@ def optimization_problem(request): return op, x, rhs elif problem_name == 'Identity': # Define problem - space = odl.uniform_discr(0, 1, 5) + space = odl.uniform_discr(0, 1, 5, impl = impl, device=device) op = odl.IdentityOperator(space) # Simple right hand side @@ -111,11 +112,12 @@ def test_solver(optimization_problem, iterative_solver): assert all_almost_equal(op(x), rhs, ndigits=2) -def test_steepst_descent(): +def test_steepst_descent(odl_impl_device_pairs): """Test steepest descent on the rosenbrock function in 3d.""" - space = odl.rn(3) + impl, device = odl_impl_device_pairs + space = odl.rn(3, impl = impl, device=device) scale = 1 # only mildly ill-behaved - rosenbrock = odl.solvers.RosenbrockFunctional(space, scale) + rosenbrock = odl.functional.RosenbrockFunctional(space, scale) line_search = odl.solvers.BacktrackingLineSearch( rosenbrock, 0.1, 0.01) @@ -127,4 +129,4 @@ def test_steepst_descent(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/nonsmooth/admm_test.py b/odl/test/solvers/nonsmooth/admm_test.py index a6011ba10ed..5d28a892ec8 100644 --- a/odl/test/solvers/nonsmooth/admm_test.py +++ b/odl/test/solvers/nonsmooth/admm_test.py @@ -12,7 +12,7 @@ import odl from odl.solvers import admm_linearized, Callback -from odl.util.testutils import all_almost_equal, noise_element +from odl.core.util.testutils import all_almost_equal, noise_element def test_admm_lin_input_handling(): @@ -21,7 +21,7 @@ def test_admm_lin_input_handling(): space = odl.uniform_discr(0, 1, 10) L = odl.ZeroOperator(space) - f = g = odl.solvers.ZeroFunctional(space) + f = g = odl.functional.ZeroFunctional(space) # Check that the algorithm runs. With the above operators and functionals, # the algorithm should not modify the initial value. @@ -60,11 +60,11 @@ def test_admm_lin_l1(): L = odl.IdentityOperator(space) - data_1 = odl.util.testutils.noise_element(space) - data_2 = odl.util.testutils.noise_element(space) + data_1 = odl.core.util.testutils.noise_element(space) + data_2 = odl.core.util.testutils.noise_element(space) - f = odl.solvers.L1Norm(space).translated(data_1) - g = 0.5 * odl.solvers.L1Norm(space).translated(data_2) + f = odl.functional.L1Norm(space).translated(data_1) + g = 0.5 * odl.functional.L1Norm(space).translated(data_2) x = space.zero() admm_linearized(x, f, g, L, tau=1.0, sigma=2.0, niter=10) @@ -73,4 +73,4 @@ def test_admm_lin_l1(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/nonsmooth/alternating_dual_updates_test.py b/odl/test/solvers/nonsmooth/alternating_dual_updates_test.py index fe7d5dea44a..098b59f96fe 100644 --- a/odl/test/solvers/nonsmooth/alternating_dual_updates_test.py +++ b/odl/test/solvers/nonsmooth/alternating_dual_updates_test.py @@ -13,7 +13,7 @@ from odl.solvers.nonsmooth import adupdates from odl.solvers.nonsmooth.alternating_dual_updates import adupdates_simple -from odl.util.testutils import all_almost_equal +from odl.core.util.testutils import all_almost_equal # Places for the accepted error when comparing results @@ -21,83 +21,83 @@ LOW_ACCURACY = 4 -def test_adupdates(): - """Test if the adupdates solver handles the following problem correctly: - - ( 1 1/2 1/3 1/4) (x_1) (a) - (1/2 1/3 1/4 1/5) (x_2) (b) - (1/3 1/4 1/5 1/6) (x_3) = (c) - (1/4 1/5 1/6 1/7) (x_4) (d). - - The matrix is the ill-conditined Hilbert matrix, the inverse of which - can be given in closed form. If we set - - (a) (25/12) (x_1) = (1) - (b) (77/60) (x_2) = (1) - (c) = (19/20) then (x_3) = (1) - (d) (319/420), (x_4) = (1). - - We solve the problem - - min ||Ax - b||^2 + TV(x) s.t. x >= 0 - - for the matrix A, the r.h.s. b as above and the total variation TV, which - is given as the (non-cyclic) sum of the distances of consecutive entries - of the solution. The solution of this problem is clearly x = (1, 1, 1, 1), - since it satisfies the additional constraint and minimizes both terms of - the objective function. - """ - - mat1 = [[1, 1 / 2, 1 / 3, 1 / 4], - [1 / 2, 1 / 3, 1 / 4, 1 / 5]] - mat2 = [[1 / 3, 1 / 4, 1 / 5, 1 / 6], - [1 / 4, 1 / 5, 1 / 6, 1 / 7]] - - # Create the linear operators - mat1op = odl.MatrixOperator(mat1) - mat2op = odl.MatrixOperator(mat2) - domain = mat1op.domain - tv1 = odl.MatrixOperator([[1.0, -1.0, 0.0, 0.0]]) - tv2 = odl.MatrixOperator([[0.0, 0.0, 1.0, -1.0]]) - tv3 = odl.MatrixOperator([[0.0, 1.0, -1.0, 0.0]]) - nneg = odl.IdentityOperator(domain) - ops = [mat1op, mat2op, odl.BroadcastOperator(tv1, tv2), tv3, nneg] - - # Create inner stepsizes for linear operators - mat1s = 1 / mat1op(mat1op.adjoint(mat1op.range.one())) - mat2s = 1 / mat2op(mat2op.adjoint(mat2op.range.one())) - tv1s = [0.5, 0.5] - tv2s = 0.5 - nnegs = nneg.range.element([1.0, 1.0, 1.0, 1.0]) - inner_stepsizes = [mat1s, mat2s, tv1s, tv2s, nnegs] - - expected_solution = domain.element([1, 1, 1, 1]) - # Create right-hand-sides of the equation - rhs1 = mat1op(expected_solution) - rhs2 = mat2op(expected_solution) - - # Create the functionals - fid1 = odl.solvers.L2NormSquared(mat1op.range).translated(rhs1) - fid2 = odl.solvers.L2NormSquared(mat2op.range).translated(rhs2) - reg1 = odl.solvers.L1Norm(tv1.range) - reg2 = odl.solvers.L1Norm(tv2.range) - reg3 = odl.solvers.L1Norm(tv3.range) - ind = odl.solvers.IndicatorNonnegativity(nneg.range) - funcs = [fid1, fid2, odl.solvers.SeparableSum(reg1, reg2), reg3, ind] - - # Start from zero - x = tv1.domain.zero() - x_simple = tv1.domain.zero() - - stepsize = 1.0 - niter = 10 - - adupdates(x, funcs, ops, stepsize, inner_stepsizes, niter) - adupdates_simple(x_simple, funcs, ops, stepsize, - inner_stepsizes, niter) - assert all_almost_equal(x, x_simple) - assert domain.dist(x, expected_solution) < 3e-2 +# def test_adupdates(): +# """Test if the adupdates solver handles the following problem correctly: + +# ( 1 1/2 1/3 1/4) (x_1) (a) +# (1/2 1/3 1/4 1/5) (x_2) (b) +# (1/3 1/4 1/5 1/6) (x_3) = (c) +# (1/4 1/5 1/6 1/7) (x_4) (d). + +# The matrix is the ill-conditined Hilbert matrix, the inverse of which +# can be given in closed form. If we set + +# (a) (25/12) (x_1) = (1) +# (b) (77/60) (x_2) = (1) +# (c) = (19/20) then (x_3) = (1) +# (d) (319/420), (x_4) = (1). + +# We solve the problem + +# min ||Ax - b||^2 + TV(x) s.t. x >= 0 + +# for the matrix A, the r.h.s. b as above and the total variation TV, which +# is given as the (non-cyclic) sum of the distances of consecutive entries +# of the solution. The solution of this problem is clearly x = (1, 1, 1, 1), +# since it satisfies the additional constraint and minimizes both terms of +# the objective function. +# """ + +# mat1 = [[1, 1 / 2, 1 / 3, 1 / 4], +# [1 / 2, 1 / 3, 1 / 4, 1 / 5]] +# mat2 = [[1 / 3, 1 / 4, 1 / 5, 1 / 6], +# [1 / 4, 1 / 5, 1 / 6, 1 / 7]] + +# # Create the linear operators +# mat1op = odl.MatrixOperator(mat1) +# mat2op = odl.MatrixOperator(mat2) +# domain = mat1op.domain +# tv1 = odl.MatrixOperator([[1.0, -1.0, 0.0, 0.0]]) +# tv2 = odl.MatrixOperator([[0.0, 0.0, 1.0, -1.0]]) +# tv3 = odl.MatrixOperator([[0.0, 1.0, -1.0, 0.0]]) +# nneg = odl.IdentityOperator(domain) +# ops = [mat1op, mat2op, odl.BroadcastOperator(tv1, tv2), tv3, nneg] + +# # Create inner stepsizes for linear operators +# mat1s = 1 / mat1op(mat1op.adjoint(mat1op.range.one())) +# mat2s = 1 / mat2op(mat2op.adjoint(mat2op.range.one())) +# tv1s = [0.5, 0.5] +# tv2s = 0.5 +# nnegs = nneg.range.element([1.0, 1.0, 1.0, 1.0]) +# inner_stepsizes = [mat1s, mat2s, tv1s, tv2s, nnegs] + +# expected_solution = domain.element([1, 1, 1, 1]) +# # Create right-hand-sides of the equation +# rhs1 = mat1op(expected_solution) +# rhs2 = mat2op(expected_solution) + +# # Create the functionals +# fid1 = odl.functional.L2NormSquared(mat1op.range).translated(rhs1) +# fid2 = odl.functional.L2NormSquared(mat2op.range).translated(rhs2) +# reg1 = odl.functional.L1Norm(tv1.range) +# reg2 = odl.functional.L1Norm(tv2.range) +# reg3 = odl.functional.L1Norm(tv3.range) +# ind = odl.functional.IndicatorNonnegativity(nneg.range) +# funcs = [fid1, fid2, odl.functional.SeparableSum(reg1, reg2), reg3, ind] + +# # Start from zero +# x = tv1.domain.zero() +# x_simple = tv1.domain.zero() + +# stepsize = 1.0 +# niter = 10 + +# adupdates(x, funcs, ops, stepsize, inner_stepsizes, niter) +# adupdates_simple(x_simple, funcs, ops, stepsize, +# inner_stepsizes, niter) +# assert all_almost_equal(x, x_simple) +# assert domain.dist(x, expected_solution) < 3e-2 if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/nonsmooth/difference_convex_test.py b/odl/test/solvers/nonsmooth/difference_convex_test.py index 1afa5ece7b7..8e38e03a44c 100644 --- a/odl/test/solvers/nonsmooth/difference_convex_test.py +++ b/odl/test/solvers/nonsmooth/difference_convex_test.py @@ -51,8 +51,8 @@ def test_dca(): b = 0.5 # This means -1/a = -2 < b = 0.5 < 1/a = 2. space = odl.rn(1) - f = a / 2 * odl.solvers.L2NormSquared(space).translated(b) - g = odl.solvers.L1Norm(space) + f = a / 2 * odl.functional.L2NormSquared(space).translated(b) + g = odl.functional.L1Norm(space) niter = 50 # Set up some space elements for the solvers to use @@ -63,7 +63,7 @@ def test_dca(): x_simpl = x.copy() # Some additional parameters for some of the solvers - phi = odl.solvers.ZeroFunctional(space) + phi = odl.functional.ZeroFunctional(space) y = space.element(3) y_simpl = y.copy() gamma = 1 diff --git a/odl/test/solvers/nonsmooth/douglas_rachford_test.py b/odl/test/solvers/nonsmooth/douglas_rachford_test.py index 58066921a4e..c074fe60114 100644 --- a/odl/test/solvers/nonsmooth/douglas_rachford_test.py +++ b/odl/test/solvers/nonsmooth/douglas_rachford_test.py @@ -13,7 +13,7 @@ import odl from odl.solvers import douglas_rachford_pd -from odl.util.testutils import all_almost_equal, noise_element +from odl.core.util.testutils import all_almost_equal, noise_element # Number of digits for the accepted error when comparing results @@ -27,9 +27,9 @@ def test_primal_dual_input_handling(): space1 = odl.uniform_discr(0, 1, 10) lin_ops = [odl.ZeroOperator(space1), odl.ZeroOperator(space1)] - g = [odl.solvers.ZeroFunctional(space1), - odl.solvers.ZeroFunctional(space1)] - f = odl.solvers.ZeroFunctional(space1) + g = [odl.functional.ZeroFunctional(space1), + odl.functional.ZeroFunctional(space1)] + f = odl.functional.ZeroFunctional(space1) # Check that the algorithm runs. With the above operators, the algorithm # returns the input. @@ -49,9 +49,9 @@ def test_primal_dual_input_handling(): sigma=[1.0], niter=niter) # Too many operators - g_too_many = [odl.solvers.ZeroFunctional(space1), - odl.solvers.ZeroFunctional(space1), - odl.solvers.ZeroFunctional(space1)] + g_too_many = [odl.functional.ZeroFunctional(space1), + odl.functional.ZeroFunctional(space1), + odl.functional.ZeroFunctional(space1)] with pytest.raises(ValueError): douglas_rachford_pd(x, f, g_too_many, lin_ops, tau=1.0, sigma=[1.0, 1.0], niter=niter) @@ -81,12 +81,12 @@ def test_primal_dual_l1(): L = [odl.IdentityOperator(space)] # Data - data_1 = odl.util.testutils.noise_element(space) - data_2 = odl.util.testutils.noise_element(space) + data_1 = odl.core.util.testutils.noise_element(space) + data_2 = odl.core.util.testutils.noise_element(space) # Proximals - f = odl.solvers.L1Norm(space).translated(data_1) - g = [0.5 * odl.solvers.L1Norm(space).translated(data_2)] + f = odl.functional.L1Norm(space).translated(data_1) + g = [0.5 * odl.functional.L1Norm(space).translated(data_2)] # Solve with f term dominating x = space.zero() @@ -112,10 +112,10 @@ def test_primal_dual_no_operator(): L = [] # Data - data_1 = odl.util.testutils.noise_element(space) + data_1 = odl.core.util.testutils.noise_element(space) # Proximals - f = odl.solvers.L1Norm(space).translated(data_1) + f = odl.functional.L1Norm(space).translated(data_1) g = [] # Solve with f term dominating @@ -142,9 +142,9 @@ def test_primal_dual_with_li(): space = odl.rn(1) lin_ops = [odl.IdentityOperator(space)] - g = [odl.solvers.IndicatorBox(space, lower=lower_lim, upper=upper_lim)] - f = odl.solvers.ZeroFunctional(space) - l = [odl.solvers.L2NormSquared(space)] + g = [odl.functional.IndicatorBox(space, lower=lower_lim, upper=upper_lim)] + f = odl.functional.ZeroFunctional(space) + l = [odl.functional.L2NormSquared(space)] # Centering around a point further away from [-3,-1]. x = space.element(10) @@ -156,4 +156,4 @@ def test_primal_dual_with_li(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/nonsmooth/forward_backward_test.py b/odl/test/solvers/nonsmooth/forward_backward_test.py index ab6c36cf245..9364e8db714 100644 --- a/odl/test/solvers/nonsmooth/forward_backward_test.py +++ b/odl/test/solvers/nonsmooth/forward_backward_test.py @@ -13,7 +13,7 @@ import odl from odl.solvers import forward_backward_pd -from odl.util.testutils import all_almost_equal, noise_element +from odl.core.util.testutils import all_almost_equal, noise_element # Places for the accepted error when comparing results HIGH_ACCURACY = 8 @@ -26,10 +26,10 @@ def test_forward_backward_input_handling(): space1 = odl.uniform_discr(0, 1, 10) lin_ops = [odl.ZeroOperator(space1), odl.ZeroOperator(space1)] - g = [odl.solvers.ZeroFunctional(space1), - odl.solvers.ZeroFunctional(space1)] - f = odl.solvers.ZeroFunctional(space1) - h = odl.solvers.ZeroFunctional(space1) + g = [odl.functional.ZeroFunctional(space1), + odl.functional.ZeroFunctional(space1)] + f = odl.functional.ZeroFunctional(space1) + h = odl.functional.ZeroFunctional(space1) # Check that the algorithm runs. With the above operators, the algorithm # returns the input. @@ -49,9 +49,9 @@ def test_forward_backward_input_handling(): sigma=[1.0], niter=niter) # Too many operators - g_too_many = [odl.solvers.ZeroFunctional(space1), - odl.solvers.ZeroFunctional(space1), - odl.solvers.ZeroFunctional(space1)] + g_too_many = [odl.functional.ZeroFunctional(space1), + odl.functional.ZeroFunctional(space1), + odl.functional.ZeroFunctional(space1)] with pytest.raises(ValueError): forward_backward_pd(x, f, g_too_many, lin_ops, h, tau=1.0, sigma=[1.0, 1.0], niter=niter) @@ -78,9 +78,9 @@ def test_forward_backward_basic(): space = odl.rn(10) lin_ops = [odl.ZeroOperator(space)] - g = [odl.solvers.ZeroFunctional(space)] - f = odl.solvers.ZeroFunctional(space) - h = odl.solvers.L2NormSquared(space) + g = [odl.functional.ZeroFunctional(space)] + f = odl.functional.ZeroFunctional(space) + h = odl.functional.L2NormSquared(space) x = noise_element(space) x_global_min = space.zero() @@ -108,11 +108,11 @@ def test_forward_backward_with_lin_ops(): b = noise_element(space) lin_ops = [alpha * odl.IdentityOperator(space)] - g = [odl.solvers.L2NormSquared(space)] - f = odl.solvers.ZeroFunctional(space) + g = [odl.functional.L2NormSquared(space)] + f = odl.functional.ZeroFunctional(space) # Gradient of two-norm square - h = odl.solvers.L2NormSquared(space).translated(b) + h = odl.functional.L2NormSquared(space).translated(b) x = noise_element(space) @@ -143,10 +143,10 @@ def test_forward_backward_with_li(): lin_op = odl.IdentityOperator(space) lin_ops = [lin_op] - g = [odl.solvers.IndicatorBox(space, lower=lower_lim, upper=upper_lim)] - f = odl.solvers.ZeroFunctional(space) - h = odl.solvers.ZeroFunctional(space) - l = [0.5 * odl.solvers.L2NormSquared(space)] + g = [odl.functional.IndicatorBox(space, lower=lower_lim, upper=upper_lim)] + f = odl.functional.ZeroFunctional(space) + h = odl.functional.ZeroFunctional(space) + l = [0.5 * odl.functional.L2NormSquared(space)] # Creating an element not to far away from [-3,-1], in order to converge in # a few number of iterations. @@ -177,10 +177,10 @@ def test_forward_backward_with_li_and_h(): space = odl.rn(1) lin_ops = [odl.IdentityOperator(space)] - g = [odl.solvers.IndicatorBox(space, lower=lower_lim, upper=upper_lim)] - f = odl.solvers.ZeroFunctional(space) - h = 0.5 * odl.solvers.L2NormSquared(space) - l = [0.5 * odl.solvers.L2NormSquared(space)] + g = [odl.functional.IndicatorBox(space, lower=lower_lim, upper=upper_lim)] + f = odl.functional.ZeroFunctional(space) + h = 0.5 * odl.functional.L2NormSquared(space) + l = [0.5 * odl.functional.L2NormSquared(space)] # Creating an element not to far away from -0.5, in order to converge in # a few number of iterations. @@ -194,4 +194,4 @@ def test_forward_backward_with_li_and_h(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/nonsmooth/primal_dual_hybrid_gradient_test.py b/odl/test/solvers/nonsmooth/primal_dual_hybrid_gradient_test.py index c2cbeb1cc8d..c7e4e57b5cc 100644 --- a/odl/test/solvers/nonsmooth/primal_dual_hybrid_gradient_test.py +++ b/odl/test/solvers/nonsmooth/primal_dual_hybrid_gradient_test.py @@ -13,7 +13,7 @@ import odl from odl.solvers import pdhg -from odl.util.testutils import all_almost_equal +from odl.core.util.testutils import all_almost_equal # Places for the accepted error when comparing results PLACES = 8 @@ -46,7 +46,7 @@ def test_pdhg_simple_space(): discr_dual = op.range.zero() # Functional, use the same functional for F^* and G - f = odl.solvers.ZeroFunctional(space) + f = odl.functional.ZeroFunctional(space) g = f.convex_conj # Run the algorithm @@ -122,8 +122,8 @@ def test_pdhg_product_space(): discr_vec = discr_vec_0.copy() # Proximal operator using the same factory function for F^* and G - f = odl.solvers.ZeroFunctional(prod_op.domain) - g = odl.solvers.ZeroFunctional(prod_op.range).convex_conj + f = odl.functional.ZeroFunctional(prod_op.domain) + g = odl.functional.ZeroFunctional(prod_op.range).convex_conj # Run the algorithm pdhg(discr_vec, f, g, prod_op, niter=1, tau=TAU, sigma=SIGMA, theta=THETA) @@ -134,4 +134,4 @@ def test_pdhg_product_space(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/nonsmooth/proximal_operator_test.py b/odl/test/solvers/nonsmooth/proximal_operator_test.py index e0f85b54e25..4e8892b5a48 100644 --- a/odl/test/solvers/nonsmooth/proximal_operator_test.py +++ b/odl/test/solvers/nonsmooth/proximal_operator_test.py @@ -20,8 +20,8 @@ proximal_l2, proximal_convex_conj_l2_squared, proximal_convex_conj_kl, proximal_convex_conj_kl_cross_entropy) -from odl.util.testutils import all_almost_equal - +from odl.core.util.testutils import all_almost_equal +from odl.core.util.scipy_compatibility import lambertw # Places for the accepted error when comparing results HIGH_ACC = 8 @@ -73,7 +73,7 @@ def test_proximal_box_constraint(): # Create reference lower_np = -np.inf if lower is None else lower upper_np = np.inf if upper is None else upper - result_np = np.minimum(np.maximum(x, lower_np), upper_np).asarray() + result_np = odl.minimum(odl.maximum(x, lower_np), upper_np).asarray() # Verify equal result assert all_almost_equal(result_np, result) @@ -386,7 +386,9 @@ def test_proximal_convconj_l1_product_space(): denom = np.maximum(lam, np.sqrt((x0_arr - sigma * g0_arr) ** 2 + (x1_arr - sigma * g1_arr) ** 2)) - x_verify = lam * (x - sigma * g) / denom + print(f'{denom=}') + print(len([denom])) + x_verify = lam * (x - sigma * g) / op_domain.element([denom]) # Compare components assert all_almost_equal(x_verify, x_opt) @@ -421,7 +423,7 @@ def test_proximal_convconj_kl_simple_space(): prox(x, x_opt) # Explicit computation: - x_verify = (lam + x - np.sqrt((x - lam) ** 2 + 4 * lam * sigma * g)) / 2 + x_verify = (lam + x - odl.sqrt((x - lam) ** 2 + 4 * lam * sigma * g)) / 2 assert all_almost_equal(x_opt, x_verify, HIGH_ACC) @@ -459,7 +461,7 @@ def test_proximal_convconj_kl_product_space(): prox(x, x_opt) # Explicit computation: - x_verify = (lam + x - np.sqrt((x - lam) ** 2 + 4 * lam * sigma * g)) / 2 + x_verify = (lam + x - odl.sqrt((x - lam) ** 2 + 4 * lam * sigma * g)) / 2 # Compare components assert all_almost_equal(x_verify, x_opt) @@ -490,8 +492,8 @@ def test_proximal_convconj_kl_cross_entropy(): prox_val = prox(x) # Explicit computation: - x_verify = x - lam * scipy.special.lambertw( - sigma / lam * g * np.exp(x / lam)).real + x_verify = x - lam * lambertw( + sigma / lam * g * odl.exp(x / lam)).real assert all_almost_equal(prox_val, x_verify, HIGH_ACC) @@ -509,19 +511,19 @@ def test_proximal_arg_scaling(): space = odl.uniform_discr(0, 1, 10) # Set the functional and the prox factory. - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) prox_factory = odl.solvers.proximal_l2_squared(space) # Set the point where the proximal operator will be evaluated. x = space.one() # Set the scaling parameters. - for alpha in [2, odl.phantom.noise.uniform_noise(space, 1, 10)]: + for alpha in [2, odl.core.phantom.noise.uniform_noise(space, 1, 10)]: # Scale the proximal factories prox_scaled = odl.solvers.proximal_arg_scaling(prox_factory, alpha) # Set the step size. - for sigma in [2, odl.phantom.noise.uniform_noise(space, 1, 10)]: + for sigma in [2, odl.core.phantom.noise.uniform_noise(space, 1, 10)]: # Evaluation of the proximals p = prox_scaled(sigma)(x) @@ -533,4 +535,4 @@ def test_proximal_arg_scaling(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/nonsmooth/proximal_utils_test.py b/odl/test/solvers/nonsmooth/proximal_utils_test.py index 57bd644a348..65921e12758 100644 --- a/odl/test/solvers/nonsmooth/proximal_utils_test.py +++ b/odl/test/solvers/nonsmooth/proximal_utils_test.py @@ -17,7 +17,7 @@ proximal_arg_scaling, proximal_composition, proximal_quadratic_perturbation, proximal_translation, proximal_l2_squared) -from odl.util.testutils import all_almost_equal, noise_element, simple_fixture +from odl.core.util.testutils import all_almost_equal, noise_element, simple_fixture # Number of digits for the accepted error when comparing results NDIGITS = 8 @@ -46,13 +46,17 @@ def test_proximal_arg_scaling(scalar, sigma): prox_factory = proximal_l2_squared(space, lam=lam) scaling_param = scalar - prox = proximal_arg_scaling(prox_factory, scaling_param)(sigma) + if isinstance(scaling_param, np.ndarray): + with pytest.raises(AssertionError): + prox = proximal_arg_scaling(prox_factory, scaling_param)(sigma) + else: + prox = proximal_arg_scaling(prox_factory, scaling_param)(sigma) - x = noise_element(space) - # works for scaling_param == 0, too - expected_result = x / (2 * sigma * lam * scaling_param ** 2 + 1) + x = noise_element(space) + # works for scaling_param == 0, too + expected_result = x / (2 * sigma * lam * scaling_param ** 2 + 1) - assert all_almost_equal(prox(x), expected_result, ndigits=NDIGITS) + assert all_almost_equal(prox(x), expected_result, ndigits=NDIGITS) def test_proximal_translation(sigma): @@ -123,10 +127,14 @@ def test_proximal_composition(pos_scalar, sigma): x = space.element(np.arange(-5, 5)) prox_x = prox(x) - equiv_prox = proximal_arg_scaling(prox_factory, scal)(sigma) - expected_result = equiv_prox(x) - assert all_almost_equal(prox_x, expected_result, ndigits=NDIGITS) + if isinstance(scal, np.ndarray): + with pytest.raises(AssertionError): + equiv_prox = proximal_arg_scaling(prox_factory, scal)(sigma) + else: + equiv_prox = proximal_arg_scaling(prox_factory, scal)(sigma) + expected_result = equiv_prox(x) + assert all_almost_equal(prox_x, expected_result, ndigits=NDIGITS) if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/smooth/smooth_test.py b/odl/test/solvers/smooth/smooth_test.py index 3bea3e366ac..c4b9bd8e6c3 100644 --- a/odl/test/solvers/smooth/smooth_test.py +++ b/odl/test/solvers/smooth/smooth_test.py @@ -11,10 +11,10 @@ from __future__ import division import pytest import odl -from odl.operator import OpNotImplementedError +from odl.core.operator import OpNotImplementedError -nonlinear_cg_beta = odl.util.testutils.simple_fixture('nonlinear_cg_beta', +nonlinear_cg_beta = odl.core.util.testutils.simple_fixture('nonlinear_cg_beta', ['FR', 'PR', 'HS', 'DY']) @@ -26,12 +26,12 @@ def functional(request): if name == 'l2_squared': space = odl.rn(3) - return odl.solvers.L2NormSquared(space) + return odl.functional.L2NormSquared(space) elif name == 'l2_squared_scaled': space = odl.uniform_discr(0, 1, 3) scaling = odl.MultiplyOperator(space.element([1, 2, 3]), domain=space) - return odl.solvers.L2NormSquared(space) * scaling + return odl.functional.L2NormSquared(space) * scaling elif name == 'quadratic_form': space = odl.rn(3) # Symmetric and diagonally dominant matrix @@ -43,11 +43,11 @@ def functional(request): # Calibrate so that functional is zero in optimal point constant = 1 / 4 * vector.inner(matrix.inverse(vector)) - return odl.solvers.QuadraticForm( + return odl.functional.QuadraticForm( operator=matrix, vector=vector, constant=constant) elif name == 'rosenbrock': # Moderately ill-behaved rosenbrock functional. - rosenbrock = odl.solvers.RosenbrockFunctional(odl.rn(2), scale=2) + rosenbrock = odl.functional.RosenbrockFunctional(odl.rn(2), scale=2) # Center at zero return rosenbrock.translated([-1, -1]) @@ -156,4 +156,4 @@ def test_conjguate_gradient_nonlinear(functional, nonlinear_cg_beta): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/solvers/util/steplen_test.py b/odl/test/solvers/util/steplen_test.py index 9e1987b4953..bbc6fbf442b 100644 --- a/odl/test/solvers/util/steplen_test.py +++ b/odl/test/solvers/util/steplen_test.py @@ -16,7 +16,7 @@ def test_backtracking_line_search(): """Test some basic properties of BacktrackingLineSearch.""" space = odl.rn(2) - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) line_search = odl.solvers.BacktrackingLineSearch(func) @@ -34,7 +34,7 @@ def test_constant_line_search(): """Test some basic properties of BacktrackingLineSearch.""" space = odl.rn(2) - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) line_search = odl.solvers.ConstantLineSearch(0.57) @@ -52,7 +52,7 @@ def test_line_search_from_iternum(): """Test some basic properties of LineSearchFromIterNum.""" space = odl.rn(2) - func = odl.solvers.L2NormSquared(space) + func = odl.functional.L2NormSquared(space) line_search = odl.solvers.LineSearchFromIterNum(lambda n: 1 / (n + 1)) @@ -67,4 +67,4 @@ def test_line_search_from_iternum(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/space/space_utils_test.py b/odl/test/space/space_utils_test.py deleted file mode 100644 index 91c0fa67f87..00000000000 --- a/odl/test/space/space_utils_test.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2014-2019 The ODL contributors -# -# This file is part of ODL. -# -# This Source Code Form is subject to the terms of the Mozilla Public License, -# v. 2.0. If a copy of the MPL was not distributed with this file, You can -# obtain one at https://mozilla.org/MPL/2.0/. - -from __future__ import division -import numpy as np - -import odl -from odl import vector -from odl.space.npy_tensors import NumpyTensor -from odl.util.testutils import all_equal - - -def test_vector_numpy(): - - # Rn - inp = [[1.0, 2.0, 3.0], - [4.0, 5.0, 6.0]] - - x = vector(inp) - assert isinstance(x, NumpyTensor) - assert x.dtype == np.dtype('float64') - assert all_equal(x, inp) - - x = vector([1.0, 2.0, float('inf')]) - assert x.dtype == np.dtype('float64') - assert isinstance(x, NumpyTensor) - - x = vector([1.0, 2.0, float('nan')]) - assert x.dtype == np.dtype('float64') - assert isinstance(x, NumpyTensor) - - x = vector([1, 2, 3], dtype='float32') - assert x.dtype == np.dtype('float32') - assert isinstance(x, NumpyTensor) - - # Cn - inp = [[1 + 1j, 2, 3 - 2j], - [4 + 1j, 5, 6 - 1j]] - - x = vector(inp) - assert isinstance(x, NumpyTensor) - assert x.dtype == np.dtype('complex128') - assert all_equal(x, inp) - - x = vector([1, 2, 3], dtype='complex64') - assert isinstance(x, NumpyTensor) - - # Generic TensorSpace - inp = [1, 2, 3] - x = vector(inp) - assert isinstance(x, NumpyTensor) - assert x.dtype == np.dtype('int') - assert all_equal(x, inp) - - inp = ['a', 'b', 'c'] - x = vector(inp) - assert isinstance(x, NumpyTensor) - assert np.issubdtype(x.dtype, np.str_) - assert all_equal(x, inp) - - x = vector([1, 2, 'inf']) # Becomes string type - assert isinstance(x, NumpyTensor) - assert np.issubdtype(x.dtype, np.str_) - assert all_equal(x, ['1', '2', 'inf']) - - # Scalar or empty input - x = vector(5.0) # becomes 1d, size 1 - assert x.shape == (1,) - - x = vector([]) # becomes 1d, size 0 - assert x.shape == (0,) - - -if __name__ == '__main__': - odl.util.test_file(__file__) diff --git a/odl/test/space/tensors_test.py b/odl/test/space/tensors_test.py deleted file mode 100644 index 4ac65a5a56f..00000000000 --- a/odl/test/space/tensors_test.py +++ /dev/null @@ -1,1787 +0,0 @@ -# Copyright 2014-2020 The ODL contributors -# -# This file is part of ODL. -# -# This Source Code Form is subject to the terms of the Mozilla Public License, -# v. 2.0. If a copy of the MPL was not distributed with this file, You can -# obtain one at https://mozilla.org/MPL/2.0/. - -"""Unit tests for Numpy-based tensors.""" - -from __future__ import division - -import operator -import sys - -import numpy as np -import pytest - -import odl -from odl.set.space import LinearSpaceTypeError -from odl.space.npy_tensors import ( - NumpyTensor, NumpyTensorSpace, NumpyTensorSpaceArrayWeighting, - NumpyTensorSpaceConstWeighting, NumpyTensorSpaceCustomDist, - NumpyTensorSpaceCustomInner, NumpyTensorSpaceCustomNorm) -from odl.util.utility import real_dtype -from odl.util.testutils import ( - all_almost_equal, all_equal, noise_array, noise_element, noise_elements, - simple_fixture) -from odl.util.ufuncs import UFUNCS - -# --- Test helpers --- # - -PYTHON2 = sys.version_info.major < 3 - - -# Functions to return arrays and classes corresponding to impls. Extend -# when a new impl is available. - - -def _pos_array(space): - """Create an array with positive real entries in ``space``.""" - return np.abs(noise_array(space)) + 0.1 - - -def _array_cls(impl): - """Return the array class for given impl.""" - if impl == 'numpy': - return np.ndarray - else: - assert False - - -def _odl_tensor_cls(impl): - """Return the ODL tensor class for given impl.""" - if impl == 'numpy': - return NumpyTensor - else: - assert False - - -def _weighting_cls(impl, kind): - """Return the weighting class for given impl and kind.""" - if impl == 'numpy': - if kind == 'array': - return NumpyTensorSpaceArrayWeighting - elif kind == 'const': - return NumpyTensorSpaceConstWeighting - elif kind == 'inner': - return NumpyTensorSpaceCustomInner - elif kind == 'norm': - return NumpyTensorSpaceCustomNorm - elif kind == 'dist': - return NumpyTensorSpaceCustomDist - else: - assert False - else: - assert False - - -# --- Pytest fixtures --- # - -exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 0.5, 1.5]) - -setitem_indices_params = [ - 0, [1], (1,), (0, 1), (0, 1, 2), slice(None), slice(None, None, 2), - (0, slice(None)), (slice(None), 0, slice(None, None, 2))] -setitem_indices = simple_fixture('indices', setitem_indices_params) - -getitem_indices_params = (setitem_indices_params + - [([0, 1, 1, 0], [0, 1, 1, 2]), (Ellipsis, None)]) -getitem_indices = simple_fixture('indices', getitem_indices_params) - -weight_params = [1.0, 0.5, _pos_array(odl.tensor_space((3, 4)))] -weight_ids = [' weight=1.0 ', ' weight=0.5 ', ' weight= '] - - -@pytest.fixture(scope='module', params=weight_params, ids=weight_ids) -def weight(request): - return request.param - - -@pytest.fixture(scope='module') -def tspace(odl_floating_dtype, odl_tspace_impl): - impl = odl_tspace_impl - dtype = odl_floating_dtype - return odl.tensor_space(shape=(3, 4), dtype=dtype, impl=impl) - - -# --- Tests --- # - - -def test_init_npy_tspace(): - """Test initialization patterns and options for ``NumpyTensorSpace``.""" - # Basic class constructor - NumpyTensorSpace((3, 4)) - NumpyTensorSpace((3, 4), dtype=int) - NumpyTensorSpace((3, 4), dtype=float) - NumpyTensorSpace((3, 4), dtype=complex) - NumpyTensorSpace((3, 4), dtype=complex, exponent=1.0) - NumpyTensorSpace((3, 4), dtype=complex, exponent=float('inf')) - NumpyTensorSpace((3, 4), dtype='S1') - - # Alternative constructor - odl.tensor_space((3, 4)) - odl.tensor_space((3, 4), dtype=int) - odl.tensor_space((3, 4), exponent=1.0) - - # Constructors for real spaces - odl.rn((3, 4)) - odl.rn((3, 4), dtype='float32') - odl.rn(3) - odl.rn(3, dtype='float32') - - # Works only for real data types - with pytest.raises(ValueError): - odl.rn((3, 4), complex) - with pytest.raises(ValueError): - odl.rn(3, int) - with pytest.raises(ValueError): - odl.rn(3, 'S1') - - # Constructors for complex spaces - odl.cn((3, 4)) - odl.cn((3, 4), dtype='complex64') - odl.cn(3) - odl.cn(3, dtype='complex64') - - # Works only for complex data types - with pytest.raises(ValueError): - odl.cn((3, 4), float) - with pytest.raises(ValueError): - odl.cn(3, 'S1') - - # Init with weights or custom space functions - weight_const = 1.5 - weight_arr = _pos_array(odl.rn((3, 4), float)) - - odl.rn((3, 4), weighting=weight_const) - odl.rn((3, 4), weighting=weight_arr) - - -def test_init_tspace_weighting(weight, exponent, odl_tspace_impl): - """Test if weightings during init give the correct weighting classes.""" - impl = odl_tspace_impl - space = odl.tensor_space((3, 4), weighting=weight, exponent=exponent, - impl=impl) - - if impl == 'numpy': - if isinstance(weight, np.ndarray): - weighting_cls = _weighting_cls(impl, 'array') - else: - weighting_cls = _weighting_cls(impl, 'const') - else: - assert False - - weighting = weighting_cls(weight, exponent) - - assert space.weighting == weighting - - # Using a weighting instance - space = odl.tensor_space((3, 4), weighting=weighting, exponent=exponent, - impl=impl) - assert space.weighting is weighting - - # Errors for bad input - with pytest.raises(ValueError): - badly_sized = np.ones((2, 4)) - odl.tensor_space((3, 4), weighting=badly_sized, impl=impl) - - if impl == 'numpy': - with pytest.raises(ValueError): - bad_dtype = np.ones((3, 4), dtype=complex) - odl.tensor_space((3, 4), weighting=bad_dtype) - - with pytest.raises(TypeError): - odl.tensor_space((3, 4), weighting=1j) # float() conversion - - -def test_properties(odl_tspace_impl): - """Test that the space and element properties are as expected.""" - impl = odl_tspace_impl - space = odl.tensor_space((3, 4), dtype='float32', exponent=1, weighting=2, - impl=impl) - x = space.element() - assert x.space is space - assert x.ndim == space.ndim == 2 - assert x.dtype == space.dtype == np.dtype('float32') - assert x.size == space.size == 12 - assert x.shape == space.shape == (3, 4) - assert x.itemsize == 4 - assert x.nbytes == 4 * 3 * 4 - - -def test_size(odl_tspace_impl): - """Test that size handles corner cases appropriately.""" - impl = odl_tspace_impl - space = odl.tensor_space((3, 4), impl=impl) - assert space.size == 12 - assert type(space.size) == int - - # Size 0 - space = odl.tensor_space((), impl=impl) - assert space.size == 0 - assert type(space.size) == int - - # Overflow test - large_space = odl.tensor_space((10000,) * 3, impl=impl) - assert large_space.size == 10000 ** 3 - assert type(space.size) == int - - -def test_element(tspace, odl_elem_order): - """Test creation of space elements.""" - order = odl_elem_order - # From scratch - elem = tspace.element(order=order) - assert elem.shape == elem.data.shape - assert elem.dtype == tspace.dtype == elem.data.dtype - if order is not None: - assert elem.data.flags[order + '_CONTIGUOUS'] - - # From space elements - other_elem = tspace.element(np.ones(tspace.shape)) - elem = tspace.element(other_elem, order=order) - assert all_equal(elem, other_elem) - if order is None: - assert elem is other_elem - else: - assert elem.data.flags[order + '_CONTIGUOUS'] - - # From Numpy array (C order) - arr_c = np.random.rand(*tspace.shape).astype(tspace.dtype) - elem = tspace.element(arr_c, order=order) - assert all_equal(elem, arr_c) - assert elem.shape == elem.data.shape - assert elem.dtype == tspace.dtype == elem.data.dtype - if order is None or order == 'C': - # None or same order should not lead to copy - assert np.may_share_memory(elem.data, arr_c) - if order is not None: - # Contiguousness in explicitly provided order should be guaranteed - assert elem.data.flags[order + '_CONTIGUOUS'] - - # From Numpy array (F order) - arr_f = np.asfortranarray(arr_c) - elem = tspace.element(arr_f, order=order) - assert all_equal(elem, arr_f) - assert elem.shape == elem.data.shape - assert elem.dtype == tspace.dtype == elem.data.dtype - if order is None or order == 'F': - # None or same order should not lead to copy - assert np.may_share_memory(elem.data, arr_f) - if order is not None: - # Contiguousness in explicitly provided order should be guaranteed - assert elem.data.flags[order + '_CONTIGUOUS'] - - # From pointer - arr_c_ptr = arr_c.ctypes.data - elem = tspace.element(data_ptr=arr_c_ptr, order='C') - assert all_equal(elem, arr_c) - assert np.may_share_memory(elem.data, arr_c) - arr_f_ptr = arr_f.ctypes.data - elem = tspace.element(data_ptr=arr_f_ptr, order='F') - assert all_equal(elem, arr_f) - assert np.may_share_memory(elem.data, arr_f) - - # Check errors - with pytest.raises(ValueError): - tspace.element(order='A') # only 'C' or 'F' valid - - with pytest.raises(ValueError): - tspace.element(data_ptr=arr_c_ptr) # need order argument - - with pytest.raises(TypeError): - tspace.element(arr_c, arr_c_ptr) # forbidden to give both - - -def test_equals_space(odl_tspace_impl): - """Test equality check of spaces.""" - impl = odl_tspace_impl - space = odl.tensor_space(3, impl=impl) - same_space = odl.tensor_space(3, impl=impl) - other_space = odl.tensor_space(4, impl=impl) - - assert space == space - assert space == same_space - assert space != other_space - assert hash(space) == hash(same_space) - assert hash(space) != hash(other_space) - - -def test_equals_elem(odl_tspace_impl): - """Test equality check of space elements.""" - impl = odl_tspace_impl - r3 = odl.rn(3, exponent=2, impl=impl) - r3_1 = odl.rn(3, exponent=1, impl=impl) - r4 = odl.rn(4, exponent=2, impl=impl) - r3_elem = r3.element([1, 2, 3]) - r3_same_elem = r3.element([1, 2, 3]) - r3_other_elem = r3.element([2, 2, 3]) - r3_1_elem = r3_1.element([1, 2, 3]) - r4_elem = r4.element([1, 2, 3, 4]) - - assert r3_elem == r3_elem - assert r3_elem == r3_same_elem - assert r3_elem != r3_other_elem - assert r3_elem != r3_1_elem - assert r3_elem != r4_elem - - -def test_tspace_astype(odl_tspace_impl): - """Test creation of a space counterpart with new dtype.""" - impl = odl_tspace_impl - real_space = odl.rn((3, 4), impl=impl) - int_space = odl.tensor_space((3, 4), dtype=int, impl=impl) - assert real_space.astype(int) == int_space - - # Test propagation of weightings and the `[real/complex]_space` properties - real = odl.rn((3, 4), weighting=1.5, impl=impl) - cplx = odl.cn((3, 4), weighting=1.5, impl=impl) - real_s = odl.rn((3, 4), weighting=1.5, dtype='float32', impl=impl) - cplx_s = odl.cn((3, 4), weighting=1.5, dtype='complex64', impl=impl) - - # Real - assert real.astype('float32') == real_s - assert real.astype('float64') is real - assert real.real_space is real - assert real.astype('complex64') == cplx_s - assert real.astype('complex128') == cplx - assert real.complex_space == cplx - - # Complex - assert cplx.astype('complex64') == cplx_s - assert cplx.astype('complex128') is cplx - assert cplx.real_space == real - assert cplx.astype('float32') == real_s - assert cplx.astype('float64') == real - assert cplx.complex_space is cplx - - -def _test_lincomb(space, a, b, discontig): - """Validate lincomb against direct result using arrays.""" - # Set slice for discontiguous arrays and get result space of slicing - if discontig: - slc = tuple( - [slice(None)] * (space.ndim - 1) + [slice(None, None, 2)] - ) - res_space = space.element()[slc].space - else: - res_space = space - - # Unaliased arguments - [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) - if discontig: - x, y, z = x[slc], y[slc], z[slc] - xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] - - zarr[:] = a * xarr + b * yarr - res_space.lincomb(a, x, b, y, out=z) - assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) - - # First argument aliased with output - [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) - if discontig: - x, y, z = x[slc], y[slc], z[slc] - xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] - - zarr[:] = a * zarr + b * yarr - res_space.lincomb(a, z, b, y, out=z) - assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) - - # Second argument aliased with output - [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) - if discontig: - x, y, z = x[slc], y[slc], z[slc] - xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] - - zarr[:] = a * xarr + b * zarr - res_space.lincomb(a, x, b, z, out=z) - assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) - - # Both arguments aliased with each other - [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) - if discontig: - x, y, z = x[slc], y[slc], z[slc] - xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] - - zarr[:] = a * xarr + b * xarr - res_space.lincomb(a, x, b, x, out=z) - assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) - - # All aliased - [xarr, yarr, zarr], [x, y, z] = noise_elements(space, 3) - if discontig: - x, y, z = x[slc], y[slc], z[slc] - xarr, yarr, zarr = xarr[slc], yarr[slc], zarr[slc] - - zarr[:] = a * zarr + b * zarr - res_space.lincomb(a, z, b, z, out=z) - assert all_almost_equal([x, y, z], [xarr, yarr, zarr]) - - -def test_lincomb(tspace): - """Validate lincomb against direct result using arrays and some scalars.""" - scalar_values = [0, 1, -1, 3.41] - for a in scalar_values: - for b in scalar_values: - _test_lincomb(tspace, a, b, discontig=False) - - -def test_lincomb_discontig(odl_tspace_impl): - """Test lincomb with discontiguous input.""" - impl = odl_tspace_impl - - scalar_values = [0, 1, -1, 3.41] - - # Use small size for small array case - tspace = odl.rn((3, 4), impl=impl) - - for a in scalar_values: - for b in scalar_values: - _test_lincomb(tspace, a, b, discontig=True) - - # Use medium size to test fallback impls - tspace = odl.rn((30, 40), impl=impl) - - for a in scalar_values: - for b in scalar_values: - _test_lincomb(tspace, a, b, discontig=True) - - -def test_lincomb_exceptions(tspace): - """Test whether lincomb raises correctly for bad output element.""" - other_space = odl.rn((4, 3), impl=tspace.impl) - - other_x = other_space.zero() - x, y, z = tspace.zero(), tspace.zero(), tspace.zero() - - with pytest.raises(LinearSpaceTypeError): - tspace.lincomb(1, other_x, 1, y, z) - - with pytest.raises(LinearSpaceTypeError): - tspace.lincomb(1, y, 1, other_x, z) - - with pytest.raises(LinearSpaceTypeError): - tspace.lincomb(1, y, 1, z, other_x) - - with pytest.raises(LinearSpaceTypeError): - tspace.lincomb([], x, 1, y, z) - - with pytest.raises(LinearSpaceTypeError): - tspace.lincomb(1, x, [], y, z) - - -def test_multiply(tspace): - """Test multiply against direct array multiplication.""" - # space method - [x_arr, y_arr, out_arr], [x, y, out] = noise_elements(tspace, 3) - out_arr = x_arr * y_arr - - tspace.multiply(x, y, out) - assert all_almost_equal([x_arr, y_arr, out_arr], [x, y, out]) - - # member method - [x_arr, y_arr, out_arr], [x, y, out] = noise_elements(tspace, 3) - out_arr = x_arr * y_arr - - x.multiply(y, out=out) - assert all_almost_equal([x_arr, y_arr, out_arr], [x, y, out]) - - -def test_multiply_exceptions(tspace): - """Test if multiply raises correctly for bad input.""" - other_space = odl.rn((4, 3)) - - other_x = other_space.zero() - x, y = tspace.zero(), tspace.zero() - - with pytest.raises(LinearSpaceTypeError): - tspace.multiply(other_x, x, y) - - with pytest.raises(LinearSpaceTypeError): - tspace.multiply(x, other_x, y) - - with pytest.raises(LinearSpaceTypeError): - tspace.multiply(x, y, other_x) - - -def test_power(tspace): - """Test ``**`` against direct array exponentiation.""" - [x_arr, y_arr], [x, y] = noise_elements(tspace, n=2) - y_pos = tspace.element(np.abs(y) + 0.1) - y_pos_arr = np.abs(y_arr) + 0.1 - - # Testing standard positive integer power out-of-place and in-place - assert all_almost_equal(x ** 2, x_arr ** 2) - y **= 2 - y_arr **= 2 - assert all_almost_equal(y, y_arr) - - # Real number and negative integer power - assert all_almost_equal(y_pos ** 1.3, y_pos_arr ** 1.3) - assert all_almost_equal(y_pos ** (-3), y_pos_arr ** (-3)) - y_pos **= 2.5 - y_pos_arr **= 2.5 - assert all_almost_equal(y_pos, y_pos_arr) - - # Array raised to the power of another array, entry-wise - assert all_almost_equal(y_pos ** x, y_pos_arr ** x_arr) - y_pos **= x.real - y_pos_arr **= x_arr.real - assert all_almost_equal(y_pos, y_pos_arr) - - -def test_unary_ops(tspace): - """Verify that the unary operators (`+x` and `-x`) work as expected.""" - for op in [operator.pos, operator.neg]: - x_arr, x = noise_elements(tspace) - - y_arr = op(x_arr) - y = op(x) - - assert all_almost_equal([x, y], [x_arr, y_arr]) - - -def test_scalar_operator(tspace, odl_arithmetic_op): - """Verify binary operations with scalars. - - Verifies that the statement y = op(x, scalar) gives equivalent results - to NumPy. - """ - op = odl_arithmetic_op - if op in (operator.truediv, operator.itruediv): - ndigits = int(-np.log10(np.finfo(tspace.dtype).resolution) // 2) - else: - ndigits = int(-np.log10(np.finfo(tspace.dtype).resolution)) - - for scalar in [-31.2, -1, 0, 1, 2.13]: - x_arr, x = noise_elements(tspace) - - # Left op - if scalar == 0 and op in [operator.truediv, operator.itruediv]: - # Check for correct zero division behaviour - with pytest.raises(ZeroDivisionError): - y = op(x, scalar) - else: - y_arr = op(x_arr, scalar) - y = op(x, scalar) - - assert all_almost_equal([x, y], [x_arr, y_arr], ndigits) - - # right op - x_arr, x = noise_elements(tspace) - - y_arr = op(scalar, x_arr) - y = op(scalar, x) - - assert all_almost_equal([x, y], [x_arr, y_arr], ndigits) - - -def test_binary_operator(tspace, odl_arithmetic_op): - """Verify binary operations with tensors. - - Verifies that the statement z = op(x, y) gives equivalent results - to NumPy. - """ - op = odl_arithmetic_op - if op in (operator.truediv, operator.itruediv): - ndigits = int(-np.log10(np.finfo(tspace.dtype).resolution) // 2) - else: - ndigits = int(-np.log10(np.finfo(tspace.dtype).resolution)) - - [x_arr, y_arr], [x, y] = noise_elements(tspace, 2) - - # non-aliased left - z_arr = op(x_arr, y_arr) - z = op(x, y) - - assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr], ndigits) - - # non-aliased right - z_arr = op(y_arr, x_arr) - z = op(y, x) - - assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr], ndigits) - - # aliased operation - z_arr = op(x_arr, x_arr) - z = op(x, x) - - assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr], ndigits) - - -def test_assign(tspace): - """Test the assign method using ``==`` comparison.""" - x = noise_element(tspace) - x_old = x - y = noise_element(tspace) - - y.assign(x) - - assert y == x - assert y is not x - assert x is x_old - - # test alignment - x *= 2 - assert y != x - - -def test_inner(tspace): - """Test the inner method against numpy.vdot.""" - xd = noise_element(tspace) - yd = noise_element(tspace) - - # TODO: add weighting - correct_inner = np.vdot(yd, xd) - assert tspace.inner(xd, yd) == pytest.approx(correct_inner) - assert xd.inner(yd) == pytest.approx(correct_inner) - - -def test_inner_exceptions(tspace): - """Test if inner raises correctly for bad input.""" - other_space = odl.rn((4, 3)) - other_x = other_space.zero() - x = tspace.zero() - - with pytest.raises(LinearSpaceTypeError): - tspace.inner(other_x, x) - - with pytest.raises(LinearSpaceTypeError): - tspace.inner(x, other_x) - - -def test_norm(tspace): - """Test the norm method against numpy.linalg.norm.""" - xarr, x = noise_elements(tspace) - - correct_norm = np.linalg.norm(xarr.ravel()) - - if tspace.real_dtype == np.float16: - tolerance = 1e-3 - elif tspace.real_dtype == np.float32: - tolerance = 2e-7 - elif tspace.real_dtype == np.float64: - tolerance = 1e-15 - elif tspace.real_dtype == np.float128: - tolerance = 1e-19 - else: - raise TypeError(f"No known tolerance for dtype {tspace.dtype}") - - assert tspace.norm(x) == pytest.approx(correct_norm, rel=tolerance) - assert x.norm() == pytest.approx(correct_norm, rel=tolerance) - - -def test_norm_exceptions(tspace): - """Test if norm raises correctly for bad input.""" - other_space = odl.rn((4, 3)) - other_x = other_space.zero() - - with pytest.raises(LinearSpaceTypeError): - tspace.norm(other_x) - - -def test_pnorm(exponent): - """Test the norm method with p!=2 against numpy.linalg.norm.""" - for tspace in (odl.rn((3, 4), exponent=exponent), - odl.cn((3, 4), exponent=exponent)): - xarr, x = noise_elements(tspace) - correct_norm = np.linalg.norm(xarr.ravel(), ord=exponent) - - assert tspace.norm(x) == pytest.approx(correct_norm) - assert x.norm() == pytest.approx(correct_norm) - - -def test_dist(tspace): - """Test the dist method against numpy.linalg.norm of the difference.""" - [xarr, yarr], [x, y] = noise_elements(tspace, n=2) - - correct_dist = np.linalg.norm((xarr - yarr).ravel()) - - if tspace.real_dtype == np.float16: - tolerance = 5e-3 - elif tspace.real_dtype == np.float32: - tolerance = 2e-7 - elif tspace.real_dtype == np.float64: - tolerance = 1e-15 - elif tspace.real_dtype == np.float128: - tolerance = 1e-19 - else: - raise TypeError(f"No known tolerance for dtype {tspace.dtype}") - - assert tspace.dist(x, y) == pytest.approx(correct_dist, rel=tolerance) - assert x.dist(y) == pytest.approx(correct_dist, rel=tolerance) - - -def test_dist_exceptions(tspace): - """Test if dist raises correctly for bad input.""" - other_space = odl.rn((4, 3)) - other_x = other_space.zero() - x = tspace.zero() - - with pytest.raises(LinearSpaceTypeError): - tspace.dist(other_x, x) - - with pytest.raises(LinearSpaceTypeError): - tspace.dist(x, other_x) - - -def test_pdist(odl_tspace_impl, exponent): - """Test the dist method with p!=2 against numpy.linalg.norm of diff.""" - impl = odl_tspace_impl - spaces = [odl.rn((3, 4), exponent=exponent, impl=impl)] - cls = odl.space.entry_points.tensor_space_impl(impl) - if complex in cls.available_dtypes(): - spaces.append(odl.cn((3, 4), exponent=exponent, impl=impl)) - for space in spaces: - [xarr, yarr], [x, y] = noise_elements(space, n=2) - - correct_dist = np.linalg.norm((xarr - yarr).ravel(), ord=exponent) - assert space.dist(x, y) == pytest.approx(correct_dist) - assert x.dist(y) == pytest.approx(correct_dist) - - -def test_element_getitem(odl_tspace_impl, getitem_indices): - """Check if getitem produces correct values, shape and other stuff.""" - impl = odl_tspace_impl - space = odl.tensor_space((2, 3, 4), dtype='float32', exponent=1, - weighting=2, impl=impl) - x_arr, x = noise_elements(space) - - x_arr_sliced = x_arr[getitem_indices] - sliced_shape = x_arr_sliced.shape - x_sliced = x[getitem_indices] - - if np.isscalar(x_arr_sliced): - assert x_arr_sliced == x_sliced - else: - assert x_sliced.shape == sliced_shape - assert all_equal(x_sliced, x_arr_sliced) - - # Check that the space properties are preserved - sliced_spc = x_sliced.space - assert sliced_spc.shape == sliced_shape - assert sliced_spc.dtype == space.dtype - assert sliced_spc.exponent == space.exponent - assert sliced_spc.weighting == space.weighting - - # Check that we have a view that manipulates the original array - # (or not, depending on indexing style) - x_arr_sliced[:] = 0 - x_sliced[:] = 0 - assert all_equal(x_arr, x) - - -def test_element_setitem(odl_tspace_impl, setitem_indices): - """Check if setitem produces the same result as NumPy.""" - impl = odl_tspace_impl - space = odl.tensor_space((2, 3, 4), dtype='float32', exponent=1, - weighting=2, impl=impl) - x_arr, x = noise_elements(space) - - x_arr_sliced = x_arr[setitem_indices] - sliced_shape = x_arr_sliced.shape - - # Setting values with scalars - x_arr[setitem_indices] = 2.3 - x[setitem_indices] = 2.3 - assert all_equal(x, x_arr) - - # Setting values with arrays - rhs_arr = np.ones(sliced_shape) - x_arr[setitem_indices] = rhs_arr - x[setitem_indices] = rhs_arr - assert all_equal(x, x_arr) - - # Using a list of lists - rhs_list = (-np.ones(sliced_shape)).tolist() - x_arr[setitem_indices] = rhs_list - x[setitem_indices] = rhs_list - assert all_equal(x, x_arr) - - -def test_element_getitem_bool_array(odl_tspace_impl): - """Check if getitem with boolean array yields the same result as NumPy.""" - impl = odl_tspace_impl - space = odl.tensor_space((2, 3, 4), dtype='float32', exponent=1, - weighting=2, impl=impl) - bool_space = odl.tensor_space((2, 3, 4), dtype=bool) - x_arr, x = noise_elements(space) - cond_arr, cond = noise_elements(bool_space) - - x_arr_sliced = x_arr[cond_arr] - x_sliced = x[cond] - assert all_equal(x_arr_sliced, x_sliced) - - # Check that the space properties are preserved - sliced_spc = x_sliced.space - assert sliced_spc.shape == x_arr_sliced.shape - assert sliced_spc.dtype == space.dtype - assert sliced_spc.exponent == space.exponent - assert sliced_spc.weighting == space.weighting - - -def test_element_setitem_bool_array(odl_tspace_impl): - """Check if setitem produces the same result as NumPy.""" - impl = odl_tspace_impl - space = odl.tensor_space((2, 3, 4), dtype='float32', exponent=1, - weighting=2, impl=impl) - bool_space = odl.tensor_space((2, 3, 4), dtype=bool) - x_arr, x = noise_elements(space) - cond_arr, cond = noise_elements(bool_space) - - x_arr_sliced = x_arr[cond_arr] - sliced_shape = x_arr_sliced.shape - - # Setting values with scalars - x_arr[cond_arr] = 2.3 - x[cond] = 2.3 - assert all_equal(x, x_arr) - - # Setting values with arrays - rhs_arr = np.ones(sliced_shape) - x_arr[cond_arr] = rhs_arr - x[cond] = rhs_arr - assert all_equal(x, x_arr) - - # Using a list of lists - rhs_list = (-np.ones(sliced_shape)).tolist() - x_arr[cond_arr] = rhs_list - x[cond] = rhs_list - assert all_equal(x, x_arr) - - -def test_transpose(odl_tspace_impl): - """Test the .T property of tensors against plain inner product.""" - impl = odl_tspace_impl - spaces = [odl.rn((3, 4), impl=impl)] - cls = odl.space.entry_points.tensor_space_impl(impl) - if complex in cls.available_dtypes(): - spaces.append(odl.cn((3, 4), impl=impl)) - - for space in spaces: - x = noise_element(space) - y = noise_element(space) - - # Assert linear operator - assert isinstance(x.T, odl.Operator) - assert x.T.is_linear - - # Check result - assert x.T(y) == pytest.approx(y.inner(x)) - assert all_equal(x.T.adjoint(1.0), x) - - # x.T.T returns self - assert x.T.T == x - - -def test_multiply_by_scalar(tspace): - """Verify that mult. with NumPy scalars preserves the element type.""" - x = tspace.zero() - - # Simple scalar multiplication, as often performed in user code. - # This invokes the __mul__ and __rmul__ methods of the ODL space classes. - # Strictly speaking this operation loses precision if `tspace.dtype` has - # fewer than 64 bits (Python decimal literals are double precision), but - # it would be too cumbersome to force a change in the space's dtype. - assert x * 1.0 in tspace - assert 1.0 * x in tspace - - # Multiplying with NumPy scalars is (since NumPy-2) more restrictive: - # multiplying a scalar on the left that has a higher precision than can - # be represented in the space would upcast `x` to another space that has - # the required precision. - if np.can_cast(np.float32, tspace.dtype): - assert x * np.float32(1.0) in tspace - assert np.float32(1.0) * x in tspace - - -def test_member_copy(odl_tspace_impl): - """Test copy method of elements.""" - impl = odl_tspace_impl - space = odl.tensor_space((3, 4), dtype='float32', exponent=1, weighting=2, - impl=impl) - x = noise_element(space) - - y = x.copy() - assert x == y - assert y is not x - - # Check that result is not aliased - x *= 2 - assert x != y - - -def test_python_copy(odl_tspace_impl): - """Test compatibility with the Python copy module.""" - import copy - impl = odl_tspace_impl - space = odl.tensor_space((3, 4), dtype='float32', exponent=1, weighting=2, - impl=impl) - x = noise_element(space) - - # Shallow copy - y = copy.copy(x) - assert x == y - assert y is not x - - # Check that result is not aliased - x *= 2 - assert x != y - - # Deep copy - z = copy.deepcopy(x) - assert x == z - assert z is not x - - # Check that result is not aliased - x *= 2 - assert x != z - - -def test_conversion_to_scalar(odl_tspace_impl): - """Test conversion of size-1 vectors/tensors to scalars.""" - impl = odl_tspace_impl - space = odl.rn(1, impl=impl) - # Size 1 real space - value = 1.5 - element = space.element(value) - - assert int(element) == int(value) - assert float(element) == float(value) - assert complex(element) == complex(value) - if PYTHON2: - assert long(element) == long(value) - - # Size 1 complex space - value = 1.5 + 0.5j - element = odl.cn(1).element(value) - assert complex(element) == complex(value) - - # Size 1 multi-dimensional space - value = 2.1 - element = odl.rn((1, 1, 1)).element(value) - assert float(element) == float(value) - - # Too large space - element = odl.rn(2).one() - - with pytest.raises(TypeError): - int(element) - with pytest.raises(TypeError): - float(element) - with pytest.raises(TypeError): - complex(element) - if PYTHON2: - with pytest.raises(TypeError): - long(element) - - -def test_bool_conversion(odl_tspace_impl): - """Verify that the __bool__ function works.""" - impl = odl_tspace_impl - space = odl.tensor_space(2, dtype='float32', impl=impl) - x = space.element([0, 1]) - - with pytest.raises(ValueError): - bool(x) - assert np.any(x) - assert any(x) - assert not np.all(x) - assert not all(x) - - space = odl.tensor_space(1, dtype='float32', impl=impl) - x = space.one() - - assert np.any(x) - assert any(x) - assert np.all(x) - assert all(x) - - -def test_numpy_array_interface(odl_tspace_impl): - """Verify that the __array__ interface for NumPy works.""" - impl = odl_tspace_impl - space = odl.tensor_space((3, 4), dtype='float32', exponent=1, weighting=2, - impl=impl) - x = space.one() - arr = x.__array__() - - assert isinstance(arr, np.ndarray) - assert np.array_equal(arr, np.ones(x.shape)) - - x_arr = np.array(x) - assert np.array_equal(x_arr, np.ones(x.shape)) - x_as_arr = np.asarray(x) - assert np.array_equal(x_as_arr, np.ones(x.shape)) - x_as_any_arr = np.asanyarray(x) - assert np.array_equal(x_as_any_arr, np.ones(x.shape)) - - -def test_array_wrap_method(odl_tspace_impl): - """Verify that the __array_wrap__ method for NumPy works.""" - impl = odl_tspace_impl - space = odl.tensor_space((3, 4), dtype='float32', exponent=1, weighting=2, - impl=impl) - x_arr, x = noise_elements(space) - y_arr = np.sin(x_arr) - y = np.sin(x) # Should yield again an ODL tensor - - assert all_equal(y, y_arr) - assert y in space - - -def test_conj(tspace): - """Test complex conjugation of tensors.""" - xarr, x = noise_elements(tspace) - - xconj = x.conj() - assert all_equal(xconj, xarr.conj()) - - y = tspace.element() - xconj = x.conj(out=y) - assert xconj is y - assert all_equal(y, xarr.conj()) - - -# --- Weightings (Numpy) --- # - - -def test_array_weighting_init(odl_tspace_impl, exponent): - """Test initialization of array weightings.""" - impl = odl_tspace_impl - space = odl.rn((3, 4), impl=impl) - weight_arr = _pos_array(space) - weight_elem = space.element(weight_arr) - - weighting_cls = _weighting_cls(impl, 'array') - weighting_arr = weighting_cls(weight_arr, exponent=exponent) - weighting_elem = weighting_cls(weight_elem, exponent=exponent) - - assert isinstance(weighting_arr.array, _array_cls(impl)) - assert isinstance(weighting_elem.array, _array_cls(impl)) - - -def test_array_weighting_array_is_valid(odl_tspace_impl): - """Test the is_valid method of array weightings.""" - impl = odl_tspace_impl - space = odl.rn((3, 4), impl=impl) - weight_arr = _pos_array(space) - - weighting_cls = _weighting_cls(impl, 'array') - weighting_arr = weighting_cls(weight_arr) - - assert weighting_arr.is_valid() - - # Invalid - weight_arr[0] = 0 - weighting_arr = NumpyTensorSpaceArrayWeighting(weight_arr) - assert not weighting_arr.is_valid() - - -def test_array_weighting_equals(odl_tspace_impl): - """Test the equality check method of array weightings.""" - impl = odl_tspace_impl - space = odl.rn(5, impl=impl) - weight_arr = _pos_array(space) - weight_elem = space.element(weight_arr) - - weighting_cls = _weighting_cls(impl, 'array') - weighting_arr = weighting_cls(weight_arr) - weighting_arr2 = weighting_cls(weight_arr) - weighting_elem = weighting_cls(weight_elem) - weighting_elem_copy = weighting_cls(weight_elem.copy()) - weighting_elem2 = weighting_cls(weight_elem) - weighting_other_arr = weighting_cls(weight_arr - 1) - weighting_other_exp = weighting_cls(weight_arr - 1, exponent=1) - - assert weighting_arr == weighting_arr2 - assert weighting_arr == weighting_elem - assert weighting_arr != weighting_elem_copy - assert weighting_elem == weighting_elem2 - assert weighting_arr != weighting_other_arr - assert weighting_arr != weighting_other_exp - - -def test_array_weighting_equiv(odl_tspace_impl): - """Test the equiv method of Numpy array weightings.""" - impl = odl_tspace_impl - space = odl.rn(5, impl=impl) - weight_arr = _pos_array(space) - weight_elem = space.element(weight_arr) - different_arr = weight_arr + 1 - - arr_weighting_cls = _weighting_cls(impl, 'array') - w_arr = arr_weighting_cls(weight_arr) - w_elem = arr_weighting_cls(weight_elem) - w_different_arr = arr_weighting_cls(different_arr) - - # Equal -> True - assert w_arr.equiv(w_arr) - assert w_arr.equiv(w_elem) - # Different array -> False - assert not w_arr.equiv(w_different_arr) - - # Test shortcuts in the implementation - const_arr = np.ones(space.shape) * 1.5 - - const_weighting_cls = _weighting_cls(impl, 'const') - w_const_arr = arr_weighting_cls(const_arr) - w_const = const_weighting_cls(1.5) - w_wrong_const = const_weighting_cls(1) - w_wrong_exp = const_weighting_cls(1.5, exponent=1) - - assert w_const_arr.equiv(w_const) - assert not w_const_arr.equiv(w_wrong_const) - assert not w_const_arr.equiv(w_wrong_exp) - - # Bogus input - assert not w_const_arr.equiv(True) - assert not w_const_arr.equiv(object) - assert not w_const_arr.equiv(None) - - -def test_array_weighting_inner(tspace): - """Test inner product in a weighted space.""" - [xarr, yarr], [x, y] = noise_elements(tspace, 2) - - weight_arr = _pos_array(tspace) - weighting = NumpyTensorSpaceArrayWeighting(weight_arr) - - true_inner = np.vdot(yarr, xarr * weight_arr) - assert weighting.inner(x, y) == pytest.approx(true_inner) - - # Exponent != 2 -> no inner product, should raise - with pytest.raises(NotImplementedError): - NumpyTensorSpaceArrayWeighting(weight_arr, exponent=1.0).inner(x, y) - - -def test_array_weighting_norm(tspace, exponent): - """Test norm in a weighted space.""" - rtol = np.sqrt(np.finfo(tspace.dtype).resolution) - xarr, x = noise_elements(tspace) - - weight_arr = _pos_array(tspace) - weighting = NumpyTensorSpaceArrayWeighting(weight_arr, exponent=exponent) - - if exponent == float('inf'): - true_norm = np.linalg.norm( - (weight_arr * xarr).ravel(), - ord=float('inf')) - else: - true_norm = np.linalg.norm( - (weight_arr ** (1 / exponent) * xarr).ravel(), - ord=exponent) - - assert weighting.norm(x) == pytest.approx(true_norm, rel=rtol) - - -def test_array_weighting_dist(tspace, exponent): - """Test dist product in a weighted space.""" - rtol = np.sqrt(np.finfo(tspace.dtype).resolution) - [xarr, yarr], [x, y] = noise_elements(tspace, n=2) - - weight_arr = _pos_array(tspace) - weighting = NumpyTensorSpaceArrayWeighting(weight_arr, exponent=exponent) - - if exponent == float('inf'): - true_dist = np.linalg.norm( - (weight_arr * (xarr - yarr)).ravel(), - ord=float('inf')) - else: - true_dist = np.linalg.norm( - (weight_arr ** (1 / exponent) * (xarr - yarr)).ravel(), - ord=exponent) - - assert weighting.dist(x, y) == pytest.approx(true_dist, rel=rtol) - - -def test_const_weighting_init(odl_tspace_impl, exponent): - """Test initialization of constant weightings.""" - impl = odl_tspace_impl - constant = 1.5 - - # Just test if the code runs - weighting_cls = _weighting_cls(impl, 'const') - weighting_cls(constant, exponent=exponent) - - with pytest.raises(ValueError): - weighting_cls(0) - with pytest.raises(ValueError): - weighting_cls(-1) - with pytest.raises(ValueError): - weighting_cls(float('inf')) - - -def test_const_weighting_comparison(odl_tspace_impl): - """Test equality to and equivalence with const weightings.""" - impl = odl_tspace_impl - constant = 1.5 - - const_weighting_cls = _weighting_cls(impl, 'const') - w_const = const_weighting_cls(constant) - w_const2 = const_weighting_cls(constant) - w_other_const = const_weighting_cls(constant + 1) - w_other_exp = const_weighting_cls(constant, exponent=1) - - const_arr = constant * np.ones((3, 4)) - - arr_weighting_cls = _weighting_cls(impl, 'array') - w_const_arr = arr_weighting_cls(const_arr) - other_const_arr = (constant + 1) * np.ones((3, 4)) - w_other_const_arr = arr_weighting_cls(other_const_arr) - - assert w_const == w_const - assert w_const == w_const2 - assert w_const2 == w_const - # Different but equivalent - assert w_const.equiv(w_const_arr) - assert w_const != w_const_arr - - # Not equivalent - assert not w_const.equiv(w_other_exp) - assert w_const != w_other_exp - assert not w_const.equiv(w_other_const) - assert w_const != w_other_const - assert not w_const.equiv(w_other_const_arr) - assert w_const != w_other_const_arr - - # Bogus input - assert not w_const.equiv(True) - assert not w_const.equiv(object) - assert not w_const.equiv(None) - - -def test_const_weighting_inner(tspace): - """Test inner product with const weighting.""" - [xarr, yarr], [x, y] = noise_elements(tspace, 2) - - constant = 1.5 - true_result_const = constant * np.vdot(yarr, xarr) - - w_const = NumpyTensorSpaceConstWeighting(constant) - assert w_const.inner(x, y) == pytest.approx(true_result_const) - - # Exponent != 2 -> no inner - w_const = NumpyTensorSpaceConstWeighting(constant, exponent=1) - with pytest.raises(NotImplementedError): - w_const.inner(x, y) - - -def test_const_weighting_norm(tspace, exponent): - """Test norm with const weighting.""" - xarr, x = noise_elements(tspace) - - constant = 1.5 - if exponent == float('inf'): - factor = constant - else: - factor = constant ** (1 / exponent) - - true_norm = factor * np.linalg.norm(xarr.ravel(), ord=exponent) - - w_const = NumpyTensorSpaceConstWeighting(constant, exponent=exponent) - - if tspace.real_dtype == np.float16: - tolerance = 5e-2 - elif tspace.real_dtype == np.float32: - tolerance = 1e-6 - elif tspace.real_dtype == np.float64: - tolerance = 1e-15 - elif tspace.real_dtype == np.float128: - tolerance = 1e-19 - else: - raise TypeError(f"No known tolerance for dtype {tspace.dtype}") - - assert w_const.norm(x) == pytest.approx(true_norm, rel=tolerance) - - -def test_const_weighting_dist(tspace, exponent): - """Test dist with const weighting.""" - [xarr, yarr], [x, y] = noise_elements(tspace, 2) - - constant = 1.5 - if exponent == float('inf'): - factor = constant - else: - factor = constant ** (1 / exponent) - true_dist = factor * np.linalg.norm((xarr - yarr).ravel(), ord=exponent) - - w_const = NumpyTensorSpaceConstWeighting(constant, exponent=exponent) - - if tspace.real_dtype == np.float16: - tolerance = 5e-2 - elif tspace.real_dtype == np.float32: - tolerance = 5e-7 - elif tspace.real_dtype == np.float64: - tolerance = 1e-15 - elif tspace.real_dtype == np.float128: - tolerance = 1e-19 - else: - raise TypeError(f"No known tolerance for dtype {tspace.dtype}") - - assert w_const.dist(x, y) == pytest.approx(true_dist, rel=tolerance) - - -def test_custom_inner(tspace): - """Test weighting with a custom inner product.""" - rtol = np.sqrt(np.finfo(tspace.dtype).resolution) - - [xarr, yarr], [x, y] = noise_elements(tspace, 2) - - def inner(x, y): - return np.vdot(y, x) - - w = NumpyTensorSpaceCustomInner(inner) - w_same = NumpyTensorSpaceCustomInner(inner) - w_other = NumpyTensorSpaceCustomInner(np.dot) - - assert w == w - assert w == w_same - assert w != w_other - - true_inner = inner(xarr, yarr) - assert w.inner(x, y) == pytest.approx(true_inner) - - true_norm = np.linalg.norm(xarr.ravel()) - assert w.norm(x) == pytest.approx(true_norm) - - true_dist = np.linalg.norm((xarr - yarr).ravel()) - assert w.dist(x, y) == pytest.approx(true_dist, rel=rtol) - - with pytest.raises(TypeError): - NumpyTensorSpaceCustomInner(1) - - -def test_custom_norm(tspace): - """Test weighting with a custom norm.""" - [xarr, yarr], [x, y] = noise_elements(tspace, 2) - - norm = np.linalg.norm - - def other_norm(x): - return np.linalg.norm(x, ord=1) - - w = NumpyTensorSpaceCustomNorm(norm) - w_same = NumpyTensorSpaceCustomNorm(norm) - w_other = NumpyTensorSpaceCustomNorm(other_norm) - - assert w == w - assert w == w_same - assert w != w_other - - with pytest.raises(NotImplementedError): - w.inner(x, y) - - true_norm = np.linalg.norm(xarr.ravel()) - assert w.norm(x) == pytest.approx(true_norm) - - true_dist = np.linalg.norm((xarr - yarr).ravel()) - assert w.dist(x, y) == pytest.approx(true_dist) - - with pytest.raises(TypeError): - NumpyTensorSpaceCustomNorm(1) - - -def test_custom_dist(tspace): - """Test weighting with a custom dist.""" - [xarr, yarr], [x, y] = noise_elements(tspace, 2) - - def dist(x, y): - return np.linalg.norm(x - y) - - def other_dist(x, y): - return np.linalg.norm(x - y, ord=1) - - w = NumpyTensorSpaceCustomDist(dist) - w_same = NumpyTensorSpaceCustomDist(dist) - w_other = NumpyTensorSpaceCustomDist(other_dist) - - assert w == w - assert w == w_same - assert w != w_other - - with pytest.raises(NotImplementedError): - w.inner(x, y) - - with pytest.raises(NotImplementedError): - w.norm(x) - - true_dist = np.linalg.norm((xarr - yarr).ravel()) - assert w.dist(x, y) == pytest.approx(true_dist) - - with pytest.raises(TypeError): - NumpyTensorSpaceCustomDist(1) - - -# --- Ufuncs & Reductions --- # - - -def test_ufuncs(tspace, odl_ufunc): - """Test ufuncs in ``x.ufuncs`` against direct Numpy ufuncs.""" - name = odl_ufunc - - # Get the ufunc from numpy as reference, plus some additional info - npy_ufunc = getattr(np, name) - nin = npy_ufunc.nin - nout = npy_ufunc.nout - - if (np.issubdtype(tspace.dtype, np.floating) or - np.issubdtype(tspace.dtype, np.complexfloating) and - name in ['bitwise_and', - 'bitwise_or', - 'bitwise_xor', - 'invert', - 'left_shift', - 'right_shift']): - # Skip integer only methods for floating point data types - return - - if (np.issubdtype(tspace.dtype, np.complexfloating) and - name in ['remainder', - 'floor_divide', - 'trunc', - 'signbit', - 'invert', - 'left_shift', - 'right_shift', - 'rad2deg', - 'deg2rad', - 'copysign', - 'mod', - 'modf', - 'fmod', - 'logaddexp2', - 'logaddexp', - 'hypot', - 'arctan2', - 'floor', - 'ceil']): - # Skip real-only methods for complex data types - return - - # Create some data - arrays, elements = noise_elements(tspace, nin + nout) - in_arrays = arrays[:nin] - out_arrays = arrays[nin:] - data_elem = elements[0] - - out_elems = elements[nin:] - if nout == 1: - out_arr_kwargs = {'out': out_arrays[0]} - out_elem_kwargs = {'out': out_elems[0]} - elif nout > 1: - out_arr_kwargs = {'out': out_arrays[:nout]} - out_elem_kwargs = {'out': out_elems[:nout]} - - # Get function to call, using both interfaces: - # - vec.ufunc(other_args) - # - np.ufunc(vec, other_args) - elem_fun_old = getattr(data_elem.ufuncs, name) - in_elems_old = elements[1:nin] - elem_fun_new = npy_ufunc - in_elems_new = elements[:nin] - - # Out-of-place - npy_result = npy_ufunc(*in_arrays) - odl_result_old = elem_fun_old(*in_elems_old) - assert all_almost_equal(npy_result, odl_result_old) - odl_result_new = elem_fun_new(*in_elems_new) - assert all_almost_equal(npy_result, odl_result_new) - - # Test type of output - if nout == 1: - assert isinstance(odl_result_old, tspace.element_type) - assert isinstance(odl_result_new, tspace.element_type) - elif nout > 1: - for i in range(nout): - assert isinstance(odl_result_old[i], tspace.element_type) - assert isinstance(odl_result_new[i], tspace.element_type) - - # In-place with ODL objects as `out` - npy_result = npy_ufunc(*in_arrays, **out_arr_kwargs) - odl_result_old = elem_fun_old(*in_elems_old, **out_elem_kwargs) - assert all_almost_equal(npy_result, odl_result_old) - # In-place will not work with Numpy < 1.13 - odl_result_new = elem_fun_new(*in_elems_new, **out_elem_kwargs) - assert all_almost_equal(npy_result, odl_result_new) - - # Check that returned stuff refers to given out - if nout == 1: - assert odl_result_old is out_elems[0] - assert odl_result_new is out_elems[0] - elif nout > 1: - for i in range(nout): - assert odl_result_old[i] is out_elems[i] - assert odl_result_new[i] is out_elems[i] - - # In-place with Numpy array as `out` for new interface - out_arrays_new = [np.empty_like(arr) for arr in out_arrays] - if nout == 1: - out_elem_kwargs_new = {'out': out_arrays_new[0]} - elif nout > 1: - out_elem_kwargs_new = {'out': out_arrays_new[:nout]} - - odl_result_elem_new = elem_fun_new(*in_elems_new, - **out_elem_kwargs_new) - assert all_almost_equal(npy_result, odl_result_elem_new) - - if nout == 1: - assert odl_result_elem_new is out_arrays_new[0] - elif nout > 1: - for i in range(nout): - assert odl_result_elem_new[i] is out_arrays_new[i] - - # Check `ufunc.at` - indices = ([0, 0, 1], - [0, 1, 2]) - - mod_array = in_arrays[0].copy() - mod_elem = in_elems_new[0].copy() - if nin == 1: - npy_result = npy_ufunc.at(mod_array, indices) - odl_result = npy_ufunc.at(mod_elem, indices) - elif nin == 2: - other_array = in_arrays[1][indices] - other_elem = in_elems_new[1][indices] - npy_result = npy_ufunc.at(mod_array, indices, other_array) - odl_result = npy_ufunc.at(mod_elem, indices, other_elem) - - assert all_almost_equal(odl_result, npy_result) - - # Most ufuncs are type-preserving and can therefore be applied iteratively - # for reductions. This is not the case for equalities or logical operators, - # which can only be iterated over an array that was boolean to start with. - boolean_ufuncs = ['equal', 'not_equal', - 'greater', 'greater_equal', - 'less', 'less_equal', - 'logical_and', 'logical_or', - 'logical_xor'] - - in_array = in_arrays[0] - in_elem = in_elems_new[0] - - # Check `ufunc.reduce` - if (nin == 2 and nout == 1 - and (odl_ufunc not in boolean_ufuncs or in_array.dtype is bool)): - - # We only test along one axis since some binary ufuncs are not - # re-orderable, in which case Numpy raises a ValueError - npy_result = npy_ufunc.reduce(in_array) - odl_result = npy_ufunc.reduce(in_elem) - assert all_almost_equal(odl_result, npy_result) - odl_result_keepdims = npy_ufunc.reduce(in_elem, keepdims=True) - assert odl_result_keepdims.shape == (1,) + in_elem.shape[1:] - # In-place using `out` (with ODL vector and array) - out_elem = odl_result_keepdims.space.element() - out_array = np.empty(odl_result_keepdims.shape, - dtype=odl_result_keepdims.dtype) - npy_ufunc.reduce(in_elem, out=out_elem, keepdims=True) - npy_ufunc.reduce(in_elem, out=out_array, keepdims=True) - assert all_almost_equal(out_elem, odl_result_keepdims) - assert all_almost_equal(out_array, odl_result_keepdims) - # Using a specific dtype - npy_result = npy_ufunc.reduce(in_array, dtype=complex) - odl_result = npy_ufunc.reduce(in_elem, dtype=complex) - assert odl_result.dtype == npy_result.dtype - assert all_almost_equal(odl_result, npy_result) - - # Other ufunc method use the same interface, to we don't perform - # extra tests for them. - - -def test_ufunc_corner_cases(odl_tspace_impl): - """Check if some corner cases are handled correctly.""" - impl = odl_tspace_impl - space = odl.rn((2, 3), impl=impl) - x = space.element([[-1, 0, 1], - [1, 2, 3]]) - space_const_w = odl.rn((2, 3), weighting=2, impl=impl) - weights = [[1, 2, 1], - [3, 2, 1]] - space_arr_w = odl.rn((2, 3), weighting=weights, impl=impl) - - # --- Ufuncs with nin = 1, nout = 1 --- # - - wrong_argcount_error = ValueError if np.__version__<"1.21" else TypeError - - with pytest.raises(wrong_argcount_error): - # Too many arguments - x.__array_ufunc__(np.sin, '__call__', x, np.ones((2, 3))) - - # Check that `out=(None,)` is the same as not providing `out` - res = x.__array_ufunc__(np.sin, '__call__', x, out=(None,)) - assert all_almost_equal(res, np.sin(x.asarray())) - # Check that the result space is the same - assert res.space == space - - # Check usage of `order` argument - for order in ('C', 'F'): - res = x.__array_ufunc__(np.sin, '__call__', x, order=order) - assert all_almost_equal(res, np.sin(x.asarray())) - assert res.data.flags[order + '_CONTIGUOUS'] - - # Check usage of `dtype` argument - res = x.__array_ufunc__(np.sin, '__call__', x, dtype='float32') - assert all_almost_equal(res, np.sin(x.asarray(), dtype='float32')) - assert res.dtype == 'float32' - - # Check propagation of weightings - y = space_const_w.one() - res = y.__array_ufunc__(np.sin, '__call__', y) - assert res.space.weighting == space_const_w.weighting - y = space_arr_w.one() - res = y.__array_ufunc__(np.sin, '__call__', y) - assert res.space.weighting == space_arr_w.weighting - - # --- Ufuncs with nin = 2, nout = 1 --- # - - with pytest.raises(wrong_argcount_error): - # Too few arguments - x.__array_ufunc__(np.add, '__call__', x) - - with pytest.raises(ValueError): - # Too many outputs - out1, out2 = np.empty_like(x), np.empty_like(x) - x.__array_ufunc__(np.add, '__call__', x, x, out=(out1, out2)) - - # Check that npy_array += odl_elem works - arr = np.ones((2, 3)) - arr += x - assert all_almost_equal(arr, x.asarray() + 1) - # For Numpy >= 1.13, this will be equivalent - arr = np.ones((2, 3)) - res = x.__array_ufunc__(np.add, '__call__', arr, x, out=(arr,)) - assert all_almost_equal(arr, x.asarray() + 1) - assert res is arr - - # --- `accumulate` --- # - - res = x.__array_ufunc__(np.add, 'accumulate', x) - assert all_almost_equal(res, np.add.accumulate(x.asarray())) - assert res.space == space - arr = np.empty_like(x) - res = x.__array_ufunc__(np.add, 'accumulate', x, out=(arr,)) - assert all_almost_equal(arr, np.add.accumulate(x.asarray())) - assert res is arr - - # `accumulate` with other dtype - res = x.__array_ufunc__(np.add, 'accumulate', x, dtype='float32') - assert res.dtype == 'float32' - - # Error scenarios - with pytest.raises(ValueError): - # Too many `out` arguments - out1, out2 = np.empty_like(x), np.empty_like(x) - x.__array_ufunc__(np.add, 'accumulate', x, out=(out1, out2)) - - # --- `reduce` --- # - - res = x.__array_ufunc__(np.add, 'reduce', x) - assert all_almost_equal(res, np.add.reduce(x.asarray())) - - # With `out` argument and `axis` - out_ax0 = np.empty(3) - res = x.__array_ufunc__(np.add, 'reduce', x, axis=0, out=(out_ax0,)) - assert all_almost_equal(out_ax0, np.add.reduce(x.asarray(), axis=0)) - assert res is out_ax0 - out_ax1 = odl.rn(2, impl=impl).element() - res = x.__array_ufunc__(np.add, 'reduce', x, axis=1, out=(out_ax1,)) - assert all_almost_equal(out_ax1, np.add.reduce(x.asarray(), axis=1)) - assert res is out_ax1 - - # Addition is reorderable, so we can give multiple axes - res = x.__array_ufunc__(np.add, 'reduce', x, axis=(0, 1)) - assert res == pytest.approx(np.add.reduce(x.asarray(), axis=(0, 1))) - - # Cannot propagate weightings in a meaningful way, check that there are - # none in the result - y = space_const_w.one() - res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert not res.space.is_weighted - y = space_arr_w.one() - res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert not res.space.is_weighted - - # Check that `exponent` is propagated - space_1 = odl.rn((2, 3), exponent=1) - z = space_1.one() - res = z.__array_ufunc__(np.add, 'reduce', z, axis=0) - assert res.space.exponent == 1 - - -def testodl_reduction(tspace, odl_reduction): - """Test reductions in x.ufunc against direct Numpy reduction.""" - name = odl_reduction - npy_reduction = getattr(np, name) - - x_arr, x = noise_elements(tspace, 1) - x_reduction = getattr(x.ufuncs, name) - - # Should be equal theoretically, but summation order, other stuff, ..., - # hence we use approx - - # Full reduction, produces scalar - result_npy = npy_reduction(x_arr) - result = x_reduction() - assert result == pytest.approx(result_npy) - result = x_reduction(axis=(0, 1)) - assert result == pytest.approx(result_npy) - - # Reduction along axes, produces element in reduced space - result_npy = npy_reduction(x_arr, axis=0) - result = x_reduction(axis=0) - assert isinstance(result, NumpyTensor) - assert result.shape == result_npy.shape - assert result.dtype == x.dtype - assert np.allclose(result, result_npy) - # Check reduced space properties - assert isinstance(result.space, NumpyTensorSpace) - assert result.space.exponent == x.space.exponent - assert result.space.weighting == x.space.weighting # holds true here - # Evaluate in-place - out = result.space.element() - x_reduction(axis=0, out=out) - assert np.allclose(out, result_npy) - - # Use keepdims parameter - result_npy = npy_reduction(x_arr, axis=1, keepdims=True) - result = x_reduction(axis=1, keepdims=True) - assert result.shape == result_npy.shape - assert np.allclose(result, result_npy) - # Evaluate in-place - out = result.space.element() - x_reduction(axis=1, keepdims=True, out=out) - assert np.allclose(out, result_npy) - - # Use dtype parameter - # These reductions have a `dtype` parameter - if name in ('cumprod', 'cumsum', 'mean', 'prod', 'std', 'sum', - 'trace', 'var'): - result_npy = npy_reduction(x_arr, axis=1, dtype='complex64') - result = x_reduction(axis=1, dtype='complex64') - assert result.dtype == np.dtype('complex64') - assert np.allclose(result, result_npy) - # Evaluate in-place - out = result.space.element() - x_reduction(axis=1, dtype='complex64', out=out) - assert np.allclose(out, result_npy) - - -def test_ufunc_reduction_docs_notempty(odl_tspace_impl): - """Check that the generated docstrings are not empty.""" - impl = odl_tspace_impl - x = odl.rn(3, impl=impl).element() - - for name, _, __, ___ in UFUNCS: - ufunc = getattr(x.ufuncs, name) - assert ufunc.__doc__.splitlines()[0] != '' - - for name in ['sum', 'prod', 'min', 'max']: - reduction = getattr(x.ufuncs, name) - assert reduction.__doc__.splitlines()[0] != '' - - -if __name__ == '__main__': - odl.util.test_file(__file__) diff --git a/odl/test/system/import_test.py b/odl/test/system/import_test.py index d966dad56cf..c3cee4e5976 100644 --- a/odl/test/system/import_test.py +++ b/odl/test/system/import_test.py @@ -14,19 +14,19 @@ def test_all_imports(): # Create Cn odl.cn(3) - odl.space.cn(3) - C3 = odl.space.space_utils.cn(3) + odl.core.space.cn(3) + C3 = odl.core.space.space_utils.cn(3) # Three ways of creating the identity odl.IdentityOperator(C3) - odl.operator.IdentityOperator(C3) - odl.operator.default_ops.IdentityOperator(C3) + odl.core.operator.IdentityOperator(C3) + odl.core.operator.default_ops.IdentityOperator(C3) # Test that utility needs to be explicitly imported - odl.util.utility.array_str + odl.core.util.print_utils.array_str with pytest.raises(AttributeError): odl.array_str if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/test_doc.py b/odl/test/test_doc.py index 60b9f4d3402..7270a8d772c 100644 --- a/odl/test/test_doc.py +++ b/odl/test/test_doc.py @@ -25,7 +25,7 @@ import pytest import odl -from odl.util.testutils import simple_fixture +from odl.core.util.testutils import simple_fixture try: import matplotlib @@ -68,4 +68,4 @@ def test_file(doc_src_file): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/test_examples.py b/odl/test/test_examples.py index 49452717843..d6c449be9c8 100644 --- a/odl/test/test_examples.py +++ b/odl/test/test_examples.py @@ -26,7 +26,7 @@ import pytest import odl -from odl.util.testutils import simple_fixture +from odl.core.util.testutils import simple_fixture try: import matplotlib @@ -67,4 +67,4 @@ def test_example(example): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/trafos/backends/pyfftw_bindings_test.py b/odl/test/trafos/backends/pyfftw_bindings_test.py index d380ea280af..11002d00f78 100644 --- a/odl/test/trafos/backends/pyfftw_bindings_test.py +++ b/odl/test/trafos/backends/pyfftw_bindings_test.py @@ -12,11 +12,11 @@ import odl from odl.trafos.backends import pyfftw_call, PYFFTW_AVAILABLE -from odl.util import ( +from odl.core.util import ( is_real_dtype, complex_dtype) -from odl.util.testutils import ( +from odl.core.util.testutils import ( all_almost_equal, simple_fixture) - +from odl.core.util.dtype_utils import FLOAT_DTYPES, COMPLEX_DTYPES pytestmark = pytest.mark.skipif(not PYFFTW_AVAILABLE, reason='`pyfftw` backend not available') @@ -141,7 +141,10 @@ def test_pyfftw_call_bad_input(direction): # Bad dtype dtype_in = np.dtype('complex128') arr_in = np.empty(3, dtype=dtype_in) - bad_dtypes_out = np.sctypes['float'] + np.sctypes['complex'] + backend = odl.lookup_array_backend('numpy') + float_dt = {backend.available_dtypes[dtype] for dtype in FLOAT_DTYPES} + complex_dt = {backend.available_dtypes[dtype] for dtype in COMPLEX_DTYPES} + bad_dtypes_out = float_dt.union( complex_dt) if dtype_in in bad_dtypes_out: # This one is correct, so we remove it bad_dtypes_out.remove(dtype_in) @@ -198,7 +201,10 @@ def test_pyfftw_call_bad_input(direction): # Bad dtype dtype_in = 'float64' arr_in = np.empty(10, dtype=dtype_in) - bad_dtypes_out = np.sctypes['float'] + np.sctypes['complex'] + backend = odl.lookup_array_backend('numpy') + float_dt = {backend.available_dtypes[dtype] for dtype in FLOAT_DTYPES} + complex_dt = {backend.available_dtypes[dtype] for dtype in COMPLEX_DTYPES} + bad_dtypes_out = float_dt.union( complex_dt) try: # This one is correct, so we remove it bad_dtypes_out.remove(np.dtype('complex128')) @@ -386,4 +392,4 @@ def test_pyfftw_call_backward_with_plan(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/trafos/backends/pywt_bindings_test.py b/odl/test/trafos/backends/pywt_bindings_test.py index 098ed49045f..57c4ef493ab 100644 --- a/odl/test/trafos/backends/pywt_bindings_test.py +++ b/odl/test/trafos/backends/pywt_bindings_test.py @@ -16,7 +16,7 @@ import odl from odl.trafos.backends.pywt_bindings import ( PYWT_AVAILABLE, PAD_MODES_ODL2PYWT, pywt_wavelet, pywt_pad_mode) -from odl.util.testutils import (simple_fixture) +from odl.core.util.testutils import (simple_fixture) pytestmark = pytest.mark.skipif(not PYWT_AVAILABLE, reason='`pywt` backend not available') @@ -50,4 +50,4 @@ def test_pywt_pad_errors(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/deform/linearized_deform_test.py b/odl/test/trafos/deform/linearized_deform_test.py similarity index 63% rename from odl/test/deform/linearized_deform_test.py rename to odl/test/trafos/deform/linearized_deform_test.py index ebd2136d4ef..f7933eaf5e5 100644 --- a/odl/test/deform/linearized_deform_test.py +++ b/odl/test/trafos/deform/linearized_deform_test.py @@ -14,28 +14,29 @@ import pytest import odl -from odl.deform import LinDeformFixedDisp, LinDeformFixedTempl -from odl.space.entry_points import tensor_space_impl -from odl.util.testutils import simple_fixture +from odl.trafos.deform import LinDeformFixedDisp, LinDeformFixedTempl +from odl.core.util.testutils import simple_fixture + +from odl.core.array_API_support import get_array_and_backend, exp # --- pytest fixtures --- # -dtype = simple_fixture('dtype', ['float', 'complex']) +dtype = simple_fixture('dtype', [float, complex]) interp = simple_fixture('interp', ['linear', 'nearest']) ndim = simple_fixture('ndim', [1, 2, 3]) @pytest.fixture -def space(request, ndim, dtype, odl_tspace_impl): +def space(ndim, dtype, odl_impl_device_pairs): """Provide a space for unit tests.""" - impl = odl_tspace_impl - supported_dtypes = tensor_space_impl(impl).available_dtypes() - if np.dtype(dtype) not in supported_dtypes: - pytest.skip('dtype not available for this backend') + impl, device = odl_impl_device_pairs + # supported_dtypes = odl.lookup_array_backend(impl).available_dtypes + # # if np.dtype(dtype) not in supported_dtypes: + # # pytest.skip('dtype not available for this backend') return odl.uniform_discr( - [-1] * ndim, [1] * ndim, [20] * ndim, impl=impl, dtype=dtype + [-1] * ndim, [1] * ndim, [20] * ndim, impl=impl, dtype=dtype, device=device ) @@ -62,16 +63,27 @@ def prod(x): return prod -def template_function(x): +def numpy_template_function(x): """Gaussian function with std SIGMA.""" return np.exp(-sum(xi ** 2 for xi in x) / SIGMA ** 2) +def torch_template_function(x): + """Gaussian function with std SIGMA.""" + import torch + return torch.exp(-sum(xi ** 2 for xi in x) / SIGMA ** 2) + +def numpy_template_grad_factory(n): + """Gradient of the gaussian.""" + def template_grad_i(i): + # Indirection for lambda capture + return lambda x: -2 * x[i] / SIGMA ** 2 * numpy_template_function(x) + return [template_grad_i(i) for i in range(n)] -def template_grad_factory(n): +def torch_template_grad_factory(n): """Gradient of the gaussian.""" def template_grad_i(i): # Indirection for lambda capture - return lambda x: -2 * x[i] / SIGMA ** 2 * template_function(x) + return lambda x: -2 * x[i] / SIGMA ** 2 * torch_template_function(x) return [template_grad_i(i) for i in range(n)] @@ -92,7 +104,7 @@ def coordinate_projection_i(i): return lst -def exp_div_inv_disp(x): +def numpy_exp_div_inv_disp(x): """Exponential of the divergence of the displacement field. In 1d: exp(- EPS) @@ -102,16 +114,30 @@ def exp_div_inv_disp(x): return np.exp(- EPS * (prod(x[1:]) + (len(x) - 1))) +def torch_exp_div_inv_disp(x): + """Exponential of the divergence of the displacement field. + + In 1d: exp(- EPS) + In 2d: exp(- EPS * (y + 1)) + In 2d: exp(- EPS * (yz + 2)) + """ + import torch + return torch.exp(- EPS * (prod(x[1:]) + (len(x) - 1))) + + def displaced_points(x): """Displaced coordinate points.""" disp = [dsp(x) for dsp in disp_field_factory(len(x))] return [xi + di for xi, di in zip(x, disp)] -def deformed_template(x): +def numpy_deformed_template(x): """Deformed template.""" - return template_function(displaced_points(x)) + return numpy_template_function(displaced_points(x)) +def torch_deformed_template(x): + """Deformed template.""" + return torch_template_function(displaced_points(x)) def vector_field_factory(n): """Vector field for the gradient. @@ -126,9 +152,9 @@ def vector_field_i(i): return [vector_field_i(i) for i in range(n)] -def template_deformed_grad_factory(n): +def numpy_template_deformed_grad_factory(n): """Deformed gradient.""" - templ_grad = template_grad_factory(n) + templ_grad = numpy_template_grad_factory(n) def template_deformed_gradi(i): # Indirection for lambda capture @@ -136,27 +162,55 @@ def template_deformed_gradi(i): return [template_deformed_gradi(i) for i in range(n)] +def torch_template_deformed_grad_factory(n): + """Deformed gradient.""" + templ_grad = torch_template_grad_factory(n) + + def template_deformed_gradi(i): + # Indirection for lambda capture + return lambda x: templ_grad[i](displaced_points(x)) + + return [template_deformed_gradi(i) for i in range(n)] -def fixed_templ_deriv(x): +def numpy_fixed_templ_deriv(x): """Derivative taken in disp_field and evaluated in vector_field.""" - dg = [tdgf(x) for tdgf in template_deformed_grad_factory(len(x))] + dg = [tdgf(x) for tdgf in numpy_template_deformed_grad_factory(len(x))] v = [vff(x) for vff in vector_field_factory(len(x))] return sum(dgi * vi for dgi, vi in zip(dg, v)) +def torch_fixed_templ_deriv(x): + """Derivative taken in disp_field and evaluated in vector_field.""" + dg = [tdgf(x) for tdgf in torch_template_deformed_grad_factory(len(x))] + v = [vff(x) for vff in vector_field_factory(len(x))] + return sum(dgi * vi for dgi, vi in zip(dg, v)) -def inv_deformed_template(x): + +def numpy_inv_deformed_template(x): """Analytic inverse deformation of the template function.""" disp = [dsp(x) for dsp in disp_field_factory(len(x))] disp_x = [xi - di for xi, di in zip(x, disp)] - return template_function(disp_x) + return numpy_template_function(disp_x) + +def torch_inv_deformed_template(x): + """Analytic inverse deformation of the template function.""" + disp = [dsp(x) for dsp in disp_field_factory(len(x))] + disp_x = [xi - di for xi, di in zip(x, disp)] + return torch_template_function(disp_x) # --- LinDeformFixedTempl --- # -def test_fixed_templ_init(): +def test_fixed_templ_init(odl_impl_device_pairs): """Test init and props of linearized deformation with fixed template.""" - space = odl.uniform_discr(0, 1, 5) + impl, device = odl_impl_device_pairs + space = odl.uniform_discr(0, 1, 5, impl=impl,device=device) + + if impl == 'numpy': + template_function = numpy_template_function + else: + template_function = torch_template_function + template = space.element(template_function) # Valid input @@ -170,15 +224,38 @@ def test_fixed_templ_init(): # template_function not a DiscretizedSpaceElement LinDeformFixedTempl(template_function) +@pytest.fixture +def space(odl_impl_device_pairs): + """Provide a space for unit tests.""" + impl, device = odl_impl_device_pairs + ndim = 2 + # supported_dtypes = odl.lookup_array_backend(impl).available_dtypes + # # if np.dtype(dtype) not in supported_dtypes: + # # pytest.skip('dtype not available for this backend') + + return odl.uniform_discr( + [-1] * ndim, [1] * ndim, [20] * ndim, impl=impl, device=device + ) + def test_fixed_templ_call(space, interp): """Test call of linearized deformation with fixed template.""" # Define the analytic template as the hat function and its gradient + if space.impl == 'numpy': + template_function = numpy_template_function + deformed_template = numpy_deformed_template + else: + if space.dtype_identifier == 'complex128': + return + template_function = torch_template_function + deformed_template = torch_deformed_template + template = space.element(template_function) deform_op = LinDeformFixedTempl(template, interp=interp) # Calculate result and exact result true_deformed_templ = space.element(deformed_template) + deformed_templ = deform_op(disp_field_factory(space.ndim)) # Verify that the result is within error limits @@ -192,6 +269,13 @@ def test_fixed_templ_deriv(space, interp): if not space.is_real: pytest.skip('derivative not implemented for complex dtypes') + if space.impl == 'numpy': + template_function = numpy_template_function + fixed_templ_deriv = numpy_fixed_templ_deriv + else: + template_function = torch_template_function + fixed_templ_deriv = torch_fixed_templ_deriv + # Set up template and displacement field template = space.element(template_function) disp_field = disp_field_factory(space.ndim) @@ -214,9 +298,10 @@ def test_fixed_templ_deriv(space, interp): # --- LinDeformFixedDisp --- # -def test_fixed_disp_init(): +def test_fixed_disp_init(odl_impl_device_pairs): """Test init and props of lin. deformation with fixed displacement.""" - space = odl.uniform_discr(0, 1, 5) + impl, device=odl_impl_device_pairs + space = odl.uniform_discr(0, 1, 5, impl=impl, device=device) disp_field = space.tangent_bundle.element( disp_field_factory(space.ndim)) @@ -247,6 +332,15 @@ def test_fixed_disp_init(): def test_fixed_disp_call(space, interp): """Test call of lin. deformation with fixed displacement.""" + if space.impl == 'numpy': + template_function = numpy_template_function + deformed_template = numpy_deformed_template + else: + if space.dtype_identifier == 'complex128': + return + template_function = torch_template_function + deformed_template = torch_deformed_template + template = space.element(template_function) disp_field = space.real_space.tangent_bundle.element( disp_field_factory(space.ndim)) @@ -266,6 +360,13 @@ def test_fixed_disp_call(space, interp): def test_fixed_disp_inv(space, interp): """Test inverse of lin. deformation with fixed displacement.""" + if space.impl == 'numpy': + template_function = numpy_template_function + else: + if space.dtype_identifier == 'complex128': + return + template_function = torch_template_function + # Set up template and displacement field template = space.element(template_function) disp_field = space.real_space.tangent_bundle.element( @@ -290,6 +391,18 @@ def test_fixed_disp_inv(space, interp): def test_fixed_disp_adj(space, interp): """Test adjoint of lin. deformation with fixed displacement.""" # Set up template and displacement field + + if space.impl == 'numpy': + template_function = numpy_template_function + inv_deformed_template = numpy_inv_deformed_template + exp_div_inv_disp = numpy_exp_div_inv_disp + else: + if space.dtype_identifier == 'complex128': + return + template_function = torch_template_function + inv_deformed_template = torch_inv_deformed_template + exp_div_inv_disp = torch_exp_div_inv_disp + template = space.element(template_function) disp_field = space.real_space.tangent_bundle.element( disp_field_factory(space.ndim)) @@ -318,4 +431,4 @@ def test_fixed_disp_adj(space, interp): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/trafos/fourier_test.py b/odl/test/trafos/fourier_test.py index 45bf60c5993..5effef0e07c 100644 --- a/odl/test/trafos/fourier_test.py +++ b/odl/test/trafos/fourier_test.py @@ -18,10 +18,12 @@ from odl.trafos.util.ft_utils import ( _interp_kernel_ft, dft_postprocess_data, dft_preprocess_data, reciprocal_grid) -from odl.util import ( +from odl.core.util import ( all_almost_equal, complex_dtype, conj_exponent, is_real_dtype, noise_element, skip_if_no_pyfftw) -from odl.util.testutils import simple_fixture +from odl.core.util.testutils import simple_fixture + +from odl.core.array_API_support import allclose # --- pytest fixtures --- # @@ -29,7 +31,8 @@ impl = simple_fixture( 'impl', [pytest.param('numpy'), - pytest.param('pyfftw', marks=skip_if_no_pyfftw)] + pytest.param('pyfftw', marks=skip_if_no_pyfftw), + pytest.param('default')] ) exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 1.5]) sign = simple_fixture('sign', ['-', '+']) @@ -46,6 +49,22 @@ def _params_from_dtype(dtype): return halfcomplex, complex_dtype(dtype) +@pytest.fixture +def _dft_complex_space(odl_complex_floating_dtype, odl_impl_device_pairs): + impl, device = odl_impl_device_pairs + shape = (4,5) + return odl.uniform_discr( + [0] * 2, + np.subtract(shape, 1), + shape, + dtype=odl_complex_floating_dtype, + nodes_on_bdry=True, + impl=impl, device=device + ) +def skip_incompatible_impl(impl, dft_space): + if dft_space.impl != 'numpy' and impl!= 'default': + pytest.skip('Currently, only the numpy backend supports fft implementations that are not array-API compatible (e.g pyfftw)') + def _dft_space(shape, dtype='float64'): try: ndim = len(shape) @@ -64,15 +83,21 @@ def sinc(x): # numpy.sinc scales by pi, we don't want that return np.sinc(x / np.pi) +def sinc_pytorch(x): + # torch.sinc scales by pi, we don't want that + import torch + return torch.sinc(x / np.pi) + # ---- DiscreteFourierTransform ---- # -def test_dft_init(impl): +def test_dft_init(impl, odl_impl_device_pairs): # Just check if the code runs at all + backend_impl, device = odl_impl_device_pairs shape = (4, 5) dom = _dft_space(shape) - dom_nonseq = odl.uniform_discr([0, 0], [1, 1], shape) + dom_nonseq = odl.uniform_discr([0, 0], [1, 1], shape, impl=backend_impl, device=device) dom_f32 = dom.astype('float32') ran = _dft_space(shape, dtype='complex128') ran_c64 = ran.astype('complex64') @@ -205,11 +230,11 @@ def test_idft_init(impl): impl=impl, halfcomplex=True) -def test_dft_call(impl): - +def test_dft_call(impl, _dft_complex_space): # 2d, complex, all ones and random back & forth - shape = (4, 5) - dft_dom = _dft_space(shape, dtype='complex64') + # dft_dom = _dft_space(shape, dtype='complex64') + skip_incompatible_impl(impl,_dft_complex_space) + dft_dom = _dft_complex_space dft = DiscreteFourierTransform(domain=dft_dom, impl=impl) idft = DiscreteFourierTransformInverse(range=dft_dom, impl=impl) @@ -224,16 +249,16 @@ def test_dft_call(impl): [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] - assert np.allclose(one_dft1, true_dft) - assert np.allclose(one_dft2, true_dft) - assert np.allclose(one_dft3, true_dft) + assert all_almost_equal(one_dft1, true_dft) + assert all_almost_equal(one_dft2, true_dft) + assert all_almost_equal(one_dft3, true_dft) one_idft1 = idft(one_dft1, flags=('FFTW_ESTIMATE',)) one_idft2 = dft.inverse(one_dft1, flags=('FFTW_ESTIMATE',)) one_idft3 = dft.adjoint(one_dft1, flags=('FFTW_ESTIMATE',)) - assert np.allclose(one_idft1, one) - assert np.allclose(one_idft2, one) - assert np.allclose(one_idft3, one) + assert all_almost_equal(one_idft1, one) + assert all_almost_equal(one_idft2, one) + assert all_almost_equal(one_idft3, one) rand_arr = noise_element(dft_dom) rand_arr_dft = dft(rand_arr, flags=('FFTW_ESTIMATE',)) @@ -241,9 +266,8 @@ def test_dft_call(impl): assert (rand_arr_idft - rand_arr).norm() < 1e-6 # 2d, halfcomplex, first axis - shape = (4, 5) axes = 0 - dft_dom = _dft_space(shape, dtype='float32') + dft_dom = _dft_complex_space.real_space dft = DiscreteFourierTransform(domain=dft_dom, impl=impl, halfcomplex=True, axes=axes) idft = DiscreteFourierTransformInverse(range=dft_dom, impl=impl, @@ -257,12 +281,12 @@ def test_dft_call(impl): true_dft = [[4, 4, 4, 4, 4], # transform axis shortened [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] - assert np.allclose(one_dft, true_dft) + assert all_almost_equal(one_dft, true_dft) one_idft1 = idft(one_dft, flags=('FFTW_ESTIMATE',)) one_idft2 = dft.inverse(one_dft, flags=('FFTW_ESTIMATE',)) - assert np.allclose(one_idft1, one) - assert np.allclose(one_idft2, one) + assert all_almost_equal(one_idft1, one) + assert all_almost_equal(one_idft2, one) rand_arr = noise_element(dft_dom) rand_arr_dft = dft(rand_arr, flags=('FFTW_ESTIMATE',)) @@ -270,13 +294,12 @@ def test_dft_call(impl): assert (rand_arr_idft - rand_arr).norm() < 1e-6 -def test_dft_sign(impl): +def test_dft_sign(impl, _dft_complex_space): # Test if the FT sign behaves as expected, i.e. that the FT with sign # '+' and '-' have same real parts and opposite imaginary parts. - # 2d, complex, all ones and random back & forth - shape = (4, 5) - dft_dom = _dft_space(shape, dtype='complex64') + skip_incompatible_impl(impl, _dft_complex_space) + dft_dom = _dft_complex_space dft_minus = DiscreteFourierTransform(domain=dft_dom, impl=impl, sign='-') dft_plus = DiscreteFourierTransform(domain=dft_dom, impl=impl, sign='+') @@ -295,9 +318,8 @@ def test_dft_sign(impl): assert all_almost_equal(dft_plus.inverse.inverse(arr), dft_plus(arr)) # 2d, halfcomplex, first axis - shape = (4, 5) axes = (0,) - dft_dom = _dft_space(shape, dtype='float32') + dft_dom = _dft_complex_space.real_space arr = dft_dom.element([[0, 0, 0, 0, 0], [0, 0, 1, 1, 0], [0, 0, 1, 1, 0], @@ -344,16 +366,17 @@ def test_dft_init_plan(impl): # ---- FourierTransform ---- # -def test_fourier_trafo_range(exponent, odl_floating_dtype): +def test_fourier_trafo_range(exponent, odl_floating_dtype, odl_impl_device_pairs): # Check if the range is initialized correctly. Encompasses the init test dtype = odl_floating_dtype # Testing R2C for real dtype, else C2C + impl, device = odl_impl_device_pairs # 1D shape = 10 space_discr = odl.uniform_discr(0, 1, shape, exponent=exponent, - impl='numpy', dtype=dtype) + impl=impl, device=device, dtype=dtype) dft = FourierTransform(space_discr, halfcomplex=True, shift=True) assert dft.range.field == odl.ComplexNumbers() @@ -366,7 +389,7 @@ def test_fourier_trafo_range(exponent, odl_floating_dtype): # 3D shape = (3, 4, 5) space_discr = odl.uniform_discr([0] * 3, [1] * 3, shape, exponent=exponent, - impl='numpy', dtype=dtype) + impl=impl, device=device, dtype=dtype) dft = FourierTransform(space_discr, halfcomplex=True, shift=True) assert dft.range.field == odl.ComplexNumbers() @@ -467,8 +490,9 @@ def test_fourier_trafo_create_temp(): assert ft._tmp_f is None -def test_fourier_trafo_call(impl, odl_floating_dtype): +def test_fourier_trafo_call(impl, odl_floating_dtype, odl_impl_device_pairs): # Test if all variants can be called without error + backend_impl, device = odl_impl_device_pairs dtype = odl_floating_dtype # Not supported, skip @@ -477,40 +501,50 @@ def test_fourier_trafo_call(impl, odl_floating_dtype): shape = 10 halfcomplex, _ = _params_from_dtype(dtype) - space_discr = odl.uniform_discr(0, 1, shape, dtype=dtype) + space_discr = odl.uniform_discr(0, 1, shape, dtype=dtype, impl=backend_impl, device=device) + + skip_incompatible_impl(impl, space_discr) ft = FourierTransform(space_discr, impl=impl, halfcomplex=halfcomplex) ift = ft.inverse one = space_discr.one() - assert np.allclose(ift(ft(one)), one) + assert odl.allclose(ift(ft(one)), one) # With temporaries ft.create_temporaries() ift = ft.inverse # shares temporaries one = space_discr.one() - assert np.allclose(ift(ft(one)), one) + assert odl.allclose(ift(ft(one)), one) -def test_fourier_trafo_charfun_1d(): +def test_fourier_trafo_charfun_1d(odl_impl_device_pairs): # Characteristic function of [0, 1], its Fourier transform is # given by exp(-1j * y / 2) * sinc(y/2) + impl, device = odl_impl_device_pairs def char_interval(x): - return (x >= 0) & (x <= 1) + return (x.real >= 0) & (x.real <= 1) def char_interval_ft(x): - return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) + if impl == 'numpy': + return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) + + elif impl == 'pytorch': + import torch + return torch.exp(-1j * x / 2) * sinc_pytorch(x / 2) / np.sqrt(2 * np.pi) + else: + raise NotImplementedError # Base version - discr = odl.uniform_discr(-2, 2, 40, impl='numpy') + discr = odl.uniform_discr(-2, 2, 40, impl=impl, device=device) dft_base = FourierTransform(discr) # Complex version, should be as good - discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='complex64') + discr = odl.uniform_discr(-2, 2, 40, impl=impl, device=device, dtype='complex64') dft_complex = FourierTransform(discr) # Without shift - discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='complex64') + discr = odl.uniform_discr(-2, 2, 40, impl=impl, device=device, dtype='complex64') dft_complex_shift = FourierTransform(discr, shift=False) for dft in [dft_base, dft_complex, dft_complex_shift]: @@ -519,18 +553,27 @@ def char_interval_ft(x): assert (func_dft - func_true_ft).norm() < 5e-6 -def test_fourier_trafo_scaling(): +def test_fourier_trafo_scaling(odl_impl_device_pairs): # Test if the FT scales correctly # Characteristic function of [0, 1], its Fourier transform is # given by exp(-1j * y / 2) * sinc(y/2) + impl, device = odl_impl_device_pairs + def char_interval(x): - return (x >= 0) & (x <= 1) + return (x.real >= 0) & (x.real <= 1) def char_interval_ft(x): - return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) + if impl == 'numpy': + return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) + + elif impl == 'pytorch': + import torch + return torch.exp(-1j * x / 2) * sinc_pytorch(x / 2) / np.sqrt(2 * np.pi) + else: + raise NotImplementedError - discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='complex128') + discr = odl.uniform_discr(-2, 2, 40, impl=impl, device=device, dtype='complex128') dft = FourierTransform(discr) for factor in (2, 1j, -2.5j, 1 - 4j): @@ -539,39 +582,51 @@ def char_interval_ft(x): assert (func_dft - func_true_ft).norm() < 1e-6 -def test_fourier_trafo_sign(impl, odl_real_floating_dtype): +def test_fourier_trafo_sign(impl,odl_real_floating_dtype,odl_impl_device_pairs): # Test if the FT sign behaves as expected, i.e. that the FT with sign # '+' and '-' have same real parts and opposite imaginary parts. - + impl_backend, device = odl_impl_device_pairs discrspace_dtype = complex_dtype(odl_real_floating_dtype) def char_interval(x): - return (x >= 0) & (x <= 1) + return (x.real >= 0) & (x.real <= 1) + + discr = odl.uniform_discr(-2, 2, 40, impl=impl_backend, device=device, dtype=discrspace_dtype) + + skip_incompatible_impl(impl,discr) - discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype=discrspace_dtype) ft_minus = FourierTransform(discr, sign='-', impl=impl) ft_plus = FourierTransform(discr, sign='+', impl=impl) func_ft_minus = ft_minus(char_interval) func_ft_plus = ft_plus(char_interval) - if odl_real_floating_dtype == np.float16: - tolerance = np.linalg.norm(func_ft_minus) * 1e-3 - elif odl_real_floating_dtype == np.float32: - tolerance = np.linalg.norm(func_ft_minus) * 1e-7 - elif odl_real_floating_dtype == np.float64: - tolerance = np.linalg.norm(func_ft_minus) * 1e-15 - elif odl_real_floating_dtype == np.float128: + data = func_ft_minus.data + if impl_backend == 'pytorch': + data = data.detach().cpu().numpy() + + if odl_real_floating_dtype == "float16": + tolerance = np.linalg.norm(data) * 1e-3 + elif odl_real_floating_dtype == "float32" or odl_real_floating_dtype == float: + tolerance = np.linalg.norm(data) * 1e-7 + elif odl_real_floating_dtype == "float64" : + tolerance = np.linalg.norm(data) * 1e-15 + elif odl_real_floating_dtype == "float128": if np.__version__<'2': # NumPy-1 does not use quadruple precision for the FFT, but double precision # and converts the result, so we do not achieve closer tolerance there. - tolerance = np.linalg.norm(func_ft_minus) * 1e-15 + tolerance = np.linalg.norm(data) * 1e-15 else: - tolerance = np.linalg.norm(func_ft_minus) * 1e-19 + tolerance = np.linalg.norm(data) * 1e-19 else: raise TypeError(f"No known tolerance for dtype {odl_real_floating_dtype}") def assert_close(x,y): + x = x.data + y = y.data + if impl_backend == 'pytorch': + x = x.detach().cpu().numpy() + y = y.detach().cpu().numpy() assert(np.linalg.norm(x-y) < tolerance) assert_close(func_ft_minus.real, func_ft_plus.real) @@ -579,21 +634,26 @@ def assert_close(x,y): assert_close(ft_minus.inverse.inverse(char_interval), ft_minus(char_interval)) assert_close(ft_plus.inverse.inverse(char_interval), ft_plus(char_interval)) - discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='float32') + discr = odl.uniform_discr(-2, 2, 40, impl=impl_backend, device=device, dtype='float32') with pytest.raises(ValueError): FourierTransform(discr, sign='+', impl=impl, halfcomplex=True) with pytest.raises(ValueError): FourierTransform(discr, sign=-1, impl=impl) -def test_fourier_trafo_inverse(impl, sign): +def test_fourier_trafo_inverse(impl, sign, odl_impl_device_pairs): # Test if the inverse really is the inverse + impl_backend, device = odl_impl_device_pairs + def char_interval(x): - return (x >= 0) & (x <= 1) + return (x.real >= 0) & (x.real <= 1) # Complex-to-complex - discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='complex64') + discr = odl.uniform_discr(-2, 2, 40, impl=impl_backend, device=device, dtype='complex64') + + skip_incompatible_impl(impl,discr) + discr_char = discr.element(char_interval) ft = FourierTransform(discr, sign=sign, impl=impl) @@ -601,16 +661,16 @@ def char_interval(x): assert all_almost_equal(ft.adjoint(ft(char_interval)), discr_char) # Half-complex - discr = odl.uniform_discr(-2, 2, 40, impl='numpy', dtype='float32') + discr = odl.uniform_discr(-2, 2, 40, impl=impl_backend, device=device,dtype='float32') ft = FourierTransform(discr, impl=impl, halfcomplex=True) - assert all_almost_equal(ft.inverse(ft(char_interval)), discr_char) + assert all_almost_equal(ft.inverse(ft(char_interval)), discr_char.real) def char_rect(x): - return (x[0] >= 0) & (x[0] <= 1) & (x[1] >= 0) & (x[1] <= 1) + return (x[0].real >= 0) & (x[0].real <= 1) & (x[1].real >= 0) & (x[1].real <= 1) # 2D with axes, C2C - discr = odl.uniform_discr([-2, -2], [2, 2], (20, 10), impl='numpy', - dtype='complex64') + discr = odl.uniform_discr([-2, -2], [2, 2], (20, 10), impl=impl_backend, + device=device, dtype='complex64') discr_rect = discr.element(char_rect) for axes in [(0,), 1]: @@ -619,8 +679,8 @@ def char_rect(x): assert all_almost_equal(ft.adjoint(ft(char_rect)), discr_rect) # 2D with axes, halfcomplex - discr = odl.uniform_discr([-2, -2], [2, 2], (20, 10), impl='numpy', - dtype='float32') + discr = odl.uniform_discr([-2, -2], [2, 2], (20, 10), impl=impl_backend, + device=device, dtype='float32') discr_rect = discr.element(char_rect) for halfcomplex in [False, True]: @@ -634,46 +694,84 @@ def char_rect(x): assert all_almost_equal(ft.adjoint(ft(char_rect)), discr_rect) -def test_fourier_trafo_hat_1d(): +def test_fourier_trafo_hat_1d(odl_impl_device_pairs): # Hat function as used in linear interpolation. It is not so # well discretized by nearest neighbor interpolation, so a larger # error is to be expected. + impl, device = odl_impl_device_pairs def hat_func(x): - out = np.where(x < 0, 1 + x, 1 - x) - out[x < -1] = 0 - out[x > 1] = 0 - return out + if impl == 'numpy': + out = np.where(x < 0, 1 + x, 1 - x) + out[x < -1] = 0 + out[x > 1] = 0 + return out + elif impl == 'pytorch': + import torch + out = torch.where(x < 0, 1 + x, 1 - x) + out[x < -1] = 0 + out[x > 1] = 0 + return out + else: + raise NotImplementedError def hat_func_ft(x): - return sinc(x / 2) ** 2 / np.sqrt(2 * np.pi) - + if impl == 'numpy': + return sinc(x / 2) ** 2 / np.sqrt(2 * np.pi) + elif impl == 'pytorch': + return sinc_pytorch(x / 2) ** 2 / np.sqrt(2 * np.pi) + else: + raise NotImplementedError # Using a single-precision implementation, should be as good - discr = odl.uniform_discr(-2, 2, 101, impl='numpy', dtype='float32') + discr = odl.uniform_discr(-2, 2, 101, impl=impl, device=device, dtype='float32') dft = FourierTransform(discr) func_true_ft = dft.range.element(hat_func_ft) func_dft = dft(hat_func) assert (func_dft - func_true_ft).norm() < 0.001 -def test_fourier_trafo_complex_sum(): +def test_fourier_trafo_complex_sum(odl_impl_device_pairs): # Sum of characteristic function and hat function, both with # known FT's. + + impl, device = odl_impl_device_pairs + def hat_func(x): - out = 1 - np.abs(x) - out[x < -1] = 0 - out[x > 1] = 0 - return out + if impl == 'numpy': + out = np.where(x < 0, 1 + x, 1 - x) + out[x < -1] = 0 + out[x > 1] = 0 + return out + elif impl == 'pytorch': + import torch + out = torch.where(x.real < 0, 1 + x.real, 1 - x.real) + out[x.real < -1] = 0 + out[x.real > 1] = 0 + return out + else: + raise NotImplementedError def hat_func_ft(x): - return sinc(x / 2) ** 2 / np.sqrt(2 * np.pi) + if impl == 'numpy': + return sinc(x / 2) ** 2 / np.sqrt(2 * np.pi) + elif impl == 'pytorch': + return sinc_pytorch(x / 2) ** 2 / np.sqrt(2 * np.pi) + else: + raise NotImplementedError def char_interval(x): - return (x >= 0) & (x <= 1) + return (x.real >= 0) & (x.real <= 1) def char_interval_ft(x): - return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) + if impl == 'numpy': + return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) + + elif impl == 'pytorch': + import torch + return torch.exp(-1j * x / 2) * sinc_pytorch(x / 2) / np.sqrt(2 * np.pi) + else: + raise NotImplementedError - discr = odl.uniform_discr(-2, 2, 200, impl='numpy', dtype='complex128') + discr = odl.uniform_discr(-2, 2, 200, impl=impl, device=device, dtype='complex128') dft = FourierTransform(discr, shift=False) func = discr.element(hat_func) + 1j * discr.element(char_interval) @@ -685,30 +783,50 @@ def char_interval_ft(x): assert (func_dft - func_true_ft).norm() < 0.001 -def test_fourier_trafo_gaussian_1d(): +def test_fourier_trafo_gaussian_1d(odl_impl_device_pairs): # Gaussian function, will be mapped to itself. Truncation error is # relatively large, though, we need a large support. - def gaussian(x): - return np.exp(-x ** 2 / 2) - discr = odl.uniform_discr(-10, 10, 201, impl='numpy') + impl, device = odl_impl_device_pairs + + def gaussian(x): + if impl == 'numpy': + return np.exp(-x ** 2 / 2) + elif impl == 'pytorch': + import torch + return torch.exp(-x ** 2 / 2) + else: + raise NotImplementedError + discr = odl.uniform_discr(-10, 10, 201, impl=impl, device=device) dft = FourierTransform(discr) func_true_ft = dft.range.element(gaussian) func_dft = dft(gaussian) assert (func_dft - func_true_ft).norm() < 0.001 -def test_fourier_trafo_freq_shifted_charfun_1d(): +def test_fourier_trafo_freq_shifted_charfun_1d(odl_impl_device_pairs): # Frequency-shifted characteristic function: mult. with # exp(-1j * b * x) corresponds to shifting the FT by b. + impl, device=odl_impl_device_pairs + def fshift_char_interval(x): - return np.exp(-1j * x * np.pi) * ((x >= -0.5) & (x <= 0.5)) + if impl == 'numpy': + return np.exp(-1j * x * np.pi) * ((x >= -0.5) & (x <= 0.5)) + elif impl== 'pytorch': + import torch + return torch.exp(-1j * x * np.pi) * ((x.real >= -0.5) & (x.real <= 0.5)) + else: + raise NotImplementedError def fshift_char_interval_ft(x): - return sinc((x + np.pi) / 2) / np.sqrt(2 * np.pi) - + if impl=='numpy': + return sinc((x + np.pi) / 2) / np.sqrt(2 * np.pi) + elif impl== 'pytorch': + return sinc_pytorch((x + np.pi) / 2) / np.sqrt(2 * np.pi) + else: + raise NotImplementedError # Number of points is very important here (aliasing) - discr = odl.uniform_discr(-2, 2, 400, impl='numpy', + discr = odl.uniform_discr(-2, 2, 400, impl=impl, device=device, dtype='complex64') dft = FourierTransform(discr) func_true_ft = dft.range.element(fshift_char_interval_ft) @@ -716,38 +834,54 @@ def fshift_char_interval_ft(x): assert (func_dft - func_true_ft).norm() < 0.001 -def test_dft_with_known_pairs_2d(): - +def test_dft_with_known_pairs_2d(odl_impl_device_pairs): + impl, device=odl_impl_device_pairs # Frequency-shifted product of characteristic functions def fshift_char_rect(x): # Characteristic function of the cuboid # [-1, 1] x [1, 2] - return (x[0] >= -1) & (x[0] <= 1) & (x[1] >= 1) & (x[1] <= 2) + if impl =='numpy': + return (x[0] >= -1) & (x[0] <= 1) & (x[1] >= 1) & (x[1] <= 2) + elif impl == 'pytorch': + return (x[0].real >= -1) & (x[0].real <= 1) & (x[1].real >= 1) & (x[1].real <= 2) + else: + raise NotImplementedError def fshift_char_rect_ft(x): # FT is a product of shifted and frequency-shifted sinc functions # 1st comp.: 2 * sinc(y) # 2nd comp.: exp(-1j * y * 3/2) * sinc(y/2) # Overall factor: (2 * pi)^(-1) - return ( - 2 * sinc(x[0]) - * np.exp(-1j * x[1] * 3 / 2) * sinc(x[1] / 2) - / (2 * np.pi) - ) + if impl =='numpy': + return ( + 2 * sinc(x[0]) + * np.exp(-1j * x[1] * 3 / 2) * sinc(x[1] / 2) + / (2 * np.pi) + ) + elif impl == 'pytorch': + import torch + return ( + 2 * sinc_pytorch(x[0]) + * torch.exp(-1j * x[1] * 3 / 2) * sinc_pytorch(x[1] / 2) + / (2 * np.pi) + ) - discr = odl.uniform_discr([-2] * 2, [2] * 2, (100, 400), impl='numpy', - dtype='complex64') + else: + raise NotImplementedError + + discr = odl.uniform_discr([-2] * 2, [2] * 2, (100, 400), impl=impl, device=device, dtype='complex64') dft = FourierTransform(discr) func_true_ft = dft.range.element(fshift_char_rect_ft) func_dft = dft(fshift_char_rect) assert (func_dft - func_true_ft).norm() < 0.001 -def test_fourier_trafo_completely(): +def test_fourier_trafo_completely(odl_impl_device_pairs): # Complete explicit test of all FT components on two small examples - + impl, device = odl_impl_device_pairs + # Discretization with 4 points - discr = odl.uniform_discr(-2, 2, 4, dtype='complex') + discr = odl.uniform_discr(-2, 2, 4, impl=impl, device=device, dtype=complex) # Interval boundaries -2, -1, 0, 1, 2 assert np.allclose(discr.partition.cell_boundary_vecs[0], [-2, -1, 0, 1, 2]) @@ -758,14 +892,25 @@ def test_fourier_trafo_completely(): # First test function, symmetric. Can be represented exactly in the # discretization. def f(x): - return (x >= -1) & (x <= 1) + if impl == 'numpy': + return (x >= -1) & (x <= 1) + elif impl == 'pytorch': + return (x.real >= -1) & (x.real <= 1) + else: + raise NotImplementedError + def fhat(x): - return np.sqrt(2 / np.pi) * sinc(x) + if impl == 'numpy': + return np.sqrt(2 / np.pi) * sinc(x) + elif impl == 'pytorch': + return np.sqrt(2 / np.pi) * sinc_pytorch(x) + else: + raise NotImplementedError # Discretize f, check values f_discr = discr.element(f) - assert np.allclose(f_discr, [0, 1, 1, 0]) + assert all_almost_equal(f_discr, [0, 1, 1, 0]) # "s" = shifted, "n" = not shifted @@ -779,9 +924,12 @@ def fhat(x): # Range range_part_s = odl.uniform_partition_fromgrid(recip_s) - range_s = odl.uniform_discr_frompartition(range_part_s, dtype='complex') + range_s = odl.uniform_discr_frompartition(range_part_s, dtype=complex, impl=impl, device=device) range_part_n = odl.uniform_partition_fromgrid(recip_n) - range_n = odl.uniform_discr_frompartition(range_part_n, dtype='complex') + range_n = odl.uniform_discr_frompartition(range_part_n, dtype=complex, impl=impl, device=device) + + namespace = discr.array_namespace + backend = discr.array_backend # Pre-processing preproc_s = [1, -1, 1, -1] @@ -789,19 +937,20 @@ def fhat(x): fpre_s = dft_preprocess_data(f_discr, shift=True) fpre_n = dft_preprocess_data(f_discr, shift=False) - assert np.allclose(fpre_s, f_discr * discr.element(preproc_s)) - assert np.allclose(fpre_n, f_discr * discr.element(preproc_n)) + assert all_almost_equal(fpre_s, f_discr * discr.element(preproc_s)) + assert all_almost_equal(fpre_n, f_discr * discr.element(preproc_n)) # FFT step, replicating the _call_numpy method - fft_s = np.fft.fftn(fpre_s, s=discr.shape, axes=[0]) - fft_n = np.fft.fftn(fpre_n, s=discr.shape, axes=[0]) - assert np.allclose(fft_s, [0, -1 + 1j, 2, -1 - 1j]) - assert np.allclose( - fft_n, - [np.exp(1j * np.pi * (3 - 2 * k) / 4) - + np.exp(1j * np.pi * (3 - 2 * k) / 2) - for k in range(4)] - ) + fft_s = namespace.fft.fftn(fpre_s, s=discr.shape, axes=[0]) + fft_n = namespace.fft.fftn(fpre_n, s=discr.shape, axes=[0]) + s = backend.array_constructor([0, -1 + 1j, 2, -1 - 1j], device=device, dtype=complex) + n = backend.array_constructor([ + np.exp(1j * np.pi * (3 - 2 * k) / 4) + + np.exp(1j * np.pi * (3 - 2 * k) / 2) + for k in range(4)], device=device, dtype=complex + ) + assert namespace.allclose(fft_s, s) + assert namespace.allclose(fft_n, n) # Interpolation kernel FT interp_s = np.sinc(np.linspace(-1 / 2, 1 / 4, 4)) / np.sqrt(2 * np.pi) @@ -823,13 +972,21 @@ def fhat(x): fpost_n = dft_postprocess_data( range_n.element(fft_n), real_grid=discr.grid, recip_grid=recip_n, shift=[False], axes=(0,), interp='nearest') + + postproc_s = backend.array_constructor(postproc_s, device=device) + postproc_n = backend.array_constructor(postproc_n, device=device) - assert np.allclose(fpost_s, fft_s * postproc_s * interp_s) - assert np.allclose(fpost_n, fft_n * postproc_n * interp_n) + interp_s = backend.array_constructor(interp_s, device=device) + interp_n = backend.array_constructor(interp_n, device=device) + + assert namespace.allclose(fpost_s, fft_s * postproc_s * interp_s) + assert namespace.allclose(fpost_n, fft_n * postproc_n * interp_n) # Comparing to the known result sqrt(2/pi) * sinc(x) - assert np.allclose(fpost_s, fhat(recip_s.coord_vectors[0])) - assert np.allclose(fpost_n, fhat(recip_n.coord_vectors[0])) + recip_s_array = backend.array_constructor(recip_s.coord_vectors[0], device=device) + recip_n_array = backend.array_constructor(recip_n.coord_vectors[0], device=device) + assert namespace.allclose(fpost_s.real, fhat(recip_s_array)) + assert namespace.allclose(fpost_n.real, fhat(recip_n_array)) # Doing the exact same with direct application of the FT operator ft_op_s = FourierTransform(discr, shift=True) @@ -839,32 +996,36 @@ def fhat(x): ft_f_s = ft_op_s(f) ft_f_n = ft_op_n(f) - assert np.allclose(ft_f_s, fhat(recip_s.coord_vectors[0])) - assert np.allclose(ft_f_n, fhat(recip_n.coord_vectors[0])) + assert all_almost_equal(ft_f_s.real, fhat(recip_s_array)) + assert all_almost_equal(ft_f_n.real, fhat(recip_n_array)) # Second test function, asymmetric. Can also be represented exactly in the # discretization. def f(x): - return (x >= 0) & (x <= 1) + return (x.real >= 0) & (x.real <= 1) def fhat(x): - return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) - + if impl == 'numpy': + return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) + elif impl == 'pytorch': + return namespace.exp(-1j * x / 2) * sinc_pytorch(x / 2) / np.sqrt(2 * np.pi) + else: + raise NotImplementedError # Discretize f, check values f_discr = discr.element(f) - assert np.allclose(f_discr, [0, 0, 1, 0]) + assert all_almost_equal(f_discr, [0, 0, 1, 0]) # Pre-processing fpre_s = dft_preprocess_data(f_discr, shift=True) fpre_n = dft_preprocess_data(f_discr, shift=False) - assert np.allclose(fpre_s, [0, 0, 1, 0]) - assert np.allclose(fpre_n, [0, 0, -1j, 0]) + assert all_almost_equal(fpre_s, [0, 0, 1, 0]) + assert all_almost_equal(fpre_n, backend.array_constructor([0, 0, -1j, 0], device=device, dtype=complex)) # FFT step - fft_s = np.fft.fftn(fpre_s, s=discr.shape, axes=[0]) - fft_n = np.fft.fftn(fpre_n, s=discr.shape, axes=[0]) - assert np.allclose(fft_s, [1, -1, 1, -1]) - assert np.allclose(fft_n, [-1j, 1j, -1j, 1j]) + fft_s = namespace.fft.fftn(fpre_s, s=discr.shape, axes=[0]) + fft_n = namespace.fft.fftn(fpre_n, s=discr.shape, axes=[0]) + assert all_almost_equal(fft_s, [1, -1, 1, -1]) + assert all_almost_equal(fft_n, backend.array_constructor([-1j, 1j, -1j, 1j], device=device, dtype=complex)) fpost_s = dft_postprocess_data( range_s.element(fft_s), real_grid=discr.grid, recip_grid=recip_s, @@ -873,19 +1034,21 @@ def fhat(x): range_n.element(fft_n), real_grid=discr.grid, recip_grid=recip_n, shift=[False], axes=(0,), interp='nearest') - assert np.allclose(fpost_s, fft_s * postproc_s * interp_s) - assert np.allclose(fpost_n, fft_n * postproc_n * interp_n) + assert all_almost_equal(fpost_s, fft_s * postproc_s * interp_s) + assert all_almost_equal(fpost_n, fft_n * postproc_n * interp_n) # Comparing to the known result exp(-1j*x/2) * sinc(x/2) / sqrt(2*pi) - assert np.allclose(fpost_s, fhat(recip_s.coord_vectors[0])) - assert np.allclose(fpost_n, fhat(recip_n.coord_vectors[0])) + recip_s = backend.array_constructor(recip_s.coord_vectors[0], device=device) + recip_n = backend.array_constructor(recip_n.coord_vectors[0], device=device) + assert all_almost_equal(fpost_s, fhat(recip_s)) + assert all_almost_equal(fpost_n, fhat(recip_n)) # Doing the exact same with direct application of the FT operator ft_f_s = ft_op_s(f) ft_f_n = ft_op_n(f) - assert np.allclose(ft_f_s, fhat(recip_s.coord_vectors[0])) - assert np.allclose(ft_f_n, fhat(recip_n.coord_vectors[0])) + assert all_almost_equal(ft_f_s, fhat(recip_s)) + assert all_almost_equal(ft_f_n, fhat(recip_n)) if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/trafos/util/ft_utils_test.py b/odl/test/trafos/util/ft_utils_test.py index 70f164c8d09..34748b5aba6 100644 --- a/odl/test/trafos/util/ft_utils_test.py +++ b/odl/test/trafos/util/ft_utils_test.py @@ -14,8 +14,8 @@ import odl from odl.trafos.util.ft_utils import ( reciprocal_grid, realspace_grid, dft_preprocess_data) -from odl.util import all_almost_equal, all_equal -from odl.util.testutils import simple_fixture +from odl.core.util import all_almost_equal, all_equal +from odl.core.util.testutils import simple_fixture # --- pytest fixtures --- # @@ -296,4 +296,4 @@ def test_dft_preprocess_data_with_axes(sign): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/trafos/wavelet_test.py b/odl/test/trafos/wavelet_test.py index c0fdc844cd9..439be01a572 100644 --- a/odl/test/trafos/wavelet_test.py +++ b/odl/test/trafos/wavelet_test.py @@ -11,7 +11,7 @@ import pytest import odl -from odl.util.testutils import ( +from odl.core.util.testutils import ( all_almost_equal, noise_element, simple_fixture, skip_if_no_pywavelets) @@ -114,4 +114,4 @@ def test_wavelet_transform(wave_impl, shape_setup, odl_floating_dtype, axes): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/util/normalize_test.py b/odl/test/util/normalize_test.py index b070a0bcc09..ce3324e6fdb 100644 --- a/odl/test/util/normalize_test.py +++ b/odl/test/util/normalize_test.py @@ -12,9 +12,9 @@ import pytest import odl -from odl.util.normalize import ( +from odl.core.util.normalize import ( normalized_axes_tuple, normalized_scalar_param_list) -from odl.util.testutils import simple_fixture +from odl.core.util.testutils import simple_fixture # --- pytest fixtures --- # @@ -195,4 +195,4 @@ def test_normalized_axes_tuple_raise(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/util/numerics_test.py b/odl/test/util/numerics_test.py index a592ba4fe18..1a82fd4c5ce 100644 --- a/odl/test/util/numerics_test.py +++ b/odl/test/util/numerics_test.py @@ -11,11 +11,11 @@ import numpy as np import odl import pytest -from odl.util import is_real_dtype -from odl.util.numerics import ( +from odl.core.util import is_real_dtype +from odl.core.util.numerics import ( _SUPPORTED_RESIZE_PAD_MODES, apply_on_boundary, binning, fast_1d_tensor_mult, resize_array) -from odl.util.testutils import ( +from odl.core.util.testutils import ( all_almost_equal, all_equal, dtype_tol, simple_fixture) # --- pytest fixtures --- # @@ -378,7 +378,7 @@ def test_fast_1d_tensor_mult_error(): x, y, z = (np.arange(size, dtype='float64') for size in shape) # No ndarray to operate on - with pytest.raises(TypeError): + with pytest.raises(ValueError): fast_1d_tensor_mult([[0, 0], [0, 0]], [x, x]) # No 1d arrays given @@ -410,8 +410,8 @@ def test_fast_1d_tensor_mult_error(): # --- resize_array --- # -def test_resize_array_fwd(resize_setup, odl_scalar_dtype): - dtype = odl_scalar_dtype +def test_resize_array_fwd(resize_setup, odl_floating_dtype): + dtype = odl_floating_dtype pad_mode, pad_const, newshp, offset, array_in, true_out = resize_setup array_in = np.array(array_in, dtype=dtype) true_out = np.array(true_out, dtype=dtype) @@ -518,7 +518,7 @@ def test_resize_array_raise(): resize_array(arr_1d, 19) # out given, but not an ndarray - with pytest.raises(TypeError): + with pytest.raises(AttributeError): resize_array(arr_1d, (10,), out=[]) # out has wrong shape @@ -541,7 +541,7 @@ def test_resize_array_raise(): # padding constant cannot be cast to output data type with pytest.raises(ValueError): resize_array(arr_1d, (10,), pad_const=1.5) # arr_1d has dtype int - with pytest.raises(ValueError): + with pytest.raises(TypeError): arr_1d_float = arr_1d.astype(float) resize_array(arr_1d_float, (10,), pad_const=1.0j) @@ -621,4 +621,4 @@ def test_binning_corner_cases(): if __name__ == '__main__': - odl.util.test_file(__file__) + odl.core.util.test_file(__file__) diff --git a/odl/test/util/utility_test.py b/odl/test/util/utility_test.py index 55dc0ed4b37..43782fb15d2 100644 --- a/odl/test/util/utility_test.py +++ b/odl/test/util/utility_test.py @@ -10,21 +10,22 @@ import odl import numpy as np -from odl.util.utility import ( - is_numeric_dtype, is_real_dtype, is_real_floating_dtype, - is_complex_floating_dtype, SCTYPES) - - -real_float_dtypes = SCTYPES['float'] -complex_float_dtypes = SCTYPES['complex'] -nonfloat_numeric_dtypes = SCTYPES['uint'] + SCTYPES['int'] +from odl.core.util.dtype_utils import ( + is_numeric_dtype, is_real_dtype, is_floating_dtype, + is_complex_dtype, + FLOAT_DTYPES, + COMPLEX_DTYPES, + INTEGER_DTYPES + ) + + +real_float_dtypes = FLOAT_DTYPES +complex_float_dtypes = COMPLEX_DTYPES +nonfloat_numeric_dtypes = INTEGER_DTYPES numeric_dtypes = (real_float_dtypes + complex_float_dtypes + nonfloat_numeric_dtypes) real_dtypes = real_float_dtypes + nonfloat_numeric_dtypes # Need to make concrete instances here (with string lengths) -nonnumeric_dtypes = [np.dtype('S1'), np.dtype('`_ for details. - By default, a suitable projector type for the given geometry is - selected, see `default_astra_proj_type`. - - Returns - ------- - out : ``proj_space`` element - Projection data resulting from the application of the projector. - If ``out`` was provided, the returned object is a reference to it. - """ - if not isinstance(vol_data, DiscretizedSpaceElement): - raise TypeError( - 'volume data {!r} is not a `DiscretizedSpaceElement` instance' - ''.format(vol_data) - ) - if vol_data.space.impl != 'numpy': - raise TypeError( - "`vol_data.space.impl` must be 'numpy', got {!r}" - "".format(vol_data.space.impl) - ) - if not isinstance(geometry, Geometry): - raise TypeError( - 'geometry {!r} is not a Geometry instance'.format(geometry) - ) - if not isinstance(proj_space, DiscretizedSpace): - raise TypeError( - '`proj_space` {!r} is not a DiscretizedSpace instance.' - ''.format(proj_space) - ) - if proj_space.impl != 'numpy': - raise TypeError( - "`proj_space.impl` must be 'numpy', got {!r}" - "".format(proj_space.impl) - ) - if vol_data.ndim != geometry.ndim: - raise ValueError( - 'dimensions {} of volume data and {} of geometry do not match' - ''.format(vol_data.ndim, geometry.ndim) - ) - if out is None: - out = proj_space.element() - else: - if out not in proj_space: - raise TypeError( - '`out` {} is neither None nor a `DiscretizedSpaceElement` ' - 'instance'.format(out) - ) - - ndim = vol_data.ndim - - # Create astra geometries - vol_geom = astra_volume_geometry(vol_data.space) - proj_geom = astra_projection_geometry(geometry) - - # Create projector - if astra_proj_type is None: - astra_proj_type = default_astra_proj_type(geometry) - proj_id = astra_projector(astra_proj_type, vol_geom, proj_geom, ndim) - - # Create ASTRA data structures - vol_data_arr = np.asarray(vol_data) - vol_id = astra_data(vol_geom, datatype='volume', data=vol_data_arr, - allow_copy=True) - - with writable_array(out, dtype='float32', order='C') as out_arr: - sino_id = astra_data(proj_geom, datatype='projection', data=out_arr, - ndim=proj_space.ndim) - - # Create algorithm - algo_id = astra_algorithm('forward', ndim, vol_id, sino_id, proj_id, - impl='cpu') - - # Run algorithm - astra.algorithm.run(algo_id) - - # Delete ASTRA objects - astra.algorithm.delete(algo_id) - astra.data2d.delete((vol_id, sino_id)) - astra.projector.delete(proj_id) - - return out - - -def astra_cpu_back_projector(proj_data, geometry, vol_space, out=None, - astra_proj_type=None): - """Run an ASTRA back-projection on the given data using the CPU. - - Parameters - ---------- - proj_data : `DiscretizedSpaceElement` - Projection data to which the back-projector is applied. - geometry : `Geometry` - Geometry defining the tomographic setup. - vol_space : `DiscretizedSpace` - Space to which the calling operator maps. - out : ``vol_space`` element, optional - Element of the reconstruction space to which the result is written. - If ``None``, an element in ``vol_space`` is created. - astra_proj_type : str, optional - Type of projector that should be used. See `the ASTRA documentation - `_ for details. - By default, a suitable projector type for the given geometry is - selected, see `default_astra_proj_type`. - - Returns - ------- - out : ``vol_space`` element - Reconstruction data resulting from the application of the backward - projector. If ``out`` was provided, the returned object is a - reference to it. - """ - if not isinstance(proj_data, DiscretizedSpaceElement): - raise TypeError( - 'projection data {!r} is not a `DiscretizedSpaceElement` ' - 'instance'.format(proj_data) - ) - if proj_data.space.impl != 'numpy': - raise TypeError( - '`proj_data` must be a `numpy.ndarray` based, container, ' - "got `impl` {!r}".format(proj_data.space.impl) - ) - if not isinstance(geometry, Geometry): - raise TypeError( - 'geometry {!r} is not a Geometry instance'.format(geometry) - ) - if not isinstance(vol_space, DiscretizedSpace): - raise TypeError( - 'volume space {!r} is not a DiscretizedSpace instance' - ''.format(vol_space) - ) - if vol_space.impl != 'numpy': - raise TypeError( - "`vol_space.impl` must be 'numpy', got {!r}".format(vol_space.impl) - ) - if vol_space.ndim != geometry.ndim: - raise ValueError( - 'dimensions {} of reconstruction space and {} of geometry ' - 'do not match' - ''.format(vol_space.ndim, geometry.ndim) - ) - if out is None: - out = vol_space.element() - else: - if out not in vol_space: - raise TypeError( - '`out` {} is neither None nor a `DiscretizedSpaceElement` ' - 'instance'.format(out) - ) - - ndim = proj_data.ndim - - # Create astra geometries - vol_geom = astra_volume_geometry(vol_space) - proj_geom = astra_projection_geometry(geometry) - - # Create ASTRA data structure - sino_id = astra_data( - proj_geom, datatype='projection', data=proj_data, allow_copy=True - ) - - # Create projector - if astra_proj_type is None: - astra_proj_type = default_astra_proj_type(geometry) - proj_id = astra_projector(astra_proj_type, vol_geom, proj_geom, ndim) - - # Convert out to correct dtype and order if needed. - with writable_array(out, dtype='float32', order='C') as out_arr: - vol_id = astra_data( - vol_geom, datatype='volume', data=out_arr, ndim=vol_space.ndim - ) - # Create algorithm - algo_id = astra_algorithm( - 'backward', ndim, vol_id, sino_id, proj_id, impl='cpu' - ) - - # Run algorithm - astra.algorithm.run(algo_id) - - # Weight the adjoint by appropriate weights - scaling_factor = float(proj_data.space.weighting.const) - scaling_factor /= float(vol_space.weighting.const) - - out *= scaling_factor - - # Delete ASTRA objects - astra.algorithm.delete(algo_id) - astra.data2d.delete((vol_id, sino_id)) - astra.projector.delete(proj_id) - - return out - - -class AstraCpuImpl: - """Thin wrapper implementing ASTRA CPU for `RayTransform`.""" - - def __init__(self, geometry, vol_space, proj_space): - """Initialize a new instance. - - Parameters - ---------- - geometry : `Geometry` - Geometry defining the tomographic setup. - vol_space : `DiscretizedSpace` - Reconstruction space, the space of the images to be forward - projected. - proj_space : `DiscretizedSpace` - Projection space, the space of the result. - """ - if not isinstance(geometry, Geometry): - raise TypeError( - '`geometry` must be a `Geometry` instance, got {!r}' - ''.format(geometry) - ) - if not isinstance(vol_space, DiscretizedSpace): - raise TypeError( - '`vol_space` must be a `DiscretizedSpace` instance, got {!r}' - ''.format(vol_space) - ) - if not isinstance(proj_space, DiscretizedSpace): - raise TypeError( - '`proj_space` must be a `DiscretizedSpace` instance, got {!r}' - ''.format(proj_space) - ) - if geometry.ndim > 2: - raise ValueError( - '`impl` {!r} only works for 2d'.format(self.__class__.__name__) - ) - - if vol_space.size >= 512 ** 2: - warnings.warn( - "The 'astra_cpu' backend may be too slow for volumes of this " - "size. Consider using 'astra_cuda' if your machine has an " - "Nvidia GPU.", - RuntimeWarning, - ) - - self.geometry = geometry - self._vol_space = vol_space - self._proj_space = proj_space - - @property - def vol_space(self): - return self._vol_space - - @property - def proj_space(self): - return self._proj_space - - @_add_default_complex_impl - def call_backward(self, x, out, **kwargs): - return astra_cpu_back_projector( - x, self.geometry, self.vol_space.real_space, out, **kwargs - ) - - @_add_default_complex_impl - def call_forward(self, x, out, **kwargs): - return astra_cpu_forward_projector( - x, self.geometry, self.proj_space.real_space, out, **kwargs - ) - - -if __name__ == '__main__': - from odl.util.testutils import run_doctests - - run_doctests() diff --git a/odl/trafos/backends/pyfftw_bindings.py b/odl/trafos/backends/pyfftw_bindings.py index 75348a22563..d72403d7181 100644 --- a/odl/trafos/backends/pyfftw_bindings.py +++ b/odl/trafos/backends/pyfftw_bindings.py @@ -30,7 +30,7 @@ 'ODL functionality, see issue #1002.', RuntimeWarning) -from odl.util import ( +from odl.core.util import ( is_real_dtype, dtype_repr, complex_dtype, normalized_axes_tuple) __all__ = ('pyfftw_call', 'PYFFTW_AVAILABLE') @@ -303,5 +303,5 @@ def _pyfftw_check_args(arr_in, arr_out, axes, halfcomplex, direction): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests(skip_if=not PYFFTW_AVAILABLE) diff --git a/odl/trafos/backends/pywt_bindings.py b/odl/trafos/backends/pywt_bindings.py index d69d4262adf..a2bea4f5751 100644 --- a/odl/trafos/backends/pywt_bindings.py +++ b/odl/trafos/backends/pywt_bindings.py @@ -150,5 +150,5 @@ def precompute_raveled_slices(coeff_shapes, axes=None): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests(skip_if=not PYWT_AVAILABLE) diff --git a/odl/deform/__init__.py b/odl/trafos/deform/__init__.py similarity index 100% rename from odl/deform/__init__.py rename to odl/trafos/deform/__init__.py diff --git a/odl/deform/linearized.py b/odl/trafos/deform/linearized.py similarity index 94% rename from odl/deform/linearized.py rename to odl/trafos/deform/linearized.py index cd28fc8c5e5..ef19fcaf8eb 100644 --- a/odl/deform/linearized.py +++ b/odl/trafos/deform/linearized.py @@ -12,13 +12,14 @@ import numpy as np -from odl.discr import DiscretizedSpace, Divergence, Gradient -from odl.discr.discr_space import DiscretizedSpaceElement -from odl.discr.discr_utils import _normalize_interp, per_axis_interpolator -from odl.operator import Operator, PointwiseInner -from odl.space import ProductSpace -from odl.space.pspace import ProductSpaceElement -from odl.util import indent, signature_string +from odl.core.discr import DiscretizedSpace, Divergence, Gradient +from odl.core.discr.discr_space import DiscretizedSpaceElement +from odl.core.discr.discr_utils import _normalize_interp, per_axis_interpolator +from odl.core.operator import Operator, PointwiseInner +from odl.core.space import ProductSpace +from odl.core.space.pspace import ProductSpaceElement +from odl.core.util import indent, signature_string +from odl.core.array_API_support import exp, lookup_array_backend __all__ = ('LinDeformFixedTempl', 'LinDeformFixedDisp', 'linear_deform') @@ -78,11 +79,20 @@ def linear_deform(template, displacement, interp='linear', out=None): array([ 0. , 0. , 1. , 0.5, 0. ]) """ points = template.space.points() + if isinstance(displacement, ProductSpaceElement): + impl, device = displacement[0].impl, displacement[0].device + backend = lookup_array_backend(impl) + else: + raise ValueError(f'{type(displacement)}') + + points = backend.array_constructor(points, device=device) + for i, vi in enumerate(displacement): points[:, i] += vi.asarray().ravel() templ_interpolator = per_axis_interpolator( template, coord_vecs=template.space.grid.coord_vectors, interp=interp ) + values = templ_interpolator(points.T, out=out) return values.reshape(template.space.shape) @@ -351,7 +361,7 @@ def __init__(self, displacement, templ_space=None, interp='linear'): >>> space = odl.uniform_discr(0, 1, 5) >>> disp_field = space.tangent_bundle.element([[0, 0, 0, -0.2, 0]]) - >>> op = odl.deform.LinDeformFixedDisp(disp_field, interp='nearest') + >>> op = odl.trafos.deform.LinDeformFixedDisp(disp_field, interp='nearest') >>> template = [0, 0, 1, 0, 0] >>> print(op([0, 0, 1, 0, 0])) [ 0., 0., 1., 1., 0.] @@ -361,7 +371,7 @@ def __init__(self, displacement, templ_space=None, interp='linear'): points, 0.1, one gets the mean of the values. >>> disp_field = space.tangent_bundle.element([[0, 0, 0, -0.1, 0]]) - >>> op = odl.deform.LinDeformFixedDisp(disp_field, interp='linear') + >>> op = odl.trafos.deform.LinDeformFixedDisp(disp_field, interp='linear') >>> template = [0, 0, 1, 0, 0] >>> print(op(template)) [ 0. , 0. , 1. , 0.5, 0. ] @@ -449,7 +459,7 @@ def adjoint(self): # TODO allow users to select what method to use here. div_op = Divergence(domain=self.displacement.space, method='forward', pad_mode='symmetric') - jacobian_det = self.domain.element(np.exp(-div_op(self.displacement))) + jacobian_det = self.domain.element(exp(-div_op(self.displacement))) return jacobian_det * self.inverse @@ -465,5 +475,5 @@ def __repr__(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/ufunc_ops/__init__.py b/odl/trafos/fourier/__init__.py similarity index 74% rename from odl/ufunc_ops/__init__.py rename to odl/trafos/fourier/__init__.py index da9ca8f282a..f8f57fa25d5 100644 --- a/odl/ufunc_ops/__init__.py +++ b/odl/trafos/fourier/__init__.py @@ -6,11 +6,11 @@ # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. -"""Universal functions as `Operator` and `Functional`.""" +"""Utilities for transformations.""" from __future__ import absolute_import -from .ufunc_ops import * +from .fourier import * __all__ = () -__all__ = ufunc_ops.__all__ +__all__ += fourier.__all__ diff --git a/odl/trafos/fourier.py b/odl/trafos/fourier/fourier.py similarity index 88% rename from odl/trafos/fourier.py rename to odl/trafos/fourier/fourier.py index 15424f402f7..36d4919f092 100644 --- a/odl/trafos/fourier.py +++ b/odl/trafos/fourier/fourier.py @@ -12,29 +12,31 @@ import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr import DiscretizedSpace, uniform_discr -from odl.operator import Operator -from odl.set import ComplexNumbers, RealNumbers +from odl.core.discr import DiscretizedSpace, uniform_discr +from odl.core.operator import Operator +from odl.core.set import ComplexNumbers, RealNumbers from odl.trafos.backends.pyfftw_bindings import ( PYFFTW_AVAILABLE, _flag_pyfftw_to_odl, pyfftw_call) from odl.trafos.util import ( dft_postprocess_data, dft_preprocess_data, reciprocal_grid, reciprocal_space) -from odl.util import ( - complex_dtype, conj_exponent, dtype_repr, is_complex_floating_dtype, - is_real_dtype, normalized_axes_tuple, normalized_scalar_param_list) +from odl.core.util import ( + complex_dtype, conj_exponent, dtype_repr, is_complex_dtype, + is_real_floating_dtype, normalized_axes_tuple, normalized_scalar_param_list) +from odl.core.util.dtype_utils import _universal_dtype_identifier +from odl.core.array_API_support import lookup_array_backend __all__ = ('DiscreteFourierTransform', 'DiscreteFourierTransformInverse', 'FourierTransform', 'FourierTransformInverse') -_SUPPORTED_FOURIER_IMPLS = ('numpy',) -_DEFAULT_FOURIER_IMPL = 'numpy' +_SUPPORTED_FOURIER_IMPLS = ('default','numpy') +_DEFAULT_FOURIER_IMPL = 'default' if PYFFTW_AVAILABLE: _SUPPORTED_FOURIER_IMPLS += ('pyfftw',) - _DEFAULT_FOURIER_IMPL = 'pyfftw' + # _DEFAULT_FOURIER_IMPL = 'pyfftw' class DiscreteFourierTransformBase(Operator): @@ -109,7 +111,7 @@ def __init__(self, inverse, domain, range=None, axes=None, sign='-', else: self.__halfcomplex = bool(halfcomplex) - ran_dtype = complex_dtype(domain.dtype) + ran_dtype = complex_dtype(domain.dtype_identifier) # Sign of the transform if sign not in ('+', '-'): @@ -129,14 +131,14 @@ def __init__(self, inverse, domain, range=None, axes=None, sign='-', shape = np.atleast_1d(ran_shape) range = uniform_discr( - [0] * len(shape), shape - 1, shape, ran_dtype, impl, - nodes_on_bdry=True, exponent=conj_exponent(domain.exponent)) + [0] * len(shape), shape - 1, shape, ran_dtype, + nodes_on_bdry=True, exponent=conj_exponent(domain.exponent), impl=domain.impl, device=domain.device) else: if range.shape != ran_shape: raise ValueError('expected range shape {}, got {}.' ''.format(ran_shape, range.shape)) - if range.dtype != ran_dtype: + if range.dtype_identifier != ran_dtype: raise ValueError('expected range data type {}, got {}.' ''.format(dtype_repr(ran_dtype), dtype_repr(range.dtype))) @@ -173,9 +175,13 @@ def _call(self, x, out, **kwargs): # TODO: Implement zero padding if self.impl == 'numpy': out[:] = self._call_numpy(x.asarray()) - else: + elif self.impl=='pyfftw': out[:] = self._call_pyfftw(x.asarray(), out.asarray(), **kwargs) - + else: + if self.domain.impl == 'numpy' and PYFFTW_AVAILABLE: + out[:] = self._call_pyfftw(x.asarray(), out.asarray(), **kwargs) + else: + out[:] = self._call_array_API(x.asarray()) @property def impl(self): """Backend for the FFT implementation.""" @@ -218,6 +224,21 @@ def inverse(self): Abstract method. """ raise NotImplementedError('abstract method') + + def _call_array_API(self, x): + """Return ``self(x)`` using the array-API low-level FFT. + + Parameters + ---------- + x : `ArrayLike` + Input array to be transformed + + Returns + ------- + out : `ArrayLike` + Result of the transform + """ + raise NotImplementedError('abstract method') def _call_numpy(self, x): """Return ``self(x)`` using numpy. @@ -447,6 +468,28 @@ def __init__(self, domain, range=None, axes=None, sign='-', super(DiscreteFourierTransform, self).__init__( inverse=False, domain=domain, range=range, axes=axes, sign=sign, halfcomplex=halfcomplex, impl=impl) + + def _call_array_API(self, x): + """Return ``self(x)`` using the low-level array-API FFT. + + See Also + -------- + DiscreteFourierTransformBase._call_array_API + """ + # assert isinstance(x, np.ndarray) + backend = self.domain.array_backend + namespace = backend.array_namespace + + if self.halfcomplex: + return namespace.fft.rfftn(x, axes=self.axes) + else: + if self.sign == '-': + return namespace.fft.fftn(x, axes=self.axes) + else: + # Need to undo Numpy IFFT scaling + return ( + np.prod(np.take(self.domain.shape, self.axes)) * namespace.fft.ifftn(x, axes=self.axes) + ) def _call_numpy(self, x): """Return ``self(x)`` using numpy. @@ -506,7 +549,7 @@ def inverse(self): sign = '+' if self.sign == '-' else '-' return DiscreteFourierTransformInverse( domain=self.range, range=self.domain, axes=self.axes, - halfcomplex=self.halfcomplex, sign=sign) + halfcomplex=self.halfcomplex, sign=sign, impl=self.impl) class DiscreteFourierTransformInverse(DiscreteFourierTransformBase): @@ -601,6 +644,32 @@ def __init__(self, range, domain=None, axes=None, sign='+', inverse=True, domain=range, range=domain, axes=axes, sign=sign, halfcomplex=halfcomplex, impl=impl) + def _call_array_API(self, x): + """Return ``self(x)`` using the low-level array-API functions. + + Parameters + ---------- + x : `ArrayLike` + Input array to be transformed + + Returns + ------- + out : `ArrayLike` + Result of the transform + """ + namespace = self.domain.array_backend.array_namespace + + if self.halfcomplex: + return namespace.fft.irfftn(x, axes=self.axes) + else: + if self.sign == '+': + return namespace.fft.ifftn(x, axes=self.axes) + else: + return ( + namespace.fft.fftn(x, axes=self.axes) / + np.prod(np.take(self.domain.shape, self.axes)) + ) + def _call_numpy(self, x): """Return ``self(x)`` using numpy. @@ -690,7 +759,7 @@ def inverse(self): sign = '-' if self.sign == '+' else '+' return DiscreteFourierTransform( domain=self.range, range=self.domain, axes=self.axes, - halfcomplex=self.halfcomplex, sign=sign) + halfcomplex=self.halfcomplex, sign=sign, impl=self.impl) class FourierTransformBase(Operator): @@ -804,14 +873,6 @@ def __init__(self, inverse, domain, range=None, impl=None, **kwargs): if not isinstance(domain, DiscretizedSpace): raise TypeError('domain {!r} is not a `DiscretizedSpace` instance' ''.format(domain)) - if domain.impl != 'numpy': - raise NotImplementedError( - 'Only Numpy-based data spaces are supported, got {}' - ''.format(domain.tspace)) - - # axes - axes = kwargs.pop('axes', np.arange(domain.ndim)) - self.__axes = normalized_axes_tuple(axes, domain.ndim) # Implementation if impl is None: @@ -821,6 +882,15 @@ def __init__(self, inverse, domain, range=None, impl=None, **kwargs): raise ValueError("`impl` '{}' not supported".format(impl_in)) self.__impl = impl + if self.impl != 'default' and domain.impl != 'numpy': + raise NotImplementedError( + f'Only Numpy-based data spaces are supported for non-default FFT backends, got {domain.tspace}' + ) + + # axes + axes = kwargs.pop('axes', np.arange(domain.ndim)) + self.__axes = normalized_axes_tuple(axes, domain.ndim) + # Handle half-complex yes/no and shifts halfcomplex = kwargs.pop('halfcomplex', True) shift = kwargs.pop('shift', True) @@ -865,7 +935,7 @@ def __init__(self, inverse, domain, range=None, impl=None, **kwargs): # self._halfcomplex and self._axes need to be set for this range = reciprocal_space(domain, axes=self.axes, halfcomplex=self.halfcomplex, - shift=self.shifts) + shift=self.shifts, impl=domain.impl, device=domain.device) if inverse: super(FourierTransformBase, self).__init__( @@ -907,9 +977,14 @@ def _call(self, x, out, **kwargs): # TODO: Implement zero padding if self.impl == 'numpy': out[:] = self._call_numpy(x.asarray()) - else: + elif self.impl == 'pyfftw': # 0-overhead assignment if asarray() does not copy out[:] = self._call_pyfftw(x.asarray(), out.asarray(), **kwargs) + else: + if self.domain.impl == 'numpy' and PYFFTW_AVAILABLE: + out[:] = self._call_pyfftw(x.asarray(), out.asarray(), **kwargs) + else: + out[:] = self._call_array_API(x.asarray()) def _call_numpy(self, x): """Return ``self(x)`` for numpy back-end. @@ -925,6 +1000,21 @@ def _call_numpy(self, x): Result of the transform """ raise NotImplementedError('abstract method') + + def _call_array_API(self, x): + """Return ``self(x)`` for the default array-API back-end. + + Parameters + ---------- + x : `ArrayLike` + Array representing the function to be transformed + + Returns + ------- + out : `ArrayLike` + Result of the transform + """ + raise NotImplementedError('abstract method') def _call_pyfftw(self, x, out, **kwargs): """Implement ``self(x[, out, **kwargs])`` for pyfftw back-end. @@ -1299,6 +1389,46 @@ def _postprocess(self, x, out=None): out, real_grid=self.domain.grid, recip_grid=self.range.grid, shift=self.shifts, axes=self.axes, sign=self.sign, interp='nearest', op='multiply', out=out) + + def _call_array_API(self, x): + """Return ``self(x)`` for the array-API back-end. + + Parameters + ---------- + x : `ArrayLike` + Array representing the function to be transformed + + Returns + ------- + out : `ArrayLike` + Result of the transform + """ + # Pre-processing before calculating the DFT + # Note: since the FFT call is out-of-place, it does not matter if + # preprocess produces real or complex output in the R2C variant. + # There is no significant time difference between (full) R2C and + # C2C DFT in Numpy. + backend = self.domain.array_backend + preproc = self._preprocess(x) + dtype = _universal_dtype_identifier(preproc.dtype) + # The actual call to the FFT library, out-of-place unfortunately + if self.halfcomplex: + out = backend.array_namespace.fft.rfftn(preproc, axes=self.axes) + else: + if self.sign == '-': + out = backend.array_constructor( + backend.array_namespace.fft.fftn(preproc, axes=self.axes), dtype=backend.available_dtypes[complex_dtype(dtype)], + copy=AVOID_UNNECESSARY_COPY + ) + else: + out = backend.array_namespace.fft.ifftn(preproc, axes=self.axes) + # Numpy's FFT normalizes by 1 / prod(shape[axes]), we + # need to undo that + out *= np.prod(np.take(self.domain.shape, self.axes)) + + # Post-processing accounting for shift, scaling and interpolation + self._postprocess(out, out=out) + return out def _call_numpy(self, x): """Return ``self(x)`` for numpy back-end. @@ -1319,14 +1449,14 @@ def _call_numpy(self, x): # There is no significant time difference between (full) R2C and # C2C DFT in Numpy. preproc = self._preprocess(x) - + dtype = _universal_dtype_identifier(preproc.dtype) # The actual call to the FFT library, out-of-place unfortunately if self.halfcomplex: out = np.fft.rfftn(preproc, axes=self.axes) else: if self.sign == '-': out = ( np.fft.fftn(preproc, axes=self.axes) - .astype(complex_dtype(preproc.dtype), copy=AVOID_UNNECESSARY_COPY) + .astype(complex_dtype(dtype), copy=AVOID_UNNECESSARY_COPY) ) else: out = np.fft.ifftn(preproc, axes=self.axes) @@ -1372,11 +1502,11 @@ def _call_pyfftw(self, x, out, **kwargs): # Pre-processing before calculating the sums, in-place for C2C and R2C if self.halfcomplex: preproc = self._preprocess(x) - assert is_real_dtype(preproc.dtype) + assert is_real_floating_dtype(preproc.dtype) else: # out is preproc in this case preproc = self._preprocess(x, out=out) - assert is_complex_floating_dtype(preproc.dtype) + assert is_complex_dtype(preproc.dtype) # The actual call to the FFT library. We store the plan for re-use. # The FFT is calculated in-place, except if the range is real and @@ -1386,11 +1516,11 @@ def _call_pyfftw(self, x, out, **kwargs): preproc, out, direction=direction, halfcomplex=self.halfcomplex, axes=self.axes, normalise_idft=False, **kwargs) - assert is_complex_floating_dtype(out.dtype) + assert is_complex_dtype(out.dtype) # Post-processing accounting for shift, scaling and interpolation out = self._postprocess(out, out=out) - assert is_complex_floating_dtype(out.dtype) + assert is_complex_dtype(out.dtype) return out @property @@ -1543,6 +1673,48 @@ def _postprocess(self, x, out=None): return dft_preprocess_data( x, shift=self.shifts, axes=self.axes, sign=self.sign, out=out) + def _call_array_API(self, x): + """Return ``self(x)`` for array-API back-end. + + Parameters + ---------- + x : `ArrayLike` + Array representing the function to be transformed + + Returns + ------- + out : `ArrayLike` + Result of the transform + """ + # Pre-processing before calculating the DFT + preproc = self._preprocess(x) + namespace = self.domain.array_backend.array_namespace + + # The actual call to the FFT library + # Normalization by 1 / prod(shape[axes]) is done by Numpy's FFT if + # one of the "i" functions is used. For sign='-' we need to do it + # ourselves. + if self.halfcomplex: + s = np.asarray(self.range.shape)[list(self.axes)] + s = list(s) + out = namespace.fft.irfftn(preproc, axes=self.axes, s=s) + else: + if self.sign == '-': + out = namespace.fft.fftn(preproc, axes=self.axes) + out /= np.prod(np.take(self.domain.shape, self.axes)) + else: + out = namespace.fft.ifftn(preproc, axes=self.axes) + + # Post-processing in IFT = pre-processing in FT (in-place) + self._postprocess(out, out=out) + if self.halfcomplex: + assert is_real_floating_dtype(out.dtype) + + if self.range.field == RealNumbers(): + return out.real + else: + return out + def _call_numpy(self, x): """Return ``self(x)`` for numpy back-end. @@ -1576,7 +1748,7 @@ def _call_numpy(self, x): # Post-processing in IFT = pre-processing in FT (in-place) self._postprocess(out, out=out) if self.halfcomplex: - assert is_real_dtype(out.dtype) + assert is_real_floating_dtype(out.dtype) if self.range.field == RealNumbers(): return out.real @@ -1663,5 +1835,5 @@ def inverse(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests() diff --git a/odl/trafos/util/ft_utils.py b/odl/trafos/util/ft_utils.py index d4d4f65dcbf..eef1f59c05d 100644 --- a/odl/trafos/util/ft_utils.py +++ b/odl/trafos/util/ft_utils.py @@ -12,17 +12,20 @@ import numpy as np -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY +from odl.core.util.npy_compat import AVOID_UNNECESSARY_COPY -from odl.discr import ( +from odl.core.discr import ( DiscretizedSpace, uniform_discr_frompartition, uniform_grid, uniform_partition_fromgrid) -from odl.set import RealNumbers -from odl.util import ( +from odl.core.set import RealNumbers +from odl.core.util import ( complex_dtype, conj_exponent, dtype_repr, fast_1d_tensor_mult, - is_complex_floating_dtype, is_numeric_dtype, is_real_dtype, + is_complex_dtype, is_numeric_dtype, is_real_dtype, is_real_floating_dtype, is_string, normalized_axes_tuple, normalized_scalar_param_list) +from odl.core.array_API_support import get_array_and_backend, ArrayBackend + +from odl.core.util.dtype_utils import _universal_dtype_identifier __all__ = ('reciprocal_grid', 'realspace_grid', 'reciprocal_space', @@ -296,7 +299,9 @@ def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None): type and ``shift`` is not ``True``. In this case, the return type is the complex counterpart of ``arr.dtype``. """ - arr = np.asarray(arr) + arr, backend = get_array_and_backend(arr) + backend : ArrayBackend + dtype = backend.get_dtype_identifier(array=arr) if not is_numeric_dtype(arr.dtype): raise ValueError('array has non-numeric data type {}' ''.format(dtype_repr(arr.dtype))) @@ -317,10 +322,13 @@ def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None): # Make a copy of arr with correct data type if necessary, or copy values. if out is None: - if is_real_dtype(arr.dtype) and not all(shift_list): - out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True) + if all(shift_list): + dtype = backend.available_dtypes[dtype] else: - out = arr.copy() + dtype = backend.available_dtypes[complex_dtype(dtype)] + + out = backend.array_constructor( + arr, dtype=dtype, copy=True, device=arr.device) else: out[:] = arr @@ -334,17 +342,19 @@ def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None): imag = 1j else: raise ValueError("`sign` '{}' not understood".format(sign)) + + out_dtype = _universal_dtype_identifier(out.dtype) def _onedim_arr(length, shift): if shift: # (-1)^indices - factor = np.ones(length, dtype=out.dtype) + factor = np.ones(length, dtype=out_dtype) factor[1::2] = -1 else: - factor = np.arange(length, dtype=out.dtype) + factor = np.arange(length, dtype=out_dtype) factor *= -imag * np.pi * (1 - 1.0 / length) np.exp(factor, out=factor) - return factor.astype(out.dtype, copy=AVOID_UNNECESSARY_COPY) + return factor.astype(out_dtype, copy=AVOID_UNNECESSARY_COPY) onedim_arrs = [] for axis, shift in zip(axes, shift_list): @@ -460,15 +470,18 @@ def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes, *Numerical Recipes in C - The Art of Scientific Computing* (Volume 3). Cambridge University Press, 2007. """ - arr = np.asarray(arr) + arr, backend = get_array_and_backend(arr) + backend : ArrayBackend + dtype = backend.get_dtype_identifier(array=arr) if is_real_floating_dtype(arr.dtype): arr = arr.astype(complex_dtype(arr.dtype)) - elif not is_complex_floating_dtype(arr.dtype): + elif not is_complex_dtype(arr.dtype): raise ValueError('array data type {} is not a complex floating point ' 'data type'.format(dtype_repr(arr.dtype))) if out is None: - out = arr.copy() + out = backend.array_constructor(arr, device=arr.device, copy=True) + elif out is not arr: out[:] = arr @@ -498,6 +511,8 @@ def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes, if is_string(interp): interp = [str(interp).lower()] * arr.ndim + out_dtype = _universal_dtype_identifier(out.dtype) + onedim_arrs = [] for ax, shift, intp in zip(axes, shift_list, interp): x = real_grid.min_pt[ax] @@ -542,7 +557,8 @@ def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes, else: onedim_arr /= interp_kernel - onedim_arrs.append(onedim_arr.astype(out.dtype, copy=AVOID_UNNECESSARY_COPY)) + + onedim_arrs.append(onedim_arr.astype(out_dtype, copy=AVOID_UNNECESSARY_COPY)) fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out) return out @@ -612,13 +628,14 @@ def reciprocal_space(space, axes=None, halfcomplex=False, shift=True, dtype = kwargs.pop('dtype', None) if dtype is None: - dtype = complex_dtype(space.dtype) + dtype = complex_dtype(space.dtype_identifier) else: - if not is_complex_floating_dtype(dtype): + if not is_complex_dtype(dtype): raise ValueError('{} is not a complex data type' ''.format(dtype_repr(dtype))) impl = kwargs.pop('impl', 'numpy') + device = kwargs.pop('device', 'cpu') # Calculate range recip_grid = reciprocal_grid(space.grid, shift=shift, @@ -645,6 +662,7 @@ def reciprocal_space(space, axes=None, halfcomplex=False, shift=True, recip_spc = uniform_discr_frompartition(part, exponent=exponent, dtype=dtype, impl=impl, + device=device, axis_labels=axis_labels) return recip_spc diff --git a/odl/trafos/wavelet/__init__.py b/odl/trafos/wavelet/__init__.py new file mode 100644 index 00000000000..2e65c475480 --- /dev/null +++ b/odl/trafos/wavelet/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2020 The ODL contributors +# +# This file is part of ODL. +# +# This Source Code Form is subject to the terms of the Mozilla Public License, +# v. 2.0. If a copy of the MPL was not distributed with this file, You can +# obtain one at https://mozilla.org/MPL/2.0/. + +"""Utilities for transformations.""" + +from __future__ import absolute_import + +from .wavelet import * + +__all__ = () +__all__ += wavelet.__all__ diff --git a/odl/trafos/wavelet.py b/odl/trafos/wavelet/wavelet.py similarity index 97% rename from odl/trafos/wavelet.py rename to odl/trafos/wavelet/wavelet.py index 97abcfdb92c..c207adae1b1 100644 --- a/odl/trafos/wavelet.py +++ b/odl/trafos/wavelet/wavelet.py @@ -12,8 +12,8 @@ import numpy as np -from odl.discr import DiscretizedSpace -from odl.operator import Operator +from odl.core.discr import DiscretizedSpace +from odl.core.operator import Operator from odl.trafos.backends.pywt_bindings import ( PYWT_AVAILABLE, precompute_raveled_slices, pywt_pad_mode, pywt_wavelet) @@ -427,9 +427,9 @@ def _call(self, x): """Return wavelet transform of ``x``.""" if self.impl == 'pywt': coeffs = pywt.wavedecn( - x, wavelet=self.pywt_wavelet, level=self.nlevels, + x.data, wavelet=self.pywt_wavelet, level=self.nlevels, mode=self.pywt_pad_mode, axes=self.axes) - return pywt.ravel_coeffs(coeffs, axes=self.axes)[0] + return self.range.element(pywt.ravel_coeffs(coeffs, axes=self.axes)[0]) else: raise RuntimeError("bad `impl` '{}'".format(self.impl)) @@ -586,13 +586,14 @@ def __init__(self, range, wavelet, nlevels=None, pad_mode='constant', >>> space = odl.uniform_discr([0, 0], [1, 1], (4, 4)) >>> wavelet_trafo = odl.trafos.WaveletTransform( ... domain=space, nlevels=1, wavelet='haar') - >>> orig_array = np.array([[1, 1, 1, 1], - ... [0, 0, 0, 0], - ... [0, 0, 1, 1], - ... [1, 0, 1, 0]]) + >>> orig_array = space.element(np.array([[1, 1, 1, 1], + ... [0, 0, 0, 0], + ... [0, 0, 1, 1], + ... [1, 0, 1, 0]])) >>> decomp = wavelet_trafo(orig_array) >>> recon = wavelet_trafo.inverse(decomp) - >>> np.allclose(recon, orig_array) + >>> from odl.core.util.testutils import all_almost_equal + >>> all_almost_equal(recon, orig_array) True References @@ -607,7 +608,7 @@ def __init__(self, range, wavelet, nlevels=None, pad_mode='constant', def _call(self, coeffs): """Return the inverse wavelet transform of ``coeffs``.""" if self.impl == 'pywt': - coeffs = pywt.unravel_coeffs(coeffs, + coeffs = pywt.unravel_coeffs(coeffs.data, coeff_slices=self._coeff_slices, coeff_shapes=self._coeff_shapes, output_format='wavedecn') @@ -639,7 +640,7 @@ def _call(self, coeffs): ''.format(i, n_recon - 1, n_recon, n_intended)) recon = recon[tuple(recon_slc)] - return recon + return self.range.element(recon) else: raise RuntimeError("bad `impl` '{}'".format(self.impl)) @@ -687,5 +688,5 @@ def inverse(self): if __name__ == '__main__': - from odl.util.testutils import run_doctests + from odl.core.util.testutils import run_doctests run_doctests(skip_if=not PYWT_AVAILABLE) diff --git a/odl/ufunc_ops/ufunc_ops.py b/odl/ufunc_ops/ufunc_ops.py deleted file mode 100644 index b1e558d80da..00000000000 --- a/odl/ufunc_ops/ufunc_ops.py +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright 2014-2017 The ODL contributors -# -# This file is part of ODL. -# -# This Source Code Form is subject to the terms of the Mozilla Public License, -# v. 2.0. If a copy of the MPL was not distributed with this file, You can -# obtain one at https://mozilla.org/MPL/2.0/. - -"""Ufunc operators for ODL vectors.""" - -from __future__ import print_function, division, absolute_import -import numpy as np - -from odl.util.npy_compat import AVOID_UNNECESSARY_COPY - -from odl.set import LinearSpace, RealNumbers, Field -from odl.space import ProductSpace, tensor_space -from odl.operator import Operator, MultiplyOperator -from odl.solvers import (Functional, ScalingFunctional, FunctionalQuotient, - ConstantFunctional) -from odl.util.ufuncs import UFUNCS - -__all__ = () - -SUPP_TYPECODES = '?bhilqpBHILQPefdgFDG' -SUPP_TYPECODES_TO_DTYPES = {tc: np.dtype(tc) for tc in SUPP_TYPECODES} - - -def find_min_signature(ufunc, dtypes_in): - """Determine the minimum matching ufunc signature for given dtypes. - - Parameters - ---------- - ufunc : str or numpy.ufunc - Ufunc whose signatures are to be considered. - dtypes_in : - Sequence of objects specifying input dtypes. Its length must match - the number of inputs of ``ufunc``, and its entries must be understood - by `numpy.dtype`. - - Returns - ------- - signature : str - Minimum matching ufunc signature, see, e.g., ``np.add.types`` - for examples. - - Raises - ------ - TypeError - If no valid signature is found. - """ - if not isinstance(ufunc, np.ufunc): - ufunc = getattr(np, str(ufunc)) - - dtypes_in = [np.dtype(dt_in) for dt_in in dtypes_in] - tcs_in = [dt.base.char for dt in dtypes_in] - - if len(tcs_in) != ufunc.nin: - raise ValueError('expected {} input dtype(s) for {}, got {}' - ''.format(ufunc.nin, ufunc, len(tcs_in))) - - valid_sigs = [] - for sig in ufunc.types: - sig_tcs_in, sig_tcs_out = sig.split('->') - if all(np.dtype(tc_in) <= np.dtype(sig_tc_in) and - sig_tc_in in SUPP_TYPECODES - for tc_in, sig_tc_in in zip(tcs_in, sig_tcs_in)): - valid_sigs.append(sig) - - if not valid_sigs: - raise TypeError('no valid signature found for {} and input dtypes {}' - ''.format(ufunc, tuple(dt.name for dt in dtypes_in))) - - def in_dtypes(sig): - """Comparison key function for input dtypes of a signature.""" - sig_tcs_in = sig.split('->')[0] - return tuple(np.dtype(tc) for tc in sig_tcs_in) - - return min(valid_sigs, key=in_dtypes) - - -def dtypes_out(ufunc, dtypes_in): - """Return the result dtype(s) of ``ufunc`` with inputs of given dtypes.""" - sig = find_min_signature(ufunc, dtypes_in) - tcs_out = sig.split('->')[1] - return tuple(np.dtype(tc) for tc in tcs_out) - - -def _is_integer_only_ufunc(name): - return 'shift' in name or 'bitwise' in name or name == 'invert' - - -LINEAR_UFUNCS = ['negative', 'rad2deg', 'deg2rad', 'add', 'subtract'] - - -RAW_EXAMPLES_DOCSTRING = """ -Examples --------- ->>> import odl ->>> space = odl.{space!r} ->>> op = odl.ufunc_ops.{name}(space) ->>> print(op({arg})) -{result!s} -""" - - -def gradient_factory(name): - """Create gradient `Functional` for some ufuncs.""" - - if name == 'sin': - def gradient(self): - """Return the gradient operator.""" - return cos(self.domain) - elif name == 'cos': - def gradient(self): - """Return the gradient operator.""" - return -sin(self.domain) - elif name == 'tan': - def gradient(self): - """Return the gradient operator.""" - return 1 + square(self.domain) * self - elif name == 'sqrt': - def gradient(self): - """Return the gradient operator.""" - return FunctionalQuotient(ConstantFunctional(self.domain, 0.5), - self) - elif name == 'square': - def gradient(self): - """Return the gradient operator.""" - return ScalingFunctional(self.domain, 2.0) - elif name == 'log': - def gradient(self): - """Return the gradient operator.""" - return reciprocal(self.domain) - elif name == 'exp': - def gradient(self): - """Return the gradient operator.""" - return self - elif name == 'reciprocal': - def gradient(self): - """Return the gradient operator.""" - return FunctionalQuotient(ConstantFunctional(self.domain, -1.0), - square(self.domain)) - elif name == 'sinh': - def gradient(self): - """Return the gradient operator.""" - return cosh(self.domain) - elif name == 'cosh': - def gradient(self): - """Return the gradient operator.""" - return sinh(self.domain) - else: - # Fallback to default - gradient = Functional.gradient - - return gradient - - -def derivative_factory(name): - """Create derivative function for some ufuncs.""" - - if name == 'sin': - def derivative(self, point): - """Return the derivative operator.""" - return MultiplyOperator(cos(self.domain)(point)) - elif name == 'cos': - def derivative(self, point): - """Return the derivative operator.""" - point = self.domain.element(point) - return MultiplyOperator(-sin(self.domain)(point)) - elif name == 'tan': - def derivative(self, point): - """Return the derivative operator.""" - return MultiplyOperator(1 + self(point) ** 2) - elif name == 'sqrt': - def derivative(self, point): - """Return the derivative operator.""" - return MultiplyOperator(0.5 / self(point)) - elif name == 'square': - def derivative(self, point): - """Return the derivative operator.""" - point = self.domain.element(point) - return MultiplyOperator(2.0 * point) - elif name == 'log': - def derivative(self, point): - """Return the derivative operator.""" - point = self.domain.element(point) - return MultiplyOperator(1.0 / point) - elif name == 'exp': - def derivative(self, point): - """Return the derivative operator.""" - return MultiplyOperator(self(point)) - elif name == 'reciprocal': - def derivative(self, point): - """Return the derivative operator.""" - point = self.domain.element(point) - return MultiplyOperator(-self(point) ** 2) - elif name == 'sinh': - def derivative(self, point): - """Return the derivative operator.""" - point = self.domain.element(point) - return MultiplyOperator(cosh(self.domain)(point)) - elif name == 'cosh': - def derivative(self, point): - """Return the derivative operator.""" - return MultiplyOperator(sinh(self.domain)(point)) - else: - # Fallback to default - derivative = Operator.derivative - - return derivative - - -def ufunc_class_factory(name, nargin, nargout, docstring): - """Create a Ufunc `Operator` from a given specification.""" - - assert 0 <= nargin <= 2 - - def __init__(self, space): - """Initialize an instance. - - Parameters - ---------- - space : `TensorSpace` - The domain of the operator. - """ - if not isinstance(space, LinearSpace): - raise TypeError('`space` {!r} not a `LinearSpace`'.format(space)) - - if nargin == 1: - domain = space0 = space - dtypes = [space.dtype] - elif nargin == len(space) == 2 and isinstance(space, ProductSpace): - domain = space - space0 = space[0] - dtypes = [space[0].dtype, space[1].dtype] - else: - domain = ProductSpace(space, nargin) - space0 = space - dtypes = [space.dtype, space.dtype] - - dts_out = dtypes_out(name, dtypes) - - if nargout == 1: - range = space0.astype(dts_out[0]) - else: - range = ProductSpace(space0.astype(dts_out[0]), - space0.astype(dts_out[1])) - - linear = name in LINEAR_UFUNCS - Operator.__init__(self, domain=domain, range=range, linear=linear) - - def _call(self, x, out=None): - """Return ``self(x)``.""" - # TODO: use `__array_ufunc__` when implemented on `ProductSpace`, - # or try both - if out is None: - if nargin == 1: - return getattr(x.ufuncs, name)() - else: - return getattr(x[0].ufuncs, name)(*x[1:]) - else: - if nargin == 1: - return getattr(x.ufuncs, name)(out=out) - else: - return getattr(x[0].ufuncs, name)(*x[1:], out=out) - - def __repr__(self): - """Return ``repr(self)``.""" - return '{}({!r})'.format(name, self.domain) - - # Create example (also functions as doctest) - if 'shift' in name or 'bitwise' in name or name == 'invert': - dtype = int - else: - dtype = float - - space = tensor_space(3, dtype=dtype) - if nargin == 1: - vec = space.element([-1, 1, 2]) - arg = '{}'.format(vec) - with np.errstate(all='ignore'): - result = getattr(vec.ufuncs, name)() - else: - vec = space.element([-1, 1, 2]) - vec2 = space.element([3, 4, 5]) - arg = '[{}, {}]'.format(vec, vec2) - with np.errstate(all='ignore'): - result = getattr(vec.ufuncs, name)(vec2) - - if nargout == 2: - result_space = ProductSpace(vec.space, 2) - result = repr(result_space.element(result)) - - examples_docstring = RAW_EXAMPLES_DOCSTRING.format(space=space, name=name, - arg=arg, result=result) - full_docstring = docstring + examples_docstring - - attributes = {"__init__": __init__, - "_call": _call, - "derivative": derivative_factory(name), - "__repr__": __repr__, - "__doc__": full_docstring} - - full_name = name + '_op' - - return type(full_name, (Operator,), attributes) - - -def ufunc_functional_factory(name, nargin, nargout, docstring): - """Create a ufunc `Functional` from a given specification.""" - - assert 0 <= nargin <= 2 - - def __init__(self, field): - """Initialize an instance. - - Parameters - ---------- - field : `Field` - The domain of the functional. - """ - if not isinstance(field, Field): - raise TypeError('`field` {!r} not a `Field`'.format(space)) - - if _is_integer_only_ufunc(name): - raise ValueError("ufunc '{}' only defined with integral dtype" - "".format(name)) - - linear = name in LINEAR_UFUNCS - Functional.__init__(self, space=field, linear=linear) - - def _call(self, x): - """Return ``self(x)``.""" - if nargin == 1: - return getattr(np, name)(x) - else: - return getattr(np, name)(*x) - - def __repr__(self): - """Return ``repr(self)``.""" - return '{}({!r})'.format(name, self.domain) - - # Create example (also functions as doctest) - - if nargin != 1: - raise NotImplementedError('Currently not suppored') - - if nargout != 1: - raise NotImplementedError('Currently not suppored') - - space = RealNumbers() - val = 1.0 - arg = '{}'.format(val) - with np.errstate(all='ignore'): - result = np.float64(getattr(np, name)(val)) - - examples_docstring = RAW_EXAMPLES_DOCSTRING.format(space=space, name=name, - arg=arg, result=result) - full_docstring = docstring + examples_docstring - - attributes = {"__init__": __init__, - "_call": _call, - "gradient": property(gradient_factory(name)), - "__repr__": __repr__, - "__doc__": full_docstring} - - full_name = name + '_op' - - return type(full_name, (Functional,), attributes) - - -RAW_UFUNC_FACTORY_DOCSTRING = """{docstring} -Notes ------ -This creates a `Operator`/`Functional` that applies a ufunc pointwise. - -Examples --------- -{operator_example} -{functional_example} -""" - -RAW_UFUNC_FACTORY_FUNCTIONAL_DOCSTRING = """ -Create functional with domain/range as real numbers: - ->>> func = odl.ufunc_ops.{name}() -""" - -RAW_UFUNC_FACTORY_OPERATOR_DOCSTRING = """ -Create operator that acts pointwise on a `TensorSpace` - ->>> space = odl.rn(3) ->>> op = odl.ufunc_ops.{name}(space) -""" - - -# Create an operator for each ufunc -for name, nargin, nargout, docstring in UFUNCS: - def indirection(name, docstring): - # Indirection is needed since name should be saved but is changed - # in the loop. - - def ufunc_factory(domain=RealNumbers()): - # Create a `Operator` or `Functional` depending on arguments - try: - if isinstance(domain, Field): - return globals()[name + '_func'](domain) - else: - return globals()[name + '_op'](domain) - except KeyError: - raise ValueError('ufunc not available for {}'.format(domain)) - return ufunc_factory - - globals()[name + '_op'] = ufunc_class_factory(name, nargin, - nargout, docstring) - if not _is_integer_only_ufunc(name): - operator_example = RAW_UFUNC_FACTORY_OPERATOR_DOCSTRING.format( - name=name) - else: - operator_example = "" - - if not _is_integer_only_ufunc(name) and nargin == 1 and nargout == 1: - globals()[name + '_func'] = ufunc_functional_factory( - name, nargin, nargout, docstring) - functional_example = RAW_UFUNC_FACTORY_FUNCTIONAL_DOCSTRING.format( - name=name) - else: - functional_example = "" - - ufunc_factory = indirection(name, docstring) - - ufunc_factory.__doc__ = RAW_UFUNC_FACTORY_DOCSTRING.format( - docstring=docstring, name=name, - functional_example=functional_example, - operator_example=operator_example) - - globals()[name] = ufunc_factory - __all__ += (name,) - - -if __name__ == '__main__': - from odl.util.testutils import run_doctests - run_doctests() diff --git a/odl/util/ufuncs.py b/odl/util/ufuncs.py deleted file mode 100644 index 6926e642501..00000000000 --- a/odl/util/ufuncs.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2014-2019 The ODL contributors -# -# This file is part of ODL. -# -# This Source Code Form is subject to the terms of the Mozilla Public License, -# v. 2.0. If a copy of the MPL was not distributed with this file, You can -# obtain one at https://mozilla.org/MPL/2.0/. - -"""Universal functions (ufuncs) for ODL-wrapped arrays. - -These functions are internal and should only be used as methods on -`Tensor`-like classes. - -See `numpy.ufuncs -`_ -for more information. - -Notes ------ -The default implementation of these methods uses the ``__array_ufunc__`` -dispatch machinery `introduced in Numpy 1.13 -`_. -""" - -from __future__ import print_function, division, absolute_import -from builtins import object -import numpy as np -import re - - -__all__ = ('TensorSpaceUfuncs', 'ProductSpaceUfuncs') - - -# Some are ignored since they don't cooperate with dtypes, needs fix -RAW_UFUNCS = ['absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh', - 'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_or', - 'bitwise_xor', 'ceil', 'conj', 'copysign', 'cos', 'cosh', - 'deg2rad', 'divide', 'equal', 'exp', 'exp2', 'expm1', 'floor', - 'floor_divide', 'fmax', 'fmin', 'fmod', 'greater', - 'greater_equal', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', - 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', - 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', - 'logical_or', 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', - 'multiply', 'negative', 'not_equal', 'power', - 'rad2deg', 'reciprocal', 'remainder', 'right_shift', 'rint', - 'sign', 'signbit', 'sin', 'sinh', 'sqrt', 'square', 'subtract', - 'tan', 'tanh', 'true_divide', 'trunc'] -# ,'isreal', 'iscomplex', 'ldexp', 'frexp' - -# Add some standardized information -UFUNCS = [] -for name in RAW_UFUNCS: - ufunc = getattr(np, name) - n_in, n_out = ufunc.nin, ufunc.nout - descr = ufunc.__doc__.splitlines()[2] - # Numpy occasionally uses single ticks for doc, we only use them for links - descr = re.sub('`+', '``', descr) - doc = descr + """ - -See Also --------- -numpy.{} -""".format(name) - UFUNCS.append((name, n_in, n_out, doc)) - -# TODO: add the following reductions (to the CUDA implementation): -# ['var', 'trace', 'tensordot', 'std', 'ptp', 'mean', 'diff', 'cumsum', -# 'cumprod', 'average'] - - -# --- Wrappers for `Tensor` --- # - - -def wrap_ufunc_base(name, n_in, n_out, doc): - """Return ufunc wrapper for implementation-agnostic ufunc classes.""" - ufunc = getattr(np, name) - if n_in == 1: - if n_out == 1: - def wrapper(self, out=None, **kwargs): - if out is None or isinstance(out, (type(self.elem), - type(self.elem.data))): - out = (out,) - - return self.elem.__array_ufunc__( - ufunc, '__call__', self.elem, out=out, **kwargs) - - elif n_out == 2: - def wrapper(self, out=None, **kwargs): - if out is None: - out = (None, None) - - return self.elem.__array_ufunc__( - ufunc, '__call__', self.elem, out=out, **kwargs) - - else: - raise NotImplementedError - - elif n_in == 2: - if n_out == 1: - def wrapper(self, x2, out=None, **kwargs): - return self.elem.__array_ufunc__( - ufunc, '__call__', self.elem, x2, out=(out,), **kwargs) - - else: - raise NotImplementedError - else: - raise NotImplementedError - - wrapper.__name__ = wrapper.__qualname__ = name - wrapper.__doc__ = doc - return wrapper - - -class TensorSpaceUfuncs(object): - - """Ufuncs for `Tensor` objects. - - Internal object, should not be created except in `Tensor`. - """ - - def __init__(self, elem): - """Create ufunc wrapper for elem.""" - self.elem = elem - - # Reductions for backwards compatibility - - def sum(self, axis=None, dtype=None, out=None, keepdims=False): - """Return the sum of ``self``. - - See Also - -------- - numpy.sum - prod - """ - return self.elem.__array_ufunc__( - np.add, 'reduce', self.elem, - axis=axis, dtype=dtype, out=(out,), keepdims=keepdims) - - def prod(self, axis=None, dtype=None, out=None, keepdims=False): - """Return the product of ``self``. - - See Also - -------- - numpy.prod - sum - """ - return self.elem.__array_ufunc__( - np.multiply, 'reduce', self.elem, - axis=axis, dtype=dtype, out=(out,), keepdims=keepdims) - - def min(self, axis=None, dtype=None, out=None, keepdims=False): - """Return the minimum of ``self``. - - See Also - -------- - numpy.amin - max - """ - return self.elem.__array_ufunc__( - np.minimum, 'reduce', self.elem, - axis=axis, dtype=dtype, out=(out,), keepdims=keepdims) - - def max(self, axis=None, dtype=None, out=None, keepdims=False): - """Return the maximum of ``self``. - - See Also - -------- - numpy.amax - min - """ - return self.elem.__array_ufunc__( - np.maximum, 'reduce', self.elem, - axis=axis, dtype=dtype, out=(out,), keepdims=keepdims) - - -# Add ufunc methods to ufunc class -for name, n_in, n_out, doc in UFUNCS: - method = wrap_ufunc_base(name, n_in, n_out, doc) - setattr(TensorSpaceUfuncs, name, method) - - -# --- Wrappers for `ProductSpaceElement` --- # - - -def wrap_ufunc_productspace(name, n_in, n_out, doc): - """Return ufunc wrapper for `ProductSpaceUfuncs`.""" - if n_in == 1: - if n_out == 1: - def wrapper(self, out=None, **kwargs): - if out is None: - result = [getattr(x.ufuncs, name)(**kwargs) - for x in self.elem] - return self.elem.space.element(result) - else: - for x, out_x in zip(self.elem, out): - getattr(x.ufuncs, name)(out=out_x, **kwargs) - return out - - elif n_out == 2: - def wrapper(self, out1=None, out2=None, **kwargs): - if out1 is None: - out1 = self.elem.space.element() - if out2 is None: - out2 = self.elem.space.element() - for x, out1_x, out2_x in zip(self.elem, out1, out2): - getattr(x.ufuncs, name)(out1=out1_x, out2=out2_x, **kwargs) - return out1, out2 - - else: - raise NotImplementedError - - elif n_in == 2: - if n_out == 1: - def wrapper(self, x2, out=None, **kwargs): - if x2 in self.elem.space: - if out is None: - result = [getattr(x.ufuncs, name)(x2p, **kwargs) - for x, x2p in zip(self.elem, x2)] - return self.elem.space.element(result) - else: - for x, x2p, outp in zip(self.elem, x2, out): - getattr(x.ufuncs, name)(x2p, out=outp, **kwargs) - return out - else: - if out is None: - result = [getattr(x.ufuncs, name)(x2, **kwargs) - for x in self.elem] - return self.elem.space.element(result) - else: - for x, outp in zip(self.elem, out): - getattr(x.ufuncs, name)(x2, out=outp, **kwargs) - return out - - else: - raise NotImplementedError - else: - raise NotImplementedError - - wrapper.__name__ = wrapper.__qualname__ = name - wrapper.__doc__ = doc - return wrapper - - -class ProductSpaceUfuncs(object): - - """Ufuncs for `ProductSpaceElement` objects. - - Internal object, should not be created except in `ProductSpaceElement`. - """ - def __init__(self, elem): - """Create ufunc wrapper for ``elem``.""" - self.elem = elem - - def sum(self): - """Return the sum of ``self``. - - See Also - -------- - numpy.sum - prod - """ - results = [x.ufuncs.sum() for x in self.elem] - return np.sum(results) - - def prod(self): - """Return the product of ``self``. - - See Also - -------- - numpy.prod - sum - """ - results = [x.ufuncs.prod() for x in self.elem] - return np.prod(results) - - def min(self): - """Return the minimum of ``self``. - - See Also - -------- - numpy.amin - max - """ - results = [x.ufuncs.min() for x in self.elem] - return np.min(results) - - def max(self): - """Return the maximum of ``self``. - - See Also - -------- - numpy.amax - min - """ - results = [x.ufuncs.max() for x in self.elem] - return np.max(results) - - -# Add ufunc methods to ufunc class -for name, n_in, n_out, doc in UFUNCS: - method = wrap_ufunc_productspace(name, n_in, n_out, doc) - setattr(ProductSpaceUfuncs, name, method) diff --git a/setup.cfg b/setup.cfg index 6434094a9d7..ffe49cae52f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,8 +41,10 @@ install_requires = setuptools >=65.6 future >=0.16 packaging >=17.0 - numpy >=2.1, <2.2 - scipy >=1.1 + array-api-compat >=1.12 + numpy >=2.3 + scipy >=1.15 + python_requires = >=3.7 tests_require = pytest >=5.4.0 ; python_version >= "3" @@ -73,12 +75,12 @@ all = coveralls matplotlib pyfftw - pywavelets >=1.0.1 - scikit-image - + pywavelets >=1.8 + scikit-image >= 0.25 + astra >=2.4 [options.entry_points] -pytest11 = odl_plugins=odl.util.pytest_config +pytest11 = odl_plugins=odl.core.util.pytest_config [bdist_wheel] universal = 1